/xen/xen/arch/x86/
physdev.c
    193  if ( eoi.irq >= currd->nr_pirqs )   [in do_physdev_op()]
    195  spin_lock(&currd->event_lock);   [in do_physdev_op()]
    201  if ( currd->arch.auto_unmask )   [in do_physdev_op()]
    203  if ( is_pv_domain(currd) || domain_pirq_to_irq(currd, eoi.irq) > 0 )   [in do_physdev_op()]
    205  if ( is_hvm_domain(currd) &&   [in do_physdev_op()]
    217  spin_unlock(&currd->event_lock);   [in do_physdev_op()]
    258  currd->arch.auto_unmask = 1;   [in do_physdev_op()]
    266  ret = pirq_guest_unmask(currd);   [in do_physdev_op()]
    280  if ( is_hvm_domain(currd) &&   [in do_physdev_op()]
    297  if ( pirq_shared(currd, irq) )   [in do_physdev_op()]
    [all …]
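Throughout these handlers, currd is the usual Xen shorthand for caching the calling vCPU's domain (current->domain) in a local, so current is not re-evaluated on every use. As a rough illustration of the PHYSDEVOP_eoi pattern the hits above come from, here is a minimal sketch; the helper name is hypothetical and the actual EOI/unmask work is elided:

    /* Hypothetical helper sketching the pattern above: bounds-check the
     * guest-supplied PIRQ against the calling domain, then operate on the
     * domain's PIRQ state under its event_lock. */
    static int physdev_eoi_sketch(unsigned int pirq)
    {
        struct domain *currd = current->domain;   /* the calling domain */

        if ( pirq >= currd->nr_pirqs )            /* cf. line 193 */
            return -EINVAL;

        spin_lock(&currd->event_lock);            /* cf. line 195 */
        /* ... look up the struct pirq and perform the EOI / unmask ... */
        spin_unlock(&currd->event_lock);          /* cf. line 217 */

        return 0;
    }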
domctl.c
    332  struct domain *currd = curr->domain;   [in arch_do_domctl(), local]
    496  if ( (d == currd) || /* no domain_pause() */   [in arch_do_domctl()]
    522  if ( (d == currd) || /* no domain_pause() */   [in arch_do_domctl()]
    562  if ( (d == currd) || /* no domain_pause() */   [in arch_do_domctl()]
    790  if ( ret && is_hardware_domain(currd) )   [in arch_do_domctl()]
    855  if ( d == currd ) /* no domain_pause() */   [in arch_do_domctl()]
    897  if ( d == currd ) /* no domain_pause() */   [in arch_do_domctl()]
    912  if ( d == currd ) /* no domain_pause() */   [in arch_do_domctl()]
    983  currd->domain_id, v);   [in arch_do_domctl()]
   1184  if ( d == currd )   [in arch_do_domctl()]
    [all …]
time.c
   1113  const struct domain *currd = current->domain;   [in rtc_guest_read(), local]
   1125  data = currd->arch.cmos_idx;   [in rtc_guest_read()]
   1129  if ( !ioports_access_permitted(currd, RTC_PORT(0), RTC_PORT(1)) )   [in rtc_guest_read()]
   1132  outb(currd->arch.cmos_idx & 0x7f, RTC_PORT(0));   [in rtc_guest_read()]
   1146  struct domain *currd = current->domain;   [in rtc_guest_write(), local]
   1159  currd->arch.cmos_idx = data;   [in rtc_guest_write()]
   1163  if ( !ioports_access_permitted(currd, RTC_PORT(0), RTC_PORT(1)) )   [in rtc_guest_write()]
   1168  hook(currd->arch.cmos_idx & 0x7f, data);   [in rtc_guest_write()]
   1171  outb(currd->arch.cmos_idx & 0x7f, RTC_PORT(0));   [in rtc_guest_write()]
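The rtc_guest_read()/rtc_guest_write() hits show the usual split between per-domain soft state and real hardware: the CMOS index register is shadowed in currd->arch.cmos_idx, while the physical RTC ports are only driven when the domain has been granted access to them. A minimal sketch of the read side, assuming the port decode has already selected the data port (helper name hypothetical):

    /* Sketch of the data-port read path: forward to hardware only for
     * domains permitted to access RTC_PORT(0)/RTC_PORT(1); otherwise the
     * read yields all-ones. */
    static unsigned int rtc_data_read_sketch(void)
    {
        const struct domain *currd = current->domain;
        unsigned int data = ~0;

        if ( ioports_access_permitted(currd, RTC_PORT(0), RTC_PORT(1)) )
        {
            outb(currd->arch.cmos_idx & 0x7f, RTC_PORT(0));  /* cf. line 1132 */
            data = inb(RTC_PORT(1));
        }

        return data;
    }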
mm.c
   3356  struct domain *currd = curr->domain;   [in do_mmuext_op(), local]
   3421  if ( is_hvm_domain(currd) )   [in do_mmuext_op()]
   3507  if ( unlikely(pg_owner != currd) )   [in do_mmuext_op()]
   3578  if ( unlikely(currd != pg_owner) )   [in do_mmuext_op()]
   3589  if ( unlikely(currd != pg_owner) )   [in do_mmuext_op()]
   3653  if ( likely(currd == pg_owner) )   [in do_mmuext_op()]
   3660  if ( unlikely(currd != pg_owner) )   [in do_mmuext_op()]
   3671  if ( unlikely(currd != pg_owner) )   [in do_mmuext_op()]
   3689  if ( likely(currd == pg_owner) )   [in do_mmuext_op()]
   3696  if ( unlikely(currd != pg_owner) )   [in do_mmuext_op()]
    [all …]
msi.c
    951  struct domain *currd = current->domain;   [in msix_capability_init(), local]
    952  struct domain *d = dev->domain ?: currd;   [in msix_capability_init()]
    954  if ( !is_hardware_domain(currd) || d != currd )   [in msix_capability_init()]
    956  is_hardware_domain(currd)   [in msix_capability_init()]
    962  (!is_hardware_domain(currd) || domain_tot_pages(d)) )   [in msix_capability_init()]
irq.c
    298  struct domain *currd = current->domain;   [in create_irq(), local]
    300  ret = irq_permit_access(currd, irq);   [in create_irq()]
    304  currd, irq, ret);   [in create_irq()]
    306  desc->arch.creator_domid = currd->domain_id;   [in create_irq()]
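create_irq() uses currd to give the domain that allocated a dynamic IRQ access to it and to record that domain as the creator. A sketch of that tail (the log message text is illustrative, not the exact string at line 304):

    /* Sketch: grant the allocating domain access to the new IRQ and note
     * who created it, so it can be revoked and cleaned up later. */
    static int irq_created_by_sketch(struct irq_desc *desc, int irq)
    {
        struct domain *currd = current->domain;
        int ret = irq_permit_access(currd, irq);           /* cf. line 300 */

        if ( ret )
            printk(XENLOG_G_ERR "%pd: cannot access IRQ%d: %d\n",
                   currd, irq, ret);                        /* cf. line 304 */
        else
            desc->arch.creator_domid = currd->domain_id;   /* cf. line 306 */

        return ret;
    }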
/xen/xen/arch/x86/pv/
emul-priv-op.c
    224  if ( !is_hardware_domain(currd) )   [in pci_cfg_ok()]
    290  sub_data = currd->arch.pci_cf8;   [in guest_io_read()]
    413  currd->arch.pci_cf8 = data;   [in guest_io_write()]
    734  if ( !is_pv_32bit_domain(currd) )   [in read_cr()]
    785  gfn = !is_pv_32bit_domain(currd)   [in write_cr()]
    844  if ( is_pv_32bit_domain(currd) )   [in read_msr()]
    850  if ( is_pv_32bit_domain(currd) )   [in read_msr()]
    856  if ( is_pv_32bit_domain(currd) )   [in read_msr()]
    872  if ( is_pv_32bit_domain(currd) )   [in read_msr()]
   1245  struct domain *currd = curr->domain;   [in pv_emulate_privileged_op(), local]
    [all …]
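The pci_cf8 hits reflect how PV privileged-op emulation virtualises PCI configuration space: a 4-byte write to port 0xcf8 is latched per domain rather than forwarded, and later data-port accesses combine that latched address with the pci_cfg_ok() check before anything reaches hardware. A simplified, hypothetical sketch of the write side only (the real guest_io_write() handles many more ports and widths):

    /* Sketch: latch PCI config-address writes in the calling domain.
     * Helper name is hypothetical; only the 0xcf8 case is shown. */
    static void guest_pio_write_sketch(unsigned int port, unsigned int bytes,
                                       uint32_t data)
    {
        struct domain *currd = current->domain;

        if ( port == 0xcf8 && bytes == 4 )
            currd->arch.pci_cf8 = data;        /* cf. line 413 */
        /* Data-port (0xcfc) accesses later consult pci_cfg_ok(currd, ...)
         * together with the latched currd->arch.pci_cf8 (cf. lines 224, 290). */
    }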
grant_table.c
     51  struct domain *currd = curr->domain;   [in create_grant_pv_mapping(), local]
     58  nl1e = adjust_guest_l1e(nl1e, currd);   [in create_grant_pv_mapping()]
     77  page = get_page_from_mfn(gl1mfn, currd);   [in create_grant_pv_mapping()]
    122  put_page_from_l1e(ol1e, currd);   [in create_grant_pv_mapping()]
    141  struct domain *currd = curr->domain;   [in steal_linear_address(), local]
    147  ASSERT(is_pv_domain(currd));   [in steal_linear_address()]
    157  page = get_page_from_mfn(gl1mfn, currd);   [in steal_linear_address()]
    193  struct domain *currd = curr->domain;   [in replace_grant_pv_mapping(), local]
    205  if ( !is_pv_32bit_domain(currd) )   [in replace_grant_pv_mapping()]
    241  if ( is_pv_32bit_domain(currd) )   [in replace_grant_pv_mapping()]
    [all …]
mm.c
     87  struct domain *currd = curr->domain;   [in pv_map_ldt_shadow_page(), local]
    106  if ( is_pv_32bit_domain(currd) )   [in pv_map_ldt_shadow_page()]
    113  page = get_page_from_gfn(currd, l1e_get_pfn(gl1e), NULL, P2M_ALLOC);   [in pv_map_ldt_shadow_page()]
descriptor-tables.c
    190  struct domain *currd = current->domain;   [in do_update_descriptor(), local]
    198  if ( !IS_ALIGNED(gaddr, sizeof(d)) || !check_descriptor(currd, &d) )   [in do_update_descriptor()]
    201  page = get_page_from_gfn(currd, gfn_x(gfn), NULL, P2M_ALLOC);   [in do_update_descriptor()]
    220  paging_mark_dirty(currd, mfn);   [in do_update_descriptor()]
ro-page-fault.c
    339  const struct domain *currd = current->domain;   [in pv_ro_page_fault(), local]
    340  unsigned int addr_size = is_pv_32bit_domain(currd) ? 32 : BITS_PER_LONG;   [in pv_ro_page_fault()]
    343  .cpuid = currd->arch.cpuid,   [in pv_ro_page_fault()]
    358  mmio_ro = is_hardware_domain(currd) &&   [in pv_ro_page_fault()]
/xen/xen/common/
argo.c
   1322  ASSERT(currd == current->domain);   [in fill_ring_data()]
   1554  if ( unlikely(!currd->argo) )   [in unregister_ring()]
   1567  currd->argo->ring_count--;   [in unregister_ring()]
   1610  register_ring(struct domain *currd,   [in register_ring(), argument]
   1699  if ( !currd->argo )   [in register_ring()]
   1828  currd->argo->ring_count++;   [in register_ring()]
   1906  notify(struct domain *currd,   [in notify(), argument]
   1917  if ( !currd->argo )   [in notify()]
   1924  notify_check_pending(currd);   [in notify()]
   2095  rc = xsm_argo_enable(currd);   [in do_argo_op()]
    [all …]
memory.c
    493  const struct domain *currd = current->domain;   [in propagate_node(), local]
    501  if ( is_hardware_domain(currd) || is_control_domain(currd) )   [in propagate_node()]
   1092  struct domain *d, *currd = current->domain;   [in acquire_resource(), local]
   1107  if ( paging_mode_translate(currd) && !is_hardware_domain(currd) )   [in acquire_resource()]
   1156  if ( !paging_mode_translate(currd) )   [in acquire_resource()]
   1171  rc = set_foreign_p2m_entry(currd, gfn_list[i],   [in acquire_resource()]
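In acquire_resource(), currd determines how the acquired frames are returned: in the version these hits come from, a translated caller other than the hardware domain is rejected, a PV caller simply gets the raw MFNs copied back, and a translated hardware domain has each frame installed in its own p2m. A condensed, hypothetical sketch of that decision for a single frame (resource lookup and guest-array copying omitted):

    /* Condensed sketch of the flow implied above. */
    static int acquire_one_frame_sketch(unsigned long gfn, mfn_t mfn)
    {
        struct domain *currd = current->domain;

        if ( paging_mode_translate(currd) && !is_hardware_domain(currd) )
            return -EACCES;                     /* cf. line 1107 */

        if ( !paging_mode_translate(currd) )    /* cf. line 1156 */
            return 0;  /* PV caller: the MFN itself is copied back. */

        /* Translated (hardware) domain: map the frame at the requested GFN. */
        return set_foreign_p2m_entry(currd, gfn, mfn);   /* cf. line 1171 */
    }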
domain.c
   1541  struct domain *currd = current->domain;   [in do_vm_assist(), local]
   1542  const unsigned long valid = arch_vm_assist_valid_mask(currd);   [in do_vm_assist()]
   1550  set_bit(type, &currd->vm_assist);   [in do_vm_assist()]
   1554  clear_bit(type, &currd->vm_assist);   [in do_vm_assist()]
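do_vm_assist() is one of the simplest currd users: the hypercall just sets or clears a bit in the calling domain's vm_assist mask, restricted to the bits the architecture declares valid for that domain. A sketch close to what the hits suggest:

    /* Sketch of the do_vm_assist() logic implied above. */
    static long vm_assist_sketch(unsigned int cmd, unsigned int type)
    {
        struct domain *currd = current->domain;
        const unsigned long valid = arch_vm_assist_valid_mask(currd);  /* 1542 */

        if ( type >= BITS_PER_LONG || !test_bit(type, &valid) )
            return -EINVAL;

        switch ( cmd )
        {
        case VMASST_CMD_enable:
            set_bit(type, &currd->vm_assist);    /* cf. line 1550 */
            return 0;
        case VMASST_CMD_disable:
            clear_bit(type, &currd->vm_assist);  /* cf. line 1554 */
            return 0;
        }

        return -ENOSYS;
    }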
grant_table.c
    577  const struct domain *currd = curr->domain;   [in steal_maptrack_handle(), local]
    581  first = i = get_random() % currd->max_vcpus;   [in steal_maptrack_handle()]
    584  if ( currd->vcpu[i] )   [in steal_maptrack_handle()]
    588  handle = _get_maptrack_handle(t, currd->vcpu[i]);   [in steal_maptrack_handle()]
    597  if ( i == currd->max_vcpus )   [in steal_maptrack_handle()]
    609  struct domain *currd = current->domain;   [in put_maptrack_handle(), local]
    617  v = currd->vcpu[maptrack_entry(t, handle).vcpu];   [in put_maptrack_handle()]
   3069  struct domain *currd = current->domain;   [in gnttab_set_version(), local]
   3070  struct grant_table *gt = currd->grant_table;   [in gnttab_set_version()]
   3114  res = gnttab_populate_status_frames(currd, gt, nr_grant_frames(gt));   [in gnttab_set_version()]
    [all …]
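steal_maptrack_handle() walks the current domain's vCPUs rather than touching another domain: starting from a random vCPU, it tries each per-vCPU maptrack free list until it finds a spare handle or wraps back around. Roughly, as a hypothetical sketch built from the hits above:

    /* Sketch of the round-robin search implied above. */
    static grant_handle_t steal_maptrack_sketch(struct grant_table *t,
                                                const struct vcpu *curr)
    {
        const struct domain *currd = curr->domain;
        unsigned int first, i;

        first = i = get_random() % currd->max_vcpus;      /* cf. line 581 */
        do {
            if ( currd->vcpu[i] )               /* skip uninitialised slots */
            {
                grant_handle_t handle =
                    _get_maptrack_handle(t, currd->vcpu[i]);  /* cf. line 588 */

                if ( handle != INVALID_MAPTRACK_HANDLE )
                    return handle;
            }
            if ( ++i == currd->max_vcpus )                /* cf. line 597 */
                i = 0;
        } while ( i != first );

        return INVALID_MAPTRACK_HANDLE;   /* every per-vCPU list was empty */
    }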
/xen/xen/arch/x86/hvm/
hypercall.c
     85  const struct domain *currd = curr->domain;   [in hvm_physdev_op(), local]
     94  if ( !has_pirq(currd) )   [in hvm_physdev_op()]
     99  if ( !has_vpci(currd) || !is_hardware_domain(currd) )   [in hvm_physdev_op()]
    168  struct domain *currd = curr->domain;   [in hvm_hypercall(), local]
    180  if ( currd->arch.monitor.guest_request_userspace_enabled &&   [in hvm_hypercall()]
    195  if ( (eax & 0x80000000) && is_viridian_domain(currd) )   [in hvm_hypercall()]
    332  if ( unlikely(currd->arch.hvm.qemu_mapcache_invalidate) &&   [in hvm_hypercall()]
    333  test_and_clear_bool(currd->arch.hvm.qemu_mapcache_invalidate) )   [in hvm_hypercall()]
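hvm_physdev_op() filters which physdev sub-ops an HVM guest can reach based on properties of the calling domain: PIRQ-related operations require the domain to have PIRQ support at all, and the MMCFG reservation op is limited to a hardware domain using vPCI. A sketch of that gate (only two representative sub-ops shown; helper name hypothetical):

    /* Sketch of the per-domain filtering implied by the hits above. */
    static long hvm_physdev_gate_sketch(int cmd)
    {
        const struct domain *currd = current->domain;

        switch ( cmd )
        {
        case PHYSDEVOP_eoi:
            if ( !has_pirq(currd) )                        /* cf. line 94 */
                return -ENOSYS;
            break;

        case PHYSDEVOP_pci_mmcfg_reserved:
            if ( !has_vpci(currd) || !is_hardware_domain(currd) )  /* line 99 */
                return -ENOSYS;
            break;
        }

        return 0;  /* allowed: fall through to the common do_physdev_op() */
    }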
vioapic.c
    172  struct domain *currd = current->domain;   [in vioapic_hwdom_map_gsi(), local]
    179  ASSERT(is_hardware_domain(currd));   [in vioapic_hwdom_map_gsi()]
    192  ret = allocate_and_map_gsi_pirq(currd, pirq, &pirq);   [in vioapic_hwdom_map_gsi()]
    201  ret = pt_irq_create_bind(currd, &pt_irq_bind);   [in vioapic_hwdom_map_gsi()]
    206  spin_lock(&currd->event_lock);   [in vioapic_hwdom_map_gsi()]
    207  unmap_domain_pirq(currd, pirq);   [in vioapic_hwdom_map_gsi()]
    208  spin_unlock(&currd->event_lock);   [in vioapic_hwdom_map_gsi()]
hvm.c
   1743  struct domain *currd = curr->domain;   [in hvm_hap_nested_page_fault(), local]
   1755  if ( nestedhvm_enabled(currd)   [in hvm_hap_nested_page_fault()]
   1811  hostp2m = p2m_get_hostp2m(currd);   [in hvm_hap_nested_page_fault()]
   1816  if ( altp2m_active(currd) )   [in hvm_hap_nested_page_fault()]
   1960  paging_mark_pfn_dirty(currd, _pfn(gfn));   [in hvm_hap_nested_page_fault()]
   2000  p2m_mem_paging_populate(currd, _gfn(gfn));   [in hvm_hap_nested_page_fault()]
   2009  currd, gfn);   [in hvm_hap_nested_page_fault()]
   2371  struct domain *currd = curr->domain;   [in hvm_set_cr3(), local]
   2419  domain_crash(currd);   [in hvm_set_cr3()]
   3744  struct domain *currd = curr->domain;   [in hvm_descriptor_access_intercept(), local]
    [all …]
emulate.c
    161  struct domain *currd = curr->domain;   [in hvmemul_do_io(), local]
    204  domain_crash(currd);   [in hvmemul_do_io()]
    297  get_gfn_query_unlocked(currd, gmfn, &p2mt);   [in hvmemul_do_io()]
    303  s = p2m_get_ioreq_server(currd, &flags);   [in hvmemul_do_io()]
    326  s = hvm_select_ioreq_server(currd, &p);   [in hvmemul_do_io()]
    337  if ( rc != X86EMUL_RETRY || currd->is_shutting_down )   [in hvmemul_do_io()]
    711  struct domain *currd = current->domain;   [in hvmemul_unmap_linear_addr(), local]
    725  paging_mark_dirty(currd, *mfn);   [in hvmemul_unmap_linear_addr()]
ioreq.c
    733  struct domain *currd = current->domain;   [in hvm_ioreq_server_init(), local]
    739  get_knownalive_domain(currd);   [in hvm_ioreq_server_init()]
    740  s->emulator = currd;   [in hvm_ioreq_server_init()]
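hvm_ioreq_server_init() records which domain provides the emulation: the calling domain takes a liveness reference and is stored as the server's emulator, so later operations can be checked against it. A sketch of just that fragment (helper name hypothetical):

    /* Sketch: the domain creating an ioreq server becomes its emulator and
     * is pinned with a "known alive" reference (cf. lines 739-740). */
    static void ioreq_server_bind_emulator_sketch(struct hvm_ioreq_server *s)
    {
        struct domain *currd = current->domain;

        get_knownalive_domain(currd);
        s->emulator = currd;
    }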
/xen/xen/xsm/
silo.c
     30  const struct domain *currd = current->domain;   [in silo_mode_dom_check(), local]
     32  return (is_control_domain(currd) || is_control_domain(ldom) ||   [in silo_mode_dom_check()]
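The SILO check is almost fully visible in the two hits: communication is allowed only when the caller or one of the two endpoints is a control domain, or when both endpoints are the same domain. A sketch completing the truncated return statement (the trailing condition is an assumption based on SILO's documented policy, not shown above):

    /* Sketch completing the fragment at line 32. */
    static bool silo_dom_check_sketch(const struct domain *ldom,
                                      const struct domain *rdom)
    {
        const struct domain *currd = current->domain;      /* cf. line 30 */

        return (is_control_domain(currd) || is_control_domain(ldom) ||
                is_control_domain(rdom) || ldom == rdom);  /* cf. line 32 */
    }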
/xen/xen/arch/x86/hvm/viridian/
viridian.c
    524  struct domain *currd = curr->domain;   [in viridian_hypercall(), local]
    552  ASSERT(is_viridian_domain(currd));   [in viridian_hypercall()]
    665  for_each_vcpu ( currd, v )   [in viridian_hypercall()]
/xen/xen/arch/x86/hvm/vmx/
vmx.c
   3669  struct domain *currd = v->domain;   [in vmx_vmexit_handler(), local]
   4074  if ( !currd->arch.cpuid->extd.rdtscp )   [in vmx_vmexit_handler()]
   4386  struct domain *currd = curr->domain;   [in vmx_vmenter_helper(), local]
   4435  struct ept_data *ept = &p2m_get_hostp2m(currd)->ept;   [in vmx_vmenter_helper()]
   4445  inv += 1 + nestedhvm_enabled(currd);   [in vmx_vmenter_helper()]
   4449  if ( altp2m_active(currd) )   [in vmx_vmenter_helper()]
   4455  if ( currd->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )   [in vmx_vmenter_helper()]
   4458  ept = &currd->arch.altp2m_p2m[i]->ept;   [in vmx_vmenter_helper()]
/xen/xen/arch/x86/hvm/svm/
svm.c
   2164  const struct domain *currd = curr->domain;   [in svm_vmexit_do_rdtsc(), local]
   2167  if ( rdtscp && !currd->arch.cpuid->extd.rdtscp )   [in svm_vmexit_do_rdtsc()]
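The SVM RDTSC exit handler mirrors the VMX check at vmx.c line 4074: if the guest executed RDTSCP but the domain's CPUID policy does not expose RDTSCP, the instruction is refused rather than emulated. A hedged sketch of that guard (injecting #UD is an assumption about the elided branch body; helper name hypothetical):

    /* Sketch: refuse RDTSCP emulation when the domain's CPUID policy does
     * not advertise the feature; the #UD injection is assumed, not shown
     * in the hits above. */
    static void rdtsc_policy_guard_sketch(struct vcpu *curr, bool rdtscp)
    {
        const struct domain *currd = curr->domain;

        if ( rdtscp && !currd->arch.cpuid->extd.rdtscp )   /* cf. line 2167 */
        {
            hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
            return;
        }

        /* ... otherwise emulate RDTSC / RDTSCP normally ... */
    }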