Search hits for the per-domain/per-vcpu "hvm" state fields across the Xen
tree. Each entry gives the source line number, the matching line, and the
enclosing function; "[all …]" marks match lists truncated by the search tool.

/xen/xen/arch/x86/hvm/vmx/

vmx.c
   611  if ( !v->arch.hvm.flag_dr_dirty )  in vmx_save_dr()
   615  v->arch.hvm.flag_dr_dirty = 0;  in vmx_save_dr()
   630  if ( v->arch.hvm.flag_dr_dirty )  in __restore_debug_registers()
   633  v->arch.hvm.flag_dr_dirty = 1;  in __restore_debug_registers()
   705  v->arch.hvm.guest_cr[3] = cr3;  in vmx_restore_cr0_cr3()
  1473  v->arch.hvm.hw_cr[0] =  in vmx_update_guest_cr()
  1488  v->arch.hvm.hw_cr[4] |= v->arch.hvm.guest_cr[4];  in vmx_update_guest_cr()
  1542  v->arch.hvm.vmx.cr4_host_mask |= v->arch.hvm.vmx.vmx_realmode ?  in vmx_update_guest_cr()
  3684  v->arch.hvm.guest_cr[4] &= v->arch.hvm.vmx.cr4_host_mask;  in vmx_vmexit_handler()
  3685  v->arch.hvm.guest_cr[4] |= (v->arch.hvm.hw_cr[4] &  in vmx_vmexit_handler()
  [all …]

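The flag_dr_dirty hits trace Xen's lazy debug-register switching: %dr
accesses stay intercepted until the guest actually touches them, and state
is written back only when the flag says hardware holds live guest values. A
condensed sketch of that shape (bodies paraphrased, not the literal Xen
code):

    static void vmx_save_dr(struct vcpu *v)
    {
        if ( !v->arch.hvm.flag_dr_dirty )
            return;             /* guest never touched %dr: nothing to save */

        /* Re-arm the MOV-DR intercept, then pull DR0-DR7 out of hardware. */
        v->arch.hvm.flag_dr_dirty = 0;
        /* ... save hardware %dr state into the vcpu structure ... */
    }

    static void __restore_debug_registers(struct vcpu *v)
    {
        if ( v->arch.hvm.flag_dr_dirty )
            return;             /* already live in hardware */

        v->arch.hvm.flag_dr_dirty = 1;
        /* ... load saved %dr state and drop the MOV-DR intercept ... */
    }
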
realmode.c
   115  if ( curr->arch.hvm.guest_cr[0] & X86_CR0_PE )  in vmx_realmode_emulate_one()
   156  struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;  in vmx_realmode()
   168  if ( !(curr->arch.hvm.guest_cr[0] & X86_CR0_PE) &&  in vmx_realmode()
   175  curr->arch.hvm.vmx.vmx_emulate = 1;  in vmx_realmode()
   176  while ( curr->arch.hvm.vmx.vmx_emulate &&  in vmx_realmode()
   185  curr->arch.hvm.vmx.vmx_realmode &&  in vmx_realmode()
   195  if ( curr->arch.hvm.vmx.vmx_realmode )  in vmx_realmode()
   196  curr->arch.hvm.vmx.vmx_emulate =  in vmx_realmode()
   199  curr->arch.hvm.vmx.vmx_emulate =  in vmx_realmode()
   206  curr->arch.hvm.vmx.vmx_emulate = 1;  in vmx_realmode()
  [all …]

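realmode.c exists because VMX without "unrestricted guest" support cannot
execute real-mode code, so Xen steps the guest through the instruction
emulator until it enables CR0.PE (hit 168). A sketch of the loop implied by
hits 175-199 (stop conditions paraphrased; hvmemul_ctxt is the emulation
context set up earlier in the function, and vm86_segment_mask is taken from
the asm-offsets.c hits further down):

    curr->arch.hvm.vmx.vmx_emulate = 1;
    while ( curr->arch.hvm.vmx.vmx_emulate &&
            !softirq_pending(smp_processor_id()) )
    {
        vmx_realmode_emulate_one(&hvmemul_ctxt);

        if ( curr->arch.hvm.vmx.vmx_realmode )
            /* Still in real mode: keep emulating only while some segment
             * cannot be faithfully represented in the VMCS. */
            curr->arch.hvm.vmx.vmx_emulate =
                (curr->arch.hvm.vmx.vm86_segment_mask != 0);
    }
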
vmcs.c
   157  if ( d->arch.hvm.vmx.exec_sp == val )  in parse_ept_param_runtime()
   160  d->arch.hvm.vmx.exec_sp = val;  in parse_ept_param_runtime()
   582  struct vmx_vcpu *vmx = &v->arch.hvm.vmx;  in __vmx_clear_vmcs()
   605  int cpu = v->arch.hvm.vmx.active_cpu;  in vmx_clear_vmcs()
   617  if ( v->arch.hvm.vmx.active_cpu == -1 )  in vmx_load_vmcs()
   625  __vmptrld(v->arch.hvm.vmx.vmcs_pa);  in vmx_load_vmcs()
  1233  v->arch.hvm.vmx.cr4_host_mask = ~0UL;  in construct_vmcs()
  1297  v->arch.hvm.guest_cr[4] = 0;  in construct_vmcs()
  1410  struct vmx_vcpu *vmx = &v->arch.hvm.vmx;  in vmx_add_msr()
  1609  if ( !v->arch.hvm.vmx.pml_pg )  in vmx_vcpu_enable_pml()
  [all …]

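The vmx_load_vmcs() hits (617/625) outline VMCS residency: a VMCS is bound
to at most one physical CPU at a time (active_cpu, -1 when unbound) and must
be VMPTRLDed on that CPU before any VMREAD/VMWRITE can touch it. Sketch with
the list bookkeeping paraphrased:

    static void vmx_load_vmcs(struct vcpu *v)
    {
        if ( v->arch.hvm.vmx.active_cpu == -1 )
        {
            /* First load anywhere: bind the VMCS to this pcpu. */
            v->arch.hvm.vmx.active_cpu = smp_processor_id();
            /* ... and link it into this pcpu's active-VMCS list ... */
        }

        ASSERT(v->arch.hvm.vmx.active_cpu == smp_processor_id());
        __vmptrld(v->arch.hvm.vmx.vmcs_pa);  /* make it current */
    }
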
intr.c
   110  if ( !(v->arch.hvm.vmx.exec_control & ctl) )  in vmx_enable_intr_window()
   112  v->arch.hvm.vmx.exec_control |= ctl;  in vmx_enable_intr_window()
   229  const unsigned int n = ARRAY_SIZE(v->arch.hvm.vmx.eoi_exit_bitmap);  in vmx_sync_exit_bitmap()
   232  while ( (i = find_first_bit(&v->arch.hvm.vmx.eoi_exitmap_changed, n)) < n )  in vmx_sync_exit_bitmap()
   234  clear_bit(i, &v->arch.hvm.vmx.eoi_exitmap_changed);  in vmx_sync_exit_bitmap()
   235  __vmwrite(EOI_EXIT_BITMAP(i), v->arch.hvm.vmx.eoi_exit_bitmap[i]);  in vmx_sync_exit_bitmap()
   248  if ( unlikely(v->arch.hvm.single_step) )  in vmx_intr_assist()
   250  v->arch.hvm.vmx.exec_control |= CPU_BASED_MONITOR_TRAP_FLAG;  in vmx_intr_assist()
   261  if ( unlikely(v->domain->arch.hvm.mem_sharing.block_interrupts) )  in vmx_intr_assist()
   377  pi_desc = &v->arch.hvm.vmx.pi_desc;  in vmx_intr_assist()

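Hits 229-235 reassemble almost verbatim into the dirty-word flush used for
EOI-induced exits with virtual interrupt delivery: only bitmap words flagged
in eoi_exitmap_changed are rewritten into the VMCS. Reconstructed from the
fragments above (local declarations filled in; a sketch, not the
authoritative source):

    static void vmx_sync_exit_bitmap(struct vcpu *v)
    {
        const unsigned int n = ARRAY_SIZE(v->arch.hvm.vmx.eoi_exit_bitmap);
        unsigned int i;

        while ( (i = find_first_bit(&v->arch.hvm.vmx.eoi_exitmap_changed,
                                    n)) < n )
        {
            clear_bit(i, &v->arch.hvm.vmx.eoi_exitmap_changed);
            __vmwrite(EOI_EXIT_BITMAP(i), v->arch.hvm.vmx.eoi_exit_bitmap[i]);
        }
    }

The vmx_enable_intr_window() hits (110/112) follow the same
read-modify-write discipline on exec_control: the VMCS field is rewritten
only when the cached copy shows the bit is not yet set.
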
vvmx.c
    77  if ( !d->arch.hvm.vmx.exec_sp )  in nvmx_vcpu_initialise()
    79  d->arch.hvm.vmx.exec_sp = true;  in nvmx_vcpu_initialise()
   182  if ( v->arch.hvm.vmx.vmread_bitmap )  in nvmx_vcpu_destroy()
   185  v->arch.hvm.vmx.vmread_bitmap = NULL;  in nvmx_vcpu_destroy()
   187  if ( v->arch.hvm.vmx.vmwrite_bitmap )  in nvmx_vcpu_destroy()
   190  v->arch.hvm.vmx.vmwrite_bitmap = NULL;  in nvmx_vcpu_destroy()
   854  v->arch.hvm.vmx.vmcs_shadow_maddr = 0;  in nvmx_purge_vvmcs()
  1582  __vmpclear(v->arch.hvm.vmx.vmcs_pa);  in nvmx_handle_vmxon()
  1585  __vmptrld(v->arch.hvm.vmx.vmcs_pa);  in nvmx_handle_vmxon()
  1586  v->arch.hvm.vmx.launched = 0;  in nvmx_handle_vmxon()
  [all …]

/xen/tools/xl/

xl_sxp.c
    96  libxl_defbool_to_string(b_info->u.hvm.acpi));  in printf_info_sexp()
    99  libxl_defbool_to_string(b_info->u.hvm.viridian));  in printf_info_sexp()
   101  libxl_defbool_to_string(b_info->u.hvm.hpet));  in printf_info_sexp()
   103  libxl_defbool_to_string(b_info->u.hvm.vpt_align));  in printf_info_sexp()
   112  libxl_defbool_to_string(b_info->u.hvm.vnc.enable));  in printf_info_sexp()
   117  fprintf(fh, "\t\t\t(keymap %s)\n", b_info->u.hvm.keymap);  in printf_info_sexp()
   119  libxl_defbool_to_string(b_info->u.hvm.sdl.enable));  in printf_info_sexp()
   121  libxl_defbool_to_string(b_info->u.hvm.sdl.opengl));  in printf_info_sexp()
   123  libxl_defbool_to_string(b_info->u.hvm.nographic));  in printf_info_sexp()
   137  fprintf(fh, "\t\t\t(serial %s)\n", b_info->u.hvm.serial);  in printf_info_sexp()
  [all …]

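Most of these hits are the second halves of two-line fprintf() calls: the
tristate options are libxl_defbool values, printed through
libxl_defbool_to_string() rather than as raw integers. The shape, with the
format strings assumed by analogy with the visible keymap/serial lines:

    fprintf(fh, "\t\t\t(acpi %s)\n",
            libxl_defbool_to_string(b_info->u.hvm.acpi));
    fprintf(fh, "\t\t\t(nographic %s)\n",
            libxl_defbool_to_string(b_info->u.hvm.nographic));
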
xl_parse.c
  1660  } else if (b_info->u.hvm.system_firmware)  in parse_config_data()
  1726  b_info->u.hvm.mmio_hole_memkb = l * 1024;  in parse_config_data()
  2347  b_info->u.hvm.rdm.strategy = rdm.strategy;  in parse_config_data()
  2348  b_info->u.hvm.rdm.policy = rdm.policy;  in parse_config_data()
  2611  b_info->u.hvm.spice.port = l;  in parse_config_data()
  2613  b_info->u.hvm.spice.tls_port = l;  in parse_config_data()
  2627  b_info->u.hvm.spice.usbredirection = l;  in parse_config_data()
  2666  b_info->u.hvm.usbversion = l;  in parse_config_data()
  2690  if(b_info->u.hvm.vnc.listen  in parse_config_data()
  2691  && b_info->u.hvm.vnc.display  in parse_config_data()
  [all …]

/xen/xen/arch/x86/hvm/

io.c
   177  const struct hvm_domain *hvm = &curr->domain->arch.hvm;  in g2m_portio_accept() local
   311  *data = d->arch.hvm.pci_cf8;  in vpci_portio_read()
   341  d->arch.hvm.pci_cf8 = data;  in vpci_portio_write()
   423  read_lock(&d->arch.hvm.mmcfg_lock);  in vpci_mmcfg_accept()
   425  read_unlock(&d->arch.hvm.mmcfg_lock);  in vpci_mmcfg_accept()
   440  read_lock(&d->arch.hvm.mmcfg_lock);  in vpci_mmcfg_read()
   449  read_unlock(&d->arch.hvm.mmcfg_lock);  in vpci_mmcfg_read()
   482  read_lock(&d->arch.hvm.mmcfg_lock);  in vpci_mmcfg_write()
   491  read_unlock(&d->arch.hvm.mmcfg_lock);  in vpci_mmcfg_write()
   530  write_lock(&d->arch.hvm.mmcfg_lock);  in register_vpci_mmcfg_handler()
  [all …]

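The vpci_mmcfg_* hits show a reader/writer split: the hot accept/read/write
paths take d->arch.hvm.mmcfg_lock shared, while register_vpci_mmcfg_handler()
(hit 530) takes it exclusively to extend the MMCFG region list. A sketch of
the read side only; the ECAM decode and vPCI dispatch in the middle are
paraphrased, and the exact signature may differ:

    static int vpci_mmcfg_read(struct vcpu *v, unsigned long addr,
                               unsigned int len, unsigned long *data)
    {
        struct domain *d = v->domain;

        read_lock(&d->arch.hvm.mmcfg_lock);
        /* ... find the MMCFG region containing addr, decode it to an
         * SBDF + register offset, and forward to the vPCI layer ... */
        read_unlock(&d->arch.hvm.mmcfg_lock);

        return X86EMUL_OKAY;
    }
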
pmtimer.c
    71  PMTState *s = &d->arch.hvm.pl_time->vpmt;  in hvm_acpi_power_button()
    77  d->arch.hvm.acpi.pm1a_sts |= PWRBTN_STS;  in hvm_acpi_power_button()
    84  PMTState *s = &d->arch.hvm.pl_time->vpmt;  in hvm_acpi_sleep_button()
    90  d->arch.hvm.acpi.pm1a_sts |= SLPBTN_STS;  in hvm_acpi_sleep_button()
   165  ((v->domain->arch.hvm.params[  in handle_evt_io()
   255  struct hvm_hw_acpi *acpi = &d->arch.hvm.acpi;  in acpi_save()
   256  PMTState *s = &d->arch.hvm.pl_time->vpmt;  in acpi_save()
   286  struct hvm_hw_acpi *acpi = &d->arch.hvm.acpi;  in acpi_load()
   287  PMTState *s = &d->arch.hvm.pl_time->vpmt;  in acpi_load()
   374  PMTState *s = &d->arch.hvm.pl_time->vpmt;  in pmtimer_deinit()
  [all …]

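hvm_acpi_power_button()/hvm_acpi_sleep_button() model ACPI fixed events:
"pressing" the virtual button merely latches a status bit in PM1a_STS, and
the guest only sees an SCI if it has the matching enable bit set. Sketch,
assuming vpmt carries its own lock (the locking is not visible in the hits):

    void hvm_acpi_power_button(struct domain *d)
    {
        PMTState *s = &d->arch.hvm.pl_time->vpmt;

        spin_lock(&s->lock);
        d->arch.hvm.acpi.pm1a_sts |= PWRBTN_STS;  /* latch the fixed event */
        /* ... recompute the SCI line: asserted iff pm1a_sts & pm1a_en ... */
        spin_unlock(&s->lock);
    }
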
vpt.c
    32  struct pl_time *pl = d->arch.hvm.pl_time;  in hvm_init_guest_time()
    41  struct pl_time *pl = v->domain->arch.hvm.pl_time;  in hvm_get_guest_time_fixed()
    59  return now + v->arch.hvm.stime_offset;  in hvm_get_guest_time_fixed()
    68  v->arch.hvm.stime_offset += offset;  in hvm_set_guest_time()
   159  spin_lock(&v->arch.hvm.tm_lock);  in pt_vcpu_lock()
   164  spin_unlock(&v->arch.hvm.tm_lock);  in pt_vcpu_unlock()
   176  spin_lock(&pt->vcpu->arch.hvm.tm_lock);  in pt_lock()
   216  if ( v->arch.hvm.guest_time == 0 )  in pt_thaw_time()
   220  v->arch.hvm.guest_time = 0;  in pt_thaw_time()
   547  list_add(&pt->list, &v->arch.hvm.tm_list);  in create_periodic_time()
  [all …]

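Hits 59 and 68 give away the guest-time model: per-vcpu guest time is
domain time plus a per-vcpu correction, and setting the guest time only
ever moves that correction. Simplified sketch (the real code also accepts
a fixed at_tsc reference, and pl->stime_offset is an assumption about the
pl_time layout):

    uint64_t hvm_get_guest_time(struct vcpu *v)
    {
        struct pl_time *pl = v->domain->arch.hvm.pl_time;
        uint64_t now = get_s_time() + pl->stime_offset;

        return now + v->arch.hvm.stime_offset;
    }

    void hvm_set_guest_time(struct vcpu *v, uint64_t guest_time)
    {
        int64_t offset = guest_time - hvm_get_guest_time(v);

        v->arch.hvm.stime_offset += offset;
    }
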
domain.c
   209  v->arch.hvm.guest_cr[0] = regs->cr0;  in arch_set_info_hvm_guest()
   210  v->arch.hvm.guest_cr[3] = regs->cr3;  in arch_set_info_hvm_guest()
   211  v->arch.hvm.guest_cr[4] = regs->cr4;  in arch_set_info_hvm_guest()
   212  v->arch.hvm.guest_efer = regs->efer;  in arch_set_info_hvm_guest()
   260  v->arch.hvm.guest_cr[0] = regs->cr0;  in arch_set_info_hvm_guest()
   261  v->arch.hvm.guest_cr[3] = regs->cr3;  in arch_set_info_hvm_guest()
   262  v->arch.hvm.guest_cr[4] = regs->cr4;  in arch_set_info_hvm_guest()
   275  if ( v->arch.hvm.guest_efer & EFER_LME )  in arch_set_info_hvm_guest()
   276  v->arch.hvm.guest_efer |= EFER_LMA;  in arch_set_info_hvm_guest()
   281  v->arch.hvm.guest_cr[4]);  in arch_set_info_hvm_guest()
  [all …]

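arch_set_info_hvm_guest() copies the toolstack-provided register block into
the vcpu verbatim; the two runs of identical hits (209-212 and 260-262)
presumably correspond to the 32-bit and 64-bit layouts of the context
structure (cf. the vcpu_hvm_x86_32 entry in xlat.lst below). The one
derived bit is EFER.LMA: a freshly built vCPU never went through the
architectural long-mode enable sequence, so LMA is inferred from LME:

    v->arch.hvm.guest_cr[0] = regs->cr0;
    v->arch.hvm.guest_cr[3] = regs->cr3;
    v->arch.hvm.guest_cr[4] = regs->cr4;
    v->arch.hvm.guest_efer  = regs->efer;

    if ( v->arch.hvm.guest_efer & EFER_LME )
        v->arch.hvm.guest_efer |= EFER_LMA;
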
irq.c
    61  spin_lock(&d->arch.hvm.irq_lock);  in hvm_ioapic_assert()
    65  spin_unlock(&d->arch.hvm.irq_lock);  in hvm_ioapic_assert()
    80  spin_lock(&d->arch.hvm.irq_lock);  in hvm_ioapic_deassert()
    82  spin_unlock(&d->arch.hvm.irq_lock);  in hvm_ioapic_deassert()
   131  spin_lock(&d->arch.hvm.irq_lock);  in hvm_pci_intx_assert()
   165  spin_lock(&d->arch.hvm.irq_lock);  in hvm_pci_intx_deassert()
   189  spin_lock(&d->arch.hvm.irq_lock);  in hvm_gsi_assert()
   210  spin_lock(&d->arch.hvm.irq_lock);  in hvm_gsi_deassert()
   225  spin_lock(&d->arch.hvm.irq_lock);  in hvm_isa_irq_assert()
   247  spin_lock(&d->arch.hvm.irq_lock);  in hvm_isa_irq_deassert()
  [all …]

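Every entry point above, PCI INTx, ISA IRQ, GSI, or IO-APIC pin, assert or
deassert, brackets its work in the same domain-global d->arch.hvm.irq_lock,
so virtual interrupt routing state is only ever walked under one lock. The
common shape (body paraphrased):

    void hvm_gsi_assert(struct domain *d, unsigned int gsi)
    {
        spin_lock(&d->arch.hvm.irq_lock);
        /* ... bump the GSI's assertion count and, on a 0->1 transition,
         * propagate it to the virtual IO-APIC ... */
        spin_unlock(&d->arch.hvm.irq_lock);
    }
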
hvm.c
   448  v->arch.hvm.msr_tsc_adjust += v->arch.hvm.cache_tsc_offset - tsc_offset;  in hvm_set_guest_tsc_msr()
   455  v->arch.hvm.cache_tsc_offset += tsc_adjust - v->arch.hvm.msr_tsc_adjust;  in hvm_set_guest_tsc_adjust()
   648  if ( !d->arch.hvm.pl_time || !d->arch.hvm.irq ||  in hvm_domain_initialise()
   649  !d->arch.hvm.params || !d->arch.hvm.io_handler )  in hvm_domain_initialise()
   718  XFREE(d->arch.hvm.params);  in hvm_domain_initialise()
   719  XFREE(d->arch.hvm.pl_time);  in hvm_domain_initialise()
   720  XFREE(d->arch.hvm.irq);  in hvm_domain_initialise()
   768  XFREE(d->arch.hvm.params);  in hvm_domain_destroy()
   778  XFREE(d->arch.hvm.pl_time);  in hvm_domain_destroy()
   779  XFREE(d->arch.hvm.irq);  in hvm_domain_destroy()
  [all …]

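Hits 448 and 455 are two views of one invariant: guest_tsc = host_tsc +
cache_tsc_offset, with IA32_TSC_ADJUST tracking how much of that offset the
guest asked for itself. Paraphrased (hvm_set_guest_tsc() is assumed to
recompute cache_tsc_offset and push it to the VMCS/VMCB):

    static void hvm_set_guest_tsc_msr(struct vcpu *v, uint64_t guest_tsc)
    {
        uint64_t tsc_offset = v->arch.hvm.cache_tsc_offset;

        hvm_set_guest_tsc(v, guest_tsc);
        v->arch.hvm.msr_tsc_adjust += v->arch.hvm.cache_tsc_offset
                                      - tsc_offset;
    }

    static void hvm_set_guest_tsc_adjust(struct vcpu *v, uint64_t tsc_adjust)
    {
        v->arch.hvm.cache_tsc_offset += tsc_adjust
                                        - v->arch.hvm.msr_tsc_adjust;
        /* ... propagate cache_tsc_offset to hardware ... */
        v->arch.hvm.msr_tsc_adjust = tsc_adjust;
    }

The XFREE() runs (718-720, 768-779) are the matching error-unwind and
teardown paths for the separately allocated pl_time/irq/params/io_handler
objects whose allocation is checked at 648-649.
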
ioreq.c
    43  ASSERT(!s || !d->arch.hvm.ioreq_server.server[id]);  in set_ioreq_server()
    45  d->arch.hvm.ioreq_server.server[id] = s;  in set_ioreq_server()
    49  (d)->arch.hvm.ioreq_server.server[id]
   109  ioreq_t *ioreq = &v->arch.hvm.hvm_io.io_req;  in hvm_io_assist()
   175  struct hvm_vcpu_io *vio = &v->arch.hvm.hvm_io;  in handle_hvm_io_completion()
   253  return _gfn(d->arch.hvm.params[i]);  in hvm_alloc_legacy_ioreq_gfn()
   291  set_bit(i, &d->arch.hvm.ioreq_gfn.legacy_mask);  in hvm_free_legacy_ioreq_gfn()
   305  set_bit(i, &d->arch.hvm.ioreq_gfn.mask);  in hvm_free_ioreq_gfn()
  1267  cf8 = d->arch.hvm.pci_cf8;  in hvm_select_ioreq_server()
  1525  d->arch.hvm.pci_cf8 = *val;  in hvm_access_cf8()
  [all …]

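Hit 49 is the lookup macro itself; together with set_ioreq_server() it
shows that ioreq servers (device-model backends) occupy a fixed per-domain
slot array, and a slot may only be filled when empty. Sketch, assuming the
usual bound on id:

    static void set_ioreq_server(struct domain *d, unsigned int id,
                                 struct hvm_ioreq_server *s)
    {
        ASSERT(id < MAX_NR_IOREQ_SERVERS);
        ASSERT(!s || !d->arch.hvm.ioreq_server.server[id]); /* no overwrite */

        d->arch.hvm.ioreq_server.server[id] = s;
    }

    #define GET_IOREQ_SERVER(d, id) \
        (d)->arch.hvm.ioreq_server.server[id]
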
vioapic.c
   233  spin_lock(&d->arch.hvm.irq_lock);  in vioapic_write_redirent()
   269  spin_unlock(&d->arch.hvm.irq_lock);  in vioapic_write_redirent()
   524  spin_lock(&d->arch.hvm.irq_lock);  in vioapic_update_EOI()
   556  spin_unlock(&d->arch.hvm.irq_lock);  in vioapic_update_EOI()
   603  d->arch.hvm.nr_vioapics != 1 )  in ioapic_save()
   619  d->arch.hvm.nr_vioapics != 1 )  in ioapic_load()
   633  ASSERT(!d->arch.hvm.nr_vioapics);  in vioapic_reset()
   670  xfree(d->arch.hvm.vioapic);  in vioapic_free()
   679  ASSERT(!d->arch.hvm.nr_vioapics);  in vioapic_init()
   685  if ( (d->arch.hvm.vioapic == NULL) &&  in vioapic_init()
  [all …]

vmsi.c
   178  return d->arch.hvm.msixtbl_list.next;  in msixtbl_initialised()
   322  v->arch.hvm.hvm_io.msix_unmask_address = address;  in msixtbl_write()
   395  curr->arch.hvm.hvm_io.msix_snoop_gpa = 0;  in msixtbl_range()
   412  curr->arch.hvm.hvm_io.msix_snoop_address =  in msixtbl_range()
   414  curr->arch.hvm.hvm_io.msix_snoop_gpa =  in msixtbl_range()
   517  !v->arch.hvm.hvm_io.msix_snoop_gpa &&  in msixtbl_pt_register()
   522  v->arch.hvm.hvm_io.msix_unmask_address =  in msixtbl_pt_register()
   575  INIT_LIST_HEAD(&d->arch.hvm.msixtbl_list);  in msixtbl_init()
   606  v->arch.hvm.hvm_io.msix_snoop_address = 0;  in msix_write_completion()
   609  v->arch.hvm.hvm_io.msix_snoop_gpa )  in msix_write_completion()
  [all …]

/xen/xen/include/

xlat.lst
    68  ! dm_op_buf                hvm/dm_op.h
    69  ? dm_op_relocate_memory    hvm/dm_op.h
    73  ? dm_op_inject_event       hvm/dm_op.h
    74  ? dm_op_inject_msi         hvm/dm_op.h
    76  ? dm_op_modified_memory    hvm/dm_op.h
    78  ? dm_op_remote_shutdown    hvm/dm_op.h
    81  ? dm_op_set_mem_type       hvm/dm_op.h
    84  ? dm_op_track_dirty_vram   hvm/dm_op.h
    86  ? vcpu_hvm_context         hvm/hvm_vcpu.h
    87  ? vcpu_hvm_x86_32          hvm/hvm_vcpu.h
  [all …]

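xlat.lst drives Xen's compat-ABI header generation. As I read the format, a
"?" entry requests a CHECK_<name> construct proving that the 32-bit compat
and native layouts of the type are identical, while a "!" entry, here
dm_op_buf, which embeds a guest handle and so differs between ABIs,
requests XLAT_<name> translation helpers. Hypothetical use in compat code:

    CHECK_dm_op_inject_event;            /* compile-time layout equality check */
    XLAT_dm_op_buf(&nat.buf, &cmp.buf);  /* field-by-field compat translation */
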
/xen/xen/arch/x86/hvm/svm/

svm.c
    94  if ( vmcb_get_cpl(v->arch.hvm.svm.vmcb) )  in svm_crash_or_fault()
   185  v->arch.hvm.flag_dr_dirty = 0;  in svm_save_dr()
   211  if ( v->arch.hvm.flag_dr_dirty )  in __restore_debug_registers()
   214  v->arch.hvm.flag_dr_dirty = 1;  in __restore_debug_registers()
   320  v->arch.hvm.guest_cr[3] = c->cr3;  in svm_vmcb_restore()
   321  v->arch.hvm.guest_cr[4] = c->cr4;  in svm_vmcb_restore()
   376  v->arch.hvm.guest_efer = data->msr_efer;  in svm_load_cpu_state()
   529  value |= v->arch.hvm.guest_cr[4];  in svm_update_guest_cr()
  1165  v->arch.hvm.svm.launch_core = -1;  in svm_vcpu_initialise()
  2378  ? v->arch.hvm.n1asid.asid  in svm_invlpga_intercept()
  [all …]

/xen/xen/arch/x86/hvm/viridian/

time.c
    24  struct viridian_domain *vd = d->arch.hvm.viridian;  in update_reference_tsc()
    91  &d->arch.hvm.viridian->time_ref_count;  in time_ref_count_freeze()
    99  struct viridian_domain *vd = d->arch.hvm.viridian;  in time_ref_count_thaw()
   115  &d->arch.hvm.viridian->time_ref_count;  in time_ref_count()
   133  struct viridian_vcpu *vv = v->arch.hvm.viridian;  in stimer_expire()
   143  struct viridian_vcpu *vv = v->arch.hvm.viridian;  in start_stimer()
   215  struct viridian_vcpu *vv = v->arch.hvm.viridian;  in poll_stimer()
   247  struct viridian_vcpu *vv = v->arch.hvm.viridian;  in viridian_time_poll_timers()
   259  struct viridian_vcpu *vv = v->arch.hvm.viridian;  in time_vcpu_freeze()
   277  struct viridian_vcpu *vv = v->arch.hvm.viridian;  in time_vcpu_thaw()
  [all …]

viridian.c
   224  goi = &d->arch.hvm.viridian->guest_os_id;  in dump_guest_os_id()
   236  hg = &d->arch.hvm.viridian->hypercall_gpa;  in dump_hypercall()
   279  struct viridian_vcpu *vv = v->arch.hvm.viridian;  in guest_wrmsr_viridian()
   437  ASSERT(!v->arch.hvm.viridian);  in viridian_vcpu_init()
   439  if ( !v->arch.hvm.viridian )  in viridian_vcpu_init()
   462  ASSERT(!d->arch.hvm.viridian);  in viridian_domain_init()
   464  if ( !d->arch.hvm.viridian )  in viridian_domain_init()
   485  if ( !v->arch.hvm.viridian )  in viridian_vcpu_deinit()
   491  XFREE(v->arch.hvm.viridian);  in viridian_vcpu_deinit()
   501  if ( !d->arch.hvm.viridian )  in viridian_domain_deinit()
  [all …]

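The ASSERT/NULL-check/XFREE trio above is the standard lifecycle for the
separately allocated viridian state hung off arch.hvm. A sketch consistent
with those hits (xzalloc/XFREE are Xen's zeroing allocator and
free-and-NULL macro; the synic teardown call is an assumption about
ordering):

    int viridian_vcpu_init(struct vcpu *v)
    {
        ASSERT(!v->arch.hvm.viridian);
        v->arch.hvm.viridian = xzalloc(struct viridian_vcpu);
        if ( !v->arch.hvm.viridian )
            return -ENOMEM;

        return 0;
    }

    void viridian_vcpu_deinit(struct vcpu *v)
    {
        if ( !v->arch.hvm.viridian )    /* init failed or never ran */
            return;

        viridian_synic_vcpu_deinit(v);  /* dependent state first */
        XFREE(v->arch.hvm.viridian);    /* frees and NULLs the pointer */
    }
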
synic.c
    29  struct viridian_vcpu *vv = v->arch.hvm.viridian;  in viridian_apic_assist_set()
    49  struct viridian_vcpu *vv = v->arch.hvm.viridian;  in viridian_apic_assist_completed()
    67  struct viridian_vcpu *vv = v->arch.hvm.viridian;  in viridian_apic_assist_clear()
    79  struct viridian_vcpu *vv = v->arch.hvm.viridian;  in viridian_synic_wrmsr()
   189  const struct viridian_vcpu *vv = v->arch.hvm.viridian;  in viridian_synic_rdmsr()
   279  struct viridian_vcpu *vv = v->arch.hvm.viridian;  in viridian_synic_vcpu_init()
   303  struct viridian_vcpu *vv = v->arch.hvm.viridian;  in viridian_synic_vcpu_deinit()
   323  struct viridian_vcpu *vv = v->arch.hvm.viridian;  in viridian_synic_deliver_timer_msg()
   372  const struct viridian_vcpu *vv = v->arch.hvm.viridian;  in viridian_synic_is_auto_eoi_sint()
   385  struct viridian_vcpu *vv = v->arch.hvm.viridian;  in viridian_synic_ack_sint()
  [all …]

/xen/tools/include/

Makefile
    23  ln -sf $(addprefix $(XEN_ROOT)/xen/include/public/,arch-x86 arch-arm hvm io xsm) xen
    52  $(INSTALL_DIR) $(DESTDIR)$(includedir)/xen/arch-x86/hvm
    54  $(INSTALL_DIR) $(DESTDIR)$(includedir)/xen/arch-arm/hvm
    56  $(INSTALL_DIR) $(DESTDIR)$(includedir)/xen/hvm
    63  $(INSTALL_DATA) xen/arch-x86/hvm/*.h $(DESTDIR)$(includedir)/xen/arch-x86/hvm
    68  $(INSTALL_DATA) xen/arch-arm/hvm/*.h $(DESTDIR)$(includedir)/xen/arch-arm/hvm
    70  $(INSTALL_DATA) xen/hvm/*.h $(DESTDIR)$(includedir)/xen/hvm

/xen/tools/libxl/

libxl_create.c
    71  b_info->u.hvm.rdm_mem_boundary_memkb =  in libxl__rdm_setdefault()
   137  if (!b_info->u.hvm.bios)  in libxl__domain_build_info_setdefault()
   286  if (!b_info->u.hvm.hdtype)  in libxl__domain_build_info_setdefault()
   375  if (!b_info->u.hvm.usbversion &&  in libxl__domain_build_info_setdefault()
   377  b_info->u.hvm.usbversion = 2;  in libxl__domain_build_info_setdefault()
   379  if ((b_info->u.hvm.usbversion || b_info->u.hvm.spice.usbredirection) &&  in libxl__domain_build_info_setdefault()
   382  || b_info->u.hvm.usbdevice) ){  in libxl__domain_build_info_setdefault()
   388  if (!b_info->u.hvm.boot)  in libxl__domain_build_info_setdefault()
   394  if (!b_info->u.hvm.vnc.listen)  in libxl__domain_build_info_setdefault()
   496  vments[1] = (info->u.hvm.timeoffset) ? info->u.hvm.timeoffset : "";  in libxl__domain_build()
  [all …]

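libxl__domain_build_info_setdefault() applies the libxl idiom of filling
only fields the configuration left unset. Field names below come from the
hits; the literal default values are illustrative, not authoritative:

    /* Spice USB redirection is useless without a USB controller. */
    if (!b_info->u.hvm.usbversion &&
        (b_info->u.hvm.spice.usbredirection > 0))
        b_info->u.hvm.usbversion = 2;

    /* Default boot order: disk ("c"), then CD-ROM ("d"), then floppy ("a"). */
    if (!b_info->u.hvm.boot) {
        b_info->u.hvm.boot = strdup("cda");
        if (!b_info->u.hvm.boot) return ERROR_NOMEM;
    }
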
libxl_dm.c
   771  if (b_info->u.hvm.serial || b_info->u.hvm.serial_list) {  in libxl__build_device_model_args_old()
   772  if ( b_info->u.hvm.serial && b_info->u.hvm.serial_list )  in libxl__build_device_model_args_old()
   818  if (b_info->u.hvm.boot) {  in libxl__build_device_model_args_old()
   822  || b_info->u.hvm.usbdevice  in libxl__build_device_model_args_old()
   844  if (b_info->u.hvm.soundhw) {  in libxl__build_device_model_args_old()
  1373  if (b_info->u.hvm.serial || b_info->u.hvm.serial_list) {  in libxl__build_device_model_args_new()
  1374  if ( b_info->u.hvm.serial && b_info->u.hvm.serial_list )  in libxl__build_device_model_args_new()
  1454  if (b_info->u.hvm.boot) {  in libxl__build_device_model_args_new()
  1518  if (b_info->u.hvm.soundhw) {  in libxl__build_device_model_args_new()
  2083  vfb->vnc = b_info->u.hvm.vnc;  in libxl__vfb_and_vkb_from_hvm_guest_config()
  [all …]

/xen/xen/arch/x86/x86_64/

asm-offsets.c
    85  OFFSET(VCPU_svm_vmcb_pa, struct vcpu, arch.hvm.svm.vmcb_pa);  in __dummy__()
    86  OFFSET(VCPU_svm_vmcb, struct vcpu, arch.hvm.svm.vmcb);  in __dummy__()
    89  OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm.vmx.launched);  in __dummy__()
    90  OFFSET(VCPU_vmx_realmode, struct vcpu, arch.hvm.vmx.vmx_realmode);  in __dummy__()
    91  OFFSET(VCPU_vmx_emulate, struct vcpu, arch.hvm.vmx.vmx_emulate);  in __dummy__()
    92  OFFSET(VCPU_vm86_seg_mask, struct vcpu, arch.hvm.vmx.vm86_segment_mask);  in __dummy__()
    93  OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm.guest_cr[2]);  in __dummy__()
    96  OFFSET(VCPU_nhvm_guestmode, struct vcpu, arch.hvm.nvcpu.nv_guestmode);  in __dummy__()
    97  OFFSET(VCPU_nhvm_p2m, struct vcpu, arch.hvm.nvcpu.nv_p2m);  in __dummy__()
    98  OFFSET(VCPU_nsvm_hap_enabled, struct vcpu, arch.hvm.nvcpu.u.nsvm.ns_hap_enabled);  in __dummy__()

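asm-offsets.c is never linked into Xen: it is compiled to assembly, and the
OFFSET() markers are scraped out of that assembly to generate a header of
numeric constants, so hand-written assembly can address fields like
arch.hvm.guest_cr[2] without knowing the struct layout. The conventional
definitions look roughly like this (a sketch of the technique, not
necessarily Xen's exact macros):

    #define DEFINE(sym, val)                                                \
        asm volatile ( "\n.ascii\"==>#define " #sym " %0 /*" #val "*/<==\"" \
                       :: "i" (val) )
    #define OFFSET(sym, str, mem) DEFINE(sym, offsetof(str, mem))

After generation, assembly such as the VM entry path can use, e.g.,
"mov VCPU_hvm_guest_cr2(%rbx), %rax" to reach the guest's %cr2.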