/xen/tools/libxc/
xc_sr_restore_x86_pv.c
     7  assert(pfn <= ctx->x86.pv.max_pfn);  in pfn_to_mfn()
     9  return xc_pfn_to_mfn(pfn, ctx->x86.pv.p2m, ctx->x86.pv.width);  in pfn_to_mfn()
    38  ctx->x86.pv.p2m = p2m;  in expand_p2m()
    59  ctx->x86.pv.max_pfn = max_pfn;  in expand_p2m()
   218  ctx->x86.pv.max_pfn + 1, ctx->x86.pv.width);  in process_start_info()
   255  ctx->x86.pv.width);  in process_vcpu_basic()
   573  (ctx->x86.pv.max_pfn + 1) * ctx->x86.pv.width);  in update_guest_p2m()
   662  ctx->x86.pv.width, ctx->x86.pv.levels);  in handle_x86_pv_info()
  1156  free(ctx->x86.pv.p2m);  in x86_pv_cleanup()
  1179  if ( ctx->x86.pv.m2p )  in x86_pv_cleanup()
  [all …]

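The restore code treats the guest p2m as an opaque array and indexes it by the guest's pointer width. A minimal sketch of such a width-aware lookup, in the spirit of the xc_pfn_to_mfn() calls above (the types, the INVALID_MFN sentinel and the function name are simplified assumptions, not the exact libxc definitions):

#include <assert.h>
#include <stdint.h>

typedef uint64_t xen_pfn_t;
#define INVALID_MFN (~(xen_pfn_t)0)

/*
 * Look up pfn -> mfn in a p2m whose entry size follows the guest
 * width: 4 bytes for 32bit guests, 8 bytes for 64bit ones.  A 32bit
 * ~0 entry is widened to the canonical INVALID_MFN sentinel.
 */
static xen_pfn_t pfn_to_mfn_sketch(xen_pfn_t pfn, const void *p2m,
                                   unsigned int width)
{
    switch ( width )
    {
    case 8:
        return ((const uint64_t *)p2m)[pfn];

    case 4:
        return ((const uint32_t *)p2m)[pfn] == ~0U
               ? INVALID_MFN : ((const uint32_t *)p2m)[pfn];

    default:
        assert(!"unsupported guest width");
        return INVALID_MFN;
    }
}
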
xc_sr_common_x86_pv.c
     7  assert(mfn <= ctx->x86.pv.max_mfn);  in mfn_to_pfn()
     8  return ctx->x86.pv.m2p[mfn];  in mfn_to_pfn()
    13  return ((mfn <= ctx->x86.pv.max_mfn) &&  in mfn_in_pseudophysmap()
    28  pfn = ctx->x86.pv.m2p[mfn];  in dump_bad_pseudophysmap_entry()
    35  pfn, xc_pfn_to_mfn(pfn, ctx->x86.pv.p2m, ctx->x86.pv.width));  in dump_bad_pseudophysmap_entry()
    40  if ( ctx->x86.pv.width == 8 )  in cr3_to_mfn()
    56  if ( ctx->x86.pv.width == 8 )  in mfn_to_cr3()
    89  ctx->x86.pv.width = guest_width;  in x86_pv_domain_info()
    90  ctx->x86.pv.levels = guest_levels;  in x86_pv_domain_info()
   111  ctx->x86.pv.max_mfn = max_page;  in x86_pv_map_m2p()
  [all …]

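The fragments above outline the sanity check behind mfn_in_pseudophysmap(): an MFN is only trusted if the m2p and p2m agree on it, and dump_bad_pseudophysmap_entry() prints both directions when they do not. A hedged sketch of that round-trip check (the structure and names are illustrative stand-ins for the real xc_sr context, and a 64bit guest width is assumed so both maps index directly):

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t xen_pfn_t;

/* Illustrative stand-in for the ctx->x86.pv fields used above. */
struct pv_maps {
    xen_pfn_t max_mfn, max_pfn;
    const xen_pfn_t *m2p;   /* machine -> pseudophysical */
    const xen_pfn_t *p2m;   /* pseudophysical -> machine */
};

/*
 * An mfn is in the pseudophysmap iff it is in range, its m2p entry
 * is a plausible pfn, and translating that pfn forward through the
 * p2m yields the same mfn again.
 */
static bool mfn_in_pseudophysmap_sketch(const struct pv_maps *m,
                                        xen_pfn_t mfn)
{
    return (mfn <= m->max_mfn) &&
           (m->m2p[mfn] <= m->max_pfn) &&
           (m->p2m[m->m2p[mfn]] == mfn);
}
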
xc_sr_save_x86_pv.c
    21  if ( !ctx->x86.pv.shinfo )  in map_shinfo()
    87  if ( !ctx->x86.pv.p2m )  in map_p2m_leaves()
   221  ctx->x86.pv.p2m_frames = (ctx->x86.pv.max_pfn + fpp) / fpp;  in map_p2m_tree()
   337  ctx->x86.pv.p2m_frames = ctx->x86.pv.max_pfn / fpp + 1;  in map_p2m_list()
   438  ctx->x86.pv.p2m_frames = (ctx->x86.pv.max_pfn + fpp) / fpp;  in map_p2m_list()
   470  ctx->x86.pv.max_pfn = GET_FIELD(ctx->x86.pv.shinfo, arch.max_pfn,  in map_p2m()
   472  p2m_cr3 = GET_FIELD(ctx->x86.pv.shinfo, arch.p2m_cr3, ctx->x86.pv.width);  in map_p2m()
   994  return xc_pfn_to_mfn(pfn, ctx->x86.pv.p2m, ctx->x86.pv.width);  in x86_pv_pfn_to_gfn()
  1123  if ( ctx->x86.pv.p2m )  in x86_pv_cleanup()
  1124  munmap(ctx->x86.pv.p2m, ctx->x86.pv.p2m_frames * PAGE_SIZE);  in x86_pv_cleanup()
  [all …]

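Lines 221, 337 and 438 size the p2m in frames, with fpp being the number of p2m entries per page (PAGE_SIZE / width); note that (max_pfn + fpp) / fpp and max_pfn / fpp + 1 are the same rounding written two ways. A small worked sketch, assuming the usual 4096-byte page:

#include <assert.h>

#define PAGE_SIZE 4096u   /* assumed x86 page size */

/* Frames needed to hold p2m entries for pfns 0 .. max_pfn inclusive. */
static unsigned long p2m_frames(unsigned long max_pfn, unsigned int width)
{
    unsigned long fpp = PAGE_SIZE / width;   /* entries per frame */

    /* Same value as max_pfn / fpp + 1, i.e. ceil((max_pfn + 1) / fpp). */
    return (max_pfn + fpp) / fpp;
}

int main(void)
{
    /* 64bit guest: 512 entries/frame; pfns 0..1023 need two frames. */
    assert(p2m_frames(1023, 8) == 2);
    /* One more pfn spills into a third frame. */
    assert(p2m_frames(1024, 8) == 3);
    return 0;
}
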
/xen/xen/arch/x86/pv/
domain.c
   197  if ( d->arch.pv.pcid )  in pv_make_cr4()
   236  d->arch.pv.is_32bit = true;  in switch_compat()
   250  d->arch.pv.xpti = false;  in switch_compat()
   251  d->arch.pv.pcid = false;  in switch_compat()
   289  XFREE(v->arch.pv.trap_ctxt);  in pv_vcpu_destroy()
   306  if ( !v->arch.pv.trap_ctxt )  in pv_vcpu_initialise()
   339  XFREE(d->arch.pv.cpuidmasks);  in pv_domain_destroy()
   356  d->arch.pv.gdt_ldt_l1tab =  in pv_domain_initialise()
   387  d->arch.pv.pcid = d->arch.pv.xpti;  in pv_domain_initialise()
   391  d->arch.pv.pcid = !d->arch.pv.xpti;  in pv_domain_initialise()
  [all …]

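Lines 387 and 391 suggest a PCID policy keyed off XPTI: one mode enables PCID exactly when XPTI is active (giving the two page-table copies distinct TLB tags), the opposite mode only when it is not. A hedged sketch of that selection; the mode names are illustrative assumptions, and the real command-line parsing is not visible above:

#include <stdbool.h>

/* Illustrative modes for a pcid= style option. */
enum pcid_mode { PCID_OFF, PCID_ALL, PCID_XPTI, PCID_NOXPTI };

struct pv_domain_sketch { bool xpti, pcid; };

static void pick_pcid(struct pv_domain_sketch *d, enum pcid_mode mode)
{
    switch ( mode )
    {
    case PCID_OFF:    d->pcid = false;    break;
    case PCID_ALL:    d->pcid = true;     break;
    case PCID_XPTI:   d->pcid = d->xpti;  break;  /* line 387 above */
    case PCID_NOXPTI: d->pcid = !d->xpti; break;  /* line 391 above */
    }
}
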
callback.c
    33  struct trap_info *t = &curr->arch.pv.trap_ctxt[TRAP_nmi];  in register_guest_nmi_callback()
    74  curr->arch.pv.event_callback_eip = reg->address;  in register_guest_callback()
    78  curr->arch.pv.failsafe_callback_eip = reg->address;  in register_guest_callback()
    86  curr->arch.pv.syscall_callback_eip = reg->address;  in register_guest_callback()
    95  curr->arch.pv.syscall32_disables_events =  in register_guest_callback()
   100  curr->arch.pv.sysenter_callback_eip = reg->address;  in register_guest_callback()
   101  curr->arch.pv.sysenter_disables_events =  in register_guest_callback()
   232  curr->arch.pv.syscall32_disables_events =  in compat_register_guest_callback()
   239  curr->arch.pv.sysenter_disables_events =  in compat_register_guest_callback()
   350  struct trap_info *dst = curr->arch.pv.trap_ctxt;  in do_set_trap_table()
  [all …]

misc-hypercalls.c
    45  v->arch.pv.ctrlreg[0] |= X86_CR0_TS;  in do_fpu_taskswitch()
    50  v->arch.pv.ctrlreg[0] &= ~X86_CR0_TS;  in do_fpu_taskswitch()
    88  if ( v->arch.pv.ctrlreg[4] & X86_CR4_DE )  in set_debugreg()
   110  if ( v->arch.pv.ctrlreg[4] & X86_CR4_DE )  in set_debugreg()
   141  if ( !(v->arch.pv.ctrlreg[4] & X86_CR4_DE) )  in set_debugreg()
   147  v->arch.pv.dr7_emul = io_enable;  in set_debugreg()
   160  v->arch.pv.dr7_emul = 0;  in set_debugreg()

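Lines 45 and 50 show do_fpu_taskswitch() toggling the guest's virtual CR0.TS bit, which controls whether the next FPU use faults. A minimal sketch of just that bit manipulation (the real hypercall also updates hardware state, which is not visible above):

#include <stdint.h>

#define X86_CR0_TS 0x00000008u   /* CR0.TS: task switched */

/*
 * set != 0 arms the flag, so the guest's next FPU instruction will
 * raise device-not-available; set == 0 clears it again.
 */
static void fpu_taskswitch_sketch(uint64_t *guest_cr0, int set)
{
    if ( set )
        *guest_cr0 |= X86_CR0_TS;
    else
        *guest_cr0 &= ~X86_CR0_TS;
}
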
traps.c
    67  tb = &curr->arch.pv.trap_bounce;  in pv_inject_event()
    68  ti = &curr->arch.pv.trap_ctxt[vector];  in pv_inject_event()
    77  curr->arch.pv.ctrlreg[2] = event->cr2;  in pv_inject_event()
   117  struct trap_bounce *tb = &curr->arch.pv.trap_bounce;  in set_guest_machinecheck_trapbounce()
   132  struct trap_bounce *tb = &curr->arch.pv.trap_bounce;  in set_guest_nmi_trapbounce()

iret.c
    78  v->arch.pv.iopl = iret_saved.rflags & X86_EFLAGS_IOPL;  in do_iret()
   142  v->arch.pv.iopl = eflags & X86_EFLAGS_IOPL;  in compat_iret()
   157  u32 x, ksp = v->arch.pv.kernel_sp - 40;  in compat_iret()
   185  regs->ss = v->arch.pv.kernel_ss;  in compat_iret()
   187  ti = &v->arch.pv.trap_ctxt[TRAP_gp_fault];  in compat_iret()

emul-priv-op.c
   155  ASSERT((v->arch.pv.iopl & ~X86_EFLAGS_IOPL) == 0);  in iopl_ok()
   157  return IOPL(cpl) <= v->arch.pv.iopl;  in iopl_ok()
   170  if ( (port + bytes) <= v->arch.pv.iobmp_limit )  in guest_io_okay()
   181  switch ( __copy_from_guest_offset(x.bytes, v->arch.pv.iobmp,  in guest_io_okay()
   320  if ( !v->arch.pv.dr7_emul || !(v->arch.pv.ctrlreg[4] & X86_CR4_DE) )  in check_guest_io_breakpoint()
   726  *val = curr->arch.pv.ctrlreg[reg];  in read_cr()
   774  curr->arch.pv.ctrlreg[2] = val;  in write_cr()
   804  curr->arch.pv.ctrlreg[4] = pv_fixup_guest_cr4(curr, val);  in write_cr()
   858  *val = curr->arch.pv.gs_base_user;  in read_msr()
   994  curr->arch.pv.gs_base_user = val;  in write_msr()
  [all …]

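Lines 155 and 157 show that the guest's virtual IOPL is stored pre-shifted in EFLAGS format, so iopl_ok() reduces to a single comparison against the shifted CPL. A sketch of that check (the EFLAGS.IOPL field occupies bits 12-13 architecturally; the IOPL() macro body here is an assumption):

#include <stdbool.h>
#include <stdint.h>

#define X86_EFLAGS_IOPL 0x00003000u         /* EFLAGS bits 12-13 */
#define IOPL(cpl)       ((uint32_t)(cpl) << 12)

/*
 * vcpu_iopl is kept in EFLAGS format, with only bits 12-13 ever set
 * (the invariant the ASSERT at line 155 guards), so direct I/O is
 * allowed when the current privilege level is numerically at most
 * the virtual IOPL.
 */
static bool iopl_ok_sketch(uint32_t vcpu_iopl, unsigned int cpl)
{
    return IOPL(cpl) <= vcpu_iopl;
}
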
descriptor-tables.c
    70  v->arch.pv.gdt_ents = 0;  in pv_destroy_gdt()
    80  v->arch.pv.gdt_frames[i] = 0;  in pv_destroy_gdt()
   110  v->arch.pv.gdt_ents = entries;  in pv_set_gdt()
   114  v->arch.pv.gdt_frames[i] = frames[i];  in pv_set_gdt()

mm.c
    90  unsigned long linear = curr->arch.pv.ldt_base + offset;  in pv_map_ldt_shadow_page()
   100  if ( unlikely((offset >> 3) >= curr->arch.pv.ldt_ents) )  in pv_map_ldt_shadow_page()

emul-gate-op.c
    41  (gate_sel & 4 ? v->arch.pv.ldt_ents  in read_gate_descriptor()
    42  : v->arch.pv.gdt_ents)) ||  in read_gate_descriptor()
   315  esp = v->arch.pv.kernel_sp;  in pv_emulate_gate_op()
   316  ss = v->arch.pv.kernel_ss;  in pv_emulate_gate_op()

/xen/xen/arch/x86/x86_64/
asm-offsets.c
    55  OFFSET(VCPU_trap_bounce, struct vcpu, arch.pv.trap_bounce);  in __dummy__()
    57  OFFSET(VCPU_event_addr, struct vcpu, arch.pv.event_callback_eip);  in __dummy__()
    58  OFFSET(VCPU_event_sel, struct vcpu, arch.pv.event_callback_cs);  in __dummy__()
    63  struct vcpu, arch.pv.syscall32_disables_events);  in __dummy__()
    67  struct vcpu, arch.pv.sysenter_disables_events);  in __dummy__()
    68  OFFSET(VCPU_trap_ctxt, struct vcpu, arch.pv.trap_ctxt);  in __dummy__()
    69  OFFSET(VCPU_kernel_sp, struct vcpu, arch.pv.kernel_sp);  in __dummy__()
    70  OFFSET(VCPU_kernel_ss, struct vcpu, arch.pv.kernel_ss);  in __dummy__()
    71  OFFSET(VCPU_iopl, struct vcpu, arch.pv.iopl);  in __dummy__()
    72  OFFSET(VCPU_guest_context_flags, struct vcpu, arch.pv.vgc_flags);  in __dummy__()
  [all …]

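asm-offsets.c exists to export structure offsets such as arch.pv.kernel_sp to assembly code. The usual technique, sketched below with illustrative macro internals and a stand-in structure: the file is compiled to an assembly listing only, and a build script scrapes the embedded markers into a generated header of #define constants.

#include <stddef.h>

/*
 * Emit a recognisable marker into the .s output; a build script later
 * turns each marker into "#define sym <value>".  The marker syntax is
 * illustrative; the principle is offsetof() evaluated by the compiler
 * as an immediate operand.
 */
#define DEFINE(sym, val) \
    asm volatile ( "\n.ascii \"==><" #sym "> %0 <==\"" :: "i" (val) )
#define OFFSET(sym, str, mem) DEFINE(sym, offsetof(str, mem))

/* Hypothetical stand-in for the vcpu fields referenced above. */
struct vcpu_sketch {
    long pad[4];
    unsigned long kernel_sp, kernel_ss;
};

void __dummy__(void)
{
    OFFSET(VCPU_kernel_sp_sketch, struct vcpu_sketch, kernel_sp);
    OFFSET(VCPU_kernel_ss_sketch, struct vcpu_sketch, kernel_ss);
}
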
traps.c
   172  crs[0] = v->arch.pv.ctrlreg[0];  in vcpu_show_registers()
   177  crs[4] = v->arch.pv.ctrlreg[4];  in vcpu_show_registers()
   178  crs[5] = v->arch.pv.fs_base;  in vcpu_show_registers()
   179  crs[6 + !kernel] = v->arch.pv.gs_base_kernel;  in vcpu_show_registers()
   180  crs[7 - !kernel] = v->arch.pv.gs_base_user;  in vcpu_show_registers()

/xen/docs/misc/
console.txt
     7  Xen traditionally provided a single pv console to pv guests, storing the
    10  Now many years after the introduction of the pv console we have
    11  multiple pv consoles support for pv and hvm guests; multiple pv
   124  By default xl creates a pv console for hvm guests, plus an emulated
   129  currently no bootloaders support xen pv consoles so the only way to
   134  then execute "xl console -t pv <domain>" to connect to it.
   141  output from the stubdom to dom0. The pv console backend for stubdom's pv
   142  consoles is always ioemu because multiple pv consoles support is a
   145  can only have one pv console with xenstored as backend (the stubdom
   146  could provide pv console backends to the hvm guest but then it would
  [all …]

vtpm-platforms.txt
    53  kernel="/usr/lib/xen/boot/pv-grub-x86_64.gz"
    60  kernel="/usr/lib/xen/boot/pv-grub-x86_64.gz"
    94  vtpmmgr domain. The two guest domains may be instantiated using pv-grub or
   123  a pair of domains using an unused XSM user field: a vTPM and a pv-grub domain
   129  In order to use pv-grub to obtain measurements of the guest kernel in PCRs 4 and
   132  guest, as described above, and using the --vtpm-label= argument to pv-grub so
   134  the hardware domain, which cannot use pv-grub, to use a vTPM in this situation,

/xen/xen/arch/x86/
domain.c
   886  v->arch.pv.vgc_flags = flags;  in arch_set_info_guest()
   956  v->arch.pv.ldt_base = v->arch.pv.ldt_ents  in arch_set_info_guest()
   988  if ( v->arch.pv.ldt_ents )  in arch_set_info_guest()
   995  v->arch.pv.kernel_ss = c(kernel_ss);  in arch_set_info_guest()
  1019  v->arch.pv.ctrlreg[4] = pv_fixup_guest_cr4(v, v->arch.pv.ctrlreg[4]);  in arch_set_info_guest()
  1024  v->arch.pv.dr7_emul = 0;  in arch_set_info_guest()
  1382  ? n->arch.pv.gs_base_kernel : n->arch.pv.gs_base_user;  in load_segments()
  1384  ? n->arch.pv.gs_base_user : n->arch.pv.gs_base_kernel;  in load_segments()
  1403  wrfsbase(n->arch.pv.fs_base);  in load_segments()
  1414  struct pv_vcpu *pv = &n->arch.pv;  in load_segments() local
  [all …]

domctl.c
   121  d->arch.pv.cpuidmasks->_1cd = mask;  in domain_cpu_policy_changed()
   131  d->arch.pv.cpuidmasks->_6c = mask;  in domain_cpu_policy_changed()
   148  d->arch.pv.cpuidmasks->_7ab0 = mask;  in domain_cpu_policy_changed()
   159  d->arch.pv.cpuidmasks->Da1 = mask;  in domain_cpu_policy_changed()
   204  d->arch.pv.cpuidmasks->e1cd = mask;  in domain_cpu_policy_changed()
  1586  c(ldt_base = v->arch.pv.ldt_ents ? v->arch.pv.ldt_base : 0);  in arch_get_info_guest()
  1587  c(ldt_ents = v->arch.pv.ldt_ents);  in arch_get_info_guest()
  1594  c(gdt_ents = v->arch.pv.gdt_ents);  in arch_get_info_guest()
  1595  c(kernel_ss = v->arch.pv.kernel_ss);  in arch_get_info_guest()
  1596  c(kernel_sp = v->arch.pv.kernel_sp);  in arch_get_info_guest()
  [all …]

domain_page.c
    88  dcache = &v->domain->arch.pv.mapcache;  in map_domain_page()
    89  vcache = &v->arch.pv.mapcache;  in map_domain_page()
   192  dcache = &v->domain->arch.pv.mapcache;  in unmap_domain_page()
   197  hashent = &v->arch.pv.mapcache.hash[MAPHASH_HASHFN(mfn)];  in unmap_domain_page()
   236  struct mapcache_domain *dcache = &d->arch.pv.mapcache;  in mapcache_domain_init()
   264  struct mapcache_domain *dcache = &d->arch.pv.mapcache;  in mapcache_vcpu_init()
   296  struct vcpu_maphash_entry *hashent = &v->arch.pv.mapcache.hash[i];  in mapcache_vcpu_init()

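Line 197 probes a small per-vCPU hash of recently used mappings, keyed by MFN, before touching the shared per-domain mapcache. A sketch of what such a cache lookup can look like (the entry layout, table size and hash function are assumptions based only on the MAPHASH_HASHFN() use above):

#include <stdint.h>

#define MAPHASH_ENTRIES 8                     /* assumed, power of two */
#define MAPHASH_HASHFN(mfn) ((mfn) % MAPHASH_ENTRIES)

/* Hypothetical per-vCPU cache of recent mapcache slots. */
struct vcpu_maphash_entry {
    unsigned long mfn;
    int16_t idx;          /* slot in the mapcache, -1 if empty */
    uint8_t refcnt;
};

struct vcpu_maphash {
    struct vcpu_maphash_entry hash[MAPHASH_ENTRIES];
};

/* Return the hashed slot for an mfn; the caller checks for a match. */
static struct vcpu_maphash_entry *
maphash_lookup(struct vcpu_maphash *m, unsigned long mfn)
{
    return &m->hash[MAPHASH_HASHFN(mfn)];
}
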
x86_emulate.c
   118  if ( curr->arch.pv.ctrlreg[4] & X86_CR4_DE )  in x86emul_read_dr()
   127  if ( curr->arch.pv.ctrlreg[4] & X86_CR4_DE )  in x86emul_read_dr()
   132  *val = curr->arch.dr7 | curr->arch.pv.dr7_emul;  in x86emul_read_dr()

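The two CR4.DE tests in x86emul_read_dr() reflect an architectural quirk: with CR4.DE clear, %dr4 and %dr5 alias %dr6 and %dr7, while with it set the access faults; line 132 additionally merges the emulated I/O breakpoint bits into %dr7 reads. A simplified sketch of that dispatch (the signature and #UD signalling are assumptions):

#include <stdbool.h>
#include <stdint.h>

#define X86_CR4_DE 0x00000008u   /* CR4 debugging extensions */

/* Returns false to signal #UD, true with *val filled in. */
static bool read_dr_sketch(unsigned int reg, uint64_t cr4,
                           const uint64_t dr[8], uint64_t dr7_emul,
                           uint64_t *val)
{
    if ( reg == 4 || reg == 5 )
    {
        if ( cr4 & X86_CR4_DE )
            return false;        /* #UD when CR4.DE is set */
        reg += 2;                /* alias onto %dr6 / %dr7 */
    }

    if ( reg > 7 )
        return false;

    *val = dr[reg];
    if ( reg == 7 )
        *val |= dr7_emul;        /* merge emulated IO breakpoint bits */

    return true;
}
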
/xen/tools/python/scripts/
convert-legacy-stream
    25  pv = None # Boolean (pv or hvm) variable
    87  if pv:
   160  if pv:
   594  if pv:
   599  if pv:
   604  if pv:
   612  if not pv and (vm.libxl or qemu):
   633  global fin, fout, twidth, pv, qemu, verbose
   690  pv = opts.gtype == "pv"

/xen/xen/include/asm-x86/
ldt.h
    12  if ( (ents = v->arch.pv.ldt_ents) == 0 )  in load_LDT()

shadow.h
   183  if ( d->arch.pv.check_l1tf && !paging_mode_sh_forced(d) &&  in pv_l1tf_check_pte()
   238  d->arch.pv.check_l1tf = is_hardware_domain(d) ? opt_pv_l1tf_hwdom  in pv_l1tf_domain_init()

/xen/xen/include/asm-x86/pv/
traps.h
    39  return v->arch.pv.trap_ctxt[vector].address;  in pv_trap_callback_registered()

/xen/automation/gitlab-ci/
test.yaml
    31  - ./automation/scripts/qemu-smoke-x86-64.sh pv 2>&1 | tee qemu-smoke-x86-64.log
    53  - ./automation/scripts/qemu-smoke-x86-64.sh pv 2>&1 | tee qemu-smoke-x86-64.log