/xen/xen/arch/x86/pv/
callback.c
    31  struct vcpu *curr = current;  in register_guest_nmi_callback() local
    32  struct domain *d = curr->domain;  in register_guest_nmi_callback()
    50  curr->arch.nmi_pending = true;  in register_guest_nmi_callback()
    57  struct vcpu *curr = current;  in unregister_guest_nmi_callback() local
    66  struct vcpu *curr = current;  in register_guest_callback() local
    95  curr->arch.pv.syscall32_disables_events =  in register_guest_callback()
   101  curr->arch.pv.sysenter_disables_events =  in register_guest_callback()
   209  struct vcpu *curr = current;  in compat_register_guest_callback() local
   239  curr->arch.pv.sysenter_disables_events =  in compat_register_guest_callback()
   349  struct vcpu *curr = current;  in do_set_trap_table() local
  [all …]

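Every PV entry point above starts the same way: the per-CPU `current` pointer is read once into a local `curr`, and related state (the owning domain, the pending-NMI flag, ...) is reached through that local. A minimal stand-alone sketch of the idiom follows; `struct vcpu`, `struct domain` and `current` are reduced stand-ins so it compiles on its own, and example_callback_op() is a made-up name, not a real Xen function.

    #include <stdbool.h>
    #include <stdio.h>

    /* Minimal stand-ins for the real Xen types (assumption: the real
     * structures carry far more fields than shown here). */
    struct domain { int domain_id; };
    struct vcpu  { int vcpu_id; struct domain *domain; bool nmi_pending; };

    /* In Xen, 'current' names the vcpu running on this physical CPU;
     * here it is faked with a file-scope pointer for illustration. */
    static struct vcpu *this_cpu_vcpu;
    #define current (this_cpu_vcpu)

    /* Hypothetical operation following the pattern in callback.c:
     * snapshot 'current' once, then work through the local copies. */
    static void example_callback_op(void)
    {
        struct vcpu *curr = current;          /* "struct vcpu *curr = current;" */
        struct domain *d  = curr->domain;     /* "struct domain *d = curr->domain;" */

        curr->nmi_pending = true;             /* cf. hit on line 50 above */
        printf("vcpu %d of domain %d has an NMI pending\n",
               curr->vcpu_id, d->domain_id);
    }

    int main(void)
    {
        struct domain dom = { .domain_id = 1 };
        struct vcpu   v   = { .vcpu_id = 0, .domain = &dom };

        this_cpu_vcpu = &v;
        example_callback_op();
        return 0;
    }

The same snapshot-then-derive pattern (`curr`, then `d`/`currd`) repeats in almost every listing below.
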
traps.c
    43  struct vcpu *curr = current;  in pv_inject_event() local
    67  tb = &curr->arch.pv.trap_bounce;  in pv_inject_event()
    68  ti = &curr->arch.pv.trap_ctxt[vector];  in pv_inject_event()
    77  curr->arch.pv.ctrlreg[2] = event->cr2;  in pv_inject_event()
    78  arch_set_cr2(curr, event->cr2);  in pv_inject_event()
    82  if ( !guest_kernel_mode(curr, regs) )  in pv_inject_event()
    99  if ( unlikely(null_trap_bounce(curr, tb)) )  in pv_inject_event()
   116  struct vcpu *curr = current;  in set_guest_machinecheck_trapbounce() local
   122  return !null_trap_bounce(curr, tb);  in set_guest_machinecheck_trapbounce()
   131  struct vcpu *curr = current;  in set_guest_nmi_trapbounce() local
  [all …]

mm.c
    64  struct vcpu *curr = current;  in guest_get_eff_kern_l1e() local
    65  const bool user_mode = !(curr->arch.flags & TF_kernel_mode);  in guest_get_eff_kern_l1e()
    69  toggle_guest_pt(curr);  in guest_get_eff_kern_l1e()
    74  toggle_guest_pt(curr);  in guest_get_eff_kern_l1e()
    86  struct vcpu *curr = current;  in pv_map_ldt_shadow_page() local
    87  struct domain *currd = curr->domain;  in pv_map_ldt_shadow_page()
    90  unsigned long linear = curr->arch.pv.ldt_base + offset;  in pv_map_ldt_shadow_page()
   100  if ( unlikely((offset >> 3) >= curr->arch.pv.ldt_ents) )  in pv_map_ldt_shadow_page()
   123  pl1e = &pv_ldt_ptes(curr)[offset >> PAGE_SHIFT];  in pv_map_ldt_shadow_page()

emul-priv-op.c
   351  struct vcpu *curr = current;  in read_io() local
   437  struct vcpu *curr = current;  in write_io() local
   584  struct vcpu *curr = current;  in rep_ins() local
   651  struct vcpu *curr = current;  in rep_outs() local
   759  struct vcpu *curr = current;  in write_cr() local
   775  arch_set_cr2(curr, val);  in write_cr()
   804  curr->arch.pv.ctrlreg[4] = pv_fixup_guest_cr4(curr, val);  in write_cr()
   806  ctxt_switch_levelling(curr);  in write_cr()
   826  struct vcpu *curr = current;  in read_msr() local
   960  struct vcpu *curr = current;  in write_msr() local
  [all …]

hypercall.c
   101  struct vcpu *curr = current;  in pv_hypercall() local
   104  ASSERT(guest_kernel_mode(curr, regs));  in pv_hypercall()
   125  curr->hcall_preempted = false;  in pv_hypercall()
   127  if ( !is_pv_32bit_vcpu(curr) )  in pv_hypercall()
   158  if ( !curr->hcall_preempted )  in pv_hypercall()
   202  curr->hcall_compat = true;  in pv_hypercall()
   204  curr->hcall_compat = false;  in pv_hypercall()
   207  if ( !curr->hcall_preempted )  in pv_hypercall()
   228  if ( curr->hcall_preempted )  in pv_hypercall()
   236  struct vcpu *curr = current;  in arch_do_multicall_call() local
  [all …]

iret.c
    26  static void async_exception_cleanup(struct vcpu *curr)  in async_exception_cleanup() argument
    30  if ( !curr->arch.async_exception_mask )  in async_exception_cleanup()
    33  if ( !(curr->arch.async_exception_mask & (curr->arch.async_exception_mask - 1)) )  in async_exception_cleanup()
    34  trap = __scanbit(curr->arch.async_exception_mask, VCPU_TRAP_NONE);  in async_exception_cleanup()
    37  if ( (curr->arch.async_exception_mask ^  in async_exception_cleanup()
    38  curr->arch.async_exception_state(trap).old_mask) == (1u << trap) )  in async_exception_cleanup()
    47  curr->arch.async_exception_mask =  in async_exception_cleanup()
    48  curr->arch.async_exception_state(trap).old_mask;  in async_exception_cleanup()

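The iret.c hits lean on the classic `mask & (mask - 1)` trick: the expression clears the lowest set bit, so it is zero exactly when at most one bit is set, i.e. at most one asynchronous exception is pending. A small self-contained illustration; lowest_bit_index() is a stand-in for Xen's __scanbit(), and the mask values are made up.

    #include <stdio.h>

    /* Stand-in for Xen's __scanbit(): index of the lowest set bit,
     * or 'fallback' when the mask is zero. */
    static unsigned int lowest_bit_index(unsigned long mask, unsigned int fallback)
    {
        return mask ? (unsigned int)__builtin_ctzl(mask) : fallback;
    }

    int main(void)
    {
        /* Made-up example masks; bit n here plays the role of VCPU_TRAP_n. */
        unsigned long masks[] = { 0x0, 0x2, 0x6 };

        for (unsigned int i = 0; i < sizeof(masks) / sizeof(masks[0]); i++) {
            unsigned long m = masks[i];

            /* m & (m - 1) clears the lowest set bit: a zero result means
             * at most one asynchronous exception is pending. */
            if (!(m & (m - 1)))
                printf("mask %#lx: at most one pending, trap index %u\n",
                       m, lowest_bit_index(m, 0 /* VCPU_TRAP_NONE stand-in */));
            else
                printf("mask %#lx: multiple exceptions pending\n", m);
        }
        return 0;
    }
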
grant_table.c
    50  struct vcpu *curr = current;  in create_grant_pv_mapping() local
    51  struct domain *currd = curr->domain;  in create_grant_pv_mapping()
   111  if ( UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr, 0) )  in create_grant_pv_mapping()
   140  struct vcpu *curr = current;  in steal_linear_address() local
   141  struct domain *currd = curr->domain;  in steal_linear_address()
   168  okay = UPDATE_ENTRY(l1, pl1e, ol1e, l1e_empty(), gl1mfn, curr, 0);  in steal_linear_address()
   192  struct vcpu *curr = current;  in replace_grant_pv_mapping() local
   193  struct domain *currd = curr->domain;  in replace_grant_pv_mapping()
   296  if ( UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr, 0) )  in replace_grant_pv_mapping()

descriptor-tables.c
   132  struct vcpu *curr = current;  in do_set_gdt() local
   142  domain_lock(curr->domain);  in do_set_gdt()
   144  if ( (ret = pv_set_gdt(curr, frames, entries)) == 0 )  in do_set_gdt()
   147  domain_unlock(curr->domain);  in do_set_gdt()
   155  struct vcpu *curr = current;  in compat_set_gdt() local
   178  domain_lock(curr->domain);  in compat_set_gdt()
   180  if ( (ret = pv_set_gdt(curr, frames, entries)) == 0 )  in compat_set_gdt()
   183  domain_unlock(curr->domain);  in compat_set_gdt()

misc-hypercalls.c
    66  struct vcpu *curr = current;  in set_debugreg() local
    75  if ( v == curr )  in set_debugreg()
   105  if ( v == curr )  in set_debugreg()
   155  if ( (v == curr) && !(v->arch.dr7 & DR7_ACTIVE_MASK) )  in set_debugreg()
   163  if ( v == curr )  in set_debugreg()

emulate.h
    25  const struct vcpu *curr = current;  in gdt_ldt_desc_ptr() local
    27  ((sel & X86_XEC_TI) ? LDT_VIRT_START(curr) : GDT_VIRT_START(curr));  in gdt_ldt_desc_ptr()

/xen/xen/arch/x86/hvm/
monitor.c
    38  struct vcpu *curr = current;  in hvm_monitor_cr() local
    65  struct vcpu *curr = current;  in hvm_monitor_emul_unimplemented() local
    73  .vcpu_id = curr->vcpu_id,  in hvm_monitor_emul_unimplemented()
    82  struct vcpu *curr = current;  in hvm_monitor_msr() local
   123  struct vcpu *curr = current;  in gfn_of_rip() local
   127  if ( hvm_get_cpl(curr) == 3 )  in gfn_of_rip()
   144  struct vcpu *curr = current;  in hvm_monitor_debug() local
   166  p2m_altp2m_check(curr, curr->arch.hvm.fast_single_step.p2midx);  in hvm_monitor_debug()
   198  struct vcpu *curr = current;  in hvm_monitor_cpuid() local
   236  struct vcpu *curr = current;  in hvm_monitor_check_p2m() local
  [all …]

hypercall.c
    34  const struct vcpu *curr = current;  in hvm_memory_op() local
    44  if ( !curr->hcall_compat )  in hvm_memory_op()
    84  const struct vcpu *curr = current;  in hvm_physdev_op() local
   107  if ( !curr->hcall_compat )  in hvm_physdev_op()
   167  struct vcpu *curr = current;  in hvm_hypercall() local
   232  curr->hcall_preempted = false;  in hvm_hypercall()
   263  if ( !curr->hcall_preempted )  in hvm_hypercall()
   303  curr->hcall_compat = true;  in hvm_hypercall()
   306  curr->hcall_compat = false;  in hvm_hypercall()
   309  if ( !curr->hcall_preempted )  in hvm_hypercall()
  [all …]

emulate.c
    93  struct vcpu *curr = current;  in set_context_data() local
    95  if ( curr->arch.vm_event )  in set_context_data()
   160  struct vcpu *curr = current;  in hvmemul_do_io() local
   578  struct vcpu *curr = current;  in hvmemul_map_linear_addr() local
   754  struct vcpu *curr = current;  in hvmemul_linear_to_phys() local
  1599  struct vcpu *curr = current;  in hvmemul_cmpxchg() local
  1825  struct vcpu *curr = current;  in hvmemul_rep_movs() local
  1994  struct vcpu *curr = current;  in hvmemul_rep_stos() local
  2362  struct vcpu *curr = current;  in hvmemul_get_fpu() local
  2364  if ( !curr->fpu_dirtied )  in hvmemul_get_fpu()
  [all …]

/xen/tools/flask/utils/
get-bool.c
    28  int err = 0, i = 0, curr, pend;  in all_bools() local
    31  err = xc_flask_getbool_byid(xch, i, name, sizeof name, &curr, &pend);  in all_bools()
    39  if (curr == pend)  in all_bools()
    40  printf("%s: %d\n", name, curr);  in all_bools()
    42  printf("%s: %d (pending %d)\n", name, curr, pend);  in all_bools()
    51  int curr, pend;  in main() local
    71  err = xc_flask_getbool_byname(xch, argv[1], &curr, &pend);  in main()
    79  if (curr == pend)  in main()
    80  printf("%s: %d\n", argv[1], curr);  in main()
    82  printf("%s: %d (pending %d)\n", argv[1], curr, pend);  in main()

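get-bool.c only mentions a FLASK boolean's pending value when it differs from the current one. The sketch below reproduces just that reporting rule; fetch_bool() is a hypothetical stand-in for the xc_flask_getbool_byname() call shown above, with hard-coded values so the example runs on its own.

    #include <stdio.h>

    /* Hypothetical stand-in for xc_flask_getbool_byname(): report a
     * boolean's current and pending values.  Hard-coded purely so the
     * example is self-contained. */
    static int fetch_bool(const char *name, int *curr, int *pend)
    {
        (void)name;
        *curr = 0;
        *pend = 1;   /* pretend a change is queued but not yet committed */
        return 0;
    }

    int main(void)
    {
        int curr, pend;
        const char *name = "example_bool";   /* made-up boolean name */

        if (fetch_bool(name, &curr, &pend))
            return 1;

        /* Same rule as get-bool.c: only mention the pending value when
         * it differs from the current one. */
        if (curr == pend)
            printf("%s: %d\n", name, curr);
        else
            printf("%s: %d (pending %d)\n", name, curr, pend);

        return 0;
    }
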
/xen/xen/arch/x86/hvm/vmx/
realmode.c
    99  struct vcpu *curr = current;  in vmx_realmode_emulate_one() local
   148  domain_crash(curr->domain);  in vmx_realmode_emulate_one()
   153  struct vcpu *curr = current;  in vmx_realmode() local
   175  curr->arch.hvm.vmx.vmx_emulate = 1;  in vmx_realmode()
   176  while ( curr->arch.hvm.vmx.vmx_emulate &&  in vmx_realmode()
   185  curr->arch.hvm.vmx.vmx_realmode &&  in vmx_realmode()
   195  if ( curr->arch.hvm.vmx.vmx_realmode )  in vmx_realmode()
   196  curr->arch.hvm.vmx.vmx_emulate =  in vmx_realmode()
   199  curr->arch.hvm.vmx.vmx_emulate =  in vmx_realmode()
   206  curr->arch.hvm.vmx.vmx_emulate = 1;  in vmx_realmode()
  [all …]

vmx.c
  1701  struct vcpu *curr = current;  in __vmx_inject_exception() local
  1780  struct vcpu *curr = current;  in vmx_inject_event() local
  2177  struct vcpu *curr = current;  in vmx_vcpu_emulate_vmfunc() local
  2600  struct vcpu *curr = current;  in vmx_fpu_dirty_intercept() local
  2602  vmx_fpu_enter(curr);  in vmx_fpu_dirty_intercept()
  2654  struct vcpu *curr = current;  in vmx_cr_access() local
  2947  struct vcpu *curr = current;  in vmx_msr_read_intercept() local
  3504  vmcs_dump_vcpu(curr);  in vmx_failed_vmentry()
  3507  domain_crash(curr->domain);  in vmx_failed_vmentry()
  4394  if ( nestedhvm_vcpu_in_guestmode(curr) && vcpu_nestedhvm(curr).stale_np2m )  in vmx_vmenter_helper()
  [all …]

/xen/xen/common/
wait.c
   125  struct vcpu *curr = current;  in __prepare_to_wait() local
   135  domain_crash(curr->domain);  in __prepare_to_wait()
   169  domain_crash(curr->domain);  in __prepare_to_wait()
   186  struct vcpu *curr = current;  in check_wakeup_from_wait() local
   198  domain_crash(curr->domain);  in check_wakeup_from_wait()
   229  struct vcpu *curr = current;  in prepare_to_wait() local
   238  vcpu_pause_nosync(curr);  in prepare_to_wait()
   239  get_knownalive_domain(curr->domain);  in prepare_to_wait()
   245  struct vcpu *curr = current;  in finish_wait() local
   257  vcpu_unpause(curr);  in finish_wait()
  [all …]

multicall.c
    39  struct vcpu *curr = current;  in do_multicall() local
    40  struct mc_state *mcs = &curr->mc_state;  in do_multicall()
    97  else if ( curr->hcall_preempted )  in do_multicall()
   106  hypercall_cancel_continuation(curr);  in do_multicall()

monitor.c
   135  struct vcpu *curr = current;  in monitor_guest_request() local
   136  struct domain *d = curr->domain;  in monitor_guest_request()
   142  .vcpu_id = curr->vcpu_id,  in monitor_guest_request()
   145  monitor_traps(curr, d->monitor.guest_request_sync, &req);  in monitor_guest_request()

/xen/xen/arch/x86/
x86_emulate.c
   106  struct vcpu *curr = current;  in x86emul_read_dr() local
   109  ASSERT(is_pv_vcpu(curr));  in x86emul_read_dr()
   114  *val = array_access_nospec(curr->arch.dr, reg);  in x86emul_read_dr()
   118  if ( curr->arch.pv.ctrlreg[4] & X86_CR4_DE )  in x86emul_read_dr()
   123  *val = curr->arch.dr6;  in x86emul_read_dr()
   127  if ( curr->arch.pv.ctrlreg[4] & X86_CR4_DE )  in x86emul_read_dr()
   132  *val = curr->arch.dr7 | curr->arch.pv.dr7_emul;  in x86emul_read_dr()
   149  struct vcpu *curr = current;  in x86emul_write_dr() local
   152  ASSERT(is_pv_vcpu(curr));  in x86emul_write_dr()
   154  switch ( set_debugreg(curr, reg, val) )  in x86emul_write_dr()

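The x86_emulate.c read path reflects the architectural DR4/DR5 rule: with CR4.DE set those registers are reserved and the access faults, with CR4.DE clear they alias DR6 and DR7 (and the DR7 read also folds in Xen's software-emulated bits). A reduced model under that assumption; the structure and return codes below are simplified stand-ins, not the real Xen definitions.

    #include <stdio.h>

    #define X86_CR4_DE (1ul << 3)    /* CR4.DE: Debugging Extensions */

    /* Simplified stand-in for the relevant per-vcpu state. */
    struct dbg_state {
        unsigned long dr[4];         /* DR0-DR3 */
        unsigned long dr6, dr7;
        unsigned long dr7_emul;      /* software-emulated DR7 bits */
        unsigned long cr4;
    };

    /* Model of the read path: DR4/DR5 alias DR6/DR7 only while CR4.DE
     * is clear; with CR4.DE set the access is rejected (Xen raises #UD). */
    static int read_dr(const struct dbg_state *s, unsigned int reg,
                       unsigned long *val)
    {
        switch (reg) {
        case 0: case 1: case 2: case 3:
            *val = s->dr[reg];
            return 0;

        case 4: case 5:
            if (s->cr4 & X86_CR4_DE)
                return -1;                    /* reserved: would be #UD */
            reg += 2;                         /* alias to DR6/DR7 */
            /* fall through */
        case 6: case 7:
            *val = (reg == 6) ? s->dr6 : (s->dr7 | s->dr7_emul);
            return 0;

        default:
            return -1;
        }
    }

    int main(void)
    {
        struct dbg_state s = { .dr6 = 0xffff0ff0, .dr7 = 0x400, .cr4 = 0 };
        unsigned long v;

        if (read_dr(&s, 5, &v) == 0)          /* CR4.DE clear: aliases DR7 */
            printf("DR5 read -> %#lx\n", v);

        s.cr4 |= X86_CR4_DE;
        if (read_dr(&s, 5, &v) != 0)          /* CR4.DE set: access rejected */
            printf("DR5 read rejected while CR4.DE is set\n");

        return 0;
    }
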
xstate.c
   696  struct vcpu *curr = current;  in handle_xsetbv() local
   712  domain_crash(curr->domain);  in handle_xsetbv()
   725  domain_crash(curr->domain);  in handle_xsetbv()
   730  mask = new_bv & ~curr->arch.xcr0_accum;  in handle_xsetbv()
   731  curr->arch.xcr0 = new_bv;  in handle_xsetbv()
   732  curr->arch.xcr0_accum |= new_bv;  in handle_xsetbv()
   743  if ( curr->fpu_dirtied )  in handle_xsetbv()
   745  else if ( xstate_all(curr) )  in handle_xsetbv()
   749  curr->fpu_initialised = 1;  in handle_xsetbv()
   750  curr->fpu_dirtied = 1;  in handle_xsetbv()
  [all …]

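handle_xsetbv() keeps two views of XCR0: xcr0 tracks whatever the guest last wrote, while xcr0_accum only ever grows, recording every state component the vcpu has ever enabled (the newly-seen bits are computed first, as on line 730 above). A toy illustration of that update, with made-up feature bits standing in for the real XCR0 state components.

    #include <stdint.h>
    #include <stdio.h>

    /* Made-up XCR0-style feature bits, purely for illustration. */
    #define XFEATURE_A (1ull << 0)
    #define XFEATURE_B (1ull << 1)
    #define XFEATURE_C (1ull << 2)

    struct toy_vcpu {
        uint64_t xcr0;         /* what the guest currently has enabled */
        uint64_t xcr0_accum;   /* every bit ever enabled: only grows */
    };

    static void toy_xsetbv(struct toy_vcpu *v, uint64_t new_bv)
    {
        /* Bits that have never been enabled before on this vcpu. */
        uint64_t newly_seen = new_bv & ~v->xcr0_accum;

        v->xcr0 = new_bv;          /* current value tracks the guest write */
        v->xcr0_accum |= new_bv;   /* accumulated set never shrinks */

        printf("write %#llx: newly seen %#llx, accum now %#llx\n",
               (unsigned long long)new_bv,
               (unsigned long long)newly_seen,
               (unsigned long long)v->xcr0_accum);
    }

    int main(void)
    {
        struct toy_vcpu v = { 0, 0 };

        toy_xsetbv(&v, XFEATURE_A | XFEATURE_B);  /* both bits newly seen */
        toy_xsetbv(&v, XFEATURE_A);               /* guest narrows xcr0 ... */
        toy_xsetbv(&v, XFEATURE_A | XFEATURE_C);  /* ... accum keeps A|B, adds C */
        return 0;
    }
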
msr.c
   168  const struct vcpu *curr = current;  in guest_rdmsr() local
   277  if ( !is_hvm_domain(d) || v != curr )  in guest_rdmsr()
   358  const struct vcpu *curr = current;  in guest_wrmsr() local
   461  if ( v == curr )  in guest_wrmsr()
   472  if ( v == curr )  in guest_wrmsr()
   489  if ( v == curr && is_hvm_domain(d) && cpu_has_cpuid_faulting &&  in guest_wrmsr()
   511  if ( !is_hvm_domain(d) || v != curr )  in guest_wrmsr()
   553  if ( v == curr )  in guest_wrmsr()
   567  if ( v == curr && (curr->arch.dr7 & DR7_ACTIVE_MASK) )  in guest_wrmsr()

/xen/xen/arch/x86/hvm/svm/
asid.c
    40  struct vcpu *curr = current;  in svm_asid_handle_vmrun() local
    41  struct vmcb_struct *vmcb = curr->arch.hvm.svm.vmcb;  in svm_asid_handle_vmrun()
    43  nestedhvm_vcpu_in_guestmode(curr)  in svm_asid_handle_vmrun()
    44  ? &vcpu_nestedhvm(curr).nv_n2asid : &curr->arch.hvm.n1asid;  in svm_asid_handle_vmrun()

/xen/xen/arch/x86/oprofile/
xenoprof.c
    77  int xenoprofile_get_mode(struct vcpu *curr, const struct cpu_user_regs *regs)  in xenoprofile_get_mode() argument
    82  if ( !is_hvm_vcpu(curr) )  in xenoprofile_get_mode()
    83  return guest_kernel_mode(curr, regs);  in xenoprofile_get_mode()
    85  switch ( hvm_guest_x86_mode(curr) )  in xenoprofile_get_mode()
    92  return hvm_get_cpl(curr) != 3;  in xenoprofile_get_mode()

/xen/xen/tools/kconfig/
symbol.c
    16  .curr = { "y", yes },
    20  .curr = { "m", mod },
    24  .curr = { "n", no },
    28  .curr = { "", no },
   293  sym->curr.tri = no;  in sym_calc_choice()
   340  oldval = sym->curr;  in sym_calc_value()
   350  newval = symbol_no.curr;  in sym_calc_value()
   354  sym->curr.tri = no;  in sym_calc_value()
   365  sym->curr = newval;  in sym_calc_value()
   429  sym->curr = newval;  in sym_calc_value()
  [all …]

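In the kconfig sources, curr is not a vcpu at all: it is a symbol's current value, pairing a display string with a tristate, as the { "y", yes } initializers above show. The types below are simplified stand-ins modelled on those snippets rather than the exact kconfig structures, and EXAMPLE_OPTION is a made-up symbol name.

    #include <stdio.h>

    /* Simplified stand-ins modelled on the snippets above. */
    enum tristate { no, mod, yes };

    struct symbol_value {
        const char   *val;   /* textual form, e.g. "y", "m", "n" */
        enum tristate tri;   /* the tristate it denotes */
    };

    struct symbol {
        const char *name;
        struct symbol_value curr;   /* the value the symbol currently has */
    };

    /* Constant symbols analogous to symbol_yes/symbol_no in symbol.c. */
    static const struct symbol symbol_yes = { "y", { "y", yes } };
    static const struct symbol symbol_no  = { "n", { "n", no  } };

    int main(void)
    {
        struct symbol sym = { "EXAMPLE_OPTION", symbol_no.curr };

        /* Recalculating a symbol follows the pattern seen in
         * sym_calc_value(): remember the old value, compute a new one,
         * then store it back into sym->curr. */
        struct symbol_value oldval = sym.curr;
        struct symbol_value newval = symbol_yes.curr;

        sym.curr = newval;
        printf("%s: %s -> %s\n", sym.name, oldval.val, sym.curr.val);
        return 0;
    }
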