Lines Matching refs:vcpu (arch/x86/kvm/x86.c)

94 	((struct kvm_vcpu *)(ctxt)->vcpu)
114 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
115 static void process_nmi(struct kvm_vcpu *vcpu);
116 static void process_smi(struct kvm_vcpu *vcpu);
117 static void enter_smm(struct kvm_vcpu *vcpu);
118 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
119 static void store_regs(struct kvm_vcpu *vcpu);
120 static int sync_regs(struct kvm_vcpu *vcpu);
122 static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
123 static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
334 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu) in kvm_async_pf_hash_reset() argument
338 vcpu->arch.apf.gfns[i] = ~0; in kvm_async_pf_hash_reset()
453 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu) in kvm_get_apic_base() argument
455 return vcpu->arch.apic_base; in kvm_get_apic_base()
459 enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu) in kvm_get_apic_mode() argument
461 return kvm_apic_mode(kvm_get_apic_base(vcpu)); in kvm_get_apic_mode()
465 int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in kvm_set_apic_base() argument
467 enum lapic_mode old_mode = kvm_get_apic_mode(vcpu); in kvm_set_apic_base()
469 u64 reserved_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu) | 0x2ff | in kvm_set_apic_base()
470 (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE); in kvm_set_apic_base()
481 kvm_lapic_set_base(vcpu, msr_info->data); in kvm_set_apic_base()
482 kvm_recalculate_apic_map(vcpu->kvm); in kvm_set_apic_base()
547 void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu) in kvm_deliver_exception_payload() argument
549 unsigned nr = vcpu->arch.exception.nr; in kvm_deliver_exception_payload()
550 bool has_payload = vcpu->arch.exception.has_payload; in kvm_deliver_exception_payload()
551 unsigned long payload = vcpu->arch.exception.payload; in kvm_deliver_exception_payload()
563 vcpu->arch.dr6 &= ~DR_TRAP_BITS; in kvm_deliver_exception_payload()
580 vcpu->arch.dr6 |= DR6_ACTIVE_LOW; in kvm_deliver_exception_payload()
581 vcpu->arch.dr6 |= payload; in kvm_deliver_exception_payload()
582 vcpu->arch.dr6 ^= payload & DR6_ACTIVE_LOW; in kvm_deliver_exception_payload()
590 vcpu->arch.dr6 &= ~BIT(12); in kvm_deliver_exception_payload()
593 vcpu->arch.cr2 = payload; in kvm_deliver_exception_payload()
597 vcpu->arch.exception.has_payload = false; in kvm_deliver_exception_payload()
598 vcpu->arch.exception.payload = 0; in kvm_deliver_exception_payload()
602 static void kvm_multiple_exception(struct kvm_vcpu *vcpu, in kvm_multiple_exception() argument
609 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_multiple_exception()
611 if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) { in kvm_multiple_exception()
622 WARN_ON_ONCE(vcpu->arch.exception.pending); in kvm_multiple_exception()
623 vcpu->arch.exception.injected = true; in kvm_multiple_exception()
633 vcpu->arch.exception.pending = true; in kvm_multiple_exception()
634 vcpu->arch.exception.injected = false; in kvm_multiple_exception()
636 vcpu->arch.exception.has_error_code = has_error; in kvm_multiple_exception()
637 vcpu->arch.exception.nr = nr; in kvm_multiple_exception()
638 vcpu->arch.exception.error_code = error_code; in kvm_multiple_exception()
639 vcpu->arch.exception.has_payload = has_payload; in kvm_multiple_exception()
640 vcpu->arch.exception.payload = payload; in kvm_multiple_exception()
641 if (!is_guest_mode(vcpu)) in kvm_multiple_exception()
642 kvm_deliver_exception_payload(vcpu); in kvm_multiple_exception()
647 prev_nr = vcpu->arch.exception.nr; in kvm_multiple_exception()
650 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in kvm_multiple_exception()
662 vcpu->arch.exception.pending = true; in kvm_multiple_exception()
663 vcpu->arch.exception.injected = false; in kvm_multiple_exception()
664 vcpu->arch.exception.has_error_code = true; in kvm_multiple_exception()
665 vcpu->arch.exception.nr = DF_VECTOR; in kvm_multiple_exception()
666 vcpu->arch.exception.error_code = 0; in kvm_multiple_exception()
667 vcpu->arch.exception.has_payload = false; in kvm_multiple_exception()
668 vcpu->arch.exception.payload = 0; in kvm_multiple_exception()
676 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr) in kvm_queue_exception() argument
678 kvm_multiple_exception(vcpu, nr, false, 0, false, 0, false); in kvm_queue_exception()
682 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr) in kvm_requeue_exception() argument
684 kvm_multiple_exception(vcpu, nr, false, 0, false, 0, true); in kvm_requeue_exception()
688 void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, in kvm_queue_exception_p() argument
691 kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false); in kvm_queue_exception_p()
695 static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr, in kvm_queue_exception_e_p() argument
698 kvm_multiple_exception(vcpu, nr, true, error_code, in kvm_queue_exception_e_p()
702 int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err) in kvm_complete_insn_gp() argument
705 kvm_inject_gp(vcpu, 0); in kvm_complete_insn_gp()
707 return kvm_skip_emulated_instruction(vcpu); in kvm_complete_insn_gp()
713 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) in kvm_inject_page_fault() argument
715 ++vcpu->stat.pf_guest; in kvm_inject_page_fault()
716 vcpu->arch.exception.nested_apf = in kvm_inject_page_fault()
717 is_guest_mode(vcpu) && fault->async_page_fault; in kvm_inject_page_fault()
718 if (vcpu->arch.exception.nested_apf) { in kvm_inject_page_fault()
719 vcpu->arch.apf.nested_apf_token = fault->address; in kvm_inject_page_fault()
720 kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code); in kvm_inject_page_fault()
722 kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code, in kvm_inject_page_fault()
728 bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu, in kvm_inject_emulated_page_fault() argument
734 fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu : in kvm_inject_emulated_page_fault()
735 vcpu->arch.walk_mmu; in kvm_inject_emulated_page_fault()
743 kvm_mmu_invalidate_gva(vcpu, fault_mmu, fault->address, in kvm_inject_emulated_page_fault()
746 fault_mmu->inject_page_fault(vcpu, fault); in kvm_inject_emulated_page_fault()
751 void kvm_inject_nmi(struct kvm_vcpu *vcpu) in kvm_inject_nmi() argument
753 atomic_inc(&vcpu->arch.nmi_queued); in kvm_inject_nmi()
754 kvm_make_request(KVM_REQ_NMI, vcpu); in kvm_inject_nmi()
758 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) in kvm_queue_exception_e() argument
760 kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, false); in kvm_queue_exception_e()
764 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) in kvm_requeue_exception_e() argument
766 kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, true); in kvm_requeue_exception_e()
774 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl) in kvm_require_cpl() argument
776 if (static_call(kvm_x86_get_cpl)(vcpu) <= required_cpl) in kvm_require_cpl()
778 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); in kvm_require_cpl()
783 bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr) in kvm_require_dr() argument
785 if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE)) in kvm_require_dr()
788 kvm_queue_exception(vcpu, UD_VECTOR); in kvm_require_dr()
793 static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu) in pdptr_rsvd_bits() argument
795 return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2); in pdptr_rsvd_bits()
801 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) in load_pdptrs() argument
813 real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(pdpt_gfn), in load_pdptrs()
819 ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(real_gpa), pdpte, in load_pdptrs()
826 (pdpte[i] & pdptr_rsvd_bits(vcpu))) { in load_pdptrs()
832 kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR); in load_pdptrs()
833 vcpu->arch.pdptrs_from_userspace = false; in load_pdptrs()
839 void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0) in kvm_post_set_cr0() argument
842 kvm_clear_async_pf_completion_queue(vcpu); in kvm_post_set_cr0()
843 kvm_async_pf_hash_reset(vcpu); in kvm_post_set_cr0()
847 kvm_mmu_reset_context(vcpu); in kvm_post_set_cr0()
850 kvm_arch_has_noncoherent_dma(vcpu->kvm) && in kvm_post_set_cr0()
851 !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) in kvm_post_set_cr0()
852 kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL); in kvm_post_set_cr0()
856 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) in kvm_set_cr0() argument
858 unsigned long old_cr0 = kvm_read_cr0(vcpu); in kvm_set_cr0()
877 if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) && in kvm_set_cr0()
881 if (!is_pae(vcpu)) in kvm_set_cr0()
883 static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l); in kvm_set_cr0()
888 if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) && in kvm_set_cr0()
889 is_pae(vcpu) && ((cr0 ^ old_cr0) & pdptr_bits) && in kvm_set_cr0()
890 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu))) in kvm_set_cr0()
894 (is_64_bit_mode(vcpu) || kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))) in kvm_set_cr0()
897 static_call(kvm_x86_set_cr0)(vcpu, cr0); in kvm_set_cr0()
899 kvm_post_set_cr0(vcpu, old_cr0, cr0); in kvm_set_cr0()
905 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) in kvm_lmsw() argument
907 (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f)); in kvm_lmsw()
911 void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu) in kvm_load_guest_xsave_state() argument
913 if (vcpu->arch.guest_state_protected) in kvm_load_guest_xsave_state()
916 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) { in kvm_load_guest_xsave_state()
918 if (vcpu->arch.xcr0 != host_xcr0) in kvm_load_guest_xsave_state()
919 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); in kvm_load_guest_xsave_state()
921 if (vcpu->arch.xsaves_enabled && in kvm_load_guest_xsave_state()
922 vcpu->arch.ia32_xss != host_xss) in kvm_load_guest_xsave_state()
923 wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss); in kvm_load_guest_xsave_state()
927 (kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || in kvm_load_guest_xsave_state()
928 (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU)) && in kvm_load_guest_xsave_state()
929 vcpu->arch.pkru != vcpu->arch.host_pkru) in kvm_load_guest_xsave_state()
930 write_pkru(vcpu->arch.pkru); in kvm_load_guest_xsave_state()
934 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu) in kvm_load_host_xsave_state() argument
936 if (vcpu->arch.guest_state_protected) in kvm_load_host_xsave_state()
940 (kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || in kvm_load_host_xsave_state()
941 (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU))) { in kvm_load_host_xsave_state()
942 vcpu->arch.pkru = rdpkru(); in kvm_load_host_xsave_state()
943 if (vcpu->arch.pkru != vcpu->arch.host_pkru) in kvm_load_host_xsave_state()
944 write_pkru(vcpu->arch.host_pkru); in kvm_load_host_xsave_state()
947 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) { in kvm_load_host_xsave_state()
949 if (vcpu->arch.xcr0 != host_xcr0) in kvm_load_host_xsave_state()
952 if (vcpu->arch.xsaves_enabled && in kvm_load_host_xsave_state()
953 vcpu->arch.ia32_xss != host_xss) in kvm_load_host_xsave_state()
960 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) in __kvm_set_xcr() argument
963 u64 old_xcr0 = vcpu->arch.xcr0; in __kvm_set_xcr()
979 valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP; in __kvm_set_xcr()
993 vcpu->arch.xcr0 = xcr0; in __kvm_set_xcr()
996 kvm_update_cpuid_runtime(vcpu); in __kvm_set_xcr()
1000 int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu) in kvm_emulate_xsetbv() argument
1002 if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || in kvm_emulate_xsetbv()
1003 __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) { in kvm_emulate_xsetbv()
1004 kvm_inject_gp(vcpu, 0); in kvm_emulate_xsetbv()
1008 return kvm_skip_emulated_instruction(vcpu); in kvm_emulate_xsetbv()
1012 bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) in kvm_is_valid_cr4() argument
1017 if (cr4 & vcpu->arch.cr4_guest_rsvd_bits) in kvm_is_valid_cr4()
1020 return static_call(kvm_x86_is_valid_cr4)(vcpu, cr4); in kvm_is_valid_cr4()
1024 void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4) in kvm_post_set_cr4() argument
1043 kvm_mmu_reset_context(vcpu); in kvm_post_set_cr4()
1045 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu); in kvm_post_set_cr4()
1047 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); in kvm_post_set_cr4()
1051 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) in kvm_set_cr4() argument
1053 unsigned long old_cr4 = kvm_read_cr4(vcpu); in kvm_set_cr4()
1057 if (!kvm_is_valid_cr4(vcpu, cr4)) in kvm_set_cr4()
1060 if (is_long_mode(vcpu)) { in kvm_set_cr4()
1065 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) in kvm_set_cr4()
1067 && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, in kvm_set_cr4()
1068 kvm_read_cr3(vcpu))) in kvm_set_cr4()
1072 if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID)) in kvm_set_cr4()
1076 if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu)) in kvm_set_cr4()
1080 static_call(kvm_x86_set_cr4)(vcpu, cr4); in kvm_set_cr4()
1082 kvm_post_set_cr4(vcpu, old_cr4, cr4); in kvm_set_cr4()
1088 static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid) in kvm_invalidate_pcid() argument
1090 struct kvm_mmu *mmu = vcpu->arch.mmu; in kvm_invalidate_pcid()
1102 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); in kvm_invalidate_pcid()
1111 if (kvm_get_active_pcid(vcpu) == pcid) { in kvm_invalidate_pcid()
1112 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); in kvm_invalidate_pcid()
1113 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); in kvm_invalidate_pcid()
1121 if (!kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) in kvm_invalidate_pcid()
1125 if (kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd) == pcid) in kvm_invalidate_pcid()
1128 kvm_mmu_free_roots(vcpu, mmu, roots_to_free); in kvm_invalidate_pcid()
1131 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) in kvm_set_cr3() argument
1136 bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE); in kvm_set_cr3()
1146 if (cr3 == kvm_read_cr3(vcpu) && !is_pae_paging(vcpu)) in kvm_set_cr3()
1154 if (kvm_vcpu_is_illegal_gpa(vcpu, cr3)) in kvm_set_cr3()
1157 if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) in kvm_set_cr3()
1160 if (cr3 != kvm_read_cr3(vcpu)) in kvm_set_cr3()
1161 kvm_mmu_new_pgd(vcpu, cr3); in kvm_set_cr3()
1163 vcpu->arch.cr3 = cr3; in kvm_set_cr3()
1164 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3); in kvm_set_cr3()
1175 kvm_invalidate_pcid(vcpu, pcid); in kvm_set_cr3()
1181 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) in kvm_set_cr8() argument
1185 if (lapic_in_kernel(vcpu)) in kvm_set_cr8()
1186 kvm_lapic_set_tpr(vcpu, cr8); in kvm_set_cr8()
1188 vcpu->arch.cr8 = cr8; in kvm_set_cr8()
1193 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu) in kvm_get_cr8() argument
1195 if (lapic_in_kernel(vcpu)) in kvm_get_cr8()
1196 return kvm_lapic_get_cr8(vcpu); in kvm_get_cr8()
1198 return vcpu->arch.cr8; in kvm_get_cr8()
1202 static void kvm_update_dr0123(struct kvm_vcpu *vcpu) in kvm_update_dr0123() argument
1206 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { in kvm_update_dr0123()
1208 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_update_dr0123()
1212 void kvm_update_dr7(struct kvm_vcpu *vcpu) in kvm_update_dr7() argument
1216 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) in kvm_update_dr7()
1217 dr7 = vcpu->arch.guest_debug_dr7; in kvm_update_dr7()
1219 dr7 = vcpu->arch.dr7; in kvm_update_dr7()
1220 static_call(kvm_x86_set_dr7)(vcpu, dr7); in kvm_update_dr7()
1221 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
1223 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
1227 static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu) in kvm_dr6_fixed() argument
1231 if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM)) in kvm_dr6_fixed()
1234 if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT)) in kvm_dr6_fixed()
1239 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) in kvm_set_dr() argument
1241 size_t size = ARRAY_SIZE(vcpu->arch.db); in kvm_set_dr()
1245 vcpu->arch.db[array_index_nospec(dr, size)] = val; in kvm_set_dr()
1246 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) in kvm_set_dr()
1247 vcpu->arch.eff_db[dr] = val; in kvm_set_dr()
1253 vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); in kvm_set_dr()
1259 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; in kvm_set_dr()
1260 kvm_update_dr7(vcpu); in kvm_set_dr()
1268 void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) in kvm_get_dr() argument
1270 size_t size = ARRAY_SIZE(vcpu->arch.db); in kvm_get_dr()
1274 *val = vcpu->arch.db[array_index_nospec(dr, size)]; in kvm_get_dr()
1278 *val = vcpu->arch.dr6; in kvm_get_dr()
1282 *val = vcpu->arch.dr7; in kvm_get_dr()
1288 int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu) in kvm_emulate_rdpmc() argument
1290 u32 ecx = kvm_rcx_read(vcpu); in kvm_emulate_rdpmc()
1293 if (kvm_pmu_rdpmc(vcpu, ecx, &data)) { in kvm_emulate_rdpmc()
1294 kvm_inject_gp(vcpu, 0); in kvm_emulate_rdpmc()
1298 kvm_rax_write(vcpu, (u32)data); in kvm_emulate_rdpmc()
1299 kvm_rdx_write(vcpu, data >> 32); in kvm_emulate_rdpmc()
1300 return kvm_skip_emulated_instruction(vcpu); in kvm_emulate_rdpmc()
1539 static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data) in do_get_msr_feature() argument
1562 static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) in __kvm_valid_efer() argument
1564 if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT)) in __kvm_valid_efer()
1567 if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM)) in __kvm_valid_efer()
1571 !guest_cpuid_has(vcpu, X86_FEATURE_LM)) in __kvm_valid_efer()
1574 if (efer & EFER_NX && !guest_cpuid_has(vcpu, X86_FEATURE_NX)) in __kvm_valid_efer()
1580 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) in kvm_valid_efer() argument
1585 return __kvm_valid_efer(vcpu, efer); in kvm_valid_efer()
1589 static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in set_efer() argument
1591 u64 old_efer = vcpu->arch.efer; in set_efer()
1599 if (!__kvm_valid_efer(vcpu, efer)) in set_efer()
1602 if (is_paging(vcpu) && in set_efer()
1603 (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) in set_efer()
1608 efer |= vcpu->arch.efer & EFER_LMA; in set_efer()
1610 r = static_call(kvm_x86_set_efer)(vcpu, efer); in set_efer()
1618 kvm_mmu_reset_context(vcpu); in set_efer()
1629 bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type) in kvm_msr_allowed() argument
1633 struct kvm *kvm = vcpu->kvm; in kvm_msr_allowed()
1678 static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data, in __kvm_set_msr() argument
1683 if (!host_initiated && !kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE)) in __kvm_set_msr()
1692 if (is_noncanonical_address(data, vcpu)) in __kvm_set_msr()
1709 data = get_canonical(data, vcpu_virt_addr_bits(vcpu)); in __kvm_set_msr()
1716 !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) && in __kvm_set_msr()
1717 !guest_cpuid_has(vcpu, X86_FEATURE_RDPID)) in __kvm_set_msr()
1729 if (guest_cpuid_is_intel(vcpu) && (data >> 32) != 0) in __kvm_set_msr()
1740 return static_call(kvm_x86_set_msr)(vcpu, &msr); in __kvm_set_msr()
1743 static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu, in kvm_set_msr_ignored_check() argument
1746 int ret = __kvm_set_msr(vcpu, index, data, host_initiated); in kvm_set_msr_ignored_check()
1761 int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, in __kvm_get_msr() argument
1767 if (!host_initiated && !kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ)) in __kvm_get_msr()
1776 !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) && in __kvm_get_msr()
1777 !guest_cpuid_has(vcpu, X86_FEATURE_RDPID)) in __kvm_get_msr()
1785 ret = static_call(kvm_x86_get_msr)(vcpu, &msr); in __kvm_get_msr()
1791 static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu, in kvm_get_msr_ignored_check() argument
1794 int ret = __kvm_get_msr(vcpu, index, data, host_initiated); in kvm_get_msr_ignored_check()
1806 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data) in kvm_get_msr() argument
1808 return kvm_get_msr_ignored_check(vcpu, index, data, false); in kvm_get_msr()
1812 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data) in kvm_set_msr() argument
1814 return kvm_set_msr_ignored_check(vcpu, index, data, false); in kvm_set_msr()
1818 static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu) in complete_emulated_rdmsr() argument
1820 int err = vcpu->run->msr.error; in complete_emulated_rdmsr()
1822 kvm_rax_write(vcpu, (u32)vcpu->run->msr.data); in complete_emulated_rdmsr()
1823 kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32); in complete_emulated_rdmsr()
1826 return static_call(kvm_x86_complete_emulated_msr)(vcpu, err); in complete_emulated_rdmsr()
1829 static int complete_emulated_wrmsr(struct kvm_vcpu *vcpu) in complete_emulated_wrmsr() argument
1831 return static_call(kvm_x86_complete_emulated_msr)(vcpu, vcpu->run->msr.error); in complete_emulated_wrmsr()
1846 static int kvm_msr_user_space(struct kvm_vcpu *vcpu, u32 index, in kvm_msr_user_space() argument
1848 int (*completion)(struct kvm_vcpu *vcpu), in kvm_msr_user_space() argument
1854 if (!(vcpu->kvm->arch.user_space_msr_mask & msr_reason)) in kvm_msr_user_space()
1857 vcpu->run->exit_reason = exit_reason; in kvm_msr_user_space()
1858 vcpu->run->msr.error = 0; in kvm_msr_user_space()
1859 memset(vcpu->run->msr.pad, 0, sizeof(vcpu->run->msr.pad)); in kvm_msr_user_space()
1860 vcpu->run->msr.reason = msr_reason; in kvm_msr_user_space()
1861 vcpu->run->msr.index = index; in kvm_msr_user_space()
1862 vcpu->run->msr.data = data; in kvm_msr_user_space()
1863 vcpu->arch.complete_userspace_io = completion; in kvm_msr_user_space()
1868 static int kvm_get_msr_user_space(struct kvm_vcpu *vcpu, u32 index, int r) in kvm_get_msr_user_space() argument
1870 return kvm_msr_user_space(vcpu, index, KVM_EXIT_X86_RDMSR, 0, in kvm_get_msr_user_space()
1874 static int kvm_set_msr_user_space(struct kvm_vcpu *vcpu, u32 index, u64 data, int r) in kvm_set_msr_user_space() argument
1876 return kvm_msr_user_space(vcpu, index, KVM_EXIT_X86_WRMSR, data, in kvm_set_msr_user_space()
1880 int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu) in kvm_emulate_rdmsr() argument
1882 u32 ecx = kvm_rcx_read(vcpu); in kvm_emulate_rdmsr()
1886 r = kvm_get_msr(vcpu, ecx, &data); in kvm_emulate_rdmsr()
1889 if (r && kvm_get_msr_user_space(vcpu, ecx, r)) { in kvm_emulate_rdmsr()
1897 kvm_rax_write(vcpu, data & -1u); in kvm_emulate_rdmsr()
1898 kvm_rdx_write(vcpu, (data >> 32) & -1u); in kvm_emulate_rdmsr()
1903 return static_call(kvm_x86_complete_emulated_msr)(vcpu, r); in kvm_emulate_rdmsr()
1907 int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu) in kvm_emulate_wrmsr() argument
1909 u32 ecx = kvm_rcx_read(vcpu); in kvm_emulate_wrmsr()
1910 u64 data = kvm_read_edx_eax(vcpu); in kvm_emulate_wrmsr()
1913 r = kvm_set_msr(vcpu, ecx, data); in kvm_emulate_wrmsr()
1916 if (r && kvm_set_msr_user_space(vcpu, ecx, data, r)) in kvm_emulate_wrmsr()
1929 return static_call(kvm_x86_complete_emulated_msr)(vcpu, r); in kvm_emulate_wrmsr()
1933 int kvm_emulate_as_nop(struct kvm_vcpu *vcpu) in kvm_emulate_as_nop() argument
1935 return kvm_skip_emulated_instruction(vcpu); in kvm_emulate_as_nop()
1939 int kvm_emulate_invd(struct kvm_vcpu *vcpu) in kvm_emulate_invd() argument
1942 return kvm_emulate_as_nop(vcpu); in kvm_emulate_invd()
1946 int kvm_emulate_mwait(struct kvm_vcpu *vcpu) in kvm_emulate_mwait() argument
1949 return kvm_emulate_as_nop(vcpu); in kvm_emulate_mwait()
1953 int kvm_handle_invalid_op(struct kvm_vcpu *vcpu) in kvm_handle_invalid_op() argument
1955 kvm_queue_exception(vcpu, UD_VECTOR); in kvm_handle_invalid_op()
1960 int kvm_emulate_monitor(struct kvm_vcpu *vcpu) in kvm_emulate_monitor() argument
1963 return kvm_emulate_as_nop(vcpu); in kvm_emulate_monitor()
1967 static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu) in kvm_vcpu_exit_request() argument
1970 return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) || in kvm_vcpu_exit_request()
1981 static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data) in handle_fastpath_set_x2apic_icr_irqoff() argument
1983 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic)) in handle_fastpath_set_x2apic_icr_irqoff()
1992 kvm_apic_send_ipi(vcpu->arch.apic, (u32)data, (u32)(data >> 32)); in handle_fastpath_set_x2apic_icr_irqoff()
1993 kvm_lapic_set_reg(vcpu->arch.apic, APIC_ICR2, (u32)(data >> 32)); in handle_fastpath_set_x2apic_icr_irqoff()
1994 kvm_lapic_set_reg(vcpu->arch.apic, APIC_ICR, (u32)data); in handle_fastpath_set_x2apic_icr_irqoff()
2002 static int handle_fastpath_set_tscdeadline(struct kvm_vcpu *vcpu, u64 data) in handle_fastpath_set_tscdeadline() argument
2004 if (!kvm_can_use_hv_timer(vcpu)) in handle_fastpath_set_tscdeadline()
2007 kvm_set_lapic_tscdeadline_msr(vcpu, data); in handle_fastpath_set_tscdeadline()
2011 fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu) in handle_fastpath_set_msr_irqoff() argument
2013 u32 msr = kvm_rcx_read(vcpu); in handle_fastpath_set_msr_irqoff()
2019 data = kvm_read_edx_eax(vcpu); in handle_fastpath_set_msr_irqoff()
2020 if (!handle_fastpath_set_x2apic_icr_irqoff(vcpu, data)) { in handle_fastpath_set_msr_irqoff()
2021 kvm_skip_emulated_instruction(vcpu); in handle_fastpath_set_msr_irqoff()
2026 data = kvm_read_edx_eax(vcpu); in handle_fastpath_set_msr_irqoff()
2027 if (!handle_fastpath_set_tscdeadline(vcpu, data)) { in handle_fastpath_set_msr_irqoff()
2028 kvm_skip_emulated_instruction(vcpu); in handle_fastpath_set_msr_irqoff()
2046 static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) in do_get_msr() argument
2048 return kvm_get_msr_ignored_check(vcpu, index, data, true); in do_get_msr()
2051 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) in do_set_msr() argument
2053 return kvm_set_msr_ignored_check(vcpu, index, *data, true); in do_set_msr()
2168 static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time, in kvm_write_system_time() argument
2171 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_write_system_time()
2173 if (vcpu->vcpu_id == 0 && !host_initiated) { in kvm_write_system_time()
2175 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); in kvm_write_system_time()
2180 vcpu->arch.time = system_time; in kvm_write_system_time()
2181 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); in kvm_write_system_time()
2184 vcpu->arch.pv_time_enabled = false; in kvm_write_system_time()
2188 if (!kvm_gfn_to_hva_cache_init(vcpu->kvm, in kvm_write_system_time()
2189 &vcpu->arch.pv_time, system_time & ~1ULL, in kvm_write_system_time()
2191 vcpu->arch.pv_time_enabled = true; in kvm_write_system_time()
2244 static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier);
2246 static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale) in set_tsc_khz() argument
2252 kvm_vcpu_write_tsc_multiplier(vcpu, kvm_default_tsc_scaling_ratio); in set_tsc_khz()
2259 vcpu->arch.tsc_catchup = 1; in set_tsc_khz()
2260 vcpu->arch.tsc_always_catchup = 1; in set_tsc_khz()
2278 kvm_vcpu_write_tsc_multiplier(vcpu, ratio); in set_tsc_khz()
2282 static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz) in kvm_set_tsc_khz() argument
2290 kvm_vcpu_write_tsc_multiplier(vcpu, kvm_default_tsc_scaling_ratio); in kvm_set_tsc_khz()
2296 &vcpu->arch.virtual_tsc_shift, in kvm_set_tsc_khz()
2297 &vcpu->arch.virtual_tsc_mult); in kvm_set_tsc_khz()
2298 vcpu->arch.virtual_tsc_khz = user_tsc_khz; in kvm_set_tsc_khz()
2312 return set_tsc_khz(vcpu, user_tsc_khz, use_scaling); in kvm_set_tsc_khz()
2315 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns) in compute_guest_tsc() argument
2317 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, in compute_guest_tsc()
2318 vcpu->arch.virtual_tsc_mult, in compute_guest_tsc()
2319 vcpu->arch.virtual_tsc_shift); in compute_guest_tsc()
2320 tsc += vcpu->arch.this_tsc_write; in compute_guest_tsc()
2329 static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) in kvm_track_tsc_matching() argument
2333 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_track_tsc_matching()
2337 atomic_read(&vcpu->kvm->online_vcpus)); in kvm_track_tsc_matching()
2349 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); in kvm_track_tsc_matching()
2351 trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, in kvm_track_tsc_matching()
2352 atomic_read(&vcpu->kvm->online_vcpus), in kvm_track_tsc_matching()
2372 u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc, u64 ratio) in kvm_scale_tsc() argument
2383 static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) in kvm_compute_l1_tsc_offset() argument
2387 tsc = kvm_scale_tsc(vcpu, rdtsc(), vcpu->arch.l1_tsc_scaling_ratio); in kvm_compute_l1_tsc_offset()
2392 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) in kvm_read_l1_tsc() argument
2394 return vcpu->arch.l1_tsc_offset + in kvm_read_l1_tsc()
2395 kvm_scale_tsc(vcpu, host_tsc, vcpu->arch.l1_tsc_scaling_ratio); in kvm_read_l1_tsc()
2424 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset) in kvm_vcpu_write_tsc_offset() argument
2426 trace_kvm_write_tsc_offset(vcpu->vcpu_id, in kvm_vcpu_write_tsc_offset()
2427 vcpu->arch.l1_tsc_offset, in kvm_vcpu_write_tsc_offset()
2430 vcpu->arch.l1_tsc_offset = l1_offset; in kvm_vcpu_write_tsc_offset()
2437 if (is_guest_mode(vcpu)) in kvm_vcpu_write_tsc_offset()
2438 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset( in kvm_vcpu_write_tsc_offset()
2440 static_call(kvm_x86_get_l2_tsc_offset)(vcpu), in kvm_vcpu_write_tsc_offset()
2441 static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu)); in kvm_vcpu_write_tsc_offset()
2443 vcpu->arch.tsc_offset = l1_offset; in kvm_vcpu_write_tsc_offset()
2445 static_call(kvm_x86_write_tsc_offset)(vcpu, vcpu->arch.tsc_offset); in kvm_vcpu_write_tsc_offset()
2448 static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier) in kvm_vcpu_write_tsc_multiplier() argument
2450 vcpu->arch.l1_tsc_scaling_ratio = l1_multiplier; in kvm_vcpu_write_tsc_multiplier()
2453 if (is_guest_mode(vcpu)) in kvm_vcpu_write_tsc_multiplier()
2454 vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier( in kvm_vcpu_write_tsc_multiplier()
2456 static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu)); in kvm_vcpu_write_tsc_multiplier()
2458 vcpu->arch.tsc_scaling_ratio = l1_multiplier; in kvm_vcpu_write_tsc_multiplier()
2462 vcpu, vcpu->arch.tsc_scaling_ratio); in kvm_vcpu_write_tsc_multiplier()
2483 static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc, in __kvm_synchronize_tsc() argument
2486 struct kvm *kvm = vcpu->kvm; in __kvm_synchronize_tsc()
2496 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; in __kvm_synchronize_tsc()
2499 vcpu->arch.last_guest_tsc = tsc; in __kvm_synchronize_tsc()
2501 kvm_vcpu_write_tsc_offset(vcpu, offset); in __kvm_synchronize_tsc()
2518 } else if (vcpu->arch.this_tsc_generation != kvm->arch.cur_tsc_generation) { in __kvm_synchronize_tsc()
2523 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; in __kvm_synchronize_tsc()
2524 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; in __kvm_synchronize_tsc()
2525 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; in __kvm_synchronize_tsc()
2527 kvm_track_tsc_matching(vcpu); in __kvm_synchronize_tsc()
2530 static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data) in kvm_synchronize_tsc() argument
2532 struct kvm *kvm = vcpu->kvm; in kvm_synchronize_tsc()
2539 offset = kvm_compute_l1_tsc_offset(vcpu, data); in kvm_synchronize_tsc()
2543 if (vcpu->arch.virtual_tsc_khz) { in kvm_synchronize_tsc()
2553 nsec_to_cycles(vcpu, elapsed); in kvm_synchronize_tsc()
2554 u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL; in kvm_synchronize_tsc()
2572 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { in kvm_synchronize_tsc()
2576 u64 delta = nsec_to_cycles(vcpu, elapsed); in kvm_synchronize_tsc()
2578 offset = kvm_compute_l1_tsc_offset(vcpu, data); in kvm_synchronize_tsc()
2583 __kvm_synchronize_tsc(vcpu, offset, data, ns, matched); in kvm_synchronize_tsc()
2587 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, in adjust_tsc_offset_guest() argument
2590 u64 tsc_offset = vcpu->arch.l1_tsc_offset; in adjust_tsc_offset_guest()
2591 kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment); in adjust_tsc_offset_guest()
2594 static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment) in adjust_tsc_offset_host() argument
2596 if (vcpu->arch.l1_tsc_scaling_ratio != kvm_default_tsc_scaling_ratio) in adjust_tsc_offset_host()
2598 adjustment = kvm_scale_tsc(vcpu, (u64) adjustment, in adjust_tsc_offset_host()
2599 vcpu->arch.l1_tsc_scaling_ratio); in adjust_tsc_offset_host()
2600 adjust_tsc_offset_guest(vcpu, adjustment); in adjust_tsc_offset_host()
2819 struct kvm_vcpu *vcpu; in kvm_end_pvclock_update() local
2824 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_end_pvclock_update()
2825 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_end_pvclock_update()
2828 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_end_pvclock_update()
2829 kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu); in kvm_end_pvclock_update()
2898 struct kvm_vcpu_arch *vcpu = &v->arch; in kvm_setup_pvclock_page() local
2924 vcpu->hv_clock.version = guest_hv_clock.version + 1; in kvm_setup_pvclock_page()
2926 &vcpu->hv_clock, offset, in kvm_setup_pvclock_page()
2927 sizeof(vcpu->hv_clock.version)); in kvm_setup_pvclock_page()
2932 vcpu->hv_clock.flags |= (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED); in kvm_setup_pvclock_page()
2934 if (vcpu->pvclock_set_guest_stopped_request) { in kvm_setup_pvclock_page()
2935 vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED; in kvm_setup_pvclock_page()
2936 vcpu->pvclock_set_guest_stopped_request = false; in kvm_setup_pvclock_page()
2939 trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock); in kvm_setup_pvclock_page()
2942 &vcpu->hv_clock, offset, in kvm_setup_pvclock_page()
2943 sizeof(vcpu->hv_clock)); in kvm_setup_pvclock_page()
2947 vcpu->hv_clock.version++; in kvm_setup_pvclock_page()
2949 &vcpu->hv_clock, offset, in kvm_setup_pvclock_page()
2950 sizeof(vcpu->hv_clock.version)); in kvm_setup_pvclock_page()
2957 struct kvm_vcpu_arch *vcpu = &v->arch; in kvm_guest_time_update() local
3005 if (vcpu->tsc_catchup) { in kvm_guest_time_update()
3021 if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) { in kvm_guest_time_update()
3023 &vcpu->hv_clock.tsc_shift, in kvm_guest_time_update()
3024 &vcpu->hv_clock.tsc_to_system_mul); in kvm_guest_time_update()
3025 vcpu->hw_tsc_khz = tgt_tsc_khz; in kvm_guest_time_update()
3028 vcpu->hv_clock.tsc_timestamp = tsc_timestamp; in kvm_guest_time_update()
3029 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; in kvm_guest_time_update()
3030 vcpu->last_guest_tsc = tsc_timestamp; in kvm_guest_time_update()
3037 vcpu->hv_clock.flags = pvclock_flags; in kvm_guest_time_update()
3039 if (vcpu->pv_time_enabled) in kvm_guest_time_update()
3040 kvm_setup_pvclock_page(v, &vcpu->pv_time, 0); in kvm_guest_time_update()
3041 if (vcpu->xen.vcpu_info_set) in kvm_guest_time_update()
3042 kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_info_cache, in kvm_guest_time_update()
3044 if (vcpu->xen.vcpu_time_info_set) in kvm_guest_time_update()
3045 kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0); in kvm_guest_time_update()
3047 kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock); in kvm_guest_time_update()
3074 struct kvm_vcpu *vcpu; in kvmclock_update_fn() local
3076 kvm_for_each_vcpu(i, vcpu, kvm) { in kvmclock_update_fn()
3077 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvmclock_update_fn()
3078 kvm_vcpu_kick(vcpu); in kvmclock_update_fn()
3111 static bool can_set_mci_status(struct kvm_vcpu *vcpu) in can_set_mci_status() argument
3114 if (guest_cpuid_is_amd_or_hygon(vcpu)) in can_set_mci_status()
3115 return !!(vcpu->arch.msr_hwcr & BIT_ULL(18)); in can_set_mci_status()
3120 static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in set_msr_mce() argument
3122 u64 mcg_cap = vcpu->arch.mcg_cap; in set_msr_mce()
3129 vcpu->arch.mcg_status = data; in set_msr_mce()
3137 vcpu->arch.mcg_ctl = data; in set_msr_mce()
3158 if (!can_set_mci_status(vcpu)) in set_msr_mce()
3162 vcpu->arch.mce_banks[offset] = data; in set_msr_mce()
3170 static inline bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu) in kvm_pv_async_pf_enabled() argument
3174 return (vcpu->arch.apf.msr_en_val & mask) == mask; in kvm_pv_async_pf_enabled()
3177 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) in kvm_pv_enable_async_pf() argument
3185 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_VMEXIT) && in kvm_pv_enable_async_pf()
3189 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT) && in kvm_pv_enable_async_pf()
3193 if (!lapic_in_kernel(vcpu)) in kvm_pv_enable_async_pf()
3196 vcpu->arch.apf.msr_en_val = data; in kvm_pv_enable_async_pf()
3198 if (!kvm_pv_async_pf_enabled(vcpu)) { in kvm_pv_enable_async_pf()
3199 kvm_clear_async_pf_completion_queue(vcpu); in kvm_pv_enable_async_pf()
3200 kvm_async_pf_hash_reset(vcpu); in kvm_pv_enable_async_pf()
3204 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, in kvm_pv_enable_async_pf()
3208 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); in kvm_pv_enable_async_pf()
3209 vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT; in kvm_pv_enable_async_pf()
3211 kvm_async_pf_wakeup_all(vcpu); in kvm_pv_enable_async_pf()
3216 static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data) in kvm_pv_enable_async_pf_int() argument
3222 if (!lapic_in_kernel(vcpu)) in kvm_pv_enable_async_pf_int()
3225 vcpu->arch.apf.msr_int_val = data; in kvm_pv_enable_async_pf_int()
3227 vcpu->arch.apf.vec = data & KVM_ASYNC_PF_VEC_MASK; in kvm_pv_enable_async_pf_int()
3232 static void kvmclock_reset(struct kvm_vcpu *vcpu) in kvmclock_reset() argument
3234 vcpu->arch.pv_time_enabled = false; in kvmclock_reset()
3235 vcpu->arch.time = 0; in kvmclock_reset()
3238 static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu) in kvm_vcpu_flush_tlb_all() argument
3240 ++vcpu->stat.tlb_flush; in kvm_vcpu_flush_tlb_all()
3241 static_call(kvm_x86_tlb_flush_all)(vcpu); in kvm_vcpu_flush_tlb_all()
3244 static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu) in kvm_vcpu_flush_tlb_guest() argument
3246 ++vcpu->stat.tlb_flush; in kvm_vcpu_flush_tlb_guest()
3255 kvm_mmu_sync_roots(vcpu); in kvm_vcpu_flush_tlb_guest()
3256 kvm_mmu_sync_prev_roots(vcpu); in kvm_vcpu_flush_tlb_guest()
3259 static_call(kvm_x86_tlb_flush_guest)(vcpu); in kvm_vcpu_flush_tlb_guest()
3263 static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu) in kvm_vcpu_flush_tlb_current() argument
3265 ++vcpu->stat.tlb_flush; in kvm_vcpu_flush_tlb_current()
3266 static_call(kvm_x86_tlb_flush_current)(vcpu); in kvm_vcpu_flush_tlb_current()
3275 void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu) in kvm_service_local_tlb_flush_requests() argument
3277 if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu)) in kvm_service_local_tlb_flush_requests()
3278 kvm_vcpu_flush_tlb_current(vcpu); in kvm_service_local_tlb_flush_requests()
3280 if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu)) in kvm_service_local_tlb_flush_requests()
3281 kvm_vcpu_flush_tlb_guest(vcpu); in kvm_service_local_tlb_flush_requests()
3285 static void record_steal_time(struct kvm_vcpu *vcpu) in record_steal_time() argument
3287 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache; in record_steal_time()
3293 if (kvm_xen_msr_enabled(vcpu->kvm)) { in record_steal_time()
3294 kvm_xen_runstate_set_running(vcpu); in record_steal_time()
3298 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in record_steal_time()
3301 if (WARN_ON_ONCE(current->mm != vcpu->kvm->mm)) in record_steal_time()
3304 slots = kvm_memslots(vcpu->kvm); in record_steal_time()
3308 gfn_t gfn = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS; in record_steal_time()
3313 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gfn, sizeof(*st)) || in record_steal_time()
3323 if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) { in record_steal_time()
3342 vcpu->arch.st.preempted = 0; in record_steal_time()
3344 trace_kvm_pv_tlb_flush(vcpu->vcpu_id, in record_steal_time()
3347 kvm_vcpu_flush_tlb_guest(vcpu); in record_steal_time()
3356 vcpu->arch.st.preempted = 0; in record_steal_time()
3370 vcpu->arch.st.last_steal; in record_steal_time()
3371 vcpu->arch.st.last_steal = current->sched_info.run_delay; in record_steal_time()
3380 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); in record_steal_time()
3383 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in kvm_set_msr_common() argument
3389 if (msr && msr == vcpu->kvm->arch.xen_hvm_config.msr) in kvm_set_msr_common()
3390 return kvm_xen_write_hypercall_page(vcpu, data); in kvm_set_msr_common()
3404 vcpu->arch.microcode_version = data; in kvm_set_msr_common()
3409 vcpu->arch.arch_capabilities = data; in kvm_set_msr_common()
3421 vcpu->arch.perf_capabilities = data; in kvm_set_msr_common()
3426 return set_efer(vcpu, msr_info); in kvm_set_msr_common()
3434 vcpu->arch.msr_hwcr = data; in kvm_set_msr_common()
3436 vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n", in kvm_set_msr_common()
3443 vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: " in kvm_set_msr_common()
3449 return kvm_mtrr_set_msr(vcpu, msr, data); in kvm_set_msr_common()
3451 return kvm_set_apic_base(vcpu, msr_info); in kvm_set_msr_common()
3453 return kvm_x2apic_msr_write(vcpu, msr, data); in kvm_set_msr_common()
3455 kvm_set_lapic_tscdeadline_msr(vcpu, data); in kvm_set_msr_common()
3458 if (guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST)) { in kvm_set_msr_common()
3460 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; in kvm_set_msr_common()
3461 adjust_tsc_offset_guest(vcpu, adj); in kvm_set_msr_common()
3465 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_set_msr_common()
3467 vcpu->arch.ia32_tsc_adjust_msr = data; in kvm_set_msr_common()
3471 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) && in kvm_set_msr_common()
3472 ((vcpu->arch.ia32_misc_enable_msr ^ data) & MSR_IA32_MISC_ENABLE_MWAIT)) { in kvm_set_msr_common()
3473 if (!guest_cpuid_has(vcpu, X86_FEATURE_XMM3)) in kvm_set_msr_common()
3475 vcpu->arch.ia32_misc_enable_msr = data; in kvm_set_msr_common()
3476 kvm_update_cpuid_runtime(vcpu); in kvm_set_msr_common()
3478 vcpu->arch.ia32_misc_enable_msr = data; in kvm_set_msr_common()
3484 vcpu->arch.smbase = data; in kvm_set_msr_common()
3487 vcpu->arch.msr_ia32_power_ctl = data; in kvm_set_msr_common()
3491 kvm_synchronize_tsc(vcpu, data); in kvm_set_msr_common()
3493 u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset; in kvm_set_msr_common()
3494 adjust_tsc_offset_guest(vcpu, adj); in kvm_set_msr_common()
3495 vcpu->arch.ia32_tsc_adjust_msr += adj; in kvm_set_msr_common()
3500 !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)) in kvm_set_msr_common()
3509 vcpu->arch.ia32_xss = data; in kvm_set_msr_common()
3514 vcpu->arch.smi_count = data; in kvm_set_msr_common()
3517 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) in kvm_set_msr_common()
3520 vcpu->kvm->arch.wall_clock = data; in kvm_set_msr_common()
3521 kvm_write_wall_clock(vcpu->kvm, data, 0); in kvm_set_msr_common()
3524 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) in kvm_set_msr_common()
3527 vcpu->kvm->arch.wall_clock = data; in kvm_set_msr_common()
3528 kvm_write_wall_clock(vcpu->kvm, data, 0); in kvm_set_msr_common()
3531 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) in kvm_set_msr_common()
3534 kvm_write_system_time(vcpu, data, false, msr_info->host_initiated); in kvm_set_msr_common()
3537 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) in kvm_set_msr_common()
3540 kvm_write_system_time(vcpu, data, true, msr_info->host_initiated); in kvm_set_msr_common()
3543 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF)) in kvm_set_msr_common()
3546 if (kvm_pv_enable_async_pf(vcpu, data)) in kvm_set_msr_common()
3550 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) in kvm_set_msr_common()
3553 if (kvm_pv_enable_async_pf_int(vcpu, data)) in kvm_set_msr_common()
3557 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) in kvm_set_msr_common()
3560 vcpu->arch.apf.pageready_pending = false; in kvm_set_msr_common()
3561 kvm_check_async_pf_completion(vcpu); in kvm_set_msr_common()
3565 if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME)) in kvm_set_msr_common()
3574 vcpu->arch.st.msr_val = data; in kvm_set_msr_common()
3579 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); in kvm_set_msr_common()
3583 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI)) in kvm_set_msr_common()
3586 if (kvm_lapic_set_pv_eoi(vcpu, data, sizeof(u8))) in kvm_set_msr_common()
3591 if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL)) in kvm_set_msr_common()
3598 vcpu->arch.msr_kvm_poll_control = data; in kvm_set_msr_common()
3604 return set_msr_mce(vcpu, msr_info); in kvm_set_msr_common()
3612 if (kvm_pmu_is_valid_msr(vcpu, msr)) in kvm_set_msr_common()
3613 return kvm_pmu_set_msr(vcpu, msr_info); in kvm_set_msr_common()
3616 vcpu_unimpl(vcpu, "disabled perfctr wrmsr: " in kvm_set_msr_common()
3638 return kvm_hv_set_msr_common(vcpu, msr, data, in kvm_set_msr_common()
3645 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", in kvm_set_msr_common()
3649 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) in kvm_set_msr_common()
3651 vcpu->arch.osvw.length = data; in kvm_set_msr_common()
3654 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) in kvm_set_msr_common()
3656 vcpu->arch.osvw.status = data; in kvm_set_msr_common()
3661 cpuid_fault_enabled(vcpu))) in kvm_set_msr_common()
3663 vcpu->arch.msr_platform_info = data; in kvm_set_msr_common()
3668 !supports_cpuid_fault(vcpu))) in kvm_set_msr_common()
3670 vcpu->arch.msr_misc_features_enables = data; in kvm_set_msr_common()
3673 if (kvm_pmu_is_valid_msr(vcpu, msr)) in kvm_set_msr_common()
3674 return kvm_pmu_set_msr(vcpu, msr_info); in kvm_set_msr_common()
3681 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) in get_msr_mce() argument
3684 u64 mcg_cap = vcpu->arch.mcg_cap; in get_msr_mce()
3693 data = vcpu->arch.mcg_cap; in get_msr_mce()
3698 data = vcpu->arch.mcg_ctl; in get_msr_mce()
3701 data = vcpu->arch.mcg_status; in get_msr_mce()
3710 data = vcpu->arch.mce_banks[offset]; in get_msr_mce()
3719 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in kvm_get_msr_common() argument
3753 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) in kvm_get_msr_common()
3754 return kvm_pmu_get_msr(vcpu, msr_info); in kvm_get_msr_common()
3763 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) in kvm_get_msr_common()
3764 return kvm_pmu_get_msr(vcpu, msr_info); in kvm_get_msr_common()
3768 msr_info->data = vcpu->arch.microcode_version; in kvm_get_msr_common()
3772 !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES)) in kvm_get_msr_common()
3774 msr_info->data = vcpu->arch.arch_capabilities; in kvm_get_msr_common()
3778 !guest_cpuid_has(vcpu, X86_FEATURE_PDCM)) in kvm_get_msr_common()
3780 msr_info->data = vcpu->arch.perf_capabilities; in kvm_get_msr_common()
3783 msr_info->data = vcpu->arch.msr_ia32_power_ctl; in kvm_get_msr_common()
3798 offset = vcpu->arch.l1_tsc_offset; in kvm_get_msr_common()
3799 ratio = vcpu->arch.l1_tsc_scaling_ratio; in kvm_get_msr_common()
3801 offset = vcpu->arch.tsc_offset; in kvm_get_msr_common()
3802 ratio = vcpu->arch.tsc_scaling_ratio; in kvm_get_msr_common()
3805 msr_info->data = kvm_scale_tsc(vcpu, rdtsc(), ratio) + offset; in kvm_get_msr_common()
3810 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); in kvm_get_msr_common()
3829 msr_info->data = kvm_get_apic_base(vcpu); in kvm_get_msr_common()
3832 return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data); in kvm_get_msr_common()
3834 msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu); in kvm_get_msr_common()
3837 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr; in kvm_get_msr_common()
3840 msr_info->data = vcpu->arch.ia32_misc_enable_msr; in kvm_get_msr_common()
3845 msr_info->data = vcpu->arch.smbase; in kvm_get_msr_common()
3848 msr_info->data = vcpu->arch.smi_count; in kvm_get_msr_common()
3857 msr_info->data = vcpu->arch.efer; in kvm_get_msr_common()
3860 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) in kvm_get_msr_common()
3863 msr_info->data = vcpu->kvm->arch.wall_clock; in kvm_get_msr_common()
3866 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) in kvm_get_msr_common()
3869 msr_info->data = vcpu->kvm->arch.wall_clock; in kvm_get_msr_common()
3872 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) in kvm_get_msr_common()
3875 msr_info->data = vcpu->arch.time; in kvm_get_msr_common()
3878 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) in kvm_get_msr_common()
3881 msr_info->data = vcpu->arch.time; in kvm_get_msr_common()
3884 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF)) in kvm_get_msr_common()
3887 msr_info->data = vcpu->arch.apf.msr_en_val; in kvm_get_msr_common()
3890 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) in kvm_get_msr_common()
3893 msr_info->data = vcpu->arch.apf.msr_int_val; in kvm_get_msr_common()
3896 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) in kvm_get_msr_common()
3902 if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME)) in kvm_get_msr_common()
3905 msr_info->data = vcpu->arch.st.msr_val; in kvm_get_msr_common()
3908 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI)) in kvm_get_msr_common()
3911 msr_info->data = vcpu->arch.pv_eoi.msr_val; in kvm_get_msr_common()
3914 if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL)) in kvm_get_msr_common()
3917 msr_info->data = vcpu->arch.msr_kvm_poll_control; in kvm_get_msr_common()
3925 return get_msr_mce(vcpu, msr_info->index, &msr_info->data, in kvm_get_msr_common()
3929 !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)) in kvm_get_msr_common()
3931 msr_info->data = vcpu->arch.ia32_xss; in kvm_get_msr_common()
3954 return kvm_hv_get_msr_common(vcpu, in kvm_get_msr_common()
3971 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) in kvm_get_msr_common()
3973 msr_info->data = vcpu->arch.osvw.length; in kvm_get_msr_common()
3976 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) in kvm_get_msr_common()
3978 msr_info->data = vcpu->arch.osvw.status; in kvm_get_msr_common()
3982 !vcpu->kvm->arch.guest_can_read_msr_platform_info) in kvm_get_msr_common()
3984 msr_info->data = vcpu->arch.msr_platform_info; in kvm_get_msr_common()
3987 msr_info->data = vcpu->arch.msr_misc_features_enables; in kvm_get_msr_common()
3990 msr_info->data = vcpu->arch.msr_hwcr; in kvm_get_msr_common()
3993 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) in kvm_get_msr_common()
3994 return kvm_pmu_get_msr(vcpu, msr_info); in kvm_get_msr_common()
4006 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, in __msr_io() argument
4008 int (*do_msr)(struct kvm_vcpu *vcpu, in __msr_io() argument
4014 if (do_msr(vcpu, entries[i].index, &entries[i].data)) in __msr_io()
4025 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs, in msr_io() argument
4026 int (*do_msr)(struct kvm_vcpu *vcpu, in msr_io() argument
4050 r = n = __msr_io(vcpu, &msrs, entries, do_msr); in msr_io()
4073 static int kvm_ioctl_get_supported_hv_cpuid(struct kvm_vcpu *vcpu, in kvm_ioctl_get_supported_hv_cpuid() argument
4083 r = kvm_get_hv_cpuid(vcpu, &cpuid, cpuid_arg->entries); in kvm_ioctl_get_supported_hv_cpuid()
4361 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu) in need_emulate_wbinvd() argument
4363 return kvm_arch_has_noncoherent_dma(vcpu->kvm); in need_emulate_wbinvd()
4366 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_vcpu_load() argument
4369 if (need_emulate_wbinvd(vcpu)) { in kvm_arch_vcpu_load()
4371 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_load()
4372 else if (vcpu->cpu != -1 && vcpu->cpu != cpu) in kvm_arch_vcpu_load()
4373 smp_call_function_single(vcpu->cpu, in kvm_arch_vcpu_load()
4377 static_call(kvm_x86_vcpu_load)(vcpu, cpu); in kvm_arch_vcpu_load()
4380 vcpu->arch.host_pkru = read_pkru(); in kvm_arch_vcpu_load()
4383 if (unlikely(vcpu->arch.tsc_offset_adjustment)) { in kvm_arch_vcpu_load()
4384 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); in kvm_arch_vcpu_load()
4385 vcpu->arch.tsc_offset_adjustment = 0; in kvm_arch_vcpu_load()
4386 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_arch_vcpu_load()
4389 if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) { in kvm_arch_vcpu_load()
4390 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 : in kvm_arch_vcpu_load()
4391 rdtsc() - vcpu->arch.last_host_tsc; in kvm_arch_vcpu_load()
4396 u64 offset = kvm_compute_l1_tsc_offset(vcpu, in kvm_arch_vcpu_load()
4397 vcpu->arch.last_guest_tsc); in kvm_arch_vcpu_load()
4398 kvm_vcpu_write_tsc_offset(vcpu, offset); in kvm_arch_vcpu_load()
4399 vcpu->arch.tsc_catchup = 1; in kvm_arch_vcpu_load()
4402 if (kvm_lapic_hv_timer_in_use(vcpu)) in kvm_arch_vcpu_load()
4403 kvm_lapic_restart_hv_timer(vcpu); in kvm_arch_vcpu_load()
4409 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) in kvm_arch_vcpu_load()
4410 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); in kvm_arch_vcpu_load()
4411 if (vcpu->cpu != cpu) in kvm_arch_vcpu_load()
4412 kvm_make_request(KVM_REQ_MIGRATE_TIMER, vcpu); in kvm_arch_vcpu_load()
4413 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
4416 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); in kvm_arch_vcpu_load()
4419 static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu) in kvm_steal_time_set_preempted() argument
4421 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache; in kvm_steal_time_set_preempted()
4426 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in kvm_steal_time_set_preempted()
4429 if (vcpu->arch.st.preempted) in kvm_steal_time_set_preempted()
4433 if (unlikely(current->mm != vcpu->kvm->mm)) in kvm_steal_time_set_preempted()
4436 slots = kvm_memslots(vcpu->kvm); in kvm_steal_time_set_preempted()
4446 vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED; in kvm_steal_time_set_preempted()
4448 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); in kvm_steal_time_set_preempted()
4451 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_put() argument
4455 if (vcpu->preempted && !vcpu->arch.guest_state_protected) in kvm_arch_vcpu_put()
4456 vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu); in kvm_arch_vcpu_put()
4462 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_put()
4463 if (kvm_xen_msr_enabled(vcpu->kvm)) in kvm_arch_vcpu_put()
4464 kvm_xen_runstate_set_preempted(vcpu); in kvm_arch_vcpu_put()
4466 kvm_steal_time_set_preempted(vcpu); in kvm_arch_vcpu_put()
4467 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_put()
4469 static_call(kvm_x86_vcpu_put)(vcpu); in kvm_arch_vcpu_put()
4470 vcpu->arch.last_host_tsc = rdtsc(); in kvm_arch_vcpu_put()
4473 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_get_lapic() argument
4476 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); in kvm_vcpu_ioctl_get_lapic()
4478 return kvm_apic_get_state(vcpu, s); in kvm_vcpu_ioctl_get_lapic()
4481 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_set_lapic() argument
4486 r = kvm_apic_set_state(vcpu, s); in kvm_vcpu_ioctl_set_lapic()
4489 update_cr8_intercept(vcpu); in kvm_vcpu_ioctl_set_lapic()
4494 static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu) in kvm_cpu_accept_dm_intr() argument
4502 if (kvm_cpu_has_extint(vcpu)) in kvm_cpu_accept_dm_intr()
4506 return (!lapic_in_kernel(vcpu) || in kvm_cpu_accept_dm_intr()
4507 kvm_apic_accept_pic_intr(vcpu)); in kvm_cpu_accept_dm_intr()
4510 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu) in kvm_vcpu_ready_for_interrupt_injection() argument
4519 return (kvm_arch_interrupt_allowed(vcpu) && in kvm_vcpu_ready_for_interrupt_injection()
4520 kvm_cpu_accept_dm_intr(vcpu) && in kvm_vcpu_ready_for_interrupt_injection()
4521 !kvm_event_needs_reinjection(vcpu) && in kvm_vcpu_ready_for_interrupt_injection()
4522 !vcpu->arch.exception.pending); in kvm_vcpu_ready_for_interrupt_injection()
4525 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_interrupt() argument
4531 if (!irqchip_in_kernel(vcpu->kvm)) { in kvm_vcpu_ioctl_interrupt()
4532 kvm_queue_interrupt(vcpu, irq->irq, false); in kvm_vcpu_ioctl_interrupt()
4533 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_vcpu_ioctl_interrupt()
4541 if (pic_in_kernel(vcpu->kvm)) in kvm_vcpu_ioctl_interrupt()
4544 if (vcpu->arch.pending_external_vector != -1) in kvm_vcpu_ioctl_interrupt()
4547 vcpu->arch.pending_external_vector = irq->irq; in kvm_vcpu_ioctl_interrupt()
4548 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_vcpu_ioctl_interrupt()
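kvm_vcpu_ioctl_interrupt() above backs the KVM_INTERRUPT vCPU ioctl, which is only meaningful when the interrupt controller is emulated in userspace (hence the irqchip_in_kernel()/pic_in_kernel() checks). A minimal userspace sketch, assuming vcpu_fd is a vCPU descriptor of a VM created without an in-kernel irqchip:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

/* Queue external interrupt 'vector' for injection on the next KVM_RUN.
 * The kernel rejects the call when a vector is already pending or when
 * the in-kernel PIC is in use (see the fragments above). */
static int inject_external_irq(int vcpu_fd, unsigned int vector)
{
        struct kvm_interrupt irq = { .irq = vector };

        if (ioctl(vcpu_fd, KVM_INTERRUPT, &irq) < 0) {
                perror("KVM_INTERRUPT");
                return -1;
        }
        return 0;
}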
4552 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu) in kvm_vcpu_ioctl_nmi() argument
4554 kvm_inject_nmi(vcpu); in kvm_vcpu_ioctl_nmi()
4559 static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu) in kvm_vcpu_ioctl_smi() argument
4561 kvm_make_request(KVM_REQ_SMI, vcpu); in kvm_vcpu_ioctl_smi()
4566 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu, in vcpu_ioctl_tpr_access_reporting() argument
4571 vcpu->arch.tpr_access_reporting = !!tac->enabled; in vcpu_ioctl_tpr_access_reporting()
4575 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_setup_mce() argument
4587 vcpu->arch.mcg_cap = mcg_cap; in kvm_vcpu_ioctl_x86_setup_mce()
4590 vcpu->arch.mcg_ctl = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
4593 vcpu->arch.mce_banks[bank*4] = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
4595 static_call(kvm_x86_setup_mce)(vcpu); in kvm_vcpu_ioctl_x86_setup_mce()
4600 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_mce() argument
4603 u64 mcg_cap = vcpu->arch.mcg_cap; in kvm_vcpu_ioctl_x86_set_mce()
4605 u64 *banks = vcpu->arch.mce_banks; in kvm_vcpu_ioctl_x86_set_mce()
4614 vcpu->arch.mcg_ctl != ~(u64)0) in kvm_vcpu_ioctl_x86_set_mce()
4624 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || in kvm_vcpu_ioctl_x86_set_mce()
4625 !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) { in kvm_vcpu_ioctl_x86_set_mce()
4626 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in kvm_vcpu_ioctl_x86_set_mce()
4633 vcpu->arch.mcg_status = mce->mcg_status; in kvm_vcpu_ioctl_x86_set_mce()
4635 kvm_queue_exception(vcpu, MC_VECTOR); in kvm_vcpu_ioctl_x86_set_mce()
4648 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_get_vcpu_events() argument
4651 process_nmi(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4653 if (kvm_check_request(KVM_REQ_SMI, vcpu)) in kvm_vcpu_ioctl_x86_get_vcpu_events()
4654 process_smi(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4667 if (!vcpu->kvm->arch.exception_payload_enabled && in kvm_vcpu_ioctl_x86_get_vcpu_events()
4668 vcpu->arch.exception.pending && vcpu->arch.exception.has_payload) in kvm_vcpu_ioctl_x86_get_vcpu_events()
4669 kvm_deliver_exception_payload(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4677 if (kvm_exception_is_soft(vcpu->arch.exception.nr)) { in kvm_vcpu_ioctl_x86_get_vcpu_events()
4681 events->exception.injected = vcpu->arch.exception.injected; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4682 events->exception.pending = vcpu->arch.exception.pending; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4688 if (!vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_get_vcpu_events()
4690 vcpu->arch.exception.pending; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4692 events->exception.nr = vcpu->arch.exception.nr; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4693 events->exception.has_error_code = vcpu->arch.exception.has_error_code; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4694 events->exception.error_code = vcpu->arch.exception.error_code; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4695 events->exception_has_payload = vcpu->arch.exception.has_payload; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4696 events->exception_payload = vcpu->arch.exception.payload; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4699 vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4700 events->interrupt.nr = vcpu->arch.interrupt.nr; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4702 events->interrupt.shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4704 events->nmi.injected = vcpu->arch.nmi_injected; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4705 events->nmi.pending = vcpu->arch.nmi_pending != 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4706 events->nmi.masked = static_call(kvm_x86_get_nmi_mask)(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4711 events->smi.smm = is_smm(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4712 events->smi.pending = vcpu->arch.smi_pending; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4714 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4715 events->smi.latched_init = kvm_lapic_latched_init(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4720 if (vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_get_vcpu_events()
4726 static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm);
4728 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_vcpu_events() argument
4739 if (!vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4757 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4760 process_nmi(vcpu); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4761 vcpu->arch.exception.injected = events->exception.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4762 vcpu->arch.exception.pending = events->exception.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4763 vcpu->arch.exception.nr = events->exception.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4764 vcpu->arch.exception.has_error_code = events->exception.has_error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4765 vcpu->arch.exception.error_code = events->exception.error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4766 vcpu->arch.exception.has_payload = events->exception_has_payload; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4767 vcpu->arch.exception.payload = events->exception_payload; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4769 vcpu->arch.interrupt.injected = events->interrupt.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4770 vcpu->arch.interrupt.nr = events->interrupt.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4771 vcpu->arch.interrupt.soft = events->interrupt.soft; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4773 static_call(kvm_x86_set_interrupt_shadow)(vcpu, in kvm_vcpu_ioctl_x86_set_vcpu_events()
4776 vcpu->arch.nmi_injected = events->nmi.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4778 vcpu->arch.nmi_pending = events->nmi.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4779 static_call(kvm_x86_set_nmi_mask)(vcpu, events->nmi.masked); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4782 lapic_in_kernel(vcpu)) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4783 vcpu->arch.apic->sipi_vector = events->sipi_vector; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4786 if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4787 kvm_smm_changed(vcpu, events->smi.smm); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4789 vcpu->arch.smi_pending = events->smi.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4793 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4795 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4798 if (lapic_in_kernel(vcpu)) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
4800 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4802 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4806 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_vcpu_ioctl_x86_set_vcpu_events()
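kvm_vcpu_ioctl_x86_get_vcpu_events() and kvm_vcpu_ioctl_x86_set_vcpu_events() above implement the KVM_GET_VCPU_EVENTS / KVM_SET_VCPU_EVENTS ioctls that a VMM uses to save and restore pending exception, interrupt, NMI and SMI state (for example across live migration). A minimal round-trip sketch, assuming vcpu_fd is a valid vCPU descriptor:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

static int save_restore_events(int vcpu_fd)
{
        struct kvm_vcpu_events events;

        if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0) {
                perror("KVM_GET_VCPU_EVENTS");
                return -1;
        }

        printf("exception pending=%d nmi pending=%d in SMM=%d\n",
               events.exception.pending, events.nmi.pending, events.smi.smm);

        /* Writing the same state straight back is a no-op; a migration
         * target would instead fill 'events' from the saved stream. */
        if (ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events) < 0) {
                perror("KVM_SET_VCPU_EVENTS");
                return -1;
        }
        return 0;
}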
4811 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_get_debugregs() argument
4816 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); in kvm_vcpu_ioctl_x86_get_debugregs()
4817 kvm_get_dr(vcpu, 6, &val); in kvm_vcpu_ioctl_x86_get_debugregs()
4819 dbgregs->dr7 = vcpu->arch.dr7; in kvm_vcpu_ioctl_x86_get_debugregs()
4824 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_debugregs() argument
4835 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); in kvm_vcpu_ioctl_x86_set_debugregs()
4836 kvm_update_dr0123(vcpu); in kvm_vcpu_ioctl_x86_set_debugregs()
4837 vcpu->arch.dr6 = dbgregs->dr6; in kvm_vcpu_ioctl_x86_set_debugregs()
4838 vcpu->arch.dr7 = dbgregs->dr7; in kvm_vcpu_ioctl_x86_set_debugregs()
4839 kvm_update_dr7(vcpu); in kvm_vcpu_ioctl_x86_set_debugregs()
4844 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_get_xsave() argument
4847 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) in kvm_vcpu_ioctl_x86_get_xsave()
4850 fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, in kvm_vcpu_ioctl_x86_get_xsave()
4853 vcpu->arch.pkru); in kvm_vcpu_ioctl_x86_get_xsave()
4856 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_xsave() argument
4859 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) in kvm_vcpu_ioctl_x86_set_xsave()
4862 return fpu_copy_uabi_to_guest_fpstate(&vcpu->arch.guest_fpu, in kvm_vcpu_ioctl_x86_set_xsave()
4864 supported_xcr0, &vcpu->arch.pkru); in kvm_vcpu_ioctl_x86_set_xsave()
4867 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_get_xcrs() argument
4878 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; in kvm_vcpu_ioctl_x86_get_xcrs()
4881 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_xcrs() argument
4895 r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK, in kvm_vcpu_ioctl_x86_set_xcrs()
4910 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) in kvm_set_guest_paused() argument
4912 if (!vcpu->arch.pv_time_enabled) in kvm_set_guest_paused()
4914 vcpu->arch.pvclock_set_guest_stopped_request = true; in kvm_set_guest_paused()
4915 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_set_guest_paused()
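kvm_set_guest_paused() above is reached from the KVM_KVMCLOCK_CTRL vCPU ioctl; a VMM issues it after pausing the guest so the kvmclock-based watchdogs inside the guest know the stall was host-induced. A minimal sketch, assuming vcpu_fd is valid and the guest has enabled the paravirtual clock (the call is rejected otherwise, per the pv_time_enabled check above):

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

/* Tell the guest's kvmclock that it was stopped by the host, e.g. around
 * a live-migration or suspend pause. */
static int notify_guest_paused(int vcpu_fd)
{
        if (ioctl(vcpu_fd, KVM_KVMCLOCK_CTRL, 0) < 0) {
                perror("KVM_KVMCLOCK_CTRL");
                return -1;
        }
        return 0;
}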
4919 static int kvm_arch_tsc_has_attr(struct kvm_vcpu *vcpu, in kvm_arch_tsc_has_attr() argument
4935 static int kvm_arch_tsc_get_attr(struct kvm_vcpu *vcpu, in kvm_arch_tsc_get_attr() argument
4947 if (put_user(vcpu->arch.l1_tsc_offset, uaddr)) in kvm_arch_tsc_get_attr()
4958 static int kvm_arch_tsc_set_attr(struct kvm_vcpu *vcpu, in kvm_arch_tsc_set_attr() argument
4962 struct kvm *kvm = vcpu->kvm; in kvm_arch_tsc_set_attr()
4980 matched = (vcpu->arch.virtual_tsc_khz && in kvm_arch_tsc_set_attr()
4981 kvm->arch.last_tsc_khz == vcpu->arch.virtual_tsc_khz && in kvm_arch_tsc_set_attr()
4984 tsc = kvm_scale_tsc(vcpu, rdtsc(), vcpu->arch.l1_tsc_scaling_ratio) + offset; in kvm_arch_tsc_set_attr()
4987 __kvm_synchronize_tsc(vcpu, offset, tsc, ns, matched); in kvm_arch_tsc_set_attr()
5000 static int kvm_vcpu_ioctl_device_attr(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_device_attr() argument
5015 r = kvm_arch_tsc_has_attr(vcpu, &attr); in kvm_vcpu_ioctl_device_attr()
5018 r = kvm_arch_tsc_get_attr(vcpu, &attr); in kvm_vcpu_ioctl_device_attr()
5021 r = kvm_arch_tsc_set_attr(vcpu, &attr); in kvm_vcpu_ioctl_device_attr()
5028 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_enable_cap() argument
5045 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_vcpu_ioctl_enable_cap()
5047 return kvm_hv_activate_synic(vcpu, cap->cap == in kvm_vcpu_ioctl_enable_cap()
5052 r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version); in kvm_vcpu_ioctl_enable_cap()
5064 return static_call(kvm_x86_enable_direct_tlbflush)(vcpu); in kvm_vcpu_ioctl_enable_cap()
5067 return kvm_hv_set_enforce_cpuid(vcpu, cap->args[0]); in kvm_vcpu_ioctl_enable_cap()
5070 vcpu->arch.pv_cpuid.enforce = cap->args[0]; in kvm_vcpu_ioctl_enable_cap()
5071 if (vcpu->arch.pv_cpuid.enforce) in kvm_vcpu_ioctl_enable_cap()
5072 kvm_update_pv_runtime(vcpu); in kvm_vcpu_ioctl_enable_cap()
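kvm_vcpu_ioctl_enable_cap() above services the per-vCPU KVM_ENABLE_CAP ioctl; the last fragments handle KVM_CAP_ENFORCE_PV_FEATURE_CPUID, which makes KVM honour only the paravirtual features actually exposed in the guest CPUID. A minimal sketch, assuming vcpu_fd is valid and the running kernel advertises that capability:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <string.h>
#include <stdio.h>

static int enforce_pv_cpuid(int vcpu_fd)
{
        struct kvm_enable_cap cap;

        memset(&cap, 0, sizeof(cap));
        cap.cap = KVM_CAP_ENFORCE_PV_FEATURE_CPUID;
        cap.args[0] = 1;        /* non-zero enables enforcement, see above */

        if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0) {
                perror("KVM_ENABLE_CAP");
                return -1;
        }
        return 0;
}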
5083 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl() local
5094 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl()
5100 if (!lapic_in_kernel(vcpu)) in kvm_arch_vcpu_ioctl()
5108 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic); in kvm_arch_vcpu_ioctl()
5119 if (!lapic_in_kernel(vcpu)) in kvm_arch_vcpu_ioctl()
5127 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic); in kvm_arch_vcpu_ioctl()
5136 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); in kvm_arch_vcpu_ioctl()
5140 r = kvm_vcpu_ioctl_nmi(vcpu); in kvm_arch_vcpu_ioctl()
5144 r = kvm_vcpu_ioctl_smi(vcpu); in kvm_arch_vcpu_ioctl()
5159 if (vcpu->arch.last_vmentry_cpu != -1) in kvm_arch_vcpu_ioctl()
5165 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); in kvm_arch_vcpu_ioctl()
5177 if (vcpu->arch.last_vmentry_cpu != -1) in kvm_arch_vcpu_ioctl()
5183 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, in kvm_arch_vcpu_ioctl()
5194 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, in kvm_arch_vcpu_ioctl()
5205 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
5206 r = msr_io(vcpu, argp, do_get_msr, 1); in kvm_arch_vcpu_ioctl()
5207 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
5211 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
5212 r = msr_io(vcpu, argp, do_set_msr, 0); in kvm_arch_vcpu_ioctl()
5213 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
5222 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac); in kvm_arch_vcpu_ioctl()
5236 if (!lapic_in_kernel(vcpu)) in kvm_arch_vcpu_ioctl()
5241 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
5242 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); in kvm_arch_vcpu_ioctl()
5243 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
5252 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap); in kvm_arch_vcpu_ioctl()
5261 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); in kvm_arch_vcpu_ioctl()
5267 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events); in kvm_arch_vcpu_ioctl()
5282 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events); in kvm_arch_vcpu_ioctl()
5288 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs); in kvm_arch_vcpu_ioctl()
5305 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs); in kvm_arch_vcpu_ioctl()
5314 kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave); in kvm_arch_vcpu_ioctl()
5329 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave); in kvm_arch_vcpu_ioctl()
5338 kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs); in kvm_arch_vcpu_ioctl()
5354 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs); in kvm_arch_vcpu_ioctl()
5370 if (!kvm_set_tsc_khz(vcpu, user_tsc_khz)) in kvm_arch_vcpu_ioctl()
5376 r = vcpu->arch.virtual_tsc_khz; in kvm_arch_vcpu_ioctl()
5380 r = kvm_set_guest_paused(vcpu); in kvm_arch_vcpu_ioctl()
5389 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); in kvm_arch_vcpu_ioctl()
5405 r = kvm_x86_ops.nested_ops->get_state(vcpu, user_kvm_nested_state, in kvm_arch_vcpu_ioctl()
5449 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
5450 r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state); in kvm_arch_vcpu_ioctl()
5451 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
5455 r = kvm_ioctl_get_supported_hv_cpuid(vcpu, argp); in kvm_arch_vcpu_ioctl()
5464 r = kvm_xen_vcpu_get_attr(vcpu, &xva); in kvm_arch_vcpu_ioctl()
5475 r = kvm_xen_vcpu_set_attr(vcpu, &xva); in kvm_arch_vcpu_ioctl()
5484 __get_sregs2(vcpu, u.sregs2); in kvm_arch_vcpu_ioctl()
5498 r = __set_sregs2(vcpu, u.sregs2); in kvm_arch_vcpu_ioctl()
5504 r = kvm_vcpu_ioctl_device_attr(vcpu, ioctl, argp); in kvm_arch_vcpu_ioctl()
5512 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl()
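Every handler dispatched from kvm_arch_vcpu_ioctl() above operates on a vCPU file descriptor. For context, a minimal sketch of how userspace obtains that descriptor and the shared kvm_run area used by the MMIO and PIO exit examples further down (error handling trimmed; assumes /dev/kvm is accessible):

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
        int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
        int vm = ioctl(kvm, KVM_CREATE_VM, 0);          /* VM fd */
        int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);       /* vCPU 0 fd */
        int run_size = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, 0);

        /* kvm_run is shared with the kernel and filled in on every
         * KVM_RUN exit (exit_reason, io, mmio, ...). */
        struct kvm_run *run = mmap(NULL, run_size, PROT_READ | PROT_WRITE,
                                   MAP_SHARED, vcpu, 0);
        if (kvm < 0 || vm < 0 || vcpu < 0 || run == MAP_FAILED) {
                perror("kvm setup");
                return 1;
        }

        /* Per-vCPU ioctls such as KVM_GET_VCPU_EVENTS or KVM_INTERRUPT
         * (see the sketches above) are issued against 'vcpu'. */
        printf("vcpu fd %d, kvm_run mapped at %p\n", vcpu, (void *)run);
        return 0;
}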
5516 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument
5695 struct kvm_vcpu *vcpu; in kvm_arch_sync_dirty_log() local
5698 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_arch_sync_dirty_log()
5699 kvm_vcpu_kick(vcpu); in kvm_arch_sync_dirty_log()
5964 struct kvm_vcpu *vcpu; in kvm_arch_suspend_notifier() local
5968 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_arch_suspend_notifier()
5969 if (!vcpu->arch.pv_time_enabled) in kvm_arch_suspend_notifier()
5972 ret = kvm_set_guest_paused(vcpu); in kvm_arch_suspend_notifier()
5975 vcpu->vcpu_id, ret); in kvm_arch_suspend_notifier()
6450 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, in vcpu_mmio_write() argument
6458 if (!(lapic_in_kernel(vcpu) && in vcpu_mmio_write()
6459 !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v)) in vcpu_mmio_write()
6460 && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v)) in vcpu_mmio_write()
6471 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) in vcpu_mmio_read() argument
6478 if (!(lapic_in_kernel(vcpu) && in vcpu_mmio_read()
6479 !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev, in vcpu_mmio_read()
6481 && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v)) in vcpu_mmio_read()
6493 static void kvm_set_segment(struct kvm_vcpu *vcpu, in kvm_set_segment() argument
6496 static_call(kvm_x86_set_segment)(vcpu, var, seg); in kvm_set_segment()
6499 void kvm_get_segment(struct kvm_vcpu *vcpu, in kvm_get_segment() argument
6502 static_call(kvm_x86_get_segment)(vcpu, var, seg); in kvm_get_segment()
6505 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, in translate_nested_gpa() argument
6510 BUG_ON(!mmu_is_nested(vcpu)); in translate_nested_gpa()
6514 t_gpa = vcpu->arch.mmu->gva_to_gpa(vcpu, gpa, access, exception); in translate_nested_gpa()
6519 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, in kvm_mmu_gva_to_gpa_read() argument
6522 u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_read()
6523 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_read()
6527 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, in kvm_mmu_gva_to_gpa_fetch() argument
6530 u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_fetch()
6532 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_fetch()
6535 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, in kvm_mmu_gva_to_gpa_write() argument
6538 u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_write()
6540 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_write()
6545 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, in kvm_mmu_gva_to_gpa_system() argument
6548 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception); in kvm_mmu_gva_to_gpa_system()
6552 struct kvm_vcpu *vcpu, u32 access, in kvm_read_guest_virt_helper() argument
6559 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access, in kvm_read_guest_virt_helper()
6567 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data, in kvm_read_guest_virt_helper()
6587 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in kvm_fetch_guest_virt() local
6588 u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_fetch_guest_virt()
6593 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK, in kvm_fetch_guest_virt()
6601 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val, in kvm_fetch_guest_virt()
6609 int kvm_read_guest_virt(struct kvm_vcpu *vcpu, in kvm_read_guest_virt() argument
6613 u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_read_guest_virt()
6622 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, in kvm_read_guest_virt()
6631 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_read_std() local
6634 if (!system && static_call(kvm_x86_get_cpl)(vcpu) == 3) in emulator_read_std()
6637 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); in emulator_read_std()
6643 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in kvm_read_guest_phys_system() local
6644 int r = kvm_vcpu_read_guest(vcpu, addr, val, bytes); in kvm_read_guest_phys_system()
6650 struct kvm_vcpu *vcpu, u32 access, in kvm_write_guest_virt_helper() argument
6657 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, in kvm_write_guest_virt_helper()
6666 ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite); in kvm_write_guest_virt_helper()
6684 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_write_std() local
6687 if (!system && static_call(kvm_x86_get_cpl)(vcpu) == 3) in emulator_write_std()
6690 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, in emulator_write_std()
6694 int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val, in kvm_write_guest_virt_system() argument
6698 vcpu->arch.l1tf_flush_l1d = true; in kvm_write_guest_virt_system()
6700 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, in kvm_write_guest_virt_system()
6705 int handle_ud(struct kvm_vcpu *vcpu) in handle_ud() argument
6712 if (unlikely(!static_call(kvm_x86_can_emulate_instruction)(vcpu, NULL, 0))) in handle_ud()
6716 kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu), in handle_ud()
6719 kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig)); in handle_ud()
6723 return kvm_emulate_instruction(vcpu, emul_type); in handle_ud()
6727 static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva, in vcpu_is_mmio_gpa() argument
6734 if (vcpu_match_mmio_gpa(vcpu, gpa)) { in vcpu_is_mmio_gpa()
6742 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, in vcpu_mmio_gva_to_gpa() argument
6746 u32 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0) in vcpu_mmio_gva_to_gpa()
6754 if (vcpu_match_mmio_gva(vcpu, gva) && (!is_paging(vcpu) || in vcpu_mmio_gva_to_gpa()
6755 !permission_fault(vcpu, vcpu->arch.walk_mmu, in vcpu_mmio_gva_to_gpa()
6756 vcpu->arch.mmio_access, 0, access))) { in vcpu_mmio_gva_to_gpa()
6757 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | in vcpu_mmio_gva_to_gpa()
6763 *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in vcpu_mmio_gva_to_gpa()
6768 return vcpu_is_mmio_gpa(vcpu, gva, *gpa, write); in vcpu_mmio_gva_to_gpa()
6771 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, in emulator_write_phys() argument
6776 ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes); in emulator_write_phys()
6779 kvm_page_track_write(vcpu, gpa, val, bytes); in emulator_write_phys()
6784 int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
6786 int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
6788 int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
6790 int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
6795 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) in read_prepare() argument
6797 if (vcpu->mmio_read_completed) { in read_prepare()
6799 vcpu->mmio_fragments[0].gpa, val); in read_prepare()
6800 vcpu->mmio_read_completed = 0; in read_prepare()
6807 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, in read_emulate() argument
6810 return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes); in read_emulate()
6813 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, in write_emulate() argument
6816 return emulator_write_phys(vcpu, gpa, val, bytes); in write_emulate()
6819 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val) in write_mmio() argument
6822 return vcpu_mmio_write(vcpu, gpa, bytes, val); in write_mmio()
6825 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, in read_exit_mmio() argument
6832 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, in write_exit_mmio() argument
6835 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; in write_exit_mmio()
6837 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); in write_exit_mmio()
6858 struct kvm_vcpu *vcpu, in emulator_read_write_onepage() argument
6865 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in emulator_read_write_onepage()
6877 ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write); in emulator_read_write_onepage()
6879 ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write); in emulator_read_write_onepage()
6884 if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes)) in emulator_read_write_onepage()
6890 handled = ops->read_write_mmio(vcpu, gpa, bytes, val); in emulator_read_write_onepage()
6898 WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS); in emulator_read_write_onepage()
6899 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; in emulator_read_write_onepage()
6912 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_read_write() local
6917 ops->read_write_prepare(vcpu, val, bytes)) in emulator_read_write()
6920 vcpu->mmio_nr_fragments = 0; in emulator_read_write()
6928 vcpu, ops); in emulator_read_write()
6940 vcpu, ops); in emulator_read_write()
6944 if (!vcpu->mmio_nr_fragments) in emulator_read_write()
6947 gpa = vcpu->mmio_fragments[0].gpa; in emulator_read_write()
6949 vcpu->mmio_needed = 1; in emulator_read_write()
6950 vcpu->mmio_cur_fragment = 0; in emulator_read_write()
6952 vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len); in emulator_read_write()
6953 vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write; in emulator_read_write()
6954 vcpu->run->exit_reason = KVM_EXIT_MMIO; in emulator_read_write()
6955 vcpu->run->mmio.phys_addr = gpa; in emulator_read_write()
6957 return ops->read_write_exit_mmio(vcpu, gpa, val, bytes); in emulator_read_write()
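emulator_read_write() above splits an access into mmio_fragments and, when no in-kernel device claims the address, exits to userspace with KVM_EXIT_MMIO and the first fragment described in vcpu->run. A minimal userspace handler sketch, assuming vcpu_fd and the mmap'd run area from a setup like the one shown earlier; emulate_mmio() is a hypothetical device model supplied by the VMM:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical device model provided by the VMM. */
void emulate_mmio(uint64_t gpa, uint8_t *data, uint32_t len, int is_write);

static int run_once(int vcpu_fd, struct kvm_run *run)
{
        if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
                perror("KVM_RUN");
                return -1;
        }

        if (run->exit_reason == KVM_EXIT_MMIO) {
                /* For a read, emulate_mmio() must fill run->mmio.data before
                 * the next KVM_RUN so complete_emulated_mmio() can hand the
                 * value back to the in-kernel emulator. */
                emulate_mmio(run->mmio.phys_addr, run->mmio.data,
                             run->mmio.len, run->mmio.is_write);
        }
        return 0;
}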
6998 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_cmpxchg_emulated() local
7008 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL); in emulator_cmpxchg_emulated()
7026 if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map)) in emulator_cmpxchg_emulated()
7048 kvm_vcpu_unmap(vcpu, &map, true); in emulator_cmpxchg_emulated()
7053 kvm_page_track_write(vcpu, gpa, new, bytes); in emulator_cmpxchg_emulated()
7063 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd) in kernel_pio() argument
7067 for (i = 0; i < vcpu->arch.pio.count; i++) { in kernel_pio()
7068 if (vcpu->arch.pio.in) in kernel_pio()
7069 r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port, in kernel_pio()
7070 vcpu->arch.pio.size, pd); in kernel_pio()
7072 r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, in kernel_pio()
7073 vcpu->arch.pio.port, vcpu->arch.pio.size, in kernel_pio()
7077 pd += vcpu->arch.pio.size; in kernel_pio()
7082 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size, in emulator_pio_in_out() argument
7086 vcpu->arch.pio.port = port; in emulator_pio_in_out()
7087 vcpu->arch.pio.in = in; in emulator_pio_in_out()
7088 vcpu->arch.pio.count = count; in emulator_pio_in_out()
7089 vcpu->arch.pio.size = size; in emulator_pio_in_out()
7091 if (!kernel_pio(vcpu, vcpu->arch.pio_data)) in emulator_pio_in_out()
7094 vcpu->run->exit_reason = KVM_EXIT_IO; in emulator_pio_in_out()
7095 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; in emulator_pio_in_out()
7096 vcpu->run->io.size = size; in emulator_pio_in_out()
7097 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; in emulator_pio_in_out()
7098 vcpu->run->io.count = count; in emulator_pio_in_out()
7099 vcpu->run->io.port = port; in emulator_pio_in_out()
7104 static int __emulator_pio_in(struct kvm_vcpu *vcpu, int size, in __emulator_pio_in() argument
7107 WARN_ON(vcpu->arch.pio.count); in __emulator_pio_in()
7108 memset(vcpu->arch.pio_data, 0, size * count); in __emulator_pio_in()
7109 return emulator_pio_in_out(vcpu, size, port, count, true); in __emulator_pio_in()
7112 static void complete_emulator_pio_in(struct kvm_vcpu *vcpu, void *val) in complete_emulator_pio_in() argument
7114 int size = vcpu->arch.pio.size; in complete_emulator_pio_in()
7115 unsigned count = vcpu->arch.pio.count; in complete_emulator_pio_in()
7116 memcpy(val, vcpu->arch.pio_data, size * count); in complete_emulator_pio_in()
7117 trace_kvm_pio(KVM_PIO_IN, vcpu->arch.pio.port, size, count, vcpu->arch.pio_data); in complete_emulator_pio_in()
7118 vcpu->arch.pio.count = 0; in complete_emulator_pio_in()
7121 static int emulator_pio_in(struct kvm_vcpu *vcpu, int size, in emulator_pio_in() argument
7124 if (vcpu->arch.pio.count) { in emulator_pio_in()
7133 int r = __emulator_pio_in(vcpu, size, port, count); in emulator_pio_in()
7140 complete_emulator_pio_in(vcpu, val); in emulator_pio_in()
7152 static int emulator_pio_out(struct kvm_vcpu *vcpu, int size, in emulator_pio_out() argument
7158 memcpy(vcpu->arch.pio_data, val, size * count); in emulator_pio_out()
7159 trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data); in emulator_pio_out()
7160 ret = emulator_pio_in_out(vcpu, size, port, count, false); in emulator_pio_out()
7162 vcpu->arch.pio.count = 0; in emulator_pio_out()
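emulator_pio_in_out() above stages the data in the per-vCPU pio_data page and exits with KVM_EXIT_IO; run->io.data_offset tells userspace where that page sits inside the mmap'd region (KVM_PIO_PAGE_OFFSET * PAGE_SIZE on the kernel side). A minimal handler sketch, again assuming the setup shown earlier; handle_port() is a hypothetical port-I/O device model:

#include <linux/kvm.h>
#include <stdint.h>

/* Hypothetical port handler supplied by the VMM: IN fills 'data',
 * OUT consumes it. */
void handle_port(uint16_t port, uint8_t *data, int size, int is_in);

static void handle_io_exit(struct kvm_run *run)
{
        /* The PIO payload lives inside the same mapping as kvm_run,
         * starting at io.data_offset (see the fragment above). */
        uint8_t *data = (uint8_t *)run + run->io.data_offset;
        uint32_t i;

        for (i = 0; i < run->io.count; i++, data += run->io.size)
                handle_port(run->io.port, data, run->io.size,
                            run->io.direction == KVM_EXIT_IO_IN);
}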
7174 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) in get_segment_base() argument
7176 return static_call(kvm_x86_get_segment_base)(vcpu, seg); in get_segment_base()
7184 static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu) in kvm_emulate_wbinvd_noskip() argument
7186 if (!need_emulate_wbinvd(vcpu)) in kvm_emulate_wbinvd_noskip()
7192 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
7193 on_each_cpu_mask(vcpu->arch.wbinvd_dirty_mask, in kvm_emulate_wbinvd_noskip()
7196 cpumask_clear(vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
7202 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) in kvm_emulate_wbinvd() argument
7204 kvm_emulate_wbinvd_noskip(vcpu); in kvm_emulate_wbinvd()
7205 return kvm_skip_emulated_instruction(vcpu); in kvm_emulate_wbinvd()
7236 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_get_cr() local
7241 value = kvm_read_cr0(vcpu); in emulator_get_cr()
7244 value = vcpu->arch.cr2; in emulator_get_cr()
7247 value = kvm_read_cr3(vcpu); in emulator_get_cr()
7250 value = kvm_read_cr4(vcpu); in emulator_get_cr()
7253 value = kvm_get_cr8(vcpu); in emulator_get_cr()
7265 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_set_cr() local
7270 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val)); in emulator_set_cr()
7273 vcpu->arch.cr2 = val; in emulator_set_cr()
7276 res = kvm_set_cr3(vcpu, val); in emulator_set_cr()
7279 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val)); in emulator_set_cr()
7282 res = kvm_set_cr8(vcpu, val); in emulator_set_cr()
7363 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_set_segment() local
7385 kvm_set_segment(vcpu, &var, seg); in emulator_set_segment()
7392 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_get_msr() local
7395 r = kvm_get_msr(vcpu, msr_index, pdata); in emulator_get_msr()
7397 if (r && kvm_get_msr_user_space(vcpu, msr_index, r)) { in emulator_get_msr()
7408 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_set_msr() local
7411 r = kvm_set_msr(vcpu, msr_index, data); in emulator_set_msr()
7413 if (r && kvm_set_msr_user_space(vcpu, msr_index, data, r)) { in emulator_set_msr()
7423 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_get_smbase() local
7425 return vcpu->arch.smbase; in emulator_get_smbase()
7430 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_set_smbase() local
7432 vcpu->arch.smbase = smbase; in emulator_set_smbase()
7506 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_exiting_smm() local
7508 kvm_smm_changed(vcpu, false); in emulator_exiting_smm()
7574 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) in toggle_interruptibility() argument
7576 u32 int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); in toggle_interruptibility()
7587 static_call(kvm_x86_set_interrupt_shadow)(vcpu, mask); in toggle_interruptibility()
7589 kvm_make_request(KVM_REQ_EVENT, vcpu); in toggle_interruptibility()
7593 static bool inject_emulated_exception(struct kvm_vcpu *vcpu) in inject_emulated_exception() argument
7595 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in inject_emulated_exception()
7597 return kvm_inject_emulated_page_fault(vcpu, &ctxt->exception); in inject_emulated_exception()
7600 kvm_queue_exception_e(vcpu, ctxt->exception.vector, in inject_emulated_exception()
7603 kvm_queue_exception(vcpu, ctxt->exception.vector); in inject_emulated_exception()
7607 static struct x86_emulate_ctxt *alloc_emulate_ctxt(struct kvm_vcpu *vcpu) in alloc_emulate_ctxt() argument
7617 ctxt->vcpu = vcpu; in alloc_emulate_ctxt()
7619 vcpu->arch.emulate_ctxt = ctxt; in alloc_emulate_ctxt()
7624 static void init_emulate_ctxt(struct kvm_vcpu *vcpu) in init_emulate_ctxt() argument
7626 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in init_emulate_ctxt()
7629 static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l); in init_emulate_ctxt()
7632 ctxt->eflags = kvm_get_rflags(vcpu); in init_emulate_ctxt()
7635 ctxt->eip = kvm_rip_read(vcpu); in init_emulate_ctxt()
7636 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : in init_emulate_ctxt()
7638 (cs_l && is_long_mode(vcpu)) ? X86EMUL_MODE_PROT64 : in init_emulate_ctxt()
7651 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in init_emulate_ctxt()
7654 void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip) in kvm_inject_realmode_interrupt() argument
7656 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in kvm_inject_realmode_interrupt()
7659 init_emulate_ctxt(vcpu); in kvm_inject_realmode_interrupt()
7667 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in kvm_inject_realmode_interrupt()
7670 kvm_rip_write(vcpu, ctxt->eip); in kvm_inject_realmode_interrupt()
7671 kvm_set_rflags(vcpu, ctxt->eflags); in kvm_inject_realmode_interrupt()
7676 static void prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data, in prepare_emulation_failure_exit() argument
7679 struct kvm_run *run = vcpu->run; in prepare_emulation_failure_exit()
7689 static_call(kvm_x86_get_exit_info)(vcpu, (u32 *)&info[0], &info[1], in prepare_emulation_failure_exit()
7727 static void prepare_emulation_ctxt_failure_exit(struct kvm_vcpu *vcpu) in prepare_emulation_ctxt_failure_exit() argument
7729 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in prepare_emulation_ctxt_failure_exit()
7731 prepare_emulation_failure_exit(vcpu, NULL, 0, ctxt->fetch.data, in prepare_emulation_ctxt_failure_exit()
7735 void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data, in __kvm_prepare_emulation_failure_exit() argument
7738 prepare_emulation_failure_exit(vcpu, data, ndata, NULL, 0); in __kvm_prepare_emulation_failure_exit()
7742 void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu) in kvm_prepare_emulation_failure_exit() argument
7744 __kvm_prepare_emulation_failure_exit(vcpu, NULL, 0); in kvm_prepare_emulation_failure_exit()
7748 static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type) in handle_emulation_failure() argument
7750 struct kvm *kvm = vcpu->kvm; in handle_emulation_failure()
7752 ++vcpu->stat.insn_emulation_fail; in handle_emulation_failure()
7753 trace_kvm_emulate_insn_failed(vcpu); in handle_emulation_failure()
7756 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); in handle_emulation_failure()
7762 prepare_emulation_ctxt_failure_exit(vcpu); in handle_emulation_failure()
7766 kvm_queue_exception(vcpu, UD_VECTOR); in handle_emulation_failure()
7768 if (!is_guest_mode(vcpu) && static_call(kvm_x86_get_cpl)(vcpu) == 0) { in handle_emulation_failure()
7769 prepare_emulation_ctxt_failure_exit(vcpu); in handle_emulation_failure()
7776 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, in reexecute_instruction() argument
7786 if (WARN_ON_ONCE(is_guest_mode(vcpu)) || in reexecute_instruction()
7790 if (!vcpu->arch.mmu->direct_map) { in reexecute_instruction()
7795 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL); in reexecute_instruction()
7811 pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
7823 if (vcpu->arch.mmu->direct_map) { in reexecute_instruction()
7826 write_lock(&vcpu->kvm->mmu_lock); in reexecute_instruction()
7827 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; in reexecute_instruction()
7828 write_unlock(&vcpu->kvm->mmu_lock); in reexecute_instruction()
7831 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
7841 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
7854 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in retry_instruction() local
7857 last_retry_eip = vcpu->arch.last_retry_eip; in retry_instruction()
7858 last_retry_addr = vcpu->arch.last_retry_addr; in retry_instruction()
7873 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; in retry_instruction()
7878 if (WARN_ON_ONCE(is_guest_mode(vcpu)) || in retry_instruction()
7888 vcpu->arch.last_retry_eip = ctxt->eip; in retry_instruction()
7889 vcpu->arch.last_retry_addr = cr2_or_gpa; in retry_instruction()
7891 if (!vcpu->arch.mmu->direct_map) in retry_instruction()
7892 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL); in retry_instruction()
7894 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in retry_instruction()
7899 static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
7900 static int complete_emulated_pio(struct kvm_vcpu *vcpu);
7902 static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm) in kvm_smm_changed() argument
7904 trace_kvm_smm_transition(vcpu->vcpu_id, vcpu->arch.smbase, entering_smm); in kvm_smm_changed()
7907 vcpu->arch.hflags |= HF_SMM_MASK; in kvm_smm_changed()
7909 vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK); in kvm_smm_changed()
7912 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_smm_changed()
7919 vcpu->arch.pdptrs_from_userspace = false; in kvm_smm_changed()
7922 kvm_mmu_reset_context(vcpu); in kvm_smm_changed()
7940 static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu) in kvm_vcpu_do_singlestep() argument
7942 struct kvm_run *kvm_run = vcpu->run; in kvm_vcpu_do_singlestep()
7944 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { in kvm_vcpu_do_singlestep()
7946 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu); in kvm_vcpu_do_singlestep()
7951 kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BS); in kvm_vcpu_do_singlestep()
7955 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu) in kvm_skip_emulated_instruction() argument
7957 unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); in kvm_skip_emulated_instruction()
7960 r = static_call(kvm_x86_skip_emulated_instruction)(vcpu); in kvm_skip_emulated_instruction()
7973 r = kvm_vcpu_do_singlestep(vcpu); in kvm_skip_emulated_instruction()
7978 static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r) in kvm_vcpu_check_breakpoint() argument
7980 if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && in kvm_vcpu_check_breakpoint()
7981 (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { in kvm_vcpu_check_breakpoint()
7982 struct kvm_run *kvm_run = vcpu->run; in kvm_vcpu_check_breakpoint()
7983 unsigned long eip = kvm_get_linear_rip(vcpu); in kvm_vcpu_check_breakpoint()
7985 vcpu->arch.guest_debug_dr7, in kvm_vcpu_check_breakpoint()
7986 vcpu->arch.eff_db); in kvm_vcpu_check_breakpoint()
7998 if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) && in kvm_vcpu_check_breakpoint()
7999 !(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) { in kvm_vcpu_check_breakpoint()
8000 unsigned long eip = kvm_get_linear_rip(vcpu); in kvm_vcpu_check_breakpoint()
8002 vcpu->arch.dr7, in kvm_vcpu_check_breakpoint()
8003 vcpu->arch.db); in kvm_vcpu_check_breakpoint()
8006 kvm_queue_exception_p(vcpu, DB_VECTOR, dr6); in kvm_vcpu_check_breakpoint()
8049 int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type, in x86_decode_emulated_instruction() argument
8053 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in x86_decode_emulated_instruction()
8055 init_emulate_ctxt(vcpu); in x86_decode_emulated_instruction()
8063 kvm_vcpu_check_breakpoint(vcpu, &r)) in x86_decode_emulated_instruction()
8068 trace_kvm_emulate_insn_start(vcpu); in x86_decode_emulated_instruction()
8069 ++vcpu->stat.insn_emulation; in x86_decode_emulated_instruction()
8075 int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, in x86_emulate_instruction() argument
8079 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in x86_emulate_instruction()
8083 if (unlikely(!static_call(kvm_x86_can_emulate_instruction)(vcpu, insn, insn_len))) in x86_emulate_instruction()
8086 vcpu->arch.l1tf_flush_l1d = true; in x86_emulate_instruction()
8092 write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; in x86_emulate_instruction()
8093 vcpu->arch.write_fault_to_shadow_pgtable = false; in x86_emulate_instruction()
8096 kvm_clear_exception_queue(vcpu); in x86_emulate_instruction()
8098 r = x86_decode_emulated_instruction(vcpu, emulation_type, in x86_emulate_instruction()
8103 kvm_queue_exception(vcpu, UD_VECTOR); in x86_emulate_instruction()
8106 if (reexecute_instruction(vcpu, cr2_or_gpa, in x86_emulate_instruction()
8117 inject_emulated_exception(vcpu); in x86_emulate_instruction()
8120 return handle_emulation_failure(vcpu, emulation_type); in x86_emulate_instruction()
8126 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); in x86_emulate_instruction()
8136 kvm_rip_write(vcpu, ctxt->_eip); in x86_emulate_instruction()
8138 kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF); in x86_emulate_instruction()
8147 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { in x86_emulate_instruction()
8148 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in x86_emulate_instruction()
8158 if (vcpu->arch.mmu->direct_map) { in x86_emulate_instruction()
8173 if (reexecute_instruction(vcpu, cr2_or_gpa, write_fault_to_spt, in x86_emulate_instruction()
8177 return handle_emulation_failure(vcpu, emulation_type); in x86_emulate_instruction()
8182 if (inject_emulated_exception(vcpu)) in x86_emulate_instruction()
8184 } else if (vcpu->arch.pio.count) { in x86_emulate_instruction()
8185 if (!vcpu->arch.pio.in) { in x86_emulate_instruction()
8187 vcpu->arch.pio.count = 0; in x86_emulate_instruction()
8190 vcpu->arch.complete_userspace_io = complete_emulated_pio; in x86_emulate_instruction()
8193 } else if (vcpu->mmio_needed) { in x86_emulate_instruction()
8194 ++vcpu->stat.mmio_exits; in x86_emulate_instruction()
8196 if (!vcpu->mmio_is_write) in x86_emulate_instruction()
8199 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in x86_emulate_instruction()
8206 unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); in x86_emulate_instruction()
8207 toggle_interruptibility(vcpu, ctxt->interruptibility); in x86_emulate_instruction()
8208 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in x86_emulate_instruction()
8211 kvm_rip_write(vcpu, ctxt->eip); in x86_emulate_instruction()
8212 if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) in x86_emulate_instruction()
8213 r = kvm_vcpu_do_singlestep(vcpu); in x86_emulate_instruction()
8215 static_call(kvm_x86_update_emulated_instruction)(vcpu); in x86_emulate_instruction()
8216 __kvm_set_rflags(vcpu, ctxt->eflags); in x86_emulate_instruction()
8226 kvm_make_request(KVM_REQ_EVENT, vcpu); in x86_emulate_instruction()
8228 vcpu->arch.emulate_regs_need_sync_to_vcpu = true; in x86_emulate_instruction()
8233 int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type) in kvm_emulate_instruction() argument
8235 return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0); in kvm_emulate_instruction()
8239 int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, in kvm_emulate_instruction_from_buffer() argument
8242 return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len); in kvm_emulate_instruction_from_buffer()
8246 static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu) in complete_fast_pio_out_port_0x7e() argument
8248 vcpu->arch.pio.count = 0; in complete_fast_pio_out_port_0x7e()
8252 static int complete_fast_pio_out(struct kvm_vcpu *vcpu) in complete_fast_pio_out() argument
8254 vcpu->arch.pio.count = 0; in complete_fast_pio_out()
8256 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) in complete_fast_pio_out()
8259 return kvm_skip_emulated_instruction(vcpu); in complete_fast_pio_out()
8262 static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, in kvm_fast_pio_out() argument
8265 unsigned long val = kvm_rax_read(vcpu); in kvm_fast_pio_out()
8266 int ret = emulator_pio_out(vcpu, size, port, &val, 1); in kvm_fast_pio_out()
8276 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) { in kvm_fast_pio_out()
8277 vcpu->arch.complete_userspace_io = in kvm_fast_pio_out()
8279 kvm_skip_emulated_instruction(vcpu); in kvm_fast_pio_out()
8281 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); in kvm_fast_pio_out()
8282 vcpu->arch.complete_userspace_io = complete_fast_pio_out; in kvm_fast_pio_out()
8287 static int complete_fast_pio_in(struct kvm_vcpu *vcpu) in complete_fast_pio_in() argument
8292 BUG_ON(vcpu->arch.pio.count != 1); in complete_fast_pio_in()
8294 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) { in complete_fast_pio_in()
8295 vcpu->arch.pio.count = 0; in complete_fast_pio_in()
8300 val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0; in complete_fast_pio_in()
8306 emulator_pio_in(vcpu, vcpu->arch.pio.size, vcpu->arch.pio.port, &val, 1); in complete_fast_pio_in()
8307 kvm_rax_write(vcpu, val); in complete_fast_pio_in()
8309 return kvm_skip_emulated_instruction(vcpu); in complete_fast_pio_in()
8312 static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, in kvm_fast_pio_in() argument
8319 val = (size < 4) ? kvm_rax_read(vcpu) : 0; in kvm_fast_pio_in()
8321 ret = emulator_pio_in(vcpu, size, port, &val, 1); in kvm_fast_pio_in()
8323 kvm_rax_write(vcpu, val); in kvm_fast_pio_in()
8327 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); in kvm_fast_pio_in()
8328 vcpu->arch.complete_userspace_io = complete_fast_pio_in; in kvm_fast_pio_in()
8333 int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in) in kvm_fast_pio() argument
8338 ret = kvm_fast_pio_in(vcpu, size, port); in kvm_fast_pio()
8340 ret = kvm_fast_pio_out(vcpu, size, port); in kvm_fast_pio()
8341 return ret && kvm_skip_emulated_instruction(vcpu); in kvm_fast_pio()
8396 struct kvm_vcpu *vcpu; in __kvmclock_cpufreq_notifier() local
8442 kvm_for_each_vcpu(i, vcpu, kvm) { in __kvmclock_cpufreq_notifier()
8443 if (vcpu->cpu != cpu) in __kvmclock_cpufreq_notifier()
8445 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in __kvmclock_cpufreq_notifier()
8446 if (vcpu->cpu != raw_smp_processor_id()) in __kvmclock_cpufreq_notifier()
8552 struct kvm_vcpu *vcpu = __this_cpu_read(current_vcpu); in kvm_handle_intel_pt_intr() local
8554 kvm_make_request(KVM_REQ_PMI, vcpu); in kvm_handle_intel_pt_intr()
8556 (unsigned long *)&vcpu->arch.pmu.global_status); in kvm_handle_intel_pt_intr()
8571 struct kvm_vcpu *vcpu; in pvclock_gtod_update_fn() local
8576 kvm_for_each_vcpu(i, vcpu, kvm) in pvclock_gtod_update_fn()
8577 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); in pvclock_gtod_update_fn()
8733 static int __kvm_vcpu_halt(struct kvm_vcpu *vcpu, int state, int reason) in __kvm_vcpu_halt() argument
8735 ++vcpu->stat.halt_exits; in __kvm_vcpu_halt()
8736 if (lapic_in_kernel(vcpu)) { in __kvm_vcpu_halt()
8737 vcpu->arch.mp_state = state; in __kvm_vcpu_halt()
8740 vcpu->run->exit_reason = reason; in __kvm_vcpu_halt()
8745 int kvm_vcpu_halt(struct kvm_vcpu *vcpu) in kvm_vcpu_halt() argument
8747 return __kvm_vcpu_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT); in kvm_vcpu_halt()
8751 int kvm_emulate_halt(struct kvm_vcpu *vcpu) in kvm_emulate_halt() argument
8753 int ret = kvm_skip_emulated_instruction(vcpu); in kvm_emulate_halt()
8758 return kvm_vcpu_halt(vcpu) && ret; in kvm_emulate_halt()
8762 int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu) in kvm_emulate_ap_reset_hold() argument
8764 int ret = kvm_skip_emulated_instruction(vcpu); in kvm_emulate_ap_reset_hold()
8766 return __kvm_vcpu_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD, KVM_EXIT_AP_RESET_HOLD) && ret; in kvm_emulate_ap_reset_hold()
8771 static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr, in kvm_pv_clock_pairing() argument
8787 clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle); in kvm_pv_clock_pairing()
8792 if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing, in kvm_pv_clock_pairing()
8836 static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id) in kvm_sched_yield() argument
8841 vcpu->stat.directed_yield_attempted++; in kvm_sched_yield()
8847 map = rcu_dereference(vcpu->kvm->arch.apic_map); in kvm_sched_yield()
8850 target = map->phys_map[dest_id]->vcpu; in kvm_sched_yield()
8858 if (vcpu == target) in kvm_sched_yield()
8864 vcpu->stat.directed_yield_successful++; in kvm_sched_yield()
8870 static int complete_hypercall_exit(struct kvm_vcpu *vcpu) in complete_hypercall_exit() argument
8872 u64 ret = vcpu->run->hypercall.ret; in complete_hypercall_exit()
8874 if (!is_64_bit_mode(vcpu)) in complete_hypercall_exit()
8876 kvm_rax_write(vcpu, ret); in complete_hypercall_exit()
8877 ++vcpu->stat.hypercalls; in complete_hypercall_exit()
8878 return kvm_skip_emulated_instruction(vcpu); in complete_hypercall_exit()
8881 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) in kvm_emulate_hypercall() argument
8886 if (kvm_xen_hypercall_enabled(vcpu->kvm)) in kvm_emulate_hypercall()
8887 return kvm_xen_hypercall(vcpu); in kvm_emulate_hypercall()
8889 if (kvm_hv_hypercall_enabled(vcpu)) in kvm_emulate_hypercall()
8890 return kvm_hv_hypercall(vcpu); in kvm_emulate_hypercall()
8892 nr = kvm_rax_read(vcpu); in kvm_emulate_hypercall()
8893 a0 = kvm_rbx_read(vcpu); in kvm_emulate_hypercall()
8894 a1 = kvm_rcx_read(vcpu); in kvm_emulate_hypercall()
8895 a2 = kvm_rdx_read(vcpu); in kvm_emulate_hypercall()
8896 a3 = kvm_rsi_read(vcpu); in kvm_emulate_hypercall()
8900 op_64_bit = is_64_bit_hypercall(vcpu); in kvm_emulate_hypercall()
8909 if (static_call(kvm_x86_get_cpl)(vcpu) != 0) { in kvm_emulate_hypercall()
8921 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_UNHALT)) in kvm_emulate_hypercall()
8924 kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1); in kvm_emulate_hypercall()
8925 kvm_sched_yield(vcpu, a1); in kvm_emulate_hypercall()
8930 ret = kvm_pv_clock_pairing(vcpu, a0, a1); in kvm_emulate_hypercall()
8934 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SEND_IPI)) in kvm_emulate_hypercall()
8937 ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit); in kvm_emulate_hypercall()
8940 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SCHED_YIELD)) in kvm_emulate_hypercall()
8943 kvm_sched_yield(vcpu, a0); in kvm_emulate_hypercall()
8950 if (!(vcpu->kvm->arch.hypercall_exit_enabled & (1 << KVM_HC_MAP_GPA_RANGE))) in kvm_emulate_hypercall()
8959 vcpu->run->exit_reason = KVM_EXIT_HYPERCALL; in kvm_emulate_hypercall()
8960 vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE; in kvm_emulate_hypercall()
8961 vcpu->run->hypercall.args[0] = gpa; in kvm_emulate_hypercall()
8962 vcpu->run->hypercall.args[1] = npages; in kvm_emulate_hypercall()
8963 vcpu->run->hypercall.args[2] = attrs; in kvm_emulate_hypercall()
8964 vcpu->run->hypercall.longmode = op_64_bit; in kvm_emulate_hypercall()
8965 vcpu->arch.complete_userspace_io = complete_hypercall_exit; in kvm_emulate_hypercall()
8975 kvm_rax_write(vcpu, ret); in kvm_emulate_hypercall()
8977 ++vcpu->stat.hypercalls; in kvm_emulate_hypercall()
8978 return kvm_skip_emulated_instruction(vcpu); in kvm_emulate_hypercall()
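kvm_emulate_hypercall() above reads the hypercall number from RAX and the arguments from RBX, RCX, RDX and RSI, and returns the result in RAX; the CPL check means the call must be issued from ring 0 in the guest. A guest-side sketch of that calling convention using the Intel VMCALL encoding only; AMD guests use VMMCALL, and production code patches the instruction (cf. emulator_fix_hypercall() below):

#include <stdint.h>

/* Issue a raw two-argument KVM hypercall from a 64-bit guest at CPL0.
 * Sketch only: assumes an Intel CPU (vmcall); on AMD use vmmcall. */
static inline long kvm_hypercall2(unsigned long nr,
                                  unsigned long a0, unsigned long a1)
{
        long ret;

        asm volatile("vmcall"
                     : "=a"(ret)
                     : "a"(nr), "b"(a0), "c"(a1)
                     : "memory");
        return ret;
}

/* Example use: KVM_HC_KICK_CPU wakes a halted vCPU; a0 carries flags and
 * a1 the target APIC ID, matching the a0/a1 reads in the fragments above. */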
8984 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_fix_hypercall() local
8986 unsigned long rip = kvm_rip_read(vcpu); in emulator_fix_hypercall()
8988 static_call(kvm_x86_patch_hypercall)(vcpu, instruction); in emulator_fix_hypercall()
8994 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) in dm_request_for_irq_injection() argument
8996 return vcpu->run->request_interrupt_window && in dm_request_for_irq_injection()
8997 likely(!pic_in_kernel(vcpu->kvm)); in dm_request_for_irq_injection()
9000 static void post_kvm_run_save(struct kvm_vcpu *vcpu) in post_kvm_run_save() argument
9002 struct kvm_run *kvm_run = vcpu->run; in post_kvm_run_save()
9004 kvm_run->if_flag = static_call(kvm_x86_get_if_flag)(vcpu); in post_kvm_run_save()
9005 kvm_run->cr8 = kvm_get_cr8(vcpu); in post_kvm_run_save()
9006 kvm_run->apic_base = kvm_get_apic_base(vcpu); in post_kvm_run_save()
9013 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in post_kvm_run_save()
9015 pic_in_kernel(vcpu->kvm) || in post_kvm_run_save()
9016 kvm_vcpu_ready_for_interrupt_injection(vcpu); in post_kvm_run_save()
9017 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in post_kvm_run_save()
9019 if (is_smm(vcpu)) in post_kvm_run_save()
9023 static void update_cr8_intercept(struct kvm_vcpu *vcpu) in update_cr8_intercept() argument
9030 if (!lapic_in_kernel(vcpu)) in update_cr8_intercept()
9033 if (vcpu->arch.apicv_active) in update_cr8_intercept()
9036 if (!vcpu->arch.apic->vapic_addr) in update_cr8_intercept()
9037 max_irr = kvm_lapic_find_highest_irr(vcpu); in update_cr8_intercept()
9044 tpr = kvm_lapic_get_cr8(vcpu); in update_cr8_intercept()
9046 static_call(kvm_x86_update_cr8_intercept)(vcpu, tpr, max_irr); in update_cr8_intercept()
9050 int kvm_check_nested_events(struct kvm_vcpu *vcpu) in kvm_check_nested_events() argument
9052 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { in kvm_check_nested_events()
9053 kvm_x86_ops.nested_ops->triple_fault(vcpu); in kvm_check_nested_events()
9057 return kvm_x86_ops.nested_ops->check_events(vcpu); in kvm_check_nested_events()
9060 static void kvm_inject_exception(struct kvm_vcpu *vcpu) in kvm_inject_exception() argument
9062 if (vcpu->arch.exception.error_code && !is_protmode(vcpu)) in kvm_inject_exception()
9063 vcpu->arch.exception.error_code = false; in kvm_inject_exception()
9064 static_call(kvm_x86_queue_exception)(vcpu); in kvm_inject_exception()
9067 static int inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit) in inject_pending_event() argument
9074 if (vcpu->arch.exception.injected) { in inject_pending_event()
9075 kvm_inject_exception(vcpu); in inject_pending_event()
9092 else if (!vcpu->arch.exception.pending) { in inject_pending_event()
9093 if (vcpu->arch.nmi_injected) { in inject_pending_event()
9094 static_call(kvm_x86_set_nmi)(vcpu); in inject_pending_event()
9096 } else if (vcpu->arch.interrupt.injected) { in inject_pending_event()
9097 static_call(kvm_x86_set_irq)(vcpu); in inject_pending_event()
9102 WARN_ON_ONCE(vcpu->arch.exception.injected && in inject_pending_event()
9103 vcpu->arch.exception.pending); in inject_pending_event()
9111 if (is_guest_mode(vcpu)) { in inject_pending_event()
9112 r = kvm_check_nested_events(vcpu); in inject_pending_event()
9118 if (vcpu->arch.exception.pending) { in inject_pending_event()
9119 trace_kvm_inj_exception(vcpu->arch.exception.nr, in inject_pending_event()
9120 vcpu->arch.exception.has_error_code, in inject_pending_event()
9121 vcpu->arch.exception.error_code); in inject_pending_event()
9123 vcpu->arch.exception.pending = false; in inject_pending_event()
9124 vcpu->arch.exception.injected = true; in inject_pending_event()
9126 if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT) in inject_pending_event()
9127 __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) | in inject_pending_event()
9130 if (vcpu->arch.exception.nr == DB_VECTOR) { in inject_pending_event()
9131 kvm_deliver_exception_payload(vcpu); in inject_pending_event()
9132 if (vcpu->arch.dr7 & DR7_GD) { in inject_pending_event()
9133 vcpu->arch.dr7 &= ~DR7_GD; in inject_pending_event()
9134 kvm_update_dr7(vcpu); in inject_pending_event()
9138 kvm_inject_exception(vcpu); in inject_pending_event()
9143 if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) in inject_pending_event()
9157 if (vcpu->arch.smi_pending) { in inject_pending_event()
9158 r = can_inject ? static_call(kvm_x86_smi_allowed)(vcpu, true) : -EBUSY; in inject_pending_event()
9162 vcpu->arch.smi_pending = false; in inject_pending_event()
9163 ++vcpu->arch.smi_count; in inject_pending_event()
9164 enter_smm(vcpu); in inject_pending_event()
9167 static_call(kvm_x86_enable_smi_window)(vcpu); in inject_pending_event()
9170 if (vcpu->arch.nmi_pending) { in inject_pending_event()
9171 r = can_inject ? static_call(kvm_x86_nmi_allowed)(vcpu, true) : -EBUSY; in inject_pending_event()
9175 --vcpu->arch.nmi_pending; in inject_pending_event()
9176 vcpu->arch.nmi_injected = true; in inject_pending_event()
9177 static_call(kvm_x86_set_nmi)(vcpu); in inject_pending_event()
9179 WARN_ON(static_call(kvm_x86_nmi_allowed)(vcpu, true) < 0); in inject_pending_event()
9181 if (vcpu->arch.nmi_pending) in inject_pending_event()
9182 static_call(kvm_x86_enable_nmi_window)(vcpu); in inject_pending_event()
9185 if (kvm_cpu_has_injectable_intr(vcpu)) { in inject_pending_event()
9186 r = can_inject ? static_call(kvm_x86_interrupt_allowed)(vcpu, true) : -EBUSY; in inject_pending_event()
9190 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false); in inject_pending_event()
9191 static_call(kvm_x86_set_irq)(vcpu); in inject_pending_event()
9192 WARN_ON(static_call(kvm_x86_interrupt_allowed)(vcpu, true) < 0); in inject_pending_event()
9194 if (kvm_cpu_has_injectable_intr(vcpu)) in inject_pending_event()
9195 static_call(kvm_x86_enable_irq_window)(vcpu); in inject_pending_event()
9198 if (is_guest_mode(vcpu) && in inject_pending_event()
9200 kvm_x86_ops.nested_ops->hv_timer_pending(vcpu)) in inject_pending_event()
9203 WARN_ON(vcpu->arch.exception.pending); in inject_pending_event()
9214 static void process_nmi(struct kvm_vcpu *vcpu) in process_nmi() argument
9223 if (static_call(kvm_x86_get_nmi_mask)(vcpu) || vcpu->arch.nmi_injected) in process_nmi()
9226 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); in process_nmi()
9227 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); in process_nmi()
9228 kvm_make_request(KVM_REQ_EVENT, vcpu); in process_nmi()
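process_nmi() above drains nmi_queued into nmi_pending, which is the counter the KVM_NMI vcpu ioctl ultimately feeds on x86. A minimal sketch of the userspace side, assuming vcpu_fd is a vCPU file descriptor opened elsewhere and that the local APIC (and thus NMI routing) is being modelled in userspace rather than by the in-kernel irqchip; the helper name is illustrative only.

	#include <linux/kvm.h>
	#include <sys/ioctl.h>
	#include <stdio.h>

	/* Queue one NMI for the vCPU; the kernel bumps nmi_queued and
	 * process_nmi() later folds it into nmi_pending. */
	static int inject_nmi(int vcpu_fd)
	{
		if (ioctl(vcpu_fd, KVM_NMI, 0) < 0) {
			perror("KVM_NMI");
			return -1;
		}
		return 0;
	}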
9245 static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n) in enter_smm_save_seg_32() argument
9250 kvm_get_segment(vcpu, &seg, n); in enter_smm_save_seg_32()
9264 static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n) in enter_smm_save_seg_64() argument
9270 kvm_get_segment(vcpu, &seg, n); in enter_smm_save_seg_64()
9281 static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf) in enter_smm_save_state_32() argument
9288 put_smstate(u32, buf, 0x7ffc, kvm_read_cr0(vcpu)); in enter_smm_save_state_32()
9289 put_smstate(u32, buf, 0x7ff8, kvm_read_cr3(vcpu)); in enter_smm_save_state_32()
9290 put_smstate(u32, buf, 0x7ff4, kvm_get_rflags(vcpu)); in enter_smm_save_state_32()
9291 put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu)); in enter_smm_save_state_32()
9294 put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read_raw(vcpu, i)); in enter_smm_save_state_32()
9296 kvm_get_dr(vcpu, 6, &val); in enter_smm_save_state_32()
9298 kvm_get_dr(vcpu, 7, &val); in enter_smm_save_state_32()
9301 kvm_get_segment(vcpu, &seg, VCPU_SREG_TR); in enter_smm_save_state_32()
9307 kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR); in enter_smm_save_state_32()
9313 static_call(kvm_x86_get_gdt)(vcpu, &dt); in enter_smm_save_state_32()
9317 static_call(kvm_x86_get_idt)(vcpu, &dt); in enter_smm_save_state_32()
9322 enter_smm_save_seg_32(vcpu, buf, i); in enter_smm_save_state_32()
9324 put_smstate(u32, buf, 0x7f14, kvm_read_cr4(vcpu)); in enter_smm_save_state_32()
9328 put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase); in enter_smm_save_state_32()
9332 static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf) in enter_smm_save_state_64() argument
9340 put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read_raw(vcpu, i)); in enter_smm_save_state_64()
9342 put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu)); in enter_smm_save_state_64()
9343 put_smstate(u32, buf, 0x7f70, kvm_get_rflags(vcpu)); in enter_smm_save_state_64()
9345 kvm_get_dr(vcpu, 6, &val); in enter_smm_save_state_64()
9347 kvm_get_dr(vcpu, 7, &val); in enter_smm_save_state_64()
9350 put_smstate(u64, buf, 0x7f58, kvm_read_cr0(vcpu)); in enter_smm_save_state_64()
9351 put_smstate(u64, buf, 0x7f50, kvm_read_cr3(vcpu)); in enter_smm_save_state_64()
9352 put_smstate(u64, buf, 0x7f48, kvm_read_cr4(vcpu)); in enter_smm_save_state_64()
9354 put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase); in enter_smm_save_state_64()
9359 put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer); in enter_smm_save_state_64()
9361 kvm_get_segment(vcpu, &seg, VCPU_SREG_TR); in enter_smm_save_state_64()
9367 static_call(kvm_x86_get_idt)(vcpu, &dt); in enter_smm_save_state_64()
9371 kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR); in enter_smm_save_state_64()
9377 static_call(kvm_x86_get_gdt)(vcpu, &dt); in enter_smm_save_state_64()
9382 enter_smm_save_seg_64(vcpu, buf, i); in enter_smm_save_state_64()
9386 static void enter_smm(struct kvm_vcpu *vcpu) in enter_smm() argument
9395 if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) in enter_smm()
9396 enter_smm_save_state_64(vcpu, buf); in enter_smm()
9399 enter_smm_save_state_32(vcpu, buf); in enter_smm()
9406 static_call(kvm_x86_enter_smm)(vcpu, buf); in enter_smm()
9408 kvm_smm_changed(vcpu, true); in enter_smm()
9409 kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf)); in enter_smm()
9411 if (static_call(kvm_x86_get_nmi_mask)(vcpu)) in enter_smm()
9412 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; in enter_smm()
9414 static_call(kvm_x86_set_nmi_mask)(vcpu, true); in enter_smm()
9416 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); in enter_smm()
9417 kvm_rip_write(vcpu, 0x8000); in enter_smm()
9419 cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG); in enter_smm()
9420 static_call(kvm_x86_set_cr0)(vcpu, cr0); in enter_smm()
9421 vcpu->arch.cr0 = cr0; in enter_smm()
9423 static_call(kvm_x86_set_cr4)(vcpu, 0); in enter_smm()
9427 static_call(kvm_x86_set_idt)(vcpu, &dt); in enter_smm()
9429 kvm_set_dr(vcpu, 7, DR7_FIXED_1); in enter_smm()
9431 cs.selector = (vcpu->arch.smbase >> 4) & 0xffff; in enter_smm()
9432 cs.base = vcpu->arch.smbase; in enter_smm()
9449 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); in enter_smm()
9450 kvm_set_segment(vcpu, &ds, VCPU_SREG_DS); in enter_smm()
9451 kvm_set_segment(vcpu, &ds, VCPU_SREG_ES); in enter_smm()
9452 kvm_set_segment(vcpu, &ds, VCPU_SREG_FS); in enter_smm()
9453 kvm_set_segment(vcpu, &ds, VCPU_SREG_GS); in enter_smm()
9454 kvm_set_segment(vcpu, &ds, VCPU_SREG_SS); in enter_smm()
9457 if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) in enter_smm()
9458 static_call(kvm_x86_set_efer)(vcpu, 0); in enter_smm()
9461 kvm_update_cpuid_runtime(vcpu); in enter_smm()
9462 kvm_mmu_reset_context(vcpu); in enter_smm()
9465 static void process_smi(struct kvm_vcpu *vcpu) in process_smi() argument
9467 vcpu->arch.smi_pending = true; in process_smi()
9468 kvm_make_request(KVM_REQ_EVENT, vcpu); in process_smi()
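process_smi() only latches smi_pending and requests event processing; the same path is reachable from userspace through the KVM_SMI vcpu ioctl on hosts that advertise KVM_CAP_X86_SMM. An illustrative sketch, assuming vcpu_fd is an already-open vCPU fd (the helper name is mine).

	#include <linux/kvm.h>
	#include <sys/ioctl.h>
	#include <stdio.h>

	/* Request an SMI; the pending SMI is taken via enter_smm() once
	 * it can be injected on the next guest entry. */
	static int inject_smi(int vcpu_fd)
	{
		if (ioctl(vcpu_fd, KVM_SMI, 0) < 0) {
			perror("KVM_SMI");
			return -1;
		}
		return 0;
	}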
9482 void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu) in kvm_vcpu_update_apicv() argument
9486 if (!lapic_in_kernel(vcpu)) in kvm_vcpu_update_apicv()
9489 down_read(&vcpu->kvm->arch.apicv_update_lock); in kvm_vcpu_update_apicv()
9491 activate = kvm_apicv_activated(vcpu->kvm); in kvm_vcpu_update_apicv()
9492 if (vcpu->arch.apicv_active == activate) in kvm_vcpu_update_apicv()
9495 vcpu->arch.apicv_active = activate; in kvm_vcpu_update_apicv()
9496 kvm_apic_update_apicv(vcpu); in kvm_vcpu_update_apicv()
9497 static_call(kvm_x86_refresh_apicv_exec_ctrl)(vcpu); in kvm_vcpu_update_apicv()
9505 if (!vcpu->arch.apicv_active) in kvm_vcpu_update_apicv()
9506 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_vcpu_update_apicv()
9509 up_read(&vcpu->kvm->arch.apicv_update_lock); in kvm_vcpu_update_apicv()
9563 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) in vcpu_scan_ioapic() argument
9565 if (!kvm_apic_present(vcpu)) in vcpu_scan_ioapic()
9568 bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256); in vcpu_scan_ioapic()
9570 if (irqchip_split(vcpu->kvm)) in vcpu_scan_ioapic()
9571 kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors); in vcpu_scan_ioapic()
9573 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); in vcpu_scan_ioapic()
9574 if (ioapic_in_kernel(vcpu->kvm)) in vcpu_scan_ioapic()
9575 kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); in vcpu_scan_ioapic()
9578 if (is_guest_mode(vcpu)) in vcpu_scan_ioapic()
9579 vcpu->arch.load_eoi_exitmap_pending = true; in vcpu_scan_ioapic()
9581 kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu); in vcpu_scan_ioapic()
9584 static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu) in vcpu_load_eoi_exitmap() argument
9588 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) in vcpu_load_eoi_exitmap()
9591 if (to_hv_vcpu(vcpu)) { in vcpu_load_eoi_exitmap()
9593 vcpu->arch.ioapic_handled_vectors, in vcpu_load_eoi_exitmap()
9594 to_hv_synic(vcpu)->vec_bitmap, 256); in vcpu_load_eoi_exitmap()
9595 static_call(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap); in vcpu_load_eoi_exitmap()
9600 vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors); in vcpu_load_eoi_exitmap()
9617 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) in kvm_vcpu_reload_apic_access_page() argument
9619 if (!lapic_in_kernel(vcpu)) in kvm_vcpu_reload_apic_access_page()
9625 static_call(kvm_x86_set_apic_access_page_addr)(vcpu); in kvm_vcpu_reload_apic_access_page()
9628 void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu) in __kvm_request_immediate_exit() argument
9630 smp_send_reschedule(vcpu->cpu); in __kvm_request_immediate_exit()
9639 static int vcpu_enter_guest(struct kvm_vcpu *vcpu) in vcpu_enter_guest() argument
9643 dm_request_for_irq_injection(vcpu) && in vcpu_enter_guest()
9644 kvm_cpu_accept_dm_intr(vcpu); in vcpu_enter_guest()
9650 if (unlikely(vcpu->kvm->dirty_ring_size && in vcpu_enter_guest()
9651 kvm_dirty_ring_soft_full(&vcpu->dirty_ring))) { in vcpu_enter_guest()
9652 vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL; in vcpu_enter_guest()
9653 trace_kvm_dirty_ring_exit(vcpu); in vcpu_enter_guest()
9658 if (kvm_request_pending(vcpu)) { in vcpu_enter_guest()
9659 if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu)) { in vcpu_enter_guest()
9663 if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) { in vcpu_enter_guest()
9664 if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) { in vcpu_enter_guest()
9669 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) in vcpu_enter_guest()
9670 kvm_mmu_unload(vcpu); in vcpu_enter_guest()
9671 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu)) in vcpu_enter_guest()
9672 __kvm_migrate_timers(vcpu); in vcpu_enter_guest()
9673 if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu)) in vcpu_enter_guest()
9674 kvm_update_masterclock(vcpu->kvm); in vcpu_enter_guest()
9675 if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu)) in vcpu_enter_guest()
9676 kvm_gen_kvmclock_update(vcpu); in vcpu_enter_guest()
9677 if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) { in vcpu_enter_guest()
9678 r = kvm_guest_time_update(vcpu); in vcpu_enter_guest()
9682 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu)) in vcpu_enter_guest()
9683 kvm_mmu_sync_roots(vcpu); in vcpu_enter_guest()
9684 if (kvm_check_request(KVM_REQ_LOAD_MMU_PGD, vcpu)) in vcpu_enter_guest()
9685 kvm_mmu_load_pgd(vcpu); in vcpu_enter_guest()
9686 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { in vcpu_enter_guest()
9687 kvm_vcpu_flush_tlb_all(vcpu); in vcpu_enter_guest()
9690 kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); in vcpu_enter_guest()
9692 kvm_service_local_tlb_flush_requests(vcpu); in vcpu_enter_guest()
9694 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { in vcpu_enter_guest()
9695 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; in vcpu_enter_guest()
9699 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { in vcpu_enter_guest()
9700 if (is_guest_mode(vcpu)) { in vcpu_enter_guest()
9701 kvm_x86_ops.nested_ops->triple_fault(vcpu); in vcpu_enter_guest()
9703 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; in vcpu_enter_guest()
9704 vcpu->mmio_needed = 0; in vcpu_enter_guest()
9709 if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) { in vcpu_enter_guest()
9711 vcpu->arch.apf.halted = true; in vcpu_enter_guest()
9715 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) in vcpu_enter_guest()
9716 record_steal_time(vcpu); in vcpu_enter_guest()
9717 if (kvm_check_request(KVM_REQ_SMI, vcpu)) in vcpu_enter_guest()
9718 process_smi(vcpu); in vcpu_enter_guest()
9719 if (kvm_check_request(KVM_REQ_NMI, vcpu)) in vcpu_enter_guest()
9720 process_nmi(vcpu); in vcpu_enter_guest()
9721 if (kvm_check_request(KVM_REQ_PMU, vcpu)) in vcpu_enter_guest()
9722 kvm_pmu_handle_event(vcpu); in vcpu_enter_guest()
9723 if (kvm_check_request(KVM_REQ_PMI, vcpu)) in vcpu_enter_guest()
9724 kvm_pmu_deliver_pmi(vcpu); in vcpu_enter_guest()
9725 if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) { in vcpu_enter_guest()
9726 BUG_ON(vcpu->arch.pending_ioapic_eoi > 255); in vcpu_enter_guest()
9727 if (test_bit(vcpu->arch.pending_ioapic_eoi, in vcpu_enter_guest()
9728 vcpu->arch.ioapic_handled_vectors)) { in vcpu_enter_guest()
9729 vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI; in vcpu_enter_guest()
9730 vcpu->run->eoi.vector = in vcpu_enter_guest()
9731 vcpu->arch.pending_ioapic_eoi; in vcpu_enter_guest()
9736 if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu)) in vcpu_enter_guest()
9737 vcpu_scan_ioapic(vcpu); in vcpu_enter_guest()
9738 if (kvm_check_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu)) in vcpu_enter_guest()
9739 vcpu_load_eoi_exitmap(vcpu); in vcpu_enter_guest()
9740 if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu)) in vcpu_enter_guest()
9741 kvm_vcpu_reload_apic_access_page(vcpu); in vcpu_enter_guest()
9742 if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) { in vcpu_enter_guest()
9743 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in vcpu_enter_guest()
9744 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH; in vcpu_enter_guest()
9748 if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) { in vcpu_enter_guest()
9749 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in vcpu_enter_guest()
9750 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET; in vcpu_enter_guest()
9754 if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) { in vcpu_enter_guest()
9755 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in vcpu_enter_guest()
9757 vcpu->run->exit_reason = KVM_EXIT_HYPERV; in vcpu_enter_guest()
9758 vcpu->run->hyperv = hv_vcpu->exit; in vcpu_enter_guest()
9768 if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu)) in vcpu_enter_guest()
9769 kvm_hv_process_stimers(vcpu); in vcpu_enter_guest()
9770 if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu)) in vcpu_enter_guest()
9771 kvm_vcpu_update_apicv(vcpu); in vcpu_enter_guest()
9772 if (kvm_check_request(KVM_REQ_APF_READY, vcpu)) in vcpu_enter_guest()
9773 kvm_check_async_pf_completion(vcpu); in vcpu_enter_guest()
9774 if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu)) in vcpu_enter_guest()
9775 static_call(kvm_x86_msr_filter_changed)(vcpu); in vcpu_enter_guest()
9777 if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu)) in vcpu_enter_guest()
9778 static_call(kvm_x86_update_cpu_dirty_logging)(vcpu); in vcpu_enter_guest()
9781 if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win || in vcpu_enter_guest()
9782 kvm_xen_has_interrupt(vcpu)) { in vcpu_enter_guest()
9783 ++vcpu->stat.req_event; in vcpu_enter_guest()
9784 r = kvm_apic_accept_events(vcpu); in vcpu_enter_guest()
9789 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { in vcpu_enter_guest()
9794 r = inject_pending_event(vcpu, &req_immediate_exit); in vcpu_enter_guest()
9800 static_call(kvm_x86_enable_irq_window)(vcpu); in vcpu_enter_guest()
9802 if (kvm_lapic_enabled(vcpu)) { in vcpu_enter_guest()
9803 update_cr8_intercept(vcpu); in vcpu_enter_guest()
9804 kvm_lapic_sync_to_vapic(vcpu); in vcpu_enter_guest()
9808 r = kvm_mmu_reload(vcpu); in vcpu_enter_guest()
9815 static_call(kvm_x86_prepare_guest_switch)(vcpu); in vcpu_enter_guest()
9823 vcpu->mode = IN_GUEST_MODE; in vcpu_enter_guest()
9825 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in vcpu_enter_guest()
9847 if (kvm_lapic_enabled(vcpu)) in vcpu_enter_guest()
9848 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); in vcpu_enter_guest()
9850 if (kvm_vcpu_exit_request(vcpu)) { in vcpu_enter_guest()
9851 vcpu->mode = OUTSIDE_GUEST_MODE; in vcpu_enter_guest()
9855 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in vcpu_enter_guest()
9861 kvm_make_request(KVM_REQ_EVENT, vcpu); in vcpu_enter_guest()
9862 static_call(kvm_x86_request_immediate_exit)(vcpu); in vcpu_enter_guest()
9869 if (unlikely(vcpu->arch.switch_db_regs)) { in vcpu_enter_guest()
9871 set_debugreg(vcpu->arch.eff_db[0], 0); in vcpu_enter_guest()
9872 set_debugreg(vcpu->arch.eff_db[1], 1); in vcpu_enter_guest()
9873 set_debugreg(vcpu->arch.eff_db[2], 2); in vcpu_enter_guest()
9874 set_debugreg(vcpu->arch.eff_db[3], 3); in vcpu_enter_guest()
9886 WARN_ON_ONCE(kvm_apicv_activated(vcpu->kvm) != kvm_vcpu_apicv_active(vcpu)); in vcpu_enter_guest()
9888 exit_fastpath = static_call(kvm_x86_run)(vcpu); in vcpu_enter_guest()
9892 if (kvm_lapic_enabled(vcpu)) in vcpu_enter_guest()
9893 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); in vcpu_enter_guest()
9895 if (unlikely(kvm_vcpu_exit_request(vcpu))) { in vcpu_enter_guest()
9907 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { in vcpu_enter_guest()
9908 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); in vcpu_enter_guest()
9909 static_call(kvm_x86_sync_dirty_debug_regs)(vcpu); in vcpu_enter_guest()
9910 kvm_update_dr0123(vcpu); in vcpu_enter_guest()
9911 kvm_update_dr7(vcpu); in vcpu_enter_guest()
9924 vcpu->arch.last_vmentry_cpu = vcpu->cpu; in vcpu_enter_guest()
9925 vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); in vcpu_enter_guest()
9927 vcpu->mode = OUTSIDE_GUEST_MODE; in vcpu_enter_guest()
9930 static_call(kvm_x86_handle_exit_irqoff)(vcpu); in vcpu_enter_guest()
9939 kvm_before_interrupt(vcpu); in vcpu_enter_guest()
9941 ++vcpu->stat.exits; in vcpu_enter_guest()
9943 kvm_after_interrupt(vcpu); in vcpu_enter_guest()
9954 if (lapic_in_kernel(vcpu)) { in vcpu_enter_guest()
9955 s64 delta = vcpu->arch.apic->lapic_timer.advance_expire_delta; in vcpu_enter_guest()
9957 trace_kvm_wait_lapic_expire(vcpu->vcpu_id, delta); in vcpu_enter_guest()
9958 vcpu->arch.apic->lapic_timer.advance_expire_delta = S64_MIN; in vcpu_enter_guest()
9965 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in vcpu_enter_guest()
9971 unsigned long rip = kvm_rip_read(vcpu); in vcpu_enter_guest()
9975 if (unlikely(vcpu->arch.tsc_always_catchup)) in vcpu_enter_guest()
9976 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in vcpu_enter_guest()
9978 if (vcpu->arch.apic_attention) in vcpu_enter_guest()
9979 kvm_lapic_sync_from_vapic(vcpu); in vcpu_enter_guest()
9981 r = static_call(kvm_x86_handle_exit)(vcpu, exit_fastpath); in vcpu_enter_guest()
9986 kvm_make_request(KVM_REQ_EVENT, vcpu); in vcpu_enter_guest()
9987 static_call(kvm_x86_cancel_injection)(vcpu); in vcpu_enter_guest()
9988 if (unlikely(vcpu->arch.apic_attention)) in vcpu_enter_guest()
9989 kvm_lapic_sync_from_vapic(vcpu); in vcpu_enter_guest()
9994 static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) in vcpu_block() argument
9996 if (!kvm_arch_vcpu_runnable(vcpu) && in vcpu_block()
9997 (!kvm_x86_ops.pre_block || static_call(kvm_x86_pre_block)(vcpu) == 0)) { in vcpu_block()
9998 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_block()
9999 kvm_vcpu_block(vcpu); in vcpu_block()
10000 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_block()
10003 static_call(kvm_x86_post_block)(vcpu); in vcpu_block()
10005 if (!kvm_check_request(KVM_REQ_UNHALT, vcpu)) in vcpu_block()
10009 if (kvm_apic_accept_events(vcpu) < 0) in vcpu_block()
10011 switch(vcpu->arch.mp_state) { in vcpu_block()
10014 vcpu->arch.pv.pv_unhalted = false; in vcpu_block()
10015 vcpu->arch.mp_state = in vcpu_block()
10019 vcpu->arch.apf.halted = false; in vcpu_block()
10029 static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu) in kvm_vcpu_running() argument
10031 if (is_guest_mode(vcpu)) in kvm_vcpu_running()
10032 kvm_check_nested_events(vcpu); in kvm_vcpu_running()
10034 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && in kvm_vcpu_running()
10035 !vcpu->arch.apf.halted); in kvm_vcpu_running()
10038 static int vcpu_run(struct kvm_vcpu *vcpu) in vcpu_run() argument
10041 struct kvm *kvm = vcpu->kvm; in vcpu_run()
10043 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_run()
10044 vcpu->arch.l1tf_flush_l1d = true; in vcpu_run()
10047 if (kvm_vcpu_running(vcpu)) { in vcpu_run()
10048 r = vcpu_enter_guest(vcpu); in vcpu_run()
10050 r = vcpu_block(kvm, vcpu); in vcpu_run()
10056 kvm_clear_request(KVM_REQ_UNBLOCK, vcpu); in vcpu_run()
10057 if (kvm_cpu_has_pending_timer(vcpu)) in vcpu_run()
10058 kvm_inject_pending_timer_irqs(vcpu); in vcpu_run()
10060 if (dm_request_for_irq_injection(vcpu) && in vcpu_run()
10061 kvm_vcpu_ready_for_interrupt_injection(vcpu)) { in vcpu_run()
10063 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; in vcpu_run()
10064 ++vcpu->stat.request_irq_exits; in vcpu_run()
10069 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_run()
10070 r = xfer_to_guest_mode_handle_work(vcpu); in vcpu_run()
10073 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_run()
10077 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_run()
10082 static inline int complete_emulated_io(struct kvm_vcpu *vcpu) in complete_emulated_io() argument
10086 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in complete_emulated_io()
10087 r = kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE); in complete_emulated_io()
10088 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in complete_emulated_io()
10092 static int complete_emulated_pio(struct kvm_vcpu *vcpu) in complete_emulated_pio() argument
10094 BUG_ON(!vcpu->arch.pio.count); in complete_emulated_pio()
10096 return complete_emulated_io(vcpu); in complete_emulated_pio()
10117 static int complete_emulated_mmio(struct kvm_vcpu *vcpu) in complete_emulated_mmio() argument
10119 struct kvm_run *run = vcpu->run; in complete_emulated_mmio()
10123 BUG_ON(!vcpu->mmio_needed); in complete_emulated_mmio()
10126 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; in complete_emulated_mmio()
10128 if (!vcpu->mmio_is_write) in complete_emulated_mmio()
10134 vcpu->mmio_cur_fragment++; in complete_emulated_mmio()
10142 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { in complete_emulated_mmio()
10143 vcpu->mmio_needed = 0; in complete_emulated_mmio()
10146 if (vcpu->mmio_is_write) in complete_emulated_mmio()
10148 vcpu->mmio_read_completed = 1; in complete_emulated_mmio()
10149 return complete_emulated_io(vcpu); in complete_emulated_mmio()
10154 if (vcpu->mmio_is_write) in complete_emulated_mmio()
10157 run->mmio.is_write = vcpu->mmio_is_write; in complete_emulated_mmio()
10158 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in complete_emulated_mmio()
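complete_emulated_mmio() is the kernel half of the KVM_EXIT_MMIO handshake: userspace services the access recorded in the shared kvm_run area and, for reads, fills run->mmio.data before the next KVM_RUN so the read can be completed here. A hedged userspace-side sketch, assuming run points at the mmap'ed kvm_run structure and handle_read/handle_write are device-model callbacks supplied by the caller (both names are placeholders).

	#include <linux/kvm.h>
	#include <stdint.h>
	#include <string.h>

	/* Service one KVM_EXIT_MMIO; reads must be answered in
	 * run->mmio.data before re-entering the guest with KVM_RUN. */
	static void handle_mmio_exit(struct kvm_run *run,
				     uint64_t (*handle_read)(uint64_t addr, uint32_t len),
				     void (*handle_write)(uint64_t addr, const void *data, uint32_t len))
	{
		if (run->mmio.is_write) {
			handle_write(run->mmio.phys_addr, run->mmio.data, run->mmio.len);
		} else {
			uint64_t val = handle_read(run->mmio.phys_addr, run->mmio.len);
			memcpy(run->mmio.data, &val, run->mmio.len);
		}
	}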
10163 static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) in kvm_load_guest_fpu() argument
10169 fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, true); in kvm_load_guest_fpu()
10174 static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) in kvm_put_guest_fpu() argument
10176 fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, false); in kvm_put_guest_fpu()
10177 ++vcpu->stat.fpu_reload; in kvm_put_guest_fpu()
10181 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_ioctl_run() argument
10183 struct kvm_run *kvm_run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
10186 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_run()
10187 kvm_sigset_activate(vcpu); in kvm_arch_vcpu_ioctl_run()
10189 kvm_load_guest_fpu(vcpu); in kvm_arch_vcpu_ioctl_run()
10191 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { in kvm_arch_vcpu_ioctl_run()
10196 kvm_vcpu_block(vcpu); in kvm_arch_vcpu_ioctl_run()
10197 if (kvm_apic_accept_events(vcpu) < 0) { in kvm_arch_vcpu_ioctl_run()
10201 kvm_clear_request(KVM_REQ_UNHALT, vcpu); in kvm_arch_vcpu_ioctl_run()
10206 ++vcpu->stat.signal_exits; in kvm_arch_vcpu_ioctl_run()
10218 r = sync_regs(vcpu); in kvm_arch_vcpu_ioctl_run()
10224 if (!lapic_in_kernel(vcpu)) { in kvm_arch_vcpu_ioctl_run()
10225 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { in kvm_arch_vcpu_ioctl_run()
10231 if (unlikely(vcpu->arch.complete_userspace_io)) { in kvm_arch_vcpu_ioctl_run()
10232 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; in kvm_arch_vcpu_ioctl_run()
10233 vcpu->arch.complete_userspace_io = NULL; in kvm_arch_vcpu_ioctl_run()
10234 r = cui(vcpu); in kvm_arch_vcpu_ioctl_run()
10238 WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed); in kvm_arch_vcpu_ioctl_run()
10243 r = vcpu_run(vcpu); in kvm_arch_vcpu_ioctl_run()
10246 kvm_put_guest_fpu(vcpu); in kvm_arch_vcpu_ioctl_run()
10248 store_regs(vcpu); in kvm_arch_vcpu_ioctl_run()
10249 post_kvm_run_save(vcpu); in kvm_arch_vcpu_ioctl_run()
10250 kvm_sigset_deactivate(vcpu); in kvm_arch_vcpu_ioctl_run()
10252 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_run()
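kvm_arch_vcpu_ioctl_run() is where the KVM_RUN ioctl lands after the generic vcpu_load/sigset handling. A minimal userspace run loop, assuming vcpu_fd and the mmap'ed run area were set up elsewhere (see the KVM_CREATE_VCPU sketch further down) and treating anything unhandled as fatal; the function name is illustrative.

	#include <linux/kvm.h>
	#include <sys/ioctl.h>
	#include <stdio.h>

	static int run_vcpu(int vcpu_fd, struct kvm_run *run)
	{
		for (;;) {
			if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
				perror("KVM_RUN");
				return -1;
			}
			switch (run->exit_reason) {
			case KVM_EXIT_HLT:
				return 0;	/* guest halted */
			case KVM_EXIT_IO:
				/* port I/O data lives at (char *)run + run->io.data_offset */
				break;
			case KVM_EXIT_MMIO:
				/* see the handle_mmio_exit() sketch above */
				break;
			case KVM_EXIT_SHUTDOWN:
			case KVM_EXIT_INTERNAL_ERROR:
				fprintf(stderr, "unexpected exit %u\n", run->exit_reason);
				return -1;
			default:
				break;
			}
		}
	}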
10256 static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in __get_regs() argument
10258 if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { in __get_regs()
10266 emulator_writeback_register_cache(vcpu->arch.emulate_ctxt); in __get_regs()
10267 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in __get_regs()
10269 regs->rax = kvm_rax_read(vcpu); in __get_regs()
10270 regs->rbx = kvm_rbx_read(vcpu); in __get_regs()
10271 regs->rcx = kvm_rcx_read(vcpu); in __get_regs()
10272 regs->rdx = kvm_rdx_read(vcpu); in __get_regs()
10273 regs->rsi = kvm_rsi_read(vcpu); in __get_regs()
10274 regs->rdi = kvm_rdi_read(vcpu); in __get_regs()
10275 regs->rsp = kvm_rsp_read(vcpu); in __get_regs()
10276 regs->rbp = kvm_rbp_read(vcpu); in __get_regs()
10278 regs->r8 = kvm_r8_read(vcpu); in __get_regs()
10279 regs->r9 = kvm_r9_read(vcpu); in __get_regs()
10280 regs->r10 = kvm_r10_read(vcpu); in __get_regs()
10281 regs->r11 = kvm_r11_read(vcpu); in __get_regs()
10282 regs->r12 = kvm_r12_read(vcpu); in __get_regs()
10283 regs->r13 = kvm_r13_read(vcpu); in __get_regs()
10284 regs->r14 = kvm_r14_read(vcpu); in __get_regs()
10285 regs->r15 = kvm_r15_read(vcpu); in __get_regs()
10288 regs->rip = kvm_rip_read(vcpu); in __get_regs()
10289 regs->rflags = kvm_get_rflags(vcpu); in __get_regs()
10292 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_get_regs() argument
10294 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
10295 __get_regs(vcpu, regs); in kvm_arch_vcpu_ioctl_get_regs()
10296 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
10300 static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in __set_regs() argument
10302 vcpu->arch.emulate_regs_need_sync_from_vcpu = true; in __set_regs()
10303 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in __set_regs()
10305 kvm_rax_write(vcpu, regs->rax); in __set_regs()
10306 kvm_rbx_write(vcpu, regs->rbx); in __set_regs()
10307 kvm_rcx_write(vcpu, regs->rcx); in __set_regs()
10308 kvm_rdx_write(vcpu, regs->rdx); in __set_regs()
10309 kvm_rsi_write(vcpu, regs->rsi); in __set_regs()
10310 kvm_rdi_write(vcpu, regs->rdi); in __set_regs()
10311 kvm_rsp_write(vcpu, regs->rsp); in __set_regs()
10312 kvm_rbp_write(vcpu, regs->rbp); in __set_regs()
10314 kvm_r8_write(vcpu, regs->r8); in __set_regs()
10315 kvm_r9_write(vcpu, regs->r9); in __set_regs()
10316 kvm_r10_write(vcpu, regs->r10); in __set_regs()
10317 kvm_r11_write(vcpu, regs->r11); in __set_regs()
10318 kvm_r12_write(vcpu, regs->r12); in __set_regs()
10319 kvm_r13_write(vcpu, regs->r13); in __set_regs()
10320 kvm_r14_write(vcpu, regs->r14); in __set_regs()
10321 kvm_r15_write(vcpu, regs->r15); in __set_regs()
10324 kvm_rip_write(vcpu, regs->rip); in __set_regs()
10325 kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED); in __set_regs()
10327 vcpu->arch.exception.pending = false; in __set_regs()
10329 kvm_make_request(KVM_REQ_EVENT, vcpu); in __set_regs()
10332 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_set_regs() argument
10334 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_set_regs()
10335 __set_regs(vcpu, regs); in kvm_arch_vcpu_ioctl_set_regs()
10336 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_set_regs()
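__get_regs()/__set_regs() above back the KVM_GET_REGS and KVM_SET_REGS vcpu ioctls. A small sketch of driving them from userspace, assuming vcpu_fd is valid; the example only moves the instruction pointer and the helper name is mine.

	#include <linux/kvm.h>
	#include <sys/ioctl.h>
	#include <stdio.h>

	/* Read the GPRs, point RIP at new_rip, write them back. */
	static int set_guest_rip(int vcpu_fd, unsigned long long new_rip)
	{
		struct kvm_regs regs;

		if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0) {
			perror("KVM_GET_REGS");
			return -1;
		}
		regs.rip = new_rip;
		if (ioctl(vcpu_fd, KVM_SET_REGS, &regs) < 0) {
			perror("KVM_SET_REGS");
			return -1;
		}
		return 0;
	}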
10340 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) in kvm_get_cs_db_l_bits() argument
10344 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); in kvm_get_cs_db_l_bits()
10350 static void __get_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) in __get_sregs_common() argument
10354 if (vcpu->arch.guest_state_protected) in __get_sregs_common()
10357 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); in __get_sregs_common()
10358 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); in __get_sregs_common()
10359 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); in __get_sregs_common()
10360 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); in __get_sregs_common()
10361 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); in __get_sregs_common()
10362 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); in __get_sregs_common()
10364 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); in __get_sregs_common()
10365 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); in __get_sregs_common()
10367 static_call(kvm_x86_get_idt)(vcpu, &dt); in __get_sregs_common()
10370 static_call(kvm_x86_get_gdt)(vcpu, &dt); in __get_sregs_common()
10374 sregs->cr2 = vcpu->arch.cr2; in __get_sregs_common()
10375 sregs->cr3 = kvm_read_cr3(vcpu); in __get_sregs_common()
10378 sregs->cr0 = kvm_read_cr0(vcpu); in __get_sregs_common()
10379 sregs->cr4 = kvm_read_cr4(vcpu); in __get_sregs_common()
10380 sregs->cr8 = kvm_get_cr8(vcpu); in __get_sregs_common()
10381 sregs->efer = vcpu->arch.efer; in __get_sregs_common()
10382 sregs->apic_base = kvm_get_apic_base(vcpu); in __get_sregs_common()
10385 static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) in __get_sregs() argument
10387 __get_sregs_common(vcpu, sregs); in __get_sregs()
10389 if (vcpu->arch.guest_state_protected) in __get_sregs()
10392 if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft) in __get_sregs()
10393 set_bit(vcpu->arch.interrupt.nr, in __get_sregs()
10397 static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2) in __get_sregs2() argument
10401 __get_sregs_common(vcpu, (struct kvm_sregs *)sregs2); in __get_sregs2()
10403 if (vcpu->arch.guest_state_protected) in __get_sregs2()
10406 if (is_pae_paging(vcpu)) { in __get_sregs2()
10408 sregs2->pdptrs[i] = kvm_pdptr_read(vcpu, i); in __get_sregs2()
10413 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_sregs() argument
10416 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_get_sregs()
10417 __get_sregs(vcpu, sregs); in kvm_arch_vcpu_ioctl_get_sregs()
10418 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_get_sregs()
10422 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_mpstate() argument
10427 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_get_mpstate()
10429 kvm_load_guest_fpu(vcpu); in kvm_arch_vcpu_ioctl_get_mpstate()
10431 r = kvm_apic_accept_events(vcpu); in kvm_arch_vcpu_ioctl_get_mpstate()
10436 if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED || in kvm_arch_vcpu_ioctl_get_mpstate()
10437 vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) && in kvm_arch_vcpu_ioctl_get_mpstate()
10438 vcpu->arch.pv.pv_unhalted) in kvm_arch_vcpu_ioctl_get_mpstate()
10441 mp_state->mp_state = vcpu->arch.mp_state; in kvm_arch_vcpu_ioctl_get_mpstate()
10445 kvm_put_guest_fpu(vcpu); in kvm_arch_vcpu_ioctl_get_mpstate()
10446 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_get_mpstate()
10450 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_mpstate() argument
10455 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_set_mpstate()
10457 if (!lapic_in_kernel(vcpu) && in kvm_arch_vcpu_ioctl_set_mpstate()
10466 if ((kvm_vcpu_latch_init(vcpu) || vcpu->arch.smi_pending) && in kvm_arch_vcpu_ioctl_set_mpstate()
10472 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; in kvm_arch_vcpu_ioctl_set_mpstate()
10473 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); in kvm_arch_vcpu_ioctl_set_mpstate()
10475 vcpu->arch.mp_state = mp_state->mp_state; in kvm_arch_vcpu_ioctl_set_mpstate()
10476 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_arch_vcpu_ioctl_set_mpstate()
10480 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_set_mpstate()
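The mp_state accessors above are exposed to userspace as KVM_GET_MP_STATE / KVM_SET_MP_STATE, which is how a VMM typically parks or releases an AP during SMP bring-up. A hedged example, assuming vcpu_fd is already open and that forcing RUNNABLE is what the caller wants.

	#include <linux/kvm.h>
	#include <sys/ioctl.h>
	#include <stdio.h>

	/* Force a vCPU back to the runnable state, e.g. after a reset. */
	static int make_vcpu_runnable(int vcpu_fd)
	{
		struct kvm_mp_state mp = { .mp_state = KVM_MP_STATE_RUNNABLE };

		if (ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp) < 0) {
			perror("KVM_SET_MP_STATE");
			return -1;
		}
		return 0;
	}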
10484 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, in kvm_task_switch() argument
10487 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in kvm_task_switch()
10490 init_emulate_ctxt(vcpu); in kvm_task_switch()
10495 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_task_switch()
10496 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in kvm_task_switch()
10497 vcpu->run->internal.ndata = 0; in kvm_task_switch()
10501 kvm_rip_write(vcpu, ctxt->eip); in kvm_task_switch()
10502 kvm_set_rflags(vcpu, ctxt->eflags); in kvm_task_switch()
10507 static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) in kvm_is_valid_sregs() argument
10517 if (kvm_vcpu_is_illegal_gpa(vcpu, sregs->cr3)) in kvm_is_valid_sregs()
10528 return kvm_is_valid_cr4(vcpu, sregs->cr4); in kvm_is_valid_sregs()
10531 static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs, in __set_sregs_common() argument
10538 if (!kvm_is_valid_sregs(vcpu, sregs)) in __set_sregs_common()
10543 if (kvm_set_apic_base(vcpu, &apic_base_msr)) in __set_sregs_common()
10546 if (vcpu->arch.guest_state_protected) in __set_sregs_common()
10551 static_call(kvm_x86_set_idt)(vcpu, &dt); in __set_sregs_common()
10554 static_call(kvm_x86_set_gdt)(vcpu, &dt); in __set_sregs_common()
10556 vcpu->arch.cr2 = sregs->cr2; in __set_sregs_common()
10557 *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; in __set_sregs_common()
10558 vcpu->arch.cr3 = sregs->cr3; in __set_sregs_common()
10559 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3); in __set_sregs_common()
10561 kvm_set_cr8(vcpu, sregs->cr8); in __set_sregs_common()
10563 *mmu_reset_needed |= vcpu->arch.efer != sregs->efer; in __set_sregs_common()
10564 static_call(kvm_x86_set_efer)(vcpu, sregs->efer); in __set_sregs_common()
10566 *mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; in __set_sregs_common()
10567 static_call(kvm_x86_set_cr0)(vcpu, sregs->cr0); in __set_sregs_common()
10568 vcpu->arch.cr0 = sregs->cr0; in __set_sregs_common()
10570 *mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; in __set_sregs_common()
10571 static_call(kvm_x86_set_cr4)(vcpu, sregs->cr4); in __set_sregs_common()
10574 idx = srcu_read_lock(&vcpu->kvm->srcu); in __set_sregs_common()
10575 if (is_pae_paging(vcpu)) { in __set_sregs_common()
10576 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); in __set_sregs_common()
10579 srcu_read_unlock(&vcpu->kvm->srcu, idx); in __set_sregs_common()
10582 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); in __set_sregs_common()
10583 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); in __set_sregs_common()
10584 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); in __set_sregs_common()
10585 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); in __set_sregs_common()
10586 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); in __set_sregs_common()
10587 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); in __set_sregs_common()
10589 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); in __set_sregs_common()
10590 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); in __set_sregs_common()
10592 update_cr8_intercept(vcpu); in __set_sregs_common()
10595 if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 && in __set_sregs_common()
10597 !is_protmode(vcpu)) in __set_sregs_common()
10598 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in __set_sregs_common()
10603 static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) in __set_sregs() argument
10607 int ret = __set_sregs_common(vcpu, sregs, &mmu_reset_needed, true); in __set_sregs()
10613 kvm_mmu_reset_context(vcpu); in __set_sregs()
10620 kvm_queue_interrupt(vcpu, pending_vec, false); in __set_sregs()
10622 kvm_make_request(KVM_REQ_EVENT, vcpu); in __set_sregs()
10627 static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2) in __set_sregs2() argument
10638 if (valid_pdptrs && (!pae || vcpu->arch.guest_state_protected)) in __set_sregs2()
10641 ret = __set_sregs_common(vcpu, (struct kvm_sregs *)sregs2, in __set_sregs2()
10648 kvm_pdptr_write(vcpu, i, sregs2->pdptrs[i]); in __set_sregs2()
10650 kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR); in __set_sregs2()
10652 vcpu->arch.pdptrs_from_userspace = true; in __set_sregs2()
10655 kvm_mmu_reset_context(vcpu); in __set_sregs2()
10659 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_sregs() argument
10664 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_set_sregs()
10665 ret = __set_sregs(vcpu, sregs); in kvm_arch_vcpu_ioctl_set_sregs()
10666 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_set_sregs()
10673 struct kvm_vcpu *vcpu; in kvm_arch_vcpu_guestdbg_update_apicv_inhibit() local
10678 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_arch_vcpu_guestdbg_update_apicv_inhibit()
10679 if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) { in kvm_arch_vcpu_guestdbg_update_apicv_inhibit()
10688 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_guest_debug() argument
10694 if (vcpu->arch.guest_state_protected) in kvm_arch_vcpu_ioctl_set_guest_debug()
10697 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
10701 if (vcpu->arch.exception.pending) in kvm_arch_vcpu_ioctl_set_guest_debug()
10704 kvm_queue_exception(vcpu, DB_VECTOR); in kvm_arch_vcpu_ioctl_set_guest_debug()
10706 kvm_queue_exception(vcpu, BP_VECTOR); in kvm_arch_vcpu_ioctl_set_guest_debug()
10713 rflags = kvm_get_rflags(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
10715 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
10716 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) in kvm_arch_vcpu_ioctl_set_guest_debug()
10717 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
10719 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { in kvm_arch_vcpu_ioctl_set_guest_debug()
10721 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
10722 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; in kvm_arch_vcpu_ioctl_set_guest_debug()
10725 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
10727 kvm_update_dr7(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
10729 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) in kvm_arch_vcpu_ioctl_set_guest_debug()
10730 vcpu->arch.singlestep_rip = kvm_get_linear_rip(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
10736 kvm_set_rflags(vcpu, rflags); in kvm_arch_vcpu_ioctl_set_guest_debug()
10738 static_call(kvm_x86_update_exception_bitmap)(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
10740 kvm_arch_vcpu_guestdbg_update_apicv_inhibit(vcpu->kvm); in kvm_arch_vcpu_ioctl_set_guest_debug()
10745 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
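kvm_arch_vcpu_ioctl_set_guest_debug() implements the KVM_SET_GUEST_DEBUG ioctl used by gdbstubs. A minimal sketch that turns on single-stepping, assuming vcpu_fd is valid; hardware breakpoints would additionally set KVM_GUESTDBG_USE_HW_BP and fill dbg.arch.debugreg[].

	#include <linux/kvm.h>
	#include <sys/ioctl.h>
	#include <string.h>
	#include <stdio.h>

	static int enable_single_step(int vcpu_fd)
	{
		struct kvm_guest_debug dbg;

		memset(&dbg, 0, sizeof(dbg));
		dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
		if (ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg) < 0) {
			perror("KVM_SET_GUEST_DEBUG");
			return -1;
		}
		return 0;
	}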
10752 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_translate() argument
10759 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_translate()
10761 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl_translate()
10762 gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL); in kvm_arch_vcpu_ioctl_translate()
10763 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl_translate()
10769 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_translate()
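kvm_arch_vcpu_ioctl_translate() walks the guest page tables on behalf of the KVM_TRANSLATE ioctl. An illustrative caller, assuming vcpu_fd is open and the guest is in a paging mode where the walk is meaningful; the helper name is a placeholder.

	#include <linux/kvm.h>
	#include <sys/ioctl.h>
	#include <stdio.h>

	/* Translate a guest-virtual address; tr.valid is 0 if unmapped. */
	static int translate_gva(int vcpu_fd, unsigned long long gva)
	{
		struct kvm_translation tr = { .linear_address = gva };

		if (ioctl(vcpu_fd, KVM_TRANSLATE, &tr) < 0) {
			perror("KVM_TRANSLATE");
			return -1;
		}
		printf("gva 0x%llx -> gpa 0x%llx (valid=%u writeable=%u)\n",
		       gva, tr.physical_address,
		       (unsigned)tr.valid, (unsigned)tr.writeable);
		return 0;
	}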
10773 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_get_fpu() argument
10777 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) in kvm_arch_vcpu_ioctl_get_fpu()
10780 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_get_fpu()
10782 fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave; in kvm_arch_vcpu_ioctl_get_fpu()
10792 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_get_fpu()
10796 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_set_fpu() argument
10800 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) in kvm_arch_vcpu_ioctl_set_fpu()
10803 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_set_fpu()
10805 fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave; in kvm_arch_vcpu_ioctl_set_fpu()
10816 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_set_fpu()
10820 static void store_regs(struct kvm_vcpu *vcpu) in store_regs() argument
10824 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS) in store_regs()
10825 __get_regs(vcpu, &vcpu->run->s.regs.regs); in store_regs()
10827 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS) in store_regs()
10828 __get_sregs(vcpu, &vcpu->run->s.regs.sregs); in store_regs()
10830 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS) in store_regs()
10832 vcpu, &vcpu->run->s.regs.events); in store_regs()
10835 static int sync_regs(struct kvm_vcpu *vcpu) in sync_regs() argument
10837 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) { in sync_regs()
10838 __set_regs(vcpu, &vcpu->run->s.regs.regs); in sync_regs()
10839 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS; in sync_regs()
10841 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) { in sync_regs()
10842 if (__set_sregs(vcpu, &vcpu->run->s.regs.sregs)) in sync_regs()
10844 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS; in sync_regs()
10846 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) { in sync_regs()
10848 vcpu, &vcpu->run->s.regs.events)) in sync_regs()
10850 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS; in sync_regs()
10865 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_create() argument
10870 vcpu->arch.last_vmentry_cpu = -1; in kvm_arch_vcpu_create()
10871 vcpu->arch.regs_avail = ~0; in kvm_arch_vcpu_create()
10872 vcpu->arch.regs_dirty = ~0; in kvm_arch_vcpu_create()
10874 if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu)) in kvm_arch_vcpu_create()
10875 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_create()
10877 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; in kvm_arch_vcpu_create()
10879 r = kvm_mmu_create(vcpu); in kvm_arch_vcpu_create()
10883 if (irqchip_in_kernel(vcpu->kvm)) { in kvm_arch_vcpu_create()
10884 r = kvm_create_lapic(vcpu, lapic_timer_advance_ns); in kvm_arch_vcpu_create()
10887 if (kvm_apicv_activated(vcpu->kvm)) in kvm_arch_vcpu_create()
10888 vcpu->arch.apicv_active = true; in kvm_arch_vcpu_create()
10897 vcpu->arch.pio_data = page_address(page); in kvm_arch_vcpu_create()
10899 vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4, in kvm_arch_vcpu_create()
10901 if (!vcpu->arch.mce_banks) in kvm_arch_vcpu_create()
10903 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; in kvm_arch_vcpu_create()
10905 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, in kvm_arch_vcpu_create()
10909 if (!alloc_emulate_ctxt(vcpu)) in kvm_arch_vcpu_create()
10912 if (!fpu_alloc_guest_fpstate(&vcpu->arch.guest_fpu)) { in kvm_arch_vcpu_create()
10917 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); in kvm_arch_vcpu_create()
10918 vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu); in kvm_arch_vcpu_create()
10920 vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT; in kvm_arch_vcpu_create()
10922 kvm_async_pf_hash_reset(vcpu); in kvm_arch_vcpu_create()
10923 kvm_pmu_init(vcpu); in kvm_arch_vcpu_create()
10925 vcpu->arch.pending_external_vector = -1; in kvm_arch_vcpu_create()
10926 vcpu->arch.preempted_in_kernel = false; in kvm_arch_vcpu_create()
10929 vcpu->arch.hv_root_tdp = INVALID_PAGE; in kvm_arch_vcpu_create()
10932 r = static_call(kvm_x86_vcpu_create)(vcpu); in kvm_arch_vcpu_create()
10936 vcpu->arch.arch_capabilities = kvm_get_arch_capabilities(); in kvm_arch_vcpu_create()
10937 vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT; in kvm_arch_vcpu_create()
10938 kvm_vcpu_mtrr_init(vcpu); in kvm_arch_vcpu_create()
10939 vcpu_load(vcpu); in kvm_arch_vcpu_create()
10940 kvm_set_tsc_khz(vcpu, max_tsc_khz); in kvm_arch_vcpu_create()
10941 kvm_vcpu_reset(vcpu, false); in kvm_arch_vcpu_create()
10942 kvm_init_mmu(vcpu); in kvm_arch_vcpu_create()
10943 vcpu_put(vcpu); in kvm_arch_vcpu_create()
10947 fpu_free_guest_fpstate(&vcpu->arch.guest_fpu); in kvm_arch_vcpu_create()
10949 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); in kvm_arch_vcpu_create()
10951 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_create()
10953 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_create()
10955 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_create()
10957 kvm_free_lapic(vcpu); in kvm_arch_vcpu_create()
10959 kvm_mmu_destroy(vcpu); in kvm_arch_vcpu_create()
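kvm_arch_vcpu_create() runs when userspace issues KVM_CREATE_VCPU on a VM fd; the pio_data page allocated above is part of the kvm_run area that userspace later mmaps from the vCPU fd. A sketch of that userspace sequence, assuming kvm_fd (/dev/kvm) and vm_fd were opened earlier; error handling is kept minimal.

	#include <linux/kvm.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <stdio.h>

	static struct kvm_run *create_vcpu(int kvm_fd, int vm_fd, int id, int *vcpu_fd_out)
	{
		int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, (unsigned long)id);
		long mmap_size;
		struct kvm_run *run;

		if (vcpu_fd < 0) {
			perror("KVM_CREATE_VCPU");
			return NULL;
		}
		/* The shared area holds struct kvm_run plus the PIO data page. */
		mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
		if (mmap_size < 0)
			return NULL;
		run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu_fd, 0);
		if (run == MAP_FAILED)
			return NULL;
		*vcpu_fd_out = vcpu_fd;
		return run;
	}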
10963 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_postcreate() argument
10965 struct kvm *kvm = vcpu->kvm; in kvm_arch_vcpu_postcreate()
10967 if (mutex_lock_killable(&vcpu->mutex)) in kvm_arch_vcpu_postcreate()
10969 vcpu_load(vcpu); in kvm_arch_vcpu_postcreate()
10970 kvm_synchronize_tsc(vcpu, 0); in kvm_arch_vcpu_postcreate()
10971 vcpu_put(vcpu); in kvm_arch_vcpu_postcreate()
10974 vcpu->arch.msr_kvm_poll_control = 1; in kvm_arch_vcpu_postcreate()
10976 mutex_unlock(&vcpu->mutex); in kvm_arch_vcpu_postcreate()
10978 if (kvmclock_periodic_sync && vcpu->vcpu_idx == 0) in kvm_arch_vcpu_postcreate()
10983 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_destroy() argument
10987 kvmclock_reset(vcpu); in kvm_arch_vcpu_destroy()
10989 static_call(kvm_x86_vcpu_free)(vcpu); in kvm_arch_vcpu_destroy()
10991 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); in kvm_arch_vcpu_destroy()
10992 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_destroy()
10993 fpu_free_guest_fpstate(&vcpu->arch.guest_fpu); in kvm_arch_vcpu_destroy()
10995 kvm_hv_vcpu_uninit(vcpu); in kvm_arch_vcpu_destroy()
10996 kvm_pmu_destroy(vcpu); in kvm_arch_vcpu_destroy()
10997 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_destroy()
10998 kvm_free_lapic(vcpu); in kvm_arch_vcpu_destroy()
10999 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_destroy()
11000 kvm_mmu_destroy(vcpu); in kvm_arch_vcpu_destroy()
11001 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_destroy()
11002 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_destroy()
11003 kvfree(vcpu->arch.cpuid_entries); in kvm_arch_vcpu_destroy()
11004 if (!lapic_in_kernel(vcpu)) in kvm_arch_vcpu_destroy()
11008 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) in kvm_vcpu_reset() argument
11011 unsigned long old_cr0 = kvm_read_cr0(vcpu); in kvm_vcpu_reset()
11022 (old_cr0 || kvm_read_cr3(vcpu) || kvm_read_cr4(vcpu))); in kvm_vcpu_reset()
11024 kvm_lapic_reset(vcpu, init_event); in kvm_vcpu_reset()
11026 vcpu->arch.hflags = 0; in kvm_vcpu_reset()
11028 vcpu->arch.smi_pending = 0; in kvm_vcpu_reset()
11029 vcpu->arch.smi_count = 0; in kvm_vcpu_reset()
11030 atomic_set(&vcpu->arch.nmi_queued, 0); in kvm_vcpu_reset()
11031 vcpu->arch.nmi_pending = 0; in kvm_vcpu_reset()
11032 vcpu->arch.nmi_injected = false; in kvm_vcpu_reset()
11033 kvm_clear_interrupt_queue(vcpu); in kvm_vcpu_reset()
11034 kvm_clear_exception_queue(vcpu); in kvm_vcpu_reset()
11036 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); in kvm_vcpu_reset()
11037 kvm_update_dr0123(vcpu); in kvm_vcpu_reset()
11038 vcpu->arch.dr6 = DR6_ACTIVE_LOW; in kvm_vcpu_reset()
11039 vcpu->arch.dr7 = DR7_FIXED_1; in kvm_vcpu_reset()
11040 kvm_update_dr7(vcpu); in kvm_vcpu_reset()
11042 vcpu->arch.cr2 = 0; in kvm_vcpu_reset()
11044 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_vcpu_reset()
11045 vcpu->arch.apf.msr_en_val = 0; in kvm_vcpu_reset()
11046 vcpu->arch.apf.msr_int_val = 0; in kvm_vcpu_reset()
11047 vcpu->arch.st.msr_val = 0; in kvm_vcpu_reset()
11049 kvmclock_reset(vcpu); in kvm_vcpu_reset()
11051 kvm_clear_async_pf_completion_queue(vcpu); in kvm_vcpu_reset()
11052 kvm_async_pf_hash_reset(vcpu); in kvm_vcpu_reset()
11053 vcpu->arch.apf.halted = false; in kvm_vcpu_reset()
11055 if (vcpu->arch.guest_fpu.fpstate && kvm_mpx_supported()) { in kvm_vcpu_reset()
11056 struct fpstate *fpstate = vcpu->arch.guest_fpu.fpstate; in kvm_vcpu_reset()
11063 kvm_put_guest_fpu(vcpu); in kvm_vcpu_reset()
11069 kvm_load_guest_fpu(vcpu); in kvm_vcpu_reset()
11073 kvm_pmu_reset(vcpu); in kvm_vcpu_reset()
11074 vcpu->arch.smbase = 0x30000; in kvm_vcpu_reset()
11076 vcpu->arch.msr_misc_features_enables = 0; in kvm_vcpu_reset()
11078 vcpu->arch.xcr0 = XFEATURE_MASK_FP; in kvm_vcpu_reset()
11082 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); in kvm_vcpu_reset()
11083 kvm_register_mark_dirty(vcpu, VCPU_REGS_RSP); in kvm_vcpu_reset()
11092 cpuid_0x1 = kvm_find_cpuid_entry(vcpu, 1, 0); in kvm_vcpu_reset()
11093 kvm_rdx_write(vcpu, cpuid_0x1 ? cpuid_0x1->eax : 0x600); in kvm_vcpu_reset()
11095 vcpu->arch.ia32_xss = 0; in kvm_vcpu_reset()
11097 static_call(kvm_x86_vcpu_reset)(vcpu, init_event); in kvm_vcpu_reset()
11099 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); in kvm_vcpu_reset()
11100 kvm_rip_write(vcpu, 0xfff0); in kvm_vcpu_reset()
11102 vcpu->arch.cr3 = 0; in kvm_vcpu_reset()
11103 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); in kvm_vcpu_reset()
11116 static_call(kvm_x86_set_cr0)(vcpu, new_cr0); in kvm_vcpu_reset()
11117 static_call(kvm_x86_set_cr4)(vcpu, 0); in kvm_vcpu_reset()
11118 static_call(kvm_x86_set_efer)(vcpu, 0); in kvm_vcpu_reset()
11119 static_call(kvm_x86_update_exception_bitmap)(vcpu); in kvm_vcpu_reset()
11130 kvm_mmu_reset_context(vcpu); in kvm_vcpu_reset()
11142 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); in kvm_vcpu_reset()
11146 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) in kvm_vcpu_deliver_sipi_vector() argument
11150 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); in kvm_vcpu_deliver_sipi_vector()
11153 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); in kvm_vcpu_deliver_sipi_vector()
11154 kvm_rip_write(vcpu, 0); in kvm_vcpu_deliver_sipi_vector()
11161 struct kvm_vcpu *vcpu; in kvm_arch_hardware_enable() local
11176 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_arch_hardware_enable()
11177 if (!stable && vcpu->cpu == smp_processor_id()) in kvm_arch_hardware_enable()
11178 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_arch_hardware_enable()
11179 if (stable && vcpu->arch.last_host_tsc > local_tsc) { in kvm_arch_hardware_enable()
11181 if (vcpu->arch.last_host_tsc > max_tsc) in kvm_arch_hardware_enable()
11182 max_tsc = vcpu->arch.last_host_tsc; in kvm_arch_hardware_enable()
11229 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_arch_hardware_enable()
11230 vcpu->arch.tsc_offset_adjustment += delta_cyc; in kvm_arch_hardware_enable()
11231 vcpu->arch.last_host_tsc = local_tsc; in kvm_arch_hardware_enable()
11232 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); in kvm_arch_hardware_enable()
11316 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu) in kvm_vcpu_is_reset_bsp() argument
11318 return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id; in kvm_vcpu_is_reset_bsp()
11322 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) in kvm_vcpu_is_bsp() argument
11324 return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0; in kvm_vcpu_is_bsp()
11330 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_sched_in() argument
11332 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in kvm_arch_sched_in()
11334 vcpu->arch.l1tf_flush_l1d = true; in kvm_arch_sched_in()
11337 kvm_make_request(KVM_REQ_PMU, vcpu); in kvm_arch_sched_in()
11339 static_call(kvm_x86_sched_in)(vcpu, cpu); in kvm_arch_sched_in()
11406 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) in kvm_unload_vcpu_mmu() argument
11408 vcpu_load(vcpu); in kvm_unload_vcpu_mmu()
11409 kvm_mmu_unload(vcpu); in kvm_unload_vcpu_mmu()
11410 vcpu_put(vcpu); in kvm_unload_vcpu_mmu()
11416 struct kvm_vcpu *vcpu; in kvm_free_vcpus() local
11421 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_free_vcpus()
11422 kvm_clear_async_pf_completion_queue(vcpu); in kvm_free_vcpus()
11423 kvm_unload_vcpu_mmu(vcpu); in kvm_free_vcpus()
11425 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_free_vcpus()
11426 kvm_vcpu_destroy(vcpu); in kvm_free_vcpus()
11667 struct kvm_vcpu *vcpu; in kvm_arch_memslots_updated() local
11677 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_arch_memslots_updated()
11678 kvm_vcpu_kick(vcpu); in kvm_arch_memslots_updated()
11807 static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) in kvm_guest_apic_has_interrupt() argument
11809 return (is_guest_mode(vcpu) && in kvm_guest_apic_has_interrupt()
11811 static_call(kvm_x86_guest_apic_has_interrupt)(vcpu)); in kvm_guest_apic_has_interrupt()
11814 static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) in kvm_vcpu_has_events() argument
11816 if (!list_empty_careful(&vcpu->async_pf.done)) in kvm_vcpu_has_events()
11819 if (kvm_apic_has_events(vcpu)) in kvm_vcpu_has_events()
11822 if (vcpu->arch.pv.pv_unhalted) in kvm_vcpu_has_events()
11825 if (vcpu->arch.exception.pending) in kvm_vcpu_has_events()
11828 if (kvm_test_request(KVM_REQ_NMI, vcpu) || in kvm_vcpu_has_events()
11829 (vcpu->arch.nmi_pending && in kvm_vcpu_has_events()
11830 static_call(kvm_x86_nmi_allowed)(vcpu, false))) in kvm_vcpu_has_events()
11833 if (kvm_test_request(KVM_REQ_SMI, vcpu) || in kvm_vcpu_has_events()
11834 (vcpu->arch.smi_pending && in kvm_vcpu_has_events()
11835 static_call(kvm_x86_smi_allowed)(vcpu, false))) in kvm_vcpu_has_events()
11838 if (kvm_arch_interrupt_allowed(vcpu) && in kvm_vcpu_has_events()
11839 (kvm_cpu_has_interrupt(vcpu) || in kvm_vcpu_has_events()
11840 kvm_guest_apic_has_interrupt(vcpu))) in kvm_vcpu_has_events()
11843 if (kvm_hv_has_stimer_pending(vcpu)) in kvm_vcpu_has_events()
11846 if (is_guest_mode(vcpu) && in kvm_vcpu_has_events()
11848 kvm_x86_ops.nested_ops->hv_timer_pending(vcpu)) in kvm_vcpu_has_events()
11854 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_runnable() argument
11856 return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu); in kvm_arch_vcpu_runnable()
11859 bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) in kvm_arch_dy_has_pending_interrupt() argument
11861 if (vcpu->arch.apicv_active && static_call(kvm_x86_dy_apicv_has_pending_interrupt)(vcpu)) in kvm_arch_dy_has_pending_interrupt()
11867 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) in kvm_arch_dy_runnable() argument
11869 if (READ_ONCE(vcpu->arch.pv.pv_unhalted)) in kvm_arch_dy_runnable()
11872 if (kvm_test_request(KVM_REQ_NMI, vcpu) || in kvm_arch_dy_runnable()
11873 kvm_test_request(KVM_REQ_SMI, vcpu) || in kvm_arch_dy_runnable()
11874 kvm_test_request(KVM_REQ_EVENT, vcpu)) in kvm_arch_dy_runnable()
11877 return kvm_arch_dy_has_pending_interrupt(vcpu); in kvm_arch_dy_runnable()
11880 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_in_kernel() argument
11882 if (vcpu->arch.guest_state_protected) in kvm_arch_vcpu_in_kernel()
11885 return vcpu->arch.preempted_in_kernel; in kvm_arch_vcpu_in_kernel()
11888 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument
11890 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; in kvm_arch_vcpu_should_kick()
11893 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) in kvm_arch_interrupt_allowed() argument
11895 return static_call(kvm_x86_interrupt_allowed)(vcpu, false); in kvm_arch_interrupt_allowed()
11898 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu) in kvm_get_linear_rip() argument
11901 if (vcpu->arch.guest_state_protected) in kvm_get_linear_rip()
11904 if (is_64_bit_mode(vcpu)) in kvm_get_linear_rip()
11905 return kvm_rip_read(vcpu); in kvm_get_linear_rip()
11906 return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) + in kvm_get_linear_rip()
11907 kvm_rip_read(vcpu)); in kvm_get_linear_rip()
11911 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip) in kvm_is_linear_rip() argument
11913 return kvm_get_linear_rip(vcpu) == linear_rip; in kvm_is_linear_rip()
11917 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu) in kvm_get_rflags() argument
11921 rflags = static_call(kvm_x86_get_rflags)(vcpu); in kvm_get_rflags()
11922 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) in kvm_get_rflags()
11928 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) in __kvm_set_rflags() argument
11930 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && in __kvm_set_rflags()
11931 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) in __kvm_set_rflags()
11933 static_call(kvm_x86_set_rflags)(vcpu, rflags); in __kvm_set_rflags()
11936 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) in kvm_set_rflags() argument
11938 __kvm_set_rflags(vcpu, rflags); in kvm_set_rflags()
11939 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_set_rflags()
11943 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) in kvm_arch_async_page_ready() argument
11947 if ((vcpu->arch.mmu->direct_map != work->arch.direct_map) || in kvm_arch_async_page_ready()
11951 r = kvm_mmu_reload(vcpu); in kvm_arch_async_page_ready()
11955 if (!vcpu->arch.mmu->direct_map && in kvm_arch_async_page_ready()
11956 work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu)) in kvm_arch_async_page_ready()
11959 kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true); in kvm_arch_async_page_ready()
11974 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_add_async_pf_gfn() argument
11978 while (vcpu->arch.apf.gfns[key] != ~0) in kvm_add_async_pf_gfn()
11981 vcpu->arch.apf.gfns[key] = gfn; in kvm_add_async_pf_gfn()
11984 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_async_pf_gfn_slot() argument
11990 (vcpu->arch.apf.gfns[key] != gfn && in kvm_async_pf_gfn_slot()
11991 vcpu->arch.apf.gfns[key] != ~0); i++) in kvm_async_pf_gfn_slot()
11997 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_find_async_pf_gfn() argument
11999 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; in kvm_find_async_pf_gfn()
12002 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_del_async_pf_gfn() argument
12006 i = j = kvm_async_pf_gfn_slot(vcpu, gfn); in kvm_del_async_pf_gfn()
12008 if (WARN_ON_ONCE(vcpu->arch.apf.gfns[i] != gfn)) in kvm_del_async_pf_gfn()
12012 vcpu->arch.apf.gfns[i] = ~0; in kvm_del_async_pf_gfn()
12015 if (vcpu->arch.apf.gfns[j] == ~0) in kvm_del_async_pf_gfn()
12017 k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); in kvm_del_async_pf_gfn()
12024 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; in kvm_del_async_pf_gfn()
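
The apf.gfns[] helpers above form a small open-addressed hash table: linear probing on insert and lookup, ~0 marking a free slot, and a tombstone-free delete that re-slots the probe run following the hole (the classic deletion algorithm for linear probing). Here is a self-contained userspace sketch of the same technique; NSLOTS, the hash function, and the names are illustrative stand-ins, not the kernel's:

#include <stdint.h>
#include <stdio.h>

#define NSLOTS 64                       /* power of two, like ASYNC_PF_PER_VCPU */
#define EMPTY  (~(uint64_t)0)           /* ~0 marks a free slot */

static uint64_t slots[NSLOTS];

static uint32_t hash_slot(uint64_t gfn)
{
	/* illustrative stand-in for hash_32(gfn & 0xffffffff, order_base_2(64)) */
	return (uint32_t)(gfn * 2654435761u) & (NSLOTS - 1);
}

static uint32_t next_probe(uint32_t key)
{
	return (key + 1) & (NSLOTS - 1);        /* linear probing with wraparound */
}

static void reset(void)
{
	for (int i = 0; i < NSLOTS; i++)
		slots[i] = EMPTY;
}

static void add_gfn(uint64_t gfn)
{
	uint32_t key = hash_slot(gfn);

	while (slots[key] != EMPTY)             /* walk to the first free slot */
		key = next_probe(key);
	slots[key] = gfn;
}

static uint32_t find_slot(uint64_t gfn)
{
	uint32_t key = hash_slot(gfn);

	for (int i = 0; i < NSLOTS && slots[key] != gfn && slots[key] != EMPTY; i++)
		key = next_probe(key);
	return key;
}

/* Delete without tombstones: re-slot the probe run that follows the hole. */
static void del_gfn(uint64_t gfn)
{
	uint32_t i, j, k;

	i = j = find_slot(gfn);
	if (slots[i] != gfn)
		return;

	while (1) {
		slots[i] = EMPTY;
		do {
			j = next_probe(j);
			if (slots[j] == EMPTY)
				return;
			k = hash_slot(slots[j]);
			/*
			 * Keep scanning while the entry at j can still be
			 * reached from its home slot k without crossing the
			 * hole at i (k lies cyclically in (i, j]).
			 */
		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
		slots[i] = slots[j];            /* pull it back over the hole */
		i = j;
	}
}

int main(void)
{
	reset();
	add_gfn(0x1000);
	add_gfn(0x1000 + NSLOTS);               /* same hash: forced collision */
	del_gfn(0x1000);
	printf("found: %d\n", slots[find_slot(0x1000 + NSLOTS)] == 0x1000 + NSLOTS);
	return 0;
}
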
12029 static inline int apf_put_user_notpresent(struct kvm_vcpu *vcpu) in apf_put_user_notpresent() argument
12033 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason, in apf_put_user_notpresent()
12037 static inline int apf_put_user_ready(struct kvm_vcpu *vcpu, u32 token) in apf_put_user_ready() argument
12041 return kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, in apf_put_user_ready()
12045 static inline bool apf_pageready_slot_free(struct kvm_vcpu *vcpu) in apf_pageready_slot_free() argument
12050 if (kvm_read_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, in apf_pageready_slot_free()
12057 static bool kvm_can_deliver_async_pf(struct kvm_vcpu *vcpu) in kvm_can_deliver_async_pf() argument
12059 if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu)) in kvm_can_deliver_async_pf()
12062 if (!kvm_pv_async_pf_enabled(vcpu) || in kvm_can_deliver_async_pf()
12063 (vcpu->arch.apf.send_user_only && static_call(kvm_x86_get_cpl)(vcpu) == 0)) in kvm_can_deliver_async_pf()
12069 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu) in kvm_can_do_async_pf() argument
12071 if (unlikely(!lapic_in_kernel(vcpu) || in kvm_can_do_async_pf()
12072 kvm_event_needs_reinjection(vcpu) || in kvm_can_do_async_pf()
12073 vcpu->arch.exception.pending)) in kvm_can_do_async_pf()
12076 if (kvm_hlt_in_guest(vcpu->kvm) && !kvm_can_deliver_async_pf(vcpu)) in kvm_can_do_async_pf()
12083 return kvm_arch_interrupt_allowed(vcpu); in kvm_can_do_async_pf()
12086 bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, in kvm_arch_async_page_not_present() argument
12092 kvm_add_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_not_present()
12094 if (kvm_can_deliver_async_pf(vcpu) && in kvm_arch_async_page_not_present()
12095 !apf_put_user_notpresent(vcpu)) { in kvm_arch_async_page_not_present()
12102 kvm_inject_page_fault(vcpu, &fault); in kvm_arch_async_page_not_present()
12113 kvm_make_request(KVM_REQ_APF_HALT, vcpu); in kvm_arch_async_page_not_present()
12118 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, in kvm_arch_async_page_present() argument
12123 .vector = vcpu->arch.apf.vec in kvm_arch_async_page_present()
12129 kvm_del_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_present()
12133 kvm_pv_async_pf_enabled(vcpu) && in kvm_arch_async_page_present()
12134 !apf_put_user_ready(vcpu, work->arch.token)) { in kvm_arch_async_page_present()
12135 vcpu->arch.apf.pageready_pending = true; in kvm_arch_async_page_present()
12136 kvm_apic_set_irq(vcpu, &irq, NULL); in kvm_arch_async_page_present()
12139 vcpu->arch.apf.halted = false; in kvm_arch_async_page_present()
12140 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_async_page_present()
12143 void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu) in kvm_arch_async_page_present_queued() argument
12145 kvm_make_request(KVM_REQ_APF_READY, vcpu); in kvm_arch_async_page_present_queued()
12146 if (!vcpu->arch.apf.pageready_pending) in kvm_arch_async_page_present_queued()
12147 kvm_vcpu_kick(vcpu); in kvm_arch_async_page_present_queued()
12150 bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu) in kvm_arch_can_dequeue_async_page_present() argument
12152 if (!kvm_pv_async_pf_enabled(vcpu)) in kvm_arch_can_dequeue_async_page_present()
12155 return kvm_lapic_enabled(vcpu) && apf_pageready_slot_free(vcpu); in kvm_arch_can_dequeue_async_page_present()
12262 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu) in kvm_arch_no_poll() argument
12264 return (vcpu->arch.msr_kvm_poll_control & 1) == 0; in kvm_arch_no_poll()
12295 void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code) in kvm_fixup_and_inject_pf_error() argument
12302 vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, &fault) != UNMAPPED_GVA) { in kvm_fixup_and_inject_pf_error()
12314 vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault); in kvm_fixup_and_inject_pf_error()
12323 int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r, in kvm_handle_memory_failure() argument
12327 kvm_inject_emulated_page_fault(vcpu, e); in kvm_handle_memory_failure()
12338 kvm_prepare_emulation_failure_exit(vcpu); in kvm_handle_memory_failure()
12344 int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva) in kvm_handle_invpcid() argument
12354 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e); in kvm_handle_invpcid()
12356 return kvm_handle_memory_failure(vcpu, r, &e); in kvm_handle_invpcid()
12359 kvm_inject_gp(vcpu, 0); in kvm_handle_invpcid()
12363 pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE); in kvm_handle_invpcid()
12368 is_noncanonical_address(operand.gla, vcpu)) { in kvm_handle_invpcid()
12369 kvm_inject_gp(vcpu, 0); in kvm_handle_invpcid()
12372 kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid); in kvm_handle_invpcid()
12373 return kvm_skip_emulated_instruction(vcpu); in kvm_handle_invpcid()
12377 kvm_inject_gp(vcpu, 0); in kvm_handle_invpcid()
12381 kvm_invalidate_pcid(vcpu, operand.pcid); in kvm_handle_invpcid()
12382 return kvm_skip_emulated_instruction(vcpu); in kvm_handle_invpcid()
12394 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); in kvm_handle_invpcid()
12395 return kvm_skip_emulated_instruction(vcpu); in kvm_handle_invpcid()
12398 kvm_inject_gp(vcpu, 0); in kvm_handle_invpcid()
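
kvm_handle_invpcid() reads a 16-byte descriptor from guest memory and dispatches on the INVPCID type (0 = individual address, 1 = single context, 2 = all contexts including globals, 3 = all contexts retaining globals). The following is a hedged sketch of the descriptor layout and the #GP conditions visible above (reserved PCID bits, non-zero PCID with CR4.PCIDE clear, non-canonical address for type 0, out-of-range type); the struct, enum, and helper names are illustrative, and the canonical check assumes 48-bit linear addresses:

#include <stdbool.h>
#include <stdint.h>

/* INVPCID descriptor as read from guest memory: 16 bytes. */
struct invpcid_desc {
	uint64_t pcid;   /* only bits 0-11 are valid, bits 12-63 must be zero */
	uint64_t gla;    /* linear address, used only by the individual-address type */
};

enum invpcid_type {
	INVPCID_INDIV_ADDR      = 0,  /* one linear address in one PCID */
	INVPCID_SINGLE_CTXT     = 1,  /* everything tagged with one PCID */
	INVPCID_ALL_INCL_GLOBAL = 2,  /* all contexts, including globals */
	INVPCID_ALL_NON_GLOBAL  = 3,  /* all contexts, globals retained */
};

static bool is_canonical_48(uint64_t gla)
{
	/* illustrative 48-bit canonical check: bits 63..47 must all match bit 47 */
	return (uint64_t)((int64_t)(gla << 16) >> 16) == gla;
}

/*
 * Mirror of the #GP conditions visible in the listing: reserved PCID bits,
 * a non-zero PCID while CR4.PCIDE is clear, a non-canonical address for
 * the individual-address type, or a type outside 0-3.
 */
bool invpcid_operand_valid(unsigned long type, const struct invpcid_desc *desc,
			   bool cr4_pcide)
{
	if (type > INVPCID_ALL_NON_GLOBAL)
		return false;
	if (desc->pcid >> 12)                        /* reserved bits 12-63 set */
		return false;
	if (type == INVPCID_INDIV_ADDR && !is_canonical_48(desc->gla))
		return false;
	if ((type == INVPCID_INDIV_ADDR || type == INVPCID_SINGLE_CTXT) &&
	    !cr4_pcide && desc->pcid != 0)
		return false;
	return true;
}
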
12404 static int complete_sev_es_emulated_mmio(struct kvm_vcpu *vcpu) in complete_sev_es_emulated_mmio() argument
12406 struct kvm_run *run = vcpu->run; in complete_sev_es_emulated_mmio()
12410 BUG_ON(!vcpu->mmio_needed); in complete_sev_es_emulated_mmio()
12413 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; in complete_sev_es_emulated_mmio()
12415 if (!vcpu->mmio_is_write) in complete_sev_es_emulated_mmio()
12421 vcpu->mmio_cur_fragment++; in complete_sev_es_emulated_mmio()
12429 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { in complete_sev_es_emulated_mmio()
12430 vcpu->mmio_needed = 0; in complete_sev_es_emulated_mmio()
12440 run->mmio.is_write = vcpu->mmio_is_write; in complete_sev_es_emulated_mmio()
12445 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio; in complete_sev_es_emulated_mmio()
12450 int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes, in kvm_sev_es_mmio_write() argument
12459 handled = write_emultor.read_write_mmio(vcpu, gpa, bytes, data); in kvm_sev_es_mmio_write()
12468 frag = vcpu->mmio_fragments; in kvm_sev_es_mmio_write()
12469 vcpu->mmio_nr_fragments = 1; in kvm_sev_es_mmio_write()
12474 vcpu->mmio_needed = 1; in kvm_sev_es_mmio_write()
12475 vcpu->mmio_cur_fragment = 0; in kvm_sev_es_mmio_write()
12477 vcpu->run->mmio.phys_addr = gpa; in kvm_sev_es_mmio_write()
12478 vcpu->run->mmio.len = min(8u, frag->len); in kvm_sev_es_mmio_write()
12479 vcpu->run->mmio.is_write = 1; in kvm_sev_es_mmio_write()
12480 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); in kvm_sev_es_mmio_write()
12481 vcpu->run->exit_reason = KVM_EXIT_MMIO; in kvm_sev_es_mmio_write()
12483 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio; in kvm_sev_es_mmio_write()
12489 int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes, in kvm_sev_es_mmio_read() argument
12498 handled = read_emultor.read_write_mmio(vcpu, gpa, bytes, data); in kvm_sev_es_mmio_read()
12507 frag = vcpu->mmio_fragments; in kvm_sev_es_mmio_read()
12508 vcpu->mmio_nr_fragments = 1; in kvm_sev_es_mmio_read()
12513 vcpu->mmio_needed = 1; in kvm_sev_es_mmio_read()
12514 vcpu->mmio_cur_fragment = 0; in kvm_sev_es_mmio_read()
12516 vcpu->run->mmio.phys_addr = gpa; in kvm_sev_es_mmio_read()
12517 vcpu->run->mmio.len = min(8u, frag->len); in kvm_sev_es_mmio_read()
12518 vcpu->run->mmio.is_write = 0; in kvm_sev_es_mmio_read()
12519 vcpu->run->exit_reason = KVM_EXIT_MMIO; in kvm_sev_es_mmio_read()
12521 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio; in kvm_sev_es_mmio_read()
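
Both SEV-ES MMIO paths above stage the whole access in a single mmio_fragments[] entry and expose it to userspace at most 8 bytes per KVM_EXIT_MMIO, with complete_sev_es_emulated_mmio() advancing the fragment after each round. Below is a small sketch of that 8-byte chunking on plain buffers, with the exit/re-enter round trip collapsed into a loop; the names are illustrative, not the kvm_run API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* One pending MMIO range, mirroring a single mmio_fragments[] entry. */
struct mmio_frag {
	uint64_t gpa;
	uint8_t *data;
	unsigned int len;
};

/*
 * Hand the fragment to "userspace" at most 8 bytes per round, the same cap
 * used for kvm_run->mmio.len, and advance gpa/data/len after each round.
 */
static void drain_mmio_fragment(struct mmio_frag *frag, int is_write)
{
	while (frag->len) {
		unsigned int chunk = frag->len < 8 ? frag->len : 8;
		uint8_t run_data[8] = { 0 };   /* stands in for kvm_run->mmio.data */

		if (is_write)
			memcpy(run_data, frag->data, chunk);   /* kernel fills exit data */
		else
			memcpy(frag->data, run_data, chunk);   /* reply copied back on completion */

		printf("exit: gpa=%#llx len=%u write=%d\n",
		       (unsigned long long)frag->gpa, chunk, is_write);

		/* the per-completion advance done in complete_sev_es_emulated_mmio() */
		frag->data += chunk;
		frag->gpa  += chunk;
		frag->len  -= chunk;
	}
}

int main(void)
{
	uint8_t buf[20] = { 0 };
	struct mmio_frag frag = { .gpa = 0xfed00000, .data = buf, .len = sizeof(buf) };

	drain_mmio_fragment(&frag, 1);   /* 8 + 8 + 4 byte rounds */
	return 0;
}
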
12527 static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
12530 static int complete_sev_es_emulated_outs(struct kvm_vcpu *vcpu) in complete_sev_es_emulated_outs() argument
12532 int size = vcpu->arch.pio.size; in complete_sev_es_emulated_outs()
12533 int port = vcpu->arch.pio.port; in complete_sev_es_emulated_outs()
12535 vcpu->arch.pio.count = 0; in complete_sev_es_emulated_outs()
12536 if (vcpu->arch.sev_pio_count) in complete_sev_es_emulated_outs()
12537 return kvm_sev_es_outs(vcpu, size, port); in complete_sev_es_emulated_outs()
12541 static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size, in kvm_sev_es_outs() argument
12546 min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count); in kvm_sev_es_outs()
12547 int ret = emulator_pio_out(vcpu, size, port, vcpu->arch.sev_pio_data, count); in kvm_sev_es_outs()
12550 vcpu->arch.sev_pio_count -= count; in kvm_sev_es_outs()
12551 vcpu->arch.sev_pio_data += count * vcpu->arch.pio.size; in kvm_sev_es_outs()
12556 if (!vcpu->arch.sev_pio_count) in kvm_sev_es_outs()
12560 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_outs; in kvm_sev_es_outs()
12564 static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
12567 static void advance_sev_es_emulated_ins(struct kvm_vcpu *vcpu) in advance_sev_es_emulated_ins() argument
12569 unsigned count = vcpu->arch.pio.count; in advance_sev_es_emulated_ins()
12570 complete_emulator_pio_in(vcpu, vcpu->arch.sev_pio_data); in advance_sev_es_emulated_ins()
12571 vcpu->arch.sev_pio_count -= count; in advance_sev_es_emulated_ins()
12572 vcpu->arch.sev_pio_data += count * vcpu->arch.pio.size; in advance_sev_es_emulated_ins()
12575 static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu) in complete_sev_es_emulated_ins() argument
12577 int size = vcpu->arch.pio.size; in complete_sev_es_emulated_ins()
12578 int port = vcpu->arch.pio.port; in complete_sev_es_emulated_ins()
12580 advance_sev_es_emulated_ins(vcpu); in complete_sev_es_emulated_ins()
12581 if (vcpu->arch.sev_pio_count) in complete_sev_es_emulated_ins()
12582 return kvm_sev_es_ins(vcpu, size, port); in complete_sev_es_emulated_ins()
12586 static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size, in kvm_sev_es_ins() argument
12591 min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count); in kvm_sev_es_ins()
12592 if (!__emulator_pio_in(vcpu, size, port, count)) in kvm_sev_es_ins()
12596 advance_sev_es_emulated_ins(vcpu); in kvm_sev_es_ins()
12597 if (!vcpu->arch.sev_pio_count) in kvm_sev_es_ins()
12601 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins; in kvm_sev_es_ins()
12605 int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size, in kvm_sev_es_string_io() argument
12609 vcpu->arch.sev_pio_data = data; in kvm_sev_es_string_io()
12610 vcpu->arch.sev_pio_count = count; in kvm_sev_es_string_io()
12611 return in ? kvm_sev_es_ins(vcpu, size, port) in kvm_sev_es_string_io()
12612 : kvm_sev_es_outs(vcpu, size, port); in kvm_sev_es_string_io()
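
kvm_sev_es_string_io() stashes the guest buffer and repetition count, and kvm_sev_es_outs()/kvm_sev_es_ins() then work through them at most PAGE_SIZE / size repetitions per pass, resuming via complete_userspace_io whenever userspace has to finish a pass. Here is a minimal sketch of just that chunking arithmetic, with no emulator and no userspace round trip; the names are illustrative:

#include <stdio.h>

#define PAGE_SIZE 4096u

/*
 * Split `count` repetitions of a `size`-byte port access into passes of at
 * most PAGE_SIZE / size repetitions, advancing the buffer offset by
 * chunk * size after each pass -- the same arithmetic used by
 * kvm_sev_es_outs() and kvm_sev_es_ins() on sev_pio_data/sev_pio_count.
 */
static void chunked_string_io(unsigned int size, unsigned int count)
{
	unsigned int max_per_pass = PAGE_SIZE / size;
	unsigned long offset = 0;

	while (count) {
		unsigned int chunk = count < max_per_pass ? count : max_per_pass;

		printf("pass: %u reps of %u bytes at buffer offset %lu\n",
		       chunk, size, offset);

		count  -= chunk;
		offset += (unsigned long)chunk * size;
	}
}

int main(void)
{
	chunked_string_io(2, 3000);   /* 2048 + 952 two-byte repetitions */
	return 0;
}
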