Lines Matching refs:hv_vcpu

197 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_notify_acked_sint() local
204 for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) { in kvm_hv_notify_acked_sint()
205 stimer = &hv_vcpu->stimer[idx]; in kvm_hv_notify_acked_sint()
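
The first hit group (197-205) is the SINT-ack path: when the guest acknowledges a SynIC interrupt, KVM walks the per-vCPU synthetic timers and retries any message that was parked on that SINT. A minimal sketch of the loop, assuming upstream hyperv.c's stimer_mark_pending() helper and hv_stimer_config bitfield layout (the real function also forwards the ack to the GSI routing, elided here):

    void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
    {
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
        struct kvm_vcpu_hv_stimer *stimer;
        int idx;

        /* Retry delivery of timer messages parked on the acked SINT. */
        for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
            stimer = &hv_vcpu->stimer[idx];
            if (stimer->msg_pending && stimer->config.enable &&
                !stimer->config.direct_mode &&
                stimer->config.sintx == sint)
                stimer_mark_pending(stimer, false);
        }
    }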
222 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in synic_exit() local
224 hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC; in synic_exit()
225 hv_vcpu->exit.u.synic.msr = msr; in synic_exit()
226 hv_vcpu->exit.u.synic.control = synic->control; in synic_exit()
227 hv_vcpu->exit.u.synic.evt_page = synic->evt_page; in synic_exit()
228 hv_vcpu->exit.u.synic.msg_page = synic->msg_page; in synic_exit()
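
Lines 222-228 are the SynIC-to-userspace bridge: synic_exit() snapshots the MSR write into hv_vcpu->exit so the VMM sees a consistent view. The closing kvm_make_request(KVM_REQ_HV_EXIT, ...) is assumed from the upstream pattern:

    static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
    {
        struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

        /* Snapshot SynIC state for the KVM_EXIT_HYPERV userspace exit. */
        hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
        hv_vcpu->exit.u.synic.msr = msr;
        hv_vcpu->exit.u.synic.control = synic->control;
        hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
        hv_vcpu->exit.u.synic.msg_page = synic->msg_page;
        kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
    }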
301 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_is_syndbg_enabled() local
303 return hv_vcpu->cpuid_cache.syndbg_cap_eax & in kvm_hv_is_syndbg_enabled()
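
The hit at 303 is truncated by the matcher; the masked flag is presumably HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING from the cached SynDBG capabilities leaf:

    static bool kvm_hv_is_syndbg_enabled(struct kvm_vcpu *vcpu)
    {
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

        /* Guest CPUID advertised kernel-debugger support via SynDBG. */
        return hv_vcpu->cpuid_cache.syndbg_cap_eax &
               HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
    }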
320 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in syndbg_exit() local
322 hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG; in syndbg_exit()
323 hv_vcpu->exit.u.syndbg.msr = msr; in syndbg_exit()
324 hv_vcpu->exit.u.syndbg.control = syndbg->control.control; in syndbg_exit()
325 hv_vcpu->exit.u.syndbg.send_page = syndbg->control.send_page; in syndbg_exit()
326 hv_vcpu->exit.u.syndbg.recv_page = syndbg->control.recv_page; in syndbg_exit()
327 hv_vcpu->exit.u.syndbg.pending_page = syndbg->control.pending_page; in syndbg_exit()
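
syndbg_exit() (320-327) mirrors synic_exit() for the synthetic debugger, filling the syndbg arm of the exit union from syndbg->control; the to_hv_syndbg() lookup and the final kvm_make_request() are again assumed from the upstream pattern:

    static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr)
    {
        struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

        hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG;
        hv_vcpu->exit.u.syndbg.msr = msr;
        hv_vcpu->exit.u.syndbg.control = syndbg->control.control;
        hv_vcpu->exit.u.syndbg.send_page = syndbg->control.send_page;
        hv_vcpu->exit.u.syndbg.recv_page = syndbg->control.recv_page;
        hv_vcpu->exit.u.syndbg.pending_page = syndbg->control.pending_page;
        kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
    }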
658 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in stimer_set_config() local
664 if (unlikely(!host && hv_vcpu->enforce_cpuid && new_config.direct_mode && in stimer_set_config()
665 !(hv_vcpu->cpuid_cache.features_edx & in stimer_set_config()
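
The stimer_set_config() hits (658-665) are an enforce_cpuid guard: a guest-initiated write that turns on direct-mode timers fails unless CPUID granted the feature. A sketch of the check, with the truncated EDX flag assumed to be HV_STIMER_DIRECT_MODE_AVAILABLE and the hv_stimer_config union taken from hyperv-tlfs.h:

    union hv_stimer_config new_config = {.as_uint64 = config};

    /* Guest writes (host == false) may not enable direct mode unless the
     * guest's CPUID carries the direct-mode timer feature bit. */
    if (unlikely(!host && hv_vcpu->enforce_cpuid && new_config.direct_mode &&
                 !(hv_vcpu->cpuid_cache.features_edx &
                   HV_STIMER_DIRECT_MODE_AVAILABLE)))
        return 1;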
831 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_process_stimers() local
836 if (!hv_vcpu) in kvm_hv_process_stimers()
839 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) in kvm_hv_process_stimers()
840 if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) { in kvm_hv_process_stimers()
841 stimer = &hv_vcpu->stimer[i]; in kvm_hv_process_stimers()
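
kvm_hv_process_stimers() (831-841) shows the lazy-allocation convention used throughout this file: hv_vcpu stays NULL until the guest first touches a Hyper-V interface, so every consumer bails out early. The actual work is driven by stimer_pending_bitmap; the per-timer expiry handling is elided in this sketch:

    void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
    {
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
        struct kvm_vcpu_hv_stimer *stimer;
        int i;

        if (!hv_vcpu)   /* no Hyper-V state was ever allocated */
            return;

        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
            if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
                stimer = &hv_vcpu->stimer[i];
                /* expire, deliver the timer message, or re-arm (elided) */
            }
    }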
864 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_vcpu_uninit() local
867 if (!hv_vcpu) in kvm_hv_vcpu_uninit()
870 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) in kvm_hv_vcpu_uninit()
871 stimer_cleanup(&hv_vcpu->stimer[i]); in kvm_hv_vcpu_uninit()
873 kfree(hv_vcpu); in kvm_hv_vcpu_uninit()
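
Teardown (864-873) is the mirror image of kvm_hv_vcpu_init() below: cancel each timer, then free the lazily allocated context. Clearing vcpu->arch.hyperv afterwards is an assumption made to match the init path:

    void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
    {
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
        int i;

        if (!hv_vcpu)
            return;

        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
            stimer_cleanup(&hv_vcpu->stimer[i]);

        kfree(hv_vcpu);
        vcpu->arch.hyperv = NULL;   /* assumed: drop the back-pointer set at init */
    }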
879 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_assist_page_enabled() local
881 if (!hv_vcpu) in kvm_hv_assist_page_enabled()
884 if (!(hv_vcpu->hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) in kvm_hv_assist_page_enabled()
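
kvm_hv_assist_page_enabled() (879-884) tests the enable bit the guest wrote into the VP assist page MSR, which kvm_hv_set_msr() caches in hv_vapic. A sketch; upstream gates the final result on further state (e.g. PV EOI enablement), which is only noted here:

    bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
    {
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

        if (!hv_vcpu)
            return false;
        if (!(hv_vcpu->hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
            return false;
        /* further gating (e.g. on PV EOI enablement) elided */
        return true;
    }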
926 struct kvm_vcpu_hv *hv_vcpu; in kvm_hv_vcpu_init() local
929 hv_vcpu = kzalloc(sizeof(struct kvm_vcpu_hv), GFP_KERNEL_ACCOUNT); in kvm_hv_vcpu_init()
930 if (!hv_vcpu) in kvm_hv_vcpu_init()
933 vcpu->arch.hyperv = hv_vcpu; in kvm_hv_vcpu_init()
934 hv_vcpu->vcpu = vcpu; in kvm_hv_vcpu_init()
936 synic_init(&hv_vcpu->synic); in kvm_hv_vcpu_init()
938 bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT); in kvm_hv_vcpu_init()
939 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) in kvm_hv_vcpu_init()
940 stimer_init(&hv_vcpu->stimer[i], i); in kvm_hv_vcpu_init()
942 hv_vcpu->vp_index = vcpu->vcpu_idx; in kvm_hv_vcpu_init()
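
The allocation path (926-942) ties the section together: the context is kzalloc'ed on first use, the SynIC and all synthetic timers are initialized, and vp_index defaults to the vCPU index (which is why kvm_hv_set_msr below counts "mismatched" indexes). Reassembled from the hits above, with the -ENOMEM return assumed:

    static int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
    {
        struct kvm_vcpu_hv *hv_vcpu;
        int i;

        hv_vcpu = kzalloc(sizeof(struct kvm_vcpu_hv), GFP_KERNEL_ACCOUNT);
        if (!hv_vcpu)
            return -ENOMEM;

        vcpu->arch.hyperv = hv_vcpu;
        hv_vcpu->vcpu = vcpu;

        synic_init(&hv_vcpu->synic);

        bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
            stimer_init(&hv_vcpu->stimer[i], i);

        /* The VP index tracks the vCPU index until userspace overrides it. */
        hv_vcpu->vp_index = vcpu->vcpu_idx;

        return 0;
    }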
1230 static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr) in hv_check_msr_access() argument
1232 if (!hv_vcpu->enforce_cpuid) in hv_check_msr_access()
1238 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1241 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1244 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1247 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1250 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1253 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1261 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1271 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1277 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1282 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1287 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1291 return hv_vcpu->cpuid_cache.features_edx & in hv_check_msr_access()
1295 return hv_vcpu->cpuid_cache.features_edx & in hv_check_msr_access()
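
hv_check_msr_access() (1230-1295) is one long switch: with enforce_cpuid off everything is allowed; otherwise each MSR family maps to one bit of the cached HYPERV_CPUID_FEATURES leaf (EAX for most hits, EDX for the last two). A two-case sketch of the shape, with the specific HV_MSR_*_AVAILABLE names assumed from hyperv-tlfs.h:

    static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
    {
        if (!hv_vcpu->enforce_cpuid)
            return true;   /* legacy behaviour: no filtering */

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
        case HV_X64_MSR_HYPERCALL:
            return hv_vcpu->cpuid_cache.features_eax &
                   HV_MSR_HYPERCALL_AVAILABLE;
        case HV_X64_MSR_VP_RUNTIME:
            return hv_vcpu->cpuid_cache.features_eax &
                   HV_MSR_VP_RUNTIME_AVAILABLE;
        /* ... remaining MSR families elided ... */
        default:
            break;
        }
        return false;
    }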
1439 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_set_msr() local
1441 if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr))) in kvm_hv_set_msr()
1452 if (new_vp_index == hv_vcpu->vp_index) in kvm_hv_set_msr()
1461 if (hv_vcpu->vp_index == vcpu->vcpu_idx) in kvm_hv_set_msr()
1466 hv_vcpu->vp_index = new_vp_index; in kvm_hv_set_msr()
1474 hv_vcpu->hv_vapic = data; in kvm_hv_set_msr()
1491 hv_vcpu->hv_vapic = data; in kvm_hv_set_msr()
1508 hv_vcpu->runtime_offset = data - current_task_runtime_100ns(); in kvm_hv_set_msr()
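
The kvm_hv_set_msr() hits (1439-1508) cover three writes: VP_INDEX bookkeeping, caching the VP assist page MSR in hv_vapic (both branches of the enable bit), and deriving runtime_offset so VP_RUNTIME reads stay consistent across the write. The VP_INDEX arm, sketched with the upstream to_kvm_hv() helper and num_mismatched_vp_indexes counter assumed:

    case HV_X64_MSR_VP_INDEX: {
        struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
        u32 new_vp_index = (u32)data;

        /* host-only range/sanity checks elided */
        if (new_vp_index == hv_vcpu->vp_index)
            return 0;   /* no change */

        /* vp_index starts out equal to vcpu_idx, so moving away from it
         * creates a mismatch and moving back removes one. */
        if (hv_vcpu->vp_index == vcpu->vcpu_idx)
            atomic_inc(&hv->num_mismatched_vp_indexes);
        else if (new_vp_index == vcpu->vcpu_idx)
            atomic_dec(&hv->num_mismatched_vp_indexes);

        hv_vcpu->vp_index = new_vp_index;
        break;
    }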
1607 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_get_msr() local
1609 if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr))) in kvm_hv_get_msr()
1614 data = hv_vcpu->vp_index; in kvm_hv_get_msr()
1623 data = hv_vcpu->hv_vapic; in kvm_hv_get_msr()
1626 data = current_task_runtime_100ns() + hv_vcpu->runtime_offset; in kvm_hv_get_msr()
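
The read side (1607-1626) is symmetric: VP_RUNTIME is reported as the host task runtime plus the offset stored on the last write, so the guest sees a monotonically advancing value whose base it controls. A sketch of the three cases, with the HV_X64_MSR_VP_ASSIST_PAGE label assumed for the hv_vapic read:

    case HV_X64_MSR_VP_INDEX:
        data = hv_vcpu->vp_index;
        break;
    case HV_X64_MSR_VP_ASSIST_PAGE:
        data = hv_vcpu->hv_vapic;
        break;
    case HV_X64_MSR_VP_RUNTIME:
        /* guest-visible runtime = host task runtime + stored offset */
        data = current_task_runtime_100ns() + hv_vcpu->runtime_offset;
        break;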
1956 struct kvm_vcpu_hv *hv_vcpu; in kvm_hv_set_cpuid() local
1969 hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_set_cpuid()
1973 hv_vcpu->cpuid_cache.features_eax = entry->eax; in kvm_hv_set_cpuid()
1974 hv_vcpu->cpuid_cache.features_ebx = entry->ebx; in kvm_hv_set_cpuid()
1975 hv_vcpu->cpuid_cache.features_edx = entry->edx; in kvm_hv_set_cpuid()
1977 hv_vcpu->cpuid_cache.features_eax = 0; in kvm_hv_set_cpuid()
1978 hv_vcpu->cpuid_cache.features_ebx = 0; in kvm_hv_set_cpuid()
1979 hv_vcpu->cpuid_cache.features_edx = 0; in kvm_hv_set_cpuid()
1984 hv_vcpu->cpuid_cache.enlightenments_eax = entry->eax; in kvm_hv_set_cpuid()
1985 hv_vcpu->cpuid_cache.enlightenments_ebx = entry->ebx; in kvm_hv_set_cpuid()
1987 hv_vcpu->cpuid_cache.enlightenments_eax = 0; in kvm_hv_set_cpuid()
1988 hv_vcpu->cpuid_cache.enlightenments_ebx = 0; in kvm_hv_set_cpuid()
1993 hv_vcpu->cpuid_cache.syndbg_cap_eax = entry->eax; in kvm_hv_set_cpuid()
1995 hv_vcpu->cpuid_cache.syndbg_cap_eax = 0; in kvm_hv_set_cpuid()
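
kvm_hv_set_cpuid() (1956-1995) refreshes cpuid_cache whenever userspace installs a new CPUID table: each relevant Hyper-V leaf is copied into the cache, or zeroed if absent, so the access checks above never walk the CPUID array at MSR/hypercall time. One leaf of the copy-or-zero pattern, with the three-argument kvm_find_cpuid_entry() signature assumed from the same era:

    struct kvm_cpuid_entry2 *entry;

    entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES, 0);
    if (entry) {
        hv_vcpu->cpuid_cache.features_eax = entry->eax;
        hv_vcpu->cpuid_cache.features_ebx = entry->ebx;
        hv_vcpu->cpuid_cache.features_edx = entry->edx;
    } else {
        hv_vcpu->cpuid_cache.features_eax = 0;
        hv_vcpu->cpuid_cache.features_ebx = 0;
        hv_vcpu->cpuid_cache.features_edx = 0;
    }
    /* same copy-or-zero step for the enlightenments leaf (eax/ebx) and
     * the SynDBG capabilities leaf (eax) */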
2000 struct kvm_vcpu_hv *hv_vcpu; in kvm_hv_set_enforce_cpuid() local
2013 hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_set_enforce_cpuid()
2014 hv_vcpu->enforce_cpuid = enforce; in kvm_hv_set_enforce_cpuid()
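
kvm_hv_set_enforce_cpuid() (2000-2014) flips the per-vCPU enforcement flag; since the flag lives in the lazily allocated context, the setter presumably has to create that context first when it does not exist yet. A sketch under that assumption:

    int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce)
    {
        struct kvm_vcpu_hv *hv_vcpu;
        int ret;

        if (!to_hv_vcpu(vcpu)) {
            if (!enforce)
                return 0;        /* nothing to enforce against yet */
            ret = kvm_hv_vcpu_init(vcpu);
            if (ret)
                return ret;
        }

        hv_vcpu = to_hv_vcpu(vcpu);
        hv_vcpu->enforce_cpuid = enforce;
        return 0;
    }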
2114 static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code) in hv_check_hypercall_access() argument
2116 if (!hv_vcpu->enforce_cpuid) in hv_check_hypercall_access()
2121 return hv_vcpu->cpuid_cache.enlightenments_ebx && in hv_check_hypercall_access()
2122 hv_vcpu->cpuid_cache.enlightenments_ebx != U32_MAX; in hv_check_hypercall_access()
2124 return hv_vcpu->cpuid_cache.features_ebx & HV_POST_MESSAGES; in hv_check_hypercall_access()
2126 return hv_vcpu->cpuid_cache.features_ebx & HV_SIGNAL_EVENTS; in hv_check_hypercall_access()
2134 return !kvm_hv_is_syndbg_enabled(hv_vcpu->vcpu) || in hv_check_hypercall_access()
2135 hv_vcpu->cpuid_cache.features_ebx & HV_DEBUGGING; in hv_check_hypercall_access()
2138 if (!(hv_vcpu->cpuid_cache.enlightenments_eax & in hv_check_hypercall_access()
2144 return hv_vcpu->cpuid_cache.enlightenments_eax & in hv_check_hypercall_access()
2147 if (!(hv_vcpu->cpuid_cache.enlightenments_eax & in hv_check_hypercall_access()
2152 return hv_vcpu->cpuid_cache.enlightenments_eax & in hv_check_hypercall_access()
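
hv_check_hypercall_access() (2114-2152) is the hypercall twin of the MSR filter. The long-spin-wait case is the subtle one: the enlightenments EBX value is a spinlock-retry count, and both 0 and U32_MAX ("never notify") mean the guest was not told to use the hypercall. A partial sketch with the HVCALL_* case labels assumed from hyperv-tlfs.h:

    switch (code) {
    case HVCALL_NOTIFY_LONG_SPIN_WAIT:
        /* a retry count of 0 or "never" disables the enlightenment */
        return hv_vcpu->cpuid_cache.enlightenments_ebx &&
               hv_vcpu->cpuid_cache.enlightenments_ebx != U32_MAX;
    case HVCALL_POST_MESSAGE:
        return hv_vcpu->cpuid_cache.features_ebx & HV_POST_MESSAGES;
    case HVCALL_SIGNAL_EVENT:
        return hv_vcpu->cpuid_cache.features_ebx & HV_SIGNAL_EVENTS;
    case HVCALL_RESET_DEBUG_SESSION:
        /* allowed while SynDBG is off so the call fails with "invalid
         * hypercall code" rather than "access denied" */
        return !kvm_hv_is_syndbg_enabled(hv_vcpu->vcpu) ||
               (hv_vcpu->cpuid_cache.features_ebx & HV_DEBUGGING);
    /* TLB-flush and IPI hypercalls check enlightenments_eax (elided) */
    default:
        break;
    }
    return true;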
2163 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_hypercall() local
2201 if (unlikely(!hv_check_hypercall_access(hv_vcpu, hc.code))) { in kvm_hv_hypercall()
2207 if (unlikely(hv_vcpu->enforce_cpuid && in kvm_hv_hypercall()
2208 !(hv_vcpu->cpuid_cache.features_edx & in kvm_hv_hypercall()
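
Finally, kvm_hv_hypercall() (2163-2208) applies both gates in sequence: a failed per-code access check returns HV_STATUS_ACCESS_DENIED to the guest, while an XMM-argument fast hypercall without the matching EDX feature bit injects #UD instead. Sketched with the hypercall_complete label, is_xmm_fast_hypercall() helper, and HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE flag assumed from upstream:

    /* Deny hypercalls the guest's CPUID does not advertise. */
    if (unlikely(!hv_check_hypercall_access(hv_vcpu, hc.code))) {
        ret = HV_STATUS_ACCESS_DENIED;
        goto hypercall_complete;
    }

    /* XMM-argument ("fast") hypercalls need their own feature bit. */
    if (hc.fast && is_xmm_fast_hypercall(&hc)) {
        if (unlikely(hv_vcpu->enforce_cpuid &&
                     !(hv_vcpu->cpuid_cache.features_edx &
                       HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE))) {
            kvm_queue_exception(vcpu, UD_VECTOR);
            return 1;
        }
        kvm_hv_hypercall_read_xmm(&hc);
    }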