Lines Matching refs:best in arch/x86/kvm/cpuid.c

85 struct kvm_cpuid_entry2 *best; in kvm_check_cpuid() local
91 best = cpuid_entry2_find(entries, nent, 0x80000008, 0); in kvm_check_cpuid()
92 if (best) { in kvm_check_cpuid()
93 int vaddr_bits = (best->eax & 0xff00) >> 8; in kvm_check_cpuid()
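
The kvm_check_cpuid() lines above validate the guest's reported address widths from CPUID leaf 0x80000008, whose EAX encodes the physical address width in bits 7:0 and the linear (virtual) address width in bits 15:8. A minimal, self-contained sketch of that decoding, using an illustrative struct cpuid_entry rather than the kernel's kvm_cpuid_entry2:

/*
 * Simplified userspace model of the leaf-0x80000008 check seen above.
 * The struct and helper names are illustrative, not the kernel's.
 */
#include <stdint.h>
#include <stdio.h>

struct cpuid_entry { uint32_t function, index, eax, ebx, ecx, edx; };

static int vaddr_bits_from_leaf(const struct cpuid_entry *e)
{
	return (e->eax & 0xff00) >> 8;	/* linear address width, EAX[15:8] */
}

int main(void)
{
	/* 0x3028: 0x30 = 48 virtual address bits, 0x28 = 40 physical bits */
	struct cpuid_entry leaf = { .function = 0x80000008, .eax = 0x3028 };

	printf("vaddr bits: %d\n", vaddr_bits_from_leaf(&leaf));
	printf("paddr bits: %u\n", leaf.eax & 0xff);
	return 0;
}
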
140 struct kvm_cpuid_entry2 *best = kvm_find_kvm_cpuid_features(vcpu); in kvm_update_pv_runtime() local
146 if (best) in kvm_update_pv_runtime()
147 vcpu->arch.pv_cpuid.features = best->eax; in kvm_update_pv_runtime()
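
kvm_update_pv_runtime() simply caches the EAX word of the KVM paravirt features leaf (KVM_CPUID_FEATURES, i.e. 0x40000001 when the hypervisor range starts at the usual 0x40000000) into vcpu->arch.pv_cpuid.features, so later paravirt handling does not have to re-walk the CPUID array. A sketch of the same caching pattern with illustrative types:

#include <stdint.h>

struct cpuid_entry { uint32_t function, index, eax, ebx, ecx, edx; };

struct pv_cpuid_cache { uint32_t features; };

/* Mirror of the "if (best) cache best->eax" guard in the listing above. */
static void cache_pv_features(struct pv_cpuid_cache *pv,
			      const struct cpuid_entry *features_leaf)
{
	if (features_leaf)
		pv->features = features_leaf->eax;
}
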
152 struct kvm_cpuid_entry2 *best; in kvm_update_cpuid_runtime() local
154 best = kvm_find_cpuid_entry(vcpu, 1, 0); in kvm_update_cpuid_runtime()
155 if (best) { in kvm_update_cpuid_runtime()
158 cpuid_entry_change(best, X86_FEATURE_OSXSAVE, in kvm_update_cpuid_runtime()
161 cpuid_entry_change(best, X86_FEATURE_APIC, in kvm_update_cpuid_runtime()
165 best = kvm_find_cpuid_entry(vcpu, 7, 0); in kvm_update_cpuid_runtime()
166 if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7) in kvm_update_cpuid_runtime()
167 cpuid_entry_change(best, X86_FEATURE_OSPKE, in kvm_update_cpuid_runtime()
170 best = kvm_find_cpuid_entry(vcpu, 0xD, 0); in kvm_update_cpuid_runtime()
171 if (best) in kvm_update_cpuid_runtime()
172 best->ebx = xstate_required_size(vcpu->arch.xcr0, false); in kvm_update_cpuid_runtime()
174 best = kvm_find_cpuid_entry(vcpu, 0xD, 1); in kvm_update_cpuid_runtime()
175 if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) || in kvm_update_cpuid_runtime()
176 cpuid_entry_has(best, X86_FEATURE_XSAVEC))) in kvm_update_cpuid_runtime()
177 best->ebx = xstate_required_size(vcpu->arch.xcr0, true); in kvm_update_cpuid_runtime()
179 best = kvm_find_kvm_cpuid_features(vcpu); in kvm_update_cpuid_runtime()
180 if (kvm_hlt_in_guest(vcpu->kvm) && best && in kvm_update_cpuid_runtime()
181 (best->eax & (1 << KVM_FEATURE_PV_UNHALT))) in kvm_update_cpuid_runtime()
182 best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT); in kvm_update_cpuid_runtime()
185 best = kvm_find_cpuid_entry(vcpu, 0x1, 0); in kvm_update_cpuid_runtime()
186 if (best) in kvm_update_cpuid_runtime()
187 cpuid_entry_change(best, X86_FEATURE_MWAIT, in kvm_update_cpuid_runtime()
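
Every block in kvm_update_cpuid_runtime() follows the same pattern: look up a cached CPUID entry and force individual bits to mirror current guest state. OSXSAVE (CPUID.01H:ECX[27]) tracks CR4.OSXSAVE, APIC tracks the APIC-enable bit in the guest's APIC base MSR, OSPKE tracks CR4.PKE, the leaf-0xD EBX sizes are recomputed from the active XCR0 via xstate_required_size(), PV_UNHALT is masked off when HLT does not exit, and MWAIT mirrors IA32_MISC_ENABLE. A toy version of that find-and-change-one-bit step (the kernel's cpuid_entry_change() works on X86_FEATURE_* words; this illustrative variant takes a register pointer and a bit number):

#include <stdbool.h>
#include <stdint.h>

struct cpuid_entry { uint32_t function, index, eax, ebx, ecx, edx; };

/* Set or clear a single bit in one CPUID output register. */
static void entry_change_bit(uint32_t *reg, unsigned int bit, bool set)
{
	if (set)
		*reg |= 1u << bit;
	else
		*reg &= ~(1u << bit);
}

/* e.g. mirror CR4.OSXSAVE into CPUID.01H:ECX[27] (OSXSAVE) */
static void sync_osxsave(struct cpuid_entry *leaf1, bool cr4_osxsave)
{
	if (leaf1)	/* same "if (best)" guard as in the listing */
		entry_change_bit(&leaf1->ecx, 27, cr4_osxsave);
}
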
197 struct kvm_cpuid_entry2 *best; in kvm_vcpu_after_set_cpuid() local
199 best = kvm_find_cpuid_entry(vcpu, 1, 0); in kvm_vcpu_after_set_cpuid()
200 if (best && apic) { in kvm_vcpu_after_set_cpuid()
201 if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER)) in kvm_vcpu_after_set_cpuid()
209 best = kvm_find_cpuid_entry(vcpu, 0xD, 0); in kvm_vcpu_after_set_cpuid()
210 if (!best) in kvm_vcpu_after_set_cpuid()
214 (best->eax | ((u64)best->edx << 32)) & supported_xcr0; in kvm_vcpu_after_set_cpuid()
224 best = kvm_find_cpuid_entry(vcpu, 0x12, 0x1); in kvm_vcpu_after_set_cpuid()
225 if (best) { in kvm_vcpu_after_set_cpuid()
226 best->ecx &= vcpu->arch.guest_supported_xcr0 & 0xffffffff; in kvm_vcpu_after_set_cpuid()
227 best->edx &= vcpu->arch.guest_supported_xcr0 >> 32; in kvm_vcpu_after_set_cpuid()
228 best->ecx |= XFEATURE_MASK_FPSSE; in kvm_vcpu_after_set_cpuid()
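
In kvm_vcpu_after_set_cpuid(), the guest's permitted XCR0 is rebuilt from CPUID.0DH.0 (the mask is split across EAX for the low 32 bits and EDX for the high 32) and intersected with what the host and KVM support; the SGX SECS.ATTRIBUTES XFRM bits reported in CPUID.12H.1 ECX/EDX are then clamped to that mask, with x87+SSE always left set. A hedged sketch of those two steps with illustrative names:

#include <stdint.h>

struct cpuid_entry { uint32_t function, index, eax, ebx, ecx, edx; };

#define XFEATURE_MASK_FPSSE	0x3ull	/* x87 (bit 0) | SSE (bit 1) */

/* CPUID.0DH.0: EAX = low 32 bits, EDX = high 32 bits of the XSAVE mask. */
static uint64_t derive_guest_xcr0(const struct cpuid_entry *leaf_d0,
				  uint64_t host_supported_xcr0)
{
	if (!leaf_d0)
		return 0;
	return (leaf_d0->eax | ((uint64_t)leaf_d0->edx << 32)) &
	       host_supported_xcr0;
}

/* Clamp the SGX SECS.ATTRIBUTES XFRM bits (CPUID.12H.1 ECX/EDX). */
static void clamp_sgx_xfrm(struct cpuid_entry *leaf_12_1, uint64_t guest_xcr0)
{
	if (!leaf_12_1)
		return;
	leaf_12_1->ecx &= (uint32_t)guest_xcr0;
	leaf_12_1->edx &= (uint32_t)(guest_xcr0 >> 32);
	leaf_12_1->ecx |= XFEATURE_MASK_FPSSE;	/* x87/SSE always allowed */
}
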
254 struct kvm_cpuid_entry2 *best; in cpuid_query_maxphyaddr() local
256 best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0); in cpuid_query_maxphyaddr()
257 if (!best || best->eax < 0x80000008) in cpuid_query_maxphyaddr()
259 best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0); in cpuid_query_maxphyaddr()
260 if (best) in cpuid_query_maxphyaddr()
261 return best->eax & 0xff; in cpuid_query_maxphyaddr()
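
cpuid_query_maxphyaddr() only trusts leaf 0x80000008 if leaf 0x80000000 reports that the extended leaves reach that far, and then returns EAX[7:0], the physical address width; the surrounding kernel code falls back to 36 bits when the leaf is absent. A self-contained sketch of the same two-step lookup, with an illustrative linear-scan finder standing in for kvm_find_cpuid_entry():

#include <stddef.h>
#include <stdint.h>

struct cpuid_entry { uint32_t function, index, eax, ebx, ecx, edx; };

static const struct cpuid_entry *find_leaf(const struct cpuid_entry *e,
					   size_t n, uint32_t fn, uint32_t idx)
{
	for (size_t i = 0; i < n; i++)
		if (e[i].function == fn && e[i].index == idx)
			return &e[i];
	return NULL;
}

static int query_maxphyaddr(const struct cpuid_entry *entries, size_t nent)
{
	const struct cpuid_entry *best;

	/* Leaf 0x80000000 EAX: highest supported extended leaf. */
	best = find_leaf(entries, nent, 0x80000000, 0);
	if (!best || best->eax < 0x80000008)
		return 36;	/* same default the kernel falls back to */

	best = find_leaf(entries, nent, 0x80000008, 0);
	return best ? (int)(best->eax & 0xff) : 36;
}
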