Lines matching refs:vcpu in arch/x86/kvm/vmx/sgx.c (KVM's SGX/ENCLS emulation). Each hit is prefixed with its line number in that file and suffixed with the enclosing function; "argument" marks lines where vcpu appears as a function parameter rather than a use.
23 static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset, in sgx_get_encls_gva() argument
31 if (!is_long_mode(vcpu)) { in sgx_get_encls_gva()
32 vmx_get_segment(vcpu, &s, VCPU_SREG_DS); in sgx_get_encls_gva()
38 } else if (likely(is_long_mode(vcpu))) { in sgx_get_encls_gva()
39 fault = is_noncanonical_address(*gva, vcpu); in sgx_get_encls_gva()
49 kvm_inject_gp(vcpu, 0); in sgx_get_encls_gva()
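
The hits above show both halves of operand validation in sgx_get_encls_gva(): outside 64-bit mode the effective address is formed against DS (line 32), while in 64-bit mode the address only has to be canonical (line 39); either failure ends in #GP(0) at line 49. Below is a minimal standalone sketch of that canonical test, assuming 48 virtual-address bits (the real is_noncanonical_address() derives the width from guest state, e.g. LA57):

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Simplified model of the is_noncanonical_address() check at line 39,
     * fixed at 48 virtual-address bits for illustration.
     */
    static bool is_noncanonical_48(uint64_t gva)
    {
        /* Bits 63:47 must all equal bit 47: sign-extend and compare. */
        return (int64_t)(gva << 16) >> 16 != (int64_t)gva;
    }
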
53 static void sgx_handle_emulation_failure(struct kvm_vcpu *vcpu, u64 addr, in sgx_handle_emulation_failure() argument
58 __kvm_prepare_emulation_failure_exit(vcpu, data, ARRAY_SIZE(data)); in sgx_handle_emulation_failure()
61 static int sgx_read_hva(struct kvm_vcpu *vcpu, unsigned long hva, void *data, in sgx_read_hva() argument
65 sgx_handle_emulation_failure(vcpu, hva, size); in sgx_read_hva()
72 static int sgx_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t gva, bool write, in sgx_gva_to_gpa() argument
78 *gpa = kvm_mmu_gva_to_gpa_write(vcpu, gva, &ex); in sgx_gva_to_gpa()
80 *gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, &ex); in sgx_gva_to_gpa()
83 kvm_inject_emulated_page_fault(vcpu, &ex); in sgx_gva_to_gpa()
90 static int sgx_gpa_to_hva(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned long *hva) in sgx_gpa_to_hva() argument
92 *hva = kvm_vcpu_gfn_to_hva(vcpu, PFN_DOWN(gpa)); in sgx_gpa_to_hva()
94 sgx_handle_emulation_failure(vcpu, gpa, 1); in sgx_gpa_to_hva()
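
Taken together, sgx_gva_to_gpa() and sgx_gpa_to_hva() give the two-step translation every ENCLS operand goes through: guest virtual to guest physical via the KVM MMU, with an emulated #PF injected back into the guest on failure (line 83), then guest physical to a host virtual address, with a missing memslot reported to userspace as an emulation failure (line 94). A standalone model of that chain; the callbacks are hypothetical stand-ins for the kvm_mmu_gva_to_gpa_*() and kvm_vcpu_gfn_to_hva() helpers seen above:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t gva_t;
    typedef uint64_t gpa_t;

    /* Hypothetical translation callbacks (not the real KVM API). */
    struct translator {
        bool (*gva_to_gpa)(gva_t gva, bool write, gpa_t *gpa);
        bool (*gpa_to_hva)(gpa_t gpa, unsigned long *hva);
    };

    /*
     * Mirrors the two-step operand resolve: any failure aborts the ENCLS
     * emulation, and the caller decides between injecting #PF (line 83)
     * and reporting an emulation failure to userspace (line 94).
     */
    static int resolve_operand(const struct translator *t, gva_t gva,
                               bool write, unsigned long *hva)
    {
        gpa_t gpa;

        if (!t->gva_to_gpa(gva, write, &gpa))
            return -1;    /* emulated #PF goes back to the guest */
        if (!t->gpa_to_hva(gpa, hva))
            return -1;    /* no memslot: emulation-failure exit */
        return 0;
    }
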
103 static int sgx_inject_fault(struct kvm_vcpu *vcpu, gva_t gva, int trapnr) in sgx_inject_fault() argument
113 kvm_prepare_emulation_failure_exit(vcpu); in sgx_inject_fault()
124 guest_cpuid_has(vcpu, X86_FEATURE_SGX2)) { in sgx_inject_fault()
132 kvm_inject_page_fault(vcpu, &ex); in sgx_inject_fault()
134 kvm_inject_gp(vcpu, 0); in sgx_inject_fault()
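
sgx_inject_fault() decides how a fault taken while running ENCLS on the guest's behalf is reflected: a guest that advertises SGX2 (line 124) understands EPCM-induced page faults, so it gets a #PF carrying the SGX error-code bit; otherwise the fault collapses into #GP(0) (line 134). A simplified sketch; the PFERR_SGX position (bit 15) comes from the SDM's page-fault error-code definition:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PF_VECTOR       14
    #define PFERR_SGX_MASK  (1ULL << 15)    /* SGX bit of the #PF error code */

    /*
     * Decision mirrored from sgx_inject_fault().  KVM additionally sets
     * the present and write bits in the injected error code; they are
     * omitted here for brevity.
     */
    static void reflect_encls_fault(int trapnr, bool guest_has_sgx2,
                                    uint64_t gva)
    {
        if (trapnr == PF_VECTOR && guest_has_sgx2)
            printf("inject #PF, addr=%#llx, error=%#llx\n",
                   (unsigned long long)gva,
                   (unsigned long long)PFERR_SGX_MASK);
        else
            printf("inject #GP(0)\n");
    }
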
139 static int __handle_encls_ecreate(struct kvm_vcpu *vcpu, in __handle_encls_ecreate() argument
151 sgx_12_0 = kvm_find_cpuid_entry(vcpu, 0x12, 0); in __handle_encls_ecreate()
152 sgx_12_1 = kvm_find_cpuid_entry(vcpu, 0x12, 1); in __handle_encls_ecreate()
154 kvm_prepare_emulation_failure_exit(vcpu); in __handle_encls_ecreate()
164 if (!vcpu->kvm->arch.sgx_provisioning_allowed && in __handle_encls_ecreate()
168 kvm_inject_gp(vcpu, 0); in __handle_encls_ecreate()
178 kvm_inject_gp(vcpu, 0); in __handle_encls_ecreate()
186 kvm_inject_gp(vcpu, 0); in __handle_encls_ecreate()
199 return kvm_skip_emulated_instruction(vcpu); in __handle_encls_ecreate()
201 return sgx_inject_fault(vcpu, secs_gva, trapnr); in __handle_encls_ecreate()
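
The checks in __handle_encls_ecreate() police the SECS the guest hands to ECREATE: CPUID leaf 0x12, subleafs 0 and 1 (lines 151-152) supply the MISCSELECT/ATTRIBUTES/XFRM masks the VMM advertised, PROVISIONKEY additionally requires the per-VM sgx_provisioning_allowed opt-in (line 164), and any violation is a #GP(0). A hypothetical standalone version of the attribute check; SGX_ATTR_PROVISIONKEY (ATTRIBUTES bit 4) is the SDM's value:

    #include <stdbool.h>
    #include <stdint.h>

    #define SGX_ATTR_PROVISIONKEY   (1ULL << 4)

    /*
     * Hypothetical mirror of the SECS policing in __handle_encls_ecreate():
     * attributes outside the CPUID-advertised mask, or PROVISIONKEY without
     * the per-VM opt-in (line 164), end in #GP(0).
     */
    static bool secs_attributes_ok(uint64_t requested, uint64_t allowed,
                                   bool provisioning_allowed)
    {
        if (requested & ~allowed)
            return false;
        if ((requested & SGX_ATTR_PROVISIONKEY) && !provisioning_allowed)
            return false;
        return true;
    }
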
206 static int handle_encls_ecreate(struct kvm_vcpu *vcpu) in handle_encls_ecreate() argument
217 if (sgx_get_encls_gva(vcpu, kvm_rbx_read(vcpu), 32, 32, &pageinfo_gva) || in handle_encls_ecreate()
218 sgx_get_encls_gva(vcpu, kvm_rcx_read(vcpu), 4096, 4096, &secs_gva)) in handle_encls_ecreate()
225 r = kvm_read_guest_virt(vcpu, pageinfo_gva, &pageinfo, in handle_encls_ecreate()
228 kvm_inject_emulated_page_fault(vcpu, &ex); in handle_encls_ecreate()
231 sgx_handle_emulation_failure(vcpu, pageinfo_gva, in handle_encls_ecreate()
236 if (sgx_get_encls_gva(vcpu, pageinfo.metadata, 64, 64, &metadata_gva) || in handle_encls_ecreate()
237 sgx_get_encls_gva(vcpu, pageinfo.contents, 4096, 4096, in handle_encls_ecreate()
245 if (sgx_gva_to_gpa(vcpu, metadata_gva, false, &metadata_gpa) || in handle_encls_ecreate()
246 sgx_gva_to_gpa(vcpu, contents_gva, false, &contents_gpa) || in handle_encls_ecreate()
247 sgx_gva_to_gpa(vcpu, secs_gva, true, &secs_gpa)) in handle_encls_ecreate()
255 if (sgx_gpa_to_hva(vcpu, metadata_gpa, &metadata_hva) || in handle_encls_ecreate()
256 sgx_gpa_to_hva(vcpu, contents_gpa, &contents_hva) || in handle_encls_ecreate()
257 sgx_gpa_to_hva(vcpu, secs_gpa, &secs_hva)) in handle_encls_ecreate()
271 if (sgx_read_hva(vcpu, contents_hva, (void *)contents, PAGE_SIZE)) { in handle_encls_ecreate()
279 r = __handle_encls_ecreate(vcpu, &pageinfo, secs_hva, secs_gva); in handle_encls_ecreate()
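
handle_encls_ecreate() gathers the operands before any checking: RBX points at a 32-byte PAGEINFO and RCX at the 4 KiB SECS destination (lines 217-218); the PAGEINFO's metadata (SECINFO) and contents pointers are then validated, translated GVA to GPA to HVA, and the source page is copied through the host mapping (line 271) so a concurrent guest write cannot change it after validation. The size/alignment pairs passed to sgx_get_encls_gva() imply the SDM's PAGEINFO layout, sketched here with the field names the listing itself uses:

    #include <stdint.h>

    /*
     * PAGEINFO layout implied by the 32/32 size-and-alignment pair at line
     * 217 and the pageinfo.metadata/pageinfo.contents reads at lines
     * 236-237; matches the SDM's PAGEINFO structure.
     */
    struct sgx_pageinfo {
        uint64_t addr;        /* enclave linear address (LINADDR) */
        uint64_t contents;    /* GVA of the 4 KiB source page (SRCPGE) */
        uint64_t metadata;    /* GVA of the SECINFO */
        uint64_t secs;        /* must be zero for ECREATE per the SDM */
    } __attribute__((packed, aligned(32)));
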
286 static int handle_encls_einit(struct kvm_vcpu *vcpu) in handle_encls_einit() argument
289 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_encls_einit()
294 if (sgx_get_encls_gva(vcpu, kvm_rbx_read(vcpu), 1808, 4096, &sig_gva) || in handle_encls_einit()
295 sgx_get_encls_gva(vcpu, kvm_rcx_read(vcpu), 4096, 4096, &secs_gva) || in handle_encls_einit()
296 sgx_get_encls_gva(vcpu, kvm_rdx_read(vcpu), 304, 512, &token_gva)) in handle_encls_einit()
303 if (sgx_gva_to_gpa(vcpu, sig_gva, false, &sig_gpa) || in handle_encls_einit()
304 sgx_gva_to_gpa(vcpu, secs_gva, true, &secs_gpa) || in handle_encls_einit()
305 sgx_gva_to_gpa(vcpu, token_gva, false, &token_gpa)) in handle_encls_einit()
314 if (sgx_gpa_to_hva(vcpu, sig_gpa, &sig_hva) || in handle_encls_einit()
315 sgx_gpa_to_hva(vcpu, secs_gpa, &secs_hva) || in handle_encls_einit()
316 sgx_gpa_to_hva(vcpu, token_gpa, &token_hva)) in handle_encls_einit()
324 return sgx_inject_fault(vcpu, secs_gva, trapnr); in handle_encls_einit()
335 rflags = vmx_get_rflags(vcpu) & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | in handle_encls_einit()
342 vmx_set_rflags(vcpu, rflags); in handle_encls_einit()
344 kvm_rax_write(vcpu, ret); in handle_encls_einit()
345 return kvm_skip_emulated_instruction(vcpu); in handle_encls_einit()
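
handle_encls_einit() validates three operands (SIGSTRUCT, SECS, and EINITTOKEN at lines 294-296), runs EINIT through the host mappings, and then makes the result look architectural: lines 335-344 clear CF/PF/AF/SF/OF, set ZF exactly when EINIT returned an SGX error code, and place that code in RAX. A sketch of the RFLAGS fixup, with the standard x86 EFLAGS bit values:

    #include <stdint.h>

    #define X86_EFLAGS_CF   0x0001
    #define X86_EFLAGS_PF   0x0004
    #define X86_EFLAGS_AF   0x0010
    #define X86_EFLAGS_ZF   0x0040
    #define X86_EFLAGS_SF   0x0080
    #define X86_EFLAGS_OF   0x0800

    /*
     * RFLAGS fixup from lines 335-344: EINIT reports failure via ZF plus
     * an error code in RAX, and clears the other arithmetic flags.
     */
    static uint64_t einit_fixup_rflags(uint64_t rflags, uint64_t einit_ret)
    {
        rflags &= ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
                    X86_EFLAGS_SF | X86_EFLAGS_OF);
        if (einit_ret)
            rflags |= X86_EFLAGS_ZF;    /* error code valid in RAX */
        else
            rflags &= ~X86_EFLAGS_ZF;
        return rflags;
    }
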
348 static inline bool encls_leaf_enabled_in_guest(struct kvm_vcpu *vcpu, u32 leaf) in encls_leaf_enabled_in_guest() argument
350 if (!enable_sgx || !guest_cpuid_has(vcpu, X86_FEATURE_SGX)) in encls_leaf_enabled_in_guest()
354 return guest_cpuid_has(vcpu, X86_FEATURE_SGX1); in encls_leaf_enabled_in_guest()
357 return guest_cpuid_has(vcpu, X86_FEATURE_SGX2); in encls_leaf_enabled_in_guest()
362 static inline bool sgx_enabled_in_guest_bios(struct kvm_vcpu *vcpu) in sgx_enabled_in_guest_bios() argument
366 return (to_vmx(vcpu)->msr_ia32_feature_control & bits) == bits; in sgx_enabled_in_guest_bios()
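
encls_leaf_enabled_in_guest() gates each leaf on guest CPUID (SGX1 for the base leaves, SGX2 for the dynamic ones), and sgx_enabled_in_guest_bios() checks the guest's IA32_FEATURE_CONTROL shadow: both the lock bit and the SGX-enable bit must be set, which is what the masked compare at line 366 expresses. A standalone version, using the architectural bit positions (0 and 18):

    #include <stdbool.h>
    #include <stdint.h>

    #define FEAT_CTL_LOCKED         (1ULL << 0)
    #define FEAT_CTL_SGX_ENABLED    (1ULL << 18)

    /* SGX counts as BIOS-enabled only when both bits are set. */
    static bool sgx_enabled_in_bios(uint64_t feature_control)
    {
        const uint64_t bits = FEAT_CTL_LOCKED | FEAT_CTL_SGX_ENABLED;

        return (feature_control & bits) == bits;
    }
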
369 int handle_encls(struct kvm_vcpu *vcpu) in handle_encls() argument
371 u32 leaf = (u32)kvm_rax_read(vcpu); in handle_encls()
373 if (!encls_leaf_enabled_in_guest(vcpu, leaf)) { in handle_encls()
374 kvm_queue_exception(vcpu, UD_VECTOR); in handle_encls()
375 } else if (!sgx_enabled_in_guest_bios(vcpu)) { in handle_encls()
376 kvm_inject_gp(vcpu, 0); in handle_encls()
379 return handle_encls_ecreate(vcpu); in handle_encls()
381 return handle_encls_einit(vcpu); in handle_encls()
383 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; in handle_encls()
384 vcpu->run->hw.hardware_exit_reason = EXIT_REASON_ENCLS; in handle_encls()
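
handle_encls() itself is a short funnel: a disabled leaf gets #UD (line 374), SGX left disabled by BIOS gets #GP(0) (line 376), ECREATE and EINIT are emulated (lines 379/381), and any other intercepted leaf is punted to userspace as KVM_EXIT_UNKNOWN (lines 383-384). A compact, compilable model of that dispatch; the leaf numbers (ECREATE=0, EINIT=2) are the SDM's:

    #include <stdio.h>

    enum { ECREATE = 0, EINIT = 2 };    /* ENCLS leaf numbers */

    /* Dispatch shape of handle_encls(), reduced to strings. */
    static const char *dispatch_encls(unsigned int leaf, int leaf_enabled,
                                      int bios_enabled)
    {
        if (!leaf_enabled)
            return "inject #UD";
        if (!bios_enabled)
            return "inject #GP(0)";

        switch (leaf) {
        case ECREATE:   return "emulate ECREATE";
        case EINIT:     return "emulate EINIT";
        default:        return "exit: KVM_EXIT_UNKNOWN";
        }
    }

    int main(void)
    {
        printf("%s\n", dispatch_encls(EINIT, 1, 1));
        return 0;
    }
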
414 void vcpu_setup_sgx_lepubkeyhash(struct kvm_vcpu *vcpu) in vcpu_setup_sgx_lepubkeyhash() argument
416 struct vcpu_vmx *vmx = to_vmx(vcpu); in vcpu_setup_sgx_lepubkeyhash()
426 static bool sgx_intercept_encls_ecreate(struct kvm_vcpu *vcpu) in sgx_intercept_encls_ecreate() argument
431 if (!vcpu->kvm->arch.sgx_provisioning_allowed) in sgx_intercept_encls_ecreate()
434 guest_cpuid = kvm_find_cpuid_entry(vcpu, 0x12, 0); in sgx_intercept_encls_ecreate()
442 guest_cpuid = kvm_find_cpuid_entry(vcpu, 0x12, 1); in sgx_intercept_encls_ecreate()
454 void vmx_write_encls_bitmap(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) in vmx_write_encls_bitmap() argument
469 if (guest_cpuid_has(vcpu, X86_FEATURE_SGX) && in vmx_write_encls_bitmap()
470 sgx_enabled_in_guest_bios(vcpu)) { in vmx_write_encls_bitmap()
471 if (guest_cpuid_has(vcpu, X86_FEATURE_SGX1)) { in vmx_write_encls_bitmap()
473 if (sgx_intercept_encls_ecreate(vcpu)) in vmx_write_encls_bitmap()
477 if (guest_cpuid_has(vcpu, X86_FEATURE_SGX2)) in vmx_write_encls_bitmap()
490 if (!vmcs12 && is_guest_mode(vcpu)) in vmx_write_encls_bitmap()
491 vmcs12 = get_vmcs12(vcpu); in vmx_write_encls_bitmap()
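
vmx_write_encls_bitmap() computes the ENCLS-exiting bitmap: default to intercepting every leaf, open up the SGX1 range when the guest can use it (line 471), force ECREATE back on when sgx_intercept_encls_ecreate() finds the guest's CPUID 0x12 masks diverging from the host's (line 473), and keep EINIT intercepted so KVM can apply the guest's SGXLEPUBKEYHASH values (see vcpu_setup_sgx_lepubkeyhash() above). In guest mode the result is merged with the vmcs12 bitmap (lines 490-491). A sketch of the computation; leaf numbers from the SDM, with the SGX2 range and the vmcs12 merge omitted:

    #include <stdbool.h>
    #include <stdint.h>

    enum { ECREATE = 0, EINIT = 2, ETRACK = 12 };    /* ENCLS leaves */

    /*
     * Shape of the bitmap computation: intercept everything, punch a hole
     * for the SGX1 range (ECREATE..ETRACK), then re-arm the leaves KVM
     * still needs to see.
     */
    static uint64_t compute_encls_bitmap(bool guest_sgx1, bool intercept_ecreate)
    {
        uint64_t bitmap = ~0ULL;    /* default: every leaf exits */

        if (guest_sgx1) {
            bitmap &= ~((1ULL << (ETRACK + 1)) - 1);    /* clear bits 0..12 */
            if (intercept_ecreate)
                bitmap |= 1ULL << ECREATE;    /* attribute policing */
            bitmap |= 1ULL << EINIT;          /* LE hash MSR handling */
        }
        return bitmap;
    }
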