Searched refs:kvm_x86_ops (Results 1 – 15 of 15) sorted by relevance
/linux/arch/x86/kvm/
pmu.c
    271  kvm_x86_ops.pmu_ops->find_fixed_event(idx),            in reprogram_fixed_counter()
    390  if (kvm_x86_ops.pmu_ops->deliver_pmi)                  in kvm_pmu_deliver_pmi()
    391  kvm_x86_ops.pmu_ops->deliver_pmi(vcpu);                in kvm_pmu_deliver_pmi()
    399  kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, msr);          in kvm_pmu_is_valid_msr()
    413  return kvm_x86_ops.pmu_ops->get_msr(vcpu, msr_info);   in kvm_pmu_get_msr()
    428  kvm_x86_ops.pmu_ops->refresh(vcpu);                    in kvm_pmu_refresh()
    436  kvm_x86_ops.pmu_ops->reset(vcpu);                      in kvm_pmu_reset()
    444  kvm_x86_ops.pmu_ops->init(vcpu);                       in kvm_pmu_init()
    476  pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, i);     in kvm_pmu_cleanup()
    482  if (kvm_x86_ops.pmu_ops->cleanup)                      in kvm_pmu_cleanup()
    [all …]
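The pmu.c hits above all dispatch through the pmu_ops sub-table, and they show the convention separating mandatory hooks (refresh, reset, init), which are called unconditionally, from optional ones (deliver_pmi, cleanup), which are NULL-checked first. A minimal standalone sketch of that guard pattern follows; the types are simplified stand-ins, not the real kernel definitions.

#include <stdio.h>

struct kvm_vcpu;                        /* opaque stand-in */

struct kvm_pmu_ops {
	void (*refresh)(struct kvm_vcpu *vcpu);     /* mandatory hook */
	void (*deliver_pmi)(struct kvm_vcpu *vcpu); /* optional hook  */
};

struct kvm_x86_ops_sketch {
	const struct kvm_pmu_ops *pmu_ops;
};

static void vendor_refresh(struct kvm_vcpu *vcpu)
{
	(void)vcpu;
	puts("pmu refresh");
}

/* Vendor table: deliver_pmi intentionally left NULL (unimplemented). */
static const struct kvm_pmu_ops vendor_pmu_ops = {
	.refresh = vendor_refresh,
};

static struct kvm_x86_ops_sketch kvm_x86_ops = { .pmu_ops = &vendor_pmu_ops };

/* Mirrors pmu.c lines 390-391: NULL-check before the indirect call. */
static void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	if (kvm_x86_ops.pmu_ops->deliver_pmi)
		kvm_x86_ops.pmu_ops->deliver_pmi(vcpu);
}

int main(void)
{
	kvm_x86_ops.pmu_ops->refresh(NULL); /* mandatory: called directly */
	kvm_pmu_deliver_pmi(NULL);          /* optional: safely skipped   */
	return 0;
}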
kvm_onhyperv.h
     18  if (kvm_x86_ops.tlb_remote_flush == hv_remote_flush_tlb) {   in hv_track_root_tdp()
x86.c
    125  struct kvm_x86_ops kvm_x86_ops __read_mostly;
    126  EXPORT_SYMBOL_GPL(kvm_x86_ops);
    130  *(((struct kvm_x86_ops *)0)->func));
   6305  if (kvm_x86_ops.mem_enc_op)                             in kvm_arch_vm_ioctl()
   6317  if (kvm_x86_ops.mem_enc_reg_region)                     in kvm_arch_vm_ioctl()
   6329  if (kvm_x86_ops.mem_enc_unreg_region)                   in kvm_arch_vm_ioctl()
   8628  if (kvm_x86_ops.hardware_enable) {                      in kvm_arch_init()
   8723  kvm_x86_ops.hardware_enable = NULL;                     in kvm_arch_exit()
  10002  if (kvm_x86_ops.post_block)                             in vcpu_block()
  11269  memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops));   in kvm_arch_hardware_setup()
  [all …]
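The x86.c hits trace the table's lifecycle: line 125 defines the one writable global (exported at 126 so vendor modules can see it), line 11269 fills it at hardware setup by bulk-copying the vendor's runtime_ops, and the ioctl hits (6305, 6317, 6329) gate whole features on whether an optional hook is non-NULL. A compilable toy version of that flow, assuming simplified stand-in types (kvm_x86_ops_sketch and friends are hypothetical names, not the kernel's):

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct kvm_x86_ops_sketch {
	int (*hardware_enable)(void);
	int (*mem_enc_op)(void *argp);  /* optional: stays NULL when the
	                                   vendor lacks encryption support */
};

struct kvm_x86_init_ops_sketch {
	struct kvm_x86_ops_sketch *runtime_ops;
};

/* The single writable global, as at x86.c line 125. */
static struct kvm_x86_ops_sketch kvm_x86_ops;

/* Setup-time bulk copy of the vendor table, as at x86.c line 11269. */
static void hardware_setup(struct kvm_x86_init_ops_sketch *init_ops)
{
	memcpy(&kvm_x86_ops, init_ops->runtime_ops, sizeof(kvm_x86_ops));
}

/* Ioctl path: an absent optional hook disables the feature, as at 6305. */
static long mem_enc_ioctl(void *argp)
{
	if (!kvm_x86_ops.mem_enc_op)
		return -ENOTTY;
	return kvm_x86_ops.mem_enc_op(argp);
}

static int fake_enable(void) { return 0; }

static struct kvm_x86_ops_sketch vendor_ops = {
	.hardware_enable = fake_enable,
	/* .mem_enc_op deliberately unset */
};

int main(void)
{
	struct kvm_x86_init_ops_sketch init = { .runtime_ops = &vendor_ops };

	hardware_setup(&init);
	printf("mem_enc ioctl -> %ld\n", mem_enc_ioctl(NULL));
	return 0;
}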
pmu.h
     95  return kvm_x86_ops.pmu_ops->pmc_is_enabled(pmc);   in pmc_is_enabled()
xen.c
    592  kvm_x86_ops.patch_hypercall(vcpu, instructions + 5);      in kvm_xen_write_hypercall_page()
    726  vcpu->run->xen.u.hcall.cpl = kvm_x86_ops.get_cpl(vcpu);   in kvm_xen_hypercall()
lapic.c
    121  return kvm_x86_ops.set_hv_timer                            in kvm_can_use_hv_timer()
    710  if (kvm_x86_ops.sync_pir_to_irr)                           in apic_has_interrupt_for_ppr()
   2944  kvm_x86_ops.vcpu_deliver_sipi_vector(vcpu, sipi_vector);   in kvm_apic_accept_events()
hyperv.c
   2416  if (kvm_x86_ops.nested_ops->get_evmcs_version)                 in kvm_get_hv_cpuid()
   2417  evmcs_ver = kvm_x86_ops.nested_ops->get_evmcs_version(vcpu);   in kvm_get_hv_cpuid()
/linux/arch/x86/include/asm/
kvm_host.h
   1306  struct kvm_x86_ops {
   1523  struct kvm_x86_ops *runtime_ops;
   1537  extern struct kvm_x86_ops kvm_x86_ops;
   1540  DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func));
   1547  static_call_update(kvm_x86_##func, kvm_x86_ops.func);                     in kvm_ops_static_call_update()
   1555  return __vmalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);   in kvm_arch_alloc_vm()
   1564  if (kvm_x86_ops.tlb_remote_flush &&                                       in kvm_arch_flush_remote_tlb()
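The kvm_host.h hits show the other half of the dispatch story: besides the struct definition (line 1306) and the exported global (1537), each op also gets a static call (DECLARE_STATIC_CALL at 1540) that kvm_ops_static_call_update() re-points at kvm_x86_ops.func (1547), letting hot paths avoid retpoline-era indirect-call overhead. Static calls exist only in the kernel, so the sketch below approximates the idea with a plain function pointer standing in for the patched trampoline; all names here are stand-ins.

#include <stdio.h>

struct kvm_vcpu;

struct kvm_x86_ops_sketch {
	int (*get_cpl)(struct kvm_vcpu *vcpu);
};

static struct kvm_x86_ops_sketch kvm_x86_ops;

/* Stand-in for the trampoline DECLARE_STATIC_CALL() would emit. */
static int (*kvm_x86_get_cpl)(struct kvm_vcpu *vcpu);

/* Stand-in for kvm_ops_static_call_update() at kvm_host.h line 1547;
 * the kernel uses static_call_update() and patches the call site. */
static void kvm_ops_static_call_update(void)
{
	kvm_x86_get_cpl = kvm_x86_ops.get_cpl;
}

static int vmx_get_cpl(struct kvm_vcpu *vcpu)
{
	(void)vcpu;
	return 0; /* pretend the guest runs in ring 0 */
}

int main(void)
{
	kvm_x86_ops.get_cpl = vmx_get_cpl; /* vendor table installed */
	kvm_ops_static_call_update();
	printf("cpl=%d\n", kvm_x86_get_cpl(NULL));
	return 0;
}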
/linux/arch/x86/kvm/svm/
svm_onhyperv.h
     15  static struct kvm_x86_ops svm_x86_ops;
svm.c
   4581  static struct kvm_x86_ops svm_x86_ops __initdata = {
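svm.c line 4581 (and vmx.c line 7557 below) show where the vendor tables come from: each module defines its full kvm_x86_ops with designated initializers, tagged __initdata because the setup-time memcpy in x86.c makes the original disposable. A simplified, compilable imitation, with hypothetical svm_get_cpl/svm_refresh stubs and __initdata elided so it builds standalone:

#include <stddef.h>

struct kvm_vcpu;

struct kvm_x86_ops_sketch {
	int  (*get_cpl)(struct kvm_vcpu *vcpu);
	void (*refresh)(struct kvm_vcpu *vcpu);
};

/* Hypothetical stubs standing in for the real SVM callbacks. */
static int svm_get_cpl(struct kvm_vcpu *vcpu) { (void)vcpu; return 0; }
static void svm_refresh(struct kvm_vcpu *vcpu) { (void)vcpu; }

/*
 * Designated initializers name every hook explicitly; in the kernel
 * this table is __initdata (elided here) because x86.c memcpy()s it
 * into the global kvm_x86_ops at setup, after which the original is
 * freed with the rest of init memory.
 */
static struct kvm_x86_ops_sketch svm_x86_ops = {
	.get_cpl = svm_get_cpl,
	.refresh = svm_refresh,
};

int main(void)
{
	svm_x86_ops.refresh(NULL);
	return svm_x86_ops.get_cpl(NULL);
}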
/linux/arch/x86/kvm/mmu/
mmu_internal.h
    118  kvm_x86_ops.cpu_dirty_log_size;   in kvm_vcpu_ad_need_write_protect()
paging_tmpl.h
    263  if (kvm_x86_ops.nested_ops->write_log_dirty(vcpu, addr))   in FNAME()
mmu.c
    273  return kvm_x86_ops.tlb_remote_flush_with_range;          in kvm_available_flush_tlb_with_range()
    281  if (range && kvm_x86_ops.tlb_remote_flush_with_range)    in kvm_flush_remote_tlbs_with_range()
   1397  if (kvm_x86_ops.cpu_dirty_log_size)                      in kvm_arch_mmu_enable_log_dirty_pt_masked()
   1405  return kvm_x86_ops.cpu_dirty_log_size;                   in kvm_cpu_dirty_log_size()
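The mmu.c hits at lines 273 and 281 illustrate a fallback idiom: a ranged remote TLB flush is attempted only when the vendor installed tlb_remote_flush_with_range, and absence or failure of the hook degrades to a full flush. A standalone approximation with stand-in types; the kernel's error code here is -ENOTSUPP, swapped for EOPNOTSUPP so this compiles in userspace.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct kvm;
struct kvm_tlb_range {
	unsigned long start_gfn;
	unsigned long pages;
};

static struct {
	int (*tlb_remote_flush_with_range)(struct kvm *kvm,
					   struct kvm_tlb_range *range);
} kvm_x86_ops;  /* hook left NULL: this vendor has no ranged flush */

/* Mirrors mmu.c line 273: ranged flushing exists only if the hook is set. */
static bool kvm_available_flush_tlb_with_range(void)
{
	return kvm_x86_ops.tlb_remote_flush_with_range != NULL;
}

static void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	(void)kvm;
	puts("full remote TLB flush");
}

/* Mirrors mmu.c line 281: try the ranged hook, fall back to a full flush. */
static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
					     struct kvm_tlb_range *range)
{
	int ret = -EOPNOTSUPP;

	if (range && kvm_x86_ops.tlb_remote_flush_with_range)
		ret = kvm_x86_ops.tlb_remote_flush_with_range(kvm, range);

	if (ret)
		kvm_flush_remote_tlbs(kvm);
}

int main(void)
{
	struct kvm_tlb_range range = { .start_gfn = 0, .pages = 1 };

	printf("ranged flush available: %d\n",
	       kvm_available_flush_tlb_with_range());
	kvm_flush_remote_tlbs_with_range(NULL, &range); /* falls back */
	return 0;
}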
/linux/arch/x86/kvm/vmx/
vmx.c
   5053  if (kvm_x86_ops.get_cpl(vcpu) > 0)    in handle_dr()
   7459  if (kvm_x86_ops.set_hv_timer)         in vmx_post_block()
   7557  static struct kvm_x86_ops vmx_x86_ops __initdata = {
nested.c
   4782  if (kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL)) {   in nested_vmx_pmu_entry_exit_ctls_update()
Completed in 92 milliseconds