/linux/include/trace/events/kvm.h
    263: TP_ARGS(gva, gfn),
    266: __field(__u64, gva)
    271: __entry->gva = gva;
    282: TP_ARGS(gva, gfn)
    289: TP_ARGS(gva, gfn)
    296: TP_ARGS(token, gva),
    305: __entry->gva = gva;
    316: TP_ARGS(token, gva)
    323: TP_ARGS(token, gva)
    333: __field(u64, gva)
    [all …]
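
These hits come from the async-page-fault tracepoints, which record a gva alongside a gfn or a token. As a minimal sketch of the TRACE_EVENT() pattern the fragments belong to (the event name is invented for illustration; only the gva/gfn fields are taken from the hits):

    TRACE_EVENT(kvm_gva_example,
            TP_PROTO(__u64 gva, __u64 gfn),
            TP_ARGS(gva, gfn),

            TP_STRUCT__entry(
                    __field(__u64, gva)
                    __field(__u64, gfn)
            ),

            TP_fast_assign(
                    __entry->gva = gva;
                    __entry->gfn = gfn;
            ),

            TP_printk("gva %#llx gfn %#llx", __entry->gva, __entry->gfn)
    );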

/linux/tools/testing/selftests/kvm/lib/s390x/processor.c
     55: TEST_ASSERT((gva % vm->page_size) == 0,  (in virt_pg_map)
     58: gva, vm->page_size);  (in virt_pg_map)
     60: (gva >> vm->page_shift)),  (in virt_pg_map)
     62: gva);  (in virt_pg_map)
     66: gva, vm->page_size);  (in virt_pg_map)
     70: gva, vm->max_gfn, vm->page_size);  (in virt_pg_map)
     75: idx = (gva >> (64 - 11 * ri)) & 0x7ffu;  (in virt_pg_map)
     82: idx = (gva >> 12) & 0x0ffu; /* page index */  (in virt_pg_map)
     99: idx = (gva >> (64 - 11 * ri)) & 0x7ffu;  (in addr_gva2gpa)
    102: gva);  (in addr_gva2gpa)
    [all …]
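
The index arithmetic in these hits follows the s390 DAT layout: each region/segment table level consumes 11 bits of the virtual address, and an 8-bit page index sits directly above the 12-bit page offset. A stand-alone sketch of that math (not the selftest code itself):

    #include <stdint.h>

    /* ri = 1 selects the region-first table, down to ri = 4 for the
     * segment table: each level's index is the next 11 bits from the top. */
    static uint64_t dat_table_index(uint64_t gva, int ri)
    {
            return (gva >> (64 - 11 * ri)) & 0x7ffu;
    }

    /* page index: the 8 bits directly above the 4 KiB page offset */
    static uint64_t dat_page_index(uint64_t gva)
    {
            return (gva >> 12) & 0x0ffu;
    }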

/linux/tools/testing/selftests/kvm/lib/aarch64/processor.c
     24: static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)  (in pgd_index, argument)
     29: return (gva >> shift) & mask;  (in pgd_index)
     32: static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)  (in pud_index, argument)
     40: return (gva >> shift) & mask;  (in pud_index)
     43: static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)  (in pmd_index, argument)
     51: return (gva >> shift) & mask;  (in pmd_index)
     54: static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva)  (in pte_index, argument)
     57: return (gva >> vm->page_shift) & mask;  (in pte_index)
    141: vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)  (in addr_gva2gpa, argument)
    148: ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8;  (in addr_gva2gpa)
    [all …]
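
Each helper above pulls one translation level's table index out of the gva with a shift and a mask, and addr_gva2gpa() then follows 8-byte descriptors level by level starting from vm->pgd. A stand-alone sketch of the arithmetic, assuming a 4 KiB granule with four levels (the real helpers derive shift and mask from vm->page_shift and vm->va_bits):

    #include <stdint.h>

    /* 9 index bits per level above the 12-bit page offset */
    static uint64_t level_index(uint64_t gva, unsigned int shift)
    {
            return (gva >> shift) & 0x1ffu;
    }

    static uint64_t pte_idx(uint64_t gva) { return level_index(gva, 12); }
    static uint64_t pmd_idx(uint64_t gva) { return level_index(gva, 21); }
    static uint64_t pud_idx(uint64_t gva) { return level_index(gva, 30); }
    static uint64_t pgd_idx(uint64_t gva) { return level_index(gva, 39); }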

/linux/tools/testing/selftests/kvm/lib/aarch64/ucall.c
    102: vm_vaddr_t gva;  (in get_ucall, local)
    106: memcpy(&gva, run->mmio.data, sizeof(gva));  (in get_ucall)
    107: memcpy(&ucall, addr_gva2hva(vm, gva), sizeof(ucall));  (in get_ucall)

/linux/arch/x86/kvm/vmx/sgx.c
     24: int size, int alignment, gva_t *gva)  (in sgx_get_encls_gva, argument)
     30: *gva = offset;  (in sgx_get_encls_gva)
     33: *gva += s.base;  (in sgx_get_encls_gva)
     36: if (!IS_ALIGNED(*gva, alignment)) {  (in sgx_get_encls_gva)
     39: fault = is_noncanonical_address(*gva, vcpu);  (in sgx_get_encls_gva)
     41: *gva &= 0xffffffff;  (in sgx_get_encls_gva)
     44: (*gva > s.limit) ||  (in sgx_get_encls_gva)
     46: (((u64)*gva + size - 1) > s.limit + 1));  (in sgx_get_encls_gva)
     78: *gpa = kvm_mmu_gva_to_gpa_write(vcpu, gva, &ex);  (in sgx_gva_to_gpa)
     80: *gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, &ex);  (in sgx_gva_to_gpa)
    [all …]
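
Assembled from these fragments, sgx_get_encls_gva() computes the linear address of an ENCLS memory operand and validates it: alignment always, canonical form in 64-bit mode, segment limit otherwise. A simplified sketch of that flow (the mode checks and the s.base/s.limit handling are reconstructed assumptions, not the verbatim function):

    *gva = offset;
    if (!is_64_bit_mode(vcpu))      /* assumed: segment base only added outside 64-bit mode */
            *gva += s.base;

    if (!IS_ALIGNED(*gva, alignment)) {
            fault = true;
    } else if (is_64_bit_mode(vcpu)) {
            fault = is_noncanonical_address(*gva, vcpu);
    } else {
            *gva &= 0xffffffff;     /* truncate to the 32-bit address space */
            fault = (*gva > s.limit) ||
                    (((u64)*gva + size - 1) > s.limit + 1);
    }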

/linux/arch/x86/kvm/vmx/vmx_ops.h
     19: void invvpid_error(unsigned long ext, u16 vpid, gva_t gva);
    257: static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)  (in __invvpid, argument)
    262: u64 gva;  (in __invvpid, member)
    263: } operand = { vpid, 0, gva };  (in __invvpid)
    265: vmx_asm2(invvpid, "r"(ext), "m"(operand), ext, vpid, gva);  (in __invvpid)
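
The struct fragment at hits 262-263 is the in-memory INVVPID descriptor the instruction consumes. Reconstructed from the hits (the struct tag is invented for illustration; the 16/48-bit split is the architectural descriptor format):

    struct invvpid_operand {
            u64 vpid : 16;  /* VPID whose mappings are invalidated */
            u64 rsvd : 48;  /* reserved, must be zero */
            u64 gva;        /* linear address, for address-specific invalidation */
    };

    /* initialized exactly as in __invvpid(): */
    struct invvpid_operand operand = { vpid, 0, gva };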

/linux/arch/x86/kvm/vmx/nested.c
    4798: gva_t gva;  (in nested_vmx_get_vmptr, local)
    4804: sizeof(*vmpointer), &gva)) {  (in nested_vmx_get_vmptr)
    5070: gva_t gva = 0;  (in handle_vmread, local)
    5109: instr_info, true, len, &gva))  (in handle_vmread)
    5154: gva_t gva;  (in handle_vmwrite, local)
    5183: instr_info, false, len, &gva))  (in handle_vmwrite)
    5185: r = kvm_read_guest_virt(vcpu, gva, &value, len, &e);  (in handle_vmwrite)
    5340: gva_t gva;  (in handle_vmptrst, local)
    5350: true, sizeof(gpa_t), &gva))  (in handle_vmptrst)
    5368: gva_t gva;  (in handle_invept, local)
    [all …]
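
All of these handlers share one pattern: decode the VMX instruction's memory operand into a gva, then access guest memory through it. A condensed sketch of the handle_vmwrite() variant visible above (error paths trimmed; surrounding locals assumed):

    gva_t gva;
    struct x86_exception e;
    u64 value;
    int r;

    /* decode base/index/displacement from the exit info into a gva */
    if (get_vmx_mem_address(vcpu, exit_qualification,
                            instr_info, false, len, &gva))
            return 1;

    /* read the operand through the guest's page tables */
    r = kvm_read_guest_virt(vcpu, gva, &value, len, &e);
    if (r != X86EMUL_CONTINUE)
            return kvm_handle_memory_failure(vcpu, r, &e);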

/linux/tools/testing/selftests/kvm/lib/perf_test_util.c
     47: uint64_t gva;  (in guest_code, local)
     54: gva = vcpu_args->gva;  (in guest_code)
     59: uint64_t addr = gva + (i * pta->guest_page_size);  (in guest_code)
     84: vcpu_args->gva = guest_test_virt_mem +  (in perf_test_setup_vcpus)
     90: vcpu_args->gva = guest_test_virt_mem;  (in perf_test_setup_vcpus)

/linux/tools/testing/selftests/kvm/lib/kvm_util.c
     684: int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)  (in kvm_memcmp_hva_gva, argument)
     699: uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset);  (in kvm_memcmp_hva_gva)
    2283: void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)  (in addr_gva2hva, argument)
    2285: return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));  (in addr_gva2hva)

/linux/arch/x86/kvm/x86.h
    225: gva_t gva, gfn_t gfn, unsigned access)  (in vcpu_cache_mmio_info, argument)
    236: vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;  (in vcpu_cache_mmio_info)
    253: static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)  (in vcpu_clear_mmio_info, argument)
    255: if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))  (in vcpu_clear_mmio_info)
    261: static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)  (in vcpu_match_mmio_gva, argument)
    264: vcpu->arch.mmio_gva == (gva & PAGE_MASK))  (in vcpu_match_mmio_gva)
    329: void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
    452: int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
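
The MMIO fast-path cache stores the gva page-aligned, so both the clear and the match helpers compare only the page part; note that hit 236 stores 0 when the MMU is nested, since the address there belongs to the nested guest and would be meaningless in later L1 lookups. The match predicate reduces to this sketch (the function name is illustrative):

    /* hit iff a gva is cached and its page matches the access's page */
    static inline bool mmio_cache_hit(struct kvm_vcpu *vcpu, unsigned long gva)
    {
            return vcpu->arch.mmio_gva &&
                   vcpu->arch.mmio_gva == (gva & PAGE_MASK);
    }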

/linux/arch/x86/kvm/trace.h
    818: TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
    819: TP_ARGS(gva, gpa, write, gpa_match),
    822: __field(gva_t, gva)
    829: __entry->gva = gva;
    835: TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,

/linux/arch/s390/kvm/gaccess.c
    534: tec->addr = gva >> PAGE_SHIFT;  (in trans_exc)
    618: union vaddress vaddr = {.addr = gva};  (in guest_translate)
    619: union raddress raddr = {.addr = gva};  (in guest_translate)
    917: gva = kvm_s390_logical_to_effective(vcpu, gva);  (in guest_translate_address)
    918: rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);  (in guest_translate_address)
    923: return trans_exc(vcpu, PGM_PROTECTION, gva, 0,  (in guest_translate_address)
    928: rc = guest_translate(vcpu, gva, gpa, asce, mode, &prot);  (in guest_translate_address)
    930: return trans_exc(vcpu, rc, gva, 0, mode, prot);  (in guest_translate_address)
    932: *gpa = kvm_s390_real_to_abs(vcpu, gva);  (in guest_translate_address)
    957: currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE));  (in check_gva_range)
    [all …]
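
check_gva_range() (hit 957) splits an arbitrary-length guest range into pieces that never cross a page boundary and translates each piece separately. The chunking idiom, as a sketch:

    while (length) {
            /* bytes left in the current guest page */
            unsigned long currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE));

            /* translate / check [gva, gva + currlen) here */

            gva += currlen;
            length -= currlen;
    }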

/linux/arch/s390/kvm/gaccess.h
    189: int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
    191: int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,

/linux/tools/testing/selftests/kvm/access_tracking_perf_test.c
     96: static uint64_t lookup_pfn(int pagemap_fd, struct kvm_vm *vm, uint64_t gva)  (in lookup_pfn, argument)
     98: uint64_t hva = (uint64_t) addr_gva2hva(vm, gva);  (in lookup_pfn)
    132: uint64_t base_gva = perf_test_args.vcpu_args[vcpu_id].gva;  (in mark_vcpu_memory_idle)
    151: uint64_t gva = base_gva + page * perf_test_args.guest_page_size;  (in mark_vcpu_memory_idle, local)
    152: uint64_t pfn = lookup_pfn(pagemap_fd, vm, gva);  (in mark_vcpu_memory_idle)
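
lookup_pfn() resolves the gva to the host virtual address backing it and then asks /proc/pid/pagemap which physical frame holds that page. A self-contained sketch of the pagemap step (the one-u64-per-page format with the PFN in bits 0-54 is the documented pagemap ABI; the helper name is illustrative):

    #include <stdint.h>
    #include <sys/types.h>
    #include <unistd.h>

    static uint64_t hva_to_pfn(int pagemap_fd, uint64_t hva, long page_size)
    {
            uint64_t entry;
            off_t offset = (off_t)(hva / page_size) * sizeof(entry);

            /* one 64-bit entry per virtual page, indexed by page number */
            if (pread(pagemap_fd, &entry, sizeof(entry), offset) != sizeof(entry))
                    return 0;

            return entry & ((1ULL << 55) - 1);      /* bits 0-54: PFN */
    }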

/linux/arch/mips/kvm/tlb.c
    166: int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,  (in kvm_vz_guest_tlb_lookup, argument)
    184: write_gc0_entryhi((o_entryhi & 0x3ff) | (gva & ~0xfffl));  (in kvm_vz_guest_tlb_lookup)
    226: pa = entrylo[!!(gva & pagemaskbit)];  (in kvm_vz_guest_tlb_lookup)
    240: pa |= gva & ~(pagemask | pagemaskbit);  (in kvm_vz_guest_tlb_lookup)
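
Hits 226 and 240 are the even/odd page selection: a MIPS TLB entry maps a pair of adjacent virtual pages, and the single gva bit just above the page offset ('pagemaskbit') chooses between entrylo0 and entrylo1. As a sketch:

    /* pick the even or odd half of the matched TLB pair */
    pa = entrylo[!!(gva & pagemaskbit)];

    /* then merge in the offset bits the TLB does not translate */
    pa |= gva & ~(pagemask | pagemaskbit);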

/linux/arch/mips/kvm/vz.c
    197: static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)  (in kvm_vz_gva_to_gpa_cb, argument)
    200: return gva;  (in kvm_vz_gva_to_gpa_cb)
    689: static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,  (in kvm_vz_gva_to_gpa, argument)
    692: u32 gva32 = gva;  (in kvm_vz_gva_to_gpa)
    695: if ((long)gva == (s32)gva32) {  (in kvm_vz_gva_to_gpa)
    750: } else if ((gva & 0xc000000000000000) == 0x8000000000000000) {  (in kvm_vz_gva_to_gpa)
    758: if (segctl & (1ull << (56 + ((gva >> 59) & 0x7)))) {  (in kvm_vz_gva_to_gpa)
    772: *gpa = gva & 0x07ffffffffffffff;  (in kvm_vz_gva_to_gpa)
    778: return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa);  (in kvm_vz_gva_to_gpa)
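
kvm_vz_gva_to_gpa() classifies the address before doing any real work: a value that equals its own sign-extended low 32 bits lies in a 32-bit compatibility segment, the 0x8000... pattern selects the unmapped 64-bit xkphys window, and anything else needs a guest TLB lookup. Condensed from the hits above (segment details elided):

    u32 gva32 = gva;

    if ((long)gva == (s32)gva32) {
            /* 32-bit compatibility segments, handled per segment */
    } else if ((gva & 0xc000000000000000ULL) == 0x8000000000000000ULL) {
            /* xkphys: unmapped; the physical address is the low bits */
            *gpa = gva & 0x07ffffffffffffffULL;
            return 0;
    }

    /* mapped addresses: ask the guest TLB */
    return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa);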

/linux/tools/testing/selftests/kvm/x86_64/get_cpuid_test.c
    148: vm_vaddr_t gva = vm_vaddr_alloc(vm, size, KVM_UTIL_MIN_VADDR);  (in vcpu_alloc_cpuid, local)
    149: struct kvm_cpuid2 *guest_cpuids = addr_gva2hva(vm, gva);  (in vcpu_alloc_cpuid)
    153: *p_gva = gva;  (in vcpu_alloc_cpuid)
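
vcpu_alloc_cpuid() is the usual selftest allocate-then-mirror idiom: reserve guest virtual memory, populate it through its host mapping, and hand the gva to the guest. In sketch form (the 'cpuid' source buffer is assumed):

    vm_vaddr_t gva = vm_vaddr_alloc(vm, size, KVM_UTIL_MIN_VADDR);
    struct kvm_cpuid2 *guest_cpuids = addr_gva2hva(vm, gva);

    /* writes through guest_cpuids are visible to the guest at 'gva' */
    memcpy(guest_cpuids, cpuid, size);
    *p_gva = gva;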

/linux/tools/testing/selftests/kvm/include/perf_test_util.h
     24: uint64_t gva;  (member)

/linux/tools/testing/selftests/kvm/include/kvm_util.h
    103: int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
    155: void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
    174: vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);

/linux/arch/x86/include/asm/kvm_host.h
     429: void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
    1767: gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
    1769: gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
    1771: gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
    1773: gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
    1788: void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
    1790: gva_t gva, hpa_t root_hpa);
    1791: void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
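
The four kvm_mmu_gva_to_gpa_*() variants walk the guest page tables with read, fetch, write, or system (implicit, privilege-check-exempt) access rights. A sketch of the typical call pattern (the UNMAPPED_GVA sentinel and the exception out-parameter follow common KVM usage of this era; treat the error handling as an assumption):

    struct x86_exception exception;
    gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, &exception);

    if (gpa == UNMAPPED_GVA) {
            /* translation failed; 'exception' describes the page fault */
            kvm_inject_emulated_page_fault(vcpu, &exception);
            return 1;
    }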

/linux/tools/testing/selftests/kvm/lib/x86_64/processor.c
    558: vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)  (in addr_gva2gpa, argument)
    567: index[0] = (gva >> 12) & 0x1ffu;  (in addr_gva2gpa)
    568: index[1] = (gva >> 21) & 0x1ffu;  (in addr_gva2gpa)
    569: index[2] = (gva >> 30) & 0x1ffu;  (in addr_gva2gpa)
    570: index[3] = (gva >> 39) & 0x1ffu;  (in addr_gva2gpa)
    590: return (pte[index[0]].pfn * vm->page_size) + (gva & 0xfffu);  (in addr_gva2gpa)
    593: TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);  (in addr_gva2gpa)
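
The selftest walker splits a 48-bit gva into four 9-bit indexes above the 12-bit page offset, and hit 590 reassembles the gpa from the final PTE's frame number. The index split, stand-alone:

    #include <stdint.h>

    static void gva_to_indexes(uint64_t gva, uint64_t index[4])
    {
            index[0] = (gva >> 12) & 0x1ffu;        /* page table   */
            index[1] = (gva >> 21) & 0x1ffu;        /* page dir     */
            index[2] = (gva >> 30) & 0x1ffu;        /* page dir ptr */
            index[3] = (gva >> 39) & 0x1ffu;        /* PML4         */
    }

    /* final step, as in hit 590: gpa = frame * page_size + page offset */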

/linux/arch/x86/kvm/mmu/mmu.c
    2561: gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);  (in kvm_mmu_unprotect_page_virt)
    3025: gva_t gva = fault->is_tdp ? 0 : fault->addr;  (in handle_abnormal_pfn, local)
    3027: vcpu_cache_mmio_info(vcpu, gva, fault->gfn,  (in handle_abnormal_pfn)
    5359: gva_t gva, hpa_t root_hpa)  (in kvm_mmu_invalidate_gva, argument)
    5366: if (is_noncanonical_address(gva, vcpu))  (in kvm_mmu_invalidate_gva)
    5369: static_call(kvm_x86_tlb_flush_gva)(vcpu, gva);  (in kvm_mmu_invalidate_gva)
    5376: mmu->invlpg(vcpu, gva, mmu->root_hpa);  (in kvm_mmu_invalidate_gva)
    5393: mmu->invlpg(vcpu, gva, root_hpa);  (in kvm_mmu_invalidate_gva)
    5412: mmu->invlpg(vcpu, gva, mmu->root_hpa);  (in kvm_mmu_invpcid_gva)
    5419: mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);  (in kvm_mmu_invpcid_gva)
    [all …]

/linux/arch/x86/kvm/mmu/paging_tmpl.h
    943: static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)  (in FNAME)
    951: vcpu_clear_mmio_info(vcpu, gva);  (in FNAME)
    965: for_each_shadow_entry_using_root(vcpu, root_hpa, gva, iterator) {  (in FNAME)

/linux/Documentation/virt/kvm/mmu.rst
     49: gva guest virtual address
     79: guest physical addresses, to host physical addresses (gva->gpa->hpa)
    139: paging: gva->gpa->hpa
    140: paging, tdp: (gva->)gpa->hpa
    322: (gva->gpa or ngpa->gpa)
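
In code terms, the two modes the document names differ only in who composes the stages (pseudocode with invented helper names, not actual kernel functions):

    /* shadow paging: KVM's shadow page tables encode the full chain */
    hpa = gpa_to_hpa(gva_to_gpa(gva));      /* gva -> gpa -> hpa */

    /* TDP: hardware walks the guest tables for gva -> gpa,
     * so KVM's page tables only provide gpa -> hpa */
    hpa = gpa_to_hpa(gpa);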

/linux/arch/x86/kvm/svm/svm.c
    2391: gva_t gva = kvm_rax_read(vcpu);  (in invlpga_interception, local)
    2396: gva = (u32)gva;  (in invlpga_interception)
    2398: trace_kvm_invlpga(to_svm(vcpu)->vmcb->save.rip, asid, gva);  (in invlpga_interception)
    2401: kvm_mmu_invlpg(vcpu, gva);  (in invlpga_interception)
    3118: gva_t gva;  (in invpcid_interception, local)
    3131: gva = svm->vmcb->control.exit_info_1;  (in invpcid_interception)
    3133: return kvm_handle_invpcid(vcpu, type, gva);  (in invpcid_interception)
    3687: static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)  (in svm_flush_tlb_gva, argument)
    3691: invlpga(gva, svm->vmcb->control.asid);  (in svm_flush_tlb_gva)
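
invlpga_interception() shows how the INVLPGA operands reach the handler: the linear address arrives in RAX (truncated to 32 bits at hit 2396, presumably outside long mode) and is then fed to the common MMU invalidation path. As a sketch, with the truncation condition an assumption and address-size prefixes ignored:

    gva_t gva = kvm_rax_read(vcpu);

    if (!is_long_mode(vcpu))        /* assumed condition for the truncation */
            gva = (u32)gva;

    trace_kvm_invlpga(to_svm(vcpu)->vmcb->save.rip, asid, gva);
    kvm_mmu_invlpg(vcpu, gva);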