/linux/arch/x86/kvm/mmu/

mmutrace.h
    13  __field(__u64, gfn) \
    20  __entry->gfn = sp->gfn; \
    216  __field(gfn_t, gfn)
    223  __entry->gfn = gfn;
    245  __entry->gfn = gfn;
    338  __field(u64, gfn)
    349  __entry->gfn = gfn;
    374  __field(u64, gfn)
    380  __entry->gfn = fault->gfn;
    396  __field(u64, gfn)
    [all …]
tdp_mmu.c
    185  sp->gfn = gfn;  in alloc_tdp_mmu_page()
    319  gfn_t base_gfn = sp->gfn;  in handle_removed_tdp_mmu_page()
    745  (iter.gfn < start ||  in zap_gfn_range()
    968  tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {  in kvm_tdp_mmu_map()
    1317  gfn + BITS_PER_LONG) {  in clear_dirty_pt_masked()
    1322  !(mask & (1UL << (iter.gfn - gfn))))  in clear_dirty_pt_masked()
    1325  mask &= ~(1UL << (iter.gfn - gfn));  in clear_dirty_pt_masked()
    1440  min_level, gfn, gfn + 1) {  in write_protect_gfn()
    1490  gfn_t gfn = addr >> PAGE_SHIFT;  in kvm_tdp_mmu_get_walk() local
    1495  tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {  in kvm_tdp_mmu_get_walk()
    [all …]
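The clear_dirty_pt_masked() hits above follow a common pattern: walk at most BITS_PER_LONG pages starting at gfn, skip pages whose bit is not set in mask, and clear each bit as it is consumed so the walk can stop once the mask is empty. A minimal standalone sketch of that bit-walking shape (the helper names and the fixed 64-page window here are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

/* Illustrative stand-in for clearing the dirty/writable state of one page. */
static void clear_one_dirty_page(gfn_t gfn)
{
    printf("clearing dirty state for gfn %#llx\n", (unsigned long long)gfn);
}

/*
 * Walk the pages [gfn, gfn + 64) and act only on those selected by 'mask',
 * clearing each mask bit once it has been handled so the loop can stop as
 * soon as the remaining mask is zero -- the same shape as
 * clear_dirty_pt_masked() in tdp_mmu.c.
 */
static void clear_dirty_masked(gfn_t gfn, uint64_t mask)
{
    gfn_t cur;

    for (cur = gfn; mask && cur < gfn + 64; cur++) {
        if (!(mask & (1ULL << (cur - gfn))))
            continue;

        clear_one_dirty_page(cur);
        mask &= ~(1ULL << (cur - gfn));
    }
}

int main(void)
{
    clear_dirty_masked(0x1000, 0x5);    /* touches gfns 0x1000 and 0x1002 */
    return 0;
}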
tdp_iter.c
    14  SHADOW_PT_INDEX(iter->gfn << PAGE_SHIFT, iter->level);  in tdp_iter_refresh_sptep()
    18  static gfn_t round_gfn_for_level(gfn_t gfn, int level)  in round_gfn_for_level() argument
    20  return gfn & -KVM_PAGES_PER_HPAGE(level);  in round_gfn_for_level()
    33  iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level);  in tdp_iter_restart()
    98  iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level);  in try_step_down()
    117  if (SHADOW_PT_INDEX(iter->gfn << PAGE_SHIFT, iter->level) ==  in try_step_side()
    121  iter->gfn += KVM_PAGES_PER_HPAGE(iter->level);  in try_step_side()
    122  iter->next_last_level_gfn = iter->gfn;  in try_step_side()
    140  iter->gfn = round_gfn_for_level(iter->gfn, iter->level);  in try_step_up()
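round_gfn_for_level() works because KVM_PAGES_PER_HPAGE(level) is a power of two, so its negation is an alignment mask that rounds a gfn down to the first page covered by the mapping at that level. A small self-contained sketch of the same arithmetic, assuming x86-style 4 KiB pages with 9 index bits per level (the constants are defined locally for illustration, not taken from kernel headers):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

/* Assumed layout: 4 KiB base pages, 9 bits of index per paging level. */
#define PT_LEVEL_BITS        9
#define PAGES_PER_HPAGE(lvl) (1ULL << (((lvl) - 1) * PT_LEVEL_BITS))

/*
 * Align a gfn down to the first page covered by its level-'level' entry.
 * Because PAGES_PER_HPAGE() is a power of two, -PAGES_PER_HPAGE() is the
 * corresponding alignment mask, mirroring round_gfn_for_level().
 */
static gfn_t round_gfn_for_level(gfn_t gfn, int level)
{
    return gfn & -PAGES_PER_HPAGE(level);
}

int main(void)
{
    gfn_t gfn = 0x12345;

    /* Level 1: no rounding; level 2: 2 MiB region; level 3: 1 GiB region. */
    printf("%#llx %#llx %#llx\n",
           (unsigned long long)round_gfn_for_level(gfn, 1),
           (unsigned long long)round_gfn_for_level(gfn, 2),
           (unsigned long long)round_gfn_for_level(gfn, 3));
    return 0;
}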
page_track.c
    86  static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn,  in update_gfn_track() argument
    91  index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);  in update_gfn_track()
    114  struct kvm_memory_slot *slot, gfn_t gfn,  in kvm_slot_page_track_add_page() argument
    125  update_gfn_track(slot, gfn, mode, 1);  in kvm_slot_page_track_add_page()
    131  kvm_mmu_gfn_disallow_lpage(slot, gfn);  in kvm_slot_page_track_add_page()
    134  if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))  in kvm_slot_page_track_add_page()
    153  struct kvm_memory_slot *slot, gfn_t gfn,  in kvm_slot_page_track_remove_page() argument
    163  update_gfn_track(slot, gfn, mode, -1);  in kvm_slot_page_track_remove_page()
    169  kvm_mmu_gfn_allow_lpage(slot, gfn);  in kvm_slot_page_track_remove_page()
    177  struct kvm_memory_slot *slot, gfn_t gfn,  in kvm_slot_page_track_is_active() argument
    [all …]
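The add/remove pair above keeps a per-gfn reference count in the memslot: update_gfn_track() adjusts the counter at the index gfn_to_index() computes, and the page stays write-protected while the count is non-zero. A rough standalone sketch of that counting scheme; the demo_memslot structure and its fields are simplified stand-ins, not struct kvm_memory_slot:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

typedef uint64_t gfn_t;

/* Hypothetical stand-in for the per-memslot tracking array. */
struct demo_memslot {
    gfn_t base_gfn;
    uint64_t npages;
    unsigned short *write_track;    /* one counter per 4K page */
};

/* Index of a gfn within its slot, as gfn_to_index() yields for PG_LEVEL_4K. */
static uint64_t gfn_to_index_4k(gfn_t gfn, gfn_t base_gfn)
{
    return gfn - base_gfn;
}

/*
 * Mirror of update_gfn_track(): bump or drop the per-page tracker count.
 * The page should be write-protected whenever the count is non-zero.
 */
static void update_gfn_track(struct demo_memslot *slot, gfn_t gfn, short count)
{
    uint64_t index = gfn_to_index_4k(gfn, slot->base_gfn);

    slot->write_track[index] += count;
    /* Simplistic guard against dropping a gfn that was never tracked. */
    assert(slot->write_track[index] != (unsigned short)-1);
}

int main(void)
{
    struct demo_memslot slot = {
        .base_gfn = 0x100,
        .npages = 16,
        .write_track = calloc(16, sizeof(unsigned short)),
    };

    update_gfn_track(&slot, 0x105, 1);     /* add_page: start tracking */
    update_gfn_track(&slot, 0x105, -1);    /* remove_page: stop tracking */
    free(slot.write_track);
    return 0;
}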
mmu.c
    792  sp->gfn,  in kvm_mmu_page_set_gfn()
    836  gfn_t gfn;  in account_shadowed() local
    839  gfn = sp->gfn;  in account_shadowed()
    866  gfn_t gfn;  in unaccount_shadowed() local
    869  gfn = sp->gfn;  in unaccount_shadowed()
    1083  gfn_t gfn;  in rmap_remove() local
    1500  gfn_t gfn;  member
    2100  if (sp->gfn != gfn) {  in kvm_mmu_get_page()
    2155  sp->gfn = gfn;  in kvm_mmu_get_page()
    2768  gfn_t gfn;  in direct_pte_prefetch_many() local
    [all …]
mmu_audit.c
    96  gfn_t gfn;  in audit_mappings() local
    113  gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);  in audit_mappings()
    114  pfn = kvm_vcpu_gfn_to_pfn_atomic(vcpu, gfn);  in audit_mappings()
    133  gfn_t gfn;  in inspect_spte_has_rmap() local
    139  slot = __gfn_to_memslot(slots, gfn);  in inspect_spte_has_rmap()
    143  audit_printk(kvm, "no memslot for gfn %llx\n", gfn);  in inspect_spte_has_rmap()
    145  (long int)(sptep - rev_sp->spt), rev_sp->gfn);  in inspect_spte_has_rmap()
    150  rmap_head = gfn_to_rmap(gfn, rev_sp->role.level, slot);  in inspect_spte_has_rmap()
    202  slot = __gfn_to_memslot(slots, sp->gfn);  in audit_write_protection()
    203  rmap_head = gfn_to_rmap(sp->gfn, PG_LEVEL_4K, slot);  in audit_write_protection()
    [all …]
paging_tmpl.h
    95  gfn_t gfn;  member
    360  gfn_t gfn;  in FNAME() local
    468  gfn += pse36_gfn_delta(pte);  in FNAME()
    566  gfn_t gfn;  in FNAME() local
    574  gfn = gpte_to_gfn(gpte);  in FNAME()
    665  gfn_t base_gfn = fault->gfn;  in FNAME()
    810  gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1];  in FNAME() local
    862  fault->gfn = walker.gfn;  in FNAME()
    1099  gfn_t gfn;  in FNAME() local
    1115  gfn = gpte_to_gfn(gpte);  in FNAME()
    [all …]
mmu_internal.h
    51  gfn_t gfn;  member
    122  gfn_t gfn, bool can_unsync, bool prefetch);
    124  void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
    125  void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
    127  struct kvm_memory_slot *slot, u64 gfn,
    154  const struct kvm_memory_slot *slot, gfn_t gfn,
spte.c
    54  u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)  in make_mmio_spte() argument
    58  u64 gpa = gfn << PAGE_SHIFT;  in make_mmio_spte()
    94  unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,  in make_spte() argument
    133  spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,  in make_spte()
    164  if (mmu_try_to_unsync_pages(vcpu, slot, gfn, can_unsync, prefetch)) {  in make_spte()
    166  __func__, gfn);  in make_spte()
    187  mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);  in make_spte()
tdp_mmu.h
    31  gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level + 1);  in kvm_tdp_mmu_zap_sp()
    44  sp->gfn, end, false, false);  in kvm_tdp_mmu_zap_sp()
    65  gfn_t gfn, unsigned long mask,
    71  struct kvm_memory_slot *slot, gfn_t gfn,
/linux/drivers/gpu/drm/i915/gvt/

page_track.c
    35  struct intel_vgpu *vgpu, unsigned long gfn)  in intel_vgpu_find_page_track() argument
    37  return radix_tree_lookup(&vgpu->page_track_tree, gfn);  in intel_vgpu_find_page_track()
    56  track = intel_vgpu_find_page_track(vgpu, gfn);  in intel_vgpu_register_page_track()
    67  ret = radix_tree_insert(&vgpu->page_track_tree, gfn, track);  in intel_vgpu_register_page_track()
    83  unsigned long gfn)  in intel_vgpu_unregister_page_track() argument
    87  track = radix_tree_delete(&vgpu->page_track_tree, gfn);  in intel_vgpu_unregister_page_track()
    90  intel_gvt_hypervisor_disable_page_track(vgpu, gfn);  in intel_vgpu_unregister_page_track()
    108  track = intel_vgpu_find_page_track(vgpu, gfn);  in intel_vgpu_enable_page_track()
    115  ret = intel_gvt_hypervisor_enable_page_track(vgpu, gfn);  in intel_vgpu_enable_page_track()
    135  track = intel_vgpu_find_page_track(vgpu, gfn);  in intel_vgpu_disable_page_track()
    [all …]
mpt.h
    165  struct intel_vgpu *vgpu, unsigned long gfn)  in intel_gvt_hypervisor_enable_page_track() argument
    167  return intel_gvt_host.mpt->enable_page_track(vgpu->handle, gfn);  in intel_gvt_hypervisor_enable_page_track()
    179  struct intel_vgpu *vgpu, unsigned long gfn)  in intel_gvt_hypervisor_disable_page_track() argument
    181  return intel_gvt_host.mpt->disable_page_track(vgpu->handle, gfn);  in intel_gvt_hypervisor_disable_page_track()
    225  struct intel_vgpu *vgpu, unsigned long gfn)  in intel_gvt_hypervisor_gfn_to_mfn() argument
    227  return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);  in intel_gvt_hypervisor_gfn_to_mfn()
    241  struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size,  in intel_gvt_hypervisor_dma_map_guest_page() argument
    286  struct intel_vgpu *vgpu, unsigned long gfn,  in intel_gvt_hypervisor_map_gfn_to_mfn() argument
    294  return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr,  in intel_gvt_hypervisor_map_gfn_to_mfn()
    389  struct intel_vgpu *vgpu, unsigned long gfn)  in intel_gvt_hypervisor_is_valid_gfn() argument
    [all …]
kvmgt.c
    87  gfn_t gfn;  member
    105  gfn_t gfn;  member
    378  if (gfn < itr->gfn)  in __gvt_cache_find_gfn()
    380  else if (gfn > itr->gfn)  in __gvt_cache_find_gfn()
    400  new->gfn = gfn;  in __gvt_cache_add()
    411  if (gfn < itr->gfn)  in __gvt_cache_add()
    502  if (gfn == p->gfn) {  in __kvmgt_protect_table_find()
    512  gfn_t gfn)  in kvmgt_gfn_is_write_protected() argument
    531  p->gfn = gfn;  in kvmgt_protect_table_add()
    536  gfn_t gfn)  in kvmgt_protect_table_del() argument
    [all …]
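__gvt_cache_find_gfn() and __gvt_cache_add() descend a tree ordered by gfn, going left for smaller keys and right for larger ones. A minimal standalone sketch of that ordered lookup, using a plain binary search tree here instead of the kernel rb-tree API the real code uses; the node type and field names are hypothetical:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

/* Hypothetical DMA-mapping cache node keyed by gfn. */
struct demo_dma_node {
    gfn_t gfn;
    uint64_t dma_addr;
    struct demo_dma_node *left, *right;
};

/*
 * Same comparison shape as __gvt_cache_find_gfn(): smaller gfns to the
 * left, larger to the right, equal means found.
 */
static struct demo_dma_node *cache_find_gfn(struct demo_dma_node *root, gfn_t gfn)
{
    while (root) {
        if (gfn < root->gfn)
            root = root->left;
        else if (gfn > root->gfn)
            root = root->right;
        else
            return root;
    }
    return NULL;
}

int main(void)
{
    struct demo_dma_node leaf = { .gfn = 0x30, .dma_addr = 0xdead0000 };
    struct demo_dma_node root = { .gfn = 0x20, .right = &leaf };

    printf("found: %d\n", cache_find_gfn(&root, 0x30) != NULL);
    return 0;
}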
hypercall.h
    57  int (*enable_page_track)(unsigned long handle, u64 gfn);
    58  int (*disable_page_track)(unsigned long handle, u64 gfn);
    63  unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
    65  int (*dma_map_guest_page)(unsigned long handle, unsigned long gfn,
    71  int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
    79  bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
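These members form an ops table: a hypervisor backend (such as KVMGT) fills in the function pointers, and the mpt.h wrappers like intel_gvt_hypervisor_enable_page_track() above simply dispatch through them. A stripped-down sketch of that indirection; the demo_* names and the printing backend are illustrative only:

#include <stdint.h>
#include <stdio.h>

/*
 * Cut-down version of the MPT ops table: one backend fills these in, and
 * thin wrappers call through the pointers.
 */
struct demo_mpt_ops {
    int (*enable_page_track)(unsigned long handle, uint64_t gfn);
    int (*disable_page_track)(unsigned long handle, uint64_t gfn);
};

/* Illustrative backend implementation. */
static int demo_enable_page_track(unsigned long handle, uint64_t gfn)
{
    printf("handle %lu: write-protect gfn %#llx\n",
           handle, (unsigned long long)gfn);
    return 0;
}

static const struct demo_mpt_ops demo_ops = {
    .enable_page_track = demo_enable_page_track,
};

/* Wrapper in the style of intel_gvt_hypervisor_enable_page_track(). */
static int demo_hypervisor_enable_page_track(unsigned long handle, uint64_t gfn)
{
    return demo_ops.enable_page_track(handle, gfn);
}

int main(void)
{
    return demo_hypervisor_enable_page_track(1, 0x1234);
}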
page_track.h
    45  struct intel_vgpu *vgpu, unsigned long gfn);
    48  unsigned long gfn, gvt_page_track_handler_t handler,
    51  unsigned long gfn);
    53  int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
    54  int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
/linux/arch/powerpc/kvm/

book3s_hv_uvmem.c
    294  if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {  in kvmppc_mark_gfn()
    338  if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {  in kvmppc_gfn_is_uvmem_pfn()
    368  if (*gfn >= p->base_pfn && *gfn < p->base_pfn + p->nr_pfns)  in kvmppc_next_nontransitioned_gfn()
    380  *gfn = i;  in kvmppc_next_nontransitioned_gfn()
    606  unsigned long uvmem_pfn, gfn;  in kvmppc_uvmem_drop_pages() local
    613  gfn = slot->base_gfn;  in kvmppc_uvmem_drop_pages()
    894  pfn = gfn_to_pfn(kvm, gfn);  in kvmppc_share_page()
    910  kvmppc_gfn_shared(gfn, kvm);  in kvmppc_share_page()
    952  start = gfn_to_hva(kvm, gfn);  in kvmppc_h_svm_page_in()
    1057  start = gfn_to_hva(kvm, gfn);  in kvmppc_h_svm_page_out()
    [all …]
book3s_64_mmu_hv.c
    555  gfn = gpa >> PAGE_SHIFT;  in kvmppc_book3s_hv_page_fault()
    836  gfn_t gfn;  in kvm_unmap_gfn_range_hv() local
    839  for (gfn = range->start; gfn < range->end; gfn++)  in kvm_unmap_gfn_range_hv()
    842  for (gfn = range->start; gfn < range->end; gfn++)  in kvm_unmap_gfn_range_hv()
    852  unsigned long gfn;  in kvmppc_core_flush_memslot_hv() local
    877  unsigned long gfn)  in kvm_age_rmapp() argument
    933  gfn_t gfn;  in kvm_age_gfn_hv() local
    937  for (gfn = range->start; gfn < range->end; gfn++)  in kvm_age_gfn_hv()
    940  for (gfn = range->start; gfn < range->end; gfn++)  in kvm_age_gfn_hv()
    1096  unsigned long gfn;  in kvmppc_harvest_vpa_dirty() local
    [all …]
e500_mmu_host.c
    353  slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);  in kvmppc_e500_shadow_map()
    354  hva = gfn_to_hva_memslot(slot, gfn);  in kvmppc_e500_shadow_map()
    381  slot_start = pfn - (gfn - slot->base_gfn);  in kvmppc_e500_shadow_map()
    409  gfn_start = gfn & ~(tsize_pages - 1);  in kvmppc_e500_shadow_map()
    412  if (gfn_start + pfn - gfn < start)  in kvmppc_e500_shadow_map()
    414  if (gfn_end + pfn - gfn > end)  in kvmppc_e500_shadow_map()
    416  if ((gfn & (tsize_pages - 1)) !=  in kvmppc_e500_shadow_map()
    449  pfn = gfn_to_pfn_memslot(slot, gfn);  in kvmppc_e500_shadow_map()
    453  __func__, (long)gfn);  in kvmppc_e500_shadow_map()
    488  __func__, (long)gfn, pfn);  in kvmppc_e500_shadow_map()
    [all …]
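The checks around lines 409-416 decide how large a TLB entry may back this gfn: the entry must be naturally aligned in guest space, stay inside the backing pfn range, and gfn and pfn must share the same offset within the entry. A standalone sketch of that size selection under those assumptions; the helper name, the halving loop, and the bounds parameters are illustrative rather than the kernel's exact logic:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;
typedef uint64_t pfn_t;

/*
 * Find the largest power-of-two run of pages, up to max_pages, that can back
 * gfn with a single TLB entry: the entry must be naturally aligned in guest
 * space, fit inside [region_start, region_end) in host pfns, and gfn/pfn
 * must share the same offset within the entry.
 */
static uint64_t largest_tlb_pages(gfn_t gfn, pfn_t pfn,
                                  pfn_t region_start, pfn_t region_end,
                                  uint64_t max_pages)
{
    uint64_t tsize_pages;

    for (tsize_pages = max_pages; tsize_pages > 1; tsize_pages >>= 1) {
        gfn_t gfn_start = gfn & ~(tsize_pages - 1);
        gfn_t gfn_end = gfn_start + tsize_pages;

        /* Guest and host pages must be congruent modulo the size. */
        if ((gfn & (tsize_pages - 1)) != (pfn & (tsize_pages - 1)))
            continue;

        /* The whole entry must stay inside the backing pfn range. */
        if (gfn_start + pfn - gfn < region_start)
            continue;
        if (gfn_end + pfn - gfn > region_end)
            continue;

        return tsize_pages;
    }
    return 1;
}

int main(void)
{
    /* gfn 0x1004 backed by pfn 0x8004 inside pfns [0x8000, 0x8400). */
    printf("%llu pages\n",
           (unsigned long long)largest_tlb_pages(0x1004, 0x8004,
                                                 0x8000, 0x8400, 256));
    return 0;
}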
book3s_hv_rm_mmu.c
    110  gfn -= memslot->base_gfn;  in kvmppc_update_dirty_map()
    119  unsigned long gfn;  in kvmppc_set_dirty_from_hpte() local
    123  gfn = hpte_rpn(hpte_gr, psize);  in kvmppc_set_dirty_from_hpte()
    137  unsigned long gfn;  in revmap_for_hpte() local
    144  *gfnp = gfn;  in revmap_for_hpte()
    162  unsigned long gfn;  in remove_revmap_chain() local
    185  kvmppc_update_dirty_map(memslot, gfn,  in remove_revmap_chain()
    194  unsigned long i, pa, gpa, gfn, psize;  in kvmppc_do_h_enter() local
    233  gfn = gpa >> PAGE_SHIFT;  in kvmppc_do_h_enter()
    248  slot_fn = gfn - memslot->base_gfn;  in kvmppc_do_h_enter()
    [all …]
/linux/include/linux/

kvm_host.h
    267  kvm_pfn_t gfn;  member
    863  kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
    927  mark_page_dirty(kvm, gfn); \
    1227  if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)  in try_get_memslot()
    1252  if (gfn >= memslots[slot].base_gfn)  in search_memslots()
    1258  slot = try_get_memslot(slots, start, gfn);  in search_memslots()
    1278  slot = try_get_memslot(slots, slot_index, gfn);  in __gfn_to_memslot()
    1300  unsigned long offset = gfn - slot->base_gfn;  in __gfn_to_hva_memslot()
    1307  return gfn_to_memslot(kvm, gfn)->id;  in memslot_id()
    1318  static inline gpa_t gfn_to_gpa(gfn_t gfn)  in gfn_to_gpa() argument
    [all …]
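The inline helpers listed above capture the core gfn arithmetic: a memslot covers [base_gfn, base_gfn + npages), a gfn's host virtual address is the slot's userspace mapping plus a page-sized offset, and gfn_to_gpa() is a shift by PAGE_SHIFT. A condensed standalone sketch of those relationships; demo_memslot and its fields are a simplified stand-in for struct kvm_memory_slot:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;
typedef uint64_t gpa_t;

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Simplified stand-in for struct kvm_memory_slot. */
struct demo_memslot {
    gfn_t base_gfn;
    uint64_t npages;
    uint64_t userspace_addr;    /* HVA of the slot's first page */
};

/* Containment test in the style of try_get_memslot(): does this slot cover gfn? */
static int slot_contains(const struct demo_memslot *slot, gfn_t gfn)
{
    return gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages;
}

/*
 * __gfn_to_hva_memslot()-style translation: offset into the slot, scaled
 * to bytes, added to the slot's userspace mapping.
 */
static uint64_t gfn_to_hva_memslot(const struct demo_memslot *slot, gfn_t gfn)
{
    uint64_t offset = gfn - slot->base_gfn;

    return slot->userspace_addr + offset * PAGE_SIZE;
}

/* gfn_to_gpa(): a guest frame number shifted up by the page size. */
static gpa_t gfn_to_gpa(gfn_t gfn)
{
    return (gpa_t)gfn << PAGE_SHIFT;
}

int main(void)
{
    struct demo_memslot slot = {
        .base_gfn = 0x100, .npages = 0x40, .userspace_addr = 0x7f0000000000ULL,
    };
    gfn_t gfn = 0x123;

    printf("contains: %d hva: %#llx gpa: %#llx\n",
           slot_contains(&slot, gfn),
           (unsigned long long)gfn_to_hva_memslot(&slot, gfn),
           (unsigned long long)gfn_to_gpa(gfn));
    return 0;
}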
/linux/include/xen/

xen-ops.h
    72  xen_pfn_t *gfn, int nr,
    85  xen_pfn_t *gfn, int nr,  in xen_xlate_remap_gfn_array() argument
    122  xen_pfn_t *gfn, int nr,  in xen_remap_domain_gfn_array() argument
    128  return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,  in xen_remap_domain_gfn_array()
    136  return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,  in xen_remap_domain_gfn_array()
    182  xen_pfn_t gfn, int nr,  in xen_remap_domain_gfn_range() argument
    189  return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false);  in xen_remap_domain_gfn_range()
/linux/virt/kvm/

kvm_main.c
    2184  gfn_t gfn)  in gfn_to_hva_memslot() argument
    2192  return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);  in gfn_to_hva()
    2517  return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);  in gfn_to_pfn()
    2523  return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);  in kvm_vcpu_gfn_to_pfn()
    2606  map->gfn = gfn;  in kvm_vcpu_map()
    2746  ++gfn;  in kvm_read_guest()
    2766  ++gfn;  in kvm_vcpu_read_guest()
    2850  ++gfn;  in kvm_write_guest()
    2871  ++gfn;  in kvm_vcpu_write_guest()
    3015  ++gfn;  in kvm_clear_guest()
    [all …]
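The repeated ++gfn hits in kvm_read_guest(), kvm_write_guest() and kvm_clear_guest() come from the same loop shape: split the guest-physical range into segments that never cross a page boundary, handle each segment against its own gfn, then advance to the next frame. A simplified standalone sketch of that segmentation; handle_guest_page() stands in for the real per-page memslot helpers:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;
typedef uint64_t gpa_t;

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Stand-in for the per-page read/write/clear helper. */
static int handle_guest_page(gfn_t gfn, unsigned int offset, unsigned int len)
{
    printf("gfn %#llx offset %u len %u\n",
           (unsigned long long)gfn, offset, len);
    return 0;
}

/*
 * kvm_read_guest()/kvm_write_guest()-style loop: never let one segment
 * cross a page boundary, and bump gfn once per completed segment.
 */
static int access_guest(gpa_t gpa, unsigned long len)
{
    gfn_t gfn = gpa >> PAGE_SHIFT;
    unsigned int offset = gpa & (PAGE_SIZE - 1);

    while (len) {
        unsigned int seg = PAGE_SIZE - offset;

        if (seg > len)
            seg = (unsigned int)len;
        if (handle_guest_page(gfn, offset, seg))
            return -1;

        len -= seg;
        offset = 0;
        ++gfn;
    }
    return 0;
}

int main(void)
{
    /* An 8 KiB access starting 1 KiB into a page spans three gfns. */
    return access_guest((0x100ULL << PAGE_SHIFT) + 0x400, 0x2000);
}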
dirty_ring.c
    84  static inline void kvm_dirty_gfn_set_invalid(struct kvm_dirty_gfn *gfn)  in kvm_dirty_gfn_set_invalid() argument
    86  gfn->flags = 0;  in kvm_dirty_gfn_set_invalid()
    89  static inline void kvm_dirty_gfn_set_dirtied(struct kvm_dirty_gfn *gfn)  in kvm_dirty_gfn_set_dirtied() argument
    91  gfn->flags = KVM_DIRTY_GFN_F_DIRTY;  in kvm_dirty_gfn_set_dirtied()
    94  static inline bool kvm_dirty_gfn_harvested(struct kvm_dirty_gfn *gfn)  in kvm_dirty_gfn_harvested() argument
    96  return gfn->flags & KVM_DIRTY_GFN_F_RESET;  in kvm_dirty_gfn_harvested()
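The three helpers above are the dirty-ring entry life cycle: the kernel publishes an entry as dirtied, userspace acknowledges it by setting a reset flag, and the kernel treats that as harvested before recycling the entry as invalid. A standalone sketch of those flag transitions; the DEMO_* flag values and the struct layout here are placeholders, the authoritative definitions live in the KVM UAPI headers:

#include <stdint.h>
#include <stdio.h>

/* Illustrative flag values; the real constants come from the KVM UAPI. */
#define DEMO_DIRTY_GFN_F_DIRTY (1U << 0)    /* kernel: entry carries a dirty gfn   */
#define DEMO_DIRTY_GFN_F_RESET (1U << 1)    /* userspace: entry has been harvested */

struct demo_dirty_gfn {
    uint32_t flags;
    uint32_t slot;
    uint64_t offset;
};

static void dirty_gfn_set_invalid(struct demo_dirty_gfn *gfn)
{
    gfn->flags = 0;
}

static void dirty_gfn_set_dirtied(struct demo_dirty_gfn *gfn)
{
    gfn->flags = DEMO_DIRTY_GFN_F_DIRTY;
}

static int dirty_gfn_harvested(const struct demo_dirty_gfn *gfn)
{
    return gfn->flags & DEMO_DIRTY_GFN_F_RESET;
}

int main(void)
{
    struct demo_dirty_gfn entry = { 0 };

    dirty_gfn_set_dirtied(&entry);          /* kernel publishes a dirty page */
    entry.flags |= DEMO_DIRTY_GFN_F_RESET;  /* userspace acknowledges it     */

    printf("harvested: %d\n", dirty_gfn_harvested(&entry));
    dirty_gfn_set_invalid(&entry);          /* kernel recycles the slot      */
    return 0;
}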
/linux/drivers/xen/

xlate_mmu.c
    45  typedef void (*xen_gfn_fn_t)(unsigned long gfn, void *data);
    84  static void setup_hparams(unsigned long gfn, void *data)  in setup_hparams() argument
    89  info->h_gpfns[info->h_iter] = gfn;  in setup_hparams()
    145  xen_pfn_t *gfn, int nr,  in xen_xlate_remap_gfn_array() argument
    158  data.fgfn = gfn;  in xen_xlate_remap_gfn_array()
    174  static void unmap_gfn(unsigned long gfn, void *data)  in unmap_gfn() argument
    179  xrp.gpfn = gfn;  in unmap_gfn()
    197  static void setup_balloon_gfn(unsigned long gfn, void *data)  in setup_balloon_gfn() argument
    201  info->pfns[info->idx++] = gfn;  in setup_balloon_gfn()
/linux/include/trace/events/

kvm.h
    261  TP_PROTO(u64 gva, u64 gfn),
    263  TP_ARGS(gva, gfn),
    267  __field(u64, gfn)
    272  __entry->gfn = gfn;
    275  TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
    280  TP_PROTO(u64 gva, u64 gfn),
    282  TP_ARGS(gva, gfn)
    287  TP_PROTO(u64 gva, u64 gfn),
    289  TP_ARGS(gva, gfn)