/linux/include/linux/
kvm_host.h
    235 gfn_t start;
    236 gfn_t end;
    428 gfn_t base_gfn;
    863 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
    942 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
    946 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
    1005 gfn_t gfn_offset,
    1310 static inline gfn_t
    1318 static inline gpa_t gfn_to_gpa(gfn_t gfn)  in gfn_to_gpa()
    1323 static inline gfn_t gpa_to_gfn(gpa_t gpa)  in gpa_to_gfn()
    [all …]
|
kvm_types.h
    38 typedef u64 gfn_t;
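
The typedef above, together with the gfn_to_gpa()/gpa_to_gfn() helpers listed for kvm_host.h, pins down the core relationship: a gfn is a guest physical address with the page offset shifted away. A minimal standalone model of those two helpers, assuming 4 KiB pages (PAGE_SHIFT = 12); the typedefs are copied here for illustration only, this is not kernel code:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;   /* guest frame number */
typedef uint64_t gpa_t;   /* guest physical address */

#define PAGE_SHIFT 12

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
        return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
        return (gfn_t)(gpa >> PAGE_SHIFT);
}

int main(void)
{
        gpa_t gpa = 0x12345678;

        /* 0x12345678 >> 12 == gfn 0x12345; the page offset is dropped */
        printf("gpa 0x%llx -> gfn 0x%llx -> gpa 0x%llx\n",
               (unsigned long long)gpa,
               (unsigned long long)gpa_to_gfn(gpa),
               (unsigned long long)gfn_to_gpa(gpa_to_gfn(gpa)));
        return 0;
}

Note that the round trip is lossy: converting back to a gpa lands on the page boundary, which is exactly why the many `gpa >> PAGE_SHIFT` sites below pair the gfn with a separate in-page offset.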
|
/linux/arch/x86/kvm/mmu/
tdp_mmu.h
    22 bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
    23 gfn_t end, bool can_yield, bool flush);
    25 gfn_t start, gfn_t end, bool flush)  in kvm_tdp_mmu_zap_gfn_range()
    31 gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level + 1);  in kvm_tdp_mmu_zap_sp()
    65 gfn_t gfn, unsigned long mask,
    71 struct kvm_memory_slot *slot, gfn_t gfn,
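
Line 31 computes the gfn range covered by a shadow page: a page-table page at role.level holds 512 entries that each map KVM_PAGES_PER_HPAGE(level) pages, so the whole page spans KVM_PAGES_PER_HPAGE(level + 1) gfns. A standalone sketch of that arithmetic, assuming x86's 9 bits of index per level (the macro name here mirrors the kernel's but is defined locally):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

/* pages mapped by one entry at @level: 1 (4K), 512 (2M), 512^2 (1G), ... */
#define PAGES_PER_HPAGE(level) (1ULL << (((level) - 1) * 9))

int main(void)
{
        gfn_t sp_gfn = 0x40000; /* base gfn of a level-2 page-table page */
        int level = 2;

        /* its 512 level-2 entries together span one level-3 region */
        gfn_t end = sp_gfn + PAGES_PER_HPAGE(level + 1);

        printf("sp covers gfns 0x%llx..0x%llx\n",
               (unsigned long long)sp_gfn,
               (unsigned long long)(end - 1));
        return 0;
}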
|
mmu_internal.h
    51 gfn_t gfn;
    55 gfn_t *gfns;
    122 gfn_t gfn, bool can_unsync, bool prefetch);
    124 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
    125 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
    154 const struct kvm_memory_slot *slot, gfn_t gfn,
|
tdp_mmu.c
    57 gfn_t start, gfn_t end, bool can_yield, bool flush,
    319 gfn_t base_gfn = sp->gfn;  in handle_removed_tdp_mmu_page()
    703 gfn_t start, gfn_t end, bool can_yield, bool flush,  in zap_gfn_range()
    1173 gfn_t start, gfn_t end, int min_level)  in wrprot_gfn_range()
    1239 gfn_t start, gfn_t end)  in clear_dirty_gfn_range()
    1354 gfn_t gfn, unsigned long mask,  in kvm_tdp_mmu_clear_dirty_pt_masked()
    1372 gfn_t start = slot->base_gfn;  in zap_collapsible_spte_range()
    1373 gfn_t end = start + slot->npages;  in zap_collapsible_spte_range()
    1429 gfn_t gfn, int min_level)  in write_protect_gfn()
    1490 gfn_t gfn = addr >> PAGE_SHIFT;  in kvm_tdp_mmu_get_walk()
    [all …]
|
tdp_iter.h
    20 gfn_t next_last_level_gfn;
    26 gfn_t yielded_gfn;
    32 gfn_t gfn;
    71 int min_level, gfn_t next_last_level_gfn);
|
mmu.c
    836 gfn_t gfn;  in account_shadowed()
    866 gfn_t gfn;  in unaccount_shadowed()
    1083 gfn_t gfn;  in rmap_remove()
    1494 gfn_t start_gfn;
    1495 gfn_t end_gfn;
    1500 gfn_t gfn;
    1520 int end_level, gfn_t start_gfn, gfn_t end_gfn)  in slot_rmap_walk_init()
    2768 gfn_t gfn;  in direct_pte_prefetch_many()
    3909 gfn_t gfn)  in kvm_arch_setup_async_pf()
    5468 gfn_t start_gfn, gfn_t end_gfn, bool flush_on_yield,  in slot_handle_level_range()
    [all …]
|
mmutrace.h
    211 TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
    216 __field(gfn_t, gfn)
    234 TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
    239 __field(gfn_t, gfn)
    334 TP_PROTO(int level, gfn_t gfn, u64 *sptep),
    392 TP_PROTO(int as_id, gfn_t gfn, int level, u64 old_spte, u64 new_spte),
|
tdp_iter.c
    18 static gfn_t round_gfn_for_level(gfn_t gfn, int level)  in round_gfn_for_level()
    44 int min_level, gfn_t next_last_level_gfn)  in tdp_iter_start()
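
round_gfn_for_level() at line 18 aligns a gfn down to the start of the region mapped at a given level, so the TDP iterator always lands on an entry boundary. The usual kernel idiom is gfn & -KVM_PAGES_PER_HPAGE(level), which is the same mask as the ~(KVM_PAGES_PER_HPAGE(level) - 1) visible in paging_tmpl.h line 802. A userspace model, again assuming 9 bits of index per level:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

/* pages covered by one entry at @level: 1, 512, 512*512, ... */
#define PAGES_PER_LEVEL(level) (1ULL << (((level) - 1) * 9))

static gfn_t round_gfn_for_level(gfn_t gfn, int level)
{
        /* clear the low bits so the gfn is aligned to the entry size */
        return gfn & -PAGES_PER_LEVEL(level);
}

int main(void)
{
        gfn_t gfn = 0x12345;

        for (int level = 1; level <= 4; level++)
                printf("level %d: 0x%llx -> 0x%llx\n", level,
                       (unsigned long long)gfn,
                       (unsigned long long)round_gfn_for_level(gfn, level));
        return 0;
}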
|
page_track.c
    86 static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn,  in update_gfn_track()
    114 struct kvm_memory_slot *slot, gfn_t gfn,  in kvm_slot_page_track_add_page()
    153 struct kvm_memory_slot *slot, gfn_t gfn,  in kvm_slot_page_track_remove_page()
    177 struct kvm_memory_slot *slot, gfn_t gfn,  in kvm_slot_page_track_is_active()
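
The page-track API keeps a per-gfn counter in the memslot: kvm_slot_page_track_add_page() and kvm_slot_page_track_remove_page() nudge it through update_gfn_track(), and kvm_slot_page_track_is_active() reports whether any tracker is still registered. A userspace model of that counter scheme; the struct and helper names here are hypothetical stand-ins, as the real counters hang off struct kvm_memory_slot:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint64_t gfn_t;

struct slot_track {
        gfn_t base_gfn;
        unsigned long npages;
        short *count;           /* one write-track counter per page */
};

static void update_gfn_track(struct slot_track *s, gfn_t gfn, int delta)
{
        s->count[gfn - s->base_gfn] += delta;
}

static int gfn_is_tracked(struct slot_track *s, gfn_t gfn)
{
        return s->count[gfn - s->base_gfn] != 0;
}

int main(void)
{
        struct slot_track s = { .base_gfn = 0x1000, .npages = 16 };

        s.count = calloc(s.npages, sizeof(*s.count));
        if (!s.count)
                return 1;

        update_gfn_track(&s, 0x1004, +1);   /* add_page */
        update_gfn_track(&s, 0x1004, +1);   /* a second tracker */
        update_gfn_track(&s, 0x1004, -1);   /* remove_page */

        /* still tracked: one tracker remains registered */
        printf("gfn 0x1004 tracked: %d\n", gfn_is_tracked(&s, 0x1004));
        free(s.count);
        return 0;
}

The counter rather than a plain flag is what lets several trackers (e.g. the shadow MMU and KVMGT, listed under kvmgt.c below) protect the same gfn independently.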
|
paging_tmpl.h
    87 gfn_t table_gfn[PT_MAX_FULL_LEVELS];
    95 gfn_t gfn;
    99 static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)  in gpte_to_gfn_lvl()
    243 gfn_t table_gfn;  in FNAME()
    347 gfn_t table_gfn;  in FNAME()
    360 gfn_t gfn;  in FNAME()
    566 gfn_t gfn;  in FNAME()
    665 gfn_t base_gfn = fault->gfn;  in FNAME()
    688 gfn_t table_gfn;  in FNAME()
    802 gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1);  in FNAME()
    [all …]
|
mmu_audit.c
    96 gfn_t gfn;  in audit_mappings()
    133 gfn_t gfn;  in inspect_spte_has_rmap()
|
spte.h
    334 unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
|
/linux/arch/mips/kvm/
mmu.c
    271 bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)  in kvm_mips_flush_gpa_pt()
    397 int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)  in BUILD_PTE_RANGE_OP()
    417 gfn_t gfn_offset, unsigned long mask)  in kvm_arch_mmu_enable_log_dirty_pt_masked()
    419 gfn_t base_gfn = slot->base_gfn + gfn_offset;  in kvm_arch_mmu_enable_log_dirty_pt_masked()
    420 gfn_t start = base_gfn + __ffs(mask);  in kvm_arch_mmu_enable_log_dirty_pt_masked()
    421 gfn_t end = base_gfn + __fls(mask);  in kvm_arch_mmu_enable_log_dirty_pt_masked()
    434 static int kvm_mips_mkold_gpa_pt(struct kvm *kvm, gfn_t start_gfn,  in BUILD_PTE_RANGE_OP()
    435 gfn_t end_gfn)  in BUILD_PTE_RANGE_OP()
    516 gfn_t gfn = gpa >> PAGE_SHIFT;  in _kvm_mips_map_page_fast()
    593 gfn_t gfn = gpa >> PAGE_SHIFT;  in kvm_mips_map_page()
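
Lines 419–421 show how the MIPS dirty-log hook (and the arm64 and riscv versions further down) turn one word of the dirty bitmap into a gfn range: __ffs() and __fls() pick out the first and last set bits relative to base_gfn. A standalone model using the GCC builtins as stand-ins for the kernel helpers; it assumes a 64-bit unsigned long:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

int main(void)
{
        gfn_t base_gfn = 0x4000;     /* slot->base_gfn + gfn_offset */
        unsigned long mask = 0x0ff0; /* dirty bits 4..11 of this word */

        gfn_t start = base_gfn + __builtin_ctzl(mask);       /* __ffs */
        gfn_t end = base_gfn + (63 - __builtin_clzl(mask));  /* __fls */

        /* write-protect gfns [start, end] so later writes re-dirty them */
        printf("clean range: 0x%llx..0x%llx\n",
               (unsigned long long)start, (unsigned long long)end);
        return 0;
}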
|
/linux/arch/x86/kvm/
mmu.h
    159 gfn_t gfn;
    300 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);
    329 static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)  in gfn_to_index()
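
gfn_to_index() at line 329 converts a gfn into an index into a memslot's per-level metadata arrays (rmap heads, large-page counts): both gfn and base_gfn are shifted down by the level's gfn shift before subtracting. A sketch under the assumption of x86's 9-bit shift per level, with the shift macro defined locally:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

#define HPAGE_GFN_SHIFT(level) (((level) - 1) * 9)

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
        return (gfn >> HPAGE_GFN_SHIFT(level)) -
               (base_gfn >> HPAGE_GFN_SHIFT(level));
}

int main(void)
{
        gfn_t base_gfn = 0x100000;

        /* the same gfn indexes different arrays at 4K, 2M and 1G levels */
        for (int level = 1; level <= 3; level++)
                printf("level %d index: %llu\n", level,
                       (unsigned long long)gfn_to_index(base_gfn + 0x1234ab,
                                                        base_gfn, level));
        return 0;
}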
|
x86.h
    225 gva_t gva, gfn_t gfn, unsigned access)  in vcpu_cache_mmio_info()
    322 u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
    326 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
|
mtrr.c
    309 gfn_t start, end;  in update_mtrr()
    615 u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)  in kvm_mtrr_get_guest_memory_type()
    691 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,  in kvm_mtrr_check_gfn_range_consistency()
|
/linux/arch/x86/include/asm/
kvm_page_track.h
    61 struct kvm_memory_slot *slot, gfn_t gfn,
    64 struct kvm_memory_slot *slot, gfn_t gfn,
    67 struct kvm_memory_slot *slot, gfn_t gfn,
|
kvm_host.h
    799 gfn_t mmio_gfn;
    818 gfn_t gfns[ASYNC_PF_PER_VCPU];
    1017 gfn_t shinfo_gfn;
    1406 u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
    1528 gfn_t gfn;
    1760 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
    1894 extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
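
The gfns[ASYNC_PF_PER_VCPU] array at line 818 is the small per-vCPU table that kvm_find_async_pf_gfn() at line 1894 consults to tell whether a faulting gfn already has an async page fault in flight. In the kernel it is a small hash table probed linearly; a userspace model of that lookup pattern, where the hash function and table handling are illustrative rather than the kernel's:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint64_t gfn_t;

#define ASYNC_PF_PER_VCPU 64

struct vcpu_apf {
        gfn_t gfns[ASYNC_PF_PER_VCPU];  /* 0 = free slot */
};

static uint32_t apf_hash(gfn_t gfn)
{
        /* illustrative multiplicative hash over the low bits */
        return (uint32_t)(gfn * 2654435761u) & (ASYNC_PF_PER_VCPU - 1);
}

static void apf_add(struct vcpu_apf *v, gfn_t gfn)
{
        uint32_t key = apf_hash(gfn);

        /* linear probing; assumes the table is never completely full */
        while (v->gfns[key])
                key = (key + 1) & (ASYNC_PF_PER_VCPU - 1);
        v->gfns[key] = gfn;
}

static int apf_find(struct vcpu_apf *v, gfn_t gfn)
{
        uint32_t key = apf_hash(gfn);

        for (int i = 0; i < ASYNC_PF_PER_VCPU; i++) {
                if (v->gfns[key] == gfn)
                        return 1;
                if (!v->gfns[key])
                        return 0;  /* probe chain ends at a free slot */
                key = (key + 1) & (ASYNC_PF_PER_VCPU - 1);
        }
        return 0;
}

int main(void)
{
        struct vcpu_apf v;

        memset(&v, 0, sizeof(v));
        apf_add(&v, 0x1234);
        printf("0x1234 pending: %d, 0x5678 pending: %d\n",
               apf_find(&v, 0x1234), apf_find(&v, 0x5678));
        return 0;
}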
|
/linux/virt/kvm/
kvm_main.c
    1933 gfn_t offset;  in kvm_get_dirty_log_protect()
    2002 gfn_t offset;  in kvm_clear_dirty_log_protect()
    2178 gfn_t *nr_pages)  in gfn_to_hva_many()
    2184 gfn_t gfn)  in gfn_to_hva_memslot()
    2531 gfn_t entry = 0;  in gfn_to_page_many_atomic()
    2734 gfn_t gfn = gpa >> PAGE_SHIFT;  in kvm_read_guest()
    2754 gfn_t gfn = gpa >> PAGE_SHIFT;  in kvm_vcpu_read_guest()
    2792 gfn_t gfn = gpa >> PAGE_SHIFT;  in kvm_vcpu_read_guest_atomic()
    2838 gfn_t gfn = gpa >> PAGE_SHIFT;  in kvm_write_guest()
    2885 gfn_t nr_pages_avail;  in __kvm_gfn_to_hva_cache_init()
    [all …]
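
The repeated gfn = gpa >> PAGE_SHIFT lines in kvm_read_guest(), kvm_write_guest() and friends open the same pattern: convert the address to a frame number, then loop per page because a guest buffer may straddle page boundaries. A userspace model of that splitting loop; read_one_page() is a hypothetical stand-in for kvm_read_guest_page():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint64_t gfn_t;
typedef uint64_t gpa_t;

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int read_one_page(gfn_t gfn, void *data, int offset, int len)
{
        /* stand-in: a real implementation copies from the gfn's hva */
        printf("read gfn 0x%llx, offset %d, len %d\n",
               (unsigned long long)gfn, offset, len);
        memset(data, 0, len);
        return 0;
}

static int read_guest(gpa_t gpa, void *data, unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int offset = gpa & (PAGE_SIZE - 1);

        while (len) {
                /* clamp this chunk to the end of the current page */
                int seg = PAGE_SIZE - offset;

                if ((unsigned long)seg > len)
                        seg = len;
                if (read_one_page(gfn, data, offset, seg))
                        return -1;
                offset = 0;
                len -= seg;
                data = (char *)data + seg;
                ++gfn;
        }
        return 0;
}

int main(void)
{
        char buf[8192];

        /* crosses two page boundaries: touches gfns 0x10, 0x11, 0x12 */
        return read_guest((0x10UL << PAGE_SHIFT) + 0xf00, buf, 5000);
}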
|
/linux/arch/mips/include/asm/
kvm_host.h
    809 bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
    810 int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
|
/linux/drivers/gpu/drm/i915/gvt/
kvmgt.c
    87 gfn_t gfn;
    105 gfn_t gfn;
    370 static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)  in __gvt_cache_find_gfn()
    388 static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,  in __gvt_cache_add()
    497 __kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)  in __kvmgt_protect_table_find()
    512 gfn_t gfn)  in kvmgt_gfn_is_write_protected()
    520 static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)  in kvmgt_protect_table_add()
    536 gfn_t gfn)  in kvmgt_protect_table_del()
    1874 gfn_t gfn;  in kvmgt_page_track_flush_slot()
|
/linux/arch/powerpc/kvm/
e500_mmu_host.c
    323 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,  in kvmppc_e500_shadow_map()
    560 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,  in kvmppc_e500_tlb1_map()
    612 gfn_t gfn = gpaddr >> PAGE_SHIFT;  in kvmppc_mmu_map()
|
/linux/arch/arm64/kvm/
mmu.c
    732 gfn_t gfn_offset, unsigned long mask)  in kvm_mmu_write_protect_pt_masked()
    750 gfn_t gfn_offset, unsigned long mask)  in kvm_arch_mmu_enable_log_dirty_pt_masked()
    956 gfn_t gfn;  in user_mem_abort()
    1193 gfn_t gfn;  in kvm_handle_guest_abort()
|
/linux/arch/riscv/kvm/
mmu.c
    420 gfn_t gfn_offset,  in kvm_arch_mmu_enable_log_dirty_pt_masked()
    646 gfn_t gfn = gpa >> PAGE_SHIFT;  in kvm_riscv_stage2_map()
|