| /linux/arch/x86/kvm/ |
| A D | mmu.h |
  329  static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)  in gfn_to_index() argument
  333  (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));  in gfn_to_index()
  340  return gfn_to_index(slot->base_gfn + npages - 1,  in __kvm_mmu_slot_lpages()
  341  slot->base_gfn, level) + 1;  in __kvm_mmu_slot_lpages()
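Taken together, the fragments at lines 329/333 reconstruct the whole helper: it returns the memslot-relative index of a gfn at a given large-page level, and __kvm_mmu_slot_lpages() (lines 340/341) uses it to size the per-level metadata arrays. A minimal userspace sketch, with a hypothetical stand-in for KVM_HPAGE_GFN_SHIFT() that assumes the x86 layout of 9 bits per level:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

/* Hypothetical stand-in for the kernel macro: 9 bits per level step,
 * so level 1 = 4K, level 2 = 2M, level 3 = 1G on x86. */
#define KVM_HPAGE_GFN_SHIFT(level) (((level) - 1) * 9)

/* Reconstructed from the fragments at lines 329/333 above. */
static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
	       (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

int main(void)
{
	/* Slot based at gfn 0x1000: gfn 0x1a00 lands in 2M-page index 5. */
	printf("%llu\n", (unsigned long long)gfn_to_index(0x1a00, 0x1000, 2));
	return 0;
}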
|
| /linux/arch/riscv/kvm/ |
| A D | mmu.c |
  371  phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;  in stage2_wp_memory_region()
  372  phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;  in stage2_wp_memory_region()
  423  phys_addr_t base_gfn = slot->base_gfn + gfn_offset;  in kvm_arch_mmu_enable_log_dirty_pt_masked() local
  424  phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;  in kvm_arch_mmu_enable_log_dirty_pt_masked()
  425  phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;  in kvm_arch_mmu_enable_log_dirty_pt_masked()
  456  gpa_t gpa = slot->base_gfn << PAGE_SHIFT;  in kvm_arch_flush_shadow_memslot()
  497  if ((memslot->base_gfn + memslot->npages) >=  in kvm_arch_prepare_memory_region()
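Lines 423-425 show the dirty-log idiom that riscv shares with arm64 and mips below: mask is a 64-bit bitmap of dirty pages relative to base_gfn = slot->base_gfn + gfn_offset, and __ffs()/__fls() bound its first and last set bit, so the whole mask collapses into one contiguous [start, end) physical range to write-protect. A userspace sketch of the same arithmetic, with GCC builtins standing in for the kernel's __ffs()/__fls():

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Userspace stand-ins for the kernel's __ffs()/__fls() (0-based bit index). */
static inline unsigned long my_ffs(uint64_t x) { return __builtin_ctzll(x); }
static inline unsigned long my_fls(uint64_t x) { return 63 - __builtin_clzll(x); }

int main(void)
{
	uint64_t base_gfn = 0x1000 + 64;	/* slot->base_gfn + gfn_offset */
	uint64_t mask = 0x0f0;			/* bits 4..7 dirty */

	/* Mirrors lines 424/425: the range spans first to last set bit. */
	uint64_t start = (base_gfn + my_ffs(mask)) << PAGE_SHIFT;
	uint64_t end   = (base_gfn + my_fls(mask) + 1) << PAGE_SHIFT;

	printf("wp range: [%#llx, %#llx)\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}

Note the range is the convex hull of the set bits: pages whose mask bit is clear get write-protected again, which is harmless because write protection is idempotent.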
|
| /linux/arch/arm64/kvm/ |
| A D | mmu.c |
  196  phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;  in stage2_flush_memslot()
  549  phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;  in stage2_unmap_memslot()
  710  start = memslot->base_gfn << PAGE_SHIFT;  in kvm_mmu_wp_memory_region()
  711  end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;  in kvm_mmu_wp_memory_region()
  734  phys_addr_t base_gfn = slot->base_gfn + gfn_offset;  in kvm_mmu_write_protect_pt_masked() local
  735  phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;  in kvm_mmu_write_protect_pt_masked()
  736  phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;  in kvm_mmu_write_protect_pt_masked()
  774  gpa_start = memslot->base_gfn << PAGE_SHIFT;  in fault_supports_stage2_huge_mapping()
  1505  if ((memslot->base_gfn + memslot->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT))  in kvm_arch_prepare_memory_region()
  1567  gpa_t gpa = slot->base_gfn << PAGE_SHIFT;  in kvm_arch_flush_shadow_memslot()
|
| /linux/arch/powerpc/kvm/ |
| A D | trace_hv.h |
  285  __field(u64, base_gfn)
  297  __entry->base_gfn = memslot ? memslot->base_gfn : -1UL;
  305  __entry->base_gfn, __entry->slot_flags)
|
| A D | book3s_hv_uvmem.c |
  260  p->base_pfn = slot->base_gfn;  in kvmppc_uvmem_slot_init()
  278  if (p->base_pfn == slot->base_gfn) {  in kvmppc_uvmem_slot_free()
  391  unsigned long gfn = memslot->base_gfn;  in kvmppc_memslot_page_merge()
  442  memslot->base_gfn << PAGE_SHIFT,  in __kvmppc_uvmem_memslot_create()
  613  gfn = slot->base_gfn;  in kvmppc_uvmem_drop_pages()
  787  unsigned long gfn = memslot->base_gfn;  in kvmppc_uv_migrate_mem_slot()
|
| A D | book3s_64_mmu_hv.c |
  569  if (gfn_base < memslot->base_gfn)  in kvmppc_book3s_hv_page_fault()
  683  rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn];  in kvmppc_book3s_hv_page_fault()
  805  rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];  in kvm_unmap_rmapp()
  856  gfn = memslot->base_gfn;  in kvmppc_core_flush_memslot_hv()
  885  rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];  in kvm_age_rmapp()
  956  rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];  in kvm_test_age_rmapp()
  1101  if (gfn < memslot->base_gfn ||  in kvmppc_harvest_vpa_dirty()
  1102  gfn >= memslot->base_gfn + memslot->npages)  in kvmppc_harvest_vpa_dirty()
  1107  __set_bit_le(gfn - memslot->base_gfn, map);  in kvmppc_harvest_vpa_dirty()
  1182  set_bit_le(gfn - memslot->base_gfn, memslot->dirty_bitmap);  in kvmppc_unpin_guest_page()
  [all …]
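Nearly every book3s HV hit is the same slot-relative pattern: arch.rmap and the dirty bitmap carry one entry per page of the slot, indexed by gfn - memslot->base_gfn (lines 683, 805, 885, 956, 1107, 1182). A userspace sketch of the dirty-bitmap half, with plain bit arithmetic standing in for the kernel's set_bit_le():

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

/* Stand-in for set_bit_le() on a slot's dirty bitmap: the bit number
 * is slot-relative, exactly as at line 1182 above. */
static void set_dirty(unsigned long *bitmap, gfn_t gfn, gfn_t base_gfn)
{
	unsigned long nr = gfn - base_gfn;

	bitmap[nr / (8 * sizeof(long))] |= 1UL << (nr % (8 * sizeof(long)));
}

int main(void)
{
	unsigned long bitmap[2] = { 0 };

	set_dirty(bitmap, 0x1105, 0x1100);	/* page 5 of the slot */
	printf("%#lx\n", bitmap[0]);		/* prints 0x20 */
	return 0;
}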
|
| A D | book3s_64_mmu_radix.c |
  1052  rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];  in kvm_age_radix()
  1084  unsigned long gfn = memslot->base_gfn + pagenum;  in kvm_radix_test_clear_dirty()
  1127  rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];  in kvm_radix_test_clear_dirty()
  1175  gpa = memslot->base_gfn << PAGE_SHIFT;  in kvmppc_radix_flush_memslot()
|
| A D | book3s_hv_rm_mmu.c |
  110  gfn -= memslot->base_gfn;  in kvmppc_update_dirty_map()
  148  rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);  in revmap_for_hpte()
  248  slot_fn = gfn - memslot->base_gfn;  in kvmppc_do_h_enter()
|
| A D | e500_mmu_host.c | 381 slot_start = pfn - (gfn - slot->base_gfn); in kvmppc_e500_shadow_map()
|
| A D | book3s_hv_nested.c |
  1000  gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn;  in kvmhv_remove_nest_rmap_range()
  1635  rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];  in __kvmhv_nested_page_fault()
|
| A D | book3s_pr.c | 1879 ga = memslot->base_gfn << PAGE_SHIFT; in kvm_vm_ioctl_get_dirty_log_pr()
|
| A D | book3s_hv.c |
  827  if ((from + len) >= ((from_memslot->base_gfn + from_memslot->npages)  in kvmppc_copy_guest()
  839  if ((to + len) >= ((to_memslot->base_gfn + to_memslot->npages)  in kvmppc_copy_guest()
|
| /linux/arch/x86/kvm/mmu/ |
| A D | tdp_mmu.c |
  319  gfn_t base_gfn = sp->gfn;  in handle_removed_tdp_mmu_page() local
  328  gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);  in handle_removed_tdp_mmu_page()
  375  kvm_flush_remote_tlbs_with_address(kvm, base_gfn,  in handle_removed_tdp_mmu_page()
  1225  spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,  in kvm_tdp_mmu_wrprot_slot()
  1226  slot->base_gfn + slot->npages, min_level);  in kvm_tdp_mmu_wrprot_slot()
  1295  spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,  in kvm_tdp_mmu_clear_dirty_slot()
  1296  slot->base_gfn + slot->npages);  in kvm_tdp_mmu_clear_dirty_slot()
  1372  gfn_t start = slot->base_gfn;  in zap_collapsible_spte_range()
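In handle_removed_tdp_mmu_page() (lines 319/328), base_gfn is the first gfn covered by the page table being torn down, and child entry i starts KVM_PAGES_PER_HPAGE(level) pages further along. A sketch of that stride computation, assuming the x86 layout of 512 entries per table (9 bits per level):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

/* Hypothetical x86-style stand-in: pages covered by one entry at 'level'. */
#define KVM_PAGES_PER_HPAGE(level) (1ULL << (((level) - 1) * 9))

int main(void)
{
	gfn_t base_gfn = 0x40000;	/* gfn of the table being torn down */
	int level = 2;			/* each child entry covers a 2M region */

	/* Mirrors line 328: child i starts one stride further in. */
	for (int i = 0; i < 4; i++)
		printf("child %d: gfn %#llx\n", i,
		       (unsigned long long)(base_gfn + i * KVM_PAGES_PER_HPAGE(level)));
	return 0;
}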
|
| A D | page_track.c |
  91  index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);  in update_gfn_track()
  192  index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);  in kvm_slot_page_track_is_active()
|
| A D | paging_tmpl.h |
  665  gfn_t base_gfn = fault->gfn;  in FNAME() local
  667  WARN_ON_ONCE(gw->gfn != base_gfn);  in FNAME()
  745  base_gfn = fault->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);  in FNAME()
  754  sp = kvm_mmu_get_page(vcpu, base_gfn, fault->addr,  in FNAME()
  767  base_gfn, fault->pfn, fault);  in FNAME()
|
| A D | mmu.c |
  805  idx = gfn_to_index(gfn, slot->base_gfn, level);  in lpage_info_slot()
  1066  idx = gfn_to_index(gfn, slot->base_gfn, level);  in gfn_to_rmap()
  1312  slot->base_gfn + gfn_offset, mask, true);  in kvm_mmu_write_protect_pt_masked()
  1345  slot->base_gfn + gfn_offset, mask, false);  in kvm_mmu_clear_dirty_pt_masked()
  1385  gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);  in kvm_arch_mmu_enable_log_dirty_pt_masked()
  2949  gfn_t base_gfn = fault->gfn;  in __direct_map() local
  2970  sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr,  in __direct_map()
  2983  base_gfn, fault->pfn, fault);  in __direct_map()
  5498  end_level, memslot->base_gfn,  in slot_handle_level()
  5499  memslot->base_gfn + memslot->npages - 1,  in slot_handle_level()
  [all …]
|
| /linux/arch/mips/kvm/ |
| A D | mmu.c |
  419  gfn_t base_gfn = slot->base_gfn + gfn_offset;  in kvm_arch_mmu_enable_log_dirty_pt_masked() local
  420  gfn_t start = base_gfn + __ffs(mask);  in kvm_arch_mmu_enable_log_dirty_pt_masked()
  421  gfn_t end = base_gfn + __fls(mask);  in kvm_arch_mmu_enable_log_dirty_pt_masked()
|
| A D | mips.c |
  229  kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,  in kvm_arch_flush_shadow_memslot()
  230  slot->base_gfn + slot->npages - 1);  in kvm_arch_flush_shadow_memslot()
  269  needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,  in kvm_arch_commit_memory_region()
  270  new->base_gfn + new->npages - 1);  in kvm_arch_commit_memory_region()
|
| /linux/virt/kvm/ |
| A D | kvm_main.c |
  1320  if (memslot->base_gfn > mslots[i + 1].base_gfn)  in kvm_memslot_move_backward()
  1323  WARN_ON_ONCE(memslot->base_gfn == mslots[i + 1].base_gfn);  in kvm_memslot_move_backward()
  1347  if (memslot->base_gfn < mslots[i - 1].base_gfn)  in kvm_memslot_move_forward()
  1350  WARN_ON_ONCE(memslot->base_gfn == mslots[i - 1].base_gfn);  in kvm_memslot_move_forward()
  1728  new.base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;  in __kvm_set_memory_region()
  1745  if (new.base_gfn != old.base_gfn)  in __kvm_set_memory_region()
  1761  if (!((new.base_gfn + new.npages <= tmp->base_gfn) ||  in __kvm_set_memory_region()
  1762  (new.base_gfn >= tmp->base_gfn + tmp->npages)))  in __kvm_set_memory_region()
  2172  *nr_pages = slot->npages - (gfn - slot->base_gfn);  in __gfn_to_hva_many()
  3026  unsigned long rel_gfn = gfn - memslot->base_gfn;  in mark_page_dirty_in_slot()
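Lines 1761-1762 are the classic interval-overlap test: the new slot collides with an existing one unless it ends at or before the other begins, or begins at or after the other ends. A standalone sketch of the predicate, with a pared-down range struct in place of struct kvm_memory_slot:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

struct slot_range {
	gfn_t base_gfn;
	unsigned long npages;
};

/* Mirrors lines 1761-1762: overlap unless disjoint on either side. */
static bool slots_overlap(const struct slot_range *a, const struct slot_range *b)
{
	return !((a->base_gfn + a->npages <= b->base_gfn) ||
		 (a->base_gfn >= b->base_gfn + b->npages));
}

int main(void)
{
	struct slot_range lo = { .base_gfn = 0x100, .npages = 0x100 };	/* [0x100, 0x200) */
	struct slot_range hi = { .base_gfn = 0x1f0, .npages = 0x100 };	/* [0x1f0, 0x2f0) */

	printf("%d\n", slots_overlap(&lo, &hi));	/* 1: ranges intersect */
	return 0;
}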
|
| /linux/include/linux/ |
| A D | kvm_host.h |
  428  gfn_t base_gfn;  member
  1227  if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)  in try_get_memslot()
  1252  if (gfn >= memslots[slot].base_gfn)  in search_memslots()
  1300  unsigned long offset = gfn - slot->base_gfn;  in __gfn_to_hva_memslot()
  1315  return slot->base_gfn + gfn_offset;  in hva_to_gfn_memslot()
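The kvm_host.h hits sketch the whole contract of base_gfn: a memslot maps the gfn range [base_gfn, base_gfn + npages) onto a contiguous block of host user memory, so gfn-to-hva and hva-to-gfn are each a subtraction and a shift. A self-contained sketch of both helpers, assuming a 4K PAGE_SHIFT and a pared-down slot struct (the real one also carries userspace_addr):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

typedef uint64_t gfn_t;

/* Pared-down memslot: just the fields the hits above use. */
struct memslot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long userspace_addr;
};

/* Mirrors __gfn_to_hva_memslot() at line 1300. */
static unsigned long gfn_to_hva_memslot(const struct memslot *slot, gfn_t gfn)
{
	unsigned long offset = gfn - slot->base_gfn;

	return slot->userspace_addr + (offset << PAGE_SHIFT);
}

/* Mirrors hva_to_gfn_memslot() at line 1315. */
static gfn_t hva_to_gfn_memslot(const struct memslot *slot, unsigned long hva)
{
	unsigned long gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

int main(void)
{
	struct memslot s = { .base_gfn = 0x100, .npages = 16,
			     .userspace_addr = 0x7f0000000000UL };
	unsigned long hva = gfn_to_hva_memslot(&s, 0x105);

	printf("hva %#lx -> gfn %#llx\n", hva,
	       (unsigned long long)hva_to_gfn_memslot(&s, hva));	/* 0x105 */
	return 0;
}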
|
| /linux/arch/s390/kvm/ |
| A D | kvm-s390.c |
  624  cur_gfn = memslot->base_gfn;  in kvm_arch_sync_dirty_log()
  625  last_gfn = memslot->base_gfn + memslot->npages;  in kvm_arch_sync_dirty_log()
  1957  if (gfn >= memslots[slot].base_gfn &&  in gfn_to_memslot_approx()
  1958  gfn < memslots[slot].base_gfn + memslots[slot].npages)  in gfn_to_memslot_approx()
  1964  if (gfn >= memslots[slot].base_gfn)  in gfn_to_memslot_approx()
  1973  if (gfn >= memslots[start].base_gfn &&  in gfn_to_memslot_approx()
  1974  gfn < memslots[start].base_gfn + memslots[start].npages) {  in gfn_to_memslot_approx()
  2009  unsigned long ofs = cur_gfn - ms->base_gfn;  in kvm_s390_next_dirty_cmma()
  2011  if (ms->base_gfn + ms->npages <= cur_gfn) {  in kvm_s390_next_dirty_cmma()
  2026  return ms->base_gfn + ofs;  in kvm_s390_next_dirty_cmma()
  [all …]
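gfn_to_memslot_approx() (lines 1957-1974) runs the same search step visible in search_memslots() at kvm_host.h line 1252, but returns the nearest slot even on a miss (hence "approx"). A userspace sketch of that search, assuming the memslots array of this kernel vintage is sorted by base_gfn in descending order, so the loop finds the first index whose base_gfn is <= gfn:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

struct memslot {
	gfn_t base_gfn;
	unsigned long npages;
};

/* Binary search mirroring the step at lines 1964 / 1252: assumes slots
 * sorted by base_gfn, descending. Returns 'used' if every slot starts
 * above gfn; the caller decides whether a near-miss is acceptable. */
static int find_slot(const struct memslot *slots, int used, gfn_t gfn)
{
	int start = 0, end = used;

	while (start < end) {
		int mid = start + (end - start) / 2;

		if (gfn >= slots[mid].base_gfn)
			end = mid;
		else
			start = mid + 1;
	}
	return start;
}

int main(void)
{
	struct memslot slots[] = {
		{ .base_gfn = 0x1000, .npages = 0x100 },
		{ .base_gfn = 0x0100, .npages = 0x100 },
	};

	/* gfn 0x150 lies inside slot 1's range [0x100, 0x200). */
	printf("slot %d\n", find_slot(slots, 2, 0x150));
	return 0;
}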
|
| A D | pv.c | 134 npages = memslot->base_gfn + memslot->npages; in kvm_s390_pv_alloc_vm()
|
| A D | priv.c | 1186 if (ms && !test_and_set_bit(gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms))) in __do_essa()
|
| /linux/arch/powerpc/include/asm/ |
| A D | kvm_book3s_64.h | 507 return !(memslot->base_gfn & mask) && !(memslot->npages & mask); in slot_is_aligned()
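Line 507 is the PowerPC hugepage eligibility test: a slot can be mapped with a given large page size only if both its starting gfn and its length in pages are multiples of that size. A tiny sketch, assuming mask is pages-per-large-page minus one:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

/* Mirrors line 507: start gfn and page count both aligned to the size. */
static bool slot_is_aligned(gfn_t base_gfn, unsigned long npages,
			    unsigned long mask)
{
	return !(base_gfn & mask) && !(npages & mask);
}

int main(void)
{
	/* e.g. 16M pages on a 64K base page: 256 base pages per hugepage. */
	unsigned long mask = 256 - 1;

	printf("%d %d\n",
	       slot_is_aligned(0x100, 0x200, mask),	/* 1: both aligned */
	       slot_is_aligned(0x110, 0x200, mask));	/* 0: start misaligned */
	return 0;
}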
|
| /linux/drivers/gpu/drm/i915/gvt/ |
| A D | kvmgt.c | 1880 gfn = slot->base_gfn + i; in kvmgt_page_track_flush_slot()
|