Lines Matching refs:memslot

All references to the memslot identifier in the KVM stage-2 MMU code. Each entry shows the source line number, the matching line, the enclosing function, and whether memslot is a function argument or a local variable at that point.

70 static bool memslot_is_logging(struct kvm_memory_slot *memslot)  in memslot_is_logging()  argument
72 return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY); in memslot_is_logging()
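The two matches above cover the whole predicate. Reconstructed from those fragments (a sketch, not necessarily the exact body in the file), the check is a one-liner: a slot is being dirty-logged when it has a dirty bitmap allocated and is not read-only.

static bool memslot_is_logging(struct kvm_memory_slot *memslot)
{
        /*
         * Dirty logging is in effect only when a dirty bitmap has been
         * allocated for the slot and the slot is writable; read-only
         * slots never need logging.
         */
        return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
}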
194 struct kvm_memory_slot *memslot) in stage2_flush_memslot() argument
196 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; in stage2_flush_memslot()
197 phys_addr_t end = addr + PAGE_SIZE * memslot->npages; in stage2_flush_memslot()
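The flush path turns the slot's guest frame number and page count into a guest-physical (IPA) byte range. A minimal sketch of that conversion, with the actual stage-2 page-table walk replaced by a hypothetical flush_stage2_range() helper:

static void stage2_flush_memslot(struct kvm *kvm,
                                 struct kvm_memory_slot *memslot)
{
        /* base_gfn is a guest frame number; shift it into a byte address. */
        phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
        /* The slot covers npages pages starting at that address. */
        phys_addr_t end = addr + PAGE_SIZE * memslot->npages;

        /* Hypothetical stand-in for the real stage-2 walk over [addr, end). */
        flush_stage2_range(kvm, addr, end);
}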
212 struct kvm_memory_slot *memslot; in stage2_flush_vm() local
219 kvm_for_each_memslot(memslot, slots) in stage2_flush_vm()
220 stage2_flush_memslot(kvm, memslot); in stage2_flush_vm()
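Flushing the whole VM is just the per-slot helper applied to every slot. A sketch assuming the two-argument kvm_for_each_memslot() form seen in the match; the locking the real function takes around the walk is elided.

static void stage2_flush_vm(struct kvm *kvm)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        /* Elided: the real function brackets this walk with kvm->srcu
         * and kvm->mmu_lock. */
        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots)
                stage2_flush_memslot(kvm, memslot);
}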
546 struct kvm_memory_slot *memslot) in stage2_unmap_memslot() argument
548 hva_t hva = memslot->userspace_addr; in stage2_unmap_memslot()
549 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; in stage2_unmap_memslot()
550 phys_addr_t size = PAGE_SIZE * memslot->npages; in stage2_unmap_memslot()
580 gpa_t gpa = addr + (vm_start - memslot->userspace_addr); in stage2_unmap_memslot()
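Unmapping a slot works in two coordinate systems at once: the userspace virtual range (hva) and the guest-physical range (addr). The last match shows the translation between them: an offset into the userspace mapping becomes the same offset into the IPA range. A sketch under the assumption that the function walks the VMAs backing the slot and skips PFNMAP regions; unmap_stage2_range() stands in for the actual range-unmap helper.

static void stage2_unmap_memslot(struct kvm *kvm,
                                 struct kvm_memory_slot *memslot)
{
        hva_t hva = memslot->userspace_addr;
        phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
        phys_addr_t size = PAGE_SIZE * memslot->npages;
        hva_t reg_end = hva + size;

        /* Walk the userspace VMAs that back the slot. */
        do {
                struct vm_area_struct *vma = find_vma(current->mm, hva);
                hva_t vm_start, vm_end;

                if (!vma || vma->vm_start >= reg_end)
                        break;

                /* Clamp the VMA to the part that lies inside the slot. */
                vm_start = max(hva, vma->vm_start);
                vm_end = min(reg_end, vma->vm_end);

                if (!(vma->vm_flags & VM_PFNMAP)) {
                        /*
                         * Same offset, other address space: the distance from
                         * the start of the userspace mapping is the distance
                         * from the start of the IPA range.
                         */
                        gpa_t gpa = addr + (vm_start - memslot->userspace_addr);

                        unmap_stage2_range(kvm, gpa, vm_end - vm_start);
                }
                hva = vm_end;
        } while (hva < reg_end);
}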
597 struct kvm_memory_slot *memslot; in stage2_unmap_vm() local
605 kvm_for_each_memslot(memslot, slots) in stage2_unmap_vm()
606 stage2_unmap_memslot(kvm, memslot); in stage2_unmap_vm()
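The VM-wide unmap is the same slot iteration as the flush path, applied to the unmap helper. A sketch; the real function additionally needs the mm's mmap lock because stage2_unmap_memslot() walks VMAs.

void stage2_unmap_vm(struct kvm *kvm)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        /* Elided: kvm->srcu, the mmap read lock and kvm->mmu_lock are
         * held around this walk in the real function. */
        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots)
                stage2_unmap_memslot(kvm, memslot);
}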
704 struct kvm_memory_slot *memslot = id_to_memslot(slots, slot); in kvm_mmu_wp_memory_region() local
707 if (WARN_ON_ONCE(!memslot)) in kvm_mmu_wp_memory_region()
710 start = memslot->base_gfn << PAGE_SHIFT; in kvm_mmu_wp_memory_region()
711 end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT; in kvm_mmu_wp_memory_region()
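Write-protecting a region for dirty logging resolves a slot id to its memslot, then write-protects the slot's whole IPA range. A sketch from the matches, with stage2_wp_range() used as the assumed range write-protect helper and locking elided:

static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
{
        struct kvm_memslots *slots = kvm_memslots(kvm);
        struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
        phys_addr_t start, end;

        if (WARN_ON_ONCE(!memslot))
                return;

        start = memslot->base_gfn << PAGE_SHIFT;
        end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

        /* Assumed helper: clear write permission on every stage-2 entry
         * in [start, end), under kvm->mmu_lock (elided), then flush. */
        stage2_wp_range(kvm, start, end);
        kvm_flush_remote_tlbs(kvm);
}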
760 static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot, in fault_supports_stage2_huge_mapping() argument
772 size = memslot->npages * PAGE_SIZE; in fault_supports_stage2_huge_mapping()
774 gpa_start = memslot->base_gfn << PAGE_SHIFT; in fault_supports_stage2_huge_mapping()
776 uaddr_start = memslot->userspace_addr; in fault_supports_stage2_huge_mapping()
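This predicate decides whether a fault inside the slot may be mapped with a block (PMD- or PUD-sized) entry. The idea visible in the matches: compare the slot's IPA start, userspace start and size so that a block mapping never crosses the slot and the two address spaces are congruent modulo the block size. A sketch reconstructed under those assumptions:

static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
                                               unsigned long hva,
                                               unsigned long map_size)
{
        gpa_t gpa_start;
        hva_t uaddr_start, uaddr_end;
        size_t size;

        size = memslot->npages * PAGE_SIZE;
        gpa_start = memslot->base_gfn << PAGE_SHIFT;
        uaddr_start = memslot->userspace_addr;
        uaddr_end = uaddr_start + size;

        /*
         * The IPA and the userspace address must have the same offset
         * within a block, otherwise a block entry would map the wrong
         * pages.
         */
        if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
                return false;

        /* The whole block around hva must fall inside the slot. */
        return (hva & ~(map_size - 1)) >= uaddr_start &&
               (hva & ~(map_size - 1)) + map_size <= uaddr_end;
}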
830 transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot, in transparent_hugepage_adjust() argument
841 if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) && in transparent_hugepage_adjust()
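When the fault was not mapped as a block directly, the THP adjustment retries at PMD granularity: if the slot layout allows a PMD mapping at this hva and the backing page is part of a transparent huge page, the IPA and pfn are aligned down to the PMD boundary. A simplified sketch; the THP-detection step is represented by a hypothetical backing_is_thp() check and the return type is assumed.

static long
transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
                            unsigned long hva, kvm_pfn_t *pfnp,
                            phys_addr_t *ipap)
{
        if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) &&
            backing_is_thp(kvm, hva)) {         /* hypothetical check */
                /* Align both the guest address and the pfn to the block. */
                *ipap &= PMD_MASK;
                *pfnp &= ~(PTRS_PER_PMD - 1);
                return PMD_SIZE;
        }

        /* Fall back to a normal page mapping. */
        return PAGE_SIZE;
}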
943 struct kvm_memory_slot *memslot, unsigned long hva, in user_mem_abort() argument
958 bool logging_active = memslot_is_logging(memslot); in user_mem_abort()
1002 if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE)) in user_mem_abort()
1010 if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) in user_mem_abort()
1059 pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL, in user_mem_abort()
1104 vma_pagesize = transparent_hugepage_adjust(kvm, memslot, in user_mem_abort()
1146 mark_page_dirty_in_slot(kvm, memslot, gfn); in user_mem_abort()
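user_mem_abort() is where most of these helpers meet. A heavily condensed sketch of only the memslot-related steps visible in the matches: detect dirty logging, pick the largest block size the slot layout allows, resolve the backing pfn through the slot, possibly upgrade to a THP block, and mark the page dirty once a writable mapping is installed. The __gfn_to_pfn_memslot() arguments after the visible "false, NULL" are an assumption (the match is truncated), and all locking, MMU-notifier retry logic and the actual page-table install are elided.

static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                          struct kvm_memory_slot *memslot, unsigned long hva,
                          unsigned long fault_status)
{
        struct kvm *kvm = vcpu->kvm;
        gfn_t gfn = fault_ipa >> PAGE_SHIFT;
        bool write_fault = kvm_is_write_fault(vcpu);
        bool logging_active = memslot_is_logging(memslot);
        bool writable = false;
        long vma_pagesize = PAGE_SIZE;
        kvm_pfn_t pfn;

        /* Block mappings are only attempted when not dirty logging. */
        if (!logging_active &&
            fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
                vma_pagesize = PUD_SIZE;
        else if (!logging_active &&
                 fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
                vma_pagesize = PMD_SIZE;

        /* Resolve the backing page through the slot's userspace mapping;
         * trailing arguments are assumed from the truncated match. */
        pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
                                   write_fault, &writable, NULL);
        if (is_error_noslot_pfn(pfn))
                return -EFAULT;

        /* A PAGE_SIZE fault may still be upgraded to a THP-backed block. */
        if (vma_pagesize == PAGE_SIZE && !logging_active)
                vma_pagesize = transparent_hugepage_adjust(kvm, memslot, hva,
                                                           &pfn, &fault_ipa);

        /* ... install the stage-2 mapping of size vma_pagesize (elided) ... */

        if (writable)
                mark_page_dirty_in_slot(kvm, memslot, gfn);

        return 0;
}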
1190 struct kvm_memory_slot *memslot; in kvm_handle_guest_abort() local
1229 memslot = gfn_to_memslot(vcpu->kvm, gfn); in kvm_handle_guest_abort()
1230 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable); in kvm_handle_guest_abort()
1286 ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status); in kvm_handle_guest_abort()
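The abort handler is where the memslot is looked up in the first place: the faulting IPA is turned into a gfn, the gfn into a slot and a userspace address, and anything without a valid hva is treated as MMIO rather than handed to user_mem_abort(). A trimmed sketch of that lookup, with the fault-syndrome decoding and the MMIO path elided:

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
{
        struct kvm_memory_slot *memslot;
        phys_addr_t fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
        unsigned long fault_status = 0;         /* decoded from ESR (elided) */
        unsigned long hva;
        bool writable;
        gfn_t gfn;

        gfn = fault_ipa >> PAGE_SHIFT;
        memslot = gfn_to_memslot(vcpu->kvm, gfn);
        hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);

        if (kvm_is_error_hva(hva)) {
                /* No backing slot: handle as MMIO (elided). */
                return 1;
        }

        /* Backed by a memslot: fault it in at stage 2. */
        return user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
}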
1489 struct kvm_memory_slot *memslot, in kvm_arch_prepare_memory_region() argument
1505 if ((memslot->base_gfn + memslot->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT)) in kvm_arch_prepare_memory_region()
1539 if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) { in kvm_arch_prepare_memory_region()
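Finally, the prepare hook validates a new or changed slot before it is committed: the slot must fit below the VM's IPA size limit, and (per the last match) slots with KVM_MEM_LOG_DIRTY_PAGES set face extra restrictions, here assumed to be the usual rejection of dirty logging on PFNMAP (I/O) mappings. A condensed sketch of the memslot checks only; the mmap lock held around the VMA walk and the remaining setup are elided.

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   const struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change)
{
        hva_t hva = mem->userspace_addr;
        hva_t reg_end = hva + mem->memory_size;

        /* The slot must fit inside the guest-physical address space. */
        if ((memslot->base_gfn + memslot->npages) >
            (kvm_phys_size(kvm) >> PAGE_SHIFT))
                return -EFAULT;

        /* Walk the VMAs backing [hva, reg_end); mmap read lock elided. */
        do {
                struct vm_area_struct *vma = find_vma(current->mm, hva);

                if (!vma || vma->vm_start >= reg_end)
                        break;

                if (vma->vm_flags & VM_PFNMAP) {
                        /* Dirty logging of I/O mappings is not supported. */
                        if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
                                return -EINVAL;
                }
                hva = vma->vm_end;
        } while (hva < reg_end);

        return 0;
}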