Lines matching refs: kvm (arm64 KVM stage-2 MMU code). Each entry shows the source line number, the matching line, the enclosing function, and whether kvm is a function argument or a local variable there.

42 static int stage2_apply_range(struct kvm *kvm, phys_addr_t addr,  in stage2_apply_range()  argument
51 struct kvm_pgtable *pgt = kvm->arch.mmu.pgt; in stage2_apply_range()
55 next = stage2_pgd_addr_end(kvm, addr, end); in stage2_apply_range()
61 cond_resched_lock(&kvm->mmu_lock); in stage2_apply_range()
67 #define stage2_apply_range_resched(kvm, addr, end, fn) \ argument
68 stage2_apply_range(kvm, addr, end, fn, true)
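    The matches above (source lines 42-68) outline the shared range walker: it splits [addr, end) at stage-2 PGD boundaries, invokes the supplied page-table callback on each chunk, and optionally yields mmu_lock between chunks; the resched variant is just the macro wrapper. The sketch below fills in the control flow around the matched lines; the local variables, the fn callback type, and the error handling are reconstructed assumptions and may differ between kernel versions.

    static int stage2_apply_range(struct kvm *kvm, phys_addr_t addr,
                                  phys_addr_t end,
                                  int (*fn)(struct kvm_pgtable *, u64, u64),
                                  bool resched)
    {
            int ret;
            u64 next;

            do {
                    /* Re-read the page table: it may be freed while we yield. */
                    struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
                    if (!pgt)
                            return -EINVAL;

                    /* Clamp this chunk to the next stage-2 PGD boundary. */
                    next = stage2_pgd_addr_end(kvm, addr, end);
                    ret = fn(pgt, addr, next - addr);
                    if (ret)
                            break;

                    /* Optionally drop and re-take mmu_lock between chunks. */
                    if (resched && next != end)
                            cond_resched_lock(&kvm->mmu_lock);
            } while (addr = next, addr != end);

            return ret;
    }
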
81 void kvm_flush_remote_tlbs(struct kvm *kvm) in kvm_flush_remote_tlbs() argument
83 ++kvm->stat.generic.remote_tlb_flush_requests; in kvm_flush_remote_tlbs()
84 kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu); in kvm_flush_remote_tlbs()
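    Source lines 81-84 show essentially the whole remote TLB flush helper: bump the generic stat and ask the hypervisor to invalidate the VMID's TLB entries. Putting the two matched lines back into their function body gives roughly:

    void kvm_flush_remote_tlbs(struct kvm *kvm)
    {
            ++kvm->stat.generic.remote_tlb_flush_requests;
            kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
    }
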
179 struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu); in __unmap_stage2_range() local
182 assert_spin_locked(&kvm->mmu_lock); in __unmap_stage2_range()
184 WARN_ON(stage2_apply_range(kvm, start, end, kvm_pgtable_stage2_unmap, in __unmap_stage2_range()
193 static void stage2_flush_memslot(struct kvm *kvm, in stage2_flush_memslot() argument
199 stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_flush); in stage2_flush_memslot()
209 static void stage2_flush_vm(struct kvm *kvm) in stage2_flush_vm() argument
215 idx = srcu_read_lock(&kvm->srcu); in stage2_flush_vm()
216 spin_lock(&kvm->mmu_lock); in stage2_flush_vm()
218 slots = kvm_memslots(kvm); in stage2_flush_vm()
220 stage2_flush_memslot(kvm, memslot); in stage2_flush_vm()
222 spin_unlock(&kvm->mmu_lock); in stage2_flush_vm()
223 srcu_read_unlock(&kvm->srcu, idx); in stage2_flush_vm()
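    Source lines 209-223 show the standard locking pattern for whole-VM stage-2 operations: hold the memslot SRCU read lock so the memslot array stays stable, take mmu_lock for the page-table walk, and iterate every memslot. A reconstruction, with the local declarations and the kvm_for_each_memslot() loop filled in as assumptions (the two-argument iterator is the form used in kernels of this vintage):

    static void stage2_flush_vm(struct kvm *kvm)
    {
            struct kvm_memslots *slots;
            struct kvm_memory_slot *memslot;
            int idx;

            idx = srcu_read_lock(&kvm->srcu);       /* pin the memslot array */
            spin_lock(&kvm->mmu_lock);

            slots = kvm_memslots(kvm);
            kvm_for_each_memslot(memslot, slots)
                    stage2_flush_memslot(kvm, memslot);

            spin_unlock(&kvm->mmu_lock);
            srcu_read_unlock(&kvm->srcu, idx);
    }
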
462 static int get_user_mapping_size(struct kvm *kvm, u64 addr) in get_user_mapping_size() argument
465 .pgd = (kvm_pte_t *)kvm->mm->pgd, in get_user_mapping_size()
505 int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu) in kvm_init_stage2_mmu() argument
519 err = kvm_pgtable_stage2_init(pgt, &kvm->arch, &kvm_s2_mm_ops); in kvm_init_stage2_mmu()
532 mmu->arch = &kvm->arch; in kvm_init_stage2_mmu()
545 static void stage2_unmap_memslot(struct kvm *kvm, in stage2_unmap_memslot() argument
581 unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start); in stage2_unmap_memslot()
594 void stage2_unmap_vm(struct kvm *kvm) in stage2_unmap_vm() argument
600 idx = srcu_read_lock(&kvm->srcu); in stage2_unmap_vm()
602 spin_lock(&kvm->mmu_lock); in stage2_unmap_vm()
604 slots = kvm_memslots(kvm); in stage2_unmap_vm()
606 stage2_unmap_memslot(kvm, memslot); in stage2_unmap_vm()
608 spin_unlock(&kvm->mmu_lock); in stage2_unmap_vm()
610 srcu_read_unlock(&kvm->srcu, idx); in stage2_unmap_vm()
615 struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu); in kvm_free_stage2_pgd() local
618 spin_lock(&kvm->mmu_lock); in kvm_free_stage2_pgd()
625 spin_unlock(&kvm->mmu_lock); in kvm_free_stage2_pgd()
642 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, in kvm_phys_addr_ioremap() argument
648 struct kvm_pgtable *pgt = kvm->arch.mmu.pgt; in kvm_phys_addr_ioremap()
658 kvm_mmu_cache_min_pages(kvm)); in kvm_phys_addr_ioremap()
662 spin_lock(&kvm->mmu_lock); in kvm_phys_addr_ioremap()
665 spin_unlock(&kvm->mmu_lock); in kvm_phys_addr_ioremap()
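    kvm_phys_addr_ioremap() (source lines 642-665) maps a device region into the guest IPA space page by page: each iteration tops up the caller's memory cache outside mmu_lock, then installs one PAGE_SIZE device mapping under the lock. A sketch reconstructed around the matched lines; the prot flags, cache initialisation, and alignment handling are filled in from memory and should be treated as assumptions.

    int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
                              phys_addr_t pa, unsigned long size, bool writable)
    {
            phys_addr_t addr;
            int ret = 0;
            struct kvm_mmu_memory_cache cache = { 0, __GFP_ZERO, NULL, };
            struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
            enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_DEVICE |
                                         KVM_PGTABLE_PROT_R |
                                         (writable ? KVM_PGTABLE_PROT_W : 0);

            size += offset_in_page(guest_ipa);
            guest_ipa &= PAGE_MASK;

            for (addr = guest_ipa; addr < guest_ipa + size; addr += PAGE_SIZE) {
                    /* Allocate page-table pages while sleeping is still allowed. */
                    ret = kvm_mmu_topup_memory_cache(&cache,
                                                     kvm_mmu_cache_min_pages(kvm));
                    if (ret)
                            break;

                    spin_lock(&kvm->mmu_lock);
                    ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot,
                                                 &cache);
                    spin_unlock(&kvm->mmu_lock);
                    if (ret)
                            break;

                    pa += PAGE_SIZE;
            }

            kvm_mmu_free_memory_cache(&cache);
            return ret;
    }
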
684 struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu); in stage2_wp_range() local
685 stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_wrprotect); in stage2_wp_range()
701 static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot) in kvm_mmu_wp_memory_region() argument
703 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_mmu_wp_memory_region()
713 spin_lock(&kvm->mmu_lock); in kvm_mmu_wp_memory_region()
714 stage2_wp_range(&kvm->arch.mmu, start, end); in kvm_mmu_wp_memory_region()
715 spin_unlock(&kvm->mmu_lock); in kvm_mmu_wp_memory_region()
716 kvm_flush_remote_tlbs(kvm); in kvm_mmu_wp_memory_region()
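    Write-protecting a whole memslot (source lines 701-716) follows the pattern visible above: look up the slot, write-protect its IPA range under mmu_lock, then flush remote TLBs so vCPUs fault on their next write and get logged as dirty. Reconstructed roughly as follows; the memslot lookup and range computation are assumptions filled in around the matched lines.

    static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
    {
            struct kvm_memslots *slots = kvm_memslots(kvm);
            struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
            phys_addr_t start, end;

            if (WARN_ON_ONCE(!memslot))
                    return;

            start = memslot->base_gfn << PAGE_SHIFT;
            end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

            spin_lock(&kvm->mmu_lock);
            stage2_wp_range(&kvm->arch.mmu, start, end);
            spin_unlock(&kvm->mmu_lock);
            kvm_flush_remote_tlbs(kvm);
    }
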
730 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, in kvm_mmu_write_protect_pt_masked() argument
738 stage2_wp_range(&kvm->arch.mmu, start, end); in kvm_mmu_write_protect_pt_masked()
748 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, in kvm_arch_mmu_enable_log_dirty_pt_masked() argument
752 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask); in kvm_arch_mmu_enable_log_dirty_pt_masked()
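    For incremental dirty logging (source lines 730-752), kvm_arch_mmu_enable_log_dirty_pt_masked() re-write-protects only the pages whose bits are set in the dirty mask. The mask covers at most 64 pages starting at gfn_offset within the slot, so the range can be bounded by the lowest and highest set bits. A sketch, with the __ffs()/__fls() arithmetic written from memory rather than taken from the matches:

    static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
                                                struct kvm_memory_slot *slot,
                                                gfn_t gfn_offset, unsigned long mask)
    {
            phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
            phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
            phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;

            /* Caller already holds mmu_lock. */
            stage2_wp_range(&kvm->arch.mmu, start, end);
    }
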
830 transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot, in transparent_hugepage_adjust() argument
842 get_user_mapping_size(kvm, hva) >= PMD_SIZE) { in transparent_hugepage_adjust()
914 static int sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn, in sanitise_mte_tags() argument
920 if (!kvm_has_mte(kvm)) in sanitise_mte_tags()
952 struct kvm *kvm = vcpu->kvm; in user_mem_abort() local
1038 kvm_mmu_cache_min_pages(kvm)); in user_mem_abort()
1043 mmu_seq = vcpu->kvm->mmu_notifier_seq; in user_mem_abort()
1091 spin_lock(&kvm->mmu_lock); in user_mem_abort()
1093 if (mmu_notifier_retry(kvm, mmu_seq)) in user_mem_abort()
1104 vma_pagesize = transparent_hugepage_adjust(kvm, memslot, in user_mem_abort()
1109 if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) { in user_mem_abort()
1112 ret = sanitise_mte_tags(kvm, pfn, vma_pagesize); in user_mem_abort()
1146 mark_page_dirty_in_slot(kvm, memslot, gfn); in user_mem_abort()
1150 spin_unlock(&kvm->mmu_lock); in user_mem_abort()
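    user_mem_abort() (source lines 952-1150) uses the usual MMU-notifier race protocol: snapshot kvm->mmu_notifier_seq before the unlocked HVA-to-pfn translation, then, after taking mmu_lock, back out if a notifier invalidation ran in between. A heavily condensed sketch of that portion only; everything between the matched lines (the pfn lookup, permission and huge-page handling, and the actual map call) is elided or assumed.

            /* Snapshot the notifier sequence before the unlocked pfn lookup. */
            mmu_seq = vcpu->kvm->mmu_notifier_seq;
            smp_rmb();

            /* ... translate hva to pfn without holding mmu_lock ... */

            spin_lock(&kvm->mmu_lock);
            if (mmu_notifier_retry(kvm, mmu_seq))
                    goto out_unlock;        /* an invalidation raced with us: let the guest refault */

            if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {
                    /* Sanitise MTE tags before the page becomes guest-accessible. */
                    ret = sanitise_mte_tags(kvm, pfn, vma_pagesize);
                    if (ret)
                            goto out_unlock;
            }

            /* ... install the stage-2 mapping ... */
            if (writable)
                    mark_page_dirty_in_slot(kvm, memslot, gfn);

    out_unlock:
            spin_unlock(&kvm->mmu_lock);
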
1165 spin_lock(&vcpu->kvm->mmu_lock); in handle_access_fault()
1168 spin_unlock(&vcpu->kvm->mmu_lock); in handle_access_fault()
1226 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_handle_guest_abort()
1229 memslot = gfn_to_memslot(vcpu->kvm, gfn); in kvm_handle_guest_abort()
1278 VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm)); in kvm_handle_guest_abort()
1295 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_handle_guest_abort()
1299 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_unmap_gfn_range() argument
1301 if (!kvm->arch.mmu.pgt) in kvm_unmap_gfn_range()
1304 __unmap_stage2_range(&kvm->arch.mmu, range->start << PAGE_SHIFT, in kvm_unmap_gfn_range()
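    The MMU-notifier callbacks (source lines 1299 onward) all convert the gfn range they are given into an IPA range by shifting by PAGE_SHIFT, and bail out early if the stage-2 page table has already been torn down. kvm_unmap_gfn_range() reconstructed around the matched lines; the may_block argument and the return value are filled in from memory:

    bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
    {
            if (!kvm->arch.mmu.pgt)
                    return false;

            __unmap_stage2_range(&kvm->arch.mmu, range->start << PAGE_SHIFT,
                                 (range->end - range->start) << PAGE_SHIFT,
                                 range->may_block);

            return false;
    }
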
1311 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_set_spte_gfn() argument
1316 if (!kvm->arch.mmu.pgt) in kvm_set_spte_gfn()
1321 ret = sanitise_mte_tags(kvm, pfn, PAGE_SIZE); in kvm_set_spte_gfn()
1335 kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT, in kvm_set_spte_gfn()
1342 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_age_gfn() argument
1348 if (!kvm->arch.mmu.pgt) in kvm_age_gfn()
1353 kpte = kvm_pgtable_stage2_mkold(kvm->arch.mmu.pgt, in kvm_age_gfn()
1359 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_test_age_gfn() argument
1361 if (!kvm->arch.mmu.pgt) in kvm_test_age_gfn()
1364 return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt, in kvm_test_age_gfn()
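    The two ageing callbacks (source lines 1342-1364) are thin wrappers around the pgtable API: kvm_age_gfn() clears the access flag and reports whether it was set, while kvm_test_age_gfn() only tests it. Reconstructed roughly below; the kvm_pte_t-to-pte conversion in kvm_age_gfn() is an assumption.

    bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
    {
            kvm_pte_t kpte;
            pte_t pte;

            if (!kvm->arch.mmu.pgt)
                    return false;

            /* Clear the access flag and report whether the page was young. */
            kpte = kvm_pgtable_stage2_mkold(kvm->arch.mmu.pgt,
                                            range->start << PAGE_SHIFT);
            pte = __pte(kpte);
            return pte_valid(pte) && pte_young(pte);
    }

    bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
    {
            if (!kvm->arch.mmu.pgt)
                    return false;

            return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt,
                                               range->start << PAGE_SHIFT);
    }
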
1465 void kvm_arch_commit_memory_region(struct kvm *kvm, in kvm_arch_commit_memory_region() argument
1482 if (!kvm_dirty_log_manual_protect_and_init_set(kvm)) { in kvm_arch_commit_memory_region()
1483 kvm_mmu_wp_memory_region(kvm, mem->slot); in kvm_arch_commit_memory_region()
1488 int kvm_arch_prepare_memory_region(struct kvm *kvm, in kvm_arch_prepare_memory_region() argument
1505 if ((memslot->base_gfn + memslot->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT)) in kvm_arch_prepare_memory_region()
1532 if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED) { in kvm_arch_prepare_memory_region()
1551 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) in kvm_arch_free_memslot() argument
1555 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) in kvm_arch_memslots_updated() argument
1559 void kvm_arch_flush_shadow_all(struct kvm *kvm) in kvm_arch_flush_shadow_all() argument
1561 kvm_free_stage2_pgd(&kvm->arch.mmu); in kvm_arch_flush_shadow_all()
1564 void kvm_arch_flush_shadow_memslot(struct kvm *kvm, in kvm_arch_flush_shadow_memslot() argument
1570 spin_lock(&kvm->mmu_lock); in kvm_arch_flush_shadow_memslot()
1571 unmap_stage2_range(&kvm->arch.mmu, gpa, size); in kvm_arch_flush_shadow_memslot()
1572 spin_unlock(&kvm->mmu_lock); in kvm_arch_flush_shadow_memslot()
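    Finally, tearing down a memslot (source lines 1564-1572) unmaps its whole IPA range under mmu_lock, while kvm_arch_flush_shadow_all() above frees the entire stage-2 page table. A reconstruction of the memslot variant; the gpa/size computation is filled in as an assumption:

    void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                       struct kvm_memory_slot *slot)
    {
            gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
            phys_addr_t size = slot->npages << PAGE_SHIFT;

            spin_lock(&kvm->mmu_lock);
            unmap_stage2_range(&kvm->arch.mmu, gpa, size);
            spin_unlock(&kvm->mmu_lock);
    }
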
1619 stage2_flush_vm(vcpu->kvm); in kvm_set_way_flush()
1634 stage2_flush_vm(vcpu->kvm); in kvm_toggle_cache()