Lines matching refs:pgt (uses of the struct kvm_pgtable handle, arch/arm64/kvm/hyp/pgtable.c)

53 	struct kvm_pgtable		*pgt;  member
91 static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr) in __kvm_pgd_page_idx() argument
93 u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */ in __kvm_pgd_page_idx()
94 u64 mask = BIT(pgt->ia_bits) - 1; in __kvm_pgd_page_idx()
101 return __kvm_pgd_page_idx(data->pgt, data->addr); in kvm_pgd_page_idx()
106 struct kvm_pgtable pgt = { in kvm_pgd_pages() local
111 return __kvm_pgd_page_idx(&pgt, -1ULL) + 1; in kvm_pgd_pages()
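The three hits above are the concatenated-PGD arithmetic: which PGD page an address indexes into, and the page count obtained by indexing the highest input address (-1ULL). A minimal standalone sketch of the same arithmetic, assuming 4KiB granules and the usual four-level layout; every identifier below is a local stand-in, not the kernel's:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12	/* 4KiB granule assumed */
#define MAX_LEVELS	4

/* Address bits resolved below one entry at @level: 39/30/21/12 for 0..3. */
static unsigned int granule_shift(int level)
{
	return (PAGE_SHIFT - 3) * (MAX_LEVELS - level) + 3;
}

/* Index of @addr among the concatenated PGD pages. */
static uint64_t pgd_page_idx(uint32_t ia_bits, int start_level, uint64_t addr)
{
	unsigned int shift = granule_shift(start_level - 1);
	uint64_t mask = (UINT64_C(1) << ia_bits) - 1;

	return (addr & mask) >> shift;
}

int main(void)
{
	/* A 40-bit IPA space starting at level 1 needs two PGD pages. */
	printf("%llu pages\n",
	       (unsigned long long)pgd_page_idx(40, 1, -1ULL) + 1);
	return 0;
}

The "May underflow" comment covers start_level == 0: the computed shift then reaches or exceeds ia_bits, the index collapses to 0, and a single PGD page suffices.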
216 childp = kvm_pte_follow(pte, data->pgt->mm_ops); in __kvm_pgtable_visit()
257 struct kvm_pgtable *pgt = data->pgt; in _kvm_pgtable_walk() local
258 u64 limit = BIT(pgt->ia_bits); in _kvm_pgtable_walk()
263 if (!pgt->pgd) in _kvm_pgtable_walk()
267 kvm_pte_t *ptep = &pgt->pgd[idx * PTRS_PER_PTE]; in _kvm_pgtable_walk()
269 ret = __kvm_pgtable_walk(data, ptep, pgt->start_level); in _kvm_pgtable_walk()
277 int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size, in kvm_pgtable_walk() argument
281 .pgt = pgt, in kvm_pgtable_walk()
306 int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr, in kvm_pgtable_get_leaf() argument
317 ret = kvm_pgtable_walk(pgt, ALIGN_DOWN(addr, PAGE_SIZE), in kvm_pgtable_get_leaf()
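kvm_pgtable_walk() is the engine everything else in this file is built on: a caller packages a visitor into struct kvm_pgtable_walker and the core invokes it for the entries selected by .flags; kvm_pgtable_get_leaf() is itself just a one-page walk that captures the leaf it visits. A sketch of a walker client, kernel context, assuming the visitor signature this generation of <asm/kvm_pgtable.h> uses:

#include <asm/kvm_pgtable.h>

/* Visitor: invoked for every non-table entry in the range, valid or not. */
static int count_leaf_visitor(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			      enum kvm_pgtable_walk_flags flag,
			      void * const arg)
{
	u64 *count = arg;

	if (kvm_pte_valid(*ptep))
		(*count)++;

	return 0;
}

/* Count the valid leaf mappings covering [addr, addr + size). */
static int count_valid_leaves(struct kvm_pgtable *pgt, u64 addr, u64 size,
			      u64 *count)
{
	struct kvm_pgtable_walker walker = {
		.cb	= count_leaf_visitor,
		.arg	= count,
		.flags	= KVM_PGTABLE_WALK_LEAF,
	};

	*count = 0;
	return kvm_pgtable_walk(pgt, addr, size, &walker);
}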
439 int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, in kvm_pgtable_hyp_map() argument
445 .mm_ops = pgt->mm_ops, in kvm_pgtable_hyp_map()
457 ret = kvm_pgtable_walk(pgt, addr, size, &walker); in kvm_pgtable_hyp_map()
463 int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits, in kvm_pgtable_hyp_init() argument
468 pgt->pgd = (kvm_pte_t *)mm_ops->zalloc_page(NULL); in kvm_pgtable_hyp_init()
469 if (!pgt->pgd) in kvm_pgtable_hyp_init()
472 pgt->ia_bits = va_bits; in kvm_pgtable_hyp_init()
473 pgt->start_level = KVM_PGTABLE_MAX_LEVELS - levels; in kvm_pgtable_hyp_init()
474 pgt->mm_ops = mm_ops; in kvm_pgtable_hyp_init()
475 pgt->mmu = NULL; in kvm_pgtable_hyp_init()
476 pgt->force_pte_cb = NULL; in kvm_pgtable_hyp_init()
490 void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt) in kvm_pgtable_hyp_destroy() argument
495 .arg = pgt->mm_ops, in kvm_pgtable_hyp_destroy()
498 WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker)); in kvm_pgtable_hyp_destroy()
499 pgt->mm_ops->put_page(pgt->pgd); in kvm_pgtable_hyp_destroy()
500 pgt->pgd = NULL; in kvm_pgtable_hyp_destroy()
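Together the hyp entry points form a simple lifecycle: init allocates one zeroed PGD page and records the geometry, map populates it (allocating intermediate levels through mm_ops), and destroy walks the table handing every page back before dropping the PGD. A sketch of that lifecycle, kernel context; the mm_ops instance, the addresses, and the 48-bit VA width are placeholders:

#include <asm/kvm_pgtable.h>

/* Build a private hyp table, map one range read/write, then tear it down. */
static int hyp_table_example(struct kvm_pgtable_mm_ops *mm_ops,
			     u64 va, u64 pa, u64 size)
{
	struct kvm_pgtable pgt;
	int ret;

	ret = kvm_pgtable_hyp_init(&pgt, 48, mm_ops);
	if (ret)
		return ret;

	ret = kvm_pgtable_hyp_map(&pgt, va, size, pa,
				  KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W);

	/* Unconditional teardown for the example: frees every table page. */
	kvm_pgtable_hyp_destroy(&pgt);
	return ret;
}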
551 static bool stage2_has_fwb(struct kvm_pgtable *pgt) in stage2_has_fwb() argument
556 return !(pgt->flags & KVM_PGTABLE_S2_NOFWB); in stage2_has_fwb()
559 #define KVM_S2_MEMATTR(pgt, attr) PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt)) argument
561 static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot, in stage2_set_prot_attr() argument
565 kvm_pte_t attr = device ? KVM_S2_MEMATTR(pgt, DEVICE_nGnRE) : in stage2_set_prot_attr()
566 KVM_S2_MEMATTR(pgt, NORMAL); in stage2_set_prot_attr()
638 static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte) in stage2_pte_cacheable() argument
641 return memattr == KVM_S2_MEMATTR(pgt, NORMAL); in stage2_pte_cacheable()
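This cluster keys stage-2 cacheability on FEAT_S2FWB: unless the table was created with KVM_PGTABLE_S2_NOFWB, the hardware forces the combined attribute to write-back cacheable, and the cache maintenance guarded by stage2_pte_cacheable() elsewhere in the file becomes unnecessary. A sketch of the comparison behind stage2_pte_cacheable(); the mask is written out by hand here and mirrors descriptor bits 5:2, KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR in this file:

#include <linux/bits.h>
#include <linux/types.h>

/* Stage-2 leaf descriptors keep MemAttr[3:0] in bits 5:2. */
#define S2_MEMATTR_MASK		GENMASK(5, 2)

/*
 * Does @pte carry the encoding this table uses for Normal memory?
 * @normal is whatever KVM_S2_MEMATTR(pgt, NORMAL) evaluated to at map
 * time; the FWB and non-FWB layouts use different encodings.
 */
static bool pte_memattr_is_normal(u64 pte, u64 normal)
{
	return (pte & S2_MEMATTR_MASK) == (normal & S2_MEMATTR_MASK);
}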
664 struct kvm_pgtable *pgt = data->mmu->pgt; in stage2_map_walker_try_leaf() local
689 if (mm_ops->dcache_clean_inval_poc && stage2_pte_cacheable(pgt, new)) in stage2_map_walker_try_leaf()
831 int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, in kvm_pgtable_stage2_map() argument
838 .mmu = pgt->mmu, in kvm_pgtable_stage2_map()
840 .mm_ops = pgt->mm_ops, in kvm_pgtable_stage2_map()
841 .force_pte = pgt->force_pte_cb && pgt->force_pte_cb(addr, addr + size, prot), in kvm_pgtable_stage2_map()
851 if (WARN_ON((pgt->flags & KVM_PGTABLE_S2_IDMAP) && (addr != phys))) in kvm_pgtable_stage2_map()
854 ret = stage2_set_prot_attr(pgt, prot, &map_data.attr); in kvm_pgtable_stage2_map()
858 ret = kvm_pgtable_walk(pgt, addr, size, &walker); in kvm_pgtable_stage2_map()
863 int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size, in kvm_pgtable_stage2_set_owner() argument
869 .mmu = pgt->mmu, in kvm_pgtable_stage2_set_owner()
871 .mm_ops = pgt->mm_ops, in kvm_pgtable_stage2_set_owner()
886 ret = kvm_pgtable_walk(pgt, addr, size, &walker); in kvm_pgtable_stage2_set_owner()
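kvm_pgtable_stage2_map() wires the map visitor into the same walker: force_pte_cb (line 841) lets the owner veto block mappings, KVM_PGTABLE_S2_IDMAP insists on addr == phys (line 851), and freshly mapped cacheable leaves are cleaned to the PoC when FWB is absent (line 689). A usage sketch, kernel context; the memcache is assumed to have been topped up beforehand so the walker never allocates under the mmu lock:

#include <asm/kvm_pgtable.h>

/* Install one writable page at guest address @gpa backed by physical @pa. */
static int map_one_page(struct kvm_pgtable *pgt, u64 gpa, u64 pa, void *mc)
{
	return kvm_pgtable_stage2_map(pgt, gpa, PAGE_SIZE, pa,
				      KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W,
				      mc);
}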
894 struct kvm_pgtable *pgt = arg; in stage2_unmap_walker() local
895 struct kvm_s2_mmu *mmu = pgt->mmu; in stage2_unmap_walker()
896 struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops; in stage2_unmap_walker()
913 } else if (stage2_pte_cacheable(pgt, pte)) { in stage2_unmap_walker()
914 need_flush = !stage2_has_fwb(pgt); in stage2_unmap_walker()
938 int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size) in kvm_pgtable_stage2_unmap() argument
942 .arg = pgt, in kvm_pgtable_stage2_unmap()
946 return kvm_pgtable_walk(pgt, addr, size, &walker); in kvm_pgtable_stage2_unmap()
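The unmap walker is a break-before-make teardown: zap the entry, invalidate the TLB by IPA, and only on non-FWB hardware (line 914) clean+invalidate the old cacheable page to the PoC before its memory can be recycled. A condensed sketch of that ordering; it mirrors this file's internal helpers rather than any exported API:

#include <asm/kvm_host.h>
#include <asm/kvm_pgtable.h>

/* Tear down one leaf: @virt is the kernel alias of the mapped page. */
static void zap_leaf_sketch(struct kvm_s2_mmu *mmu, kvm_pte_t *ptep, u64 addr,
			    u32 level, bool need_flush, void *virt,
			    struct kvm_pgtable_mm_ops *mm_ops)
{
	WRITE_ONCE(*ptep, 0);					/* break */
	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, addr, level);

	if (need_flush)						/* no FWB */
		mm_ops->dcache_clean_inval_poc(virt, kvm_granule_size(level));

	mm_ops->put_page(ptep);		/* drop the table page's refcount */
}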
993 static int stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr, in stage2_update_leaf_attrs() argument
1003 .mm_ops = pgt->mm_ops, in stage2_update_leaf_attrs()
1011 ret = kvm_pgtable_walk(pgt, addr, size, &walker); in stage2_update_leaf_attrs()
1023 int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size) in kvm_pgtable_stage2_wrprotect() argument
1025 return stage2_update_leaf_attrs(pgt, addr, size, 0, in kvm_pgtable_stage2_wrprotect()
1030 kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr) in kvm_pgtable_stage2_mkyoung() argument
1033 stage2_update_leaf_attrs(pgt, addr, 1, KVM_PTE_LEAF_ATTR_LO_S2_AF, 0, in kvm_pgtable_stage2_mkyoung()
1039 kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr) in kvm_pgtable_stage2_mkold() argument
1042 stage2_update_leaf_attrs(pgt, addr, 1, 0, KVM_PTE_LEAF_ATTR_LO_S2_AF, in kvm_pgtable_stage2_mkold()
1053 bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr) in kvm_pgtable_stage2_is_young() argument
1056 stage2_update_leaf_attrs(pgt, addr, 1, 0, 0, &pte, NULL); in kvm_pgtable_stage2_is_young()
1060 int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, in kvm_pgtable_stage2_relax_perms() argument
1079 ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level); in kvm_pgtable_stage2_relax_perms()
1081 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, pgt->mmu, addr, level); in kvm_pgtable_stage2_relax_perms()
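Every helper in this run funnels through stage2_update_leaf_attrs(), which sets and clears attribute bits on the single leaf covering the address; only relax_perms follows its update with an immediate TLBI (line 1081), since write-protection relies on the caller's batched flush and the access-flag helpers can tolerate a stale TLB. A sketch of how the AF pair combines for page aging, kernel context with locking elided:

#include <asm/kvm_pgtable.h>

/* Report whether @gpa was touched since last polled, and rearm the AF. */
static bool age_one_page(struct kvm_pgtable *pgt, u64 gpa)
{
	bool young = kvm_pgtable_stage2_is_young(pgt, gpa);

	if (young)
		kvm_pgtable_stage2_mkold(pgt, gpa);	/* clear the AF */

	return young;
}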
1089 struct kvm_pgtable *pgt = arg; in stage2_flush_walker() local
1090 struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops; in stage2_flush_walker()
1094 if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pgt, pte)) in stage2_flush_walker()
1104 int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size) in kvm_pgtable_stage2_flush() argument
1109 .arg = pgt, in kvm_pgtable_stage2_flush()
1112 if (stage2_has_fwb(pgt)) in kvm_pgtable_stage2_flush()
1115 return kvm_pgtable_walk(pgt, addr, size, &walker); in kvm_pgtable_stage2_flush()
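The flush walker clean+invalidates every cacheable leaf in the range to the PoC, and line 1112 is the FWB short-circuit: when hardware forces write-back, the whole walk is skipped. A trivial usage sketch, kernel context:

#include <asm/kvm_pgtable.h>

/*
 * Make a guest range clean to the PoC, e.g. before it first runs as code;
 * returns immediately (0) on FWB-capable CPUs.
 */
static int clean_guest_range(struct kvm_pgtable *pgt, u64 gpa, u64 size)
{
	return kvm_pgtable_stage2_flush(pgt, gpa, size);
}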
1119 int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch, in __kvm_pgtable_stage2_init() argument
1131 pgt->pgd = mm_ops->zalloc_pages_exact(pgd_sz); in __kvm_pgtable_stage2_init()
1132 if (!pgt->pgd) in __kvm_pgtable_stage2_init()
1135 pgt->ia_bits = ia_bits; in __kvm_pgtable_stage2_init()
1136 pgt->start_level = start_level; in __kvm_pgtable_stage2_init()
1137 pgt->mm_ops = mm_ops; in __kvm_pgtable_stage2_init()
1138 pgt->mmu = &arch->mmu; in __kvm_pgtable_stage2_init()
1139 pgt->flags = flags; in __kvm_pgtable_stage2_init()
1140 pgt->force_pte_cb = force_pte_cb; in __kvm_pgtable_stage2_init()
1165 void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt) in kvm_pgtable_stage2_destroy() argument
1172 .arg = pgt->mm_ops, in kvm_pgtable_stage2_destroy()
1175 WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker)); in kvm_pgtable_stage2_destroy()
1176 pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE; in kvm_pgtable_stage2_destroy()
1177 pgt->mm_ops->free_pages_exact(pgt->pgd, pgd_sz); in kvm_pgtable_stage2_destroy()
1178 pgt->pgd = NULL; in kvm_pgtable_stage2_destroy()
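Stage-2 init mirrors the hyp variant but sizes a possibly multi-page concatenated PGD (kvm_pgd_pages() again at line 1176) and binds the table to the VM's mmu, with the geometry decoded from the VTCR carried in @arch. A lifecycle sketch, kernel context, assuming the flag-less kvm_pgtable_stage2_init() wrapper this tree defines over __kvm_pgtable_stage2_init():

#include <linux/slab.h>
#include <asm/kvm_pgtable.h>

/* Bring up a VM's stage-2 table, use it, and tear it down. */
static int stage2_table_example(struct kvm_arch *arch,
				struct kvm_pgtable_mm_ops *mm_ops)
{
	struct kvm_pgtable *pgt = kzalloc(sizeof(*pgt), GFP_KERNEL);
	int ret;

	if (!pgt)
		return -ENOMEM;

	ret = kvm_pgtable_stage2_init(pgt, arch, mm_ops);
	if (!ret) {
		/* ... kvm_pgtable_stage2_map()/unmap() while the VM runs ... */
		kvm_pgtable_stage2_destroy(pgt);  /* frees every level + PGD */
	}

	kfree(pgt);
	return ret;
}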