Lines matching references to r
123 struct vm_region *r; in get_num_req_pgts() local
126 r = TAILQ_LAST(&uctx->vm_info.regions, vm_region_head); in get_num_req_pgts()
127 e = r->va + r->size; in get_num_req_pgts()
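Every line in this listing manipulates the same data structure: a TAILQ of vm_region entries, kept sorted by virtual address, hanging off uctx->vm_info. Below is a minimal sketch of that layout as implied by the listed lines (field names taken from the listing; the real structures carry more fields and the types here are illustrative), using the standard BSD <sys/queue.h> macros. Later sketches in this section reuse these definitions.

#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>

typedef uintptr_t vaddr_t;             /* as used throughout the listing */

struct mobj;                           /* backing memory object, opaque here */

struct vm_region {
        struct mobj *mobj;             /* backing object of this mapping */
        size_t offset;                 /* offset of va into the mobj */
        vaddr_t va;                    /* start of the mapped range */
        size_t size;                   /* length of the mapped range */
        uint32_t attr;                 /* TEE_MATTR_* permission/cache bits */
        uint32_t flags;                /* VM_FLAG_* bookkeeping bits */
        TAILQ_ENTRY(vm_region) link;   /* list linkage, kept sorted by va */
};

TAILQ_HEAD(vm_region_head, vm_region);

struct vm_info {
        struct vm_region_head regions; /* regions in ascending va order */
};

With that layout, lines 126-127 fetch the highest region with TAILQ_LAST() and take r->va + r->size as the end of the mapped address space.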
166 static void rem_um_region(struct user_mode_ctx *uctx, struct vm_region *r) in rem_um_region() argument
170 vaddr_t begin = ROUNDDOWN(r->va, CORE_MMU_PGDIR_SIZE); in rem_um_region()
171 vaddr_t last = ROUNDUP(r->va + r->size, CORE_MMU_PGDIR_SIZE); in rem_um_region()
177 if (mobj_is_paged(r->mobj)) { in rem_um_region()
178 tee_pager_rem_um_region(uctx, r->va, r->size); in rem_um_region()
180 pgt_clear_ctx_range(pgt_cache, uctx->ts_ctx, r->va, in rem_um_region()
181 r->va + r->size); in rem_um_region()
182 tlbi_mva_range_asid(r->va, r->size, SMALL_PAGE_SIZE, in rem_um_region()
186 r2 = TAILQ_NEXT(r, link); in rem_um_region()
190 r2 = TAILQ_PREV(r, vm_region_head, link); in rem_um_region()
199 pgt_flush_ctx_range(pgt_cache, uctx->ts_ctx, r->va, r->va + r->size); in rem_um_region()
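Lines 170-171 widen the region to CORE_MMU_PGDIR_SIZE boundaries so that every translation-table directory touched by the region is covered by the subsequent clearing and TLB invalidation. A small illustration of that rounding, assuming the usual power-of-two ROUNDDOWN/ROUNDUP macros and an invented 2 MiB directory size (the real value depends on the configured granule):

#include <stddef.h>
#include <stdint.h>

typedef uintptr_t vaddr_t;

#define ROUNDDOWN(x, y) ((x) & ~((y) - 1))              /* y: power of two */
#define ROUNDUP(x, y)   (((x) + (y) - 1) & ~((y) - 1))

#define PGDIR_SIZE      0x200000UL     /* illustrative only, 2 MiB */

static size_t pgdirs_spanned(vaddr_t va, size_t size)
{
        vaddr_t begin = ROUNDDOWN(va, PGDIR_SIZE);
        vaddr_t last = ROUNDUP(va + size, PGDIR_SIZE);

        /* E.g. va 0x203000, size 0x207000 (end 0x40a000): begin becomes
         * 0x200000, last becomes 0x600000, so two directories are spanned. */
        return (last - begin) / PGDIR_SIZE;
}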
208 struct vm_region *r = NULL; in umap_add_region() local
235 TAILQ_FOREACH(r, &vmi->regions, link) { in umap_add_region()
236 va = select_va_in_range(prev_r, r, reg, pad_begin, pad_end, in umap_add_region()
240 TAILQ_INSERT_BEFORE(r, reg, link); in umap_add_region()
243 prev_r = r; in umap_add_region()
246 r = TAILQ_LAST(&vmi->regions, vm_region_head); in umap_add_region()
247 if (!r) in umap_add_region()
248 r = &dummy_first_reg; in umap_add_region()
249 va = select_va_in_range(r, &dummy_last_reg, reg, pad_begin, pad_end, in umap_add_region()
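umap_add_region() keeps the list sorted: it walks the existing regions and inserts the new one in front of the first region it can fit before (line 240), tracking the previous region as it goes; if no interior gap fits, it retries against the tail and a dummy last sentinel (lines 246-249). A simplified sketch of that pattern, reusing the struct from the first sketch. fits_before() is a hypothetical stand-in for select_va_in_range(), which in the real code also handles padding, alignment and caller-requested addresses; the dummy sentinels stand for regions whose va/size delimit the usable address space.

#include <stdbool.h>

static bool fits_before(struct vm_region *prev, struct vm_region *next,
                        struct vm_region *reg)
{
        vaddr_t gap_start = prev->va + prev->size;

        if (next->va < gap_start || next->va - gap_start < reg->size)
                return false;
        reg->va = gap_start;           /* place reg at the start of the gap */
        return true;
}

static bool add_region_sorted(struct vm_region_head *regions,
                              struct vm_region *dummy_first,
                              struct vm_region *dummy_last,
                              struct vm_region *reg)
{
        struct vm_region *prev = dummy_first;
        struct vm_region *r = NULL;

        TAILQ_FOREACH(r, regions, link) {
                if (fits_before(prev, r, reg)) {
                        TAILQ_INSERT_BEFORE(r, reg, link);
                        return true;
                }
                prev = r;
        }

        /* No interior gap was large enough: try after the last region. */
        r = TAILQ_LAST(regions, vm_region_head);
        if (!r)
                r = dummy_first;
        if (fits_before(r, dummy_last, reg)) {
                TAILQ_INSERT_TAIL(regions, reg, link);
                return true;
        }
        return false;
}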
339 struct vm_region *r = NULL; in find_vm_region() local
341 TAILQ_FOREACH(r, &vm_info->regions, link) in find_vm_region()
342 if (va >= r->va && va < r->va + r->size) in find_vm_region()
343 return r; in find_vm_region()
351 const struct vm_region *r, in va_range_is_contiguous() argument
354 struct vm_region *r = r0; in va_range_is_contiguous() local
361 struct vm_region *r_next = TAILQ_NEXT(r, link); in va_range_is_contiguous()
362 vaddr_t r_end_va = r->va + r->size; in va_range_is_contiguous()
370 if (cmp_regs && !cmp_regs(r0, r, r_next)) in va_range_is_contiguous()
372 r = r_next; in va_range_is_contiguous()
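va_range_is_contiguous() answers whether [va, va + len) is fully covered by regions that sit back to back, starting from r0; the optional cmp_regs callback lets callers additionally require that neighbouring regions be compatible (same flags, same protection, and so on). An approximate reconstruction of the loop, consistent with lines 361-372 and reusing the earlier struct sketch; the real function also guards the va + len addition against overflow.

#include <stdbool.h>

static bool range_is_contiguous(struct vm_region *r0, vaddr_t va, size_t len,
                                bool (*cmp_regs)(const struct vm_region *r0,
                                                 const struct vm_region *r,
                                                 const struct vm_region *rn))
{
        struct vm_region *r = r0;
        vaddr_t end_va = va + len;

        for (;;) {
                struct vm_region *r_next = TAILQ_NEXT(r, link);
                vaddr_t r_end_va = r->va + r->size;

                if (r_end_va >= end_va)
                        return true;   /* range fully covered */
                if (!r_next)
                        return false;  /* ran off the end of the list */
                if (r_end_va != r_next->va)
                        return false;  /* hole between r and r_next */
                if (cmp_regs && !cmp_regs(r0, r, r_next))
                        return false;  /* caller rejects this pair */
                r = r_next;
        }
}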
377 struct vm_region *r, vaddr_t va) in split_vm_region() argument
380 size_t diff = va - r->va; in split_vm_region()
382 assert(diff && diff < r->size); in split_vm_region()
388 if (mobj_is_paged(r->mobj)) { in split_vm_region()
397 r2->mobj = mobj_get(r->mobj); in split_vm_region()
398 r2->offset = r->offset + diff; in split_vm_region()
400 r2->size = r->size - diff; in split_vm_region()
401 r2->attr = r->attr; in split_vm_region()
402 r2->flags = r->flags; in split_vm_region()
404 r->size = diff; in split_vm_region()
406 TAILQ_INSERT_AFTER(&uctx->vm_info.regions, r, r2, link); in split_vm_region()
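split_vm_region() cuts a region in two at va: r keeps [r->va, va) and a new region r2 takes the remainder, with its offset advanced by the same amount so both halves still reference the right part of the mobj (lines 397-404). A sketch of just the arithmetic and list surgery, reusing the earlier struct; calloc() stands in for the real allocator, and the mobj reference counting and pager handling are reduced to comments.

#include <stdbool.h>
#include <stdlib.h>

static bool split_region(struct vm_region_head *regions,
                         struct vm_region *r, vaddr_t va)
{
        struct vm_region *r2 = NULL;
        size_t diff = va - r->va;

        if (!diff || diff >= r->size)
                return false;          /* va must fall strictly inside r */

        r2 = calloc(1, sizeof(*r2));
        if (!r2)
                return false;

        r2->mobj = r->mobj;            /* the real code takes a new reference */
        r2->offset = r->offset + diff; /* second half starts diff further in */
        r2->va = va;
        r2->size = r->size - diff;
        r2->attr = r->attr;
        r2->flags = r->flags;

        r->size = diff;                /* first half now ends where r2 begins */

        TAILQ_INSERT_AFTER(regions, r, r2, link);
        return true;
}

split_vm_range() (lines 440-452) applies this twice, once at va and once at va + len, so that the range boundaries always coincide with region boundaries before unmapping or remapping.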
414 const struct vm_region *r, in split_vm_range() argument
419 struct vm_region *r = NULL; in split_vm_range() local
432 r = find_vm_region(&uctx->vm_info, va); in split_vm_range()
433 if (!r || !va_range_is_contiguous(r, va, len, cmp_regs)) in split_vm_range()
440 if (va != r->va) { in split_vm_range()
441 res = split_vm_region(uctx, r, va); in split_vm_range()
444 r = TAILQ_NEXT(r, link); in split_vm_range()
447 *r0_ret = r; in split_vm_range()
448 r = find_vm_region(&uctx->vm_info, va + len - 1); in split_vm_range()
449 if (!r) in split_vm_range()
451 if (end_va != r->va + r->size) { in split_vm_range()
452 res = split_vm_region(uctx, r, end_va); in split_vm_range()
463 struct vm_region *r = NULL; in merge_vm_range() local
471 for (r = TAILQ_FIRST(&uctx->vm_info.regions);; r = r_next) { in merge_vm_range()
472 r_next = TAILQ_NEXT(r, link); in merge_vm_range()
477 if (r->va + r->size < va) in merge_vm_range()
485 if (r->va > end_va) in merge_vm_range()
488 if (r->va + r->size != r_next->va) in merge_vm_range()
490 if (r->mobj != r_next->mobj || in merge_vm_range()
491 r->flags != r_next->flags || in merge_vm_range()
492 r->attr != r_next->attr) in merge_vm_range()
494 if (r->offset + r->size != r_next->offset) in merge_vm_range()
498 r->size += r_next->size; in merge_vm_range()
501 r_next = r; in merge_vm_range()
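merge_vm_range() is the inverse operation: it folds a region into its successor when the two are virtually adjacent (line 488), share mobj, flags and attr (lines 490-492), and are also adjacent inside the mobj (line 494). A sketch of that test and the fold, reusing the earlier struct; the real code additionally drops the absorbed region's mobj reference.

#include <stdbool.h>
#include <stdlib.h>

static bool try_merge(struct vm_region_head *regions, struct vm_region *r,
                      struct vm_region *r_next)
{
        if (r->va + r->size != r_next->va)
                return false;          /* not virtually adjacent */
        if (r->mobj != r_next->mobj || r->flags != r_next->flags ||
            r->attr != r_next->attr)
                return false;          /* different mapping properties */
        if (r->offset + r->size != r_next->offset)
                return false;          /* not adjacent within the mobj */

        r->size += r_next->size;       /* r now covers both ranges */
        TAILQ_REMOVE(regions, r_next, link);
        free(r_next);
        return true;
}

Note line 501 in the listing: after a successful merge, r_next is reset to r so the loop immediately re-tests the enlarged region against its new neighbour.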
506 const struct vm_region *r, in cmp_region_for_remap()
516 return r0->flags == r->flags && r0->attr == r->attr && in cmp_region_for_remap()
517 r0->mobj == r->mobj && rn->offset == r->offset + r->size; in cmp_region_for_remap()
526 struct vm_region *r = NULL; in vm_remap() local
548 for (r = r0; r; r = r_next) { in vm_remap()
549 if (r->va + r->size > old_va + len) in vm_remap()
551 r_next = TAILQ_NEXT(r, link); in vm_remap()
552 rem_um_region(uctx, r); in vm_remap()
553 TAILQ_REMOVE(&uctx->vm_info.regions, r, link); in vm_remap()
554 TAILQ_INSERT_TAIL(&regs, r, link); in vm_remap()
565 r = TAILQ_FIRST(&regs); in vm_remap()
566 TAILQ_REMOVE(&regs, r, link); in vm_remap()
568 r->va = r_last->va + r_last->size; in vm_remap()
569 res = umap_add_region(&uctx->vm_info, r, 0, 0, 0); in vm_remap()
571 r->va = *new_va; in vm_remap()
572 res = umap_add_region(&uctx->vm_info, r, pad_begin, in vm_remap()
573 pad_end + len - r->size, 0); in vm_remap()
576 r_last = r; in vm_remap()
580 res = tee_pager_add_um_region(uctx, r->va, fobj, in vm_remap()
581 r->attr); in vm_remap()
591 if (r != r_last) { in vm_remap()
596 TAILQ_INSERT_HEAD(&regs, r, link); in vm_remap()
598 for (r = r_first; r_last && r != r_last; r = r_next) { in vm_remap()
599 r_next = TAILQ_NEXT(r, link); in vm_remap()
600 TAILQ_REMOVE(&uctx->vm_info.regions, r, link); in vm_remap()
602 TAILQ_INSERT_AFTER(&regs, r_tmp, r, in vm_remap()
605 TAILQ_INSERT_HEAD(&regs, r, link); in vm_remap()
606 r_tmp = r; in vm_remap()
623 r = TAILQ_FIRST(&regs); in vm_remap()
624 TAILQ_REMOVE(&regs, r, link); in vm_remap()
625 r->va = next_va; in vm_remap()
626 next_va += r->size; in vm_remap()
627 if (umap_add_region(&uctx->vm_info, r, 0, 0, 0)) in vm_remap()
631 if (fobj && tee_pager_add_um_region(uctx, r->va, fobj, r->attr)) in vm_remap()
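vm_remap() works by parking the affected regions on a temporary list head: each region in the old range is removed from the live list and appended to regs (lines 548-554), then popped off again and re-added at the new address (lines 565-573 and 623-627); if something fails part-way, lines 591-606 push the already re-added regions back onto the temporary list so the original layout can be restored. A sketch of the first step, detaching onto a temporary TAILQ head, reusing the earlier definitions.

static void detach_range(struct vm_region_head *live,
                         struct vm_region_head *regs,
                         struct vm_region *r0, vaddr_t old_va, size_t len)
{
        struct vm_region *r = NULL;
        struct vm_region *r_next = NULL;

        TAILQ_INIT(regs);
        for (r = r0; r; r = r_next) {
                if (r->va + r->size > old_va + len)
                        break;                    /* past the range to move */
                r_next = TAILQ_NEXT(r, link);
                TAILQ_REMOVE(live, r, link);
                TAILQ_INSERT_TAIL(regs, r, link); /* preserves ascending va */
        }
}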
641 const struct vm_region *r, in cmp_region_for_get_flags()
644 return r0->flags == r->flags; in cmp_region_for_get_flags()
650 struct vm_region *r = NULL; in vm_get_flags() local
655 r = find_vm_region(&uctx->vm_info, va); in vm_get_flags()
656 if (!r) in vm_get_flags()
659 if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_flags)) in vm_get_flags()
662 *flags = r->flags; in vm_get_flags()
668 const struct vm_region *r, in cmp_region_for_get_prot()
672 (r->attr & TEE_MATTR_PROT_MASK); in cmp_region_for_get_prot()
678 struct vm_region *r = NULL; in vm_get_prot() local
683 r = find_vm_region(&uctx->vm_info, va); in vm_get_prot()
684 if (!r) in vm_get_prot()
687 if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_prot)) in vm_get_prot()
690 *prot = r->attr & TEE_MATTR_PROT_MASK; in vm_get_prot()
700 struct vm_region *r = NULL; in vm_set_prot() local
713 for (r = r0; r; r = TAILQ_NEXT(r, link)) { in vm_set_prot()
714 if (r->va + r->size > va + len) in vm_set_prot()
716 if (r->attr & (TEE_MATTR_UW | TEE_MATTR_PW)) in vm_set_prot()
719 if (!mobj_is_paged(r->mobj)) in vm_set_prot()
722 r->attr &= ~TEE_MATTR_PROT_MASK; in vm_set_prot()
723 r->attr |= prot; in vm_set_prot()
731 for (r = r0; r; r = TAILQ_NEXT(r, link)) { in vm_set_prot()
732 if (r->va + r->size > va + len) in vm_set_prot()
734 if (mobj_is_paged(r->mobj)) { in vm_set_prot()
735 if (!tee_pager_set_um_region_attr(uctx, r->va, r->size, in vm_set_prot()
739 cache_op_inner(DCACHE_AREA_CLEAN, (void *)r->va, in vm_set_prot()
740 r->size); in vm_set_prot()
762 struct vm_region *r = NULL; in vm_unmap() local
779 res = split_vm_range(uctx, va, l, NULL, &r); in vm_unmap()
784 r_next = TAILQ_NEXT(r, link); in vm_unmap()
785 unmap_end_va = r->va + r->size; in vm_unmap()
786 rem_um_region(uctx, r); in vm_unmap()
787 umap_remove_region(&uctx->vm_info, r); in vm_unmap()
790 r = r_next; in vm_unmap()
847 struct vm_region *r; in vm_clean_param() local
849 TAILQ_FOREACH_SAFE(r, &uctx->vm_info.regions, link, next_r) { in vm_clean_param()
850 if (r->flags & VM_FLAG_EPHEMERAL) { in vm_clean_param()
851 rem_um_region(uctx, r); in vm_clean_param()
852 umap_remove_region(&uctx->vm_info, r); in vm_clean_param()
859 struct vm_region *r = NULL; in check_param_map_empty() local
861 TAILQ_FOREACH(r, &uctx->vm_info.regions, link) in check_param_map_empty()
862 assert(!(r->flags & VM_FLAG_EPHEMERAL)); in check_param_map_empty()
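vm_clean_param() removes regions while it walks the list, so it uses TAILQ_FOREACH_SAFE (line 849), which latches the next pointer before the loop body runs; check_param_map_empty() only reads, so plain TAILQ_FOREACH (line 861) is enough. A sketch of the removal pattern, reusing the earlier struct and assuming a BSD-style <sys/queue.h> that provides the _SAFE variant (glibc's header does not); the flag mask is a parameter here rather than the listing's VM_FLAG_EPHEMERAL.

#include <stdlib.h>

static void drop_flagged_regions(struct vm_region_head *regions,
                                 uint32_t flag_mask)
{
        struct vm_region *r = NULL;
        struct vm_region *next_r = NULL;

        /* next_r is loaded before the body runs, so removing and freeing r
         * does not break the iteration. */
        TAILQ_FOREACH_SAFE(r, regions, link, next_r) {
                if (r->flags & flag_mask) {
                        TAILQ_REMOVE(regions, r, link);
                        free(r);       /* the real code also tears down the
                                        * mapping and drops the mobj */
                }
        }
}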
1057 struct vm_region *r = NULL; in vm_rem_rwmem() local
1059 TAILQ_FOREACH(r, &uctx->vm_info.regions, link) { in vm_rem_rwmem()
1060 if (r->mobj == mobj && r->va == va) { in vm_rem_rwmem()
1061 rem_um_region(uctx, r); in vm_rem_rwmem()
1062 umap_remove_region(&uctx->vm_info, r); in vm_rem_rwmem()
1087 struct vm_region *r = NULL; in vm_buf_is_inside_um_private() local
1089 TAILQ_FOREACH(r, &uctx->vm_info.regions, link) { in vm_buf_is_inside_um_private()
1090 if (r->flags & VM_FLAGS_NONPRIV) in vm_buf_is_inside_um_private()
1092 if (core_is_buffer_inside((vaddr_t)va, size, r->va, r->size)) in vm_buf_is_inside_um_private()
1103 struct vm_region *r = NULL; in vm_buf_intersects_um_private() local
1105 TAILQ_FOREACH(r, &uctx->vm_info.regions, link) { in vm_buf_intersects_um_private()
1106 if (r->attr & VM_FLAGS_NONPRIV) in vm_buf_intersects_um_private()
1108 if (core_is_buffer_intersect((vaddr_t)va, size, r->va, r->size)) in vm_buf_intersects_um_private()
1119 struct vm_region *r = NULL; in vm_buf_to_mboj_offs() local
1121 TAILQ_FOREACH(r, &uctx->vm_info.regions, link) { in vm_buf_to_mboj_offs()
1122 if (!r->mobj) in vm_buf_to_mboj_offs()
1124 if (core_is_buffer_inside((vaddr_t)va, size, r->va, r->size)) { in vm_buf_to_mboj_offs()
1127 poffs = mobj_get_phys_offs(r->mobj, in vm_buf_to_mboj_offs()
1129 *mobj = r->mobj; in vm_buf_to_mboj_offs()
1130 *offs = (vaddr_t)va - r->va + r->offset - poffs; in vm_buf_to_mboj_offs()
1313 struct vm_region *r = NULL; in vm_get_mobj() local
1319 r = find_vm_region(&uctx->vm_info, va); in vm_get_mobj()
1320 if (!r) in vm_get_mobj()
1323 r_offs = va - r->va; in vm_get_mobj()
1325 *len = MIN(r->size - r_offs, *len); in vm_get_mobj()
1326 *offs = r->offset + r_offs; in vm_get_mobj()
1327 *prot = r->attr & TEE_MATTR_PROT_MASK; in vm_get_mobj()
1328 return mobj_get(r->mobj); in vm_get_mobj()
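vm_get_mobj() turns a user VA into (mobj, offset, length, protection): r_offs is the VA's distance into its region, the returned offset is the region's mobj offset plus that distance, and the caller's length is clamped so it never runs past the region's end (lines 1323-1327). A small sketch of just that arithmetic, with the usual MIN() macro and invented example values.

#include <stddef.h>
#include <stdint.h>

typedef uintptr_t vaddr_t;

#define MIN(a, b) ((a) < (b) ? (a) : (b))

static size_t va_to_mobj_offs(vaddr_t va, vaddr_t r_va, size_t r_size,
                              size_t r_offset, size_t *len)
{
        size_t r_offs = va - r_va;     /* distance of va into the region */

        *len = MIN(r_size - r_offs, *len);
        return r_offset + r_offs;
}

With invented values r_va 0x40001000, r_size 0x3000, r_offset 0x2000, va 0x40002800 and *len 0x1000: r_offs is 0x1800, *len stays 0x1000 (0x1800 would still fit), and the returned mobj offset is 0x3800.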