Lines matching references to uctx. Each entry gives the source line number, the matching code and the enclosing function; a trailing "argument" or "local" tag marks lines where uctx is declared, as a function parameter or as a local variable respectively.

113 static size_t get_num_req_pgts(struct user_mode_ctx *uctx, vaddr_t *begin,  in get_num_req_pgts()  argument
119 if (TAILQ_EMPTY(&uctx->vm_info.regions)) { in get_num_req_pgts()
125 b = TAILQ_FIRST(&uctx->vm_info.regions)->va; in get_num_req_pgts()
126 r = TAILQ_LAST(&uctx->vm_info.regions, vm_region_head); in get_num_req_pgts()
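
get_num_req_pgts() above derives, from the first and last entries in uctx->vm_info.regions, how many translation tables the context needs. A minimal sketch of that kind of calculation, assuming the CORE_MMU_PGDIR_* constants and the ROUNDDOWN()/ROUNDUP() helpers from the OP-TEE headers; num_pgdir_tables is an illustrative name, and the real function additionally handles an empty region list and returns the rounded bounds through its begin/end parameters:

static size_t num_pgdir_tables(vaddr_t b, vaddr_t e)
{
	/* One translation table covers CORE_MMU_PGDIR_SIZE bytes of VA */
	b = ROUNDDOWN(b, CORE_MMU_PGDIR_SIZE);
	e = ROUNDUP(e, CORE_MMU_PGDIR_SIZE);

	return (e - b) >> CORE_MMU_PGDIR_SHIFT;
}
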
139 static TEE_Result alloc_pgt(struct user_mode_ctx *uctx) in alloc_pgt() argument
146 ntbl = get_num_req_pgts(uctx, &b, &e); in alloc_pgt()
154 if (uctx->ts_ctx == tsd->ctx) { in alloc_pgt()
159 pgt_alloc(&tsd->pgt_cache, uctx->ts_ctx, b, e - 1); in alloc_pgt()
166 static void rem_um_region(struct user_mode_ctx *uctx, struct vm_region *r) in rem_um_region() argument
174 if (uctx->ts_ctx == tsd->ctx) in rem_um_region()
178 tee_pager_rem_um_region(uctx, r->va, r->size); in rem_um_region()
180 pgt_clear_ctx_range(pgt_cache, uctx->ts_ctx, r->va, in rem_um_region()
183 uctx->vm_info.asid); in rem_um_region()
199 pgt_flush_ctx_range(pgt_cache, uctx->ts_ctx, r->va, r->va + r->size); in rem_um_region()
260 TEE_Result vm_map_pad(struct user_mode_ctx *uctx, vaddr_t *va, size_t len, in vm_map_pad() argument
295 res = umap_add_region(&uctx->vm_info, reg, pad_begin, pad_end, align); in vm_map_pad()
299 res = alloc_pgt(uctx); in vm_map_pad()
311 res = tee_pager_add_um_region(uctx, reg->va, fobj, prot); in vm_map_pad()
321 if (thread_get_tsd()->ctx == uctx->ts_ctx) in vm_map_pad()
322 vm_set_ctx(uctx->ts_ctx); in vm_map_pad()
329 TAILQ_REMOVE(&uctx->vm_info.regions, reg, link); in vm_map_pad()
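
The vm_map_pad() entries above show the mapping path: a region is added to uctx->vm_info, page tables are allocated via alloc_pgt(), the pager is told about the region, and on error the region is unlinked again. A caller-side sketch, assuming the remaining vm_map_pad() parameters are (prot, flags, mobj, offset, pad_begin, pad_end, align) and that the caller holds a reference on the mobj; map_one_mobj and the page-sized guard padding are illustrative:

static TEE_Result map_one_mobj(struct user_mode_ctx *uctx, struct mobj *mobj,
			       size_t len, uint32_t prot, vaddr_t *va_out)
{
	vaddr_t va = 0;		/* 0 lets vm_map_pad() pick a free VA */
	TEE_Result res = TEE_SUCCESS;

	res = vm_map_pad(uctx, &va, len, prot, 0 /* flags */, mobj,
			 0 /* offs */, SMALL_PAGE_SIZE /* pad_begin */,
			 SMALL_PAGE_SIZE /* pad_end */, 0 /* align */);
	if (res)
		return res;

	*va_out = va;
	return TEE_SUCCESS;
}
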
376 static TEE_Result split_vm_region(struct user_mode_ctx *uctx, in split_vm_region() argument
389 TEE_Result res = tee_pager_split_um_region(uctx, va); in split_vm_region()
406 TAILQ_INSERT_AFTER(&uctx->vm_info.regions, r, r2, link); in split_vm_region()
411 static TEE_Result split_vm_range(struct user_mode_ctx *uctx, vaddr_t va, in split_vm_range() argument
432 r = find_vm_region(&uctx->vm_info, va); in split_vm_range()
441 res = split_vm_region(uctx, r, va); in split_vm_range()
448 r = find_vm_region(&uctx->vm_info, va + len - 1); in split_vm_range()
452 res = split_vm_region(uctx, r, end_va); in split_vm_range()
460 static void merge_vm_range(struct user_mode_ctx *uctx, vaddr_t va, size_t len) in merge_vm_range() argument
469 tee_pager_merge_um_region(uctx, va, len); in merge_vm_range()
471 for (r = TAILQ_FIRST(&uctx->vm_info.regions);; r = r_next) { in merge_vm_range()
497 TAILQ_REMOVE(&uctx->vm_info.regions, r_next, link); in merge_vm_range()
520 TEE_Result vm_remap(struct user_mode_ctx *uctx, vaddr_t *new_va, vaddr_t old_va, in vm_remap() argument
533 assert(thread_get_tsd()->ctx == uctx->ts_ctx); in vm_remap()
538 res = split_vm_range(uctx, old_va, len, cmp_region_for_remap, &r0); in vm_remap()
552 rem_um_region(uctx, r); in vm_remap()
553 TAILQ_REMOVE(&uctx->vm_info.regions, r, link); in vm_remap()
561 vm_set_ctx(uctx->ts_ctx); in vm_remap()
569 res = umap_add_region(&uctx->vm_info, r, 0, 0, 0); in vm_remap()
572 res = umap_add_region(&uctx->vm_info, r, pad_begin, in vm_remap()
578 res = alloc_pgt(uctx); in vm_remap()
580 res = tee_pager_add_um_region(uctx, r->va, fobj, in vm_remap()
600 TAILQ_REMOVE(&uctx->vm_info.regions, r, link); in vm_remap()
615 vm_set_ctx(uctx->ts_ctx); in vm_remap()
627 if (umap_add_region(&uctx->vm_info, r, 0, 0, 0)) in vm_remap()
629 if (alloc_pgt(uctx)) in vm_remap()
631 if (fobj && tee_pager_add_um_region(uctx, r->va, fobj, r->attr)) in vm_remap()
635 vm_set_ctx(uctx->ts_ctx); in vm_remap()
647 TEE_Result vm_get_flags(struct user_mode_ctx *uctx, vaddr_t va, size_t len, in vm_get_flags() argument
655 r = find_vm_region(&uctx->vm_info, va); in vm_get_flags()
675 TEE_Result vm_get_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len, in vm_get_prot() argument
683 r = find_vm_region(&uctx->vm_info, va); in vm_get_prot()
695 TEE_Result vm_set_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len, in vm_set_prot() argument
704 assert(thread_get_tsd()->ctx == uctx->ts_ctx); in vm_set_prot()
709 res = split_vm_range(uctx, va, len, NULL, &r0); in vm_set_prot()
728 vm_set_ctx(uctx->ts_ctx); in vm_set_prot()
735 if (!tee_pager_set_um_region_attr(uctx, r->va, r->size, in vm_set_prot()
747 merge_vm_range(uctx, va, len); in vm_set_prot()
759 TEE_Result vm_unmap(struct user_mode_ctx *uctx, vaddr_t va, size_t len) in vm_unmap() argument
768 assert(thread_get_tsd()->ctx == uctx->ts_ctx); in vm_unmap()
779 res = split_vm_range(uctx, va, l, NULL, &r); in vm_unmap()
786 rem_um_region(uctx, r); in vm_unmap()
787 umap_remove_region(&uctx->vm_info, r); in vm_unmap()
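
vm_set_prot() and vm_unmap() both start by splitting the affected range on region boundaries with split_vm_range(), then walk the covered regions; vm_set_prot() additionally merges them back afterwards with merge_vm_range(). A usage sketch combining the two, assuming page-aligned va/len and the TEE_MATTR_UR attribute bit; retire_mapping is an illustrative name:

static TEE_Result retire_mapping(struct user_mode_ctx *uctx, vaddr_t va,
				 size_t len)
{
	/* Drop write access first ... */
	TEE_Result res = vm_set_prot(uctx, va, len, TEE_MATTR_UR);

	if (res)
		return res;

	/* ... let the consumer read the now read-only range ... */

	/* ... then remove the mapping entirely */
	return vm_unmap(uctx, va, len);
}
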
796 static TEE_Result map_kinit(struct user_mode_ctx *uctx) in map_kinit() argument
810 res = vm_map(uctx, &va, sz, prot, VM_FLAG_PERMANENT, in map_kinit()
818 return vm_map(uctx, &va, sz, TEE_MATTR_PRW, VM_FLAG_PERMANENT, in map_kinit()
824 TEE_Result vm_info_init(struct user_mode_ctx *uctx) in vm_info_init() argument
834 memset(&uctx->vm_info, 0, sizeof(uctx->vm_info)); in vm_info_init()
835 TAILQ_INIT(&uctx->vm_info.regions); in vm_info_init()
836 uctx->vm_info.asid = asid; in vm_info_init()
838 res = map_kinit(uctx); in vm_info_init()
840 vm_info_final(uctx); in vm_info_init()
844 void vm_clean_param(struct user_mode_ctx *uctx) in vm_clean_param() argument
849 TAILQ_FOREACH_SAFE(r, &uctx->vm_info.regions, link, next_r) { in vm_clean_param()
851 rem_um_region(uctx, r); in vm_clean_param()
852 umap_remove_region(&uctx->vm_info, r); in vm_clean_param()
857 static void check_param_map_empty(struct user_mode_ctx *uctx __maybe_unused) in check_param_map_empty()
861 TAILQ_FOREACH(r, &uctx->vm_info.regions, link) in check_param_map_empty()
865 static TEE_Result param_mem_to_user_va(struct user_mode_ctx *uctx, in param_mem_to_user_va() argument
870 TAILQ_FOREACH(region, &uctx->vm_info.regions, link) { in param_mem_to_user_va()
922 TEE_Result vm_map_param(struct user_mode_ctx *uctx, struct tee_ta_param *param, in vm_map_param() argument
985 check_param_map_empty(uctx); in vm_map_param()
990 res = vm_map(uctx, &va, mem[n].size, in vm_map_param()
1008 res = param_mem_to_user_va(uctx, &param->u[n].mem, in vm_map_param()
1014 res = alloc_pgt(uctx); in vm_map_param()
1017 vm_clean_param(uctx); in vm_map_param()
1022 TEE_Result vm_add_rwmem(struct user_mode_ctx *uctx, struct mobj *mobj, in vm_add_rwmem() argument
1040 res = umap_add_region(&uctx->vm_info, reg, 0, 0, 0); in vm_add_rwmem()
1046 res = alloc_pgt(uctx); in vm_add_rwmem()
1048 umap_remove_region(&uctx->vm_info, reg); in vm_add_rwmem()
1055 void vm_rem_rwmem(struct user_mode_ctx *uctx, struct mobj *mobj, vaddr_t va) in vm_rem_rwmem() argument
1059 TAILQ_FOREACH(r, &uctx->vm_info.regions, link) { in vm_rem_rwmem()
1061 rem_um_region(uctx, r); in vm_rem_rwmem()
1062 umap_remove_region(&uctx->vm_info, r); in vm_rem_rwmem()
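
vm_add_rwmem() and vm_rem_rwmem() add and remove a private read/write mapping (for instance TA stack or heap backing). A paired-use sketch, assuming vm_add_rwmem() returns the chosen virtual address through a third vaddr_t * parameter, which is not shown in full above; with_rwmem is an illustrative name:

static TEE_Result with_rwmem(struct user_mode_ctx *uctx, struct mobj *mobj)
{
	vaddr_t va = 0;
	TEE_Result res = vm_add_rwmem(uctx, mobj, &va);

	if (res)
		return res;

	/* ... use the read/write mapping at @va ... */

	vm_rem_rwmem(uctx, mobj, va);
	return TEE_SUCCESS;
}
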
1068 void vm_info_final(struct user_mode_ctx *uctx) in vm_info_final() argument
1070 if (!uctx->vm_info.asid) in vm_info_final()
1074 tlbi_asid(uctx->vm_info.asid); in vm_info_final()
1076 asid_free(uctx->vm_info.asid); in vm_info_final()
1077 while (!TAILQ_EMPTY(&uctx->vm_info.regions)) in vm_info_final()
1078 umap_remove_region(&uctx->vm_info, in vm_info_final()
1079 TAILQ_FIRST(&uctx->vm_info.regions)); in vm_info_final()
1080 memset(&uctx->vm_info, 0, sizeof(uctx->vm_info)); in vm_info_final()
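
vm_info_init() allocates the ASID, resets uctx->vm_info and creates the initial kernel-provided mappings via map_kinit(); vm_info_final() undoes all of it: TLB invalidation by ASID, freeing the ASID and dropping every remaining region. A lifecycle sketch, assuming a vm_map() wrapper with the parameter order (uctx, va, len, prot, flags, mobj, offs); with_fresh_vm_info and the TEE_MATTR_URW protection are illustrative:

static TEE_Result with_fresh_vm_info(struct user_mode_ctx *uctx,
				     struct mobj *mobj, size_t len)
{
	vaddr_t va = 0;
	TEE_Result res = vm_info_init(uctx);

	if (res)
		return res;

	res = vm_map(uctx, &va, len, TEE_MATTR_URW, 0 /* flags */, mobj, 0);
	if (res)
		vm_info_final(uctx);	/* tear down ASID and regions again */

	return res;
}
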
1084 bool vm_buf_is_inside_um_private(const struct user_mode_ctx *uctx, in vm_buf_is_inside_um_private() argument
1089 TAILQ_FOREACH(r, &uctx->vm_info.regions, link) { in vm_buf_is_inside_um_private()
1100 bool vm_buf_intersects_um_private(const struct user_mode_ctx *uctx, in vm_buf_intersects_um_private() argument
1105 TAILQ_FOREACH(r, &uctx->vm_info.regions, link) { in vm_buf_intersects_um_private()
1115 TEE_Result vm_buf_to_mboj_offs(const struct user_mode_ctx *uctx, in vm_buf_to_mboj_offs() argument
1121 TAILQ_FOREACH(r, &uctx->vm_info.regions, link) { in vm_buf_to_mboj_offs()
1138 static TEE_Result tee_mmu_user_va2pa_attr(const struct user_mode_ctx *uctx, in tee_mmu_user_va2pa_attr() argument
1143 TAILQ_FOREACH(region, &uctx->vm_info.regions, link) { in tee_mmu_user_va2pa_attr()
1184 TEE_Result vm_va2pa(const struct user_mode_ctx *uctx, void *ua, paddr_t *pa) in vm_va2pa() argument
1186 return tee_mmu_user_va2pa_attr(uctx, ua, pa, NULL); in vm_va2pa()
1189 void *vm_pa2va(const struct user_mode_ctx *uctx, paddr_t pa, size_t pa_size) in vm_pa2va() argument
1194 TAILQ_FOREACH(region, &uctx->vm_info.regions, link) { in vm_pa2va()
1236 TEE_Result vm_check_access_rights(const struct user_mode_ctx *uctx, in vm_check_access_rights() argument
1256 !vm_buf_is_inside_um_private(uctx, (void *)uaddr, len)) in vm_check_access_rights()
1263 res = tee_mmu_user_va2pa_attr(uctx, (void *)a, NULL, &attr); in vm_check_access_rights()
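
vm_check_access_rights() translates each address in the requested range with tee_mmu_user_va2pa_attr() and, for accesses that must not touch shared memory, also checks vm_buf_is_inside_um_private(). A caller-side sketch, assuming the remaining parameters are (flags, uaddr, len) and the GP TEE_MEMORY_ACCESS_* flag values; check_user_buf is an illustrative name:

static TEE_Result check_user_buf(const struct user_mode_ctx *uctx,
				 const void *buf, size_t len)
{
	uint32_t flags = TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER;

	/* Reject the buffer unless the whole range is mapped readable */
	return vm_check_access_rights(uctx, flags, (uaddr_t)buf, len);
}
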
1301 struct user_mode_ctx *uctx = to_user_mode_ctx(ctx); in vm_set_ctx() local
1303 core_mmu_create_user_map(uctx, &map); in vm_set_ctx()
1305 tee_pager_assign_um_tables(uctx); in vm_set_ctx()
1310 struct mobj *vm_get_mobj(struct user_mode_ctx *uctx, vaddr_t va, size_t *len, in vm_get_mobj() argument
1319 r = find_vm_region(&uctx->vm_info, va); in vm_get_mobj()