// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2021, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2021, Arm Limited
 */

#include <arm.h>
#include <assert.h>
#include <initcall.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tlb_helpers.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/pgt_cache.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu_types.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <sm/optee_smc.h>
#include <stdlib.h>
#include <tee_api_defines_extensions.h>
#include <tee_api_types.h>
#include <trace.h>
#include <types_ext.h>
#include <user_ta_header.h>
#include <util.h>

#ifdef CFG_PL310
#include <kernel/tee_l2cc_mutex.h>
#endif
#define TEE_MMU_UDATA_ATTR		(TEE_MATTR_VALID_BLOCK | \
					 TEE_MATTR_PRW | TEE_MATTR_URW | \
					 TEE_MATTR_SECURE)
#define TEE_MMU_UCODE_ATTR		(TEE_MATTR_VALID_BLOCK | \
					 TEE_MATTR_PRW | TEE_MATTR_URWX | \
					 TEE_MATTR_SECURE)

#define TEE_MMU_UCACHE_DEFAULT_ATTR	(TEE_MATTR_CACHE_CACHED << \
					 TEE_MATTR_CACHE_SHIFT)

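/*
 * Pick a virtual address for @reg in the gap between @prev_reg and
 * @next_reg, honouring the requested padding and granularity and keeping
 * regions with differing EPHEMERAL/PERMANENT/SHAREABLE flags separated by
 * an unmapped page. Returns 0 if the region doesn't fit in the gap.
 */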
static vaddr_t select_va_in_range(const struct vm_region *prev_reg,
				  const struct vm_region *next_reg,
				  const struct vm_region *reg,
				  size_t pad_begin, size_t pad_end,
				  size_t granul)
{
	const uint32_t f = VM_FLAG_EPHEMERAL | VM_FLAG_PERMANENT |
			   VM_FLAG_SHAREABLE;
	vaddr_t begin_va = 0;
	vaddr_t end_va = 0;
	size_t pad = 0;

	/*
	 * Insert an unmapped entry to separate regions with differing
	 * VM_FLAG_EPHEMERAL, VM_FLAG_PERMANENT or VM_FLAG_SHAREABLE
	 * bits, as such regions must never be contiguous with each other.
	 */
	if (prev_reg->flags && (prev_reg->flags & f) != (reg->flags & f))
		pad = SMALL_PAGE_SIZE;
	else
		pad = 0;

#ifndef CFG_WITH_LPAE
	if ((prev_reg->attr & TEE_MATTR_SECURE) !=
	    (reg->attr & TEE_MATTR_SECURE))
		granul = CORE_MMU_PGDIR_SIZE;
#endif

	if (ADD_OVERFLOW(prev_reg->va, prev_reg->size, &begin_va) ||
	    ADD_OVERFLOW(begin_va, pad_begin, &begin_va) ||
	    ADD_OVERFLOW(begin_va, pad, &begin_va) ||
	    ROUNDUP_OVERFLOW(begin_va, granul, &begin_va))
		return 0;

	if (reg->va) {
		if (reg->va < begin_va)
			return 0;
		begin_va = reg->va;
	}

	if (next_reg->flags && (next_reg->flags & f) != (reg->flags & f))
		pad = SMALL_PAGE_SIZE;
	else
		pad = 0;

#ifndef CFG_WITH_LPAE
	if ((next_reg->attr & TEE_MATTR_SECURE) !=
	    (reg->attr & TEE_MATTR_SECURE))
		granul = CORE_MMU_PGDIR_SIZE;
#endif
	if (ADD_OVERFLOW(begin_va, reg->size, &end_va) ||
	    ADD_OVERFLOW(end_va, pad_end, &end_va) ||
	    ADD_OVERFLOW(end_va, pad, &end_va) ||
	    ROUNDUP_OVERFLOW(end_va, granul, &end_va))
		return 0;

	if (end_va <= next_reg->va) {
		assert(!reg->va || reg->va == begin_va);
		return begin_va;
	}

	return 0;
}

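/*
 * Return the number of translation tables needed to cover the VA span of
 * all regions in @uctx, optionally reporting the PGDIR-aligned bounds in
 * @begin and @end. With no regions mapped the span is empty.
 */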
static size_t get_num_req_pgts(struct user_mode_ctx *uctx, vaddr_t *begin,
			       vaddr_t *end)
{
	vaddr_t b;
	vaddr_t e;

	if (TAILQ_EMPTY(&uctx->vm_info.regions)) {
		core_mmu_get_user_va_range(&b, NULL);
		e = b;
	} else {
		struct vm_region *r;

		b = TAILQ_FIRST(&uctx->vm_info.regions)->va;
		r = TAILQ_LAST(&uctx->vm_info.regions, vm_region_head);
		e = r->va + r->size;
		b = ROUNDDOWN(b, CORE_MMU_PGDIR_SIZE);
		e = ROUNDUP(e, CORE_MMU_PGDIR_SIZE);
	}

	if (begin)
		*begin = b;
	if (end)
		*end = e;
	return (e - b) >> CORE_MMU_PGDIR_SHIFT;
}

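/*
 * Check that enough page tables are available for the current set of
 * regions and, when a paged user TA is the active context, allocate them
 * up front for the pager.
 */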
static TEE_Result alloc_pgt(struct user_mode_ctx *uctx)
{
	struct thread_specific_data *tsd __maybe_unused;
	vaddr_t b;
	vaddr_t e;
	size_t ntbl;

	ntbl = get_num_req_pgts(uctx, &b, &e);
	if (!pgt_check_avail(ntbl)) {
		EMSG("%zu page tables not available", ntbl);
		return TEE_ERROR_OUT_OF_MEMORY;
	}

#ifdef CFG_PAGED_USER_TA
	tsd = thread_get_tsd();
	if (uctx->ts_ctx == tsd->ctx) {
		/*
		 * The supplied uctx is the currently active context;
		 * allocate the page tables too, as the pager will need
		 * them soon.
		 */
		pgt_alloc(&tsd->pgt_cache, uctx->ts_ctx, b, e - 1);
	}
#endif

	return TEE_SUCCESS;
}

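/*
 * Tear down the mappings backing @r: let the pager forget paged regions,
 * otherwise clear the covered page table entries and invalidate the TLB
 * for this ASID. Page tables not shared with neighbouring regions are
 * flushed as well.
 */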
static void rem_um_region(struct user_mode_ctx *uctx, struct vm_region *r)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct pgt_cache *pgt_cache = NULL;
	vaddr_t begin = ROUNDDOWN(r->va, CORE_MMU_PGDIR_SIZE);
	vaddr_t last = ROUNDUP(r->va + r->size, CORE_MMU_PGDIR_SIZE);
	struct vm_region *r2 = NULL;

	if (uctx->ts_ctx == tsd->ctx)
		pgt_cache = &tsd->pgt_cache;

	if (mobj_is_paged(r->mobj)) {
		tee_pager_rem_um_region(uctx, r->va, r->size);
	} else {
		pgt_clear_ctx_range(pgt_cache, uctx->ts_ctx, r->va,
				    r->va + r->size);
		tlbi_mva_range_asid(r->va, r->size, SMALL_PAGE_SIZE,
				    uctx->vm_info.asid);
	}

	r2 = TAILQ_NEXT(r, link);
	if (r2)
		last = MIN(last, ROUNDDOWN(r2->va, CORE_MMU_PGDIR_SIZE));

	r2 = TAILQ_PREV(r, vm_region_head, link);
	if (r2)
		begin = MAX(begin,
			    ROUNDUP(r2->va + r2->size, CORE_MMU_PGDIR_SIZE));

	/* If there are no unused page tables, there's nothing left to do */
	if (begin >= last)
		return;

	pgt_flush_ctx_range(pgt_cache, uctx->ts_ctx, r->va, r->va + r->size);
}

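/*
 * Insert @reg into the sorted region list of @vmi, either at the virtual
 * address already set in @reg->va or at the first free range that fits
 * the requested size, padding and alignment.
 */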
static TEE_Result umap_add_region(struct vm_info *vmi, struct vm_region *reg,
				  size_t pad_begin, size_t pad_end,
				  size_t align)
{
	struct vm_region dummy_first_reg = { };
	struct vm_region dummy_last_reg = { };
	struct vm_region *r = NULL;
	struct vm_region *prev_r = NULL;
	vaddr_t va_range_base = 0;
	size_t va_range_size = 0;
	size_t granul;
	vaddr_t va = 0;
	size_t offs_plus_size = 0;

	core_mmu_get_user_va_range(&va_range_base, &va_range_size);
	dummy_first_reg.va = va_range_base;
	dummy_last_reg.va = va_range_base + va_range_size;

	/* Check alignment, it has to be at least SMALL_PAGE based */
	if ((reg->va | reg->size | pad_begin | pad_end) & SMALL_PAGE_MASK)
		return TEE_ERROR_ACCESS_CONFLICT;

	/* Check that the mobj is defined for the entire range */
	if (ADD_OVERFLOW(reg->offset, reg->size, &offs_plus_size))
		return TEE_ERROR_BAD_PARAMETERS;
	if (offs_plus_size > ROUNDUP(reg->mobj->size, SMALL_PAGE_SIZE))
		return TEE_ERROR_BAD_PARAMETERS;

	granul = MAX(align, SMALL_PAGE_SIZE);
	if (!IS_POWER_OF_TWO(granul))
		return TEE_ERROR_BAD_PARAMETERS;

	prev_r = &dummy_first_reg;
	TAILQ_FOREACH(r, &vmi->regions, link) {
		va = select_va_in_range(prev_r, r, reg, pad_begin, pad_end,
					granul);
		if (va) {
			reg->va = va;
			TAILQ_INSERT_BEFORE(r, reg, link);
			return TEE_SUCCESS;
		}
		prev_r = r;
	}

	r = TAILQ_LAST(&vmi->regions, vm_region_head);
	if (!r)
		r = &dummy_first_reg;
	va = select_va_in_range(r, &dummy_last_reg, reg, pad_begin, pad_end,
				granul);
	if (va) {
		reg->va = va;
		TAILQ_INSERT_TAIL(&vmi->regions, reg, link);
		return TEE_SUCCESS;
	}

	return TEE_ERROR_ACCESS_CONFLICT;
}

TEE_Result vm_map_pad(struct user_mode_ctx *uctx, vaddr_t *va, size_t len,
		      uint32_t prot, uint32_t flags, struct mobj *mobj,
		      size_t offs, size_t pad_begin, size_t pad_end,
		      size_t align)
{
	TEE_Result res = TEE_SUCCESS;
	struct vm_region *reg = NULL;
	uint32_t attr = 0;

	if (prot & ~TEE_MATTR_PROT_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	reg = calloc(1, sizeof(*reg));
	if (!reg)
		return TEE_ERROR_OUT_OF_MEMORY;

	if (!mobj_is_paged(mobj)) {
		uint32_t cattr;

		res = mobj_get_cattr(mobj, &cattr);
		if (res)
			goto err_free_reg;
		attr |= cattr << TEE_MATTR_CACHE_SHIFT;
	}
	attr |= TEE_MATTR_VALID_BLOCK;
	if (mobj_is_secure(mobj))
		attr |= TEE_MATTR_SECURE;

	reg->mobj = mobj_get(mobj);
	reg->offset = offs;
	reg->va = *va;
	reg->size = ROUNDUP(len, SMALL_PAGE_SIZE);
	reg->attr = attr | prot;
	reg->flags = flags;

	res = umap_add_region(&uctx->vm_info, reg, pad_begin, pad_end, align);
	if (res)
		goto err_put_mobj;

	res = alloc_pgt(uctx);
	if (res)
		goto err_rem_reg;

	if (mobj_is_paged(mobj)) {
		struct fobj *fobj = mobj_get_fobj(mobj);

		if (!fobj) {
			res = TEE_ERROR_GENERIC;
			goto err_rem_reg;
		}

		res = tee_pager_add_um_region(uctx, reg->va, fobj, prot);
		fobj_put(fobj);
		if (res)
			goto err_rem_reg;
	}

	/*
	 * If the context is currently active, set it again to update
	 * the mapping.
	 */
	if (thread_get_tsd()->ctx == uctx->ts_ctx)
		vm_set_ctx(uctx->ts_ctx);

	*va = reg->va;

	return TEE_SUCCESS;

err_rem_reg:
	TAILQ_REMOVE(&uctx->vm_info.regions, reg, link);
err_put_mobj:
	mobj_put(reg->mobj);
err_free_reg:
	free(reg);
	return res;
}

static struct vm_region *find_vm_region(struct vm_info *vm_info, vaddr_t va)
{
	struct vm_region *r = NULL;

	TAILQ_FOREACH(r, &vm_info->regions, link)
		if (va >= r->va && va < r->va + r->size)
			return r;

	return NULL;
}

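/*
 * Starting at region @r0, check that [va, va + len) is covered by a run
 * of back-to-back regions. If @cmp_regs is given it must also accept each
 * neighbouring pair for the range to count as contiguous.
 */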
static bool va_range_is_contiguous(struct vm_region *r0, vaddr_t va,
				   size_t len,
				   bool (*cmp_regs)(const struct vm_region *r0,
						    const struct vm_region *r,
						    const struct vm_region *rn))
{
	struct vm_region *r = r0;
	vaddr_t end_va = 0;

	if (ADD_OVERFLOW(va, len, &end_va))
		return false;

	while (true) {
		struct vm_region *r_next = TAILQ_NEXT(r, link);
		vaddr_t r_end_va = r->va + r->size;

		if (r_end_va >= end_va)
			return true;
		if (!r_next)
			return false;
		if (r_end_va != r_next->va)
			return false;
		if (cmp_regs && !cmp_regs(r0, r, r_next))
			return false;
		r = r_next;
	}
}

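/*
 * Split region @r at @va into two regions sharing the same mobj, flags
 * and attributes, so that later operations can be applied to complete
 * regions only.
 */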
static TEE_Result split_vm_region(struct user_mode_ctx *uctx,
				  struct vm_region *r, vaddr_t va)
{
	struct vm_region *r2 = NULL;
	size_t diff = va - r->va;

	assert(diff && diff < r->size);

	r2 = calloc(1, sizeof(*r2));
	if (!r2)
		return TEE_ERROR_OUT_OF_MEMORY;

	if (mobj_is_paged(r->mobj)) {
		TEE_Result res = tee_pager_split_um_region(uctx, va);

		if (res) {
			free(r2);
			return res;
		}
	}

	r2->mobj = mobj_get(r->mobj);
	r2->offset = r->offset + diff;
	r2->va = va;
	r2->size = r->size - diff;
	r2->attr = r->attr;
	r2->flags = r->flags;

	r->size = diff;

	TAILQ_INSERT_AFTER(&uctx->vm_info.regions, r, r2, link);

	return TEE_SUCCESS;
}

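/*
 * Verify that [va, va + len) maps to a contiguous run of regions
 * (optionally filtered by @cmp_regs) and split regions at both ends so
 * that the range covers only complete regions. The first region of the
 * range is returned in @r0_ret.
 */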
static TEE_Result split_vm_range(struct user_mode_ctx *uctx, vaddr_t va,
				 size_t len,
				 bool (*cmp_regs)(const struct vm_region *r0,
						  const struct vm_region *r,
						  const struct vm_region *rn),
				 struct vm_region **r0_ret)
{
	TEE_Result res = TEE_SUCCESS;
	struct vm_region *r = NULL;
	vaddr_t end_va = 0;

	if ((va | len) & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	if (ADD_OVERFLOW(va, len, &end_va))
		return TEE_ERROR_BAD_PARAMETERS;

	/*
	 * Find first vm_region in range and check that the entire range is
	 * contiguous.
	 */
	r = find_vm_region(&uctx->vm_info, va);
	if (!r || !va_range_is_contiguous(r, va, len, cmp_regs))
		return TEE_ERROR_BAD_PARAMETERS;

	/*
	 * If needed, split regions so that va and len cover only complete
	 * regions.
	 */
	if (va != r->va) {
		res = split_vm_region(uctx, r, va);
		if (res)
			return res;
		r = TAILQ_NEXT(r, link);
	}

	*r0_ret = r;
	r = find_vm_region(&uctx->vm_info, va + len - 1);
	if (!r)
		return TEE_ERROR_BAD_PARAMETERS;
	if (end_va != r->va + r->size) {
		res = split_vm_region(uctx, r, end_va);
		if (res)
			return res;
	}

	return TEE_SUCCESS;
}

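/*
 * Merge adjacent regions in and around [va, va + len) that share the same
 * mobj, flags, attributes and a contiguous offset, undoing splits made
 * earlier by split_vm_range().
 */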
static void merge_vm_range(struct user_mode_ctx *uctx, vaddr_t va, size_t len)
{
	struct vm_region *r_next = NULL;
	struct vm_region *r = NULL;
	vaddr_t end_va = 0;

	if (ADD_OVERFLOW(va, len, &end_va))
		return;

	tee_pager_merge_um_region(uctx, va, len);

	for (r = TAILQ_FIRST(&uctx->vm_info.regions);; r = r_next) {
		r_next = TAILQ_NEXT(r, link);
		if (!r_next)
			return;

		/* Try merging with the region just before va */
		if (r->va + r->size < va)
			continue;

		/*
		 * If r->va is well past our range we're done.
		 * Note that if it's just the page after our range we'll
		 * try to merge.
		 */
		if (r->va > end_va)
			return;

		if (r->va + r->size != r_next->va)
			continue;
		if (r->mobj != r_next->mobj ||
		    r->flags != r_next->flags ||
		    r->attr != r_next->attr)
			continue;
		if (r->offset + r->size != r_next->offset)
			continue;

		TAILQ_REMOVE(&uctx->vm_info.regions, r_next, link);
		r->size += r_next->size;
		mobj_put(r_next->mobj);
		free(r_next);
		r_next = r;
	}
}

static bool cmp_region_for_remap(const struct vm_region *r0,
				 const struct vm_region *r,
				 const struct vm_region *rn)
{
	/*
	 * All the essentials have to match for remap to make sense. The
	 * essentials are mobj/fobj, attr and flags; the offsets must also
	 * be contiguous.
	 *
	 * Note that vm_remap() depends on mobj/fobj being the same.
	 */
	return r0->flags == r->flags && r0->attr == r->attr &&
	       r0->mobj == r->mobj && rn->offset == r->offset + r->size;
}

TEE_Result vm_remap(struct user_mode_ctx *uctx, vaddr_t *new_va, vaddr_t old_va,
		    size_t len, size_t pad_begin, size_t pad_end)
{
	struct vm_region_head regs = TAILQ_HEAD_INITIALIZER(regs);
	TEE_Result res = TEE_SUCCESS;
	struct vm_region *r0 = NULL;
	struct vm_region *r = NULL;
	struct vm_region *r_next = NULL;
	struct vm_region *r_last = NULL;
	struct vm_region *r_first = NULL;
	struct fobj *fobj = NULL;
	vaddr_t next_va = 0;

	assert(thread_get_tsd()->ctx == uctx->ts_ctx);

	if (!len || ((len | old_va) & SMALL_PAGE_MASK))
		return TEE_ERROR_BAD_PARAMETERS;

	res = split_vm_range(uctx, old_va, len, cmp_region_for_remap, &r0);
	if (res)
		return res;

	if (mobj_is_paged(r0->mobj)) {
		fobj = mobj_get_fobj(r0->mobj);
		if (!fobj)
			panic();
	}

	for (r = r0; r; r = r_next) {
		if (r->va + r->size > old_va + len)
			break;
		r_next = TAILQ_NEXT(r, link);
		rem_um_region(uctx, r);
		TAILQ_REMOVE(&uctx->vm_info.regions, r, link);
		TAILQ_INSERT_TAIL(&regs, r, link);
	}

	/*
	 * Synchronize changes to the translation tables. Even though the
	 * pager case unmaps immediately, we may still free a translation
	 * table.
	 */
	vm_set_ctx(uctx->ts_ctx);

	r_first = TAILQ_FIRST(&regs);
	while (!TAILQ_EMPTY(&regs)) {
		r = TAILQ_FIRST(&regs);
		TAILQ_REMOVE(&regs, r, link);
		if (r_last) {
			r->va = r_last->va + r_last->size;
			res = umap_add_region(&uctx->vm_info, r, 0, 0, 0);
		} else {
			r->va = *new_va;
			res = umap_add_region(&uctx->vm_info, r, pad_begin,
					      pad_end + len - r->size, 0);
		}
		if (!res)
			r_last = r;
		if (!res)
			res = alloc_pgt(uctx);
		if (fobj && !res)
			res = tee_pager_add_um_region(uctx, r->va, fobj,
						      r->attr);

		if (res) {
			/*
			 * Something went wrong; move all the recently added
			 * regions back to regs for later reinsertion at
			 * the original spot.
			 */
			struct vm_region *r_tmp = NULL;

			if (r != r_last) {
				/*
				 * umap_add_region() failed, move r back to
				 * regs before all the rest are moved back.
				 */
				TAILQ_INSERT_HEAD(&regs, r, link);
			}
			for (r = r_first; r_last && r != r_last; r = r_next) {
				r_next = TAILQ_NEXT(r, link);
				TAILQ_REMOVE(&uctx->vm_info.regions, r, link);
				if (r_tmp)
					TAILQ_INSERT_AFTER(&regs, r_tmp, r,
							   link);
				else
					TAILQ_INSERT_HEAD(&regs, r, link);
				r_tmp = r;
			}

			goto err_restore_map;
		}
	}

	fobj_put(fobj);

	vm_set_ctx(uctx->ts_ctx);
	*new_va = r_first->va;

	return TEE_SUCCESS;

err_restore_map:
	next_va = old_va;
	while (!TAILQ_EMPTY(&regs)) {
		r = TAILQ_FIRST(&regs);
		TAILQ_REMOVE(&regs, r, link);
		r->va = next_va;
		next_va += r->size;
		if (umap_add_region(&uctx->vm_info, r, 0, 0, 0))
			panic("Cannot restore mapping");
		if (alloc_pgt(uctx))
			panic("Cannot restore mapping");
		if (fobj && tee_pager_add_um_region(uctx, r->va, fobj, r->attr))
			panic("Cannot restore mapping");
	}
	fobj_put(fobj);
	vm_set_ctx(uctx->ts_ctx);

	return res;
}

static bool cmp_region_for_get_flags(const struct vm_region *r0,
				     const struct vm_region *r,
				     const struct vm_region *rn __unused)
{
	return r0->flags == r->flags;
}

TEE_Result vm_get_flags(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
			uint32_t *flags)
{
	struct vm_region *r = NULL;

	if (!len || ((len | va) & SMALL_PAGE_MASK))
		return TEE_ERROR_BAD_PARAMETERS;

	r = find_vm_region(&uctx->vm_info, va);
	if (!r)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_flags))
		return TEE_ERROR_BAD_PARAMETERS;

	*flags = r->flags;

	return TEE_SUCCESS;
}

static bool cmp_region_for_get_prot(const struct vm_region *r0,
				    const struct vm_region *r,
				    const struct vm_region *rn __unused)
{
	return (r0->attr & TEE_MATTR_PROT_MASK) ==
	       (r->attr & TEE_MATTR_PROT_MASK);
}

TEE_Result vm_get_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
		       uint16_t *prot)
{
	struct vm_region *r = NULL;

	if (!len || ((len | va) & SMALL_PAGE_MASK))
		return TEE_ERROR_BAD_PARAMETERS;

	r = find_vm_region(&uctx->vm_info, va);
	if (!r)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_prot))
		return TEE_ERROR_BAD_PARAMETERS;

	*prot = r->attr & TEE_MATTR_PROT_MASK;

	return TEE_SUCCESS;
}

TEE_Result vm_set_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
		       uint32_t prot)
{
	TEE_Result res = TEE_SUCCESS;
	struct vm_region *r0 = NULL;
	struct vm_region *r = NULL;
	bool was_writeable = false;
	bool need_sync = false;

	assert(thread_get_tsd()->ctx == uctx->ts_ctx);

	if (prot & ~TEE_MATTR_PROT_MASK || !len)
		return TEE_ERROR_BAD_PARAMETERS;

	res = split_vm_range(uctx, va, len, NULL, &r0);
	if (res)
		return res;

	for (r = r0; r; r = TAILQ_NEXT(r, link)) {
		if (r->va + r->size > va + len)
			break;
		if (r->attr & (TEE_MATTR_UW | TEE_MATTR_PW))
			was_writeable = true;

		if (!mobj_is_paged(r->mobj))
			need_sync = true;

		r->attr &= ~TEE_MATTR_PROT_MASK;
		r->attr |= prot;
	}

	if (need_sync) {
		/* Synchronize changes to translation tables */
		vm_set_ctx(uctx->ts_ctx);
	}

	for (r = r0; r; r = TAILQ_NEXT(r, link)) {
		if (r->va + r->size > va + len)
			break;
		if (mobj_is_paged(r->mobj)) {
			if (!tee_pager_set_um_region_attr(uctx, r->va, r->size,
							  prot))
				panic();
		} else if (was_writeable) {
			cache_op_inner(DCACHE_AREA_CLEAN, (void *)r->va,
				       r->size);
		}
	}
	if (need_sync && was_writeable)
		cache_op_inner(ICACHE_INVALIDATE, NULL, 0);

	merge_vm_range(uctx, va, len);

	return TEE_SUCCESS;
}

static void umap_remove_region(struct vm_info *vmi, struct vm_region *reg)
{
	TAILQ_REMOVE(&vmi->regions, reg, link);
	mobj_put(reg->mobj);
	free(reg);
}

TEE_Result vm_unmap(struct user_mode_ctx *uctx, vaddr_t va, size_t len)
{
	TEE_Result res = TEE_SUCCESS;
	struct vm_region *r = NULL;
	struct vm_region *r_next = NULL;
	size_t end_va = 0;
	size_t unmap_end_va = 0;
	size_t l = 0;

	assert(thread_get_tsd()->ctx == uctx->ts_ctx);

	if (ROUNDUP_OVERFLOW(len, SMALL_PAGE_SIZE, &l))
		return TEE_ERROR_BAD_PARAMETERS;

	if (!l || (va & SMALL_PAGE_MASK))
		return TEE_ERROR_BAD_PARAMETERS;

	if (ADD_OVERFLOW(va, l, &end_va))
		return TEE_ERROR_BAD_PARAMETERS;

	res = split_vm_range(uctx, va, l, NULL, &r);
	if (res)
		return res;

	while (true) {
		r_next = TAILQ_NEXT(r, link);
		unmap_end_va = r->va + r->size;
		rem_um_region(uctx, r);
		umap_remove_region(&uctx->vm_info, r);
		if (!r_next || unmap_end_va == end_va)
			break;
		r = r_next;
	}

	return TEE_SUCCESS;
}

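/*
 * Map the kernel code and data that must remain mapped while executing in
 * user mode as permanent regions in the user VA space.
 */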
static TEE_Result map_kinit(struct user_mode_ctx *uctx)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj *mobj = NULL;
	size_t offs = 0;
	vaddr_t va = 0;
	size_t sz = 0;
	uint32_t prot = 0;

	thread_get_user_kcode(&mobj, &offs, &va, &sz);
	if (sz) {
		prot = TEE_MATTR_PRX;
		if (IS_ENABLED(CFG_CORE_BTI))
			prot |= TEE_MATTR_GUARDED;
		res = vm_map(uctx, &va, sz, prot, VM_FLAG_PERMANENT,
			     mobj, offs);
		if (res)
			return res;
	}

	thread_get_user_kdata(&mobj, &offs, &va, &sz);
	if (sz)
		return vm_map(uctx, &va, sz, TEE_MATTR_PRW, VM_FLAG_PERMANENT,
			      mobj, offs);

	return TEE_SUCCESS;
}

TEE_Result vm_info_init(struct user_mode_ctx *uctx)
{
	TEE_Result res;
	uint32_t asid = asid_alloc();

	if (!asid) {
		DMSG("Failed to allocate ASID");
		return TEE_ERROR_GENERIC;
	}

	memset(&uctx->vm_info, 0, sizeof(uctx->vm_info));
	TAILQ_INIT(&uctx->vm_info.regions);
	uctx->vm_info.asid = asid;

	res = map_kinit(uctx);
	if (res)
		vm_info_final(uctx);
	return res;
}

void vm_clean_param(struct user_mode_ctx *uctx)
{
	struct vm_region *next_r;
	struct vm_region *r;

	TAILQ_FOREACH_SAFE(r, &uctx->vm_info.regions, link, next_r) {
		if (r->flags & VM_FLAG_EPHEMERAL) {
			rem_um_region(uctx, r);
			umap_remove_region(&uctx->vm_info, r);
		}
	}
}

static void check_param_map_empty(struct user_mode_ctx *uctx __maybe_unused)
{
	struct vm_region *r = NULL;

	TAILQ_FOREACH(r, &uctx->vm_info.regions, link)
		assert(!(r->flags & VM_FLAG_EPHEMERAL));
}

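/*
 * Translate a parameter memory reference to the user virtual address where
 * it was mapped, by locating the ephemeral region that backs the same mobj
 * and covers the parameter's physical offset.
 */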
static TEE_Result param_mem_to_user_va(struct user_mode_ctx *uctx,
				       struct param_mem *mem, void **user_va)
{
	struct vm_region *region = NULL;

	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
		vaddr_t va = 0;
		size_t phys_offs = 0;

		if (!(region->flags & VM_FLAG_EPHEMERAL))
			continue;
		if (mem->mobj != region->mobj)
			continue;

		phys_offs = mobj_get_phys_offs(mem->mobj,
					       CORE_MMU_USER_PARAM_SIZE);
		phys_offs += mem->offs;
		if (phys_offs < region->offset)
			continue;
		if (phys_offs >= (region->offset + region->size))
			continue;
		va = region->va + phys_offs - region->offset;
		*user_va = (void *)va;
		return TEE_SUCCESS;
	}
	return TEE_ERROR_GENERIC;
}

static int cmp_param_mem(const void *a0, const void *a1)
{
	const struct param_mem *m1 = a1;
	const struct param_mem *m0 = a0;
	int ret;

	/* Make sure that invalid param_mem are placed last in the array */
	if (!m0->mobj && !m1->mobj)
		return 0;
	if (!m0->mobj)
		return 1;
	if (!m1->mobj)
		return -1;

	ret = CMP_TRILEAN(mobj_is_secure(m0->mobj), mobj_is_secure(m1->mobj));
	if (ret)
		return ret;

	ret = CMP_TRILEAN((vaddr_t)m0->mobj, (vaddr_t)m1->mobj);
	if (ret)
		return ret;

	ret = CMP_TRILEAN(m0->offs, m1->offs);
	if (ret)
		return ret;

	return CMP_TRILEAN(m0->size, m1->size);
}

TEE_Result vm_map_param(struct user_mode_ctx *uctx, struct tee_ta_param *param,
			void *param_va[TEE_NUM_PARAMS])
{
	TEE_Result res = TEE_SUCCESS;
	size_t n;
	size_t m;
	struct param_mem mem[TEE_NUM_PARAMS];

	memset(mem, 0, sizeof(mem));
	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		size_t phys_offs;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		phys_offs = mobj_get_phys_offs(param->u[n].mem.mobj,
					       CORE_MMU_USER_PARAM_SIZE);
		mem[n].mobj = param->u[n].mem.mobj;
		mem[n].offs = ROUNDDOWN(phys_offs + param->u[n].mem.offs,
					CORE_MMU_USER_PARAM_SIZE);
		mem[n].size = ROUNDUP(phys_offs + param->u[n].mem.offs -
				      mem[n].offs + param->u[n].mem.size,
				      CORE_MMU_USER_PARAM_SIZE);
		/*
		 * For size 0 (raw pointer parameter), add minimum size
		 * value to allow address to be mapped
		 */
		if (!mem[n].size)
			mem[n].size = CORE_MMU_USER_PARAM_SIZE;
	}

	/*
	 * Sort arguments so NULL mobj is last, secure mobjs first, then by
	 * mobj pointer value since those entries can't be merged either,
	 * finally by offset.
	 *
	 * This should result in a list where all mergeable entries are
	 * next to each other and unused/invalid entries are at the end.
	 */
	qsort(mem, TEE_NUM_PARAMS, sizeof(struct param_mem), cmp_param_mem);

	for (n = 1, m = 0; n < TEE_NUM_PARAMS && mem[n].mobj; n++) {
		if (mem[n].mobj == mem[m].mobj &&
		    (mem[n].offs == (mem[m].offs + mem[m].size) ||
		     core_is_buffer_intersect(mem[m].offs, mem[m].size,
					      mem[n].offs, mem[n].size))) {
			mem[m].size = mem[n].offs + mem[n].size - mem[m].offs;
			continue;
		}
		m++;
		if (n != m)
			mem[m] = mem[n];
	}
	/*
	 * We'd like 'm' to be the number of valid entries. Here 'm' is the
	 * index of the last valid entry if the first entry is valid, else
	 * 0.
	 */
	if (mem[0].mobj)
		m++;

	check_param_map_empty(uctx);

	for (n = 0; n < m; n++) {
		vaddr_t va = 0;

		res = vm_map(uctx, &va, mem[n].size,
			     TEE_MATTR_PRW | TEE_MATTR_URW,
			     VM_FLAG_EPHEMERAL | VM_FLAG_SHAREABLE,
			     mem[n].mobj, mem[n].offs);
		if (res)
			goto out;
	}

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (!param->u[n].mem.mobj)
			continue;

		res = param_mem_to_user_va(uctx, &param->u[n].mem,
					   param_va + n);
		if (res != TEE_SUCCESS)
			goto out;
	}

	res = alloc_pgt(uctx);
out:
	if (res)
		vm_clean_param(uctx);

	return res;
}

TEE_Result vm_add_rwmem(struct user_mode_ctx *uctx, struct mobj *mobj,
			vaddr_t *va)
{
	TEE_Result res;
	struct vm_region *reg = calloc(1, sizeof(*reg));

	if (!reg)
		return TEE_ERROR_OUT_OF_MEMORY;

	reg->mobj = mobj;
	reg->offset = 0;
	reg->va = 0;
	reg->size = ROUNDUP(mobj->size, SMALL_PAGE_SIZE);
	if (mobj_is_secure(mobj))
		reg->attr = TEE_MATTR_SECURE;
	else
		reg->attr = 0;

	res = umap_add_region(&uctx->vm_info, reg, 0, 0, 0);
	if (res) {
		free(reg);
		return res;
	}

	res = alloc_pgt(uctx);
	if (res)
		umap_remove_region(&uctx->vm_info, reg);
	else
		*va = reg->va;

	return res;
}

void vm_rem_rwmem(struct user_mode_ctx *uctx, struct mobj *mobj, vaddr_t va)
{
	struct vm_region *r = NULL;

	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
		if (r->mobj == mobj && r->va == va) {
			rem_um_region(uctx, r);
			umap_remove_region(&uctx->vm_info, r);
			return;
		}
	}
}


void vm_info_final(struct user_mode_ctx *uctx)
{
	if (!uctx->vm_info.asid)
		return;

	/* clear MMU entries to avoid clash when asid is reused */
	tlbi_asid(uctx->vm_info.asid);

	asid_free(uctx->vm_info.asid);
	while (!TAILQ_EMPTY(&uctx->vm_info.regions))
		umap_remove_region(&uctx->vm_info,
				   TAILQ_FIRST(&uctx->vm_info.regions));
	memset(&uctx->vm_info, 0, sizeof(uctx->vm_info));
}

/* return true only if buffer fits inside TA private memory */
bool vm_buf_is_inside_um_private(const struct user_mode_ctx *uctx,
				 const void *va, size_t size)
{
	struct vm_region *r = NULL;

	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
		if (r->flags & VM_FLAGS_NONPRIV)
			continue;
		if (core_is_buffer_inside((vaddr_t)va, size, r->va, r->size))
			return true;
	}

	return false;
}

/* return true only if buffer intersects TA private memory */
bool vm_buf_intersects_um_private(const struct user_mode_ctx *uctx,
				  const void *va, size_t size)
{
	struct vm_region *r = NULL;

	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
		if (r->flags & VM_FLAGS_NONPRIV)
			continue;
		if (core_is_buffer_intersect((vaddr_t)va, size, r->va, r->size))
			return true;
	}

	return false;
}

TEE_Result vm_buf_to_mboj_offs(const struct user_mode_ctx *uctx,
			       const void *va, size_t size,
			       struct mobj **mobj, size_t *offs)
{
	struct vm_region *r = NULL;

	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
		if (!r->mobj)
			continue;
		if (core_is_buffer_inside((vaddr_t)va, size, r->va, r->size)) {
			size_t poffs;

			poffs = mobj_get_phys_offs(r->mobj,
						   CORE_MMU_USER_PARAM_SIZE);
			*mobj = r->mobj;
			*offs = (vaddr_t)va - r->va + r->offset - poffs;
			return TEE_SUCCESS;
		}
	}

	return TEE_ERROR_BAD_PARAMETERS;
}

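/*
 * Look up the region covering user address @ua and return the
 * corresponding physical address and/or mapping attributes. Returns
 * TEE_ERROR_ACCESS_DENIED if the address isn't mapped.
 */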
static TEE_Result tee_mmu_user_va2pa_attr(const struct user_mode_ctx *uctx,
					  void *ua, paddr_t *pa, uint32_t *attr)
{
	struct vm_region *region = NULL;

	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
		if (!core_is_buffer_inside((vaddr_t)ua, 1, region->va,
					   region->size))
			continue;

		if (pa) {
			TEE_Result res;
			paddr_t p;
			size_t offset;
			size_t granule;

			/*
			 * The mobj and the input user address may each
			 * include a specific offset-in-granule position.
			 * Drop both to get the target physical page base
			 * address, then apply only the user address
			 * offset-in-granule. The smallest mapping granule
			 * is the small page.
			 */
			granule = MAX(region->mobj->phys_granule,
				      (size_t)SMALL_PAGE_SIZE);
			assert(!granule || IS_POWER_OF_TWO(granule));

			offset = region->offset +
				 ROUNDDOWN((vaddr_t)ua - region->va, granule);

			res = mobj_get_pa(region->mobj, offset, granule, &p);
			if (res != TEE_SUCCESS)
				return res;

			*pa = p | ((vaddr_t)ua & (granule - 1));
		}
		if (attr)
			*attr = region->attr;

		return TEE_SUCCESS;
	}

	return TEE_ERROR_ACCESS_DENIED;
}

TEE_Result vm_va2pa(const struct user_mode_ctx *uctx, void *ua, paddr_t *pa)
{
	return tee_mmu_user_va2pa_attr(uctx, ua, pa, NULL);
}

void *vm_pa2va(const struct user_mode_ctx *uctx, paddr_t pa, size_t pa_size)
{
	paddr_t p = 0;
	struct vm_region *region = NULL;

	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
		size_t granule = 0;
		size_t size = 0;
		size_t ofs = 0;

		/* pa2va is expected only for memory tracked through mobj */
		if (!region->mobj)
			continue;

		/* Physically granulated memory object must be scanned */
		granule = region->mobj->phys_granule;
		assert(!granule || IS_POWER_OF_TWO(granule));

		for (ofs = region->offset; ofs < region->size; ofs += size) {

			if (granule) {
				/* From current offset to buffer/granule end */
				size = granule - (ofs & (granule - 1));

				if (size > (region->size - ofs))
					size = region->size - ofs;
			} else {
				size = region->size;
			}

			if (mobj_get_pa(region->mobj, ofs, granule, &p))
				continue;

			if (core_is_buffer_inside(pa, pa_size, p, size)) {
				/* Remove region offset (mobj phys offset) */
				ofs -= region->offset;
				/* Get offset-in-granule */
				p = pa - p;

				return (void *)(region->va + ofs + (vaddr_t)p);
			}
		}
	}

	return NULL;
}


TEE_Result vm_check_access_rights(const struct user_mode_ctx *uctx,
				  uint32_t flags, uaddr_t uaddr, size_t len)
{
	uaddr_t a = 0;
	uaddr_t end_addr = 0;
	size_t addr_incr = MIN(CORE_MMU_USER_CODE_SIZE,
			       CORE_MMU_USER_PARAM_SIZE);

	if (ADD_OVERFLOW(uaddr, len, &end_addr))
		return TEE_ERROR_ACCESS_DENIED;

	if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
	    (flags & TEE_MEMORY_ACCESS_SECURE))
		return TEE_ERROR_ACCESS_DENIED;

	/*
	 * Rely on TA private memory test to check if address range is private
	 * to TA or not.
	 */
	if (!(flags & TEE_MEMORY_ACCESS_ANY_OWNER) &&
	    !vm_buf_is_inside_um_private(uctx, (void *)uaddr, len))
		return TEE_ERROR_ACCESS_DENIED;

	for (a = ROUNDDOWN(uaddr, addr_incr); a < end_addr; a += addr_incr) {
		uint32_t attr;
		TEE_Result res;

		res = tee_mmu_user_va2pa_attr(uctx, (void *)a, NULL, &attr);
		if (res != TEE_SUCCESS)
			return res;

		if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
		    (attr & TEE_MATTR_SECURE))
			return TEE_ERROR_ACCESS_DENIED;

		if ((flags & TEE_MEMORY_ACCESS_SECURE) &&
		    !(attr & TEE_MATTR_SECURE))
			return TEE_ERROR_ACCESS_DENIED;

		if ((flags & TEE_MEMORY_ACCESS_WRITE) && !(attr & TEE_MATTR_UW))
			return TEE_ERROR_ACCESS_DENIED;
		if ((flags & TEE_MEMORY_ACCESS_READ) && !(attr & TEE_MATTR_UR))
			return TEE_ERROR_ACCESS_DENIED;
	}

	return TEE_SUCCESS;
}

void vm_set_ctx(struct ts_ctx *ctx)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	core_mmu_set_user_map(NULL);
	/*
	 * No matter what happens below, the current user TA will not be
	 * current any longer. Make sure pager is in sync with that.
	 * This function has to be called before there's a chance that
	 * pgt_free_unlocked() is called.
	 *
	 * Save translation tables in a cache if it's a user TA.
	 */
	pgt_free(&tsd->pgt_cache, is_user_ta_ctx(tsd->ctx));

	if (is_user_mode_ctx(ctx)) {
		struct core_mmu_user_map map = { };
		struct user_mode_ctx *uctx = to_user_mode_ctx(ctx);

		core_mmu_create_user_map(uctx, &map);
		core_mmu_set_user_map(&map);
		tee_pager_assign_um_tables(uctx);
	}
	tsd->ctx = ctx;
}

struct mobj *vm_get_mobj(struct user_mode_ctx *uctx, vaddr_t va, size_t *len,
			 uint16_t *prot, size_t *offs)
{
	struct vm_region *r = NULL;
	size_t r_offs = 0;

	if (!len || ((*len | va) & SMALL_PAGE_MASK))
		return NULL;

	r = find_vm_region(&uctx->vm_info, va);
	if (!r)
		return NULL;

	r_offs = va - r->va;

	*len = MIN(r->size - r_offs, *len);
	*offs = r->offset + r_offs;
	*prot = r->attr & TEE_MATTR_PROT_MASK;
	return mobj_get(r->mobj);
}