// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2021, Linaro Limited
 */

#include <assert.h>
#include <config.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/linker.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/tee_misc.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <optee_msg.h>
#include <sm/optee_smc.h>
#include <stdlib.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

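/*
 * Global mobjs covering the secure DDR used as TA RAM and the core's own
 * read-only/executable and read-write TEE RAM. They are set up once by
 * mobj_init() below.
 */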
struct mobj *mobj_sec_ddr;
struct mobj *mobj_tee_ram_rx;
struct mobj *mobj_tee_ram_rw;

/*
 * mobj_phys implementation
 */

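/*
 * struct mobj_phys - mobj covering a physically contiguous buffer
 * @mobj:  generic mobj header
 * @battr: buffer attribute, a CORE_MEM_* value used by the matches() hook
 * @cattr: cache attribute, a TEE_MATTR_CACHE_* value reported by get_cattr()
 * @va:    virtual address of the buffer, 0 when the buffer is unmapped
 * @pa:    physical address of the buffer
 */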
struct mobj_phys {
        struct mobj mobj;
        enum buf_is_attr battr;
        uint32_t cattr; /* Defined by TEE_MATTR_CACHE_* in tee_mmu_types.h */
        vaddr_t va;
        paddr_t pa;
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj);

static void *mobj_phys_get_va(struct mobj *mobj, size_t offset, size_t len)
{
        struct mobj_phys *moph = to_mobj_phys(mobj);

        if (!moph->va || !mobj_check_offset_and_len(mobj, offset, len))
                return NULL;

        return (void *)(moph->va + offset);
}

static TEE_Result mobj_phys_get_pa(struct mobj *mobj, size_t offs,
                                   size_t granule, paddr_t *pa)
{
        struct mobj_phys *moph = to_mobj_phys(mobj);
        paddr_t p;

        if (!pa)
                return TEE_ERROR_GENERIC;

        p = moph->pa + offs;

        if (granule) {
                if (granule != SMALL_PAGE_SIZE &&
                    granule != CORE_MMU_PGDIR_SIZE)
                        return TEE_ERROR_GENERIC;
                p &= ~(granule - 1);
        }

        *pa = p;
        return TEE_SUCCESS;
}
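/*
 * The get_pa() helpers in this file are kept in the unpaged area with
 * DECLARE_KEEP_PAGER() since they may be called in paths where demand
 * paging is not available.
 */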
DECLARE_KEEP_PAGER(mobj_phys_get_pa);

static TEE_Result mobj_phys_get_cattr(struct mobj *mobj, uint32_t *cattr)
{
        struct mobj_phys *moph = to_mobj_phys(mobj);

        if (!cattr)
                return TEE_ERROR_GENERIC;

        *cattr = moph->cattr;
        return TEE_SUCCESS;
}

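/*
 * Classify the buffer: CORE_MEM_SEC matches any of the secure attributes
 * (CORE_MEM_SEC, CORE_MEM_TEE_RAM, CORE_MEM_TA_RAM, CORE_MEM_SDP_MEM),
 * CORE_MEM_NON_SEC matches only non-secure shared memory, and the specific
 * attributes must match exactly.
 */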
static bool mobj_phys_matches(struct mobj *mobj, enum buf_is_attr attr)
{
        struct mobj_phys *moph = to_mobj_phys(mobj);
        enum buf_is_attr a;

        a = moph->battr;

        switch (attr) {
        case CORE_MEM_SEC:
                return a == CORE_MEM_SEC || a == CORE_MEM_TEE_RAM ||
                       a == CORE_MEM_TA_RAM || a == CORE_MEM_SDP_MEM;
        case CORE_MEM_NON_SEC:
                return a == CORE_MEM_NSEC_SHM;
        case CORE_MEM_TEE_RAM:
        case CORE_MEM_TA_RAM:
        case CORE_MEM_NSEC_SHM:
        case CORE_MEM_SDP_MEM:
                return attr == a;
        default:
                return false;
        }
}

static void mobj_phys_free(struct mobj *mobj)
{
        struct mobj_phys *moph = to_mobj_phys(mobj);

        free(moph);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_phys_ops __weak __rodata_unpaged("mobj_phys_ops") = {
        .get_va = mobj_phys_get_va,
        .get_pa = mobj_phys_get_pa,
        .get_phys_offs = NULL, /* only offset 0 */
        .get_cattr = mobj_phys_get_cattr,
        .matches = mobj_phys_matches,
        .free = mobj_phys_free,
};

static struct mobj_phys *to_mobj_phys(struct mobj *mobj)
{
        assert(mobj->ops == &mobj_phys_ops);
        return container_of(mobj, struct mobj_phys, mobj);
}

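/*
 * Create a mobj_phys covering @size bytes. When @pa is non-zero the already
 * mapped region of @area_type holding @pa is reused; when @pa is zero an
 * unused carve-out of @area_type is claimed with
 * core_mmu_find_mapping_exclusive() and supplies both PA and VA. Only SDP
 * memory is allowed to lack a virtual address.
 */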
static struct mobj *mobj_phys_init(paddr_t pa, size_t size, uint32_t cattr,
                                   enum buf_is_attr battr,
                                   enum teecore_memtypes area_type)
{
        void *va = NULL;
        struct mobj_phys *moph = NULL;
        struct tee_mmap_region *map = NULL;

        if ((pa & CORE_MMU_USER_PARAM_MASK) ||
            (size & CORE_MMU_USER_PARAM_MASK)) {
                DMSG("Expect %#x alignment", CORE_MMU_USER_PARAM_SIZE);
                return NULL;
        }

        if (pa) {
                va = phys_to_virt(pa, area_type, size);
        } else {
                map = core_mmu_find_mapping_exclusive(area_type, size);
                if (!map)
                        return NULL;

                pa = map->pa;
                va = (void *)map->va;
        }

        /* Only SDP memory may not have a virtual address */
        if (!va && battr != CORE_MEM_SDP_MEM)
                return NULL;

        moph = calloc(1, sizeof(*moph));
        if (!moph)
                return NULL;

        moph->battr = battr;
        moph->cattr = cattr;
        moph->mobj.size = size;
        moph->mobj.ops = &mobj_phys_ops;
        refcount_set(&moph->mobj.refc, 1);
        moph->pa = pa;
        moph->va = (vaddr_t)va;

        return &moph->mobj;
}

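/*
 * Translate the requested buffer attribute into a static memory area type
 * and create the corresponding mobj_phys. Only TEE RAM, TA RAM, non-secure
 * shared memory and SDP memory can be wrapped this way.
 */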
struct mobj *mobj_phys_alloc(paddr_t pa, size_t size, uint32_t cattr,
                             enum buf_is_attr battr)
{
        enum teecore_memtypes area_type;

        switch (battr) {
        case CORE_MEM_TEE_RAM:
                area_type = MEM_AREA_TEE_RAM_RW_DATA;
                break;
        case CORE_MEM_TA_RAM:
                area_type = MEM_AREA_TA_RAM;
                break;
        case CORE_MEM_NSEC_SHM:
                area_type = MEM_AREA_NSEC_SHM;
                break;
        case CORE_MEM_SDP_MEM:
                area_type = MEM_AREA_SDP_MEM;
                break;
        default:
                DMSG("can't allocate with specified attribute");
                return NULL;
        }

        return mobj_phys_init(pa, size, cattr, battr, area_type);
}

/*
 * mobj_virt implementation
 */

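/*
 * mobj_virt is a catch-all pseudo mobj: the "offset" passed to get_va() is
 * interpreted as the virtual address itself, so it can represent any
 * already mapped address. It only implements get_va() and spans the whole
 * address space (size is SIZE_MAX).
 */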
static void mobj_virt_assert_type(struct mobj *mobj);

static void *mobj_virt_get_va(struct mobj *mobj, size_t offset,
                              size_t len __maybe_unused)
{
        mobj_virt_assert_type(mobj);
        assert(mobj_check_offset_and_len(mobj, offset, len));

        return (void *)(vaddr_t)offset;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_virt_ops __weak __rodata_unpaged("mobj_virt_ops") = {
        .get_va = mobj_virt_get_va,
};

static void mobj_virt_assert_type(struct mobj *mobj __maybe_unused)
{
        assert(mobj->ops == &mobj_virt_ops);
}

struct mobj mobj_virt = { .ops = &mobj_virt_ops, .size = SIZE_MAX };

/*
 * mobj_mm implementation
 */

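/*
 * struct mobj_mm - mobj covering an allocation from a tee_mm pool
 * @mm:          the tee_mm entry backing this mobj
 * @parent_mobj: mobj covering the whole pool; all operations are forwarded
 *               to it with the offset adjusted by the pool entry offset
 * @mobj:        generic mobj header
 */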
struct mobj_mm {
        tee_mm_entry_t *mm;
        struct mobj *parent_mobj;
        struct mobj mobj;
};

static struct mobj_mm *to_mobj_mm(struct mobj *mobj);

static size_t mobj_mm_offs(struct mobj *mobj, size_t offs)
{
        tee_mm_entry_t *mm = to_mobj_mm(mobj)->mm;

        return (mm->offset << mm->pool->shift) + offs;
}

static void *mobj_mm_get_va(struct mobj *mobj, size_t offs, size_t len)
{
        return mobj_get_va(to_mobj_mm(mobj)->parent_mobj,
                           mobj_mm_offs(mobj, offs), len);
}

static TEE_Result mobj_mm_get_pa(struct mobj *mobj, size_t offs,
                                 size_t granule, paddr_t *pa)
{
        return mobj_get_pa(to_mobj_mm(mobj)->parent_mobj,
                           mobj_mm_offs(mobj, offs), granule, pa);
}
DECLARE_KEEP_PAGER(mobj_mm_get_pa);

static size_t mobj_mm_get_phys_offs(struct mobj *mobj, size_t granule)
{
        return mobj_get_phys_offs(to_mobj_mm(mobj)->parent_mobj, granule);
}

static TEE_Result mobj_mm_get_cattr(struct mobj *mobj, uint32_t *cattr)
{
        return mobj_get_cattr(to_mobj_mm(mobj)->parent_mobj, cattr);
}

static bool mobj_mm_matches(struct mobj *mobj, enum buf_is_attr attr)
{
        return mobj_matches(to_mobj_mm(mobj)->parent_mobj, attr);
}

static void mobj_mm_free(struct mobj *mobj)
{
        struct mobj_mm *m = to_mobj_mm(mobj);

        tee_mm_free(m->mm);
        free(m);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_mm_ops __weak __rodata_unpaged("mobj_mm_ops") = {
        .get_va = mobj_mm_get_va,
        .get_pa = mobj_mm_get_pa,
        .get_phys_offs = mobj_mm_get_phys_offs,
        .get_cattr = mobj_mm_get_cattr,
        .matches = mobj_mm_matches,
        .free = mobj_mm_free,
};

static struct mobj_mm *to_mobj_mm(struct mobj *mobj)
{
        assert(mobj->ops == &mobj_mm_ops);
        return container_of(mobj, struct mobj_mm, mobj);
}

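/*
 * Allocate @size bytes from @pool and wrap the resulting tee_mm entry in a
 * new mobj that forwards all operations to @mobj_parent (for instance,
 * carving a range of TA RAM out of mobj_sec_ddr with the tee_mm_sec_ddr
 * pool). Freeing the returned mobj releases the tee_mm entry again.
 */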
struct mobj *mobj_mm_alloc(struct mobj *mobj_parent, size_t size,
                           tee_mm_pool_t *pool)
{
        struct mobj_mm *m = calloc(1, sizeof(*m));

        if (!m)
                return NULL;

        m->mm = tee_mm_alloc(pool, size);
        if (!m->mm) {
                free(m);
                return NULL;
        }

        m->parent_mobj = mobj_parent;
        m->mobj.size = size;
        m->mobj.ops = &mobj_mm_ops;
        refcount_set(&m->mobj.refc, 1);

        return &m->mobj;
}

/*
 * mobj_shm implementation. A mobj_shm represents a buffer in the predefined
 * non-secure shared memory region:
 * - it is physically contiguous.
 * - it is identified in the static physical layout as MEM_AREA_NSEC_SHM.
 * - it matches both the specific CORE_MEM_NSEC_SHM attribute and the
 *   generic non-secure CORE_MEM_NON_SEC attribute.
 */

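/*
 * struct mobj_shm - mobj covering a slice of the non-secure shared memory
 * @mobj:   generic mobj header
 * @pa:     physical address of the buffer
 * @cookie: cookie by which the normal world identifies this shared buffer
 */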
struct mobj_shm {
        struct mobj mobj;
        paddr_t pa;
        uint64_t cookie;
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj);

static void *mobj_shm_get_va(struct mobj *mobj, size_t offset, size_t len)
{
        struct mobj_shm *m = to_mobj_shm(mobj);

        if (!mobj_check_offset_and_len(mobj, offset, len))
                return NULL;

        return phys_to_virt(m->pa + offset, MEM_AREA_NSEC_SHM,
                            mobj->size - offset);
}

static TEE_Result mobj_shm_get_pa(struct mobj *mobj, size_t offs,
                                  size_t granule, paddr_t *pa)
{
        struct mobj_shm *m = to_mobj_shm(mobj);
        paddr_t p;

        if (!pa || offs >= mobj->size)
                return TEE_ERROR_GENERIC;

        p = m->pa + offs;

        if (granule) {
                if (granule != SMALL_PAGE_SIZE &&
                    granule != CORE_MMU_PGDIR_SIZE)
                        return TEE_ERROR_GENERIC;
                p &= ~(granule - 1);
        }

        *pa = p;
        return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_shm_get_pa);

static size_t mobj_shm_get_phys_offs(struct mobj *mobj, size_t granule)
{
        assert(IS_POWER_OF_TWO(granule));
        return to_mobj_shm(mobj)->pa & (granule - 1);
}

static bool mobj_shm_matches(struct mobj *mobj __unused, enum buf_is_attr attr)
{
        return attr == CORE_MEM_NSEC_SHM || attr == CORE_MEM_NON_SEC;
}

static void mobj_shm_free(struct mobj *mobj)
{
        struct mobj_shm *m = to_mobj_shm(mobj);

        free(m);
}

static uint64_t mobj_shm_get_cookie(struct mobj *mobj)
{
        return to_mobj_shm(mobj)->cookie;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_shm_ops __weak __rodata_unpaged("mobj_shm_ops") = {
        .get_va = mobj_shm_get_va,
        .get_pa = mobj_shm_get_pa,
        .get_phys_offs = mobj_shm_get_phys_offs,
        .matches = mobj_shm_matches,
        .free = mobj_shm_free,
        .get_cookie = mobj_shm_get_cookie,
};

static struct mobj_shm *to_mobj_shm(struct mobj *mobj)
{
        assert(mobj->ops == &mobj_shm_ops);
        return container_of(mobj, struct mobj_shm, mobj);
}

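/*
 * Create a mobj for a normal world shared memory buffer at @pa of @size
 * bytes, identified by @cookie. The range must lie entirely within the
 * registered CORE_MEM_NSEC_SHM area or NULL is returned.
 *
 * A minimal usage sketch, assuming @pa, @size and @cookie were supplied by
 * the normal world:
 *
 *      struct mobj *m = mobj_shm_alloc(pa, size, cookie);
 *      void *va = NULL;
 *
 *      if (m)
 *              va = mobj_get_va(m, 0, size);
 */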
struct mobj *mobj_shm_alloc(paddr_t pa, size_t size, uint64_t cookie)
{
        struct mobj_shm *m;

        if (!core_pbuf_is(CORE_MEM_NSEC_SHM, pa, size))
                return NULL;

        m = calloc(1, sizeof(*m));
        if (!m)
                return NULL;

        m->mobj.size = size;
        m->mobj.ops = &mobj_shm_ops;
        refcount_set(&m->mobj.refc, 1);
        m->pa = pa;
        m->cookie = cookie;

        return &m->mobj;
}

#ifdef CFG_PAGED_USER_TA
/*
 * mobj_seccpy_shm implementation
 */

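/*
 * struct mobj_seccpy_shm - pager-backed buffer for secure copies of shared
 * memory
 * @utc:  user TA context owning the buffer; get_va() only succeeds when
 *        that context is the currently active one
 * @va:   virtual address of the buffer in the owning TA's address space
 * @mobj: generic mobj header
 * @fobj: read/write paged fobj backing the buffer
 */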
struct mobj_seccpy_shm {
        struct user_ta_ctx *utc;
        vaddr_t va;
        struct mobj mobj;
        struct fobj *fobj;
};

static bool __maybe_unused mobj_is_seccpy_shm(struct mobj *mobj);

static struct mobj_seccpy_shm *to_mobj_seccpy_shm(struct mobj *mobj)
{
        assert(mobj_is_seccpy_shm(mobj));
        return container_of(mobj, struct mobj_seccpy_shm, mobj);
}

static void *mobj_seccpy_shm_get_va(struct mobj *mobj, size_t offs, size_t len)
{
        struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);

        if (&m->utc->ta_ctx.ts_ctx != thread_get_tsd()->ctx)
                return NULL;

        if (!mobj_check_offset_and_len(mobj, offs, len))
                return NULL;
        return (void *)(m->va + offs);
}

static bool mobj_seccpy_shm_matches(struct mobj *mobj __maybe_unused,
                                    enum buf_is_attr attr)
{
        assert(mobj_is_seccpy_shm(mobj));

        return attr == CORE_MEM_SEC || attr == CORE_MEM_TEE_RAM;
}

static void mobj_seccpy_shm_free(struct mobj *mobj)
{
        struct mobj_seccpy_shm *m = to_mobj_seccpy_shm(mobj);

        tee_pager_rem_um_region(&m->utc->uctx, m->va, mobj->size);
        vm_rem_rwmem(&m->utc->uctx, mobj, m->va);
        fobj_put(m->fobj);
        free(m);
}

static struct fobj *mobj_seccpy_shm_get_fobj(struct mobj *mobj)
{
        return fobj_get(to_mobj_seccpy_shm(mobj)->fobj);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_seccpy_shm_ops
        __weak __rodata_unpaged("mobj_seccpy_shm_ops") = {
        .get_va = mobj_seccpy_shm_get_va,
        .matches = mobj_seccpy_shm_matches,
        .free = mobj_seccpy_shm_free,
        .get_fobj = mobj_seccpy_shm_get_fobj,
};

static bool mobj_is_seccpy_shm(struct mobj *mobj)
{
        return mobj && mobj->ops == &mobj_seccpy_shm_ops;
}

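/*
 * Allocate a paged, read/write secure buffer of @size bytes and map it into
 * the address space of the user TA running on the current thread. Returns
 * NULL if the current context is not a user TA or if allocation or mapping
 * fails.
 */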
struct mobj *mobj_seccpy_shm_alloc(size_t size)
{
        struct thread_specific_data *tsd = thread_get_tsd();
        struct mobj_seccpy_shm *m;
        struct user_ta_ctx *utc;
        vaddr_t va = 0;

        if (!is_user_ta_ctx(tsd->ctx))
                return NULL;
        utc = to_user_ta_ctx(tsd->ctx);

        m = calloc(1, sizeof(*m));
        if (!m)
                return NULL;

        m->mobj.size = size;
        m->mobj.ops = &mobj_seccpy_shm_ops;
        refcount_set(&m->mobj.refc, 1);

        if (vm_add_rwmem(&utc->uctx, &m->mobj, &va) != TEE_SUCCESS)
                goto bad;

        m->fobj = fobj_rw_paged_alloc(ROUNDUP(size, SMALL_PAGE_SIZE) /
                                      SMALL_PAGE_SIZE);
        if (tee_pager_add_um_region(&utc->uctx, va, m->fobj,
                                    TEE_MATTR_PRW | TEE_MATTR_URW))
                goto bad;

        m->va = va;
        m->utc = to_user_ta_ctx(tsd->ctx);
        return &m->mobj;
bad:
        if (va)
                vm_rem_rwmem(&utc->uctx, &m->mobj, va);
        fobj_put(m->fobj);
        free(m);
        return NULL;
}

#endif /*CFG_PAGED_USER_TA*/

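/*
 * struct mobj_with_fobj - mobj backed by an fobj
 * @fobj: fobj backing the memory, provides the page count and, when
 *        supported, physical addresses
 * @file: file associated with the fobj, if any; the mobj holds a reference
 *        to it
 * @mobj: generic mobj header
 */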
struct mobj_with_fobj {
        struct fobj *fobj;
        struct file *file;
        struct mobj mobj;
};

const struct mobj_ops mobj_with_fobj_ops;

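/*
 * Wrap @fobj (and optionally @file) in a mobj. The mobj takes its own
 * reference on @fobj and on @file when one is supplied, sizes itself from
 * the fobj page count and uses a small page physical granule.
 */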
struct mobj *mobj_with_fobj_alloc(struct fobj *fobj, struct file *file)
{
        struct mobj_with_fobj *m = NULL;

        if (!fobj)
                return NULL;

        m = calloc(1, sizeof(*m));
        if (!m)
                return NULL;

        m->mobj.ops = &mobj_with_fobj_ops;
        refcount_set(&m->mobj.refc, 1);
        m->mobj.size = fobj->num_pages * SMALL_PAGE_SIZE;
        m->mobj.phys_granule = SMALL_PAGE_SIZE;
        m->fobj = fobj_get(fobj);
        m->file = file_get(file);

        return &m->mobj;
}

static struct mobj_with_fobj *to_mobj_with_fobj(struct mobj *mobj)
{
        assert(mobj && mobj->ops == &mobj_with_fobj_ops);

        return container_of(mobj, struct mobj_with_fobj, mobj);
}

static bool mobj_with_fobj_matches(struct mobj *mobj __maybe_unused,
                                   enum buf_is_attr attr)
{
        assert(to_mobj_with_fobj(mobj));

        /*
         * All fobjs are supposed to be mapped secure, so classify them as
         * CORE_MEM_SEC. Stay out of CORE_MEM_TEE_RAM etc.; if that
         * information is needed it can probably be carried some other way
         * than putting the burden directly on the fobj.
         */
        return attr == CORE_MEM_SEC;
}

static void mobj_with_fobj_free(struct mobj *mobj)
{
        struct mobj_with_fobj *m = to_mobj_with_fobj(mobj);

        fobj_put(m->fobj);
        file_put(m->file);
        free(m);
}

static struct fobj *mobj_with_fobj_get_fobj(struct mobj *mobj)
{
        return fobj_get(to_mobj_with_fobj(mobj)->fobj);
}

static TEE_Result mobj_with_fobj_get_cattr(struct mobj *mobj __unused,
                                           uint32_t *cattr)
{
        if (!cattr)
                return TEE_ERROR_GENERIC;

        /* All fobjs are mapped as normal cached memory */
        *cattr = TEE_MATTR_CACHE_CACHED;

        return TEE_SUCCESS;
}

static TEE_Result mobj_with_fobj_get_pa(struct mobj *mobj, size_t offs,
                                        size_t granule, paddr_t *pa)
{
        struct mobj_with_fobj *f = to_mobj_with_fobj(mobj);
        paddr_t p = 0;

        if (!f->fobj->ops->get_pa) {
                assert(mobj_is_paged(mobj));
                return TEE_ERROR_NOT_SUPPORTED;
        }

        p = f->fobj->ops->get_pa(f->fobj, offs / SMALL_PAGE_SIZE) +
            offs % SMALL_PAGE_SIZE;

        if (granule) {
                if (granule != SMALL_PAGE_SIZE &&
                    granule != CORE_MMU_PGDIR_SIZE)
                        return TEE_ERROR_GENERIC;
                p &= ~(granule - 1);
        }

        *pa = p;

        return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(mobj_with_fobj_get_pa);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_with_fobj_ops
        __weak __rodata_unpaged("mobj_with_fobj_ops") = {
        .matches = mobj_with_fobj_matches,
        .free = mobj_with_fobj_free,
        .get_fobj = mobj_with_fobj_get_fobj,
        .get_cattr = mobj_with_fobj_get_cattr,
        .get_pa = mobj_with_fobj_get_pa,
};

#ifdef CFG_PAGED_USER_TA
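/*
 * A mobj is paged if it is a seccpy_shm mobj or a fobj-backed mobj whose
 * fobj cannot report physical addresses, i.e. the backing pages are
 * provided on demand by the pager.
 */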
bool mobj_is_paged(struct mobj *mobj)
{
        if (mobj->ops == &mobj_seccpy_shm_ops)
                return true;

        if (mobj->ops == &mobj_with_fobj_ops &&
            !to_mobj_with_fobj(mobj)->fobj->ops->get_pa)
                return true;

        return false;
}
#endif /*CFG_PAGED_USER_TA*/

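/*
 * Register the global mobjs at boot: mobj_sec_ddr covers the secure DDR
 * used as TA RAM, while mobj_tee_ram_rx and mobj_tee_ram_rw cover the
 * core's own TEE RAM. With CFG_CORE_RWDATA_NOEXEC the read-only/executable
 * and read-write parts get separate mobjs, otherwise a single mobj covers
 * both.
 */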
static TEE_Result mobj_init(void)
{
        mobj_sec_ddr = mobj_phys_alloc(tee_mm_sec_ddr.lo,
                                       tee_mm_sec_ddr.size,
                                       OPTEE_SMC_SHM_CACHED, CORE_MEM_TA_RAM);
        if (!mobj_sec_ddr)
                panic("Failed to register secure ta ram");

        if (IS_ENABLED(CFG_CORE_RWDATA_NOEXEC)) {
                mobj_tee_ram_rx = mobj_phys_init(0,
                                                 VCORE_UNPG_RX_SZ,
                                                 TEE_MATTR_CACHE_CACHED,
                                                 CORE_MEM_TEE_RAM,
                                                 MEM_AREA_TEE_RAM_RX);
                if (!mobj_tee_ram_rx)
                        panic("Failed to register tee ram rx");

                mobj_tee_ram_rw = mobj_phys_init(0,
                                                 VCORE_UNPG_RW_SZ,
                                                 TEE_MATTR_CACHE_CACHED,
                                                 CORE_MEM_TEE_RAM,
                                                 MEM_AREA_TEE_RAM_RW_DATA);
                if (!mobj_tee_ram_rw)
                        panic("Failed to register tee ram rw");
        } else {
                mobj_tee_ram_rw = mobj_phys_init(TEE_RAM_START,
                                                 VCORE_UNPG_RW_PA +
                                                 VCORE_UNPG_RW_SZ -
                                                 TEE_RAM_START,
                                                 TEE_MATTR_CACHE_CACHED,
                                                 CORE_MEM_TEE_RAM,
                                                 MEM_AREA_TEE_RAM_RW_DATA);
                if (!mobj_tee_ram_rw)
                        panic("Failed to register tee ram");

                mobj_tee_ram_rx = mobj_tee_ram_rw;
        }

        return TEE_SUCCESS;
}

driver_init_late(mobj_init);