// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2020, Linaro Limited
 */

#include <assert.h>
#include <bitstring.h>
#include <ffa.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/thread_spmc.h>
#include <mm/mobj.h>
#include <sys/queue.h>

struct mobj_ffa {
	struct mobj mobj;
	SLIST_ENTRY(mobj_ffa) link;
	uint64_t cookie;
	tee_mm_entry_t *mm;
	struct refcount mapcount;
	uint16_t page_offset;
#ifdef CFG_CORE_SEL1_SPMC
	bool registered_by_cookie;
	bool unregistered_by_cookie;
#endif
	paddr_t pages[];
};

SLIST_HEAD(mobj_ffa_head, mobj_ffa);

#ifdef CFG_CORE_SEL1_SPMC
#define NUM_SHMS	64
static bitstr_t bit_decl(shm_bits, NUM_SHMS);
#endif

static struct mobj_ffa_head shm_head = SLIST_HEAD_INITIALIZER(shm_head);
static struct mobj_ffa_head shm_inactive_head =
	SLIST_HEAD_INITIALIZER(shm_inactive_head);

static unsigned int shm_lock = SPINLOCK_UNLOCK;

const struct mobj_ops mobj_ffa_ops;
static struct mobj_ffa *to_mobj_ffa(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_ffa_ops);
	return container_of(mobj, struct mobj_ffa, mobj);
}

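/*
 * Size in bytes needed for a struct mobj_ffa covering @num_pages pages,
 * including the trailing pages[] array. Returns 0 on overflow.
 */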
static size_t shm_size(size_t num_pages)
{
	size_t s = 0;

	if (MUL_OVERFLOW(sizeof(paddr_t), num_pages, &s))
		return 0;
	if (ADD_OVERFLOW(sizeof(struct mobj_ffa), s, &s))
		return 0;
	return s;
}

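/*
 * Allocate and minimally initialize a struct mobj_ffa for @num_pages
 * pages. The page array is left empty and the cookie unset; the callers
 * fill those in. Returns NULL on failure.
 */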
static struct mobj_ffa *ffa_new(unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;
	size_t s = 0;

	if (!num_pages)
		return NULL;

	s = shm_size(num_pages);
	if (!s)
		return NULL;
	mf = calloc(1, s);
	if (!mf)
		return NULL;

	mf->mobj.ops = &mobj_ffa_ops;
	mf->mobj.size = num_pages * SMALL_PAGE_SIZE;
	mf->mobj.phys_granule = SMALL_PAGE_SIZE;
	refcount_set(&mf->mobj.refc, 0);

	return mf;
}

#ifdef CFG_CORE_SEL1_SPMC
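/*
 * Create a new mobj_ffa when OP-TEE acts as the S-EL1 SPMC: a free index
 * is claimed from shm_bits and turned into the cookie. Returns NULL if
 * the allocation fails or all NUM_SHMS slots are in use.
 */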
struct mobj_ffa *mobj_ffa_sel1_spmc_new(unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;
	int i = 0;

	mf = ffa_new(num_pages);
	if (!mf)
		return NULL;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	bit_ffc(shm_bits, NUM_SHMS, &i);
	if (i != -1) {
		bit_set(shm_bits, i);
		/*
		 * Set bit 44 so that one of the upper 32 bits of the
		 * cookie is exercised too, for testing.
		 */
		mf->cookie = i | FFA_MEMORY_HANDLE_NONE_SECURE_BIT;
	}
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	if (i == -1) {
		free(mf);
		return NULL;
	}

	return mf;
}
#endif /*CFG_CORE_SEL1_SPMC*/

static size_t get_page_count(struct mobj_ffa *mf)
{
	return ROUNDUP(mf->mobj.size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
}

static bool cmp_cookie(struct mobj_ffa *mf, uint64_t cookie)
{
	return mf->cookie == cookie;
}

static bool cmp_ptr(struct mobj_ffa *mf, uint64_t ptr)
{
	return mf == (void *)(vaddr_t)ptr;
}

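/*
 * Unlink and return the first entry in @head for which @cmp_func matches
 * @val, or NULL if there is no match. The caller must hold shm_lock.
 */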
static struct mobj_ffa *pop_from_list(struct mobj_ffa_head *head,
				      bool (*cmp_func)(struct mobj_ffa *mf,
						       uint64_t val),
				      uint64_t val)
{
	struct mobj_ffa *mf = SLIST_FIRST(head);
	struct mobj_ffa *p = NULL;

	if (!mf)
		return NULL;

	if (cmp_func(mf, val)) {
		SLIST_REMOVE_HEAD(head, link);
		return mf;
	}

	while (true) {
		p = SLIST_NEXT(mf, link);
		if (!p)
			return NULL;
		if (cmp_func(p, val)) {
			SLIST_REMOVE_AFTER(mf, link);
			return p;
		}
		mf = p;
	}
}

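/*
 * Return the first entry in @head for which @cmp_func matches @val,
 * without unlinking it, or NULL if there is no match. The caller must
 * hold shm_lock.
 */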
static struct mobj_ffa *find_in_list(struct mobj_ffa_head *head,
				     bool (*cmp_func)(struct mobj_ffa *mf,
						      uint64_t val),
				     uint64_t val)
{
	struct mobj_ffa *mf = NULL;

	SLIST_FOREACH(mf, head, link)
		if (cmp_func(mf, val))
			return mf;

	return NULL;
}

#ifdef CFG_CORE_SEL1_SPMC
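/*
 * Free a mobj_ffa created with mobj_ffa_sel1_spmc_new() and release its
 * slot in shm_bits. The object must be unmapped and not on any list.
 */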
void mobj_ffa_sel1_spmc_delete(struct mobj_ffa *mf)
{
	int i = mf->cookie & ~BIT64(44);
	uint32_t exceptions = 0;

	assert(i >= 0 && i < NUM_SHMS);

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	assert(bit_test(shm_bits, i));
	bit_clear(shm_bits, i);
	assert(!mf->mm);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	free(mf);
}
#endif /*CFG_CORE_SEL1_SPMC*/

#ifdef CFG_CORE_SEL2_SPMC
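/*
 * With an SPMC at S-EL2 the cookie is supplied by the caller rather than
 * allocated from shm_bits as in the S-EL1 case.
 */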
struct mobj_ffa *mobj_ffa_sel2_spmc_new(uint64_t cookie,
					unsigned int num_pages)
{
	struct mobj_ffa *mf = NULL;

	assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
	mf = ffa_new(num_pages);
	if (mf)
		mf->cookie = cookie;
	return mf;
}

void mobj_ffa_sel2_spmc_delete(struct mobj_ffa *mf)
{
	free(mf);
}
#endif /*CFG_CORE_SEL2_SPMC*/

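/*
 * Record @num_pages physical pages starting at @pa in mf->pages[],
 * beginning at index *@idx, and advance *@idx past them. The pages must
 * lie in non-secure memory and fit within the page count the mobj was
 * created for.
 */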
TEE_Result mobj_ffa_add_pages_at(struct mobj_ffa *mf, unsigned int *idx,
				 paddr_t pa, unsigned int num_pages)
{
	unsigned int n = 0;
	size_t tot_page_count = get_page_count(mf);

	if (ADD_OVERFLOW(*idx, num_pages, &n) || n > tot_page_count)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, num_pages * SMALL_PAGE_SIZE))
		return TEE_ERROR_BAD_PARAMETERS;

	for (n = 0; n < num_pages; n++)
		mf->pages[n + *idx] = pa + n * SMALL_PAGE_SIZE;

	(*idx) += n;
	return TEE_SUCCESS;
}

uint64_t mobj_ffa_get_cookie(struct mobj_ffa *mf)
{
	return mf->cookie;
}

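/*
 * Insert a fully populated mobj_ffa on the inactive list, where it can
 * later be activated with mobj_ffa_get_by_cookie(). Returns the cookie.
 */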
uint64_t mobj_ffa_push_to_inactive(struct mobj_ffa *mf)
{
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	assert(!find_in_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf));
	assert(!find_in_list(&shm_inactive_head, cmp_cookie, mf->cookie));
	assert(!find_in_list(&shm_head, cmp_cookie, mf->cookie));
	SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return mf->cookie;
}

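/*
 * Tear down the core mapping of the shared memory pages, if any, and
 * release the virtual address range. The caller must hold shm_lock.
 */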
static void unmap_helper(struct mobj_ffa *mf)
{
	if (mf->mm) {
		core_mmu_unmap_pages(tee_mm_get_smem(mf->mm),
				     get_page_count(mf));
		tee_mm_free(mf->mm);
		mf->mm = NULL;
	}
}

#ifdef CFG_CORE_SEL1_SPMC
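/*
 * Try to reclaim (delete) the shared memory object identified by
 * @cookie. This only succeeds if the mobj is inactive and has not been
 * registered via mobj_ffa_get_by_cookie() without a matching
 * mobj_ffa_unregister_by_cookie().
 */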
TEE_Result mobj_ffa_sel1_spmc_reclaim(uint64_t cookie)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	/*
	 * If the mobj is found here it's still active and cannot be
	 * reclaimed.
	 */
	if (mf) {
		DMSG("cookie %#"PRIx64" busy refc %u",
		     cookie, refcount_val(&mf->mobj.refc));
		res = TEE_ERROR_BUSY;
		goto out;
	}

	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
	if (!mf) {
		res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}
	/*
	 * If the mobj has been registered via mobj_ffa_get_by_cookie()
	 * but not yet unregistered with mobj_ffa_unregister_by_cookie(),
	 * it's still in use and cannot be reclaimed.
	 */
	if (mf->registered_by_cookie && !mf->unregistered_by_cookie) {
		DMSG("cookie %#"PRIx64" busy", cookie);
		res = TEE_ERROR_BUSY;
		goto out;
	}

	if (!pop_from_list(&shm_inactive_head, cmp_ptr, (vaddr_t)mf))
		panic();
	res = TEE_SUCCESS;
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	if (!res)
		mobj_ffa_sel1_spmc_delete(mf);
	return res;
}
#endif /*CFG_CORE_SEL1_SPMC*/

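/*
 * Mark the mobj identified by @cookie as no longer registered for use by
 * cookie. The mobj must be inactive. With an S-EL2 SPMC the object is
 * deleted and the memory relinquished; with the S-EL1 SPMC it is only
 * flagged so that a later reclaim can succeed.
 */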
TEE_Result mobj_ffa_unregister_by_cookie(uint64_t cookie)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	assert(cookie != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID);
	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	/*
	 * If the mobj is found here it's still active and cannot be
	 * unregistered.
	 */
	if (mf) {
		DMSG("cookie %#"PRIx64" busy refc %u",
		     cookie, refcount_val(&mf->mobj.refc));
		res = TEE_ERROR_BUSY;
		goto out;
	}
	mf = find_in_list(&shm_inactive_head, cmp_cookie, cookie);
	/*
	 * Fail if the mobj isn't found or if it has already been
	 * unregistered.
	 */
#ifdef CFG_CORE_SEL2_SPMC
	if (!mf) {
#else
	if (!mf || mf->unregistered_by_cookie) {
#endif
		res = TEE_ERROR_ITEM_NOT_FOUND;
		goto out;
	}

#ifdef CFG_CORE_SEL2_SPMC
	mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
	mobj_ffa_sel2_spmc_delete(mf);
	thread_spmc_relinquish(cookie);
#else
	mf->unregistered_by_cookie = true;
#endif
	res = TEE_SUCCESS;

out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	return res;
}

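/*
 * Look up the shared memory object identified by @cookie and return an
 * active reference to its mobj. An already active mobj gets its refcount
 * increased, provided @internal_offs matches its page offset. An
 * inactive mobj is moved to the active list (with an S-EL2 SPMC it may
 * first be retrieved from the SPMC) and has its size and page offset
 * adjusted for @internal_offs. Returns NULL on failure.
 */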
struct mobj *mobj_ffa_get_by_cookie(uint64_t cookie,
				    unsigned int internal_offs)
{
	struct mobj_ffa *mf = NULL;
	uint32_t exceptions = 0;

	if (internal_offs >= SMALL_PAGE_SIZE)
		return NULL;
	exceptions = cpu_spin_lock_xsave(&shm_lock);
	mf = find_in_list(&shm_head, cmp_cookie, cookie);
	if (mf) {
		if (mf->page_offset == internal_offs) {
			if (!refcount_inc(&mf->mobj.refc)) {
				/*
				 * If the refcount was 0, another thread
				 * has called mobj_put() and made it
				 * reach 0, but we found the mobj before
				 * ffa_inactivate() got the lock.
				 * Reinitialize it.
				 */
				refcount_set(&mf->mobj.refc, 1);
			}
			DMSG("cookie %#"PRIx64" active: refc %d",
			     cookie, refcount_val(&mf->mobj.refc));
		} else {
			EMSG("cookie %#"PRIx64" mismatching internal_offs got %#"PRIx16" expected %#x",
			     cookie, mf->page_offset, internal_offs);
			mf = NULL;
		}
	} else {
		mf = pop_from_list(&shm_inactive_head, cmp_cookie, cookie);
#if defined(CFG_CORE_SEL2_SPMC)
		/* Try to retrieve it from the SPM at S-EL2 */
		if (mf) {
			DMSG("cookie %#"PRIx64" resurrecting", cookie);
		} else {
			EMSG("Populating mobj from rx buffer, cookie %#"PRIx64,
			     cookie);
			mf = thread_spmc_populate_mobj_from_rx(cookie);
		}
#endif
		if (mf) {
#if defined(CFG_CORE_SEL1_SPMC)
			mf->unregistered_by_cookie = false;
			mf->registered_by_cookie = true;
#endif
			assert(refcount_val(&mf->mobj.refc) == 0);
			refcount_set(&mf->mobj.refc, 1);
			refcount_set(&mf->mapcount, 0);

			/*
			 * mf->page_offset is the offset into the first
			 * page. It is assigned from the internal_offs
			 * parameter to this function.
			 *
			 * While a mobj_ffa is active (refcount > 0) this
			 * doesn't change, but when the mobj is pushed to
			 * the inactive list it can be changed again.
			 *
			 * So below we back out the old mf->page_offset
			 * and then assign a new one from internal_offs.
			 */
			mf->mobj.size += mf->page_offset;
			assert(!(mf->mobj.size & SMALL_PAGE_MASK));
			mf->mobj.size -= internal_offs;
			mf->page_offset = internal_offs;

			SLIST_INSERT_HEAD(&shm_head, mf, link);
		}
	}

	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	if (!mf) {
		EMSG("Failed to get cookie %#"PRIx64" internal_offs %#x",
		     cookie, internal_offs);
		return NULL;
	}
	return &mf->mobj;
}

static TEE_Result ffa_get_pa(struct mobj *mobj, size_t offset,
			     size_t granule, paddr_t *pa)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	size_t full_offset = 0;
	paddr_t p = 0;

	if (!pa)
		return TEE_ERROR_GENERIC;

	if (offset >= mobj->size)
		return TEE_ERROR_GENERIC;

	full_offset = offset + mf->page_offset;
	switch (granule) {
	case 0:
		p = mf->pages[full_offset / SMALL_PAGE_SIZE] +
		    (full_offset & SMALL_PAGE_MASK);
		break;
	case SMALL_PAGE_SIZE:
		p = mf->pages[full_offset / SMALL_PAGE_SIZE];
		break;
	default:
		return TEE_ERROR_GENERIC;
	}
	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(ffa_get_pa);

static size_t ffa_get_phys_offs(struct mobj *mobj,
				size_t granule __maybe_unused)
{
	assert(granule >= mobj->phys_granule);

	return to_mobj_ffa(mobj)->page_offset;
}

static void *ffa_get_va(struct mobj *mobj, size_t offset, size_t len)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);

	if (!mf->mm || !mobj_check_offset_and_len(mobj, offset, len))
		return NULL;

	return (void *)(tee_mm_get_smem(mf->mm) + offset + mf->page_offset);
}

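/*
 * Called through mobj_ffa_ops.free when the last reference is dropped
 * with mobj_put(). Unless the mobj was resurrected by another thread in
 * the meantime, it is unmapped and moved back to the inactive list; the
 * object itself stays around until the cookie is unregistered or
 * reclaimed.
 */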
static void ffa_inactivate(struct mobj *mobj)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	/*
	 * If refcount isn't 0 some other thread has found this mobj in
	 * shm_head after the mobj_put() that put us here and before we
	 * got the lock.
	 */
	if (refcount_val(&mobj->refc)) {
		DMSG("cookie %#"PRIx64" was resurrected", mf->cookie);
		goto out;
	}

	DMSG("cookie %#"PRIx64, mf->cookie);
	if (!pop_from_list(&shm_head, cmp_ptr, (vaddr_t)mf))
		panic();
	unmap_helper(mf);
	SLIST_INSERT_HEAD(&shm_inactive_head, mf, link);
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);
}

static TEE_Result ffa_get_cattr(struct mobj *mobj __unused, uint32_t *cattr)
{
	if (!cattr)
		return TEE_ERROR_GENERIC;

	*cattr = TEE_MATTR_CACHE_CACHED;

	return TEE_SUCCESS;
}

static bool ffa_matches(struct mobj *mobj __maybe_unused, enum buf_is_attr attr)
{
	assert(mobj->ops == &mobj_ffa_ops);

	return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
}

static uint64_t ffa_get_cookie(struct mobj *mobj)
{
	return to_mobj_ffa(mobj)->cookie;
}

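/*
 * Increase the map count and, on the 0 -> 1 transition, allocate a
 * virtual address range from tee_mm_shm and map the pages as non-secure
 * shared memory. The lock-free fast path falls back to taking shm_lock
 * when the count needs to be (re)initialized.
 */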
static TEE_Result ffa_inc_map(struct mobj *mobj)
{
	TEE_Result res = TEE_SUCCESS;
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	uint32_t exceptions = 0;
	size_t sz = 0;

	while (true) {
		if (refcount_inc(&mf->mapcount))
			return TEE_SUCCESS;

		exceptions = cpu_spin_lock_xsave(&shm_lock);

		if (!refcount_val(&mf->mapcount))
			break; /* continue to reinitialize */
		/*
		 * If another thread beat us to initialize mapcount,
		 * restart to make sure we still increase it.
		 */
		cpu_spin_unlock_xrestore(&shm_lock, exceptions);
	}

	/*
	 * If we have beaten another thread calling ffa_dec_map() to the
	 * lock, mf->mm is still valid and we only need to reinitialize
	 * mapcount to 1.
	 */
	if (!mf->mm) {
		sz = ROUNDUP(mobj->size + mf->page_offset, SMALL_PAGE_SIZE);
		mf->mm = tee_mm_alloc(&tee_mm_shm, sz);
		if (!mf->mm) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto out;
		}

		res = core_mmu_map_pages(tee_mm_get_smem(mf->mm), mf->pages,
					 sz / SMALL_PAGE_SIZE,
					 MEM_AREA_NSEC_SHM);
		if (res) {
			tee_mm_free(mf->mm);
			mf->mm = NULL;
			goto out;
		}
	}

	refcount_set(&mf->mapcount, 1);
out:
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return res;
}

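/*
 * Decrease the map count and unmap the pages once it drops to 0. The
 * count is re-checked under shm_lock since a concurrent ffa_inc_map()
 * may have reinitialized it.
 */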
static TEE_Result ffa_dec_map(struct mobj *mobj)
{
	struct mobj_ffa *mf = to_mobj_ffa(mobj);
	uint32_t exceptions = 0;

	if (!refcount_dec(&mf->mapcount))
		return TEE_SUCCESS;

	exceptions = cpu_spin_lock_xsave(&shm_lock);
	if (!refcount_val(&mf->mapcount))
		unmap_helper(mf);
	cpu_spin_unlock_xrestore(&shm_lock, exceptions);

	return TEE_SUCCESS;
}

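/*
 * Set up the tee_mm_shm virtual address pool used to map shared memory
 * pages. Runs as a preinit call during boot.
 */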
static TEE_Result mapped_shm_init(void)
{
	vaddr_t pool_start = 0;
	vaddr_t pool_end = 0;

	core_mmu_get_mem_by_type(MEM_AREA_SHM_VASPACE, &pool_start, &pool_end);
	if (!pool_start || !pool_end)
		panic("Can't find region for shmem pool");

	if (!tee_mm_init(&tee_mm_shm, pool_start, pool_end, SMALL_PAGE_SHIFT,
			 TEE_MM_POOL_NO_FLAGS))
		panic("Could not create shmem pool");

	DMSG("Shared memory address range: %#"PRIxVA", %#"PRIxVA,
	     pool_start, pool_end);
	return TEE_SUCCESS;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct mobj_ops mobj_ffa_ops __weak __rodata_unpaged("mobj_ffa_ops") = {
	.get_pa = ffa_get_pa,
	.get_phys_offs = ffa_get_phys_offs,
	.get_va = ffa_get_va,
	.get_cattr = ffa_get_cattr,
	.matches = ffa_matches,
	.free = ffa_inactivate,
	.get_cookie = ffa_get_cookie,
	.inc_map = ffa_inc_map,
	.dec_map = ffa_dec_map,
};

preinit(mapped_shm_init);