// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2020-2021, Linaro Limited.
 * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
 */

#include <assert.h>
#include <ffa.h>
#include <io.h>
#include <initcall.h>
#include <kernel/interrupt.h>
#include <kernel/panic.h>
#include <kernel/secure_partition.h>
#include <kernel/spinlock.h>
#include <kernel/spmc_sp_handler.h>
#include <kernel/tee_misc.h>
#include <kernel/thread.h>
#include <kernel/thread_spmc.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <optee_ffa.h>
#include <optee_msg.h>
#include <optee_rpc_cmd.h>
#include <string.h>
#include <sys/queue.h>
#include <tee/entry_std.h>
#include <tee/uuid.h>
#include <util.h>

#include "thread_private.h"

#if defined(CFG_CORE_SEL1_SPMC)
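/*
 * struct mem_share_state tracks an FFA_MEM_SHARE operation that is being
 * parsed: the mobj being populated, the total page count it should reach,
 * the number of address ranges still expected and the index of the next
 * page to add.
 *
 * struct mem_frag_state wraps the share state for shares that arrive in
 * several fragments (FFA_MEM_FRAG_TX): @mm is the temporary mapping of a
 * dynamically allocated buffer (NULL when the RX buffer is used),
 * @frag_offset is the number of bytes consumed so far and @link enters
 * the state into frag_state_head while fragments are pending.
 */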
struct mem_share_state {
	struct mobj_ffa *mf;
	unsigned int page_count;
	unsigned int region_count;
	unsigned int current_page_idx;
};

struct mem_frag_state {
	struct mem_share_state share;
	tee_mm_entry_t *mm;
	unsigned int frag_offset;
	SLIST_ENTRY(mem_frag_state) link;
};
#endif

/* Initialized in spmc_init() below */
static uint16_t my_endpoint_id;

/*
 * If struct ffa_rxtx::size is 0 the RX/TX buffers are not mapped or
 * initialized.
 *
 * struct ffa_rxtx::spinlock protects the variables below from concurrent
 * access; this includes the use of the content of struct ffa_rxtx::rx and
 * @frag_state_head.
 *
 * struct ffa_rxtx::tx_is_mine is true when we may write to struct
 * ffa_rxtx::tx and false when it is owned by normal world.
 *
 * Note that we can't prevent normal world from updating the content of
 * these buffers so we must always be careful when reading, even while we
 * hold the lock.
 */

#ifdef CFG_CORE_SEL2_SPMC
static uint8_t __rx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
static uint8_t __tx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
static struct ffa_rxtx nw_rxtx = { .rx = __rx_buf, .tx = __tx_buf };
#else
static struct ffa_rxtx nw_rxtx;

static bool is_nw_buf(struct ffa_rxtx *rxtx)
{
	return rxtx == &nw_rxtx;
}

static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
	SLIST_HEAD_INITIALIZER(&frag_state_head);
#endif

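/*
 * In FF-A direct messages w1 carries the sender endpoint ID in bits
 * [31:16] and the receiver endpoint ID in bits [15:0], so a response is
 * formed by swapping the two halves of the incoming src_dst value.
 */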
static uint32_t swap_src_dst(uint32_t src_dst)
{
	return (src_dst >> 16) | (src_dst << 16);
}

void spmc_set_args(struct thread_smc_args *args, uint32_t fid, uint32_t src_dst,
		   uint32_t w2, uint32_t w3, uint32_t w4, uint32_t w5)
{
	*args = (struct thread_smc_args){ .a0 = fid,
					  .a1 = src_dst,
					  .a2 = w2,
					  .a3 = w3,
					  .a4 = w4,
					  .a5 = w5, };
}

#if defined(CFG_CORE_SEL1_SPMC)
void spmc_handle_version(struct thread_smc_args *args)
{
	/*
	 * We currently only support one version, 1.0, so let's keep it
	 * simple.
	 */
	spmc_set_args(args,
		      MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR),
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static void handle_features(struct thread_smc_args *args)
{
	uint32_t ret_fid = 0;
	uint32_t ret_w2 = FFA_PARAM_MBZ;

	switch (args->a1) {
#ifdef ARM64
	case FFA_RXTX_MAP_64:
#endif
	case FFA_RXTX_MAP_32:
		ret_fid = FFA_SUCCESS_32;
		ret_w2 = 0; /* 4kB minimum buffer size and alignment boundary */
		break;
#ifdef ARM64
	case FFA_MEM_SHARE_64:
#endif
	case FFA_MEM_SHARE_32:
		ret_fid = FFA_SUCCESS_32;
		/*
		 * Partition manager supports transmission of a memory
		 * transaction descriptor in a buffer dynamically allocated
		 * by the endpoint.
		 */
		ret_w2 = BIT(0);
		break;

	case FFA_ERROR:
	case FFA_VERSION:
	case FFA_SUCCESS_32:
#ifdef ARM64
	case FFA_SUCCESS_64:
#endif
	case FFA_MEM_FRAG_TX:
	case FFA_MEM_RECLAIM:
	case FFA_MSG_SEND_DIRECT_REQ_32:
	case FFA_INTERRUPT:
	case FFA_PARTITION_INFO_GET:
	case FFA_RX_RELEASE:
		ret_fid = FFA_SUCCESS_32;
		break;
	default:
		ret_fid = FFA_ERROR;
		ret_w2 = FFA_NOT_SUPPORTED;
		break;
	}

	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
{
	tee_mm_entry_t *mm = NULL;

	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
		return FFA_INVALID_PARAMETERS;

	mm = tee_mm_alloc(&tee_mm_shm, sz);
	if (!mm)
		return FFA_NO_MEMORY;

	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
					  sz / SMALL_PAGE_SIZE,
					  MEM_AREA_NSEC_SHM)) {
		tee_mm_free(mm);
		return FFA_INVALID_PARAMETERS;
	}

	*va_ret = (void *)tee_mm_get_smem(mm);
	return 0;
}

static void unmap_buf(void *va, size_t sz)
{
	tee_mm_entry_t *mm = tee_mm_find(&tee_mm_shm, (vaddr_t)va);

	assert(mm);
	core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
	tee_mm_free(mm);
}

void spmc_handle_rxtx_map(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
{
	int rc = 0;
	uint32_t ret_fid = FFA_ERROR;
	unsigned int sz = 0;
	paddr_t rx_pa = 0;
	paddr_t tx_pa = 0;
	void *rx = NULL;
	void *tx = NULL;

	cpu_spin_lock(&rxtx->spinlock);

	if (args->a3 & GENMASK_64(63, 6)) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}

	sz = args->a3 * SMALL_PAGE_SIZE;
	if (!sz) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}
	/* TX/RX are swapped compared to the caller */
	tx_pa = args->a2;
	rx_pa = args->a1;

	if (rxtx->size) {
		rc = FFA_DENIED;
		goto out;
	}

	/*
	 * If the buffer comes from an SP the address is virtual and
	 * already mapped.
	 */
	if (is_nw_buf(rxtx)) {
		rc = map_buf(tx_pa, sz, &tx);
		if (rc)
			goto out;
		rc = map_buf(rx_pa, sz, &rx);
		if (rc) {
			unmap_buf(tx, sz);
			goto out;
		}
		rxtx->tx = tx;
		rxtx->rx = rx;
	} else {
		if ((tx_pa & SMALL_PAGE_MASK) || (rx_pa & SMALL_PAGE_MASK)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}

		if (!virt_to_phys((void *)tx_pa) ||
		    !virt_to_phys((void *)rx_pa)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}

		rxtx->tx = (void *)tx_pa;
		rxtx->rx = (void *)rx_pa;
	}

	rxtx->size = sz;
	rxtx->tx_is_mine = true;
	ret_fid = FFA_SUCCESS_32;
	DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
	DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
out:
	cpu_spin_unlock(&rxtx->spinlock);
	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

void spmc_handle_rxtx_unmap(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
{
	uint32_t ret_fid = FFA_ERROR;
	int rc = FFA_INVALID_PARAMETERS;

	cpu_spin_lock(&rxtx->spinlock);

	if (!rxtx->size)
		goto out;

	/* We don't unmap the SP memory as the SP might still use it */
	if (is_nw_buf(rxtx)) {
		unmap_buf(rxtx->rx, rxtx->size);
		unmap_buf(rxtx->tx, rxtx->size);
	}
	rxtx->size = 0;
	rxtx->rx = NULL;
	rxtx->tx = NULL;
	ret_fid = FFA_SUCCESS_32;
	rc = 0;
out:
	cpu_spin_unlock(&rxtx->spinlock);
	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

void spmc_handle_rx_release(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
{
	uint32_t ret_fid = 0;
	int rc = 0;

	cpu_spin_lock(&rxtx->spinlock);
	/* The sender's RX is our TX */
	if (!rxtx->size || rxtx->tx_is_mine) {
		ret_fid = FFA_ERROR;
		rc = FFA_DENIED;
	} else {
		ret_fid = FFA_SUCCESS_32;
		rc = 0;
		rxtx->tx_is_mine = true;
	}
	cpu_spin_unlock(&rxtx->spinlock);

	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
{
	return !w0 && !w1 && !w2 && !w3;
}

static bool is_my_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
{
	/*
	 * This depends on which UUID we have been assigned.
	 * TODO add a generic mechanism to obtain our UUID.
	 *
	 * The test below is for the hard-coded UUID
	 * 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
	 */
	return w0 == 0xe0786148 && w1 == 0xe311f8e7 &&
	       w2 == 0x02005ebc && w3 == 0x1bc5d5a5;
}

void spmc_fill_partition_entry(struct ffa_partition_info *fpi,
			       uint16_t endpoint_id, uint16_t execution_context)
{
	fpi->id = endpoint_id;
	/* Number of execution contexts implemented by this partition */
	fpi->execution_context = execution_context;

	fpi->partition_properties = FFA_PARTITION_DIRECT_REQ_RECV_SUPPORT |
				    FFA_PARTITION_DIRECT_REQ_SEND_SUPPORT;
}

static uint32_t handle_partition_info_get_all(size_t *elem_count,
					      struct ffa_rxtx *rxtx)
{
	struct ffa_partition_info *fpi = rxtx->tx;

	/* Add OP-TEE SP */
	spmc_fill_partition_entry(fpi, my_endpoint_id, CFG_TEE_CORE_NB_CORE);
	rxtx->tx_is_mine = false;
	*elem_count = 1;
	fpi++;

	if (IS_ENABLED(CFG_SECURE_PARTITION)) {
		size_t count = (rxtx->size / sizeof(*fpi)) - 1;

		if (sp_partition_info_get_all(fpi, &count))
			return FFA_NO_MEMORY;
		*elem_count += count;
	}

	return FFA_OK;
}

void spmc_handle_partition_info_get(struct thread_smc_args *args,
				    struct ffa_rxtx *rxtx)
{
	uint32_t ret_fid = FFA_ERROR;
	uint32_t rc = 0;
	uint32_t endpoint_id = my_endpoint_id;
	struct ffa_partition_info *fpi = NULL;

	cpu_spin_lock(&rxtx->spinlock);

	if (!rxtx->size || !rxtx->tx_is_mine) {
		if (rxtx->size)
			rc = FFA_BUSY;
		else
			rc = FFA_DENIED; /* TX buffer not set up yet */
		goto out;
	}

	fpi = rxtx->tx;

	if (rxtx->size < sizeof(*fpi)) {
		ret_fid = FFA_ERROR;
		rc = FFA_NO_MEMORY;
		goto out;
	}

	if (is_nil_uuid(args->a1, args->a2, args->a3, args->a4)) {
		size_t elem_count = 0;

		ret_fid = handle_partition_info_get_all(&elem_count, rxtx);

		if (ret_fid) {
			rc = ret_fid;
			ret_fid = FFA_ERROR;
		} else {
			ret_fid = FFA_SUCCESS_32;
			rc = elem_count;
		}

		goto out;
	}

	if (is_my_uuid(args->a1, args->a2, args->a3, args->a4)) {
		spmc_fill_partition_entry(fpi, endpoint_id,
					  CFG_TEE_CORE_NB_CORE);
	} else if (IS_ENABLED(CFG_SECURE_PARTITION)) {
		uint32_t uuid_array[4] = { 0 };
		TEE_UUID uuid = { };
		TEE_Result res = TEE_SUCCESS;

		uuid_array[0] = args->a1;
		uuid_array[1] = args->a2;
		uuid_array[2] = args->a3;
		uuid_array[3] = args->a4;
		tee_uuid_from_octets(&uuid, (uint8_t *)uuid_array);

		res = sp_find_session_id(&uuid, &endpoint_id);
		if (res != TEE_SUCCESS) {
			ret_fid = FFA_ERROR;
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}
		spmc_fill_partition_entry(fpi, endpoint_id, 1);
	} else {
		ret_fid = FFA_ERROR;
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}

	ret_fid = FFA_SUCCESS_32;
	rxtx->tx_is_mine = false;
	rc = 1;

out:
	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
	cpu_spin_unlock(&rxtx->spinlock);
}
#endif /*CFG_CORE_SEL1_SPMC*/

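/*
 * Yielding calls arrive in FFA_MSG_SEND_DIRECT_REQ_32 with
 * OPTEE_FFA_YIELDING_CALL_BIT set in w3. A new call allocates and starts
 * a thread while OPTEE_FFA_YIELDING_CALL_RESUME resumes a thread that was
 * suspended for an RPC; in either case execution only comes back here,
 * and an error is returned, when the request could not be handled.
 */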
static void handle_yielding_call(struct thread_smc_args *args)
{
	TEE_Result res = 0;

	thread_check_canaries();

	if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
		/* Note connection to struct thread_rpc_arg::ret */
		thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
				       0);
		res = TEE_ERROR_BAD_PARAMETERS;
	} else {
		thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5,
				     args->a6, args->a7);
		res = TEE_ERROR_BUSY;
	}
	spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
		      swap_src_dst(args->a1), 0, res, 0, 0);
}

static uint32_t handle_unregister_shm(uint32_t a4, uint32_t a5)
{
	uint64_t cookie = reg_pair_to_64(a5, a4);
	uint32_t res = 0;

	res = mobj_ffa_unregister_by_cookie(cookie);
	switch (res) {
	case TEE_SUCCESS:
	case TEE_ERROR_ITEM_NOT_FOUND:
		return 0;
	case TEE_ERROR_BUSY:
		EMSG("res %#"PRIx32, res);
		return FFA_BUSY;
	default:
		EMSG("res %#"PRIx32, res);
		return FFA_INVALID_PARAMETERS;
	}
}

static void handle_blocking_call(struct thread_smc_args *args)
{
	switch (args->a3) {
	case OPTEE_FFA_GET_API_VERSION:
		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
			      swap_src_dst(args->a1), 0,
			      OPTEE_FFA_VERSION_MAJOR, OPTEE_FFA_VERSION_MINOR,
			      0);
		break;
	case OPTEE_FFA_GET_OS_VERSION:
		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
			      swap_src_dst(args->a1), 0,
			      CFG_OPTEE_REVISION_MAJOR,
			      CFG_OPTEE_REVISION_MINOR, TEE_IMPL_GIT_SHA1);
		break;
	case OPTEE_FFA_EXCHANGE_CAPABILITIES:
		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
			      swap_src_dst(args->a1), 0, 0,
			      THREAD_RPC_MAX_NUM_PARAMS, 0);
		break;
	case OPTEE_FFA_UNREGISTER_SHM:
		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
			      swap_src_dst(args->a1), 0,
			      handle_unregister_shm(args->a4, args->a5), 0, 0);
		break;
	default:
		EMSG("Unhandled blocking service ID %#"PRIx32,
		     (uint32_t)args->a3);
		panic();
	}
}

#if defined(CFG_CORE_SEL1_SPMC)
static int get_acc_perms(struct ffa_mem_access *mem_acc,
			 unsigned int num_mem_accs, uint8_t *acc_perms,
			 unsigned int *region_offs)
{
	unsigned int n = 0;

	for (n = 0; n < num_mem_accs; n++) {
		struct ffa_mem_access_perm *descr = &mem_acc[n].access_perm;

		if (READ_ONCE(descr->endpoint_id) == my_endpoint_id) {
			*acc_perms = READ_ONCE(descr->perm);
			*region_offs = READ_ONCE(mem_acc[n].region_offs);
			return 0;
		}
	}

	return FFA_INVALID_PARAMETERS;
}

static int mem_share_init(void *buf, size_t blen, unsigned int *page_count,
			  unsigned int *region_count, size_t *addr_range_offs)
{
	const uint8_t exp_mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
	const uint8_t exp_mem_acc_perm = FFA_MEM_ACC_RW;
	struct ffa_mem_region *region_descr = NULL;
	struct ffa_mem_transaction *descr = NULL;
	unsigned int num_mem_accs = 0;
	uint8_t mem_acc_perm = 0;
	unsigned int region_descr_offs = 0;
	size_t n = 0;

	if (!IS_ALIGNED_WITH_TYPE(buf, struct ffa_mem_transaction) ||
	    blen < sizeof(struct ffa_mem_transaction))
		return FFA_INVALID_PARAMETERS;

	descr = buf;

	/* Check that the endpoint memory access descriptor array fits */
	num_mem_accs = READ_ONCE(descr->mem_access_count);
	if (MUL_OVERFLOW(sizeof(struct ffa_mem_access), num_mem_accs, &n) ||
	    ADD_OVERFLOW(sizeof(*descr), n, &n) || n > blen)
		return FFA_INVALID_PARAMETERS;

	if (READ_ONCE(descr->mem_reg_attr) != exp_mem_reg_attr)
		return FFA_INVALID_PARAMETERS;

	/* Check that the access permissions match what's expected */
	if (get_acc_perms(descr->mem_access_array,
			  num_mem_accs, &mem_acc_perm, &region_descr_offs) ||
	    mem_acc_perm != exp_mem_acc_perm)
		return FFA_INVALID_PARAMETERS;

	/* Check that the composite memory region descriptor fits */
	if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
	    n > blen)
		return FFA_INVALID_PARAMETERS;

	if (!IS_ALIGNED_WITH_TYPE((vaddr_t)descr + region_descr_offs,
				  struct ffa_mem_region))
		return FFA_INVALID_PARAMETERS;

	region_descr = (struct ffa_mem_region *)((vaddr_t)descr +
						 region_descr_offs);
	*page_count = READ_ONCE(region_descr->total_page_count);
	*region_count = READ_ONCE(region_descr->address_range_count);
	*addr_range_offs = n;
	return 0;
}

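/*
 * Consumes as many address ranges from @buf as @flen and the remaining
 * region count allow and adds the pages to the mobj. Returns 0 when all
 * expected regions have been added, a positive number of consumed bytes
 * when more regions remain for a later fragment, or a negative FFA_*
 * error code.
 */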
static int add_mem_share_helper(struct mem_share_state *s, void *buf,
				size_t flen)
{
	unsigned int region_count = flen / sizeof(struct ffa_address_range);
	struct ffa_address_range *arange = NULL;
	unsigned int n = 0;

	if (region_count > s->region_count)
		region_count = s->region_count;

	if (!IS_ALIGNED_WITH_TYPE(buf, struct ffa_address_range))
		return FFA_INVALID_PARAMETERS;
	arange = buf;

	for (n = 0; n < region_count; n++) {
		unsigned int page_count = READ_ONCE(arange[n].page_count);
		uint64_t addr = READ_ONCE(arange[n].address);

		if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
					  addr, page_count))
			return FFA_INVALID_PARAMETERS;
	}

	s->region_count -= region_count;
	if (s->region_count)
		return region_count * sizeof(*arange);

	if (s->current_page_idx != s->page_count)
		return FFA_INVALID_PARAMETERS;

	return 0;
}

static int add_mem_share_frag(struct mem_frag_state *s, void *buf, size_t flen)
{
	int rc = 0;

	rc = add_mem_share_helper(&s->share, buf, flen);
	if (rc >= 0) {
		if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
			if (s->share.region_count)
				return s->frag_offset;
			/* We're done, return the number of consumed bytes */
			rc = s->frag_offset;
		} else {
			rc = FFA_INVALID_PARAMETERS;
		}
	}

	SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
	if (rc < 0)
		mobj_ffa_sel1_spmc_delete(s->share.mf);
	else
		mobj_ffa_push_to_inactive(s->share.mf);
	free(s);

	return rc;
}

static bool is_sp_share(void *buf)
{
	struct ffa_mem_transaction *input_descr = NULL;
	struct ffa_mem_access_perm *perm = NULL;

	if (!IS_ENABLED(CFG_SECURE_PARTITION))
		return false;

	input_descr = buf;
	perm = &input_descr->mem_access_array[0].access_perm;

	/*
	 * perm->endpoint_id is read here only to check if the endpoint is
	 * OP-TEE. We read it again later on, with additional checks to
	 * make sure that the data is correct.
	 */
	return READ_ONCE(perm->endpoint_id) != my_endpoint_id;
}

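/*
 * Parses a memory share transaction descriptor of @blen bytes of which
 * only the first @flen bytes have arrived so far. When flen == blen the
 * whole descriptor is processed in one go; otherwise a struct
 * mem_frag_state is queued and the remaining address ranges are expected
 * via FFA_MEM_FRAG_TX. The cookie of the created mobj is returned in
 * @global_handle.
 */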
static int add_mem_share(tee_mm_entry_t *mm, void *buf, size_t blen,
			 size_t flen, uint64_t *global_handle)
{
	int rc = 0;
	struct mem_share_state share = { };
	size_t addr_range_offs = 0;
	size_t n = 0;

	if (flen > blen)
		return FFA_INVALID_PARAMETERS;

	rc = mem_share_init(buf, flen, &share.page_count, &share.region_count,
			    &addr_range_offs);
	if (rc)
		return rc;

	if (MUL_OVERFLOW(share.region_count,
			 sizeof(struct ffa_address_range), &n) ||
	    ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
		return FFA_INVALID_PARAMETERS;

	share.mf = mobj_ffa_sel1_spmc_new(share.page_count);
	if (!share.mf)
		return FFA_NO_MEMORY;

	if (flen != blen) {
		struct mem_frag_state *s = calloc(1, sizeof(*s));

		if (!s) {
			rc = FFA_NO_MEMORY;
			goto err;
		}
		s->share = share;
		s->mm = mm;
		s->frag_offset = addr_range_offs;

		SLIST_INSERT_HEAD(&frag_state_head, s, link);
		rc = add_mem_share_frag(s, (char *)buf + addr_range_offs,
					flen - addr_range_offs);

		if (rc >= 0)
			*global_handle = mobj_ffa_get_cookie(share.mf);

		return rc;
	}

	rc = add_mem_share_helper(&share, (char *)buf + addr_range_offs,
				  flen - addr_range_offs);
	if (rc) {
		/*
		 * A positive number of consumed bytes may be returned
		 * instead of 0 for done, but since the whole descriptor is
		 * available here anything but 0 is an error.
		 */
		rc = FFA_INVALID_PARAMETERS;
		goto err;
	}

	*global_handle = mobj_ffa_push_to_inactive(share.mf);

	return 0;
err:
	mobj_ffa_sel1_spmc_delete(share.mf);
	return rc;
}

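/*
 * Handles FFA_MEM_SHARE when the transaction descriptor is passed in a
 * dynamically allocated, non-secure buffer: the buffer is mapped
 * temporarily, parsed with add_mem_share() and unmapped again unless more
 * fragments are expected.
 */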
static int handle_mem_share_tmem(paddr_t pbuf, size_t blen, size_t flen,
				 unsigned int page_count,
				 uint64_t *global_handle, struct ffa_rxtx *rxtx)
{
	int rc = 0;
	size_t len = 0;
	tee_mm_entry_t *mm = NULL;
	vaddr_t offs = pbuf & SMALL_PAGE_MASK;

	if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
		return FFA_INVALID_PARAMETERS;
	if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
		return FFA_INVALID_PARAMETERS;

	/*
	 * Check that the length reported in blen is covered by len even
	 * when the offset is taken into account.
	 */
	if (len < blen || len - offs < blen)
		return FFA_INVALID_PARAMETERS;

	mm = tee_mm_alloc(&tee_mm_shm, len);
	if (!mm)
		return FFA_NO_MEMORY;

	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
					  page_count, MEM_AREA_NSEC_SHM)) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}

	cpu_spin_lock(&rxtx->spinlock);
	rc = add_mem_share(mm, (void *)(tee_mm_get_smem(mm) + offs), blen, flen,
			   global_handle);
	cpu_spin_unlock(&rxtx->spinlock);
	if (rc > 0)
		return rc;

	core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
out:
	tee_mm_free(mm);
	return rc;
}

static int handle_mem_share_rxbuf(size_t blen, size_t flen,
				  uint64_t *global_handle,
				  struct ffa_rxtx *rxtx)
{
	int rc = FFA_DENIED;

	cpu_spin_lock(&rxtx->spinlock);

	if (rxtx->rx && flen <= rxtx->size) {
		if (is_sp_share(rxtx->rx)) {
			rc = spmc_sp_add_share(rxtx, blen,
					       global_handle, NULL);
		} else {
			rc = add_mem_share(NULL, rxtx->rx, blen, flen,
					   global_handle);
		}
	}

	cpu_spin_unlock(&rxtx->spinlock);

	return rc;
}

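/*
 * FFA_MEM_SHARE: w1/w2 carry the total and fragment length of the
 * transaction descriptor. If w3 is zero the descriptor is read from the
 * caller's TX buffer (our RX buffer), otherwise w3/w4 give the address
 * and page count of a dynamically allocated buffer holding it. A positive
 * result from the helpers means more fragments follow and FFA_MEM_FRAG_RX
 * is returned, otherwise FFA_SUCCESS_32 with the 64-bit handle in w2/w3.
 */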
static void handle_mem_share(struct thread_smc_args *args,
			     struct ffa_rxtx *rxtx)
{
	uint32_t ret_w1 = 0;
	uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
	uint32_t ret_w3 = 0;
	uint32_t ret_fid = FFA_ERROR;
	uint64_t global_handle = 0;
	int rc = 0;

	/* Check that the MBZs are indeed 0 */
	if (args->a5 || args->a6 || args->a7)
		goto out;

	if (!args->a3) {
		/*
		 * The memory transaction descriptor is passed via our rx
		 * buffer.
		 */
		if (args->a4)
			goto out;
		rc = handle_mem_share_rxbuf(args->a1, args->a2, &global_handle,
					    rxtx);
	} else {
		rc = handle_mem_share_tmem(args->a3, args->a1, args->a2,
					   args->a4, &global_handle, rxtx);
	}
	if (rc < 0) {
		ret_w2 = rc;
		goto out;
	}
	if (rc > 0) {
		ret_fid = FFA_MEM_FRAG_RX;
		ret_w3 = rc;
		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
		goto out;
	}
	ret_fid = FFA_SUCCESS_32;
	reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
out:
	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
}

static struct mem_frag_state *get_frag_state(uint64_t global_handle)
{
	struct mem_frag_state *s = NULL;

	SLIST_FOREACH(s, &frag_state_head, link)
		if (mobj_ffa_get_cookie(s->share.mf) == global_handle)
			return s;

	return NULL;
}

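/*
 * FFA_MEM_FRAG_TX: w1/w2 carry the handle of a share in progress and w3
 * the fragment length. The fragment is read either from the mapped
 * dynamically allocated buffer or from the RX buffer, depending on how
 * the share was started.
 */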
static void handle_mem_frag_tx(struct thread_smc_args *args,
			       struct ffa_rxtx *rxtx)
{
	int rc = 0;
	uint64_t global_handle = reg_pair_to_64(READ_ONCE(args->a2),
						READ_ONCE(args->a1));
	size_t flen = READ_ONCE(args->a3);
	struct mem_frag_state *s = NULL;
	tee_mm_entry_t *mm = NULL;
	unsigned int page_count = 0;
	void *buf = NULL;
	uint32_t ret_w1 = 0;
	uint32_t ret_w2 = 0;
	uint32_t ret_w3 = 0;
	uint32_t ret_fid = 0;

	/*
	 * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
	 * requests.
	 */

	cpu_spin_lock(&rxtx->spinlock);

	s = get_frag_state(global_handle);
	if (!s) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}

	mm = s->mm;
	if (mm) {
		if (flen > tee_mm_get_bytes(mm)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}
		page_count = s->share.page_count;
		buf = (void *)tee_mm_get_smem(mm);
	} else {
		if (flen > rxtx->size) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}
		buf = rxtx->rx;
	}

	rc = add_mem_share_frag(s, buf, flen);
out:
	cpu_spin_unlock(&rxtx->spinlock);

	if (rc <= 0 && mm) {
		core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
		tee_mm_free(mm);
	}

	if (rc < 0) {
		ret_fid = FFA_ERROR;
		ret_w2 = rc;
	} else if (rc > 0) {
		ret_fid = FFA_MEM_FRAG_RX;
		ret_w3 = rc;
		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
	} else {
		ret_fid = FFA_SUCCESS_32;
		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
	}

	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
}

static void handle_mem_reclaim(struct thread_smc_args *args)
{
	uint32_t ret_val = FFA_INVALID_PARAMETERS;
	uint32_t ret_fid = FFA_ERROR;
	uint64_t cookie = 0;

	if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
		goto out;

	cookie = reg_pair_to_64(args->a2, args->a1);
	switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
	case TEE_SUCCESS:
		ret_fid = FFA_SUCCESS_32;
		ret_val = 0;
		break;
	case TEE_ERROR_ITEM_NOT_FOUND:
		DMSG("cookie %#"PRIx64" not found", cookie);
		ret_val = FFA_INVALID_PARAMETERS;
		break;
	default:
		DMSG("cookie %#"PRIx64" busy", cookie);
		ret_val = FFA_DENIED;
		break;
	}
out:
	spmc_set_args(args, ret_fid, ret_val, 0, 0, 0, 0);
}
#endif

/* Only called from assembly */
void thread_spmc_msg_recv(struct thread_smc_args *args);
void thread_spmc_msg_recv(struct thread_smc_args *args)
{
	assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
	switch (args->a0) {
#if defined(CFG_CORE_SEL1_SPMC)
	case FFA_VERSION:
		spmc_handle_version(args);
		break;
	case FFA_FEATURES:
		handle_features(args);
		break;
#ifdef ARM64
	case FFA_RXTX_MAP_64:
#endif
	case FFA_RXTX_MAP_32:
		spmc_handle_rxtx_map(args, &nw_rxtx);
		break;
	case FFA_RXTX_UNMAP:
		spmc_handle_rxtx_unmap(args, &nw_rxtx);
		break;
	case FFA_RX_RELEASE:
		spmc_handle_rx_release(args, &nw_rxtx);
		break;
	case FFA_PARTITION_INFO_GET:
		spmc_handle_partition_info_get(args, &nw_rxtx);
		break;
#endif /*CFG_CORE_SEL1_SPMC*/
	case FFA_INTERRUPT:
		itr_core_handler();
		spmc_set_args(args, FFA_SUCCESS_32, args->a1, 0, 0, 0, 0);
		break;
	case FFA_MSG_SEND_DIRECT_REQ_32:
		if (IS_ENABLED(CFG_SECURE_PARTITION) &&
		    FFA_DST(args->a1) != my_endpoint_id) {
			spmc_sp_start_thread(args);
			break;
		}

		if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
			handle_yielding_call(args);
		else
			handle_blocking_call(args);
		break;
#if defined(CFG_CORE_SEL1_SPMC)
#ifdef ARM64
	case FFA_MEM_SHARE_64:
#endif
	case FFA_MEM_SHARE_32:
		handle_mem_share(args, &nw_rxtx);
		break;
	case FFA_MEM_RECLAIM:
		if (!IS_ENABLED(CFG_SECURE_PARTITION) ||
		    !ffa_mem_reclaim(args, NULL))
			handle_mem_reclaim(args);
		break;
	case FFA_MEM_FRAG_TX:
		handle_mem_frag_tx(args, &nw_rxtx);
		break;
#endif /*CFG_CORE_SEL1_SPMC*/
	default:
		EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
		spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
			      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
	}
}

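/*
 * The cookie identifies the shared memory object carrying the call
 * arguments: a struct optee_msg_arg at @offset followed by a second
 * struct optee_msg_arg used for RPC arguments. The latter is cached in
 * the thread context so RPCs don't need to allocate anything.
 */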
static uint32_t yielding_call_with_arg(uint64_t cookie, uint32_t offset)
{
	size_t sz_rpc = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
	struct thread_ctx *thr = threads + thread_get_id();
	uint32_t rv = TEE_ERROR_BAD_PARAMETERS;
	struct optee_msg_arg *arg = NULL;
	struct mobj *mobj = NULL;
	uint32_t num_params = 0;
	size_t sz = 0;

	mobj = mobj_ffa_get_by_cookie(cookie, 0);
	if (!mobj) {
		EMSG("Can't find cookie %#"PRIx64, cookie);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	rv = mobj_inc_map(mobj);
	if (rv)
		goto out_put_mobj;

	rv = TEE_ERROR_BAD_PARAMETERS;
	arg = mobj_get_va(mobj, offset, sizeof(*arg));
	if (!arg)
		goto out_dec_map;

	num_params = READ_ONCE(arg->num_params);
	if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
		goto out_dec_map;

	sz = OPTEE_MSG_GET_ARG_SIZE(num_params);

	thr->rpc_arg = mobj_get_va(mobj, offset + sz, sz_rpc);
	if (!thr->rpc_arg)
		goto out_dec_map;

	rv = tee_entry_std(arg, num_params);

	thread_rpc_shm_cache_clear(&thr->shm_cache);
	thr->rpc_arg = NULL;

out_dec_map:
	mobj_dec_map(mobj);
out_put_mobj:
	mobj_put(mobj);
	return rv;
}

/*
 * Helper routine for the assembly function thread_std_smc_entry()
 *
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1,
				       uint32_t a2, uint32_t a3,
				       uint32_t a4, uint32_t a5 __unused)
{
	/*
	 * Arguments are supplied from handle_yielding_call() as:
	 * a0 <- w1
	 * a1 <- w3
	 * a2 <- w4
	 * a3 <- w5
	 * a4 <- w6
	 * a5 <- w7
	 */
	thread_get_tsd()->rpc_target_info = swap_src_dst(a0);
	if (a1 == OPTEE_FFA_YIELDING_CALL_WITH_ARG)
		return yielding_call_with_arg(reg_pair_to_64(a3, a2), a4);
	return FFA_DENIED;
}

static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm)
{
	uint64_t offs = tpm->u.memref.offs;

	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
		      OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;

	param->u.fmem.offs_low = offs;
	param->u.fmem.offs_high = offs >> 32;
	if (param->u.fmem.offs_high != offs >> 32)
		return false;

	param->u.fmem.size = tpm->u.memref.size;
	if (tpm->u.memref.mobj) {
		uint64_t cookie = mobj_get_cookie(tpm->u.memref.mobj);

		/* If a mobj is passed it had better have a valid cookie. */
		if (cookie == OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
			return false;
		param->u.fmem.global_id = cookie;
	} else {
		param->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
	}

	return true;
}

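/*
 * Builds an RPC request in the struct optee_msg_arg that
 * yielding_call_with_arg() stored in the thread context; normal world
 * reads it from the same shared buffer when the RPC is delivered.
 */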
static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
			    struct thread_param *params,
			    struct optee_msg_arg **arg_ret)
{
	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
	struct thread_ctx *thr = threads + thread_get_id();
	struct optee_msg_arg *arg = thr->rpc_arg;

	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!arg) {
		EMSG("rpc_arg not set");
		return TEE_ERROR_GENERIC;
	}

	memset(arg, 0, sz);
	arg->cmd = cmd;
	arg->num_params = num_params;
	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */

	for (size_t n = 0; n < num_params; n++) {
		switch (params[n].attr) {
		case THREAD_PARAM_ATTR_NONE:
			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
			break;
		case THREAD_PARAM_ATTR_VALUE_IN:
		case THREAD_PARAM_ATTR_VALUE_OUT:
		case THREAD_PARAM_ATTR_VALUE_INOUT:
			arg->params[n].attr = params[n].attr -
					      THREAD_PARAM_ATTR_VALUE_IN +
					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
			arg->params[n].u.value.a = params[n].u.value.a;
			arg->params[n].u.value.b = params[n].u.value.b;
			arg->params[n].u.value.c = params[n].u.value.c;
			break;
		case THREAD_PARAM_ATTR_MEMREF_IN:
		case THREAD_PARAM_ATTR_MEMREF_OUT:
		case THREAD_PARAM_ATTR_MEMREF_INOUT:
			if (!set_fmem(arg->params + n, params + n))
				return TEE_ERROR_BAD_PARAMETERS;
			break;
		default:
			return TEE_ERROR_BAD_PARAMETERS;
		}
	}

	if (arg_ret)
		*arg_ret = arg;

	return TEE_SUCCESS;
}

static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
				struct thread_param *params)
{
	for (size_t n = 0; n < num_params; n++) {
		switch (params[n].attr) {
		case THREAD_PARAM_ATTR_VALUE_OUT:
		case THREAD_PARAM_ATTR_VALUE_INOUT:
			params[n].u.value.a = arg->params[n].u.value.a;
			params[n].u.value.b = arg->params[n].u.value.b;
			params[n].u.value.c = arg->params[n].u.value.c;
			break;
		case THREAD_PARAM_ATTR_MEMREF_OUT:
		case THREAD_PARAM_ATTR_MEMREF_INOUT:
			params[n].u.memref.size = arg->params[n].u.fmem.size;
			break;
		default:
			break;
		}
	}

	return arg->ret;
}

uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
			struct thread_param *params)
{
	struct thread_rpc_arg rpc_arg = { .call = {
			.w1 = thread_get_tsd()->rpc_target_info,
			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
		},
	};
	struct optee_msg_arg *arg = NULL;
	uint32_t ret = 0;

	ret = get_rpc_arg(cmd, num_params, params, &arg);
	if (ret)
		return ret;

	thread_rpc(&rpc_arg);

	return get_rpc_arg_res(arg, num_params, params);
}

static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
{
	struct thread_rpc_arg rpc_arg = { .call = {
			.w1 = thread_get_tsd()->rpc_target_info,
			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
		},
	};
	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, cookie, 0);
	uint32_t res2 = 0;
	uint32_t res = 0;

	DMSG("freeing cookie %#"PRIx64, cookie);

	res = get_rpc_arg(OPTEE_RPC_CMD_SHM_FREE, 1, &param, NULL);

	mobj_put(mobj);
	res2 = mobj_ffa_unregister_by_cookie(cookie);
	if (res2)
		DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): %#"PRIx32,
		     cookie, res2);
	if (!res)
		thread_rpc(&rpc_arg);
}

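/*
 * Asks normal world, via an OPTEE_RPC_CMD_SHM_ALLOC RPC, to allocate and
 * share memory of @size bytes. The cookie returned in the fmem output
 * parameter is used to look up the corresponding mobj, which is mapped
 * before it is handed to the caller.
 */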
static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt)
{
	struct thread_rpc_arg rpc_arg = { .call = {
			.w1 = thread_get_tsd()->rpc_target_info,
			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
		},
	};
	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, size, align);
	struct optee_msg_arg *arg = NULL;
	unsigned int internal_offset = 0;
	struct mobj *mobj = NULL;
	uint64_t cookie = 0;

	if (get_rpc_arg(OPTEE_RPC_CMD_SHM_ALLOC, 1, &param, &arg))
		return NULL;

	thread_rpc(&rpc_arg);

	if (arg->num_params != 1 ||
	    arg->params->attr != OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT)
		return NULL;

	internal_offset = READ_ONCE(arg->params->u.fmem.internal_offs);
	cookie = READ_ONCE(arg->params->u.fmem.global_id);
	mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
	if (!mobj) {
		DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
		     cookie, internal_offset);
		return NULL;
	}

	assert(mobj_is_nonsec(mobj));

	if (mobj->size < size) {
		DMSG("Mobj %#"PRIx64": wrong size", cookie);
		mobj_put(mobj);
		return NULL;
	}

	if (mobj_inc_map(mobj)) {
		DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
		mobj_put(mobj);
		return NULL;
	}

	return mobj;
}

struct mobj *thread_rpc_alloc_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_APPL);
}

struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_KERNEL);
}

void thread_rpc_free_kernel_payload(struct mobj *mobj)
{
	thread_rpc_free(OPTEE_RPC_SHM_TYPE_KERNEL, mobj_get_cookie(mobj), mobj);
}

void thread_rpc_free_payload(struct mobj *mobj)
{
	thread_rpc_free(OPTEE_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj),
			mobj);
}

struct mobj *thread_rpc_alloc_global_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_GLOBAL);
}

void thread_rpc_free_global_payload(struct mobj *mobj)
{
	thread_rpc_free(OPTEE_RPC_SHM_TYPE_GLOBAL, mobj_get_cookie(mobj),
			mobj);
}

#ifdef CFG_CORE_SEL2_SPMC
static bool is_ffa_success(uint32_t fid)
{
#ifdef ARM64
	if (fid == FFA_SUCCESS_64)
		return true;
#endif
	return fid == FFA_SUCCESS_32;
}

static void spmc_rxtx_map(struct ffa_rxtx *rxtx)
{
	struct thread_smc_args args = {
#ifdef ARM64
		.a0 = FFA_RXTX_MAP_64,
#else
		.a0 = FFA_RXTX_MAP_32,
#endif
		.a1 = (vaddr_t)rxtx->tx,
		.a2 = (vaddr_t)rxtx->rx,
		.a3 = 1,
	};

	thread_smccc(&args);
	if (!is_ffa_success(args.a0)) {
		if (args.a0 == FFA_ERROR)
			EMSG("rxtx map failed with error %ld", args.a2);
		else
			EMSG("rxtx map failed");
		panic();
	}
}

static uint16_t spmc_get_id(void)
{
	struct thread_smc_args args = {
		.a0 = FFA_ID_GET,
	};

	thread_smccc(&args);
	if (!is_ffa_success(args.a0)) {
		if (args.a0 == FFA_ERROR)
			EMSG("Get id failed with error %ld", args.a2);
		else
			EMSG("Get id failed");
		panic();
	}

	return args.a2;
}

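/*
 * Builds an FFA_MEM_RETRIEVE_REQ for @cookie in the TX buffer and sends
 * it to the SPMC. On success the memory transaction descriptor describing
 * the shared region is found in the RX buffer, which must be released
 * with FFA_RX_RELEASE once it has been parsed.
 */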
static struct ffa_mem_transaction *spmc_retrieve_req(uint64_t cookie)
{
	struct ffa_mem_transaction *trans_descr = nw_rxtx.tx;
	struct ffa_mem_access *acc_descr_array = NULL;
	struct ffa_mem_access_perm *perm_descr = NULL;
	size_t size = sizeof(*trans_descr) +
		      1 * sizeof(struct ffa_mem_access);
	struct thread_smc_args args = {
		.a0 = FFA_MEM_RETRIEVE_REQ_32,
		.a1 = size,	/* Total Length */
		.a2 = size,	/* Frag Length == Total length */
		.a3 = 0,	/* Address, Using TX -> MBZ */
		.a4 = 0,	/* Using TX -> MBZ */
	};

	memset(trans_descr, 0, size);
	trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
	trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
	trans_descr->global_handle = cookie;
	trans_descr->flags = FFA_MEMORY_REGION_FLAG_TIME_SLICE |
			     FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
			     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
	trans_descr->mem_access_count = 1;
	acc_descr_array = trans_descr->mem_access_array;
	acc_descr_array->region_offs = 0;
	acc_descr_array->reserved = 0;
	perm_descr = &acc_descr_array->access_perm;
	perm_descr->endpoint_id = my_endpoint_id;
	perm_descr->perm = FFA_MEM_ACC_RW;
	perm_descr->flags = FFA_MEMORY_REGION_FLAG_TIME_SLICE;

	thread_smccc(&args);
	if (args.a0 != FFA_MEM_RETRIEVE_RESP) {
		if (args.a0 == FFA_ERROR)
			EMSG("Failed to fetch cookie %#"PRIx64" error code %d",
			     cookie, (int)args.a2);
		else
			EMSG("Failed to fetch cookie %#"PRIx64" a0 %#"PRIx64,
			     cookie, args.a0);
		return NULL;
	}

	return nw_rxtx.rx;
}

void thread_spmc_relinquish(uint64_t cookie)
{
	struct ffa_mem_relinquish *relinquish_desc = nw_rxtx.tx;
	struct thread_smc_args args = {
		.a0 = FFA_MEM_RELINQUISH,
	};

	memset(relinquish_desc, 0, sizeof(*relinquish_desc));
	relinquish_desc->handle = cookie;
	relinquish_desc->flags = 0;
	relinquish_desc->endpoint_count = 1;
	relinquish_desc->endpoint_id_array[0] = my_endpoint_id;
	thread_smccc(&args);
	if (!is_ffa_success(args.a0))
		EMSG("Failed to relinquish cookie %#"PRIx64, cookie);
}

static int set_pages(struct ffa_address_range *regions,
		     unsigned int num_regions, unsigned int num_pages,
		     struct mobj_ffa *mf)
{
	unsigned int n = 0;
	unsigned int idx = 0;

	for (n = 0; n < num_regions; n++) {
		unsigned int page_count = READ_ONCE(regions[n].page_count);
		uint64_t addr = READ_ONCE(regions[n].address);

		if (mobj_ffa_add_pages_at(mf, &idx, addr, page_count))
			return FFA_INVALID_PARAMETERS;
	}

	if (idx != num_pages)
		return FFA_INVALID_PARAMETERS;

	return 0;
}

struct mobj_ffa *thread_spmc_populate_mobj_from_rx(uint64_t cookie)
{
	struct mobj_ffa *ret = NULL;
	struct ffa_mem_transaction *retrieve_desc = NULL;
	struct ffa_mem_access *descr_array = NULL;
	struct ffa_mem_region *descr = NULL;
	struct mobj_ffa *mf = NULL;
	unsigned int num_pages = 0;
	unsigned int offs = 0;
	struct thread_smc_args ffa_rx_release_args = {
		.a0 = FFA_RX_RELEASE
	};

	/*
	 * OP-TEE only supports a single mem_region while the
	 * specification allows for more than one.
	 */
	retrieve_desc = spmc_retrieve_req(cookie);
	if (!retrieve_desc) {
		EMSG("Failed to retrieve cookie from rx buffer %#"PRIx64,
		     cookie);
		return NULL;
	}

	descr_array = retrieve_desc->mem_access_array;
	offs = READ_ONCE(descr_array->region_offs);
	descr = (struct ffa_mem_region *)((vaddr_t)retrieve_desc + offs);

	num_pages = READ_ONCE(descr->total_page_count);
	mf = mobj_ffa_sel2_spmc_new(cookie, num_pages);
	if (!mf)
		goto out;

	if (set_pages(descr->address_range_array,
		      READ_ONCE(descr->address_range_count), num_pages, mf)) {
		mobj_ffa_sel2_spmc_delete(mf);
		goto out;
	}

	ret = mf;

out:
	/* Release RX buffer after the mem retrieve request. */
	thread_smccc(&ffa_rx_release_args);

	return ret;
}

static TEE_Result spmc_init(void)
{
	spmc_rxtx_map(&nw_rxtx);
	my_endpoint_id = spmc_get_id();
	DMSG("My endpoint ID %#x", my_endpoint_id);

	return TEE_SUCCESS;
}
#endif /*CFG_CORE_SEL2_SPMC*/

#if defined(CFG_CORE_SEL1_SPMC)
static TEE_Result spmc_init(void)
{
	my_endpoint_id = SPMC_ENDPOINT_ID;
	DMSG("My endpoint ID %#x", my_endpoint_id);

	return TEE_SUCCESS;
}
#endif /*CFG_CORE_SEL1_SPMC*/

service_init(spmc_init);