// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2021, Arm Limited
 */
#include <assert.h>
#include <bench.h>
#include <io.h>
#include <kernel/panic.h>
#include <kernel/secure_partition.h>
#include <kernel/spinlock.h>
#include <kernel/spmc_sp_handler.h>
#include <kernel/tee_misc.h>
#include <mm/mobj.h>
#include <mm/sp_mem.h>
#include <mm/vm.h>
#include <optee_ffa.h>
#include <string.h>
#include "thread_private.h"

static unsigned int mem_ref_lock = SPINLOCK_UNLOCK;

void spmc_sp_start_thread(struct thread_smc_args *args)
{
	thread_sp_alloc_and_run(args);
}

static void ffa_set_error(struct thread_smc_args *args, uint32_t error)
{
	args->a0 = FFA_ERROR;
	args->a2 = error;
}

static void ffa_success(struct thread_smc_args *args)
{
	args->a0 = FFA_SUCCESS_32;
}

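/*
 * Resolve the destination endpoint of a direct message. The caller is NULL
 * when the message comes from the Normal World. On success *dst is set to
 * the destination SP session, or to NULL when the destination is the
 * Normal World.
 */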
static TEE_Result ffa_get_dst(struct thread_smc_args *args,
			      struct sp_session *caller,
			      struct sp_session **dst)
{
	struct sp_session *s = NULL;

	if (args->a2 != FFA_PARAM_MBZ)
		return FFA_INVALID_PARAMETERS;

	s = sp_get_session(FFA_DST(args->a1));

	/* Message came from the NW */
	if (!caller) {
		if (!s) {
			EMSG("Neither destination nor source is a SP");
			return FFA_INVALID_PARAMETERS;
		}
	} else {
		/* Check if the source matches the endpoint we came from */
		if (FFA_SRC(args->a1) != caller->endpoint_id) {
			EMSG("Source address doesn't match the endpoint id");
			return FFA_INVALID_PARAMETERS;
		}
	}

	*dst = s;

	return FFA_OK;
}

static struct sp_mem_receiver *find_sp_mem_receiver(struct sp_session *s,
						    struct sp_mem *smem)
{
	struct sp_mem_receiver *receiver = NULL;

	/*
	 * FF-A Spec 8.10.2:
	 * Each Handle identifies a single unique composite memory region
	 * description, that is, there is a 1:1 mapping between the two.
	 *
	 * Each memory share has a unique handle. We can only have each SP
	 * once as a receiver in the memory share. For each receiver of a
	 * memory share, we have one sp_mem_access_descr object.
	 * This means that there can only be one SP linked to a specific
	 * struct sp_mem_access_descr.
	 */
	SLIST_FOREACH(receiver, &smem->receivers, link) {
		if (receiver->perm.endpoint_id == s->endpoint_id)
			break;
	}
	return receiver;
}

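/*
 * Register one receiver (an already loaded SP) for a memory share by
 * allocating a struct sp_mem_receiver and linking it into smem->receivers.
 * Fails if the SP is unknown, is already a receiver, or requests
 * permissions outside FFA_MEM_ACC_MASK.
 */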
static int add_mem_region_to_sp(struct ffa_mem_access *mem_acc,
				struct sp_mem *smem)
{
	struct ffa_mem_access_perm *access_perm = &mem_acc->access_perm;
	struct sp_session *s = NULL;
	struct sp_mem_receiver *receiver = NULL;
	uint8_t perm = READ_ONCE(access_perm->perm);
	uint16_t endpoint_id = READ_ONCE(access_perm->endpoint_id);

	s = sp_get_session(endpoint_id);

	/* Only add memory shares of loaded SPs */
	if (!s)
		return FFA_DENIED;

	/* Only allow each endpoint once */
	if (find_sp_mem_receiver(s, smem))
		return FFA_DENIED;

	if (perm & ~FFA_MEM_ACC_MASK)
		return FFA_DENIED;

	receiver = calloc(1, sizeof(struct sp_mem_receiver));
	if (!receiver)
		return FFA_NO_MEMORY;

	receiver->smem = smem;

	receiver->perm.endpoint_id = endpoint_id;
	receiver->perm.perm = perm;
	receiver->perm.flags = READ_ONCE(access_perm->flags);

	SLIST_INSERT_HEAD(&smem->receivers, receiver, link);

	return FFA_OK;
}

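/*
 * Handle FFA_MEM_SHARE where the memory transaction descriptor is passed
 * through the RX buffer. On success the 64-bit global handle of the new
 * share is returned in registers w2/w3.
 */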
static void spmc_sp_handle_mem_share(struct thread_smc_args *args,
				     struct ffa_rxtx *rxtx,
				     struct sp_session *owner_sp)
{
	uint64_t global_handle = 0;
	int res = FFA_OK;
	uint32_t ret_w2 = 0;
	uint32_t ret_w3 = 0;

	cpu_spin_lock(&rxtx->spinlock);

	res = spmc_sp_add_share(rxtx, args->a1, &global_handle, owner_sp);
	if (!res) {
		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
		args->a3 = ret_w3;
		args->a2 = ret_w2;
		args->a1 = FFA_PARAM_MBZ;
		args->a0 = FFA_SUCCESS_32;
	} else {
		ffa_set_error(args, res);
	}

	cpu_spin_unlock(&rxtx->spinlock);
}

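/*
 * Add a memory region which is shared from an SP. The region is looked up
 * in the owner's address space, split into one sp_mem_map_region per
 * backing mobj and checked against the permissions it was originally
 * mapped with.
 */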
static int spmc_sp_add_sp_region(struct sp_mem *smem,
				 struct ffa_address_range *mem_reg,
				 struct sp_session *owner_sp,
				 uint8_t highest_permission)
{
	struct sp_ctx *sp_ctx = NULL;
	uint64_t va = READ_ONCE(mem_reg->address);
	int res = FFA_OK;
	uint64_t region_len = READ_ONCE(mem_reg->page_count) * SMALL_PAGE_SIZE;
	struct mobj *mobj = NULL;

	sp_ctx = to_sp_ctx(owner_sp->ts_sess.ctx);

	/*
	 * The memory region we try to share might not be linked to just one
	 * mobj. Create a new region for each mobj.
	 */
	while (region_len) {
		size_t len = region_len;
		struct sp_mem_map_region *region = NULL;
		uint16_t prot = 0;
		size_t offs = 0;

		/*
		 * There is already a mobj for each address that is in the
		 * SP's address range.
		 */
		mobj = vm_get_mobj(&sp_ctx->uctx, va, &len, &prot, &offs);
		if (!mobj)
			return FFA_DENIED;

		/*
		 * When sharing memory from an SP, make sure that it is not
		 * shared with more permissions than it was originally
		 * mapped with.
		 */
		if ((highest_permission & FFA_MEM_ACC_RW) &&
		    !(prot & TEE_MATTR_UW)) {
			res = FFA_DENIED;
			goto err;
		}

		if ((highest_permission & FFA_MEM_ACC_EXE) &&
		    !(prot & TEE_MATTR_UX)) {
			res = FFA_DENIED;
			goto err;
		}

		region = calloc(1, sizeof(*region));
		if (!region) {
			res = FFA_NO_MEMORY;
			goto err;
		}
		region->mobj = mobj;
		region->page_offset = offs;
		region->page_count = len / SMALL_PAGE_SIZE;

		if (!sp_has_exclusive_access(region, &sp_ctx->uctx)) {
			free(region);
			res = FFA_DENIED;
			goto err;
		}

		va += len;
		region_len -= len;
		SLIST_INSERT_HEAD(&smem->regions, region, link);
	}

	return FFA_OK;
err:
	mobj_put(mobj);

	return res;
}

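/*
 * Add a memory region which is shared from the Normal World. A new mobj is
 * created for the physical address ranges in the descriptor and linked to
 * the share as a single sp_mem_map_region.
 */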
static int spmc_sp_add_nw_region(struct sp_mem *smem,
				 struct ffa_mem_region *mem_reg)
{
	uint64_t page_count = READ_ONCE(mem_reg->total_page_count);
	struct sp_mem_map_region *region = NULL;
	struct mobj *m = sp_mem_new_mobj(page_count);
	unsigned int i = 0;
	unsigned int idx = 0;
	int res = FFA_OK;
	uint64_t address_count = READ_ONCE(mem_reg->address_range_count);

	if (!m)
		return FFA_NO_MEMORY;

	for (i = 0; i < address_count; i++) {
		struct ffa_address_range *addr_range = NULL;

		addr_range = &mem_reg->address_range_array[i];
		if (sp_mem_add_pages(m, &idx,
				     READ_ONCE(addr_range->address),
				     READ_ONCE(addr_range->page_count))) {
			res = FFA_DENIED;
			goto clean_up;
		}
	}

	region = calloc(1, sizeof(*region));
	if (!region) {
		res = FFA_NO_MEMORY;
		goto clean_up;
	}

	region->mobj = m;
	region->page_count = page_count;

	if (!sp_has_exclusive_access(region, NULL)) {
		free(region);
		res = FFA_DENIED;
		goto clean_up;
	}

	SLIST_INSERT_HEAD(&smem->regions, region, link);
	return FFA_OK;
clean_up:
	mobj_put(m);
	return res;
}

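/*
 * Parse an FFA_MEM_SHARE transaction descriptor of at most blen bytes from
 * the RX buffer and build the corresponding struct sp_mem. The share is
 * owned either by an SP (owner_sp set) or by the Normal World (owner_sp is
 * NULL). On success the new share is registered and its handle is returned
 * in *global_handle.
 */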
int spmc_sp_add_share(struct ffa_rxtx *rxtx,
		      size_t blen, uint64_t *global_handle,
		      struct sp_session *owner_sp)
{
	int res = FFA_INVALID_PARAMETERS;
	unsigned int num_mem_accs = 0;
	unsigned int i = 0;
	struct ffa_mem_access *mem_acc = NULL;
	size_t needed_size = 0;
	size_t addr_range_offs = 0;
	struct ffa_mem_region *mem_reg = NULL;
	uint8_t highest_permission = 0;
	struct sp_mem *smem = sp_mem_new();
	struct ffa_mem_transaction *input_descr = rxtx->rx;
	uint16_t sender_id = READ_ONCE(input_descr->sender_id);

	if (!smem)
		return FFA_NO_MEMORY;

	if ((owner_sp && owner_sp->endpoint_id != sender_id) ||
	    (!owner_sp && sp_get_session(sender_id))) {
		res = FFA_DENIED;
		goto cleanup;
	}

	num_mem_accs = READ_ONCE(input_descr->mem_access_count);
	mem_acc = input_descr->mem_access_array;

	if (!num_mem_accs) {
		res = FFA_DENIED;
		goto cleanup;
	}

	/* Store the ffa_mem_transaction */
	smem->sender_id = sender_id;
	smem->mem_reg_attr = READ_ONCE(input_descr->mem_reg_attr);
	smem->flags = READ_ONCE(input_descr->flags);
	smem->tag = READ_ONCE(input_descr->tag);

	if (MUL_OVERFLOW(num_mem_accs, sizeof(*mem_acc), &needed_size) ||
	    ADD_OVERFLOW(needed_size, sizeof(*input_descr), &needed_size) ||
	    needed_size > blen) {
		res = FFA_NO_MEMORY;
		goto cleanup;
	}

	for (i = 0; i < num_mem_accs; i++)
		highest_permission |= READ_ONCE(mem_acc[i].access_perm.perm);

	addr_range_offs = READ_ONCE(mem_acc[0].region_offs);
	mem_reg = (void *)((char *)input_descr + addr_range_offs);

	/* Iterate over all the addresses */
	if (owner_sp) {
		size_t address_range = READ_ONCE(mem_reg->address_range_count);

		for (i = 0; i < address_range; i++) {
			struct ffa_address_range *addr_range = NULL;

			addr_range = &mem_reg->address_range_array[i];

			if (!core_is_buffer_inside((vaddr_t)addr_range,
						   sizeof(*addr_range),
						   (vaddr_t)rxtx->rx,
						   rxtx->size)) {
				res = FFA_NO_MEMORY;
				goto cleanup;
			}
			res = spmc_sp_add_sp_region(smem, addr_range,
						    owner_sp,
						    highest_permission);
			if (res)
				goto cleanup;
		}
	} else {
		res = spmc_sp_add_nw_region(smem, mem_reg);
		if (res)
			goto cleanup;
	}

	/* Add each receiver to the memory share */
	for (i = 0; i < num_mem_accs; i++) {
		res = add_mem_region_to_sp(&mem_acc[i], smem);
		if (res)
			goto cleanup;
	}
	*global_handle = smem->global_handle;
	sp_mem_add(smem);

	return FFA_OK;

cleanup:
	sp_mem_remove(smem);
	return res;
}

static bool check_rxtx(struct ffa_rxtx *rxtx)
{
	return rxtx && rxtx->rx && rxtx->tx && rxtx->size > 0;
}

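/*
 * Validate an FFA_MEM_RETRIEVE_REQ against the stored share: the request
 * must carry exactly one ffa_mem_access element, the tag must match, the
 * requested permissions must not exceed the shared permissions and the TX
 * buffer must be large enough for the response.
 */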
static TEE_Result check_retrieve_request(struct sp_mem_receiver *receiver,
					 struct ffa_mem_transaction *retr_dsc,
					 struct sp_mem *smem, int64_t tx_len)
{
	struct ffa_mem_access *retr_access = NULL;
	uint8_t share_perm = receiver->perm.perm;
	uint32_t retr_perm = 0;
	uint32_t retr_flags = READ_ONCE(retr_dsc->flags);
	uint64_t retr_tag = READ_ONCE(retr_dsc->tag);
	struct sp_mem_map_region *reg = NULL;

	/*
	 * The request came from the endpoint. It should only have one
	 * ffa_mem_access element.
	 */
	if (READ_ONCE(retr_dsc->mem_access_count) != 1)
		return TEE_ERROR_BAD_PARAMETERS;

	retr_access = retr_dsc->mem_access_array;
	retr_perm = READ_ONCE(retr_access->access_perm.perm);

	/* Check if the tag is correct */
	if (receiver->smem->tag != retr_tag) {
		EMSG("Incorrect tag %#"PRIx64" %#"PRIx64, receiver->smem->tag,
		     retr_tag);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	/* Check permissions and flags */
	if ((retr_perm & FFA_MEM_ACC_RW) &&
	    !(share_perm & FFA_MEM_ACC_RW)) {
		DMSG("Incorrect memshare permission set");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	if ((retr_perm & FFA_MEM_ACC_EXE) &&
	    !(share_perm & FFA_MEM_ACC_EXE)) {
		DMSG("Incorrect memshare permission set");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	if (retr_flags & FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH) {
		DMSG("CLEAR_RELINQUISH is not allowed for FFA_SHARE");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	/*
	 * Check if there is enough space in the TX buffer to send the
	 * response.
	 */
	tx_len -= sizeof(struct ffa_mem_transaction) +
		  sizeof(struct ffa_mem_access) +
		  sizeof(struct ffa_mem_region);

	if (tx_len < 0)
		return FFA_NO_MEMORY;

	SLIST_FOREACH(reg, &smem->regions, link) {
		tx_len -= sizeof(struct ffa_address_range);
		if (tx_len < 0)
			return FFA_NO_MEMORY;
	}

	return TEE_SUCCESS;
}

static void create_retrieve_response(void *dst_buffer,
				     struct sp_mem_receiver *receiver,
				     struct sp_mem *smem, struct sp_session *s)
{
	size_t off = 0;
	struct ffa_mem_region *dst_region = NULL;
	struct ffa_mem_transaction *d_ds = dst_buffer;
	struct ffa_address_range *addr_dst = NULL;
	struct sp_mem_map_region *reg = NULL;

	/*
	 * We respond with an FFA_MEM_RETRIEVE_RESP which places the
	 * following data in the RX buffer of the SP:
	 * struct mem_transaction_descr
	 * struct mem_access_descr (always 1 element)
	 * struct mem_region_descr
	 */
	/* Copy the mem_transaction_descr */
	d_ds->sender_id = receiver->smem->sender_id;
	d_ds->mem_reg_attr = receiver->smem->mem_reg_attr;
	d_ds->flags = receiver->smem->flags;
	d_ds->tag = receiver->smem->tag;

	off = sizeof(struct ffa_mem_transaction) +
	      sizeof(struct ffa_mem_access);

	d_ds->mem_access_count = 1;

	/* Copy the mem_access_descr */
	d_ds->mem_access_array[0].region_offs = off;
	memcpy(&d_ds->mem_access_array[0].access_perm,
	       &receiver->perm, sizeof(struct ffa_mem_access_perm));

	/* Copy the mem_region_descr */
	dst_region = (struct ffa_mem_region *)((vaddr_t)d_ds + off);

	dst_region->address_range_count = 0;
	dst_region->total_page_count = 0;

	addr_dst = dst_region->address_range_array;

	SLIST_FOREACH(reg, &smem->regions, link) {
		uint32_t offset = reg->page_offset;
		struct sp_ctx *ctx = to_sp_ctx(s->ts_sess.ctx);

		addr_dst->address = (uint64_t)sp_mem_get_va(&ctx->uctx,
							    offset,
							    reg->mobj);
		addr_dst->page_count = reg->page_count;
		dst_region->address_range_count++;

		dst_region->total_page_count += addr_dst->page_count;
	}
}

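/*
 * Handle FFA_MEM_RETRIEVE_REQ from an SP. The shared memory is mapped into
 * the SP on the first retrieve and a reference count is kept so that the
 * mapping is only removed once every retrieve has been matched by a
 * relinquish.
 */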
static void ffa_mem_retrieve(struct thread_smc_args *args,
			     struct sp_session *caller_sp,
			     struct ffa_rxtx *rxtx)
{
	int ret = FFA_OK;
	size_t tx_len = 0;
	struct ffa_mem_transaction *retr_dsc = NULL;
	struct ffa_mem_region *mem_region = NULL;
	uint64_t va = 0;
	struct sp_mem *smem = NULL;
	struct sp_mem_receiver *receiver = NULL;
	uint32_t exceptions = 0;
	uint32_t address_offset = 0;
	size_t needed_size = 0;

	if (!check_rxtx(rxtx) || !rxtx->tx_is_mine) {
		ret = FFA_DENIED;
		goto err;
	}

	tx_len = rxtx->size;
	retr_dsc = rxtx->rx;

	smem = sp_mem_get(retr_dsc->global_handle);
	if (!smem) {
		DMSG("Incorrect handle");
		ret = FFA_DENIED;
		goto err;
	}

	receiver = sp_mem_get_receiver(caller_sp->endpoint_id, smem);
	if (!receiver) {
		DMSG("Caller is not a receiver of this share");
		ret = FFA_DENIED;
		goto err;
	}

	address_offset = READ_ONCE(retr_dsc->mem_access_array[0].region_offs);

	if (ADD_OVERFLOW(address_offset, sizeof(struct ffa_mem_region),
			 &needed_size) || needed_size > tx_len) {
		ret = FFA_INVALID_PARAMETERS;
		goto err;
	}

	if (check_retrieve_request(receiver, retr_dsc, smem, tx_len) !=
	    TEE_SUCCESS) {
		ret = FFA_INVALID_PARAMETERS;
		goto err;
	}

	exceptions = cpu_spin_lock_xsave(&mem_ref_lock);

	if (receiver->ref_count == UINT8_MAX) {
		ret = FFA_DENIED;
		cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
		goto err;
	}

	receiver->ref_count++;

	/* We only need to map the region the first time it is retrieved. */
	if (receiver->ref_count == 1) {
		TEE_Result ret_map = TEE_SUCCESS;

		cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);

		/*
		 * Try to map the memory linked to the handle in
		 * sp_mem_access_descr.
		 */
		mem_region = (struct ffa_mem_region *)((vaddr_t)retr_dsc +
						       address_offset);

		va = READ_ONCE(mem_region->address_range_array[0].address);
		ret_map = sp_map_shared(caller_sp, receiver, smem, &va);

		if (ret_map) {
			EMSG("Could not map memory region: %#"PRIx32, ret_map);
			exceptions = cpu_spin_lock_xsave(&mem_ref_lock);
			receiver->ref_count--;
			cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
			ret = FFA_DENIED;
			goto err;
		}
	} else {
		cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
	}

	create_retrieve_response(rxtx->tx, receiver, smem, caller_sp);

	args->a0 = FFA_MEM_RETRIEVE_RESP;
	args->a1 = tx_len;
	args->a2 = tx_len;

	rxtx->tx_is_mine = false;

	return;
err:
	ffa_set_error(args, ret);
}

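/*
 * Handle FFA_MEM_RELINQUISH from an SP. The caller's reference count on the
 * share is decremented and the memory is unmapped from the SP when the last
 * reference is dropped.
 */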
static void ffa_mem_relinquish(struct thread_smc_args *args,
			       struct sp_session *caller_sp,
			       struct ffa_rxtx *rxtx)
{
	struct sp_mem *smem = NULL;
	struct ffa_mem_relinquish *mem = rxtx->rx;
	struct sp_mem_receiver *receiver = NULL;
	int err = FFA_NOT_SUPPORTED;
	uint32_t exceptions = 0;

	if (!check_rxtx(rxtx)) {
		ffa_set_error(args, FFA_DENIED);
		return;
	}

	exceptions = cpu_spin_lock_xsave(&rxtx->spinlock);
	smem = sp_mem_get(READ_ONCE(mem->handle));

	if (!smem) {
		DMSG("Incorrect handle");
		err = FFA_DENIED;
		goto err_unlock_rxtx;
	}

	if (READ_ONCE(mem->endpoint_count) != 1) {
		DMSG("Incorrect endpoint count");
		err = FFA_INVALID_PARAMETERS;
		goto err_unlock_rxtx;
	}

	if (READ_ONCE(mem->endpoint_id_array[0]) != caller_sp->endpoint_id) {
		DMSG("Incorrect endpoint id");
		err = FFA_DENIED;
		goto err_unlock_rxtx;
	}

	cpu_spin_unlock_xrestore(&rxtx->spinlock, exceptions);

	receiver = sp_mem_get_receiver(caller_sp->endpoint_id, smem);
	if (!receiver) {
		DMSG("Caller is not a receiver of this share");
		ffa_set_error(args, FFA_DENIED);
		return;
	}

	exceptions = cpu_spin_lock_xsave(&mem_ref_lock);
	if (!receiver->ref_count) {
		DMSG("Too many relinquish requests");
		err = FFA_DENIED;
		goto err_unlock_memref;
	}

	receiver->ref_count--;
	if (!receiver->ref_count) {
		cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
		if (sp_unmap_ffa_regions(caller_sp, smem) != TEE_SUCCESS) {
			DMSG("Failed to unmap region");
			ffa_set_error(args, FFA_DENIED);
			return;
		}
	} else {
		cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
	}

	ffa_success(args);
	return;

err_unlock_rxtx:
	cpu_spin_unlock_xrestore(&rxtx->spinlock, exceptions);
	ffa_set_error(args, err);
	return;
err_unlock_memref:
	cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
	ffa_set_error(args, err);
}

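/*
 * Zero all regions of a memory share through the owner's mapping. Used when
 * FFA_MEM_RECLAIM is called with the CLEAR flag set.
 */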
static void zero_mem_region(struct sp_mem *smem, struct sp_session *s)
{
	void *addr = NULL;
	struct sp_ctx *ctx = to_sp_ctx(s->ts_sess.ctx);
	struct sp_mem_map_region *reg = NULL;

	ts_push_current_session(&s->ts_sess);
	SLIST_FOREACH(reg, &smem->regions, link) {
		size_t sz = reg->page_count * SMALL_PAGE_SIZE;

		addr = sp_mem_get_va(&ctx->uctx, reg->page_offset, reg->mobj);

		assert(addr);
		memset(addr, 0, sz);
	}
	ts_pop_current_session();
}

/*
 * ffa_mem_reclaim() returns false if it could not process the reclaim
 * message. This happens when the memory region was shared with the OP-TEE
 * endpoint. In that case thread_spmc calls handle_mem_reclaim() to make
 * sure that the region is reclaimed from the OP-TEE endpoint.
 */
bool ffa_mem_reclaim(struct thread_smc_args *args,
		     struct sp_session *caller_sp)
{
	uint64_t handle = reg_pair_to_64(args->a2, args->a1);
	uint32_t flags = args->a3;
	uint32_t endpoint = 0;
	struct sp_mem *smem = NULL;
	struct sp_mem_receiver *receiver = NULL;
	uint32_t exceptions = 0;

	smem = sp_mem_get(handle);
	if (!smem)
		return false;

	if (caller_sp)
		endpoint = caller_sp->endpoint_id;

	/* Make sure that the caller is the owner of the share */
	if (smem->sender_id != endpoint) {
		ffa_set_error(args, FFA_DENIED);
		return true;
	}

	exceptions = cpu_spin_lock_xsave(&mem_ref_lock);

	/* Make sure that all receivers have relinquished the share */
	SLIST_FOREACH(receiver, &smem->receivers, link) {
		if (receiver->ref_count != 0) {
			ffa_set_error(args, FFA_DENIED);
			cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
			return true;
		}
	}

	if (flags & FFA_MEMORY_REGION_FLAG_CLEAR) {
		if (caller_sp) {
			zero_mem_region(smem, caller_sp);
		} else {
			/*
			 * Currently we don't support zeroing Normal World
			 * memory. To do this we would have to map the memory
			 * again, zero it and unmap it.
			 */
			ffa_set_error(args, FFA_DENIED);
			cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);
			return true;
		}
	}

	sp_mem_remove(smem);
	cpu_spin_unlock_xrestore(&mem_ref_lock, exceptions);

	ffa_success(args);
	return true;
}

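/*
 * Handle FFA_MSG_SEND_DIRECT_REQ sent to an SP. The destination SP is
 * marked busy and entered. Returns the session that runs next, or NULL when
 * control goes back to the Normal World.
 */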
static struct sp_session *
ffa_handle_sp_direct_req(struct thread_smc_args *args,
			 struct sp_session *caller_sp)
{
	struct sp_session *dst = NULL;
	TEE_Result res = FFA_OK;

	if (args->a2 != FFA_PARAM_MBZ) {
		ffa_set_error(args, FFA_INVALID_PARAMETERS);
		return NULL;
	}

	res = ffa_get_dst(args, caller_sp, &dst);
	if (res) {
		/* Tried to send a message to an incorrect endpoint */
		ffa_set_error(args, res);
		return caller_sp;
	}
	if (!dst) {
		EMSG("Request to normal world not supported");
		ffa_set_error(args, FFA_NOT_SUPPORTED);
		return NULL;
	}

	cpu_spin_lock(&dst->spinlock);
	if (dst->state != sp_idle) {
		DMSG("SP is busy");
		ffa_set_error(args, FFA_BUSY);
		cpu_spin_unlock(&dst->spinlock);
		return caller_sp;
	}

	dst->state = sp_busy;
	cpu_spin_unlock(&dst->spinlock);

	/*
	 * Store the calling endpoint id. This will make it possible to check
	 * if the response is sent back to the correct endpoint.
	 */
	dst->caller_id = FFA_SRC(args->a1);

	/* Forward the message to the destination SP */
	res = sp_enter(args, dst);
	if (res) {
		/* The SP panicked */
		ffa_set_error(args, FFA_ABORTED);
		/* Return the error to the calling SP */
		return caller_sp;
	}

	return dst;
}

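/*
 * Handle FFA_MSG_SEND_DIRECT_RESP from an SP. The caller is marked idle
 * again and the response is forwarded to the destination SP or, when dst is
 * NULL, back to the Normal World.
 */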
static struct sp_session *
ffa_handle_sp_direct_resp(struct thread_smc_args *args,
			  struct sp_session *caller_sp)
{
	struct sp_session *dst = NULL;
	TEE_Result res = FFA_OK;

	if (!caller_sp) {
		EMSG("Response from normal world not supported");
		ffa_set_error(args, FFA_NOT_SUPPORTED);
		return NULL;
	}

	res = ffa_get_dst(args, caller_sp, &dst);
	if (res) {
		/* Tried to send a response to an incorrect endpoint */
		ffa_set_error(args, res);
		return caller_sp;
	}

	if (caller_sp->state != sp_busy) {
		EMSG("SP is not waiting for a request");
		ffa_set_error(args, FFA_INVALID_PARAMETERS);
		return caller_sp;
	}

	if (caller_sp->caller_id != FFA_DST(args->a1)) {
		EMSG("FFA_MSG_SEND_DIRECT_RESP to incorrect SP");
		ffa_set_error(args, FFA_INVALID_PARAMETERS);
		return caller_sp;
	}

	caller_sp->caller_id = 0;

	cpu_spin_lock(&caller_sp->spinlock);
	caller_sp->state = sp_idle;
	cpu_spin_unlock(&caller_sp->spinlock);

	if (!dst) {
		/* Send the message back to the NW */
		return NULL;
	}

	/* Forward the message to the destination SP */
	res = sp_enter(args, dst);
	if (res) {
		/* The SP panicked */
		ffa_set_error(args, FFA_ABORTED);
		/* Return the error to the calling SP */
		return caller_sp;
	}
	return dst;
}

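/*
 * Handle FFA_ERROR coming from an SP or the Normal World. The error is
 * forwarded to the destination endpoint; if that endpoint cannot be
 * entered, it is marked dead and the call chain is unwound by one link.
 */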
static struct sp_session *
ffa_handle_sp_error(struct thread_smc_args *args,
		    struct sp_session *caller_sp)
{
	struct sp_session *dst = NULL;

	dst = sp_get_session(FFA_DST(args->a1));

	/* If an SP sent the FFA_ERROR it is now idle */
	if (caller_sp)
		caller_sp->state = sp_idle;

	/* If dst == NULL send the message to the Normal World */
	if (dst && sp_enter(args, dst)) {
		/*
		 * We cannot return the error. Unwind the call chain by one
		 * link. Set the state of the SP to dead.
		 */
		dst->state = sp_dead;
		/* Create error. */
		ffa_set_error(args, FFA_DENIED);
		return sp_get_session(dst->caller_id);
	}

	return dst;
}

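/*
 * Handle FFA_FEATURES for an SP. Only the RXTX_MAP interface is reported as
 * supported; every other function ID is answered with FFA_NOT_SUPPORTED.
 */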
static void handle_features(struct thread_smc_args *args)
{
	uint32_t ret_fid = 0;
	uint32_t ret_w2 = FFA_PARAM_MBZ;

	switch (args->a1) {
#ifdef ARM64
	case FFA_RXTX_MAP_64:
#endif
	case FFA_RXTX_MAP_32:
		ret_fid = FFA_SUCCESS_32;
		ret_w2 = 0; /* 4kB minimum buffer size and alignment boundary */
		break;
	case FFA_ERROR:
	case FFA_VERSION:
	case FFA_SUCCESS_32:
#ifdef ARM64
	case FFA_SUCCESS_64:
#endif
	default:
		ret_fid = FFA_ERROR;
		ret_w2 = FFA_NOT_SUPPORTED;
		break;
	}

	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

/*
 * FF-A message handler for SPs. Every message to or from an SP is handled
 * here. This is the entry point of the sp_spmc kernel thread. caller_sp is
 * set to NULL when the message comes from the Normal World.
 */
void spmc_sp_msg_handler(struct thread_smc_args *args,
			 struct sp_session *caller_sp)
{
	thread_check_canaries();
	do {
		switch (args->a0) {
		case FFA_MSG_SEND_DIRECT_REQ_32:
			caller_sp = ffa_handle_sp_direct_req(args, caller_sp);
			break;
		case FFA_MSG_SEND_DIRECT_RESP_32:
			caller_sp = ffa_handle_sp_direct_resp(args, caller_sp);
			break;
		case FFA_ERROR:
			caller_sp = ffa_handle_sp_error(args, caller_sp);
			break;
		case FFA_MSG_WAIT:
			/* FFA_MSG_WAIT gives control back to the NW */
			cpu_spin_lock(&caller_sp->spinlock);
			caller_sp->state = sp_idle;
			cpu_spin_unlock(&caller_sp->spinlock);
			caller_sp = NULL;
			break;
#ifdef ARM64
		case FFA_RXTX_MAP_64:
#endif
		case FFA_RXTX_MAP_32:
			ts_push_current_session(&caller_sp->ts_sess);
			spmc_handle_rxtx_map(args, &caller_sp->rxtx);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
		case FFA_RXTX_UNMAP:
			ts_push_current_session(&caller_sp->ts_sess);
			spmc_handle_rxtx_unmap(args, &caller_sp->rxtx);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
		case FFA_RX_RELEASE:
			ts_push_current_session(&caller_sp->ts_sess);
			spmc_handle_rx_release(args, &caller_sp->rxtx);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
		case FFA_ID_GET:
			args->a0 = FFA_SUCCESS_32;
			args->a2 = caller_sp->endpoint_id;
			sp_enter(args, caller_sp);
			break;
		case FFA_VERSION:
			spmc_handle_version(args);
			sp_enter(args, caller_sp);
			break;
		case FFA_FEATURES:
			handle_features(args);
			sp_enter(args, caller_sp);
			break;
		case FFA_PARTITION_INFO_GET:
			ts_push_current_session(&caller_sp->ts_sess);
			spmc_handle_partition_info_get(args, &caller_sp->rxtx);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
#ifdef ARM64
		case FFA_MEM_SHARE_64:
#endif
		case FFA_MEM_SHARE_32:
			ts_push_current_session(&caller_sp->ts_sess);
			spmc_sp_handle_mem_share(args, &caller_sp->rxtx,
						 caller_sp);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
#ifdef ARM64
		case FFA_MEM_RETRIEVE_REQ_64:
#endif
		case FFA_MEM_RETRIEVE_REQ_32:
			ts_push_current_session(&caller_sp->ts_sess);
			ffa_mem_retrieve(args, caller_sp, &caller_sp->rxtx);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
		case FFA_MEM_RELINQUISH:
			ts_push_current_session(&caller_sp->ts_sess);
			ffa_mem_relinquish(args, caller_sp, &caller_sp->rxtx);
			ts_pop_current_session();
			sp_enter(args, caller_sp);
			break;
		case FFA_MEM_RECLAIM:
			ffa_mem_reclaim(args, caller_sp);
			sp_enter(args, caller_sp);
			break;
		default:
			EMSG("Unhandled FFA function ID %#"PRIx32,
			     (uint32_t)args->a0);
			ffa_set_error(args, FFA_INVALID_PARAMETERS);
			sp_enter(args, caller_sp);
		}
	} while (caller_sp);
}