// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2019-2021, Linaro Limited
 */

#include <assert.h>
#include <compiler.h>
#include <config.h>
#include <io.h>
#include <kernel/misc.h>
#include <kernel/msg_param.h>
#include <kernel/thread.h>
#include <kernel/virtualization.h>
#include <mm/core_mmu.h>
#include <optee_msg.h>
#include <optee_rpc_cmd.h>
#include <sm/optee_smc.h>
#include <sm/sm.h>
#include <string.h>
#include <tee/entry_std.h>
#include <tee/entry_fast.h>
#include <tee/tee_cryp_utl.h>
#include <tee/tee_fs_rpc.h>

#include "thread_private.h"

static bool thread_prealloc_rpc_cache;
static unsigned int thread_rpc_pnum;

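/*
 * Handler for fast SMC calls: selects the calling guest when
 * virtualization is enabled, dispatches the request to tee_entry_fast()
 * and returns with all exceptions still masked.
 */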
void thread_handle_fast_smc(struct thread_smc_args *args)
{
	thread_check_canaries();

	if (IS_ENABLED(CFG_VIRTUALIZATION) &&
	    virt_set_guest(args->a7)) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		goto out;
	}

	tee_entry_fast(args);

	if (IS_ENABLED(CFG_VIRTUALIZATION))
		virt_unset_guest();

out:
	/* Fast handlers must not unmask any exceptions */
	assert(thread_get_exceptions() == THREAD_EXCP_ALL);
}

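/*
 * Handler for standard (yielding) SMC calls: either resumes a thread
 * suspended in an RPC or allocates a new thread for the call.
 */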
uint32_t thread_handle_std_smc(uint32_t a0, uint32_t a1, uint32_t a2,
			       uint32_t a3, uint32_t a4, uint32_t a5,
			       uint32_t a6 __unused, uint32_t a7 __maybe_unused)
{
	uint32_t rv = OPTEE_SMC_RETURN_OK;

	thread_check_canaries();

	if (IS_ENABLED(CFG_VIRTUALIZATION) && virt_set_guest(a7))
		return OPTEE_SMC_RETURN_ENOTAVAIL;

	/*
	 * thread_resume_from_rpc() and thread_alloc_and_run() only return
	 * on error. Successful return is done via thread_exit() or
	 * thread_rpc().
	 */
	if (a0 == OPTEE_SMC_CALL_RETURN_FROM_RPC) {
		thread_resume_from_rpc(a3, a1, a2, a4, a5);
		rv = OPTEE_SMC_RETURN_ERESUME;
	} else {
		thread_alloc_and_run(a0, a1, a2, a3, 0, 0);
		rv = OPTEE_SMC_RETURN_ETHREAD_LIMIT;
	}

	if (IS_ENABLED(CFG_VIRTUALIZATION))
		virt_unset_guest();

	return rv;
}

/**
 * Free physical memory previously allocated with thread_rpc_alloc_arg()
 *
 * @cookie:	cookie received when allocating the buffer
 */
static void thread_rpc_free_arg(uint64_t cookie)
{
	if (cookie) {
		uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
			OPTEE_SMC_RETURN_RPC_FREE
		};

		reg_pair_from_64(cookie, rpc_args + 1, rpc_args + 2);
		thread_rpc(rpc_args);
	}
}

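/*
 * Wraps a command buffer residing in the static (reserved) shared memory
 * area in a mobj. Returns NULL if the buffer cannot be mapped or claims
 * more than OPTEE_MSG_MAX_NUM_PARAMS parameters.
 */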
static struct mobj *get_cmd_buffer(paddr_t parg, uint32_t *num_params)
{
	struct optee_msg_arg *arg;
	size_t args_size;

	arg = phys_to_virt(parg, MEM_AREA_NSEC_SHM,
			   sizeof(struct optee_msg_arg));
	if (!arg)
		return NULL;

	*num_params = READ_ONCE(arg->num_params);
	if (*num_params > OPTEE_MSG_MAX_NUM_PARAMS)
		return NULL;

	args_size = OPTEE_MSG_GET_ARG_SIZE(*num_params);

	return mobj_shm_alloc(parg, args_size, 0);
}

#ifdef CFG_CORE_DYN_SHM
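/*
 * Maps a page-aligned command buffer passed in dynamic shared memory and
 * wraps it in a mobj. The buffer must fit within a single small page.
 */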
static struct mobj *map_cmd_buffer(paddr_t parg, uint32_t *num_params)
{
	struct mobj *mobj;
	struct optee_msg_arg *arg;
	size_t args_size;

	assert(!(parg & SMALL_PAGE_MASK));
	/* mobj_mapped_shm_alloc checks if parg resides in nonsec ddr */
	mobj = mobj_mapped_shm_alloc(&parg, 1, 0, 0);
	if (!mobj)
		return NULL;

	arg = mobj_get_va(mobj, 0, SMALL_PAGE_SIZE);
	if (!arg)
		goto err;

	*num_params = READ_ONCE(arg->num_params);
	if (*num_params > OPTEE_MSG_MAX_NUM_PARAMS)
		goto err;

	args_size = OPTEE_MSG_GET_ARG_SIZE(*num_params);
	if (args_size > SMALL_PAGE_SIZE) {
		EMSG("Command buffer spans across page boundary");
		goto err;
	}

	return mobj;
err:
	mobj_put(mobj);
	return NULL;
}
#else
static struct mobj *map_cmd_buffer(paddr_t pargi __unused,
				   uint32_t *num_params __unused)
{
	return NULL;
}
#endif /*CFG_CORE_DYN_SHM*/

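/*
 * Handles OPTEE_SMC_CALL_WITH_ARG: locates the struct optee_msg_arg that
 * normal world passed in registers a1/a2, maps it from either static or
 * dynamic shared memory and hands it over to tee_entry_std().
 */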
static uint32_t std_smc_entry(uint32_t a0, uint32_t a1, uint32_t a2,
			      uint32_t a3 __unused)
{
	paddr_t parg = 0;
	struct optee_msg_arg *arg = NULL;
	uint32_t num_params = 0;
	struct mobj *mobj = NULL;
	uint32_t rv = 0;

	if (a0 != OPTEE_SMC_CALL_WITH_ARG) {
		EMSG("Unknown SMC 0x%"PRIx32, a0);
		DMSG("Expected 0x%x", OPTEE_SMC_CALL_WITH_ARG);
		return OPTEE_SMC_RETURN_EBADCMD;
	}
	parg = reg_pair_to_64(a1, a2);

	/* Check if this region is in static shared space */
	if (core_pbuf_is(CORE_MEM_NSEC_SHM, parg,
			 sizeof(struct optee_msg_arg))) {
		mobj = get_cmd_buffer(parg, &num_params);
	} else {
		if (parg & SMALL_PAGE_MASK)
			return OPTEE_SMC_RETURN_EBADADDR;
		mobj = map_cmd_buffer(parg, &num_params);
	}

	if (!mobj || !IS_ALIGNED_WITH_TYPE(parg, struct optee_msg_arg)) {
		EMSG("Bad arg address 0x%" PRIxPA, parg);
		mobj_put(mobj);
		return OPTEE_SMC_RETURN_EBADADDR;
	}

	arg = mobj_get_va(mobj, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
	assert(arg && mobj_is_nonsec(mobj));
	rv = tee_entry_std(arg, num_params);
	mobj_put(mobj);

	return rv;
}

/*
 * Helper routine for the assembly function thread_std_smc_entry()
 *
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1, uint32_t a2,
				       uint32_t a3, uint32_t a4 __unused,
				       uint32_t a5 __unused)
{
	uint32_t rv = 0;

	if (IS_ENABLED(CFG_VIRTUALIZATION))
		virt_on_stdcall();

	rv = std_smc_entry(a0, a1, a2, a3);

	if (rv == OPTEE_SMC_RETURN_OK) {
		struct thread_ctx *thr = threads + thread_get_id();

		thread_rpc_shm_cache_clear(&thr->shm_cache);
		if (!thread_prealloc_rpc_cache) {
			thread_rpc_free_arg(mobj_get_cookie(thr->rpc_mobj));
			mobj_put(thr->rpc_mobj);
			thr->rpc_arg = NULL;
			thr->rpc_mobj = NULL;
		}
	}

	return rv;
}

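/*
 * Disables the preallocated RPC argument cache. Only succeeds while all
 * threads are free. On success *cookie is set to the cookie of one cached
 * buffer for the caller to free, or to 0 once no cached buffers remain
 * and the cache has been disabled.
 */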
bool thread_disable_prealloc_rpc_cache(uint64_t *cookie)
{
	bool rv = false;
	size_t n = 0;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	thread_lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state != THREAD_STATE_FREE) {
			rv = false;
			goto out;
		}
	}

	rv = true;

	if (IS_ENABLED(CFG_PREALLOC_RPC_CACHE)) {
		for (n = 0; n < CFG_NUM_THREADS; n++) {
			if (threads[n].rpc_arg) {
				*cookie = mobj_get_cookie(threads[n].rpc_mobj);
				mobj_put(threads[n].rpc_mobj);
				threads[n].rpc_arg = NULL;
				threads[n].rpc_mobj = NULL;
				goto out;
			}
		}
	}

	*cookie = 0;
	thread_prealloc_rpc_cache = false;
out:
	thread_unlock_global();
	thread_unmask_exceptions(exceptions);
	return rv;
}

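/*
 * Re-enables the preallocated RPC argument cache. Only succeeds while all
 * threads are free; a no-op returning true when CFG_PREALLOC_RPC_CACHE is
 * disabled.
 */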
bool thread_enable_prealloc_rpc_cache(void)
{
	bool rv = false;
	size_t n = 0;
	uint32_t exceptions = 0;

	if (!IS_ENABLED(CFG_PREALLOC_RPC_CACHE))
		return true;

	exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	thread_lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state != THREAD_STATE_FREE) {
			rv = false;
			goto out;
		}
	}

	rv = true;
	thread_prealloc_rpc_cache = true;
out:
	thread_unlock_global();
	thread_unmask_exceptions(exceptions);
	return rv;
}

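/*
 * Wraps a buffer supplied by normal world in a mobj, using static shared
 * memory when the buffer lies there and dynamic shared memory (a single,
 * page-aligned small page) otherwise.
 */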
static struct mobj *rpc_shm_mobj_alloc(paddr_t pa, size_t sz, uint64_t cookie)
{
	/* Check if this region is in static shared space */
	if (core_pbuf_is(CORE_MEM_NSEC_SHM, pa, sz))
		return mobj_shm_alloc(pa, sz, cookie);

	if (IS_ENABLED(CFG_CORE_DYN_SHM) &&
	    !(pa & SMALL_PAGE_MASK) && sz <= SMALL_PAGE_SIZE)
		return mobj_mapped_shm_alloc(&pa, 1, 0, cookie);

	return NULL;
}

/**
 * Allocates data for struct optee_msg_arg.
 *
 * @size:	size in bytes of struct optee_msg_arg
 *
 * @returns	mobj that describes allocated buffer or NULL on error
 */
static struct mobj *thread_rpc_alloc_arg(size_t size)
{
	paddr_t pa;
	uint64_t co;
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		OPTEE_SMC_RETURN_RPC_ALLOC, size
	};
	struct mobj *mobj = NULL;

	thread_rpc(rpc_args);

	/* Registers 1 and 2 passed from normal world */
	pa = reg_pair_to_64(rpc_args[0], rpc_args[1]);
	/* Registers 4 and 5 passed from normal world */
	co = reg_pair_to_64(rpc_args[2], rpc_args[3]);

	if (!IS_ALIGNED_WITH_TYPE(pa, struct optee_msg_arg))
		goto err;

	mobj = rpc_shm_mobj_alloc(pa, size, co);
	if (!mobj)
		goto err;

	return mobj;
err:
	thread_rpc_free_arg(co);
	mobj_put(mobj);
	return NULL;
}

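/*
 * Converts a registered memref thread parameter into the
 * OPTEE_MSG_ATTR_TYPE_RMEM_* representation used in the message passed to
 * normal world.
 */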
static bool set_rmem(struct optee_msg_param *param,
		     struct thread_param *tpm)
{
	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
		      OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
	param->u.rmem.offs = tpm->u.memref.offs;
	param->u.rmem.size = tpm->u.memref.size;
	if (tpm->u.memref.mobj) {
		param->u.rmem.shm_ref = mobj_get_cookie(tpm->u.memref.mobj);
		if (!param->u.rmem.shm_ref)
			return false;
	} else {
		param->u.rmem.shm_ref = 0;
	}

	return true;
}

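/*
 * Converts a temporary memref thread parameter into the
 * OPTEE_MSG_ATTR_TYPE_TMEM_* representation, filling in the physical
 * address and the shared memory cookie.
 */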
static bool set_tmem(struct optee_msg_param *param,
		     struct thread_param *tpm)
{
	paddr_t pa = 0;
	uint64_t shm_ref = 0;
	struct mobj *mobj = tpm->u.memref.mobj;

	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
		      OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
	if (mobj) {
		shm_ref = mobj_get_cookie(mobj);
		if (!shm_ref)
			return false;
		if (mobj_get_pa(mobj, tpm->u.memref.offs, 0, &pa))
			return false;
	}

	param->u.tmem.size = tpm->u.memref.size;
	param->u.tmem.buf_ptr = pa;
	param->u.tmem.shm_ref = shm_ref;

	return true;
}

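/*
 * Prepares the per-thread struct optee_msg_arg used for an RPC request,
 * allocating it via RPC the first time a thread needs one, and translates
 * the thread parameters into message parameters. On success *arg_ret and
 * *carg_ret are set to the argument buffer and its cookie.
 */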
static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
			    struct thread_param *params, void **arg_ret,
			    uint64_t *carg_ret)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct optee_msg_arg *arg = thr->rpc_arg;
	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);

	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!arg) {
		struct mobj *mobj = thread_rpc_alloc_arg(sz);

		if (!mobj)
			return TEE_ERROR_OUT_OF_MEMORY;

		arg = mobj_get_va(mobj, 0, sz);
		if (!arg) {
			thread_rpc_free_arg(mobj_get_cookie(mobj));
			return TEE_ERROR_OUT_OF_MEMORY;
		}

		thr->rpc_arg = arg;
		thr->rpc_mobj = mobj;
	}

	memset(arg, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
	arg->cmd = cmd;
	arg->num_params = num_params;
	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */

	for (size_t n = 0; n < num_params; n++) {
		switch (params[n].attr) {
		case THREAD_PARAM_ATTR_NONE:
			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
			break;
		case THREAD_PARAM_ATTR_VALUE_IN:
		case THREAD_PARAM_ATTR_VALUE_OUT:
		case THREAD_PARAM_ATTR_VALUE_INOUT:
			arg->params[n].attr = params[n].attr -
					      THREAD_PARAM_ATTR_VALUE_IN +
					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
			arg->params[n].u.value.a = params[n].u.value.a;
			arg->params[n].u.value.b = params[n].u.value.b;
			arg->params[n].u.value.c = params[n].u.value.c;
			break;
		case THREAD_PARAM_ATTR_MEMREF_IN:
		case THREAD_PARAM_ATTR_MEMREF_OUT:
		case THREAD_PARAM_ATTR_MEMREF_INOUT:
			if (!params[n].u.memref.mobj ||
			    mobj_matches(params[n].u.memref.mobj,
					 CORE_MEM_NSEC_SHM)) {
				if (!set_tmem(arg->params + n, params + n))
					return TEE_ERROR_BAD_PARAMETERS;
			} else if (mobj_matches(params[n].u.memref.mobj,
						CORE_MEM_REG_SHM)) {
				if (!set_rmem(arg->params + n, params + n))
					return TEE_ERROR_BAD_PARAMETERS;
			} else {
				return TEE_ERROR_BAD_PARAMETERS;
			}
			break;
		default:
			return TEE_ERROR_BAD_PARAMETERS;
		}
	}

	*arg_ret = arg;
	*carg_ret = mobj_get_cookie(thr->rpc_mobj);

	return TEE_SUCCESS;
}

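/*
 * Copies output values and updated memref sizes from the message
 * parameters back into the thread parameters once the RPC has returned,
 * and returns the result code reported by normal world.
 */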
static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
				struct thread_param *params)
{
	for (size_t n = 0; n < num_params; n++) {
		switch (params[n].attr) {
		case THREAD_PARAM_ATTR_VALUE_OUT:
		case THREAD_PARAM_ATTR_VALUE_INOUT:
			params[n].u.value.a = arg->params[n].u.value.a;
			params[n].u.value.b = arg->params[n].u.value.b;
			params[n].u.value.c = arg->params[n].u.value.c;
			break;
		case THREAD_PARAM_ATTR_MEMREF_OUT:
		case THREAD_PARAM_ATTR_MEMREF_INOUT:
			/*
			 * rmem.size and tmem.size have the same type and
			 * location.
			 */
			params[n].u.memref.size = arg->params[n].u.rmem.size;
			break;
		default:
			break;
		}
	}

	return arg->ret;
}

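/*
 * Issues an OPTEE_RPC_CMD_* request to normal world with the given
 * parameters and returns the result reported back by normal world.
 */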
uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
			struct thread_param *params)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	void *arg = NULL;
	uint64_t carg = 0;
	uint32_t ret = 0;

	/* The source CRYPTO_RNG_SRC_JITTER_RPC is safe to use here */
	plat_prng_add_jitter_entropy(CRYPTO_RNG_SRC_JITTER_RPC,
				     &thread_rpc_pnum);

	ret = get_rpc_arg(cmd, num_params, params, &arg, &carg);
	if (ret)
		return ret;

	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
	thread_rpc(rpc_args);

	return get_rpc_arg_res(arg, num_params, params);
}

/**
 * Free physical memory previously allocated with thread_rpc_alloc()
 *
 * @cookie:	cookie received when allocating the buffer
 * @bt:		must be the same as supplied when allocating
 * @mobj:	mobj that describes allocated buffer
 *
 * This function also frees corresponding mobj.
 */
static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	void *arg = NULL;
	uint64_t carg = 0;
	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, cookie, 0);
	uint32_t ret = get_rpc_arg(OPTEE_RPC_CMD_SHM_FREE, 1, &param,
				   &arg, &carg);

	mobj_put(mobj);

	if (!ret) {
		reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
		thread_rpc(rpc_args);
	}
}

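/*
 * Validates the reply to an OPTEE_RPC_CMD_SHM_ALLOC request and turns the
 * returned buffer into a mobj, handling both contiguous and noncontiguous
 * (OPTEE_MSG_ATTR_NONCONTIG) buffers.
 */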
static struct mobj *get_rpc_alloc_res(struct optee_msg_arg *arg,
				      unsigned int bt, size_t size)
{
	struct mobj *mobj = NULL;
	uint64_t cookie = 0;
	size_t sz = 0;
	paddr_t p = 0;

	if (arg->ret || arg->num_params != 1)
		return NULL;

	if (arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT &&
	    arg->params[0].attr != (OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				    OPTEE_MSG_ATTR_NONCONTIG))
		return NULL;

	p = arg->params[0].u.tmem.buf_ptr;
	sz = READ_ONCE(arg->params[0].u.tmem.size);
	cookie = arg->params[0].u.tmem.shm_ref;
	if (sz < size)
		return NULL;

	if (arg->params[0].attr == OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT)
		mobj = rpc_shm_mobj_alloc(p, sz, cookie);
	else
		mobj = msg_param_mobj_from_noncontig(p, sz, cookie, true);

	if (!mobj) {
		thread_rpc_free(bt, cookie, mobj);
		return NULL;
	}

	assert(mobj_is_nonsec(mobj));

	return mobj;
}

/**
 * Allocates shared memory buffer via RPC
 *
 * @size:	size in bytes of shared memory buffer
 * @align:	required alignment of buffer
 * @bt:		buffer type OPTEE_RPC_SHM_TYPE_*
 *
 * Returns a pointer to MOBJ for the memory on success, or NULL on failure.
 */
static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	void *arg = NULL;
	uint64_t carg = 0;
	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, size, align);
	uint32_t ret = get_rpc_arg(OPTEE_RPC_CMD_SHM_ALLOC, 1, &param,
				   &arg, &carg);

	if (ret)
		return NULL;

	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
	thread_rpc(rpc_args);

	return get_rpc_alloc_res(arg, bt, size);
}

struct mobj *thread_rpc_alloc_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_APPL);
}

struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
{
	/*
	 * Error out early since kernel private dynamic shared memory
	 * allocations don't currently use the `OPTEE_MSG_ATTR_NONCONTIG` bit
	 * and therefore cannot be larger than a page.
	 */
	if (IS_ENABLED(CFG_CORE_DYN_SHM) && size > SMALL_PAGE_SIZE)
		return NULL;

	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_KERNEL);
}

void thread_rpc_free_kernel_payload(struct mobj *mobj)
{
	thread_rpc_free(OPTEE_RPC_SHM_TYPE_KERNEL, mobj_get_cookie(mobj), mobj);
}

void thread_rpc_free_payload(struct mobj *mobj)
{
	thread_rpc_free(OPTEE_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj),
			mobj);
}

struct mobj *thread_rpc_alloc_global_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_GLOBAL);
}

void thread_rpc_free_global_payload(struct mobj *mobj)
{
	thread_rpc_free(OPTEE_RPC_SHM_TYPE_GLOBAL, mobj_get_cookie(mobj),
			mobj);
}