// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2015-2021 Linaro Limited
 * Copyright (c) 2020, Arm Limited.
 */

#include <assert.h>
#include <compiler.h>
#include <crypto/crypto.h>
#include <ctype.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/ldelf_loader.h>
#include <kernel/linker.h>
#include <kernel/panic.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/ts_store.h>
#include <kernel/user_access.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/user_ta.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/file.h>
#include <mm/fobj.h>
#include <mm/mobj.h>
#include <mm/pgt_cache.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <optee_rpc_cmd.h>
#include <printk.h>
#include <signed_hdr.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>
#include <ta_pub_key.h>
#include <tee/arch_svc.h>
#include <tee/tee_cryp_utl.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_svc.h>
#include <tee/tee_svc_storage.h>
#include <tee/uuid.h>
#include <trace.h>
#include <types_ext.h>
#include <utee_defines.h>
#include <util.h>

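/*
 * Fill in the user mode struct utee_params from the kernel side
 * struct tee_ta_param: memory references use the user space addresses
 * in va[], value parameters copy a/b directly and unused slots are
 * zeroed.
 */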
static void init_utee_param(struct utee_params *up,
			const struct tee_ta_param *p, void *va[TEE_NUM_PARAMS])
{
	size_t n;

	up->types = p->types;
	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uintptr_t a;
		uintptr_t b;

		switch (TEE_PARAM_TYPE_GET(p->types, n)) {
		case TEE_PARAM_TYPE_MEMREF_INPUT:
		case TEE_PARAM_TYPE_MEMREF_OUTPUT:
		case TEE_PARAM_TYPE_MEMREF_INOUT:
			a = (uintptr_t)va[n];
			b = p->u[n].mem.size;
			break;
		case TEE_PARAM_TYPE_VALUE_INPUT:
		case TEE_PARAM_TYPE_VALUE_INOUT:
			a = p->u[n].val.a;
			b = p->u[n].val.b;
			break;
		default:
			a = 0;
			b = 0;
			break;
		}
		/* See comment for struct utee_params in utee_types.h */
		up->vals[n * 2] = a;
		up->vals[n * 2 + 1] = b;
	}
}

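/*
 * Copy results back from the user mode struct utee_params: updated
 * sizes for output/inout memory references and a/b for output/inout
 * value parameters.
 */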
static void update_from_utee_param(struct tee_ta_param *p,
			const struct utee_params *up)
{
	size_t n;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		switch (TEE_PARAM_TYPE_GET(p->types, n)) {
		case TEE_PARAM_TYPE_MEMREF_OUTPUT:
		case TEE_PARAM_TYPE_MEMREF_INOUT:
			/* See comment for struct utee_params in utee_types.h */
			p->u[n].mem.size = up->vals[n * 2 + 1];
			break;
		case TEE_PARAM_TYPE_VALUE_OUTPUT:
		case TEE_PARAM_TYPE_VALUE_INOUT:
			/* See comment for struct utee_params in utee_types.h */
			p->u[n].val.a = up->vals[n * 2];
			p->u[n].val.b = up->vals[n * 2 + 1];
			break;
		default:
			break;
		}
	}
}

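/*
 * Per-thread syscall recursion accounting: inc_recursion() returns
 * false once CFG_CORE_MAX_SYSCALL_RECURSION is reached, and each
 * successful increment must be balanced with dec_recursion().
 */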
static bool inc_recursion(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	if (tsd->syscall_recursion >= CFG_CORE_MAX_SYSCALL_RECURSION) {
		DMSG("Maximum allowed recursion depth reached (%u)",
		     CFG_CORE_MAX_SYSCALL_RECURSION);
		return false;
	}

	tsd->syscall_recursion++;
	return true;
}

static void dec_recursion(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	assert(tsd->syscall_recursion);
	tsd->syscall_recursion--;
}

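/*
 * Enter a user TA at one of its utee entry functions: map the
 * parameters into the TA address space, push the session, place a
 * struct utee_params copy at the top of the user stack and switch to
 * user mode. On return, output parameters are copied back, the
 * parameter mappings are removed and any pending cancellation is
 * cleared.
 */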
static TEE_Result user_ta_enter(struct ts_session *session,
				enum utee_entry_func func, uint32_t cmd)
{
	TEE_Result res = TEE_SUCCESS;
	struct utee_params *usr_params = NULL;
	uaddr_t usr_stack = 0;
	struct user_ta_ctx *utc = to_user_ta_ctx(session->ctx);
	struct tee_ta_session *ta_sess = to_ta_session(session);
	struct ts_session *ts_sess __maybe_unused = NULL;
	void *param_va[TEE_NUM_PARAMS] = { NULL };

	if (!inc_recursion()) {
		/* Using this error code since we've run out of resources. */
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out_clr_cancel;
	}
	if (ta_sess->param) {
		/* Map user space memory */
		res = vm_map_param(&utc->uctx, ta_sess->param, param_va);
		if (res != TEE_SUCCESS)
			goto out;
	}

	/* Switch to user ctx */
	ts_push_current_session(session);

	/* Make room for usr_params at top of stack */
	usr_stack = utc->uctx.stack_ptr;
	usr_stack -= ROUNDUP(sizeof(struct utee_params), STACK_ALIGNMENT);
	usr_params = (struct utee_params *)usr_stack;
	if (ta_sess->param)
		init_utee_param(usr_params, ta_sess->param, param_va);
	else
		memset(usr_params, 0, sizeof(*usr_params));

	res = thread_enter_user_mode(func, kaddr_to_uref(session),
				     (vaddr_t)usr_params, cmd, usr_stack,
				     utc->uctx.entry_func, utc->uctx.is_32bit,
				     &utc->ta_ctx.panicked,
				     &utc->ta_ctx.panic_code);

	thread_user_clear_vfp(&utc->uctx);

	if (utc->ta_ctx.panicked) {
		abort_print_current_ts();
		DMSG("user_ta_enter: TA panicked with code 0x%x",
		     utc->ta_ctx.panic_code);
		res = TEE_ERROR_TARGET_DEAD;
	} else {
		/*
		 * According to the GP spec the origin should always be set
		 * to the TA after TA execution.
		 */
		ta_sess->err_origin = TEE_ORIGIN_TRUSTED_APP;
	}

	if (ta_sess->param) {
		/* Copy out value results */
		update_from_utee_param(ta_sess->param, usr_params);

		/*
		 * Clear out the parameter mappings added with
		 * vm_map_param() above.
		 */
		vm_clean_param(&utc->uctx);
	}

	ts_sess = ts_pop_current_session();
	assert(ts_sess == session);

out:
	dec_recursion();
out_clr_cancel:
	/*
	 * Clear the cancel state now that the user TA has returned. The next
	 * time the TA is invoked it will be with a new operation and should
	 * not have an old cancellation pending.
	 */
	ta_sess->cancel = false;

	return res;
}

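/*
 * ts_ops entry points, each entering the TA at the matching utee
 * entry function.
 */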
static TEE_Result user_ta_enter_open_session(struct ts_session *s)
{
	return user_ta_enter(s, UTEE_ENTRY_FUNC_OPEN_SESSION, 0);
}

static TEE_Result user_ta_enter_invoke_cmd(struct ts_session *s, uint32_t cmd)
{
	return user_ta_enter(s, UTEE_ENTRY_FUNC_INVOKE_COMMAND, cmd);
}

static void user_ta_enter_close_session(struct ts_session *s)
{
	/* Only if the TA was fully initialized by ldelf */
	if (!to_user_ta_ctx(s->ctx)->uctx.is_initializing)
		user_ta_enter(s, UTEE_ENTRY_FUNC_CLOSE_SESSION, 0);
}

static void dump_state_no_ldelf_dbg(struct user_ta_ctx *utc)
{
	user_mode_ctx_print_mappings(&utc->uctx);
}

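/*
 * Dump the state of a user TA, preferably via ldelf which can resolve
 * symbols, falling back to a plain mapping dump if ldelf is not
 * available or fails.
 */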
static void user_ta_dump_state(struct ts_ctx *ctx)
{
	struct user_ta_ctx *utc = to_user_ta_ctx(ctx);

	if (utc->uctx.dump_entry_func) {
		TEE_Result res = ldelf_dump_state(&utc->uctx);

		if (!res || res == TEE_ERROR_TARGET_DEAD)
			return;
		/*
		 * Fall back to dump_state_no_ldelf_dbg() if
		 * ldelf_dump_state() fails for some reason.
		 *
		 * If ldelf_dump_state() failed with a panic we are done
		 * since abort_print_current_ts() will be called, which
		 * dumps the memory map.
		 */
	}

	dump_state_no_ldelf_dbg(utc);
}

#ifdef CFG_FTRACE_SUPPORT
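/*
 * Collect the function trace buffer from ldelf and hand it to normal
 * world through an OPTEE_RPC_CMD_FTRACE RPC, prefixed with the TA UUID
 * and the TEE core load address.
 */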
static void user_ta_dump_ftrace(struct ts_ctx *ctx)
{
	uint32_t prot = TEE_MATTR_URW;
	struct user_ta_ctx *utc = to_user_ta_ctx(ctx);
	struct thread_param params[3] = { };
	TEE_Result res = TEE_SUCCESS;
	struct mobj *mobj = NULL;
	uint8_t *ubuf = NULL;
	void *buf = NULL;
	size_t pl_sz = 0;
	size_t blen = 0, ld_addr_len = 0;
	vaddr_t va = 0;

	res = ldelf_dump_ftrace(&utc->uctx, NULL, &blen);
	if (res != TEE_ERROR_SHORT_BUFFER)
		return;

#define LOAD_ADDR_DUMP_SIZE	64
	pl_sz = ROUNDUP(blen + sizeof(TEE_UUID) + LOAD_ADDR_DUMP_SIZE,
			SMALL_PAGE_SIZE);

	mobj = thread_rpc_alloc_payload(pl_sz);
	if (!mobj) {
		EMSG("Ftrace thread_rpc_alloc_payload failed");
		return;
	}

	buf = mobj_get_va(mobj, 0, pl_sz);
	if (!buf)
		goto out_free_pl;

	res = vm_map(&utc->uctx, &va, mobj->size, prot, VM_FLAG_EPHEMERAL,
		     mobj, 0);
	if (res)
		goto out_free_pl;

	ubuf = (uint8_t *)va + mobj_get_phys_offs(mobj, mobj->phys_granule);
	memcpy(ubuf, &ctx->uuid, sizeof(TEE_UUID));
	ubuf += sizeof(TEE_UUID);

	ld_addr_len = snprintk((char *)ubuf, LOAD_ADDR_DUMP_SIZE,
			       "TEE load address @ %#"PRIxVA"\n",
			       VCORE_START_VA);
	ubuf += ld_addr_len;

	res = ldelf_dump_ftrace(&utc->uctx, ubuf, &blen);
	if (res) {
		EMSG("Ftrace dump failed: %#"PRIx32, res);
		goto out_unmap_pl;
	}

	params[0] = THREAD_PARAM_VALUE(INOUT, 0, 0, 0);
	params[1] = THREAD_PARAM_MEMREF(IN, mobj, 0, sizeof(TEE_UUID));
	params[2] = THREAD_PARAM_MEMREF(IN, mobj, sizeof(TEE_UUID),
					blen + ld_addr_len);

	res = thread_rpc_cmd(OPTEE_RPC_CMD_FTRACE, 3, params);
	if (res)
		EMSG("Ftrace thread_rpc_cmd res: %#"PRIx32, res);

out_unmap_pl:
	res = vm_unmap(&utc->uctx, va, mobj->size);
	assert(!res);
out_free_pl:
	thread_rpc_free_payload(mobj);
}
#endif /*CFG_FTRACE_SUPPORT*/

#ifdef CFG_TA_GPROF_SUPPORT
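/*
 * Suspend or resume user mode execution time accounting for the
 * current session when gprof sampling is toggled.
 */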
static void user_ta_gprof_set_status(enum ts_gprof_status status)
{
	if (status == TS_GPROF_SUSPEND)
		tee_ta_update_session_utime_suspend();
	else
		tee_ta_update_session_utime_resume();
}
#endif /*CFG_TA_GPROF_SUPPORT*/

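/*
 * Release everything owned by a user TA context: pager regions,
 * sessions opened by the TA, the VM info, crypto states and objects,
 * and storage enumerators.
 */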
static void free_utc(struct user_ta_ctx *utc)
{
	tee_pager_rem_um_regions(&utc->uctx);

	/*
	 * Close sessions opened by this TA
	 * Note that tee_ta_close_session() removes the item
	 * from the utc->open_sessions list.
	 */
	while (!TAILQ_EMPTY(&utc->open_sessions)) {
		tee_ta_close_session(TAILQ_FIRST(&utc->open_sessions),
				     &utc->open_sessions, KERN_IDENTITY);
	}

	vm_info_final(&utc->uctx);

	/* Free cryp states created by this TA */
	tee_svc_cryp_free_states(utc);
	/* Close cryp objects opened by this TA */
	tee_obj_close_all(utc);
	/* Free storage enumerators created by this TA */
	tee_svc_storage_close_all_enum(utc);
	free(utc);
}

static void user_ta_ctx_destroy(struct ts_ctx *ctx)
{
	free_utc(to_user_ta_ctx(ctx));
}

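/* The address space ID (ASID) doubles as the TA instance ID. */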
static uint32_t user_ta_get_instance_id(struct ts_ctx *ctx)
{
	return to_user_ta_ctx(ctx)->uctx.vm_info.asid;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct ts_ops user_ta_ops __weak __rodata_unpaged("user_ta_ops") = {
	.enter_open_session = user_ta_enter_open_session,
	.enter_invoke_cmd = user_ta_enter_invoke_cmd,
	.enter_close_session = user_ta_enter_close_session,
	.dump_state = user_ta_dump_state,
#ifdef CFG_FTRACE_SUPPORT
	.dump_ftrace = user_ta_dump_ftrace,
#endif
	.destroy = user_ta_ctx_destroy,
	.get_instance_id = user_ta_get_instance_id,
	.handle_svc = user_ta_handle_svc,
#ifdef CFG_TA_GPROF_SUPPORT
	.gprof_set_status = user_ta_gprof_set_status,
#endif
};

static void set_ta_ctx_ops(struct tee_ta_ctx *ctx)
{
	ctx->ts_ctx.ops = &user_ta_ops;
}

bool is_user_ta_ctx(struct ts_ctx *ctx)
{
	return ctx && ctx->ops == &user_ta_ops;
}

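/* List the registered TA stores in the debug trace at service init. */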
static TEE_Result check_ta_store(void)
{
	const struct ts_store_ops *op = NULL;

	SCATTERED_ARRAY_FOREACH(op, ta_stores, struct ts_store_ops)
		DMSG("TA store: \"%s\"", op->description);

	return TEE_SUCCESS;
}
service_init(check_ta_store);

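/*
 * Create and register a user TA context for the session: allocate and
 * initialize the context, insert it on tee_ctxes so other threads can
 * find it, then let ldelf load and initialize the TA. On failure the
 * partially initialized context is torn down again.
 */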
TEE_Result tee_ta_init_user_ta_session(const TEE_UUID *uuid,
				       struct tee_ta_session *s)
{
	TEE_Result res = TEE_SUCCESS;
	struct user_ta_ctx *utc = NULL;

	utc = calloc(1, sizeof(struct user_ta_ctx));
	if (!utc)
		return TEE_ERROR_OUT_OF_MEMORY;

	utc->uctx.is_initializing = true;
	TAILQ_INIT(&utc->open_sessions);
	TAILQ_INIT(&utc->cryp_states);
	TAILQ_INIT(&utc->objects);
	TAILQ_INIT(&utc->storage_enums);
	condvar_init(&utc->ta_ctx.busy_cv);
	utc->ta_ctx.ref_count = 1;

	utc->uctx.ts_ctx = &utc->ta_ctx.ts_ctx;

	/*
	 * Set the context TA operations. The generic implementation uses
	 * them to distinguish user TA contexts from pseudo TA contexts.
	 */
	set_ta_ctx_ops(&utc->ta_ctx);

	utc->ta_ctx.ts_ctx.uuid = *uuid;
	res = vm_info_init(&utc->uctx);
	if (res)
		goto out;

	mutex_lock(&tee_ta_mutex);
	s->ts_sess.ctx = &utc->ta_ctx.ts_ctx;
	s->ts_sess.handle_svc = s->ts_sess.ctx->ops->handle_svc;
	/*
	 * Another thread trying to load this same TA may need to wait
	 * until this context is fully initialized. This is needed to
	 * handle single instance TAs.
	 */
	TAILQ_INSERT_TAIL(&tee_ctxes, &utc->ta_ctx, link);
	mutex_unlock(&tee_ta_mutex);

	/*
	 * We must not hold tee_ta_mutex while allocating page tables as
	 * that may otherwise lead to a deadlock.
	 */
	ts_push_current_session(&s->ts_sess);

	res = ldelf_load_ldelf(&utc->uctx);
	if (!res)
		res = ldelf_init_with_ldelf(&s->ts_sess, &utc->uctx);

	ts_pop_current_session();

	mutex_lock(&tee_ta_mutex);

	if (!res) {
		utc->uctx.is_initializing = false;
	} else {
		s->ts_sess.ctx = NULL;
		TAILQ_REMOVE(&tee_ctxes, &utc->ta_ctx, link);
	}

	/* The state has changed for the context, notify any waiters. */
	condvar_broadcast(&tee_ta_init_cv);

	mutex_unlock(&tee_ta_mutex);

out:
	if (res) {
		condvar_destroy(&utc->ta_ctx.busy_cv);
		pgt_flush_ctx(&utc->ta_ctx.ts_ctx);
		free_utc(utc);
	}

	return res;
}