// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2020-2021, Arm Limited.
 */
#include <bench.h>
#include <crypto/crypto.h>
#include <initcall.h>
#include <kernel/embedded_ts.h>
#include <kernel/ldelf_loader.h>
#include <kernel/secure_partition.h>
#include <kernel/spinlock.h>
#include <kernel/spmc_sp_handler.h>
#include <kernel/thread_spmc.h>
#include <kernel/ts_store.h>
#include <ldelf.h>
#include <mm/core_mmu.h>
#include <mm/fobj.h>
#include <mm/mobj.h>
#include <mm/vm.h>
#include <optee_ffa.h>
#include <stdio.h>
#include <string.h>
#include <tee_api_types.h>
#include <trace.h>
#include <types_ext.h>
#include <utee_defines.h>
#include <util.h>
#include <zlib.h>

#include "thread_private.h"

const struct ts_ops sp_ops;

/* List of all loaded secure partition sessions */
static struct sp_sessions_head open_sp_sessions =
	TAILQ_HEAD_INITIALIZER(open_sp_sessions);

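/*
 * Look up the embedded secure partition image matching @uuid. Returns NULL
 * when no embedded SP carries that UUID.
 */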
static const struct embedded_ts *find_secure_partition(const TEE_UUID *uuid)
{
	const struct embedded_ts *sp = NULL;

	for_each_secure_partition(sp) {
		if (!memcmp(&sp->uuid, uuid, sizeof(*uuid)))
			return sp;
	}
	return NULL;
}

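/* Tell whether @ctx belongs to a secure partition */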
bool is_sp_ctx(struct ts_ctx *ctx)
{
	return ctx && (ctx->ops == &sp_ops);
}

static void set_sp_ctx_ops(struct ts_ctx *ctx)
{
	ctx->ops = &sp_ops;
}

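/*
 * Find the FF-A endpoint ID of the SP identified by @uuid. Returns
 * TEE_ERROR_TARGET_DEAD if a matching SP exists but has panicked.
 */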
TEE_Result sp_find_session_id(const TEE_UUID *uuid, uint32_t *session_id)
{
	struct sp_session *s = NULL;

	TAILQ_FOREACH(s, &open_sp_sessions, link) {
		if (!memcmp(&s->ts_sess.ctx->uuid, uuid, sizeof(*uuid))) {
			if (s->state == sp_dead)
				return TEE_ERROR_TARGET_DEAD;

			*session_id = s->endpoint_id;
			return TEE_SUCCESS;
		}
	}

	return TEE_ERROR_ITEM_NOT_FOUND;
}

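/* Return the session with endpoint ID @session_id, or NULL if none exists */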
struct sp_session *sp_get_session(uint32_t session_id)
{
	struct sp_session *s = NULL;

	TAILQ_FOREACH(s, &open_sp_sessions, link) {
		if (s->endpoint_id == session_id)
			return s;
	}

	return NULL;
}

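/*
 * Fill @fpi with one ffa_partition_info entry per live SP session. On entry
 * *elem_count holds the capacity of @fpi; on return it holds the total
 * number of live sessions. Returns TEE_ERROR_SHORT_BUFFER when that count
 * exceeds the capacity, in which case only the entries that fit were
 * written.
 */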
TEE_Result sp_partition_info_get_all(struct ffa_partition_info *fpi,
				     size_t *elem_count)
{
	size_t in_count = *elem_count;
	struct sp_session *s = NULL;
	size_t count = 0;

	TAILQ_FOREACH(s, &open_sp_sessions, link) {
		if (s->state == sp_dead)
			continue;
		if (count < in_count) {
			spmc_fill_partition_entry(fpi, s->endpoint_id, 1);
			fpi++;
		}
		count++;
	}

	*elem_count = count;
	if (count > in_count)
		return TEE_ERROR_SHORT_BUFFER;

	return TEE_SUCCESS;
}

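/*
 * Check that an SP has exclusive access to @mem: the region must be mapped
 * in @uctx (when a context is given) and must not be shared with any other
 * SP.
 */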
bool sp_has_exclusive_access(struct sp_mem_map_region *mem,
			     struct user_mode_ctx *uctx)
{
	/*
	 * If a context is given, check that the region is mapped in that
	 * context, i.e. that the current SP actually has access to it.
	 */
	if (uctx) {
		struct vm_region *region = NULL;

		/* Make sure that the mobj belongs to the SP */
		TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
			if (region->mobj == mem->mobj)
				break;
		}

		if (!region)
			return false;
	}

	/* Check that it is not shared with another SP */
	return !sp_mem_is_shared(mem);
}

static void sp_init_info(struct sp_ctx *ctx, struct thread_smc_args *args)
{
	struct sp_ffa_init_info *info = NULL;

	/*
	 * When the SP is started for the first time, an init_info struct is
	 * passed to it. Store the struct on the SP stack and pass its
	 * address in x0.
	 */
	ctx->uctx.stack_ptr -= ROUNDUP(sizeof(*info), STACK_ALIGNMENT);

	info = (struct sp_ffa_init_info *)ctx->uctx.stack_ptr;

	info->magic = 0;
	info->count = 0;
	args->a0 = (vaddr_t)info;
}

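/*
 * Allocate the next free FF-A endpoint ID. IDs are handed out in ascending
 * order, starting just above the SPMC's own endpoint ID.
 */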
static uint16_t new_session_id(struct sp_sessions_head *open_sessions)
{
	struct sp_session *last = NULL;
	uint16_t id = SPMC_ENDPOINT_ID + 1;

	last = TAILQ_LAST(open_sessions, sp_sessions_head);
	if (last)
		id = last->endpoint_id + 1;

	assert(id > SPMC_ENDPOINT_ID);
	return id;
}

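/* Allocate and initialize the trusted service context backing session @s */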
static TEE_Result sp_create_ctx(const TEE_UUID *uuid, struct sp_session *s)
{
	TEE_Result res = TEE_SUCCESS;
	struct sp_ctx *spc = NULL;

	/* Register context */
	spc = calloc(1, sizeof(struct sp_ctx));
	if (!spc)
		return TEE_ERROR_OUT_OF_MEMORY;

	spc->uctx.ts_ctx = &spc->ts_ctx;
	spc->open_session = s;
	s->ts_sess.ctx = &spc->ts_ctx;
	spc->ts_ctx.uuid = *uuid;

	res = vm_info_init(&spc->uctx);
	if (res)
		goto err;

	set_sp_ctx_ops(&spc->ts_ctx);

	return TEE_SUCCESS;

err:
	free(spc);
	return res;
}

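/*
 * Create a new session for the SP identified by @uuid, assign it an
 * endpoint ID and append it to @open_sessions.
 */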
static TEE_Result sp_create_session(struct sp_sessions_head *open_sessions,
				    const TEE_UUID *uuid,
				    struct sp_session **sess)
{
	TEE_Result res = TEE_SUCCESS;
	struct sp_session *s = calloc(1, sizeof(struct sp_session));

	if (!s)
		return TEE_ERROR_OUT_OF_MEMORY;

	s->endpoint_id = new_session_id(open_sessions);
	if (!s->endpoint_id) {
		res = TEE_ERROR_OVERFLOW;
		goto err;
	}

	DMSG("Loading Secure Partition %pUl", (void *)uuid);
	res = sp_create_ctx(uuid, s);
	if (res)
		goto err;

	TAILQ_INSERT_TAIL(open_sessions, s, link);
	*sess = s;
	return TEE_SUCCESS;

err:
	free(s);
	return res;
}

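/*
 * Prepare the register state for the SP's first run: point SP at the user
 * stack and PC at the entry function.
 */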
static TEE_Result sp_init_set_registers(struct sp_ctx *ctx)
{
	struct thread_ctx_regs *sp_regs = &ctx->sp_regs;

	memset(sp_regs, 0, sizeof(*sp_regs));
	sp_regs->sp = ctx->uctx.stack_ptr;
	sp_regs->pc = ctx->uctx.entry_func;

	return TEE_SUCCESS;
}

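/*
 * Map the shared memory regions described by @smem into the address space
 * of the receiving SP @s, with the access permissions requested by
 * @receiver. The SPMC chooses the virtual address: a non-zero *va is
 * rejected.
 */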
TEE_Result sp_map_shared(struct sp_session *s,
			 struct sp_mem_receiver *receiver,
			 struct sp_mem *smem,
			 uint64_t *va)
{
	TEE_Result res = TEE_SUCCESS;
	struct sp_ctx *ctx = NULL;
	uint32_t perm = TEE_MATTR_UR;
	struct sp_mem_map_region *reg = NULL;

	ctx = to_sp_ctx(s->ts_sess.ctx);

	/* Get the permissions */
	if (receiver->perm.perm & FFA_MEM_ACC_EXE)
		perm |= TEE_MATTR_UX;

	if (receiver->perm.perm & FFA_MEM_ACC_RW) {
		if (receiver->perm.perm & FFA_MEM_ACC_EXE)
			return TEE_ERROR_ACCESS_CONFLICT;

		perm |= TEE_MATTR_UW;
	}
	/*
	 * Passing a requested va is currently not supported: a single share
	 * (smem) can consist of multiple regions, each backed by its own
	 * mobj, and there is currently no way to guarantee that they all end
	 * up mapped at contiguous virtual addresses.
	 */
	if (*va)
		return TEE_ERROR_NOT_SUPPORTED;

	SLIST_FOREACH(reg, &smem->regions, link) {
		res = vm_map(&ctx->uctx, va, reg->page_count * SMALL_PAGE_SIZE,
			     perm, 0, reg->mobj, reg->page_offset);

		if (res != TEE_SUCCESS) {
			EMSG("Failed to map memory region %#"PRIx32, res);
			return res;
		}
	}
	return TEE_SUCCESS;
}

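/* Unmap every region of the FF-A memory share @smem from SP @s */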
TEE_Result sp_unmap_ffa_regions(struct sp_session *s, struct sp_mem *smem)
{
	TEE_Result res = TEE_SUCCESS;
	vaddr_t vaddr = 0;
	size_t len = 0;
	struct sp_ctx *ctx = to_sp_ctx(s->ts_sess.ctx);
	struct sp_mem_map_region *reg = NULL;

	SLIST_FOREACH(reg, &smem->regions, link) {
		vaddr = (vaddr_t)sp_mem_get_va(&ctx->uctx, reg->page_offset,
					       reg->mobj);
		len = reg->page_count * SMALL_PAGE_SIZE;

		res = vm_unmap(&ctx->uctx, vaddr, len);
		if (res != TEE_SUCCESS)
			return res;
	}

	return TEE_SUCCESS;
}

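/*
 * Create a session for the embedded SP identified by @uuid, load its ELF
 * image with ldelf and leave the session in the sp_idle state, ready for
 * its first run.
 */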
static TEE_Result sp_open_session(struct sp_session **sess,
				  struct sp_sessions_head *open_sessions,
				  const TEE_UUID *uuid)
{
	TEE_Result res = TEE_SUCCESS;
	struct sp_session *s = NULL;
	struct sp_ctx *ctx = NULL;

	if (!find_secure_partition(uuid))
		return TEE_ERROR_ITEM_NOT_FOUND;

	res = sp_create_session(open_sessions, uuid, &s);
	if (res != TEE_SUCCESS) {
		DMSG("sp_create_session failed %#"PRIx32, res);
		return res;
	}

	ctx = to_sp_ctx(s->ts_sess.ctx);
	assert(ctx);
	if (!ctx)
		return TEE_ERROR_TARGET_DEAD;
	*sess = s;

	ts_push_current_session(&s->ts_sess);
	/* Load the SP using ldelf. */
	ldelf_load_ldelf(&ctx->uctx);
	res = ldelf_init_with_ldelf(&s->ts_sess, &ctx->uctx);

	if (res != TEE_SUCCESS) {
		EMSG("Failed loading SP using ldelf %#"PRIx32, res);
		ts_pop_current_session();
		return TEE_ERROR_TARGET_DEAD;
	}

	/* Make the SP ready for its first run */
	s->state = sp_idle;
	s->caller_id = 0;
	sp_init_set_registers(ctx);
	ts_pop_current_session();

	return TEE_SUCCESS;
}

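/*
 * Open a session for the SP identified by @uuid and run it for the first
 * time, passing it its init_info struct and handling the first FF-A
 * message it sends.
 */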
static TEE_Result sp_init_uuid(const TEE_UUID *uuid)
{
	TEE_Result res = TEE_SUCCESS;
	struct sp_session *sess = NULL;
	struct thread_smc_args args = { };

	res = sp_open_session(&sess,
			      &open_sp_sessions,
			      uuid);
	if (res)
		return res;

	ts_push_current_session(&sess->ts_sess);
	sp_init_info(to_sp_ctx(sess->ts_sess.ctx), &args);
	ts_pop_current_session();

	if (sp_enter(&args, sess))
		return FFA_ABORTED;

	spmc_sp_msg_handler(&args, sess);

	return TEE_SUCCESS;
}

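/*
 * Enter the SP @sp: copy the FF-A arguments (a0-a7) into the saved register
 * state, run the SP and copy its return values (x0-x7) back into @args.
 */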
TEE_Result sp_enter(struct thread_smc_args *args, struct sp_session *sp)
{
	TEE_Result res = FFA_OK;
	struct sp_ctx *ctx = to_sp_ctx(sp->ts_sess.ctx);

	ctx->sp_regs.x[0] = args->a0;
	ctx->sp_regs.x[1] = args->a1;
	ctx->sp_regs.x[2] = args->a2;
	ctx->sp_regs.x[3] = args->a3;
	ctx->sp_regs.x[4] = args->a4;
	ctx->sp_regs.x[5] = args->a5;
	ctx->sp_regs.x[6] = args->a6;
	ctx->sp_regs.x[7] = args->a7;

	res = sp->ts_sess.ctx->ops->enter_invoke_cmd(&sp->ts_sess, 0);

	args->a0 = ctx->sp_regs.x[0];
	args->a1 = ctx->sp_regs.x[1];
	args->a2 = ctx->sp_regs.x[2];
	args->a3 = ctx->sp_regs.x[3];
	args->a4 = ctx->sp_regs.x[4];
	args->a5 = ctx->sp_regs.x[5];
	args->a6 = ctx->sp_regs.x[6];
	args->a7 = ctx->sp_regs.x[7];

	return res;
}

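/*
 * Switch to user mode and run the SP with the register state saved in its
 * context. If the SP panics, the session is marked sp_dead and
 * TEE_ERROR_TARGET_DEAD is returned.
 */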
static TEE_Result sp_enter_invoke_cmd(struct ts_session *s,
				      uint32_t cmd __unused)
{
	struct sp_ctx *ctx = to_sp_ctx(s->ctx);
	TEE_Result res = TEE_SUCCESS;
	uint32_t exceptions = 0;
	uint64_t cpsr = 0;
	struct sp_session *sp_s = to_sp_session(s);
	struct ts_session *sess = NULL;
	struct thread_ctx_regs *sp_regs = NULL;
	uint32_t panicked = false;
	uint32_t panic_code = 0;

	bm_timestamp();

	sp_regs = &ctx->sp_regs;
	ts_push_current_session(s);

	cpsr = sp_regs->cpsr;
	sp_regs->cpsr = read_daif() & (SPSR_64_DAIF_MASK << SPSR_64_DAIF_SHIFT);

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	__thread_enter_user_mode(sp_regs, &panicked, &panic_code);
	sp_regs->cpsr = cpsr;
	thread_unmask_exceptions(exceptions);

	thread_user_clear_vfp(&ctx->uctx);

	if (panicked) {
		DMSG("SP panicked with code %#"PRIx32, panic_code);
		abort_print_current_ts();

		sess = ts_pop_current_session();
		cpu_spin_lock(&sp_s->spinlock);
		sp_s->state = sp_dead;
		cpu_spin_unlock(&sp_s->spinlock);

		return TEE_ERROR_TARGET_DEAD;
	}

	sess = ts_pop_current_session();
	assert(sess == s);

	bm_timestamp();

	return res;
}

/* We currently don't support 32-bit SPs */
#ifdef ARM64
static void sp_svc_store_registers(struct thread_svc_regs *regs,
				   struct thread_ctx_regs *sp_regs)
{
	COMPILE_TIME_ASSERT(sizeof(sp_regs->x[0]) == sizeof(regs->x0));
	memcpy(sp_regs->x, &regs->x0, 31 * sizeof(regs->x0));
	sp_regs->pc = regs->elr;
	sp_regs->sp = regs->sp_el0;
}
#endif

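/*
 * SVC handler for SPs: save the SP's register state and return to S-EL1 so
 * the FF-A call encoded in those registers can be handled by the SPMC.
 */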
static bool sp_handle_svc(struct thread_svc_regs *regs)
{
	struct ts_session *ts = ts_get_current_session();
	struct sp_ctx *uctx = to_sp_ctx(ts->ctx);
	struct sp_session *s = uctx->open_session;

	assert(s);

	sp_svc_store_registers(regs, &uctx->sp_regs);

	regs->x0 = 0;
	regs->x1 = 0; /* panic */
	regs->x2 = 0; /* panic code */

	/*
	 * All the registers of the SP are saved in the SP session by the SVC
	 * handler.
	 * We always return to S-EL1 after handling the SVC. Execution
	 * continues in sp_enter_invoke_cmd() (the return from
	 * __thread_enter_user_mode()). sp_enter() then copies the FF-A
	 * parameters (a0-a7) from the saved registers into the
	 * thread_smc_args object, which spmc_sp_msg_handler() uses to handle
	 * the FF-A message sent by the SP.
	 */
	return false;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct ts_ops sp_ops __weak __rodata_unpaged("sp_ops") = {
	.enter_invoke_cmd = sp_enter_invoke_cmd,
	.handle_svc = sp_handle_svc,
};

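/*
 * Load and start every embedded SP at boot. A failing SP panics the system
 * unless CFG_SP_SKIP_FAILED is enabled, in which case it is skipped.
 */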
static TEE_Result sp_init_all(void)
{
	TEE_Result res = TEE_SUCCESS;
	const struct embedded_ts *sp = NULL;
	char __maybe_unused msg[60] = { '\0', };

	for_each_secure_partition(sp) {
		if (sp->uncompressed_size)
			snprintf(msg, sizeof(msg),
				 " (compressed, uncompressed %u)",
				 sp->uncompressed_size);
		else
			msg[0] = '\0';
		DMSG("SP %pUl size %u%s", (void *)&sp->uuid, sp->size, msg);

		res = sp_init_uuid(&sp->uuid);

		if (res != TEE_SUCCESS) {
			EMSG("Failed initializing SP(%pUl) err:%#"PRIx32,
			     &sp->uuid, res);
			if (!IS_ENABLED(CFG_SP_SKIP_FAILED))
				panic();
		}
	}

	return TEE_SUCCESS;
}

boot_final(sp_init_all);

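/* Open an embedded SP image through the trusted service store interface */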
static TEE_Result secure_partition_open(const TEE_UUID *uuid,
					struct ts_store_handle **h)
{
	return emb_ts_open(uuid, h, find_secure_partition);
}

REGISTER_SP_STORE(2) = {
	.description = "SP store",
	.open = secure_partition_open,
	.get_size = emb_ts_get_size,
	.get_tag = emb_ts_get_tag,
	.read = emb_ts_read,
	.close = emb_ts_close,
};