// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2015-2020, Linaro Limited
 * Copyright (c) 2020-2021, Arm Limited
 */

#include <assert.h>
#include <kernel/ldelf_loader.h>
#include <kernel/ldelf_syscalls.h>
#include <ldelf.h>
#include <mm/mobj.h>
#include <mm/vm.h>
#include <string.h>
#include <tee/arch_svc.h>
#include <trace.h>

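/*
 * Symbols exported by the ldelf binary embedded into the TEE core at build
 * time: the raw image, the sizes of its code and data segments, and the
 * offset of its entry point within the code segment.
 */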
extern uint8_t ldelf_data[];
extern const unsigned int ldelf_code_size;
extern const unsigned int ldelf_data_size;
extern const unsigned int ldelf_entry;

/* ldelf has the same architecture/register width as the kernel */
#ifdef ARM32
static const bool is_arm32 = true;
#else
static const bool is_arm32;
#endif

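/*
 * Allocate enough TA memory pages to hold @sz bytes, back them with a file
 * object (fobj), and map them into @uctx with protection @prot. On success
 * the virtual address of the mapping is returned in @va.
 */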
static TEE_Result alloc_and_map_ldelf_fobj(struct user_mode_ctx *uctx,
					   size_t sz, uint32_t prot,
					   vaddr_t *va)
{
	size_t num_pgs = ROUNDUP(sz, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
	struct fobj *fobj = fobj_ta_mem_alloc(num_pgs);
	struct mobj *mobj = mobj_with_fobj_alloc(fobj, NULL);
	TEE_Result res = TEE_SUCCESS;

	/*
	 * The mobj takes its own reference to the fobj (and fails on a NULL
	 * fobj), so the local reference can be dropped here.
	 */
	fobj_put(fobj);
	if (!mobj)
		return TEE_ERROR_OUT_OF_MEMORY;
	res = vm_map(uctx, va, num_pgs * SMALL_PAGE_SIZE,
		     prot, VM_FLAG_LDELF, mobj, 0);
	/* vm_map() took its own reference to the mobj on success */
	mobj_put(mobj);

	return res;
}

/*
 * Map ldelf's stack, code and data segments into @uctx and copy the
 * embedded ldelf image into place.
 *
 * This function may leave a few mappings behind on error, but that's taken
 * care of by tee_ta_init_user_ta_session() since the entire context is
 * removed then.
 */
TEE_Result ldelf_load_ldelf(struct user_mode_ctx *uctx)
{
	TEE_Result res = TEE_SUCCESS;
	vaddr_t stack_addr = 0;
	vaddr_t code_addr = 0;
	vaddr_t rw_addr = 0;
	uint32_t prot = 0;

	uctx->is_32bit = is_arm32;

	res = alloc_and_map_ldelf_fobj(uctx, LDELF_STACK_SIZE,
				       TEE_MATTR_URW | TEE_MATTR_PRW,
				       &stack_addr);
	if (res)
		return res;
	uctx->ldelf_stack_ptr = stack_addr + LDELF_STACK_SIZE;

	/*
	 * Map the code segment kernel-writable only for now so the image
	 * can be copied in; it's remapped user-executable below.
	 */
	res = alloc_and_map_ldelf_fobj(uctx, ldelf_code_size, TEE_MATTR_PRW,
				       &code_addr);
	if (res)
		return res;
	uctx->entry_func = code_addr + ldelf_entry;

	/* Try to map the data segment right after the code segment */
	rw_addr = ROUNDUP(code_addr + ldelf_code_size, SMALL_PAGE_SIZE);
	res = alloc_and_map_ldelf_fobj(uctx, ldelf_data_size,
				       TEE_MATTR_URW | TEE_MATTR_PRW, &rw_addr);
	if (res)
		return res;

	vm_set_ctx(uctx->ts_ctx);

	memcpy((void *)code_addr, ldelf_data, ldelf_code_size);
	memcpy((void *)rw_addr, ldelf_data + ldelf_code_size, ldelf_data_size);

	prot = TEE_MATTR_URX;
	if (IS_ENABLED(CFG_CORE_BTI))
		prot |= TEE_MATTR_GUARDED;

	res = vm_set_prot(uctx, code_addr,
			  ROUNDUP(ldelf_code_size, SMALL_PAGE_SIZE), prot);
	if (res)
		return res;

	DMSG("ldelf load address %#"PRIxVA, code_addr);

	return TEE_SUCCESS;
}

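/*
 * Run ldelf to load and relocate the ELF image for @uctx. The ldelf_arg
 * structure is placed at the top of ldelf's stack; on return it carries the
 * entry points and stack pointer that ldelf resolved.
 */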
TEE_Result ldelf_init_with_ldelf(struct ts_session *sess,
				 struct user_mode_ctx *uctx)
{
	TEE_Result res = TEE_SUCCESS;
	struct ldelf_arg *arg = NULL;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	uaddr_t usr_stack = 0;

	usr_stack = uctx->ldelf_stack_ptr;
	usr_stack -= ROUNDUP(sizeof(*arg), STACK_ALIGNMENT);
	arg = (struct ldelf_arg *)usr_stack;
	memset(arg, 0, sizeof(*arg));
	arg->uuid = uctx->ts_ctx->uuid;
	sess->handle_svc = ldelf_handle_svc;

	res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
				     usr_stack, uctx->entry_func,
				     is_arm32, &panicked, &panic_code);

	/* Restore the context's own syscall handler */
	sess->handle_svc = sess->ctx->ops->handle_svc;
	thread_user_clear_vfp(uctx);
	ldelf_sess_cleanup(sess);

	if (panicked) {
		abort_print_current_ts();
		EMSG("ldelf panicked");
		return TEE_ERROR_GENERIC;
	}
	if (res) {
		EMSG("ldelf failed with res: %#"PRIx32, res);
		return res;
	}

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_READ |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)arg, sizeof(*arg));
	if (res)
		return res;

	if (is_user_ta_ctx(uctx->ts_ctx)) {
		/*
		 * This is already checked by the ELF loader, but since it
		 * runs in user mode we're not trusting it entirely.
		 */
		if (arg->flags & ~TA_FLAGS_MASK)
			return TEE_ERROR_BAD_FORMAT;

		to_user_ta_ctx(uctx->ts_ctx)->ta_ctx.flags = arg->flags;
	}

	uctx->is_32bit = arg->is_32bit;
	uctx->entry_func = arg->entry_func;
	uctx->stack_ptr = arg->stack_ptr;
	uctx->dump_entry_func = arg->dump_entry;
#ifdef CFG_FTRACE_SUPPORT
	uctx->ftrace_entry_func = arg->ftrace_entry;
	sess->fbuf = arg->fbuf;
#endif
	uctx->dl_entry_func = arg->dl_entry;

	return TEE_SUCCESS;
}

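/*
 * Ask ldelf to print a state dump (registers plus a map of the
 * user-accessible regions) for the current thread. The dump_entry_arg
 * structure and its array of dump_map entries are placed on ldelf's stack.
 */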
TEE_Result ldelf_dump_state(struct user_mode_ctx *uctx)
{
	TEE_Result res = TEE_SUCCESS;
	uaddr_t usr_stack = uctx->ldelf_stack_ptr;
	struct dump_entry_arg *arg = NULL;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	struct thread_specific_data *tsd = thread_get_tsd();
	struct ts_session *sess = NULL;
	struct vm_region *r = NULL;
	size_t n = 0;

	/* Count the regions with at least one user-mode access right */
	TAILQ_FOREACH(r, &uctx->vm_info.regions, link)
		if (r->attr & TEE_MATTR_URWX)
			n++;

	usr_stack -= ROUNDUP(sizeof(*arg) + n * sizeof(struct dump_map),
			     STACK_ALIGNMENT);
	arg = (struct dump_entry_arg *)usr_stack;

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_READ |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)arg, sizeof(*arg));
	if (res) {
		EMSG("ldelf stack is inaccessible!");
		return res;
	}

	memset(arg, 0, sizeof(*arg) + n * sizeof(struct dump_map));

	arg->num_maps = n;
	n = 0;
	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
		if (r->attr & TEE_MATTR_URWX) {
			if (r->mobj)
				mobj_get_pa(r->mobj, r->offset, 0,
					    &arg->maps[n].pa);
			arg->maps[n].va = r->va;
			arg->maps[n].sz = r->size;
			if (r->attr & TEE_MATTR_UR)
				arg->maps[n].flags |= DUMP_MAP_READ;
			if (r->attr & TEE_MATTR_UW)
				arg->maps[n].flags |= DUMP_MAP_WRITE;
			if (r->attr & TEE_MATTR_UX)
				arg->maps[n].flags |= DUMP_MAP_EXEC;
			if (r->attr & TEE_MATTR_SECURE)
				arg->maps[n].flags |= DUMP_MAP_SECURE;
			if (r->flags & VM_FLAG_EPHEMERAL)
				arg->maps[n].flags |= DUMP_MAP_EPHEM;
			if (r->flags & VM_FLAG_LDELF)
				arg->maps[n].flags |= DUMP_MAP_LDELF;
			n++;
		}
	}

	arg->is_arm32 = uctx->is_32bit;
#ifdef ARM32
	arg->arm32.regs[0] = tsd->abort_regs.r0;
	arg->arm32.regs[1] = tsd->abort_regs.r1;
	arg->arm32.regs[2] = tsd->abort_regs.r2;
	arg->arm32.regs[3] = tsd->abort_regs.r3;
	arg->arm32.regs[4] = tsd->abort_regs.r4;
	arg->arm32.regs[5] = tsd->abort_regs.r5;
	arg->arm32.regs[6] = tsd->abort_regs.r6;
	arg->arm32.regs[7] = tsd->abort_regs.r7;
	arg->arm32.regs[8] = tsd->abort_regs.r8;
	arg->arm32.regs[9] = tsd->abort_regs.r9;
	arg->arm32.regs[10] = tsd->abort_regs.r10;
	arg->arm32.regs[11] = tsd->abort_regs.r11;
	arg->arm32.regs[12] = tsd->abort_regs.ip;
	arg->arm32.regs[13] = tsd->abort_regs.usr_sp; /*SP*/
	arg->arm32.regs[14] = tsd->abort_regs.usr_lr; /*LR*/
	arg->arm32.regs[15] = tsd->abort_regs.elr; /*PC*/
#endif /*ARM32*/
#ifdef ARM64
	if (uctx->is_32bit) {
		arg->arm32.regs[0] = tsd->abort_regs.x0;
		arg->arm32.regs[1] = tsd->abort_regs.x1;
		arg->arm32.regs[2] = tsd->abort_regs.x2;
		arg->arm32.regs[3] = tsd->abort_regs.x3;
		arg->arm32.regs[4] = tsd->abort_regs.x4;
		arg->arm32.regs[5] = tsd->abort_regs.x5;
		arg->arm32.regs[6] = tsd->abort_regs.x6;
		arg->arm32.regs[7] = tsd->abort_regs.x7;
		arg->arm32.regs[8] = tsd->abort_regs.x8;
		arg->arm32.regs[9] = tsd->abort_regs.x9;
		arg->arm32.regs[10] = tsd->abort_regs.x10;
		arg->arm32.regs[11] = tsd->abort_regs.x11;
		arg->arm32.regs[12] = tsd->abort_regs.x12;
		arg->arm32.regs[13] = tsd->abort_regs.x13; /*SP*/
		arg->arm32.regs[14] = tsd->abort_regs.x14; /*LR*/
		arg->arm32.regs[15] = tsd->abort_regs.elr; /*PC*/
	} else {
		arg->arm64.fp = tsd->abort_regs.x29;
		arg->arm64.pc = tsd->abort_regs.elr;
		arg->arm64.sp = tsd->abort_regs.sp_el0;
	}
#endif /*ARM64*/

	sess = ts_get_current_session();
	sess->handle_svc = ldelf_handle_svc;

	res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
				     usr_stack, uctx->dump_entry_func,
				     is_arm32, &panicked, &panic_code);

	sess->handle_svc = sess->ctx->ops->handle_svc;
	thread_user_clear_vfp(uctx);
	ldelf_sess_cleanup(sess);

	if (panicked) {
		uctx->dump_entry_func = 0;
		EMSG("ldelf dump function panicked");
		abort_print_current_ts();
		res = TEE_ERROR_TARGET_DEAD;
	}

	return res;
}

#ifdef CFG_FTRACE_SUPPORT
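/*
 * Ask ldelf to copy the ftrace buffer for @uctx into @buf. On entry *blen
 * is the size of @buf; on return it holds the number of bytes needed, and
 * TEE_ERROR_SHORT_BUFFER is returned if that exceeds the original size.
 */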
TEE_Result ldelf_dump_ftrace(struct user_mode_ctx *uctx,
			     void *buf, size_t *blen)
{
	uaddr_t usr_stack = uctx->ldelf_stack_ptr;
	TEE_Result res = TEE_SUCCESS;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	size_t *arg = NULL;
	struct ts_session *sess = NULL;

	if (!uctx->ftrace_entry_func)
		return TEE_ERROR_NOT_SUPPORTED;

	usr_stack -= ROUNDUP(sizeof(*arg), STACK_ALIGNMENT);
	arg = (size_t *)usr_stack;

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_READ |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)arg, sizeof(*arg));
	if (res) {
		EMSG("ldelf stack is inaccessible!");
		return res;
	}

	*arg = *blen;

	sess = ts_get_current_session();
	sess->handle_svc = ldelf_handle_svc;

	res = thread_enter_user_mode((vaddr_t)buf, (vaddr_t)arg, 0, 0,
				     usr_stack, uctx->ftrace_entry_func,
				     is_arm32, &panicked, &panic_code);

	sess->handle_svc = sess->ctx->ops->handle_svc;
	thread_user_clear_vfp(uctx);
	ldelf_sess_cleanup(sess);

	if (panicked) {
		uctx->ftrace_entry_func = 0;
		EMSG("ldelf ftrace function panicked");
		abort_print_current_ts();
		res = TEE_ERROR_TARGET_DEAD;
	}

	if (!res) {
		if (*arg > *blen)
			res = TEE_ERROR_SHORT_BUFFER;
		*blen = *arg;
	}

	return res;
}
#endif /*CFG_FTRACE_SUPPORT*/

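/*
 * Ask ldelf to load the library identified by @uuid into @uctx with the
 * given dlopen @flags, typically on behalf of a dlopen() call in the TA.
 */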
TEE_Result ldelf_dlopen(struct user_mode_ctx *uctx, TEE_UUID *uuid,
			uint32_t flags)
{
	uaddr_t usr_stack = uctx->ldelf_stack_ptr;
	TEE_Result res = TEE_ERROR_GENERIC;
	struct dl_entry_arg *arg = NULL;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	struct ts_session *sess = NULL;

	assert(uuid);

	usr_stack -= ROUNDUP(sizeof(*arg), STACK_ALIGNMENT);
	arg = (struct dl_entry_arg *)usr_stack;

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_READ |
				     TEE_MEMORY_ACCESS_WRITE |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)arg, sizeof(*arg));
	if (res) {
		EMSG("ldelf stack is inaccessible!");
		return res;
	}

	memset(arg, 0, sizeof(*arg));
	arg->cmd = LDELF_DL_ENTRY_DLOPEN;
	arg->dlopen.uuid = *uuid;
	arg->dlopen.flags = flags;

	sess = ts_get_current_session();
	sess->handle_svc = ldelf_handle_svc;

	res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
				     usr_stack, uctx->dl_entry_func,
				     is_arm32, &panicked, &panic_code);

	sess->handle_svc = sess->ctx->ops->handle_svc;
	ldelf_sess_cleanup(sess);

	if (panicked) {
		EMSG("ldelf dl_entry function panicked");
		abort_print_current_ts();
		res = TEE_ERROR_TARGET_DEAD;
	}
	if (!res)
		res = arg->ret;

	return res;
}

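/*
 * Ask ldelf to resolve symbol @sym, a NUL-terminated string of at most
 * @maxlen bytes (including the terminator), in the library identified by
 * @uuid. On success the symbol's address is returned in @val.
 */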
TEE_Result ldelf_dlsym(struct user_mode_ctx *uctx, TEE_UUID *uuid,
		       const char *sym, size_t maxlen, vaddr_t *val)
{
	uaddr_t usr_stack = uctx->ldelf_stack_ptr;
	TEE_Result res = TEE_ERROR_GENERIC;
	struct dl_entry_arg *arg = NULL;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	size_t len = strnlen(sym, maxlen);
	struct ts_session *sess = NULL;

	if (len == maxlen)
		return TEE_ERROR_BAD_PARAMETERS;

	usr_stack -= ROUNDUP(sizeof(*arg) + len + 1, STACK_ALIGNMENT);
	arg = (struct dl_entry_arg *)usr_stack;

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_READ |
				     TEE_MEMORY_ACCESS_WRITE |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)arg, sizeof(*arg) + len + 1);
	if (res) {
		EMSG("ldelf stack is inaccessible!");
		return res;
	}

	memset(arg, 0, sizeof(*arg));
	arg->cmd = LDELF_DL_ENTRY_DLSYM;
	arg->dlsym.uuid = *uuid;
	memcpy(arg->dlsym.symbol, sym, len);
	arg->dlsym.symbol[len] = '\0';

	sess = ts_get_current_session();
	sess->handle_svc = ldelf_handle_svc;

	res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
				     usr_stack, uctx->dl_entry_func,
				     is_arm32, &panicked, &panic_code);

	sess->handle_svc = sess->ctx->ops->handle_svc;
	ldelf_sess_cleanup(sess);

	if (panicked) {
		EMSG("ldelf dl_entry function panicked");
		abort_print_current_ts();
		res = TEE_ERROR_TARGET_DEAD;
	}
	if (!res) {
		res = arg->ret;
		if (!res)
			*val = arg->dlsym.val;
	}

	return res;
}