// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2018-2019, Linaro Limited
 * Copyright (c) 2020-2021, Arm Limited
 */

#include <assert.h>
#include <crypto/crypto.h>
#include <kernel/ldelf_syscalls.h>
#include <kernel/user_mode_ctx.h>
#include <ldelf.h>
#include <mm/file.h>
#include <mm/fobj.h>
#include <mm/mobj.h>
#include <mm/vm.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>
#include <util.h>

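/*
 * State for a binary opened with ldelf_syscall_open_bin(). @op and @h
 * identify the TA/SP store the binary was found in, @f is the file
 * object used to share read-only mappings, and @offs_bytes tracks how
 * far into the (sequentially read) binary of @size_bytes we have come.
 */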
struct bin_handle {
	const struct ts_store_ops *op;
	struct ts_store_handle *h;
	struct file *f;
	size_t offs_bytes;
	size_t size_bytes;
};

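/*
 * Map anonymous zero-initialized TA memory, optionally surrounded by
 * pad_begin/pad_end bytes of unmapped guard space. The only accepted
 * flag is LDELF_MAP_FLAG_SHAREABLE.
 */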
TEE_Result ldelf_syscall_map_zi(vaddr_t *va, size_t num_bytes, size_t pad_begin,
				size_t pad_end, unsigned long flags)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	struct fobj *f = NULL;
	struct mobj *mobj = NULL;
	uint32_t prot = TEE_MATTR_URW | TEE_MATTR_PRW;
	uint32_t vm_flags = 0;

	if (flags & ~LDELF_MAP_FLAG_SHAREABLE)
		return TEE_ERROR_BAD_PARAMETERS;

	if (flags & LDELF_MAP_FLAG_SHAREABLE)
		vm_flags |= VM_FLAG_SHAREABLE;

	f = fobj_ta_mem_alloc(ROUNDUP_DIV(num_bytes, SMALL_PAGE_SIZE));
	if (!f)
		return TEE_ERROR_OUT_OF_MEMORY;
	mobj = mobj_with_fobj_alloc(f, NULL);
	fobj_put(f);
	if (!mobj)
		return TEE_ERROR_OUT_OF_MEMORY;
	res = vm_map_pad(uctx, va, num_bytes, prot, vm_flags,
			 mobj, 0, pad_begin, pad_end, 0);
	mobj_put(mobj);

	return res;
}

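/*
 * Unmap a previously mapped range. Permanent mappings are refused.
 */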
TEE_Result ldelf_syscall_unmap(vaddr_t va, size_t num_bytes)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	size_t sz = ROUNDUP(num_bytes, SMALL_PAGE_SIZE);
	uint32_t vm_flags = 0;
	vaddr_t end_va = 0;

	/*
	 * vm_get_flags() and vm_unmap() are supposed to detect or handle
	 * overflow directly or indirectly. However, since this is an API
	 * function it's worth having an extra guard here, if nothing else
	 * to increase code clarity.
	 */
	if (ADD_OVERFLOW(va, sz, &end_va))
		return TEE_ERROR_BAD_PARAMETERS;

	res = vm_get_flags(uctx, va, sz, &vm_flags);
	if (res)
		return res;
	if (vm_flags & VM_FLAG_PERMANENT)
		return TEE_ERROR_ACCESS_DENIED;

	return vm_unmap(uctx, va, sz);
}

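/* Handle destructor: close the store handle and drop the file reference. */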
static void bin_close(void *ptr)
{
	struct bin_handle *binh = ptr;

	if (binh) {
		if (binh->op && binh->h)
			binh->op->close(binh->h);
		file_put(binh->f);
	}
	free(binh);
}

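/*
 * Open the ELF binary identified by @uuid in one of the registered TA/SP
 * stores and return a handle to it in @handle. The handle database lives
 * in sess->user_ctx and is allocated on first use.
 */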
TEE_Result ldelf_syscall_open_bin(const TEE_UUID *uuid, size_t uuid_size,
				  uint32_t *handle)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	struct system_ctx *sys_ctx = sess->user_ctx;
	struct bin_handle *binh = NULL;
	uint8_t tag[FILE_TAG_SIZE] = { 0 };
	unsigned int tag_len = sizeof(tag);
	int h = 0;

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_READ |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)uuid, sizeof(TEE_UUID));
	if (res)
		return res;

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_WRITE |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)handle, sizeof(uint32_t));
	if (res)
		return res;

	if (uuid_size != sizeof(*uuid))
		return TEE_ERROR_BAD_PARAMETERS;

	if (!sys_ctx) {
		sys_ctx = calloc(1, sizeof(*sys_ctx));
		if (!sys_ctx)
			return TEE_ERROR_OUT_OF_MEMORY;
		sess->user_ctx = sys_ctx;
	}

	binh = calloc(1, sizeof(*binh));
	if (!binh)
		return TEE_ERROR_OUT_OF_MEMORY;

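	/*
	 * Try each registered store until one recognizes the UUID;
	 * "item not found" and "storage not available" mean we should
	 * keep looking.
	 */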
	if (is_user_ta_ctx(sess->ctx) || is_stmm_ctx(sess->ctx)) {
		SCATTERED_ARRAY_FOREACH(binh->op, ta_stores,
					struct ts_store_ops) {
			DMSG("Lookup user TA ELF %pUl (%s)",
			     (void *)uuid, binh->op->description);

			res = binh->op->open(uuid, &binh->h);
			DMSG("res=%#"PRIx32, res);
			if (res != TEE_ERROR_ITEM_NOT_FOUND &&
			    res != TEE_ERROR_STORAGE_NOT_AVAILABLE)
				break;
		}
	} else if (is_sp_ctx(sess->ctx)) {
		SCATTERED_ARRAY_FOREACH(binh->op, sp_stores,
					struct ts_store_ops) {
			DMSG("Lookup user SP ELF %pUl (%s)",
			     (void *)uuid, binh->op->description);

			res = binh->op->open(uuid, &binh->h);
			DMSG("res=%#"PRIx32, res);
			if (res != TEE_ERROR_ITEM_NOT_FOUND &&
			    res != TEE_ERROR_STORAGE_NOT_AVAILABLE)
				break;
		}
	} else {
		res = TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (res)
		goto err;

	res = binh->op->get_size(binh->h, &binh->size_bytes);
	if (res)
		goto err;
	res = binh->op->get_tag(binh->h, tag, &tag_len);
	if (res)
		goto err;
	binh->f = file_get_by_tag(tag, tag_len);
	if (!binh->f)
		goto err_oom;

	h = handle_get(&sys_ctx->db, binh);
	if (h < 0)
		goto err_oom;
	*handle = h;

	return TEE_SUCCESS;

err_oom:
	res = TEE_ERROR_OUT_OF_MEMORY;
err:
	bin_close(binh);
	return res;
}

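/*
 * Close a binary handle previously returned by ldelf_syscall_open_bin().
 * The handle database is freed once the last handle is closed.
 */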
TEE_Result ldelf_syscall_close_bin(unsigned long handle)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct system_ctx *sys_ctx = sess->user_ctx;
	struct bin_handle *binh = NULL;

	if (!sys_ctx)
		return TEE_ERROR_BAD_PARAMETERS;

	binh = handle_put(&sys_ctx->db, handle);
	if (!binh)
		return TEE_ERROR_BAD_PARAMETERS;

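	/*
	 * Read out any remaining bytes. Stores stream the binary and may
	 * verify it while reading, so consuming the tail lets the store
	 * finish cleanly before the handle is closed.
	 */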
	if (binh->offs_bytes < binh->size_bytes)
		res = binh->op->read(binh->h, NULL,
				     binh->size_bytes - binh->offs_bytes);

	bin_close(binh);
	if (handle_db_is_empty(&sys_ctx->db)) {
		handle_db_destroy(&sys_ctx->db, bin_close);
		free(sys_ctx);
		sess->user_ctx = NULL;
	}

	return res;
}

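/*
 * Copy @num_bytes from offset @offs_bytes in the binary to @va. The
 * binary can only be read sequentially, so @offs_bytes must not lie
 * before the current read offset. Anything past the end of the binary
 * is zero-filled.
 */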
static TEE_Result binh_copy_to(struct bin_handle *binh, vaddr_t va,
			       size_t offs_bytes, size_t num_bytes)
{
	TEE_Result res = TEE_SUCCESS;
	size_t next_offs = 0;

	if (offs_bytes < binh->offs_bytes)
		return TEE_ERROR_BAD_STATE;

	if (ADD_OVERFLOW(offs_bytes, num_bytes, &next_offs))
		return TEE_ERROR_BAD_PARAMETERS;

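	/* Skip ahead by reading into a NULL buffer (data is discarded). */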
	if (offs_bytes > binh->offs_bytes) {
		res = binh->op->read(binh->h, NULL,
				     offs_bytes - binh->offs_bytes);
		if (res)
			return res;
		binh->offs_bytes = offs_bytes;
	}

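	/* Zero-fill the part of the destination beyond the end of the binary. */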
	if (next_offs > binh->size_bytes) {
		size_t rb = binh->size_bytes - binh->offs_bytes;

		res = binh->op->read(binh->h, (void *)va, rb);
		if (res)
			return res;
		memset((uint8_t *)va + rb, 0, num_bytes - rb);
		binh->offs_bytes = binh->size_bytes;
	} else {
		res = binh->op->read(binh->h, (void *)va, num_bytes);
		if (res)
			return res;
		binh->offs_bytes = next_offs;
	}

	return TEE_SUCCESS;
}

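/*
 * Map @num_bytes of the binary, starting at page-aligned @offs_bytes,
 * into the TA address space. Read-only mappings of the same pages are
 * shared between contexts via file slices; writeable mappings get a
 * private copy.
 */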
TEE_Result ldelf_syscall_map_bin(vaddr_t *va, size_t num_bytes,
				 unsigned long handle, size_t offs_bytes,
				 size_t pad_begin, size_t pad_end,
				 unsigned long flags)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	struct system_ctx *sys_ctx = sess->user_ctx;
	struct bin_handle *binh = NULL;
	uint32_t num_rounded_bytes = 0;
	struct file_slice *fs = NULL;
	bool file_is_locked = false;
	struct mobj *mobj = NULL;
	uint32_t offs_pages = 0;
	size_t num_pages = 0;
	uint32_t prot = 0;
	const uint32_t accept_flags = LDELF_MAP_FLAG_SHAREABLE |
				      LDELF_MAP_FLAG_WRITEABLE |
				      LDELF_MAP_FLAG_BTI |
				      LDELF_MAP_FLAG_EXECUTABLE;

	if (!sys_ctx)
		return TEE_ERROR_BAD_PARAMETERS;

	binh = handle_lookup(&sys_ctx->db, handle);
	if (!binh)
		return TEE_ERROR_BAD_PARAMETERS;

	if ((flags & accept_flags) != flags)
		return TEE_ERROR_BAD_PARAMETERS;

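	/* Shareable mappings must be read-only */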
	if ((flags & LDELF_MAP_FLAG_SHAREABLE) &&
	    (flags & LDELF_MAP_FLAG_WRITEABLE))
		return TEE_ERROR_BAD_PARAMETERS;

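	/* Enforce W^X: a mapping may be writeable or executable, not both */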
	if ((flags & LDELF_MAP_FLAG_EXECUTABLE) &&
	    (flags & LDELF_MAP_FLAG_WRITEABLE))
		return TEE_ERROR_BAD_PARAMETERS;

	if (offs_bytes & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	prot = TEE_MATTR_UR | TEE_MATTR_PR;
	if (flags & LDELF_MAP_FLAG_WRITEABLE)
		prot |= TEE_MATTR_UW | TEE_MATTR_PW;
	if (flags & LDELF_MAP_FLAG_EXECUTABLE)
		prot |= TEE_MATTR_UX;
	if (flags & LDELF_MAP_FLAG_BTI)
		prot |= TEE_MATTR_GUARDED;

	offs_pages = offs_bytes >> SMALL_PAGE_SHIFT;
	if (ROUNDUP_OVERFLOW(num_bytes, SMALL_PAGE_SIZE, &num_rounded_bytes))
		return TEE_ERROR_BAD_PARAMETERS;
	num_pages = num_rounded_bytes / SMALL_PAGE_SIZE;

	if (!file_trylock(binh->f)) {
		/*
		 * Before we can block on the file lock we must make all
		 * our page tables available for reclaiming in order to
		 * avoid a dead-lock with the other thread (which already
		 * is holding the file lock) mapping lots of memory below.
		 */
		vm_set_ctx(NULL);
		file_lock(binh->f);
		vm_set_ctx(uctx->ts_ctx);
	}
	file_is_locked = true;
	fs = file_find_slice(binh->f, offs_pages);
	if (fs) {
		/* If there's a registered slice it has to match */
		if (fs->page_offset != offs_pages ||
		    num_pages > fs->fobj->num_pages) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}

		/* If there's a slice we must be mapping shareable */
		if (!(flags & LDELF_MAP_FLAG_SHAREABLE)) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}

		mobj = mobj_with_fobj_alloc(fs->fobj, binh->f);
		if (!mobj) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		res = vm_map_pad(uctx, va, num_rounded_bytes,
				 prot, VM_FLAG_READONLY,
				 mobj, 0, pad_begin, pad_end, 0);
		mobj_put(mobj);
		if (res)
			goto err;
	} else {
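		/*
		 * No registered slice: allocate fresh backing memory,
		 * map it writeable, fill it from the store and then
		 * apply the final protection bits.
		 */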
		struct fobj *f = fobj_ta_mem_alloc(num_pages);
		struct file *file = NULL;
		uint32_t vm_flags = 0;

		if (!f) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		if (!(flags & LDELF_MAP_FLAG_WRITEABLE)) {
			file = binh->f;
			vm_flags |= VM_FLAG_READONLY;
		}

		mobj = mobj_with_fobj_alloc(f, file);
		fobj_put(f);
		if (!mobj) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		res = vm_map_pad(uctx, va, num_rounded_bytes,
				 TEE_MATTR_PRW, vm_flags, mobj, 0,
				 pad_begin, pad_end, 0);
		mobj_put(mobj);
		if (res)
			goto err;
		res = binh_copy_to(binh, *va, offs_bytes, num_bytes);
		if (res)
			goto err_unmap_va;
		res = vm_set_prot(uctx, *va, num_rounded_bytes,
				  prot);
		if (res)
			goto err_unmap_va;

		/*
		 * The context is currently active, so set it again to
		 * update the mapping.
		 */
		vm_set_ctx(uctx->ts_ctx);

		if (!(flags & LDELF_MAP_FLAG_WRITEABLE)) {
			res = file_add_slice(binh->f, f, offs_pages);
			if (res)
				goto err_unmap_va;
		}
	}

	file_unlock(binh->f);

	return TEE_SUCCESS;

err_unmap_va:
	if (vm_unmap(uctx, *va, num_rounded_bytes))
		panic();

	/*
	 * The context is currently active, so set it again to
	 * update the mapping.
	 */
	vm_set_ctx(uctx->ts_ctx);

err:
	if (file_is_locked)
		file_unlock(binh->f);

	return res;
}

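/*
 * Copy @num_bytes from offset @offs in the binary to the user buffer
 * @dst, subject to the sequential-read constraint of binh_copy_to().
 */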
TEE_Result ldelf_syscall_copy_from_bin(void *dst, size_t offs, size_t num_bytes,
				       unsigned long handle)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	struct system_ctx *sys_ctx = sess->user_ctx;
	struct bin_handle *binh = NULL;

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_WRITE |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)dst, num_bytes);
	if (res)
		return res;

	if (!sys_ctx)
		return TEE_ERROR_BAD_PARAMETERS;

	binh = handle_lookup(&sys_ctx->db, handle);
	if (!binh)
		return TEE_ERROR_BAD_PARAMETERS;

	return binh_copy_to(binh, (vaddr_t)dst, offs, num_bytes);
}

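/*
 * Change the protection of an existing mapping. The same
 * LDELF_MAP_FLAG_* bits as for ldelf_syscall_map_bin() are accepted,
 * except LDELF_MAP_FLAG_SHAREABLE.
 */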
TEE_Result ldelf_syscall_set_prot(unsigned long va, size_t num_bytes,
				  unsigned long flags)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	size_t sz = ROUNDUP(num_bytes, SMALL_PAGE_SIZE);
	uint32_t prot = TEE_MATTR_UR | TEE_MATTR_PR;
	uint32_t vm_flags = 0;
	vaddr_t end_va = 0;
	const uint32_t accept_flags = LDELF_MAP_FLAG_WRITEABLE |
				      LDELF_MAP_FLAG_BTI |
				      LDELF_MAP_FLAG_EXECUTABLE;

	if ((flags & accept_flags) != flags)
		return TEE_ERROR_BAD_PARAMETERS;
	if (flags & LDELF_MAP_FLAG_WRITEABLE)
		prot |= TEE_MATTR_UW | TEE_MATTR_PW;
	if (flags & LDELF_MAP_FLAG_EXECUTABLE)
		prot |= TEE_MATTR_UX;
	if (flags & LDELF_MAP_FLAG_BTI)
		prot |= TEE_MATTR_GUARDED;

	/*
	 * vm_get_flags() and vm_set_prot() are supposed to detect or handle
	 * overflow directly or indirectly. However, since this is an API
	 * function it's worth having an extra guard here, if nothing else
	 * to increase code clarity.
	 */
	if (ADD_OVERFLOW(va, sz, &end_va))
		return TEE_ERROR_BAD_PARAMETERS;

	res = vm_get_flags(uctx, va, sz, &vm_flags);
	if (res)
		return res;
	if (vm_flags & VM_FLAG_PERMANENT)
		return TEE_ERROR_ACCESS_DENIED;

	/*
	 * If the segment is a mapping of a part of a file (vm_flags &
	 * VM_FLAG_READONLY) it cannot be made writeable as all mapped
	 * files are mapped read-only.
	 */
	if ((vm_flags & VM_FLAG_READONLY) &&
	    (prot & (TEE_MATTR_UW | TEE_MATTR_PW)))
		return TEE_ERROR_ACCESS_DENIED;

	return vm_set_prot(uctx, va, sz, prot);
}

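/*
 * Move an existing mapping to a new virtual address, optionally with
 * pad_begin/pad_end bytes of unmapped guard space around it.
 */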
TEE_Result ldelf_syscall_remap(unsigned long old_va, vaddr_t *new_va,
			       size_t num_bytes, size_t pad_begin,
			       size_t pad_end)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	uint32_t vm_flags = 0;

	res = vm_get_flags(uctx, old_va, num_bytes, &vm_flags);
	if (res)
		return res;
	if (vm_flags & VM_FLAG_PERMANENT)
		return TEE_ERROR_ACCESS_DENIED;

	res = vm_remap(uctx, new_va, old_va, num_bytes, pad_begin, pad_end);

	return res;
}

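/* Fill @buf with @num_bytes of output from the kernel RNG. */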
TEE_Result ldelf_syscall_gen_rnd_num(void *buf, size_t num_bytes)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_WRITE |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)buf, num_bytes);
	if (res)
		return res;

	return crypto_rng_read(buf, num_bytes);
}

/*
 * Should be called after returning from ldelf. A non-NULL user_ctx means
 * that ldelf crashed or otherwise didn't complete properly. This function
 * closes the remaining handles and frees the context structs allocated by
 * ldelf.
 */
void ldelf_sess_cleanup(struct ts_session *sess)
{
	struct system_ctx *sys_ctx = sess->user_ctx;

	if (sys_ctx) {
		handle_db_destroy(&sys_ctx->db, bin_close);
		free(sys_ctx);
		sess->user_ctx = NULL;
	}
}