1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2019, Linaro Limited
4  * Copyright (c) 2020, Arm Limited
5  */
6 
7 #include <assert.h>
8 #include <config.h>
9 #include <confine_array_index.h>
10 #include <ctype.h>
11 #include <elf32.h>
12 #include <elf64.h>
13 #include <elf_common.h>
14 #include <ldelf.h>
15 #include <link.h>
16 #include <stdio.h>
17 #include <stdlib.h>
18 #include <string_ext.h>
19 #include <string.h>
20 #include <tee_api_types.h>
21 #include <tee_internal_api_extensions.h>
22 #include <unw/unwind.h>
23 #include <user_ta_header.h>
24 #include <util.h>
25 
26 #include "sys.h"
27 #include "ta_elf.h"
28 
29 /*
30  * Layout of a 32-bit struct dl_phdr_info for a 64-bit ldelf to access a 32-bit
31  * TA
32  */
33 struct dl_phdr_info32 {
34 	uint32_t dlpi_addr;
35 	uint32_t dlpi_name;
36 	uint32_t dlpi_phdr;
37 	uint16_t dlpi_phnum;
38 	uint64_t dlpi_adds;
39 	uint64_t dlpi_subs;
40 	uint32_t dlpi_tls_modid;
41 	uint32_t dlpi_tls_data;
42 };
43 
44 static vaddr_t ta_stack;
45 static vaddr_t ta_stack_size;
46 
47 struct ta_elf_queue main_elf_queue = TAILQ_HEAD_INITIALIZER(main_elf_queue);
48 
49 /*
50  * Main application is always ID 1, shared libraries with TLS take IDs 2 and
51  * above
52  */
53 static void assign_tls_mod_id(struct ta_elf *elf)
54 {
55 	static size_t last_tls_mod_id = 1;
56 
57 	if (elf->is_main)
58 		assert(last_tls_mod_id == 1); /* Main always comes first */
59 	elf->tls_mod_id = last_tls_mod_id++;
60 }
61 
62 static struct ta_elf *queue_elf_helper(const TEE_UUID *uuid)
63 {
64 	struct ta_elf *elf = calloc(1, sizeof(*elf));
65 
66 	if (!elf)
67 		return NULL;
68 
69 	TAILQ_INIT(&elf->segs);
70 
71 	elf->uuid = *uuid;
72 	TAILQ_INSERT_TAIL(&main_elf_queue, elf, link);
73 	return elf;
74 }
75 
76 static struct ta_elf *queue_elf(const TEE_UUID *uuid)
77 {
78 	struct ta_elf *elf = ta_elf_find_elf(uuid);
79 
80 	if (elf)
81 		return NULL;
82 
83 	elf = queue_elf_helper(uuid);
84 	if (!elf)
85 		err(TEE_ERROR_OUT_OF_MEMORY, "queue_elf_helper");
86 
87 	return elf;
88 }
89 
90 struct ta_elf *ta_elf_find_elf(const TEE_UUID *uuid)
91 {
92 	struct ta_elf *elf = NULL;
93 
94 	TAILQ_FOREACH(elf, &main_elf_queue, link)
95 		if (!memcmp(uuid, &elf->uuid, sizeof(*uuid)))
96 			return elf;
97 
98 	return NULL;
99 }
100 
101 static TEE_Result e32_parse_ehdr(struct ta_elf *elf, Elf32_Ehdr *ehdr)
102 {
103 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
104 	    ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
105 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
106 	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
107 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_ARM ||
108 	    (ehdr->e_flags & EF_ARM_ABIMASK) != EF_ARM_ABI_VERSION ||
109 #ifndef CFG_WITH_VFP
110 	    (ehdr->e_flags & EF_ARM_ABI_FLOAT_HARD) ||
111 #endif
112 	    ehdr->e_phentsize != sizeof(Elf32_Phdr) ||
113 	    ehdr->e_shentsize != sizeof(Elf32_Shdr))
114 		return TEE_ERROR_BAD_FORMAT;
115 
116 	elf->is_32bit = true;
117 	elf->e_entry = ehdr->e_entry;
118 	elf->e_phoff = ehdr->e_phoff;
119 	elf->e_shoff = ehdr->e_shoff;
120 	elf->e_phnum = ehdr->e_phnum;
121 	elf->e_shnum = ehdr->e_shnum;
122 	elf->e_phentsize = ehdr->e_phentsize;
123 	elf->e_shentsize = ehdr->e_shentsize;
124 
125 	return TEE_SUCCESS;
126 }
127 
128 #ifdef ARM64
129 static TEE_Result e64_parse_ehdr(struct ta_elf *elf, Elf64_Ehdr *ehdr)
130 {
131 	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
132 	    ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
133 	    ehdr->e_ident[EI_DATA] != ELFDATA2LSB ||
134 	    ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE ||
135 	    ehdr->e_type != ET_DYN || ehdr->e_machine != EM_AARCH64 ||
136 	    ehdr->e_flags || ehdr->e_phentsize != sizeof(Elf64_Phdr) ||
137 	    ehdr->e_shentsize != sizeof(Elf64_Shdr))
138 		return TEE_ERROR_BAD_FORMAT;
139 
140 
141 	elf->is_32bit = false;
142 	elf->e_entry = ehdr->e_entry;
143 	elf->e_phoff = ehdr->e_phoff;
144 	elf->e_shoff = ehdr->e_shoff;
145 	elf->e_phnum = ehdr->e_phnum;
146 	elf->e_shnum = ehdr->e_shnum;
147 	elf->e_phentsize = ehdr->e_phentsize;
148 	elf->e_shentsize = ehdr->e_shentsize;
149 
150 	return TEE_SUCCESS;
151 }
152 #else /*ARM64*/
153 static TEE_Result e64_parse_ehdr(struct ta_elf *elf __unused,
154 				 Elf64_Ehdr *ehdr __unused)
155 {
156 	return TEE_ERROR_NOT_SUPPORTED;
157 }
158 #endif /*ARM64*/
159 
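/*
 * Check that a program header of @type covering @addr..@addr + @memsz
 * (addresses relative to the ELF load address) lies within the part of
 * the module that has been mapped so far.
 */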
160 static void check_phdr_in_range(struct ta_elf *elf, unsigned int type,
161 				vaddr_t addr, size_t memsz)
162 {
163 	vaddr_t max_addr = 0;
164 
165 	if (ADD_OVERFLOW(addr, memsz, &max_addr))
166 		err(TEE_ERROR_BAD_FORMAT, "Program header %#x overflow", type);
167 
168 	/*
169 	 * elf->load_addr and elf->max_addr are both using the
170 	 * final virtual addresses, while this program header is
171 	 * relative to 0.
172 	 */
173 	if (max_addr > elf->max_addr - elf->load_addr)
174 		err(TEE_ERROR_BAD_FORMAT, "Program header %#x out of bounds",
175 		    type);
176 }
177 
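/*
 * Read entry @idx of the dynamic section at @addr (relative to the load
 * address) and return its tag and value, for both 32- and 64-bit ELFs.
 */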
178 static void read_dyn(struct ta_elf *elf, vaddr_t addr,
179 		     size_t idx, unsigned int *tag, size_t *val)
180 {
181 	if (elf->is_32bit) {
182 		Elf32_Dyn *dyn = (Elf32_Dyn *)(addr + elf->load_addr);
183 
184 		*tag = dyn[idx].d_tag;
185 		*val = dyn[idx].d_un.d_val;
186 	} else {
187 		Elf64_Dyn *dyn = (Elf64_Dyn *)(addr + elf->load_addr);
188 
189 		*tag = dyn[idx].d_tag;
190 		*val = dyn[idx].d_un.d_val;
191 	}
192 }
193 
194 static void save_hashtab_from_segment(struct ta_elf *elf, unsigned int type,
195 				      vaddr_t addr, size_t memsz)
196 {
197 	size_t dyn_entsize = 0;
198 	size_t num_dyns = 0;
199 	size_t n = 0;
200 	unsigned int tag = 0;
201 	size_t val = 0;
202 
203 	if (type != PT_DYNAMIC)
204 		return;
205 
206 	check_phdr_in_range(elf, type, addr, memsz);
207 
208 	if (elf->is_32bit)
209 		dyn_entsize = sizeof(Elf32_Dyn);
210 	else
211 		dyn_entsize = sizeof(Elf64_Dyn);
212 
213 	assert(!(memsz % dyn_entsize));
214 	num_dyns = memsz / dyn_entsize;
215 
216 	for (n = 0; n < num_dyns; n++) {
217 		read_dyn(elf, addr, n, &tag, &val);
218 		if (tag == DT_HASH) {
219 			elf->hashtab = (void *)(val + elf->load_addr);
220 			break;
221 		}
222 	}
223 }
224 
225 static void check_range(struct ta_elf *elf, const char *name, const void *ptr,
226 			size_t sz)
227 {
228 	size_t max_addr = 0;
229 
230 	if ((vaddr_t)ptr < elf->load_addr)
231 		err(TEE_ERROR_BAD_FORMAT, "%s %p out of range", name, ptr);
232 
233 	if (ADD_OVERFLOW((vaddr_t)ptr, sz, &max_addr))
234 		err(TEE_ERROR_BAD_FORMAT, "%s range overflow", name);
235 
236 	if (max_addr > elf->max_addr)
237 		err(TEE_ERROR_BAD_FORMAT,
238 		    "%s %p..%#zx out of range", name, ptr, max_addr);
239 }
240 
241 static void check_hashtab(struct ta_elf *elf, void *ptr, size_t num_buckets,
242 			  size_t num_chains)
243 {
244 	/*
245 	 * Starting from 2 as the first two words are mandatory and hold
246 	 * num_buckets and num_chains. So this function is called twice,
247 	 * first to see that there's indeed room for num_buckets and
248 	 * num_chains and then to see that all of it fits.
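	 *
	 * The DT_HASH table itself is an array of 32-bit words:
	 *   word 0                nbucket
	 *   word 1                nchain
	 *   word 2..nbucket+1     bucket[]
	 *   then nchain words     chain[]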
249 	 * See http://www.sco.com/developers/gabi/latest/ch5.dynamic.html#hash
250 	 */
251 	size_t num_words = 2;
252 	size_t sz = 0;
253 
254 	if (!IS_ALIGNED_WITH_TYPE(ptr, uint32_t))
255 		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of DT_HASH %p", ptr);
256 
257 	if (ADD_OVERFLOW(num_words, num_buckets, &num_words) ||
258 	    ADD_OVERFLOW(num_words, num_chains, &num_words) ||
259 	    MUL_OVERFLOW(num_words, sizeof(uint32_t), &sz))
260 		err(TEE_ERROR_BAD_FORMAT, "DT_HASH overflow");
261 
262 	check_range(elf, "DT_HASH", ptr, sz);
263 }
264 
265 static void save_hashtab(struct ta_elf *elf)
266 {
267 	uint32_t *hashtab = NULL;
268 	size_t n = 0;
269 
270 	if (elf->is_32bit) {
271 		Elf32_Phdr *phdr = elf->phdr;
272 
273 		for (n = 0; n < elf->e_phnum; n++)
274 			save_hashtab_from_segment(elf, phdr[n].p_type,
275 						  phdr[n].p_vaddr,
276 						  phdr[n].p_memsz);
277 	} else {
278 		Elf64_Phdr *phdr = elf->phdr;
279 
280 		for (n = 0; n < elf->e_phnum; n++)
281 			save_hashtab_from_segment(elf, phdr[n].p_type,
282 						  phdr[n].p_vaddr,
283 						  phdr[n].p_memsz);
284 	}
285 
286 	check_hashtab(elf, elf->hashtab, 0, 0);
287 	hashtab = elf->hashtab;
288 	check_hashtab(elf, elf->hashtab, hashtab[0], hashtab[1]);
289 }
290 
291 static void save_soname_from_segment(struct ta_elf *elf, unsigned int type,
292 				     vaddr_t addr, size_t memsz)
293 {
294 	size_t dyn_entsize = 0;
295 	size_t num_dyns = 0;
296 	size_t n = 0;
297 	unsigned int tag = 0;
298 	size_t val = 0;
299 	char *str_tab = NULL;
300 
301 	if (type != PT_DYNAMIC)
302 		return;
303 
304 	if (elf->is_32bit)
305 		dyn_entsize = sizeof(Elf32_Dyn);
306 	else
307 		dyn_entsize = sizeof(Elf64_Dyn);
308 
309 	assert(!(memsz % dyn_entsize));
310 	num_dyns = memsz / dyn_entsize;
311 
312 	for (n = 0; n < num_dyns; n++) {
313 		read_dyn(elf, addr, n, &tag, &val);
314 		if (tag == DT_STRTAB) {
315 			str_tab = (char *)(val + elf->load_addr);
316 			break;
317 		}
318 	}
319 	for (n = 0; n < num_dyns; n++) {
320 		read_dyn(elf, addr, n, &tag, &val);
321 		if (tag == DT_SONAME) {
322 			elf->soname = str_tab + val;
323 			break;
324 		}
325 	}
326 }
327 
328 static void save_soname(struct ta_elf *elf)
329 {
330 	size_t n = 0;
331 
332 	if (elf->is_32bit) {
333 		Elf32_Phdr *phdr = elf->phdr;
334 
335 		for (n = 0; n < elf->e_phnum; n++)
336 			save_soname_from_segment(elf, phdr[n].p_type,
337 						 phdr[n].p_vaddr,
338 						 phdr[n].p_memsz);
339 	} else {
340 		Elf64_Phdr *phdr = elf->phdr;
341 
342 		for (n = 0; n < elf->e_phnum; n++)
343 			save_soname_from_segment(elf, phdr[n].p_type,
344 						 phdr[n].p_vaddr,
345 						 phdr[n].p_memsz);
346 	}
347 }
348 
349 static void e32_save_symtab(struct ta_elf *elf, size_t tab_idx)
350 {
351 	Elf32_Shdr *shdr = elf->shdr;
352 	size_t str_idx = shdr[tab_idx].sh_link;
353 
354 	elf->dynsymtab = (void *)(shdr[tab_idx].sh_addr + elf->load_addr);
355 	if (!IS_ALIGNED_WITH_TYPE(elf->dynsymtab, Elf32_Sym))
356 		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of dynsymtab %p",
357 		    elf->dynsymtab);
358 	check_range(elf, "Dynsymtab", elf->dynsymtab, shdr[tab_idx].sh_size);
359 
360 	if (shdr[tab_idx].sh_size % sizeof(Elf32_Sym))
361 		err(TEE_ERROR_BAD_FORMAT,
362 		    "Size of dynsymtab not an even multiple of Elf32_Sym");
363 	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf32_Sym);
364 
365 	if (str_idx >= elf->e_shnum)
366 		err(TEE_ERROR_BAD_FORMAT, "Dynstr section index out of range");
367 	elf->dynstr = (void *)(shdr[str_idx].sh_addr + elf->load_addr);
368 	check_range(elf, "Dynstr", elf->dynstr, shdr[str_idx].sh_size);
369 
370 	elf->dynstr_size = shdr[str_idx].sh_size;
371 }
372 
373 static void e64_save_symtab(struct ta_elf *elf, size_t tab_idx)
374 {
375 	Elf64_Shdr *shdr = elf->shdr;
376 	size_t str_idx = shdr[tab_idx].sh_link;
377 
378 	elf->dynsymtab = (void *)(vaddr_t)(shdr[tab_idx].sh_addr +
379 					   elf->load_addr);
380 
381 	if (!IS_ALIGNED_WITH_TYPE(elf->dynsymtab, Elf64_Sym))
382 		err(TEE_ERROR_BAD_FORMAT, "Bad alignment of .dynsym/DYNSYM %p",
383 		    elf->dynsymtab);
384 	check_range(elf, ".dynsym/DYNSYM", elf->dynsymtab,
385 		    shdr[tab_idx].sh_size);
386 
387 	if (shdr[tab_idx].sh_size % sizeof(Elf64_Sym))
388 		err(TEE_ERROR_BAD_FORMAT,
389 		    "Size of .dynsym/DYNSYM not an even multiple of Elf64_Sym");
390 	elf->num_dynsyms = shdr[tab_idx].sh_size / sizeof(Elf64_Sym);
391 
392 	if (str_idx >= elf->e_shnum)
393 		err(TEE_ERROR_BAD_FORMAT,
394 		    ".dynstr/STRTAB section index out of range");
395 	elf->dynstr = (void *)(vaddr_t)(shdr[str_idx].sh_addr + elf->load_addr);
396 	check_range(elf, ".dynstr/STRTAB", elf->dynstr, shdr[str_idx].sh_size);
397 
398 	elf->dynstr_size = shdr[str_idx].sh_size;
399 }
400 
401 static void save_symtab(struct ta_elf *elf)
402 {
403 	size_t n = 0;
404 
405 	if (elf->is_32bit) {
406 		Elf32_Shdr *shdr = elf->shdr;
407 
408 		for (n = 0; n < elf->e_shnum; n++) {
409 			if (shdr[n].sh_type == SHT_DYNSYM) {
410 				e32_save_symtab(elf, n);
411 				break;
412 			}
413 		}
414 	} else {
415 		Elf64_Shdr *shdr = elf->shdr;
416 
417 		for (n = 0; n < elf->e_shnum; n++) {
418 			if (shdr[n].sh_type == SHT_DYNSYM) {
419 				e64_save_symtab(elf, n);
420 				break;
421 			}
422 		}
423 
424 	}
425 
426 	save_hashtab(elf);
427 	save_soname(elf);
428 }
429 
430 static void init_elf(struct ta_elf *elf)
431 {
432 	TEE_Result res = TEE_SUCCESS;
433 	vaddr_t va = 0;
434 	uint32_t flags = LDELF_MAP_FLAG_SHAREABLE;
435 	size_t sz = 0;
436 
437 	res = sys_open_ta_bin(&elf->uuid, &elf->handle);
438 	if (res)
439 		err(res, "sys_open_ta_bin(%pUl)", (void *)&elf->uuid);
440 
441 	/*
442 	 * Map it read-only executable when we're loading a library where
443 	 * the ELF header is included in a load segment.
444 	 */
445 	if (!elf->is_main)
446 		flags |= LDELF_MAP_FLAG_EXECUTABLE;
447 	res = sys_map_ta_bin(&va, SMALL_PAGE_SIZE, flags, elf->handle, 0, 0, 0);
448 	if (res)
449 		err(res, "sys_map_ta_bin");
450 	elf->ehdr_addr = va;
451 	if (!elf->is_main) {
452 		elf->load_addr = va;
453 		elf->max_addr = va + SMALL_PAGE_SIZE;
454 		elf->max_offs = SMALL_PAGE_SIZE;
455 	}
456 
457 	if (!IS_ELF(*(Elf32_Ehdr *)va))
458 		err(TEE_ERROR_BAD_FORMAT, "TA is not an ELF");
459 
460 	res = e32_parse_ehdr(elf, (void *)va);
461 	if (res == TEE_ERROR_BAD_FORMAT)
462 		res = e64_parse_ehdr(elf, (void *)va);
463 	if (res)
464 		err(res, "Cannot parse ELF");
465 
466 	if (MUL_OVERFLOW(elf->e_phnum, elf->e_phentsize, &sz) ||
467 	    ADD_OVERFLOW(sz, elf->e_phoff, &sz))
468 		err(TEE_ERROR_BAD_FORMAT, "Program headers size overflow");
469 
470 	if (sz > SMALL_PAGE_SIZE)
471 		err(TEE_ERROR_NOT_SUPPORTED, "Cannot read program headers");
472 
473 	elf->phdr = (void *)(va + elf->e_phoff);
474 }
475 
476 static size_t roundup(size_t v)
477 {
478 	return ROUNDUP(v, SMALL_PAGE_SIZE);
479 }
480 
481 static size_t rounddown(size_t v)
482 {
483 	return ROUNDDOWN(v, SMALL_PAGE_SIZE);
484 }
485 
486 static void add_segment(struct ta_elf *elf, size_t offset, size_t vaddr,
487 			size_t filesz, size_t memsz, size_t flags, size_t align)
488 {
489 	struct segment *seg = calloc(1, sizeof(*seg));
490 
491 	if (!seg)
492 		err(TEE_ERROR_OUT_OF_MEMORY, "calloc");
493 
494 	if (memsz < filesz)
495 		err(TEE_ERROR_BAD_FORMAT, "Memsz smaller than filesz");
496 
497 	seg->offset = offset;
498 	seg->vaddr = vaddr;
499 	seg->filesz = filesz;
500 	seg->memsz = memsz;
501 	seg->flags = flags;
502 	seg->align = align;
503 
504 	TAILQ_INSERT_TAIL(&elf->segs, seg, link);
505 }
506 
507 static void parse_load_segments(struct ta_elf *elf)
508 {
509 	size_t n = 0;
510 
511 	if (elf->is_32bit) {
512 		Elf32_Phdr *phdr = elf->phdr;
513 
514 		for (n = 0; n < elf->e_phnum; n++)
515 			if (phdr[n].p_type == PT_LOAD) {
516 				add_segment(elf, phdr[n].p_offset,
517 					    phdr[n].p_vaddr, phdr[n].p_filesz,
518 					    phdr[n].p_memsz, phdr[n].p_flags,
519 					    phdr[n].p_align);
520 			} else if (phdr[n].p_type == PT_ARM_EXIDX) {
521 				elf->exidx_start = phdr[n].p_vaddr;
522 				elf->exidx_size = phdr[n].p_filesz;
523 			} else if (phdr[n].p_type == PT_TLS) {
524 				assign_tls_mod_id(elf);
525 			}
526 	} else {
527 		Elf64_Phdr *phdr = elf->phdr;
528 
529 		for (n = 0; n < elf->e_phnum; n++)
530 			if (phdr[n].p_type == PT_LOAD) {
531 				add_segment(elf, phdr[n].p_offset,
532 					    phdr[n].p_vaddr, phdr[n].p_filesz,
533 					    phdr[n].p_memsz, phdr[n].p_flags,
534 					    phdr[n].p_align);
535 			} else if (phdr[n].p_type == PT_TLS) {
536 				elf->tls_start = phdr[n].p_vaddr;
537 				elf->tls_filesz = phdr[n].p_filesz;
538 				elf->tls_memsz = phdr[n].p_memsz;
539 			} else if (IS_ENABLED(CFG_TA_BTI) &&
540 				   phdr[n].p_type == PT_GNU_PROPERTY) {
541 				elf->prop_start = phdr[n].p_vaddr;
542 				elf->prop_align = phdr[n].p_align;
543 				elf->prop_memsz = phdr[n].p_memsz;
544 			}
545 	}
546 }
547 
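/*
 * Copy the file content of a remapped-writeable segment into place. Bytes
 * that are already present in the previously mapped part of the binary are
 * copied from memory, the rest is read from the TA binary with
 * sys_copy_from_ta_bin().
 */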
548 static void copy_remapped_to(struct ta_elf *elf, const struct segment *seg)
549 {
550 	uint8_t *dst = (void *)(seg->vaddr + elf->load_addr);
551 	size_t n = 0;
552 	size_t offs = seg->offset;
553 	size_t num_bytes = seg->filesz;
554 
555 	if (offs < elf->max_offs) {
556 		n = MIN(elf->max_offs - offs, num_bytes);
557 		memcpy(dst, (void *)(elf->max_addr + offs - elf->max_offs), n);
558 		dst += n;
559 		offs += n;
560 		num_bytes -= n;
561 	}
562 
563 	if (num_bytes) {
564 		TEE_Result res = sys_copy_from_ta_bin(dst, num_bytes,
565 						      elf->handle, offs);
566 
567 		if (res)
568 			err(res, "sys_copy_from_ta_bin");
569 		elf->max_offs += offs;
570 	}
571 }
572 
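/*
 * Sanity check the recorded load segments and adjust them to page
 * granularity: segments whose virtual addresses share a page are merged
 * (permissions OR'ed together), and a segment mapping the same file page
 * at a second virtual address is marked remapped_writeable. Remaining
 * segments are rounded down to a page boundary.
 */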
573 static void adjust_segments(struct ta_elf *elf)
574 {
575 	struct segment *seg = NULL;
576 	struct segment *prev_seg = NULL;
577 	size_t prev_end_addr = 0;
578 	size_t align = 0;
579 	size_t mask = 0;
580 
581 	/* Sanity check */
582 	TAILQ_FOREACH(seg, &elf->segs, link) {
583 		size_t dummy __maybe_unused = 0;
584 
585 		assert(seg->align >= SMALL_PAGE_SIZE);
586 		assert(!ADD_OVERFLOW(seg->vaddr, seg->memsz, &dummy));
587 		assert(seg->filesz <= seg->memsz);
588 		assert((seg->offset & SMALL_PAGE_MASK) ==
589 		       (seg->vaddr & SMALL_PAGE_MASK));
590 
591 		prev_seg = TAILQ_PREV(seg, segment_head, link);
592 		if (prev_seg) {
593 			assert(seg->vaddr >= prev_seg->vaddr + prev_seg->memsz);
594 			assert(seg->offset >=
595 			       prev_seg->offset + prev_seg->filesz);
596 		}
597 		if (!align)
598 			align = seg->align;
599 		assert(align == seg->align);
600 	}
601 
602 	mask = align - 1;
603 
604 	seg = TAILQ_FIRST(&elf->segs);
605 	if (seg)
606 		seg = TAILQ_NEXT(seg, link);
607 	while (seg) {
608 		prev_seg = TAILQ_PREV(seg, segment_head, link);
609 		prev_end_addr = prev_seg->vaddr + prev_seg->memsz;
610 
611 		/*
612 		 * This segment may overlap with the last "page" in the
613 		 * previous segment in two different ways:
614 		 * 1. Virtual address (and offset) overlaps =>
615 		 *    Permissions need to be merged. The offset must have
616 		 *    the same SMALL_PAGE_MASK bits set as vaddr, and offset
617 		 *    must add up with the previous segment.
618 		 *
619 		 * 2. Only offset overlaps =>
620 		 *    The same page in the ELF is mapped at two different
621 		 *    virtual addresses. As a limitation this segment must
622 		 *    be mapped as writeable.
623 		 */
624 
625 		/* Case 1. */
626 		if (rounddown(seg->vaddr) < prev_end_addr) {
627 			assert((seg->vaddr & mask) == (seg->offset & mask));
628 			assert(prev_seg->memsz == prev_seg->filesz);
629 
630 			/*
631 			 * Merge the segments and their permissions.
632 			 * Note that there may be a small hole between the
633 			 * two segments.
634 			 */
635 			prev_seg->filesz = seg->vaddr + seg->filesz -
636 					   prev_seg->vaddr;
637 			prev_seg->memsz = seg->vaddr + seg->memsz -
638 					   prev_seg->vaddr;
639 			prev_seg->flags |= seg->flags;
640 
641 			TAILQ_REMOVE(&elf->segs, seg, link);
642 			free(seg);
643 			seg = TAILQ_NEXT(prev_seg, link);
644 			continue;
645 		}
646 
647 		/* Case 2. */
648 		if ((seg->offset & mask) &&
649 		    rounddown(seg->offset) <
650 		    (prev_seg->offset + prev_seg->filesz)) {
651 
652 			assert(seg->flags & PF_W);
653 			seg->remapped_writeable = true;
654 		}
655 
656 		/*
657 		 * No overlap, but we may need to align address, offset and
658 		 * size.
659 		 */
660 		seg->filesz += seg->vaddr - rounddown(seg->vaddr);
661 		seg->memsz += seg->vaddr - rounddown(seg->vaddr);
662 		seg->vaddr = rounddown(seg->vaddr);
663 		seg->offset = rounddown(seg->offset);
664 		seg = TAILQ_NEXT(seg, link);
665 	}
666 
667 }
668 
669 static void populate_segments_legacy(struct ta_elf *elf)
670 {
671 	TEE_Result res = TEE_SUCCESS;
672 	struct segment *seg = NULL;
673 	vaddr_t va = 0;
674 
675 	assert(elf->is_legacy);
676 	TAILQ_FOREACH(seg, &elf->segs, link) {
677 		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
678 		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
679 					 seg->vaddr - seg->memsz);
680 		size_t num_bytes = roundup(seg->memsz);
681 
682 		if (!elf->load_addr)
683 			va = 0;
684 		else
685 			va = seg->vaddr + elf->load_addr;
686 
687 
688 		if (!(seg->flags & PF_R))
689 			err(TEE_ERROR_NOT_SUPPORTED,
690 			    "Segment must be readable");
691 
692 		res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
693 		if (res)
694 			err(res, "sys_map_zi");
695 		res = sys_copy_from_ta_bin((void *)va, seg->filesz,
696 					   elf->handle, seg->offset);
697 		if (res)
698 			err(res, "sys_copy_from_ta_bin");
699 
700 		if (!elf->load_addr)
701 			elf->load_addr = va;
702 		elf->max_addr = va + num_bytes;
703 		elf->max_offs = seg->offset + seg->filesz;
704 	}
705 }
706 
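/*
 * Number of padding bytes to map before the ELF. With CFG_TA_ASLR enabled
 * this is a random number of pages in the interval
 * [CFG_TA_ASLR_MIN_OFFSET_PAGES, CFG_TA_ASLR_MAX_OFFSET_PAGES), otherwise 0.
 */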
707 static size_t get_pad_begin(void)
708 {
709 #ifdef CFG_TA_ASLR
710 	size_t min = CFG_TA_ASLR_MIN_OFFSET_PAGES;
711 	size_t max = CFG_TA_ASLR_MAX_OFFSET_PAGES;
712 	TEE_Result res = TEE_SUCCESS;
713 	uint32_t rnd32 = 0;
714 	size_t rnd = 0;
715 
716 	COMPILE_TIME_ASSERT(CFG_TA_ASLR_MIN_OFFSET_PAGES <
717 			    CFG_TA_ASLR_MAX_OFFSET_PAGES);
718 	if (max > min) {
719 		res = sys_gen_random_num(&rnd32, sizeof(rnd32));
720 		if (res) {
721 			DMSG("Random read failed: %#"PRIx32, res);
722 			return min * SMALL_PAGE_SIZE;
723 		}
724 		rnd = rnd32 % (max - min);
725 	}
726 
727 	return (min + rnd) * SMALL_PAGE_SIZE;
728 #else /*!CFG_TA_ASLR*/
729 	return 0;
730 #endif /*!CFG_TA_ASLR*/
731 }
732 
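/*
 * Map the load segments of a non-legacy ELF. Writeable segments are mapped
 * as zero-initialized memory and filled with sys_copy_from_ta_bin(), while
 * read-only segments are mapped (shareable) directly from the TA binary.
 * The first mapping requests pad_begin to randomize the load address; on
 * failure it is retried without padding, effectively disabling ASLR.
 */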
733 static void populate_segments(struct ta_elf *elf)
734 {
735 	TEE_Result res = TEE_SUCCESS;
736 	struct segment *seg = NULL;
737 	vaddr_t va = 0;
738 	size_t pad_begin = 0;
739 
740 	assert(!elf->is_legacy);
741 	TAILQ_FOREACH(seg, &elf->segs, link) {
742 		struct segment *last_seg = TAILQ_LAST(&elf->segs, segment_head);
743 		size_t pad_end = roundup(last_seg->vaddr + last_seg->memsz -
744 					 seg->vaddr - seg->memsz);
745 
746 		if (seg->remapped_writeable) {
747 			size_t num_bytes = roundup(seg->vaddr + seg->memsz) -
748 					   rounddown(seg->vaddr);
749 
750 			assert(elf->load_addr);
751 			va = rounddown(elf->load_addr + seg->vaddr);
752 			assert(va >= elf->max_addr);
753 			res = sys_map_zi(num_bytes, 0, &va, 0, pad_end);
754 			if (res)
755 				err(res, "sys_map_zi");
756 
757 			copy_remapped_to(elf, seg);
758 			elf->max_addr = va + num_bytes;
759 		} else {
760 			uint32_t flags =  0;
761 			size_t filesz = seg->filesz;
762 			size_t memsz = seg->memsz;
763 			size_t offset = seg->offset;
764 			size_t vaddr = seg->vaddr;
765 
766 			if (offset < elf->max_offs) {
767 				/*
768 				 * We're in a load segment which overlaps
769 				 * with (or is covered by) the first page
770 				 * of a shared library.
771 				 */
772 				if (vaddr + filesz < SMALL_PAGE_SIZE) {
773 					size_t num_bytes = 0;
774 
775 					/*
776 					 * If this segment is completely
777 					 * covered, take next.
778 					 */
779 					if (vaddr + memsz <= SMALL_PAGE_SIZE)
780 						continue;
781 
782 					/*
783 					 * All data of the segment is
784 					 * loaded, but we need to zero
785 					 * extend it.
786 					 */
787 					va = elf->max_addr;
788 					num_bytes = roundup(vaddr + memsz) -
789 						    roundup(vaddr) -
790 						    SMALL_PAGE_SIZE;
791 					assert(num_bytes);
792 					res = sys_map_zi(num_bytes, 0, &va, 0,
793 							 0);
794 					if (res)
795 						err(res, "sys_map_zi");
796 					elf->max_addr = roundup(va + num_bytes);
797 					continue;
798 				}
799 
800 				/* Partial overlap, remove the first page. */
801 				vaddr += SMALL_PAGE_SIZE;
802 				filesz -= SMALL_PAGE_SIZE;
803 				memsz -= SMALL_PAGE_SIZE;
804 				offset += SMALL_PAGE_SIZE;
805 			}
806 
807 			if (!elf->load_addr) {
808 				va = 0;
809 				pad_begin = get_pad_begin();
810 				/*
811 				 * If mapping with pad_begin fails we'll
812 				 * retry without pad_begin, effectively
813 				 * disabling ASLR for the current ELF file.
814 				 */
815 			} else {
816 				va = vaddr + elf->load_addr;
817 				pad_begin = 0;
818 			}
819 
820 			if (seg->flags & PF_W)
821 				flags |= LDELF_MAP_FLAG_WRITEABLE;
822 			else
823 				flags |= LDELF_MAP_FLAG_SHAREABLE;
824 			if (seg->flags & PF_X)
825 				flags |= LDELF_MAP_FLAG_EXECUTABLE;
826 			if (!(seg->flags & PF_R))
827 				err(TEE_ERROR_NOT_SUPPORTED,
828 				    "Segment must be readable");
829 			if (flags & LDELF_MAP_FLAG_WRITEABLE) {
830 				res = sys_map_zi(memsz, 0, &va, pad_begin,
831 						 pad_end);
832 				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
833 					res = sys_map_zi(memsz, 0, &va, 0,
834 							 pad_end);
835 				if (res)
836 					err(res, "sys_map_zi");
837 				res = sys_copy_from_ta_bin((void *)va, filesz,
838 							   elf->handle, offset);
839 				if (res)
840 					err(res, "sys_copy_from_ta_bin");
841 			} else {
842 				if (filesz != memsz)
843 					err(TEE_ERROR_BAD_FORMAT,
844 					    "Filesz and memsz mismatch");
845 				res = sys_map_ta_bin(&va, filesz, flags,
846 						     elf->handle, offset,
847 						     pad_begin, pad_end);
848 				if (pad_begin && res == TEE_ERROR_OUT_OF_MEMORY)
849 					res = sys_map_ta_bin(&va, filesz, flags,
850 							     elf->handle,
851 							     offset, 0,
852 							     pad_end);
853 				if (res)
854 					err(res, "sys_map_ta_bin");
855 			}
856 
857 			if (!elf->load_addr)
858 				elf->load_addr = va;
859 			elf->max_addr = roundup(va + memsz);
860 			elf->max_offs += filesz;
861 		}
862 	}
863 }
864 
865 static void ta_elf_add_bti(struct ta_elf *elf)
866 {
867 	TEE_Result res = TEE_SUCCESS;
868 	struct segment *seg = NULL;
869 	uint32_t flags = LDELF_MAP_FLAG_EXECUTABLE | LDELF_MAP_FLAG_BTI;
870 
871 	TAILQ_FOREACH(seg, &elf->segs, link) {
872 		vaddr_t va = elf->load_addr + seg->vaddr;
873 
874 		if (seg->flags & PF_X) {
875 			res = sys_set_prot(va, seg->memsz, flags);
876 			if (res)
877 				err(res, "sys_set_prot");
878 		}
879 	}
880 }
881 
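/*
 * Parse the PT_GNU_PROPERTY segment, if present, and set elf->bti_enabled
 * when the GNU_PROPERTY_AARCH64_FEATURE_1_BTI bit is found in a
 * NT_GNU_PROPERTY_TYPE_0 note. Only effective with CFG_TA_BTI enabled.
 */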
882 static void parse_property_segment(struct ta_elf *elf)
883 {
884 	char *desc = NULL;
885 	size_t align = elf->prop_align;
886 	size_t desc_offset = 0;
887 	size_t prop_offset = 0;
888 	vaddr_t va = 0;
889 	Elf_Note *note = NULL;
890 	char *name = NULL;
891 
892 	if (!IS_ENABLED(CFG_TA_BTI) || !elf->prop_start)
893 		return;
894 
895 	check_phdr_in_range(elf, PT_GNU_PROPERTY, elf->prop_start,
896 			    elf->prop_memsz);
897 
898 	va = elf->load_addr + elf->prop_start;
899 	note = (void *)va;
900 	name = (char *)(note + 1);
901 
902 	if (elf->prop_memsz < sizeof(*note) + sizeof(ELF_NOTE_GNU))
903 		return;
904 
905 	if (note->n_type != NT_GNU_PROPERTY_TYPE_0 ||
906 	    note->n_namesz != sizeof(ELF_NOTE_GNU) ||
907 	    memcmp(name, ELF_NOTE_GNU, sizeof(ELF_NOTE_GNU)) ||
908 	    !IS_POWER_OF_TWO(align))
909 		return;
910 
911 	desc_offset = ROUNDUP(sizeof(*note) + sizeof(ELF_NOTE_GNU), align);
912 
913 	if (desc_offset > elf->prop_memsz ||
914 	    ROUNDUP(desc_offset + note->n_descsz, align) > elf->prop_memsz)
915 		return;
916 
917 	desc = (char *)(va + desc_offset);
918 
919 	do {
920 		Elf_Prop *prop = (void *)(desc + prop_offset);
921 		size_t data_offset = prop_offset + sizeof(*prop);
922 
923 		if (note->n_descsz < data_offset)
924 			return;
925 
926 		data_offset = confine_array_index(data_offset, note->n_descsz);
927 
928 		if (prop->pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) {
929 			uint32_t *pr_data = (void *)(desc + data_offset);
930 
931 			if (note->n_descsz < (data_offset + sizeof(*pr_data)) &&
932 			    prop->pr_datasz != sizeof(*pr_data))
933 				return;
934 
935 			if (*pr_data & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) {
936 				DMSG("BTI Feature present in note property");
937 				elf->bti_enabled = true;
938 			}
939 		}
940 
941 		prop_offset += ROUNDUP(sizeof(*prop) + prop->pr_datasz, align);
942 	} while (prop_offset < note->n_descsz);
943 }
944 
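/*
 * Record and adjust the load segments of the ELF. If the first load segment
 * shares a file page with the ELF header (the shared library case), the page
 * mapped by init_elf() is remapped, with optional ASLR padding, to a region
 * large enough to hold the whole module.
 */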
945 static void map_segments(struct ta_elf *elf)
946 {
947 	TEE_Result res = TEE_SUCCESS;
948 
949 	parse_load_segments(elf);
950 	adjust_segments(elf);
951 	if (TAILQ_FIRST(&elf->segs)->offset < SMALL_PAGE_SIZE) {
952 		vaddr_t va = 0;
953 		size_t sz = elf->max_addr - elf->load_addr;
954 		struct segment *seg = TAILQ_LAST(&elf->segs, segment_head);
955 		size_t pad_begin = get_pad_begin();
956 
957 		/*
958 		 * We're expected to be loading a library here; if that is
959 		 * not the case, other parts of the code need to be updated too.
960 		 */
961 		assert(!elf->is_main);
962 
963 		/*
964 		 * Now that we know how much virtual memory is needed move
965 		 * the already mapped part to a location which can
966 		 * accommodate us.
967 		 */
968 		res = sys_remap(elf->load_addr, &va, sz, pad_begin,
969 				roundup(seg->vaddr + seg->memsz));
970 		if (res == TEE_ERROR_OUT_OF_MEMORY)
971 			res = sys_remap(elf->load_addr, &va, sz, 0,
972 					roundup(seg->vaddr + seg->memsz));
973 		if (res)
974 			err(res, "sys_remap");
975 		elf->ehdr_addr = va;
976 		elf->load_addr = va;
977 		elf->max_addr = va + sz;
978 		elf->phdr = (void *)(va + elf->e_phoff);
979 	}
980 }
981 
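/*
 * Scan a PT_DYNAMIC segment for DT_NEEDED entries. Each entry is expected
 * to name the UUID of a dependency, which is added to the ELF queue so
 * that it gets loaded later.
 */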
982 static void add_deps_from_segment(struct ta_elf *elf, unsigned int type,
983 				  vaddr_t addr, size_t memsz)
984 {
985 	size_t dyn_entsize = 0;
986 	size_t num_dyns = 0;
987 	size_t n = 0;
988 	unsigned int tag = 0;
989 	size_t val = 0;
990 	TEE_UUID uuid = { };
991 	char *str_tab = NULL;
992 	size_t str_tab_sz = 0;
993 
994 	if (type != PT_DYNAMIC)
995 		return;
996 
997 	check_phdr_in_range(elf, type, addr, memsz);
998 
999 	if (elf->is_32bit)
1000 		dyn_entsize = sizeof(Elf32_Dyn);
1001 	else
1002 		dyn_entsize = sizeof(Elf64_Dyn);
1003 
1004 	assert(!(memsz % dyn_entsize));
1005 	num_dyns = memsz / dyn_entsize;
1006 
1007 	for (n = 0; n < num_dyns && !(str_tab && str_tab_sz); n++) {
1008 		read_dyn(elf, addr, n, &tag, &val);
1009 		if (tag == DT_STRTAB)
1010 			str_tab = (char *)(val + elf->load_addr);
1011 		else if (tag == DT_STRSZ)
1012 			str_tab_sz = val;
1013 	}
1014 	check_range(elf, ".dynstr/STRTAB", str_tab, str_tab_sz);
1015 
1016 	for (n = 0; n < num_dyns; n++) {
1017 		read_dyn(elf, addr, n, &tag, &val);
1018 		if (tag != DT_NEEDED)
1019 			continue;
1020 		if (val >= str_tab_sz)
1021 			err(TEE_ERROR_BAD_FORMAT,
1022 			    "Offset into .dynstr/STRTAB out of range");
1023 		tee_uuid_from_str(&uuid, str_tab + val);
1024 		queue_elf(&uuid);
1025 	}
1026 }
1027 
1028 static void add_dependencies(struct ta_elf *elf)
1029 {
1030 	size_t n = 0;
1031 
1032 	if (elf->is_32bit) {
1033 		Elf32_Phdr *phdr = elf->phdr;
1034 
1035 		for (n = 0; n < elf->e_phnum; n++)
1036 			add_deps_from_segment(elf, phdr[n].p_type,
1037 					      phdr[n].p_vaddr, phdr[n].p_memsz);
1038 	} else {
1039 		Elf64_Phdr *phdr = elf->phdr;
1040 
1041 		for (n = 0; n < elf->e_phnum; n++)
1042 			add_deps_from_segment(elf, phdr[n].p_type,
1043 					      phdr[n].p_vaddr, phdr[n].p_memsz);
1044 	}
1045 }
1046 
1047 static void copy_section_headers(struct ta_elf *elf)
1048 {
1049 	TEE_Result res = TEE_SUCCESS;
1050 	size_t sz = 0;
1051 	size_t offs = 0;
1052 
1053 	if (MUL_OVERFLOW(elf->e_shnum, elf->e_shentsize, &sz))
1054 		err(TEE_ERROR_BAD_FORMAT, "Section headers size overflow");
1055 
1056 	elf->shdr = malloc(sz);
1057 	if (!elf->shdr)
1058 		err(TEE_ERROR_OUT_OF_MEMORY, "malloc");
1059 
1060 	/*
1061 	 * We're assuming that section headers come after the load segments,
1062 	 * but if it's a very small dynamically linked library the section
1063 	 * headers can still end up (partially?) in the first mapped page.
1064 	 */
1065 	if (elf->e_shoff < SMALL_PAGE_SIZE) {
1066 		assert(!elf->is_main);
1067 		offs = MIN(SMALL_PAGE_SIZE - elf->e_shoff, sz);
1068 		memcpy(elf->shdr, (void *)(elf->load_addr + elf->e_shoff),
1069 		       offs);
1070 	}
1071 
1072 	if (offs < sz) {
1073 		res = sys_copy_from_ta_bin((uint8_t *)elf->shdr + offs,
1074 					   sz - offs, elf->handle,
1075 					   elf->e_shoff + offs);
1076 		if (res)
1077 			err(res, "sys_copy_from_ta_bin");
1078 	}
1079 }
1080 
1081 static void close_handle(struct ta_elf *elf)
1082 {
1083 	TEE_Result res = sys_close_ta_bin(elf->handle);
1084 
1085 	if (res)
1086 		err(res, "sys_close_ta_bin");
1087 	elf->handle = -1;
1088 }
1089 
1090 static void clean_elf_load_main(struct ta_elf *elf)
1091 {
1092 	TEE_Result res = TEE_SUCCESS;
1093 
1094 	/*
1095 	 * Clean up from last attempt to load
1096 	 */
1097 	res = sys_unmap(elf->ehdr_addr, SMALL_PAGE_SIZE);
1098 	if (res)
1099 		err(res, "sys_unmap");
1100 
1101 	while (!TAILQ_EMPTY(&elf->segs)) {
1102 		struct segment *seg = TAILQ_FIRST(&elf->segs);
1103 		vaddr_t va = 0;
1104 		size_t num_bytes = 0;
1105 
1106 		va = rounddown(elf->load_addr + seg->vaddr);
1107 		if (seg->remapped_writeable)
1108 			num_bytes = roundup(seg->vaddr + seg->memsz) -
1109 				    rounddown(seg->vaddr);
1110 		else
1111 			num_bytes = seg->memsz;
1112 
1113 		res = sys_unmap(va, num_bytes);
1114 		if (res)
1115 			err(res, "sys_unmap");
1116 
1117 		TAILQ_REMOVE(&elf->segs, seg, link);
1118 		free(seg);
1119 	}
1120 
1121 	free(elf->shdr);
1122 	memset(&elf->is_32bit, 0,
1123 	       (vaddr_t)&elf->uuid - (vaddr_t)&elf->is_32bit);
1124 
1125 	TAILQ_INIT(&elf->segs);
1126 }
1127 
1128 #ifdef ARM64
1129 /*
1130  * Allocates an offset in the TA's Thread Control Block for the TLS segment of
1131  * the @elf module.
1132  */
1133 #define TCB_HEAD_SIZE (2 * sizeof(long))
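/*
 * Note: AArch64 conventionally uses "variant I" TLS, where the thread
 * pointer addresses a two-pointer TCB followed by the modules' TLS blocks,
 * hence the first offset handed out is TCB_HEAD_SIZE.
 */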
1134 static void set_tls_offset(struct ta_elf *elf)
1135 {
1136 	static size_t next_offs = TCB_HEAD_SIZE;
1137 
1138 	if (!elf->tls_start)
1139 		return;
1140 
1141 	/* Module has a TLS segment */
1142 	elf->tls_tcb_offs = next_offs;
1143 	next_offs += elf->tls_memsz;
1144 }
1145 #else
1146 static void set_tls_offset(struct ta_elf *elf __unused) {}
1147 #endif
1148 
1149 static void load_main(struct ta_elf *elf)
1150 {
1151 	init_elf(elf);
1152 	map_segments(elf);
1153 	populate_segments(elf);
1154 	add_dependencies(elf);
1155 	copy_section_headers(elf);
1156 	save_symtab(elf);
1157 	close_handle(elf);
1158 	set_tls_offset(elf);
1159 	parse_property_segment(elf);
1160 	if (elf->bti_enabled)
1161 		ta_elf_add_bti(elf);
1162 
1163 	elf->head = (struct ta_head *)elf->load_addr;
1164 	if (elf->head->depr_entry != UINT64_MAX) {
1165 		/*
1166 		 * Legacy TAs set their entry point in ta_head. For
1167 		 * non-legacy TAs the ELF entry point is used instead,
1168 		 * leaving the ta_head entry point set to UINT64_MAX to
1169 		 * indicate that it's not used.
1170 		 *
1171 		 * NB, everything before the commit a73b5878c89d ("Replace
1172 		 * ta_head.entry with elf entry") is considered a legacy TA
1173 		 * by ldelf.
1174 		 *
1175 		 * Legacy TAs cannot be mapped with shared memory segments
1176 		 * so restart the mapping if it turned out we're loading a
1177 		 * legacy TA.
1178 		 */
1179 
1180 		DMSG("Reloading TA %pUl as legacy TA", (void *)&elf->uuid);
1181 		clean_elf_load_main(elf);
1182 		elf->is_legacy = true;
1183 		init_elf(elf);
1184 		map_segments(elf);
1185 		populate_segments_legacy(elf);
1186 		add_dependencies(elf);
1187 		copy_section_headers(elf);
1188 		save_symtab(elf);
1189 		close_handle(elf);
1190 		elf->head = (struct ta_head *)elf->load_addr;
1191 		/*
1192 		 * Check that the TA is still a legacy TA, if it isn't give
1193 		 * up now since we're likely under attack.
1194 		 */
1195 		if (elf->head->depr_entry == UINT64_MAX)
1196 			err(TEE_ERROR_GENERIC,
1197 			    "TA %pUl was changed on disk to non-legacy",
1198 			    (void *)&elf->uuid);
1199 	}
1200 
1201 }
1202 
1203 void ta_elf_load_main(const TEE_UUID *uuid, uint32_t *is_32bit, uint64_t *sp,
1204 		      uint32_t *ta_flags)
1205 {
1206 	struct ta_elf *elf = queue_elf(uuid);
1207 	vaddr_t va = 0;
1208 	TEE_Result res = TEE_SUCCESS;
1209 
1210 	assert(elf);
1211 	elf->is_main = true;
1212 
1213 	load_main(elf);
1214 
1215 	*is_32bit = elf->is_32bit;
1216 	res = sys_map_zi(elf->head->stack_size, 0, &va, 0, 0);
1217 	if (res)
1218 		err(res, "sys_map_zi stack");
1219 
1220 	if (elf->head->flags & ~TA_FLAGS_MASK)
1221 		err(TEE_ERROR_BAD_FORMAT, "Invalid TA flag(s) %#"PRIx32,
1222 		    elf->head->flags & ~TA_FLAGS_MASK);
1223 
1224 	*ta_flags = elf->head->flags;
1225 	*sp = va + elf->head->stack_size;
1226 	ta_stack = va;
1227 	ta_stack_size = elf->head->stack_size;
1228 }
1229 
1230 void ta_elf_finalize_load_main(uint64_t *entry)
1231 {
1232 	struct ta_elf *elf = TAILQ_FIRST(&main_elf_queue);
1233 	TEE_Result res = TEE_SUCCESS;
1234 
1235 	assert(elf->is_main);
1236 
1237 	res = ta_elf_set_init_fini_info_compat(elf->is_32bit);
1238 	if (res)
1239 		err(res, "ta_elf_set_init_fini_info_compat");
1240 	res = ta_elf_set_elf_phdr_info(elf->is_32bit);
1241 	if (res)
1242 		err(res, "ta_elf_set_elf_phdr_info");
1243 
1244 	if (elf->is_legacy)
1245 		*entry = elf->head->depr_entry;
1246 	else
1247 		*entry = elf->e_entry + elf->load_addr;
1248 }
1249 
1250 
1251 void ta_elf_load_dependency(struct ta_elf *elf, bool is_32bit)
1252 {
1253 	if (elf->is_main)
1254 		return;
1255 
1256 	init_elf(elf);
1257 	if (elf->is_32bit != is_32bit)
1258 		err(TEE_ERROR_BAD_FORMAT, "ELF %pUl is %sbit (expected %sbit)",
1259 		    (void *)&elf->uuid, elf->is_32bit ? "32" : "64",
1260 		    is_32bit ? "32" : "64");
1261 
1262 	map_segments(elf);
1263 	populate_segments(elf);
1264 	add_dependencies(elf);
1265 	copy_section_headers(elf);
1266 	save_symtab(elf);
1267 	close_handle(elf);
1268 	set_tls_offset(elf);
1269 	parse_property_segment(elf);
1270 	if (elf->bti_enabled)
1271 		ta_elf_add_bti(elf);
1272 }
1273 
1274 void ta_elf_finalize_mappings(struct ta_elf *elf)
1275 {
1276 	TEE_Result res = TEE_SUCCESS;
1277 	struct segment *seg = NULL;
1278 
1279 	if (!elf->is_legacy)
1280 		return;
1281 
1282 	TAILQ_FOREACH(seg, &elf->segs, link) {
1283 		vaddr_t va = elf->load_addr + seg->vaddr;
1284 		uint32_t flags =  0;
1285 
1286 		if (seg->flags & PF_W)
1287 			flags |= LDELF_MAP_FLAG_WRITEABLE;
1288 		if (seg->flags & PF_X)
1289 			flags |= LDELF_MAP_FLAG_EXECUTABLE;
1290 
1291 		res = sys_set_prot(va, seg->memsz, flags);
1292 		if (res)
1293 			err(res, "sys_set_prot");
1294 	}
1295 }
1296 
1297 static void __printf(3, 4) print_wrapper(void *pctx, print_func_t print_func,
1298 					 const char *fmt, ...)
1299 {
1300 	va_list ap;
1301 
1302 	va_start(ap, fmt);
1303 	print_func(pctx, fmt, ap);
1304 	va_end(ap);
1305 }
1306 
1307 static void print_seg(void *pctx, print_func_t print_func,
1308 		      size_t idx __maybe_unused, int elf_idx __maybe_unused,
1309 		      vaddr_t va __maybe_unused, paddr_t pa __maybe_unused,
1310 		      size_t sz __maybe_unused, uint32_t flags)
1311 {
1312 	int rc __maybe_unused = 0;
1313 	int width __maybe_unused = 8;
1314 	char desc[14] __maybe_unused = "";
1315 	char flags_str[] __maybe_unused = "----";
1316 
1317 	if (elf_idx > -1) {
1318 		rc = snprintf(desc, sizeof(desc), " [%d]", elf_idx);
1319 		assert(rc >= 0);
1320 	} else {
1321 		if (flags & DUMP_MAP_EPHEM) {
1322 			rc = snprintf(desc, sizeof(desc), " (param)");
1323 			assert(rc >= 0);
1324 		}
1325 		if (flags & DUMP_MAP_LDELF) {
1326 			rc = snprintf(desc, sizeof(desc), " (ldelf)");
1327 			assert(rc >= 0);
1328 		}
1329 		if (va == ta_stack) {
1330 			rc = snprintf(desc, sizeof(desc), " (stack)");
1331 			assert(rc >= 0);
1332 		}
1333 	}
1334 
1335 	if (flags & DUMP_MAP_READ)
1336 		flags_str[0] = 'r';
1337 	if (flags & DUMP_MAP_WRITE)
1338 		flags_str[1] = 'w';
1339 	if (flags & DUMP_MAP_EXEC)
1340 		flags_str[2] = 'x';
1341 	if (flags & DUMP_MAP_SECURE)
1342 		flags_str[3] = 's';
1343 
1344 	print_wrapper(pctx, print_func,
1345 		      "region %2zu: va 0x%0*"PRIxVA" pa 0x%0*"PRIxPA" size 0x%06zx flags %s%s\n",
1346 		      idx, width, va, width, pa, sz, flags_str, desc);
1347 }
1348 
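/*
 * Walk all segments of all ELFs in @elf_queue in order of increasing load
 * address. *elf, *seg and *elf_idx hold the iteration state; returns false
 * when there is nothing more to visit.
 */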
1349 static bool get_next_in_order(struct ta_elf_queue *elf_queue,
1350 			      struct ta_elf **elf, struct segment **seg,
1351 			      size_t *elf_idx)
1352 {
1353 	struct ta_elf *e = NULL;
1354 	struct segment *s = NULL;
1355 	size_t idx = 0;
1356 	vaddr_t va = 0;
1357 	struct ta_elf *e2 = NULL;
1358 	size_t i2 = 0;
1359 
1360 	assert(elf && seg && elf_idx);
1361 	e = *elf;
1362 	s = *seg;
1363 	assert((e == NULL && s == NULL) || (e != NULL && s != NULL));
1364 
1365 	if (s) {
1366 		s = TAILQ_NEXT(s, link);
1367 		if (s) {
1368 			*seg = s;
1369 			return true;
1370 		}
1371 	}
1372 
1373 	if (e)
1374 		va = e->load_addr;
1375 
1376 	/* Find the ELF with next load address */
1377 	e = NULL;
1378 	TAILQ_FOREACH(e2, elf_queue, link) {
1379 		if (e2->load_addr > va) {
1380 			if (!e || e2->load_addr < e->load_addr) {
1381 				e = e2;
1382 				idx = i2;
1383 			}
1384 		}
1385 		i2++;
1386 	}
1387 	if (!e)
1388 		return false;
1389 
1390 	*elf = e;
1391 	*seg = TAILQ_FIRST(&e->segs);
1392 	*elf_idx = idx;
1393 	return true;
1394 }
1395 
1396 void ta_elf_print_mappings(void *pctx, print_func_t print_func,
1397 			   struct ta_elf_queue *elf_queue, size_t num_maps,
1398 			   struct dump_map *maps, vaddr_t mpool_base)
1399 {
1400 	struct segment *seg = NULL;
1401 	struct ta_elf *elf = NULL;
1402 	size_t elf_idx = 0;
1403 	size_t idx = 0;
1404 	size_t map_idx = 0;
1405 
1406 	/*
1407 	 * Loop over all segments and maps, printing virtual address in
1408 	 * order. Segment has priority if the virtual address is present
1409 	 * in both map and segment.
1410 	 */
1411 	get_next_in_order(elf_queue, &elf, &seg, &elf_idx);
1412 	while (true) {
1413 		vaddr_t va = -1;
1414 		size_t sz = 0;
1415 		uint32_t flags = DUMP_MAP_SECURE;
1416 		size_t offs = 0;
1417 
1418 		if (seg) {
1419 			va = rounddown(seg->vaddr + elf->load_addr);
1420 			sz = roundup(seg->vaddr + seg->memsz) -
1421 				     rounddown(seg->vaddr);
1422 		}
1423 
1424 		while (map_idx < num_maps && maps[map_idx].va <= va) {
1425 			uint32_t f = 0;
1426 
1427 			/* If there's a match, it should be the same map */
1428 			if (maps[map_idx].va == va) {
1429 				/*
1430 				 * In shared libraries the first page is
1431 				 * mapped separately with the rest of that
1432 				 * segment following back to back in a
1433 				 * separate entry.
1434 				 */
1435 				if (map_idx + 1 < num_maps &&
1436 				    maps[map_idx].sz == SMALL_PAGE_SIZE) {
1437 					vaddr_t next_va = maps[map_idx].va +
1438 							  maps[map_idx].sz;
1439 					size_t comb_sz = maps[map_idx].sz +
1440 							 maps[map_idx + 1].sz;
1441 
1442 					if (next_va == maps[map_idx + 1].va &&
1443 					    comb_sz == sz &&
1444 					    maps[map_idx].flags ==
1445 					    maps[map_idx + 1].flags) {
1446 						/* Skip this and next entry */
1447 						map_idx += 2;
1448 						continue;
1449 					}
1450 				}
1451 				assert(maps[map_idx].sz == sz);
1452 			} else if (maps[map_idx].va < va) {
1453 				if (maps[map_idx].va == mpool_base)
1454 					f |= DUMP_MAP_LDELF;
1455 				print_seg(pctx, print_func, idx, -1,
1456 					  maps[map_idx].va, maps[map_idx].pa,
1457 					  maps[map_idx].sz,
1458 					  maps[map_idx].flags | f);
1459 				idx++;
1460 			}
1461 			map_idx++;
1462 		}
1463 
1464 		if (!seg)
1465 			break;
1466 
1467 		offs = rounddown(seg->offset);
1468 		if (seg->flags & PF_R)
1469 			flags |= DUMP_MAP_READ;
1470 		if (seg->flags & PF_W)
1471 			flags |= DUMP_MAP_WRITE;
1472 		if (seg->flags & PF_X)
1473 			flags |= DUMP_MAP_EXEC;
1474 
1475 		print_seg(pctx, print_func, idx, elf_idx, va, offs, sz, flags);
1476 		idx++;
1477 
1478 		if (!get_next_in_order(elf_queue, &elf, &seg, &elf_idx))
1479 			seg = NULL;
1480 	}
1481 
1482 	elf_idx = 0;
1483 	TAILQ_FOREACH(elf, elf_queue, link) {
1484 		print_wrapper(pctx, print_func,
1485 			      " [%zu] %pUl @ 0x%0*"PRIxVA"\n",
1486 			      elf_idx, (void *)&elf->uuid, 8, elf->load_addr);
1487 		elf_idx++;
1488 	}
1489 }
1490 
1491 #ifdef CFG_UNWIND
1492 /* Called by libunw */
1493 bool find_exidx(vaddr_t addr, vaddr_t *idx_start, vaddr_t *idx_end)
1494 {
1495 	struct segment *seg = NULL;
1496 	struct ta_elf *elf = NULL;
1497 	vaddr_t a = 0;
1498 
1499 	TAILQ_FOREACH(elf, &main_elf_queue, link) {
1500 		if (addr < elf->load_addr)
1501 			continue;
1502 		a = addr - elf->load_addr;
1503 		TAILQ_FOREACH(seg, &elf->segs, link) {
1504 			if (a < seg->vaddr)
1505 				continue;
1506 			if (a - seg->vaddr < seg->filesz) {
1507 				*idx_start = elf->exidx_start + elf->load_addr;
1508 				*idx_end = elf->exidx_start + elf->load_addr +
1509 					   elf->exidx_size;
1510 				return true;
1511 			}
1512 		}
1513 	}
1514 
1515 	return false;
1516 }
1517 
1518 void ta_elf_stack_trace_a32(uint32_t regs[16])
1519 {
1520 	struct unwind_state_arm32 state = { };
1521 
1522 	memcpy(state.registers, regs, sizeof(state.registers));
1523 	print_stack_arm32(&state, ta_stack, ta_stack_size);
1524 }
1525 
1526 void ta_elf_stack_trace_a64(uint64_t fp, uint64_t sp, uint64_t pc)
1527 {
1528 	struct unwind_state_arm64 state = { .fp = fp, .sp = sp, .pc = pc };
1529 
1530 	print_stack_arm64(&state, ta_stack, ta_stack_size);
1531 }
1532 #endif
1533 
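/*
 * Load the library @uuid (unless it is already in the queue) together with
 * any libraries it depends on, then relocate the new modules, finalize
 * their mappings and refresh the init/fini and phdr info exported to the TA.
 */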
1534 TEE_Result ta_elf_add_library(const TEE_UUID *uuid)
1535 {
1536 	TEE_Result res = TEE_ERROR_GENERIC;
1537 	struct ta_elf *ta = TAILQ_FIRST(&main_elf_queue);
1538 	struct ta_elf *lib = ta_elf_find_elf(uuid);
1539 	struct ta_elf *elf = NULL;
1540 
1541 	if (lib)
1542 		return TEE_SUCCESS; /* Already mapped */
1543 
1544 	lib = queue_elf_helper(uuid);
1545 	if (!lib)
1546 		return TEE_ERROR_OUT_OF_MEMORY;
1547 
1548 	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
1549 		ta_elf_load_dependency(elf, ta->is_32bit);
1550 
1551 	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link)) {
1552 		ta_elf_relocate(elf);
1553 		ta_elf_finalize_mappings(elf);
1554 	}
1555 
1556 	for (elf = lib; elf; elf = TAILQ_NEXT(elf, link))
1557 		DMSG("ELF (%pUl) at %#"PRIxVA,
1558 		     (void *)&elf->uuid, elf->load_addr);
1559 
1560 	res = ta_elf_set_init_fini_info_compat(ta->is_32bit);
1561 	if (res)
1562 		return res;
1563 
1564 	return ta_elf_set_elf_phdr_info(ta->is_32bit);
1565 }
1566 
1567 /* Get address/size of .init_array and .fini_array from the dynamic segment */
1568 static void get_init_fini_array(struct ta_elf *elf, unsigned int type,
1569 				vaddr_t addr, size_t memsz, vaddr_t *init,
1570 				size_t *init_cnt, vaddr_t *fini,
1571 				size_t *fini_cnt)
1572 {
1573 	size_t addrsz = 0;
1574 	size_t dyn_entsize = 0;
1575 	size_t num_dyns = 0;
1576 	size_t n = 0;
1577 	unsigned int tag = 0;
1578 	size_t val = 0;
1579 
1580 	assert(type == PT_DYNAMIC);
1581 
1582 	check_phdr_in_range(elf, type, addr, memsz);
1583 
1584 	if (elf->is_32bit) {
1585 		dyn_entsize = sizeof(Elf32_Dyn);
1586 		addrsz = 4;
1587 	} else {
1588 		dyn_entsize = sizeof(Elf64_Dyn);
1589 		addrsz = 8;
1590 	}
1591 
1592 	assert(!(memsz % dyn_entsize));
1593 	num_dyns = memsz / dyn_entsize;
1594 
1595 	for (n = 0; n < num_dyns; n++) {
1596 		read_dyn(elf, addr, n, &tag, &val);
1597 		if (tag == DT_INIT_ARRAY)
1598 			*init = val + elf->load_addr;
1599 		else if (tag == DT_FINI_ARRAY)
1600 			*fini = val + elf->load_addr;
1601 		else if (tag == DT_INIT_ARRAYSZ)
1602 			*init_cnt = val / addrsz;
1603 		else if (tag == DT_FINI_ARRAYSZ)
1604 			*fini_cnt = val / addrsz;
1605 	}
1606 }
1607 
1608 /* Get address/size of .init_array and .fini_array in @elf (if present) */
1609 static void elf_get_init_fini_array(struct ta_elf *elf, vaddr_t *init,
1610 				    size_t *init_cnt, vaddr_t *fini,
1611 				    size_t *fini_cnt)
1612 {
1613 	size_t n = 0;
1614 
1615 	if (elf->is_32bit) {
1616 		Elf32_Phdr *phdr = elf->phdr;
1617 
1618 		for (n = 0; n < elf->e_phnum; n++) {
1619 			if (phdr[n].p_type == PT_DYNAMIC) {
1620 				get_init_fini_array(elf, phdr[n].p_type,
1621 						    phdr[n].p_vaddr,
1622 						    phdr[n].p_memsz,
1623 						    init, init_cnt, fini,
1624 						    fini_cnt);
1625 				return;
1626 			}
1627 		}
1628 	} else {
1629 		Elf64_Phdr *phdr = elf->phdr;
1630 
1631 		for (n = 0; n < elf->e_phnum; n++) {
1632 			if (phdr[n].p_type == PT_DYNAMIC) {
1633 				get_init_fini_array(elf, phdr[n].p_type,
1634 						    phdr[n].p_vaddr,
1635 						    phdr[n].p_memsz,
1636 						    init, init_cnt, fini,
1637 						    fini_cnt);
1638 				return;
1639 			}
1640 		}
1641 	}
1642 }
1643 
1644 /*
1645  * Deprecated by __elf_phdr_info below. Kept for compatibility.
1646  *
1647  * Pointers to ELF initialization and finalization functions are extracted by
1648  * ldelf and stored on the TA heap, then exported to the TA via the global
1649  * symbol __init_fini_info. libutee in OP-TEE 3.9.0 uses this mechanism.
1650  */
1651 
1652 struct __init_fini {
1653 	uint32_t flags;
1654 	uint16_t init_size;
1655 	uint16_t fini_size;
1656 
1657 	void (**init)(void); /* @init_size entries */
1658 	void (**fini)(void); /* @fini_size entries */
1659 };
1660 
1661 #define __IFS_VALID            BIT(0)
1662 #define __IFS_INIT_HAS_RUN     BIT(1)
1663 #define __IFS_FINI_HAS_RUN     BIT(2)
1664 
1665 struct __init_fini_info {
1666 	uint32_t reserved;
1667 	uint16_t size;
1668 	uint16_t pad;
1669 	struct __init_fini *ifs; /* @size entries */
1670 };
1671 
1672 /* 32-bit variants for a 64-bit ldelf to access a 32-bit TA */
1673 
1674 struct __init_fini32 {
1675 	uint32_t flags;
1676 	uint16_t init_size;
1677 	uint16_t fini_size;
1678 	uint32_t init;
1679 	uint32_t fini;
1680 };
1681 
1682 struct __init_fini_info32 {
1683 	uint32_t reserved;
1684 	uint16_t size;
1685 	uint16_t pad;
1686 	uint32_t ifs;
1687 };
1688 
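/*
 * Grow the ifs array of the TA's __init_fini_info (32- or 64-bit layout)
 * to @cnt entries, zero-initializing any newly added entries.
 */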
1689 static TEE_Result realloc_ifs(vaddr_t va, size_t cnt, bool is_32bit)
1690 {
1691 	struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va;
1692 	struct __init_fini_info *info = (struct __init_fini_info *)va;
1693 	struct __init_fini32 *ifs32 = NULL;
1694 	struct __init_fini *ifs = NULL;
1695 	size_t prev_cnt = 0;
1696 	void *ptr = NULL;
1697 
1698 	if (is_32bit) {
1699 		ptr = (void *)(vaddr_t)info32->ifs;
1700 		ptr = realloc(ptr, cnt * sizeof(struct __init_fini32));
1701 		if (!ptr)
1702 			return TEE_ERROR_OUT_OF_MEMORY;
1703 		ifs32 = ptr;
1704 		prev_cnt = info32->size;
1705 		if (cnt > prev_cnt)
1706 			memset(ifs32 + prev_cnt, 0,
1707 			       (cnt - prev_cnt) * sizeof(*ifs32));
1708 		info32->ifs = (uint32_t)(vaddr_t)ifs32;
1709 		info32->size = cnt;
1710 	} else {
1711 		ptr = realloc(info->ifs, cnt * sizeof(struct __init_fini));
1712 		if (!ptr)
1713 			return TEE_ERROR_OUT_OF_MEMORY;
1714 		ifs = ptr;
1715 		prev_cnt = info->size;
1716 		if (cnt > prev_cnt)
1717 			memset(ifs + prev_cnt, 0,
1718 			       (cnt - prev_cnt) * sizeof(*ifs));
1719 		info->ifs = ifs;
1720 		info->size = cnt;
1721 	}
1722 
1723 	return TEE_SUCCESS;
1724 }
1725 
1726 static void fill_ifs(vaddr_t va, size_t idx, struct ta_elf *elf, bool is_32bit)
1727 {
1728 	struct __init_fini_info32 *info32 = (struct __init_fini_info32 *)va;
1729 	struct __init_fini_info *info = (struct __init_fini_info *)va;
1730 	struct __init_fini32 *ifs32 = NULL;
1731 	struct __init_fini *ifs = NULL;
1732 	size_t init_cnt = 0;
1733 	size_t fini_cnt = 0;
1734 	vaddr_t init = 0;
1735 	vaddr_t fini = 0;
1736 
1737 	if (is_32bit) {
1738 		assert(idx < info32->size);
1739 		ifs32 = &((struct __init_fini32 *)(vaddr_t)info32->ifs)[idx];
1740 
1741 		if (ifs32->flags & __IFS_VALID)
1742 			return;
1743 
1744 		elf_get_init_fini_array(elf, &init, &init_cnt, &fini,
1745 					&fini_cnt);
1746 
1747 		ifs32->init = (uint32_t)init;
1748 		ifs32->init_size = init_cnt;
1749 
1750 		ifs32->fini = (uint32_t)fini;
1751 		ifs32->fini_size = fini_cnt;
1752 
1753 		ifs32->flags |= __IFS_VALID;
1754 	} else {
1755 		assert(idx < info->size);
1756 		ifs = &info->ifs[idx];
1757 
1758 		if (ifs->flags & __IFS_VALID)
1759 			return;
1760 
1761 		elf_get_init_fini_array(elf, &init, &init_cnt, &fini,
1762 					&fini_cnt);
1763 
1764 		ifs->init = (void (**)(void))init;
1765 		ifs->init_size = init_cnt;
1766 
1767 		ifs->fini = (void (**)(void))fini;
1768 		ifs->fini_size = fini_cnt;
1769 
1770 		ifs->flags |= __IFS_VALID;
1771 	}
1772 }
1773 
1774 /*
1775  * Set or update __init_fini_info in the TA with information from the ELF
1776  * queue
1777  */
1778 TEE_Result ta_elf_set_init_fini_info_compat(bool is_32bit)
1779 {
1780 	struct __init_fini_info *info = NULL;
1781 	TEE_Result res = TEE_SUCCESS;
1782 	struct ta_elf *elf = NULL;
1783 	vaddr_t info_va = 0;
1784 	size_t cnt = 0;
1785 
1786 	res = ta_elf_resolve_sym("__init_fini_info", &info_va, NULL, NULL);
1787 	if (res) {
1788 		if (res == TEE_ERROR_ITEM_NOT_FOUND) {
1789 			/*
1790 			 * Not an error, only TAs linked against libutee from
1791 			 * OP-TEE 3.9.0 have this symbol.
1792 			 */
1793 			return TEE_SUCCESS;
1794 		}
1795 		return res;
1796 	}
1797 	assert(info_va);
1798 
1799 	info = (struct __init_fini_info *)info_va;
1800 	if (info->reserved)
1801 		return TEE_ERROR_NOT_SUPPORTED;
1802 
1803 	TAILQ_FOREACH(elf, &main_elf_queue, link)
1804 		cnt++;
1805 
1806 	/* Queue has at least one file (main) */
1807 	assert(cnt);
1808 
1809 	res = realloc_ifs(info_va, cnt, is_32bit);
1810 	if (res)
1811 		goto err;
1812 
1813 	cnt = 0;
1814 	TAILQ_FOREACH(elf, &main_elf_queue, link) {
1815 		fill_ifs(info_va, cnt, elf, is_32bit);
1816 		cnt++;
1817 	}
1818 
1819 	return TEE_SUCCESS;
1820 err:
1821 	free(info);
1822 	return res;
1823 }
1824 
1825 static TEE_Result realloc_elf_phdr_info(vaddr_t va, size_t cnt, bool is_32bit)
1826 {
1827 	struct __elf_phdr_info32 *info32 = (struct __elf_phdr_info32 *)va;
1828 	struct __elf_phdr_info *info = (struct __elf_phdr_info *)va;
1829 	struct dl_phdr_info32 *dlpi32 = NULL;
1830 	struct dl_phdr_info *dlpi = NULL;
1831 	size_t prev_cnt = 0;
1832 	void *ptr = NULL;
1833 
1834 	if (is_32bit) {
1835 		ptr = (void *)(vaddr_t)info32->dlpi;
1836 		ptr = realloc(ptr, cnt * sizeof(*dlpi32));
1837 		if (!ptr)
1838 			return TEE_ERROR_OUT_OF_MEMORY;
1839 		dlpi32 = ptr;
1840 		prev_cnt = info32->count;
1841 		if (cnt > prev_cnt)
1842 			memset(dlpi32 + prev_cnt, 0,
1843 			       (cnt - prev_cnt) * sizeof(*dlpi32));
1844 		info32->dlpi = (uint32_t)(vaddr_t)dlpi32;
1845 		info32->count = cnt;
1846 	} else {
1847 		ptr = realloc(info->dlpi, cnt * sizeof(*dlpi));
1848 		if (!ptr)
1849 			return TEE_ERROR_OUT_OF_MEMORY;
1850 		dlpi = ptr;
1851 		prev_cnt = info->count;
1852 		if (cnt > prev_cnt)
1853 			memset(dlpi + prev_cnt, 0,
1854 			       (cnt - prev_cnt) * sizeof(*dlpi));
1855 		info->dlpi = dlpi;
1856 		info->count = cnt;
1857 	}
1858 
1859 	return TEE_SUCCESS;
1860 }
1861 
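/*
 * Fill entry @idx of the TA's __elf_phdr_info array with the load address,
 * soname, program headers and TLS details of @elf.
 */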
1862 static void fill_elf_phdr_info(vaddr_t va, size_t idx, struct ta_elf *elf,
1863 			       bool is_32bit)
1864 {
1865 	struct __elf_phdr_info32 *info32 = (struct __elf_phdr_info32 *)va;
1866 	struct __elf_phdr_info *info = (struct __elf_phdr_info *)va;
1867 	struct dl_phdr_info32 *dlpi32 = NULL;
1868 	struct dl_phdr_info *dlpi = NULL;
1869 
1870 	if (is_32bit) {
1871 		assert(idx < info32->count);
1872 		dlpi32 = (struct dl_phdr_info32 *)(vaddr_t)info32->dlpi + idx;
1873 
1874 		dlpi32->dlpi_addr = elf->load_addr;
1875 		if (elf->soname)
1876 			dlpi32->dlpi_name = (vaddr_t)elf->soname;
1877 		else
1878 			dlpi32->dlpi_name = (vaddr_t)&info32->zero;
1879 		dlpi32->dlpi_phdr = (vaddr_t)elf->phdr;
1880 		dlpi32->dlpi_phnum = elf->e_phnum;
1881 		dlpi32->dlpi_adds = 1; /* No unloading on dlclose() currently */
1882 		dlpi32->dlpi_subs = 0; /* No unloading on dlclose() currently */
1883 		dlpi32->dlpi_tls_modid = elf->tls_mod_id;
1884 		dlpi32->dlpi_tls_data = elf->tls_start;
1885 	} else {
1886 		assert(idx < info->count);
1887 		dlpi = info->dlpi + idx;
1888 
1889 		dlpi->dlpi_addr = elf->load_addr;
1890 		if (elf->soname)
1891 			dlpi->dlpi_name = elf->soname;
1892 		else
1893 			dlpi->dlpi_name = &info32->zero;
1894 		dlpi->dlpi_phdr = elf->phdr;
1895 		dlpi->dlpi_phnum = elf->e_phnum;
1896 		dlpi->dlpi_adds = 1; /* No unloading on dlclose() currently */
1897 		dlpi->dlpi_subs = 0; /* No unloading on dlclose() currently */
1898 		dlpi->dlpi_tls_modid = elf->tls_mod_id;
1899 		dlpi->dlpi_tls_data = (void *)elf->tls_start;
1900 	}
1901 }
1902 
1903 /* Set or update __elf_phdr_info in the TA with information from the ELF queue */
1904 TEE_Result ta_elf_set_elf_phdr_info(bool is_32bit)
1905 {
1906 	struct __elf_phdr_info *info = NULL;
1907 	TEE_Result res = TEE_SUCCESS;
1908 	struct ta_elf *elf = NULL;
1909 	vaddr_t info_va = 0;
1910 	size_t cnt = 0;
1911 
1912 	res = ta_elf_resolve_sym("__elf_phdr_info", &info_va, NULL, NULL);
1913 	if (res) {
1914 		if (res == TEE_ERROR_ITEM_NOT_FOUND) {
1915 			/* Older TA */
1916 			return TEE_SUCCESS;
1917 		}
1918 		return res;
1919 	}
1920 	assert(info_va);
1921 
1922 	info = (struct __elf_phdr_info *)info_va;
1923 	if (info->reserved)
1924 		return TEE_ERROR_NOT_SUPPORTED;
1925 
1926 	TAILQ_FOREACH(elf, &main_elf_queue, link)
1927 		cnt++;
1928 
1929 	res = realloc_elf_phdr_info(info_va, cnt, is_32bit);
1930 	if (res)
1931 		return res;
1932 
1933 	cnt = 0;
1934 	TAILQ_FOREACH(elf, &main_elf_queue, link) {
1935 		fill_elf_phdr_info(info_va, cnt, elf, is_32bit);
1936 		cnt++;
1937 	}
1938 
1939 	return TEE_SUCCESS;
1940 }
1941