// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015-2021, Linaro Limited
 */

#include <arm.h>
#include <assert.h>
#include <compiler.h>
#include <config.h>
#include <console.h>
#include <crypto/crypto.h>
#include <drivers/gic.h>
#include <initcall.h>
#include <inttypes.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/tee_misc.h>
#include <kernel/thread.h>
#include <kernel/tpm.h>
#include <libfdt.h>
#include <malloc.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/fobj.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <sm/psci.h>
#include <stdio.h>
#include <trace.h>
#include <utee_defines.h>
#include <util.h>

#include <platform_config.h>

#if !defined(CFG_WITH_ARM_TRUSTED_FW)
#include <sm/sm.h>
#endif

#if defined(CFG_WITH_VFP)
#include <kernel/vfp.h>
#endif

/*
 * In this file we're using unsigned long to represent physical pointers as
 * they are received in a single register when OP-TEE is initially entered.
 * This limits 32-bit systems to only make use of the lower 32 bits of a
 * physical address for initial parameters.
 *
 * 64-bit systems on the other hand can use full 64-bit physical pointers.
 */
#define PADDR_INVALID		ULONG_MAX
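/*
 * PADDR_INVALID is used below to flag that no normal world entry address
 * was supplied, which is the case when an external secure monitor (such as
 * TF-A with CFG_WITH_ARM_TRUSTED_FW=y) handles the world switch instead of
 * OP-TEE's own secure monitor.
 */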

#if defined(CFG_BOOT_SECONDARY_REQUEST)
struct ns_entry_context {
	uintptr_t entry_point;
	uintptr_t context_id;
};
struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
#endif

#ifdef CFG_BOOT_SYNC_CPU
/*
 * Array used when booting to synchronize the CPUs.
 * When 0, the CPU has not started.
 * When 1, it has started.
 */
uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
DECLARE_KEEP_PAGER(sem_cpu_sync);
#endif

#ifdef CFG_DT
struct dt_descriptor {
	void *blob;
#ifdef _CFG_USE_DTB_OVERLAY
	int frag_id;
#endif
};

static struct dt_descriptor external_dt __nex_bss;
#endif

#ifdef CFG_SECONDARY_INIT_CNTFRQ
static uint32_t cntfrq;
#endif

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void plat_primary_init_early(void)
{
}
DECLARE_KEEP_PAGER(plat_primary_init_early);

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void main_init_gic(void)
{
}

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void main_secondary_init_gic(void)
{
}

/* May be overridden in plat-$(PLATFORM)/main.c */
__weak unsigned long plat_get_aslr_seed(void)
{
	DMSG("Warning: no ASLR seed");

	return 0;
}

#if defined(CFG_WITH_ARM_TRUSTED_FW)
void init_sec_mon(unsigned long nsec_entry __maybe_unused)
{
	assert(nsec_entry == PADDR_INVALID);
	/* Do nothing as we don't have a secure monitor */
}
#else
/* May be overridden in plat-$(PLATFORM)/main.c */
__weak void init_sec_mon(unsigned long nsec_entry)
{
	struct sm_nsec_ctx *nsec_ctx;

	assert(nsec_entry != PADDR_INVALID);

	/* Initialize secure monitor */
	nsec_ctx = sm_get_nsec_ctx();
	nsec_ctx->mon_lr = nsec_entry;
	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
	if (nsec_entry & 1)
		nsec_ctx->mon_spsr |= CPSR_T;
}
#endif

#if defined(CFG_WITH_ARM_TRUSTED_FW)
static void init_vfp_nsec(void)
{
}
#else
static void init_vfp_nsec(void)
{
	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
}
#endif

#if defined(CFG_WITH_VFP)

#ifdef ARM32
static void init_vfp_sec(void)
{
	uint32_t cpacr = read_cpacr();

	/*
	 * Enable Advanced SIMD functionality.
	 * Enable use of D16-D31 of the Floating-point Extension register
	 * file.
	 */
	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
	/*
	 * Enable usage of CP10 and CP11 (SIMD/VFP) (both kernel and user
	 * mode).
	 */
	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
	write_cpacr(cpacr);
}
#endif /* ARM32 */

#ifdef ARM64
static void init_vfp_sec(void)
{
	/* Not using VFP until thread_kernel_enable_vfp() */
	vfp_disable();
}
#endif /* ARM64 */

#else /* CFG_WITH_VFP */

static void init_vfp_sec(void)
{
	/* Not using VFP */
}
#endif

#ifdef CFG_SECONDARY_INIT_CNTFRQ
static void primary_save_cntfrq(void)
{
	assert(cntfrq == 0);

	/*
	 * CNTFRQ should be initialized on the primary CPU by a
	 * previous boot stage
	 */
	cntfrq = read_cntfrq();
}

static void secondary_init_cntfrq(void)
{
	assert(cntfrq != 0);
	write_cntfrq(cntfrq);
}
#else /* CFG_SECONDARY_INIT_CNTFRQ */
static void primary_save_cntfrq(void)
{
}

static void secondary_init_cntfrq(void)
{
}
#endif

#ifdef CFG_CORE_SANITIZE_KADDRESS
static void init_run_constructors(void)
{
	const vaddr_t *ctor;

	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
		((void (*)(void))(*ctor))();
}

static void init_asan(void)
{

	/*
	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
	 * Since all the values needed to calculate CFG_ASAN_SHADOW_OFFSET
	 * aren't available to make, we have to calculate it in advance and
	 * hard code it into the platform conf.mk. Here, where we have all
	 * the needed values, we double check that the compiler was supplied
	 * the correct value.
	 */

#define __ASAN_SHADOW_START \
	ROUNDUP(TEE_RAM_VA_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
#define __CFG_ASAN_SHADOW_OFFSET \
	(__ASAN_SHADOW_START - (TEE_RAM_VA_START / 8))
	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
#undef __ASAN_SHADOW_START
#undef __CFG_ASAN_SHADOW_OFFSET
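
	/*
	 * For reference, the standard ASan address-to-shadow mapping is
	 * shadow(addr) = (addr >> 3) + CFG_ASAN_SHADOW_OFFSET, so with the
	 * offset checked above the shadow bytes for TEE_RAM_VA_START start
	 * at __asan_shadow_start (illustrative formula, not used directly
	 * by this code).
	 */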

	/*
	 * Assign area covered by the shadow area, everything from start up
	 * to the beginning of the shadow area.
	 */
	asan_set_shadowed((void *)TEE_TEXT_VA_START, &__asan_shadow_start);

	/*
	 * Add access to areas that aren't opened automatically by a
	 * constructor.
	 */
	asan_tag_access(&__ctor_list, &__ctor_end);
	asan_tag_access(__rodata_start, __rodata_end);
#ifdef CFG_WITH_PAGER
	asan_tag_access(__pageable_start, __pageable_end);
#endif /*CFG_WITH_PAGER*/
	asan_tag_access(__nozi_start, __nozi_end);
	asan_tag_access(__exidx_start, __exidx_end);
	asan_tag_access(__extab_start, __extab_end);

	init_run_constructors();

	/* Everything is tagged correctly, let's start address sanitizing. */
	asan_start();
}
#else /*CFG_CORE_SANITIZE_KADDRESS*/
static void init_asan(void)
{
}
#endif /*CFG_CORE_SANITIZE_KADDRESS*/

#ifdef CFG_WITH_PAGER

#ifdef CFG_CORE_SANITIZE_KADDRESS
static void carve_out_asan_mem(tee_mm_pool_t *pool)
{
	const size_t s = pool->hi - pool->lo;
	tee_mm_entry_t *mm;
	paddr_t apa = ASAN_MAP_PA;
	size_t asz = ASAN_MAP_SZ;

	if (core_is_buffer_outside(apa, asz, pool->lo, s))
		return;

	/* Reserve the shadow area */
	if (!core_is_buffer_inside(apa, asz, pool->lo, s)) {
		if (apa < pool->lo) {
			/*
			 * ASAN buffer is overlapping with the beginning of
			 * the pool.
			 */
			asz -= pool->lo - apa;
			apa = pool->lo;
		} else {
			/*
			 * ASAN buffer is overlapping with the end of the
			 * pool.
			 */
			asz = pool->hi - apa;
		}
	}
	mm = tee_mm_alloc2(pool, apa, asz);
	assert(mm);
}
#else
static void carve_out_asan_mem(tee_mm_pool_t *pool __unused)
{
}
#endif

static void print_pager_pool_size(void)
{
	struct tee_pager_stats __maybe_unused stats;

	tee_pager_get_stats(&stats);
	IMSG("Pager pool size: %zukB",
		stats.npages_all * SMALL_PAGE_SIZE / 1024);
}

static void init_vcore(tee_mm_pool_t *mm_vcore)
{
	const vaddr_t begin = VCORE_START_VA;
	size_t size = TEE_RAM_VA_SIZE;

#ifdef CFG_CORE_SANITIZE_KADDRESS
	/* Carve out ASAN memory, flat mapped after core memory */
	if (begin + size > ASAN_SHADOW_PA)
		size = ASAN_MAP_PA - begin;
#endif

	if (!tee_mm_init(mm_vcore, begin, size, SMALL_PAGE_SHIFT,
			 TEE_MM_POOL_NO_FLAGS))
		panic("tee_mm_vcore init failed");
}

/*
 * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
 * The init part is also paged just as the rest of the normal paged code, with
 * the difference that it's preloaded during boot. When the backing store
 * is configured the entire paged binary is copied in place and then also
 * the init part. Since the init part has been relocated (references to
 * addresses updated to compensate for the new load address) this has to be
 * undone for the hashes of those pages to match with the original binary.
 *
 * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
 * unchanged.
 */
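/*
 * Each relocation entry in the embedded boot data is, roughly, a 32-bit
 * offset (relative to the start of TEE RAM) of a location holding an
 * absolute address; undoing the relocation simply subtracts the random
 * load offset from the value stored at that location in the paged store
 * (descriptive summary of the loop below, not an exhaustive format
 * specification).
 */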
static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
{
#ifdef CFG_CORE_ASLR
	unsigned long *ptr = NULL;
	const uint32_t *reloc = NULL;
	const uint32_t *reloc_end = NULL;
	unsigned long offs = boot_mmu_config.load_offset;
	const struct boot_embdata *embdata = (const void *)__init_end;
	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_RAM_START;
	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_RAM_START;

	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);

	for (; reloc < reloc_end; reloc++) {
		if (*reloc < addr_start)
			continue;
		if (*reloc >= addr_end)
			break;
		ptr = (void *)(paged_store + *reloc - addr_start);
		*ptr -= offs;
	}
#endif
}

static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
				   void *store)
{
	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
#ifdef CFG_CORE_ASLR
	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
	const struct boot_embdata *embdata = (const void *)__init_end;
	const void *reloc = __init_end + embdata->reloc_offset;

	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
					 reloc, embdata->reloc_len, store);
#else
	return fobj_ro_paged_alloc(num_pages, hashes, store);
#endif
}

static void init_runtime(unsigned long pageable_part)
{
	size_t n;
	size_t init_size = (size_t)(__init_end - __init_start);
	size_t pageable_start = (size_t)__pageable_start;
	size_t pageable_end = (size_t)__pageable_end;
	size_t pageable_size = pageable_end - pageable_start;
	size_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE;
	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
			   TEE_SHA256_HASH_SIZE;
	const struct boot_embdata *embdata = (const void *)__init_end;
	const void *tmp_hashes = NULL;
	tee_mm_entry_t *mm = NULL;
	struct fobj *fobj = NULL;
	uint8_t *paged_store = NULL;
	uint8_t *hashes = NULL;

	assert(pageable_size % SMALL_PAGE_SIZE == 0);
	assert(embdata->total_len >= embdata->hashes_offset +
				     embdata->hashes_len);
	assert(hash_size == embdata->hashes_len);

	tmp_hashes = __init_end + embdata->hashes_offset;

	init_asan();

	/* Add heap2 first as heap1 may be too small as initial bget pool */
	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);

	/*
	 * This needs to be initialized early to support address lookup
	 * in MEM_AREA_TEE_RAM
	 */
	tee_pager_early_init();

	hashes = malloc(hash_size);
	IMSG_RAW("\n");
	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
	assert(hashes);
	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);

	/*
	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
	 * DDR below.
	 */
	core_mmu_init_ta_ram();

	carve_out_asan_mem(&tee_mm_sec_ddr);

	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
	assert(mm);
	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM,
				   pageable_size);
	/*
	 * Load pageable part in the dedicated allocated area:
	 * - Move pageable non-init part into pageable area. Note bootloader
	 *   may have loaded it anywhere in TA RAM hence use memmove().
	 * - Copy pageable init part from current location into pageable area.
	 */
	memmove(paged_store + init_size,
		phys_to_virt(pageable_part,
			     core_mmu_get_type_by_pa(pageable_part),
			     __pageable_part_end - __pageable_part_start),
		__pageable_part_end - __pageable_part_start);
	asan_memcpy_unchecked(paged_store, __init_start, init_size);
	/*
	 * Undo eventual relocation for the init part so the hash checks
	 * can pass.
	 */
	undo_init_relocation(paged_store);

	/* Check that the hashes of what's in the pageable area are OK */
	DMSG("Checking hashes of pageable area");
	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
		TEE_Result res;

		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
		if (res != TEE_SUCCESS) {
			EMSG("Hash failed for page %zu at %p: res 0x%x",
			     n, (void *)page, res);
			panic();
		}
	}

	/*
	 * Assert that the prepaged init sections are page aligned so that
	 * nothing trails uninitialized at the end of the premapped init area.
	 */
	assert(!(init_size & SMALL_PAGE_MASK));

	/*
	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
	 * is supplied to tee_pager_init() below.
	 */
	init_vcore(&tee_mm_vcore);

	/*
	 * Assign the pager alias area at the end of the small page block
	 * that the rest of the binary is loaded into. We're taking more
	 * than needed, but we're guaranteed to not need more than the
	 * physical amount of TZSRAM.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore,
			   (vaddr_t)tee_mm_vcore.lo +
			   tee_mm_vcore.size - TZSRAM_SIZE,
			   TZSRAM_SIZE);
	assert(mm);
	tee_pager_set_alias_area(mm);

	/*
	 * Claim virtual memory which isn't paged.
	 * Linear memory (flat map core memory) ends there.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, VCORE_UNPG_RX_PA,
			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
	assert(mm);

	/*
	 * Allocate virtual memory for the pageable area and let the pager
	 * take charge of all the pages already assigned to that memory.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
			   pageable_size);
	assert(mm);
	fobj = ro_paged_alloc(mm, hashes, paged_store);
	assert(fobj);
	tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
				  fobj);
	fobj_put(fobj);

	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
	tee_pager_add_pages(pageable_start + init_size,
			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
			    true);
	if (pageable_end < tzsram_end)
		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
						   SMALL_PAGE_SIZE, true);

	/*
	 * There may be physical pages in TZSRAM before the core load address.
	 * These pages can be added to the physical pages pool of the pager.
	 * This setup may happen when the secure bootloader runs in TZSRAM
	 * and its memory can be reused by OP-TEE once boot stages complete.
	 */
	tee_pager_add_pages(tee_mm_vcore.lo,
			(VCORE_UNPG_RX_PA - tee_mm_vcore.lo) / SMALL_PAGE_SIZE,
			true);

	print_pager_pool_size();
}
#else

static void init_runtime(unsigned long pageable_part __unused)
{
	init_asan();

	/*
	 * By default the whole of OP-TEE uses malloc, so we need to
	 * initialize it early. But when virtualization is enabled, malloc
	 * is used only by the TEE runtime, so malloc should be initialized
	 * later, for every virtual partition separately. Core code uses
	 * nex_malloc instead.
	 */
#ifdef CFG_VIRTUALIZATION
	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
					      __nex_heap_start);
#else
	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
#endif

	IMSG_RAW("\n");
}
#endif

void *get_dt(void)
{
	void *fdt = get_embedded_dt();

	if (!fdt)
		fdt = get_external_dt();

	return fdt;
}

#if defined(CFG_EMBED_DTB)
void *get_embedded_dt(void)
{
	static bool checked;

	assert(cpu_mmu_enabled());

	if (!checked) {
		IMSG("Embedded DTB found");

		if (fdt_check_header(embedded_secure_dtb))
			panic("Invalid embedded DTB");

		checked = true;
	}

	return embedded_secure_dtb;
}
#else
void *get_embedded_dt(void)
{
	return NULL;
}
#endif /*CFG_EMBED_DTB*/

#if defined(CFG_DT)
void *get_external_dt(void)
{
	assert(cpu_mmu_enabled());
	return external_dt.blob;
}

static TEE_Result release_external_dt(void)
{
	int ret = 0;

	if (!external_dt.blob)
		return TEE_SUCCESS;

	ret = fdt_pack(external_dt.blob);
	if (ret < 0) {
		EMSG("Failed to pack Device Tree at 0x%" PRIxPA ": error %d",
		     virt_to_phys(external_dt.blob), ret);
		panic();
	}

	if (core_mmu_remove_mapping(MEM_AREA_EXT_DT, external_dt.blob,
				    CFG_DTB_MAX_SIZE))
		panic("Failed to remove temporary Device Tree mapping");

	/* External DTB is no longer reachable, reset pointer to invalid */
	external_dt.blob = NULL;

	return TEE_SUCCESS;
}
boot_final(release_external_dt);

#ifdef _CFG_USE_DTB_OVERLAY
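/*
 * add_dt_overlay_fragment() below wraps new content in an overlay fragment,
 * roughly equivalent to this DTS snippet (illustrative):
 *
 *	fragment@<n> {
 *		target-path = "/";
 *		__overlay__ {
 *		};
 *	};
 *
 * It returns the offset of the __overlay__ node, where new nodes are then
 * added.
 */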
static int add_dt_overlay_fragment(struct dt_descriptor *dt, int ioffs)
{
	char frag[32];
	int offs;
	int ret;

	snprintf(frag, sizeof(frag), "fragment@%d", dt->frag_id);
	offs = fdt_add_subnode(dt->blob, ioffs, frag);
	if (offs < 0)
		return offs;

	dt->frag_id += 1;

	ret = fdt_setprop_string(dt->blob, offs, "target-path", "/");
	if (ret < 0)
		return -1;

	return fdt_add_subnode(dt->blob, offs, "__overlay__");
}

static int init_dt_overlay(struct dt_descriptor *dt, int __maybe_unused dt_size)
{
	int fragment;

	if (IS_ENABLED(CFG_EXTERNAL_DTB_OVERLAY)) {
		if (!fdt_check_header(dt->blob)) {
			fdt_for_each_subnode(fragment, dt->blob, 0)
				dt->frag_id += 1;
			return 0;
		}
	}

	return fdt_create_empty_tree(dt->blob, dt_size);
}
#else
static int add_dt_overlay_fragment(struct dt_descriptor *dt __unused, int offs)
{
	return offs;
}

static int init_dt_overlay(struct dt_descriptor *dt __unused,
			   int dt_size __unused)
{
	return 0;
}
#endif /* _CFG_USE_DTB_OVERLAY */

static int add_dt_path_subnode(struct dt_descriptor *dt, const char *path,
			       const char *subnode)
{
	int offs;

	offs = fdt_path_offset(dt->blob, path);
	if (offs < 0)
		return -1;
	offs = add_dt_overlay_fragment(dt, offs);
	if (offs < 0)
		return -1;
	offs = fdt_add_subnode(dt->blob, offs, subnode);
	if (offs < 0)
		return -1;
	return offs;
}

static int add_optee_dt_node(struct dt_descriptor *dt)
{
	int offs;
	int ret;

	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
		DMSG("OP-TEE Device Tree node already exists!");
		return 0;
	}

	offs = fdt_path_offset(dt->blob, "/firmware");
	if (offs < 0) {
		offs = add_dt_path_subnode(dt, "/", "firmware");
		if (offs < 0)
			return -1;
	}

	offs = fdt_add_subnode(dt->blob, offs, "optee");
	if (offs < 0)
		return -1;

	ret = fdt_setprop_string(dt->blob, offs, "compatible",
				 "linaro,optee-tz");
	if (ret < 0)
		return -1;
	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
	if (ret < 0)
		return -1;
	if (CFG_CORE_ASYNC_NOTIF_GIC_INTID) {
		/*
		 * The format of the interrupt property is defined by the
		 * binding of the interrupt domain root. In this case it's
		 * one Arm GIC v1, v2 or v3 so we must be compatible with
		 * these.
		 *
		 * An SPI type of interrupt is indicated with a 0 in the
		 * first cell.
		 *
		 * The interrupt number goes in the second cell where
		 * SPIs range from 0 to 987.
		 *
		 * Flags are passed in the third cell where a 1 means edge
		 * triggered.
		 */
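		/*
		 * The resulting property corresponds to the DTS snippet
		 * "interrupts = <0 (CFG_CORE_ASYNC_NOTIF_GIC_INTID -
		 * GIC_SPI_BASE) 1>;" (illustrative; the actual numbers
		 * depend on the platform configuration).
		 */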
		const uint32_t gic_spi = 0;
		const uint32_t irq_type_edge = 1;
		uint32_t val[] = {
			TEE_U32_TO_BIG_ENDIAN(gic_spi),
			TEE_U32_TO_BIG_ENDIAN(CFG_CORE_ASYNC_NOTIF_GIC_INTID -
					      GIC_SPI_BASE),
			TEE_U32_TO_BIG_ENDIAN(irq_type_edge),
		};

		ret = fdt_setprop(dt->blob, offs, "interrupts", val,
				  sizeof(val));
		if (ret < 0)
			return -1;
	}
	return 0;
}

#ifdef CFG_PSCI_ARM32
static int append_psci_compatible(void *fdt, int offs, const char *str)
{
	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
}

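/*
 * dt_add_psci_node() below generates a node roughly equivalent to this DTS
 * snippet (illustrative):
 *
 *	psci {
 *		compatible = "arm,psci-1.0", "arm,psci-0.2", "arm,psci";
 *		method = "smc";
 *		cpu_suspend = <PSCI_CPU_SUSPEND>;
 *		cpu_off = <PSCI_CPU_OFF>;
 *		cpu_on = <PSCI_CPU_ON>;
 *		sys_poweroff = <PSCI_SYSTEM_OFF>;
 *		sys_reset = <PSCI_SYSTEM_RESET>;
 *	};
 */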
static int dt_add_psci_node(struct dt_descriptor *dt)
{
	int offs;

	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
		DMSG("PSCI Device Tree node already exists!");
		return 0;
	}

	offs = add_dt_path_subnode(dt, "/", "psci");
	if (offs < 0)
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
		return -1;
	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
		return -1;
	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
		return -1;
	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
		return -1;
	return 0;
}

static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
				    const char *prefix)
{
	const size_t prefix_len = strlen(prefix);
	size_t l;
	int plen;
	const char *prop;

	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
	if (!prop)
		return -1;

	while (plen > 0) {
		if (memcmp(prop, prefix, prefix_len) == 0)
			return 0; /* match */

		l = strlen(prop) + 1;
		prop += l;
		plen -= l;
	}

	return -1;
}

static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
{
	int offs = 0;

	while (1) {
		offs = fdt_next_node(dt->blob, offs, NULL);
		if (offs < 0)
			break;
		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
			continue; /* already set */
		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
			continue; /* no compatible */
		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
			return -1;
		/* Need to restart scanning as offsets may have changed */
		offs = 0;
	}
	return 0;
}

static int config_psci(struct dt_descriptor *dt)
{
	if (dt_add_psci_node(dt))
		return -1;
	return dt_add_psci_cpu_enable_methods(dt);
}
#else
static int config_psci(struct dt_descriptor *dt __unused)
{
	return 0;
}
#endif /*CFG_PSCI_ARM32*/

static void set_dt_val(void *data, uint32_t cell_size, uint64_t val)
{
	if (cell_size == 1) {
		fdt32_t v = cpu_to_fdt32((uint32_t)val);

		memcpy(data, &v, sizeof(v));
	} else {
		fdt64_t v = cpu_to_fdt64(val);

		memcpy(data, &v, sizeof(v));
	}
}

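/*
 * add_res_mem_dt_node() adds a carve-out under /reserved-memory, roughly
 * equivalent to this DTS snippet (illustrative, cell counts depend on the
 * parent node):
 *
 *	reserved-memory {
 *		#address-cells = <...>;
 *		#size-cells = <...>;
 *		ranges;
 *
 *		name@<pa> {
 *			reg = <pa size>;
 *			no-map;
 *		};
 *	};
 */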
static int add_res_mem_dt_node(struct dt_descriptor *dt, const char *name,
			       paddr_t pa, size_t size)
{
	int offs = 0;
	int ret = 0;
	int addr_size = -1;
	int len_size = -1;
	bool found = true;
	char subnode_name[80] = { 0 };

	offs = fdt_path_offset(dt->blob, "/reserved-memory");

	if (offs < 0) {
		found = false;
		offs = 0;
	}

	if (IS_ENABLED(_CFG_USE_DTB_OVERLAY)) {
		len_size = sizeof(paddr_t) / sizeof(uint32_t);
		addr_size = sizeof(paddr_t) / sizeof(uint32_t);
	} else {
		len_size = fdt_size_cells(dt->blob, offs);
		if (len_size < 0)
			return -1;
		addr_size = fdt_address_cells(dt->blob, offs);
		if (addr_size < 0)
			return -1;
	}

	if (!found) {
		offs = add_dt_path_subnode(dt, "/", "reserved-memory");
		if (offs < 0)
			return -1;
		ret = fdt_setprop_cell(dt->blob, offs, "#address-cells",
				       addr_size);
		if (ret < 0)
			return -1;
		ret = fdt_setprop_cell(dt->blob, offs, "#size-cells", len_size);
		if (ret < 0)
			return -1;
		ret = fdt_setprop(dt->blob, offs, "ranges", NULL, 0);
		if (ret < 0)
			return -1;
	}

	ret = snprintf(subnode_name, sizeof(subnode_name),
		       "%s@%" PRIxPA, name, pa);
	if (ret < 0 || ret >= (int)sizeof(subnode_name))
		DMSG("truncated node \"%s@%" PRIxPA"\"", name, pa);
	offs = fdt_add_subnode(dt->blob, offs, subnode_name);
	if (offs >= 0) {
		uint32_t data[FDT_MAX_NCELLS * 2];

		set_dt_val(data, addr_size, pa);
		set_dt_val(data + addr_size, len_size, size);
		ret = fdt_setprop(dt->blob, offs, "reg", data,
				  sizeof(uint32_t) * (addr_size + len_size));
		if (ret < 0)
			return -1;
		ret = fdt_setprop(dt->blob, offs, "no-map", NULL, 0);
		if (ret < 0)
			return -1;
	} else {
		return -1;
	}
	return 0;
}

#ifdef CFG_CORE_DYN_SHM
static uint64_t get_dt_val_and_advance(const void *data, size_t *offs,
				       uint32_t cell_size)
{
	uint64_t rv = 0;

	if (cell_size == 1) {
		uint32_t v;

		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
		*offs += sizeof(v);
		rv = fdt32_to_cpu(v);
	} else {
		uint64_t v;

		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
		*offs += sizeof(v);
		rv = fdt64_to_cpu(v);
	}

	return rv;
}

/*
 * Find all non-secure memory from the DT. Memory marked inaccessible by
 * Secure World is ignored since it cannot be mapped for use as dynamic
 * shared memory.
 */
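/*
 * Note: each "reg" entry is an (address, size) pair encoded with the root
 * node's #address-cells/#size-cells; get_dt_val_and_advance() above walks
 * those cells one value at a time.
 */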
static int get_nsec_memory_helper(void *fdt, struct core_mmu_phys_mem *mem)
{
	const uint8_t *prop = NULL;
	uint64_t a = 0;
	uint64_t l = 0;
	size_t prop_offs = 0;
	size_t prop_len = 0;
	int elems_total = 0;
	int addr_size = 0;
	int len_size = 0;
	int offs = 0;
	size_t n = 0;
	int len = 0;

	addr_size = fdt_address_cells(fdt, 0);
	if (addr_size < 0)
		return 0;

	len_size = fdt_size_cells(fdt, 0);
	if (len_size < 0)
		return 0;

	while (true) {
		offs = fdt_node_offset_by_prop_value(fdt, offs, "device_type",
						     "memory",
						     sizeof("memory"));
		if (offs < 0)
			break;

		if (_fdt_get_status(fdt, offs) != (DT_STATUS_OK_NSEC |
						   DT_STATUS_OK_SEC))
			continue;

		prop = fdt_getprop(fdt, offs, "reg", &len);
		if (!prop)
			continue;

		prop_len = len;
		for (n = 0, prop_offs = 0; prop_offs < prop_len; n++) {
			a = get_dt_val_and_advance(prop, &prop_offs, addr_size);
			if (prop_offs >= prop_len) {
				n--;
				break;
			}

			l = get_dt_val_and_advance(prop, &prop_offs, len_size);
			if (mem) {
				mem->type = MEM_AREA_DDR_OVERALL;
				mem->addr = a;
				mem->size = l;
				mem++;
			}
		}

		elems_total += n;
	}

	return elems_total;
}

static struct core_mmu_phys_mem *get_nsec_memory(void *fdt, size_t *nelems)
{
	struct core_mmu_phys_mem *mem = NULL;
	int elems_total = 0;

	elems_total = get_nsec_memory_helper(fdt, NULL);
	if (elems_total <= 0)
		return NULL;

	mem = nex_calloc(elems_total, sizeof(*mem));
	if (!mem)
		panic();

	elems_total = get_nsec_memory_helper(fdt, mem);
	assert(elems_total > 0);

	*nelems = elems_total;

	return mem;
}
#endif /*CFG_CORE_DYN_SHM*/

#ifdef CFG_CORE_RESERVED_SHM
static int mark_static_shm_as_reserved(struct dt_descriptor *dt)
{
	vaddr_t shm_start;
	vaddr_t shm_end;

	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_start, &shm_end);
	if (shm_start != shm_end)
		return add_res_mem_dt_node(dt, "optee_shm",
					   virt_to_phys((void *)shm_start),
					   shm_end - shm_start);

	DMSG("No SHM configured");
	return -1;
}
#endif /*CFG_CORE_RESERVED_SHM*/

static void init_external_dt(unsigned long phys_dt)
{
	struct dt_descriptor *dt = &external_dt;
	void *fdt;
	int ret;

	if (!phys_dt) {
		/*
		 * No need to panic as we're not using the DT in OP-TEE
		 * yet, we're only adding some nodes for normal world use.
		 * This makes the switch to using DT easier as we can boot
		 * a newer OP-TEE with older boot loaders. Once we start to
		 * initialize devices based on DT we'll likely panic
		 * instead of returning here.
		 */
		IMSG("No non-secure external DT");
		return;
	}

	fdt = core_mmu_add_mapping(MEM_AREA_EXT_DT, phys_dt, CFG_DTB_MAX_SIZE);
	if (!fdt)
		panic("Failed to map external DTB");

	dt->blob = fdt;

	ret = init_dt_overlay(dt, CFG_DTB_MAX_SIZE);
	if (ret < 0) {
		EMSG("Device Tree Overlay init fail @ %#lx: error %d", phys_dt,
		     ret);
		panic();
	}

	ret = fdt_open_into(fdt, fdt, CFG_DTB_MAX_SIZE);
	if (ret < 0) {
		EMSG("Invalid Device Tree at %#lx: error %d", phys_dt, ret);
		panic();
	}

	IMSG("Non-secure external DT found");
}

static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
{
	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
				   CFG_TZDRAM_SIZE);
}

static void update_external_dt(void)
{
	struct dt_descriptor *dt = &external_dt;

	if (!dt->blob)
		return;

	if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt))
		panic("Failed to add OP-TEE Device Tree node");

	if (config_psci(dt))
		panic("Failed to config PSCI");

#ifdef CFG_CORE_RESERVED_SHM
	if (mark_static_shm_as_reserved(dt))
		panic("Failed to config non-secure memory");
#endif

	if (mark_tzdram_as_reserved(dt))
		panic("Failed to config secure memory");
}
#else /*CFG_DT*/
void *get_external_dt(void)
{
	return NULL;
}

static void init_external_dt(unsigned long phys_dt __unused)
{
}

static void update_external_dt(void)
{
}

#ifdef CFG_CORE_DYN_SHM
static struct core_mmu_phys_mem *get_nsec_memory(void *fdt __unused,
						 size_t *nelems __unused)
{
	return NULL;
}
#endif /*CFG_CORE_DYN_SHM*/
#endif /*!CFG_DT*/

#ifdef CFG_CORE_DYN_SHM
static void discover_nsec_memory(void)
{
	struct core_mmu_phys_mem *mem;
	const struct core_mmu_phys_mem *mem_begin = NULL;
	const struct core_mmu_phys_mem *mem_end = NULL;
	size_t nelems;
	void *fdt = get_external_dt();

	if (fdt) {
		mem = get_nsec_memory(fdt, &nelems);
		if (mem) {
			core_mmu_set_discovered_nsec_ddr(mem, nelems);
			return;
		}

		DMSG("No non-secure memory found in FDT");
	}

	mem_begin = phys_ddr_overall_begin;
	mem_end = phys_ddr_overall_end;
	nelems = mem_end - mem_begin;
	if (nelems) {
		/*
		 * Platform cannot use both register_ddr() and the now
		 * deprecated register_dynamic_shm().
		 */
		assert(phys_ddr_overall_compat_begin ==
		       phys_ddr_overall_compat_end);
	} else {
		mem_begin = phys_ddr_overall_compat_begin;
		mem_end = phys_ddr_overall_compat_end;
		nelems = mem_end - mem_begin;
		if (!nelems)
			return;
		DMSG("Warning register_dynamic_shm() is deprecated, please use register_ddr() instead");
	}

	mem = nex_calloc(nelems, sizeof(*mem));
	if (!mem)
		panic();

	memcpy(mem, phys_ddr_overall_begin, sizeof(*mem) * nelems);
	core_mmu_set_discovered_nsec_ddr(mem, nelems);
}
#else /*CFG_CORE_DYN_SHM*/
static void discover_nsec_memory(void)
{
}
#endif /*!CFG_CORE_DYN_SHM*/

#ifdef CFG_VIRTUALIZATION
static TEE_Result virt_init_heap(void)
{
	/* We need to initialize the pool for every virtual guest partition */
	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);

	return TEE_SUCCESS;
}
preinit_early(virt_init_heap);
#endif

void init_tee_runtime(void)
{
#ifndef CFG_WITH_PAGER
	/* Pager initializes TA RAM early */
	core_mmu_init_ta_ram();
#endif
	/*
	 * With virtualization we call this function when creating the
	 * OP-TEE partition instead.
	 */
	if (!IS_ENABLED(CFG_VIRTUALIZATION))
		call_preinitcalls();
	call_initcalls();
}

static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
{
	/*
	 * Mask asynchronous exceptions before switching to the thread vector
	 * as the thread handler requires those to be masked while
	 * executing with the temporary stack. The thread subsystem also
	 * asserts that the foreign interrupts are blocked when using most of
	 * its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);
	primary_save_cntfrq();
	init_vfp_sec();
	/*
	 * Pager: init_runtime() calls thread_kernel_enable_vfp() so we must
	 * set a current thread right now to avoid a chicken-and-egg problem
	 * (thread_init_boot_thread() sets the current thread but needs
	 * things set by init_runtime()).
	 */
	thread_get_core_local()->curr_thread = 0;
	init_runtime(pageable_part);

	if (IS_ENABLED(CFG_VIRTUALIZATION)) {
		/*
		 * Virtualization: we can't initialize threads right now
		 * because threads belong to the "tee" part and will be
		 * initialized separately for each new virtual guest. So
		 * we'll clear "curr_thread" and call it done.
		 */
		thread_get_core_local()->curr_thread = -1;
	} else {
		thread_init_boot_thread();
	}
	thread_init_primary();
	thread_init_per_cpu();
	init_sec_mon(nsec_entry);
}

/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
void __weak boot_init_primary_late(unsigned long fdt)
{
	init_external_dt(fdt);
	tpm_map_log_area(get_external_dt());
	discover_nsec_memory();
	update_external_dt();
	configure_console_from_dt();

	IMSG("OP-TEE version: %s", core_v_str);
	if (IS_ENABLED(CFG_WARN_INSECURE)) {
		IMSG("WARNING: This OP-TEE configuration might be insecure!");
		IMSG("WARNING: Please check https://optee.readthedocs.io/en/latest/architecture/porting_guidelines.html");
	}
	IMSG("Primary CPU initializing");
#ifdef CFG_CORE_ASLR
	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
	     (unsigned long)boot_mmu_config.load_offset, VCORE_START_VA);
#endif

	main_init_gic();
	init_vfp_nsec();
	if (IS_ENABLED(CFG_VIRTUALIZATION)) {
		IMSG("Initializing virtualization support");
		core_mmu_init_virtualization();
	} else {
		init_tee_runtime();
	}
	call_finalcalls();
	IMSG("Primary CPU switching to normal world boot");
}

static void init_secondary_helper(unsigned long nsec_entry)
{
	IMSG("Secondary CPU %zu initializing", get_core_pos());

	/*
	 * Mask asynchronous exceptions before switching to the thread vector
	 * as the thread handler requires those to be masked while
	 * executing with the temporary stack. The thread subsystem also
	 * asserts that the foreign interrupts are blocked when using most of
	 * its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);

	secondary_init_cntfrq();
	thread_init_per_cpu();
	init_sec_mon(nsec_entry);
	main_secondary_init_gic();
	init_vfp_sec();
	init_vfp_nsec();

	IMSG("Secondary CPU %zu switching to normal world boot", get_core_pos());
}

/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area so that it lies in the init area.
 */
void __weak boot_init_primary_early(unsigned long pageable_part,
				    unsigned long nsec_entry __maybe_unused)
{
	unsigned long e = PADDR_INVALID;

#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	e = nsec_entry;
#endif

	init_primary(pageable_part, e);
}

#if defined(CFG_WITH_ARM_TRUSTED_FW)
unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
				  unsigned long a1 __unused)
{
	init_secondary_helper(PADDR_INVALID);
	return 0;
}
#else
void boot_init_secondary(unsigned long nsec_entry)
{
	init_secondary_helper(nsec_entry);
}
#endif

#if defined(CFG_BOOT_SECONDARY_REQUEST)
void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
			    uintptr_t context_id)
{
	ns_entry_contexts[core_idx].entry_point = entry;
	ns_entry_contexts[core_idx].context_id = context_id;
	dsb_ishst();
}

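/*
 * Release secondary core @core_idx: publish its normal world entry point,
 * then set its spin-table flag and signal an event so a core waiting in
 * boot_core_hpen() below proceeds. Core 0 (the primary) cannot be released
 * this way.
 */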
int boot_core_release(size_t core_idx, paddr_t entry)
{
	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
		return -1;

	ns_entry_contexts[core_idx].entry_point = entry;
	dmb();
	spin_table[core_idx] = 1;
	dsb();
	sev();

	return 0;
}

/*
 * Spin until the secondary boot request, then return the secondary core
 * entry context.
 */
struct ns_entry_context *boot_core_hpen(void)
{
#ifdef CFG_PSCI_ARM32
	return &ns_entry_contexts[get_core_pos()];
#else
	do {
		wfe();
	} while (!spin_table[get_core_pos()]);
	dmb();
	return &ns_entry_contexts[get_core_pos()];
#endif
}
#endif

#if defined(CFG_CORE_ASLR)
#if defined(CFG_DT)
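/*
 * get_aslr_seed() expects the seed to be provided by an earlier boot stage
 * in the secure DTB, for instance (illustrative values):
 *
 *	secure-chosen {
 *		kaslr-seed = <0x12345678 0x9abcdef0>;
 *	};
 */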
unsigned long __weak get_aslr_seed(void *fdt)
{
	int rc = fdt_check_header(fdt);
	const uint64_t *seed = NULL;
	int offs = 0;
	int len = 0;

	if (rc) {
		DMSG("Bad fdt: %d", rc);
		goto err;
	}

	offs = fdt_path_offset(fdt, "/secure-chosen");
	if (offs < 0) {
		DMSG("Cannot find /secure-chosen");
		goto err;
	}
	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
	if (!seed || len != sizeof(*seed)) {
		DMSG("Cannot find valid kaslr-seed");
		goto err;
	}

	return fdt64_to_cpu(*seed);

err:
	/* Try platform implementation */
	return plat_get_aslr_seed();
}
#else /*!CFG_DT*/
unsigned long __weak get_aslr_seed(void *fdt __unused)
{
	/* Try platform implementation */
	return plat_get_aslr_seed();
}
#endif /*!CFG_DT*/
#endif /*CFG_CORE_ASLR*/