1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <bitstring.h>
10 #include <config.h>
11 #include <kernel/boot.h>
12 #include <kernel/cache_helpers.h>
13 #include <kernel/linker.h>
14 #include <kernel/panic.h>
15 #include <kernel/spinlock.h>
16 #include <kernel/tee_l2cc_mutex.h>
17 #include <kernel/tee_misc.h>
18 #include <kernel/tee_ta_manager.h>
19 #include <kernel/thread.h>
20 #include <kernel/tlb_helpers.h>
21 #include <kernel/tz_ssvce_pl310.h>
22 #include <kernel/user_mode_ctx.h>
23 #include <kernel/virtualization.h>
24 #include <mm/core_memprot.h>
25 #include <mm/core_mmu.h>
26 #include <mm/mobj.h>
27 #include <mm/pgt_cache.h>
28 #include <mm/tee_pager.h>
29 #include <mm/vm.h>
30 #include <platform_config.h>
31 #include <stdlib.h>
32 #include <trace.h>
33 #include <util.h>
34 
35 #include "core_mmu_private.h"
36 
37 #ifndef DEBUG_XLAT_TABLE
38 #define DEBUG_XLAT_TABLE 0
39 #endif
40 
41 #define SHM_VASPACE_SIZE	(1024 * 1024 * 32)
42 
43 /*
44  * These variables are initialized before .bss is cleared. To avoid
45  * resetting them when .bss is cleared we're storing them in .data instead,
46  * even if they are initially zero.
47  */
48 
49 #ifdef CFG_CORE_RESERVED_SHM
50 /* Default NSec shared memory allocated from NSec world */
51 unsigned long default_nsec_shm_size __nex_bss;
52 unsigned long default_nsec_shm_paddr __nex_bss;
53 #endif
54 
55 static struct tee_mmap_region static_memory_map[CFG_MMAP_REGIONS
56 #ifdef CFG_CORE_ASLR
57 						+ 1
58 #endif
59 						+ 1] __nex_bss;
60 
61 /* Define the platform's memory layout. */
62 struct memaccess_area {
63 	paddr_t paddr;
64 	size_t size;
65 };
66 #define MEMACCESS_AREA(a, s) { .paddr = a, .size = s }
67 
68 static struct memaccess_area secure_only[] __nex_data = {
69 #ifdef TZSRAM_BASE
70 	MEMACCESS_AREA(TZSRAM_BASE, TZSRAM_SIZE),
71 #endif
72 	MEMACCESS_AREA(TZDRAM_BASE, TZDRAM_SIZE),
73 };
74 
75 static struct memaccess_area nsec_shared[] __nex_data = {
76 #ifdef CFG_CORE_RESERVED_SHM
77 	MEMACCESS_AREA(TEE_SHMEM_START, TEE_SHMEM_SIZE),
78 #endif
79 };
80 
81 #if defined(CFG_SECURE_DATA_PATH)
82 #ifdef CFG_TEE_SDP_MEM_BASE
83 register_sdp_mem(CFG_TEE_SDP_MEM_BASE, CFG_TEE_SDP_MEM_SIZE);
84 #endif
85 #ifdef TEE_SDP_TEST_MEM_BASE
86 register_sdp_mem(TEE_SDP_TEST_MEM_BASE, TEE_SDP_TEST_MEM_SIZE);
87 #endif
88 #endif
89 
90 #ifdef CFG_CORE_RWDATA_NOEXEC
91 register_phys_mem_ul(MEM_AREA_TEE_RAM_RO, TEE_RAM_START,
92 		     VCORE_UNPG_RX_PA - TEE_RAM_START);
93 register_phys_mem_ul(MEM_AREA_TEE_RAM_RX, VCORE_UNPG_RX_PA,
94 		     VCORE_UNPG_RX_SZ_UNSAFE);
95 register_phys_mem_ul(MEM_AREA_TEE_RAM_RO, VCORE_UNPG_RO_PA,
96 		     VCORE_UNPG_RO_SZ_UNSAFE);
97 
98 #ifdef CFG_VIRTUALIZATION
99 register_phys_mem_ul(MEM_AREA_NEX_RAM_RO, VCORE_UNPG_RW_PA,
100 		     VCORE_UNPG_RW_SZ_UNSAFE);
101 register_phys_mem_ul(MEM_AREA_NEX_RAM_RW, VCORE_NEX_RW_PA,
102 		     VCORE_NEX_RW_SZ_UNSAFE);
103 #else
104 register_phys_mem_ul(MEM_AREA_TEE_RAM_RW, VCORE_UNPG_RW_PA,
105 		     VCORE_UNPG_RW_SZ_UNSAFE);
106 #endif
107 
108 #ifdef CFG_WITH_PAGER
109 register_phys_mem_ul(MEM_AREA_INIT_RAM_RX, VCORE_INIT_RX_PA,
110 		     VCORE_INIT_RX_SZ_UNSAFE);
111 register_phys_mem_ul(MEM_AREA_INIT_RAM_RO, VCORE_INIT_RO_PA,
112 		     VCORE_INIT_RO_SZ_UNSAFE);
113 #endif /*CFG_WITH_PAGER*/
114 #else /*!CFG_CORE_RWDATA_NOEXEC*/
115 register_phys_mem(MEM_AREA_TEE_RAM, TEE_RAM_START, TEE_RAM_PH_SIZE);
116 #endif /*!CFG_CORE_RWDATA_NOEXEC*/
117 
118 #ifdef CFG_VIRTUALIZATION
119 register_phys_mem(MEM_AREA_SEC_RAM_OVERALL, TZDRAM_BASE, TZDRAM_SIZE);
120 #endif
121 
122 #if defined(CFG_CORE_SANITIZE_KADDRESS) && defined(CFG_WITH_PAGER)
123 /* Asan ram is part of MEM_AREA_TEE_RAM_RW when pager is disabled */
124 register_phys_mem_ul(MEM_AREA_TEE_ASAN, ASAN_MAP_PA, ASAN_MAP_SZ);
125 #endif
126 
127 #ifndef CFG_VIRTUALIZATION
128 /* Every guest will have its own TA RAM if virtualization support is enabled */
129 register_phys_mem(MEM_AREA_TA_RAM, TA_RAM_START, TA_RAM_SIZE);
130 #endif
131 #ifdef CFG_CORE_RESERVED_SHM
132 register_phys_mem(MEM_AREA_NSEC_SHM, TEE_SHMEM_START, TEE_SHMEM_SIZE);
133 #endif
134 
135 /*
136  * Two ASIDs per context, one for kernel mode and one for user mode. ASIDs 0
137  * and 1 are reserved and not used. This means a maximum of 126 loaded user
138  * mode contexts. This value can be increased, but not beyond the maximum
139  * ASID, which is architecture dependent (max 255 for ARMv7-A and ARMv8-A
140  * AArch32). This constant defines the number of ASID pairs.
141  */
142 #define MMU_NUM_ASID_PAIRS		64
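/*
 * Illustrative sketch only (not part of the build): under the pair scheme
 * described above, a pair index could translate to concrete ASIDs as shown
 * below. The helper name and the exact pair-to-ASID mapping are assumptions
 * for illustration; the actual allocation is done from the g_asid bitstring
 * under g_asid_spinlock declared below.
 *
 *	static unsigned int example_asid_of_pair(unsigned int pair, bool user)
 *	{
 *		// Pair 0 owns the reserved ASIDs 0 and 1 and is never handed out
 *		assert(pair > 0 && pair < MMU_NUM_ASID_PAIRS);
 *		return 2 * pair + (user ? 1 : 0);
 *	}
 */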
143 
144 static bitstr_t bit_decl(g_asid, MMU_NUM_ASID_PAIRS) __nex_bss;
145 static unsigned int g_asid_spinlock __nex_bss = SPINLOCK_UNLOCK;
146 
147 static unsigned int mmu_spinlock;
148 
149 static uint32_t mmu_lock(void)
150 {
151 	return cpu_spin_lock_xsave(&mmu_spinlock);
152 }
153 
154 static void mmu_unlock(uint32_t exceptions)
155 {
156 	cpu_spin_unlock_xrestore(&mmu_spinlock, exceptions);
157 }
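/*
 * Minimal usage sketch (for illustration only): callers that modify the
 * core translation tables bracket the update with mmu_lock()/mmu_unlock(),
 * which also masks exceptions on the current core, as core_mmu_map_pages()
 * further down does:
 *
 *	uint32_t exceptions = mmu_lock();
 *
 *	// ... update translation table entries ...
 *
 *	mmu_unlock(exceptions);
 */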
158 
159 static struct tee_mmap_region *get_memory_map(void)
160 {
161 	if (IS_ENABLED(CFG_VIRTUALIZATION)) {
162 		struct tee_mmap_region *map = virt_get_memory_map();
163 
164 		if (map)
165 			return map;
166 	}
167 
168 	return static_memory_map;
169 
170 }
171 
172 static bool _pbuf_intersects(struct memaccess_area *a, size_t alen,
173 			     paddr_t pa, size_t size)
174 {
175 	size_t n;
176 
177 	for (n = 0; n < alen; n++)
178 		if (core_is_buffer_intersect(pa, size, a[n].paddr, a[n].size))
179 			return true;
180 	return false;
181 }
182 #define pbuf_intersects(a, pa, size) \
183 	_pbuf_intersects((a), ARRAY_SIZE(a), (pa), (size))
184 
185 static bool _pbuf_is_inside(struct memaccess_area *a, size_t alen,
186 			    paddr_t pa, size_t size)
187 {
188 	size_t n;
189 
190 	for (n = 0; n < alen; n++)
191 		if (core_is_buffer_inside(pa, size, a[n].paddr, a[n].size))
192 			return true;
193 	return false;
194 }
195 #define pbuf_is_inside(a, pa, size) \
196 	_pbuf_is_inside((a), ARRAY_SIZE(a), (pa), (size))
197 
198 static bool pa_is_in_map(struct tee_mmap_region *map, paddr_t pa, size_t len)
199 {
200 	paddr_t end_pa = 0;
201 
202 	if (!map)
203 		return false;
204 
205 	if (SUB_OVERFLOW(len, 1, &end_pa) || ADD_OVERFLOW(pa, end_pa, &end_pa))
206 		return false;
207 
208 	return (pa >= map->pa && end_pa <= map->pa + map->size - 1);
209 }
210 
211 static bool va_is_in_map(struct tee_mmap_region *map, vaddr_t va)
212 {
213 	if (!map)
214 		return false;
215 	return (va >= map->va && va <= (map->va + map->size - 1));
216 }
217 
218 /* check if target buffer fits in a core default map area */
219 static bool pbuf_inside_map_area(unsigned long p, size_t l,
220 				 struct tee_mmap_region *map)
221 {
222 	return core_is_buffer_inside(p, l, map->pa, map->size);
223 }
224 
225 static struct tee_mmap_region *find_map_by_type(enum teecore_memtypes type)
226 {
227 	struct tee_mmap_region *map;
228 
229 	for (map = get_memory_map(); !core_mmap_is_end_of_table(map); map++)
230 		if (map->type == type)
231 			return map;
232 	return NULL;
233 }
234 
235 static struct tee_mmap_region *find_map_by_type_and_pa(
236 			enum teecore_memtypes type, paddr_t pa, size_t len)
237 {
238 	struct tee_mmap_region *map;
239 
240 	for (map = get_memory_map(); !core_mmap_is_end_of_table(map); map++) {
241 		if (map->type != type)
242 			continue;
243 		if (pa_is_in_map(map, pa, len))
244 			return map;
245 	}
246 	return NULL;
247 }
248 
249 static struct tee_mmap_region *find_map_by_va(void *va)
250 {
251 	struct tee_mmap_region *map = get_memory_map();
252 	unsigned long a = (unsigned long)va;
253 
254 	while (!core_mmap_is_end_of_table(map)) {
255 		if ((a >= map->va) && (a <= (map->va - 1 + map->size)))
256 			return map;
257 		map++;
258 	}
259 	return NULL;
260 }
261 
262 static struct tee_mmap_region *find_map_by_pa(unsigned long pa)
263 {
264 	struct tee_mmap_region *map = get_memory_map();
265 
266 	while (!core_mmap_is_end_of_table(map)) {
267 		if ((pa >= map->pa) && (pa <= (map->pa + map->size - 1)))
268 			return map;
269 		map++;
270 	}
271 	return NULL;
272 }
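/*
 * Usage sketch (illustrative only): the lookup helpers above return the
 * map entry covering an address, or NULL when nothing covers it, e.g.:
 *
 *	struct tee_mmap_region *map = find_map_by_pa(pa);
 *
 *	if (map && pbuf_inside_map_area(pa, len, map))
 *		// pa .. pa + len - 1 lies entirely within this mapped region
 *
 * core_pbuf_is() below uses exactly this pattern for CORE_MEM_CACHED.
 */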
273 
274 #if defined(CFG_CORE_DYN_SHM) || defined(CFG_SECURE_DATA_PATH)
275 static bool pbuf_is_special_mem(paddr_t pbuf, size_t len,
276 				const struct core_mmu_phys_mem *start,
277 				const struct core_mmu_phys_mem *end)
278 {
279 	const struct core_mmu_phys_mem *mem;
280 
281 	for (mem = start; mem < end; mem++) {
282 		if (core_is_buffer_inside(pbuf, len, mem->addr, mem->size))
283 			return true;
284 	}
285 
286 	return false;
287 }
288 #endif
289 
290 #ifdef CFG_CORE_DYN_SHM
291 static void carve_out_phys_mem(struct core_mmu_phys_mem **mem, size_t *nelems,
292 			       paddr_t pa, size_t size)
293 {
294 	struct core_mmu_phys_mem *m = *mem;
295 	size_t n = 0;
296 
297 	while (true) {
298 		if (n >= *nelems) {
299 			DMSG("No need to carve out %#" PRIxPA " size %#zx",
300 			     pa, size);
301 			return;
302 		}
303 		if (core_is_buffer_inside(pa, size, m[n].addr, m[n].size))
304 			break;
305 		if (!core_is_buffer_outside(pa, size, m[n].addr, m[n].size))
306 			panic();
307 		n++;
308 	}
309 
310 	if (pa == m[n].addr && size == m[n].size) {
311 		/* Remove this entry */
312 		(*nelems)--;
313 		memmove(m + n, m + n + 1, sizeof(*m) * (*nelems - n));
314 		m = nex_realloc(m, sizeof(*m) * *nelems);
315 		if (!m)
316 			panic();
317 		*mem = m;
318 	} else if (pa == m[n].addr) {
319 		m[n].addr += size;
320 		m[n].size -= size;
321 	} else if ((pa + size) == (m[n].addr + m[n].size)) {
322 		m[n].size -= size;
323 	} else {
324 		/* Need to split the memory entry */
325 		m = nex_realloc(m, sizeof(*m) * (*nelems + 1));
326 		if (!m)
327 			panic();
328 		*mem = m;
329 		memmove(m + n + 1, m + n, sizeof(*m) * (*nelems - n));
330 		(*nelems)++;
331 		m[n].size = pa - m[n].addr;
332 		m[n + 1].size -= size + m[n].size;
333 		m[n + 1].addr = pa + size;
334 	}
335 }
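/*
 * Worked example (values are made up for illustration): carving
 * pa = 0x42000000, size = 0x00200000 out of a single discovered range
 * { .addr = 0x40000000, .size = 0x20000000 } hits the final "split"
 * branch above and leaves two entries:
 *
 *	{ .addr = 0x40000000, .size = 0x02000000 }	// below the hole
 *	{ .addr = 0x42200000, .size = 0x1de00000 }	// above the hole
 *
 * A carve-out touching the start or the end of an entry only shrinks it,
 * and an exact match removes the entry altogether.
 */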
336 
337 static void check_phys_mem_is_outside(struct core_mmu_phys_mem *start,
338 				      size_t nelems,
339 				      struct tee_mmap_region *map)
340 {
341 	size_t n;
342 
343 	for (n = 0; n < nelems; n++) {
344 		if (!core_is_buffer_outside(start[n].addr, start[n].size,
345 					    map->pa, map->size)) {
346 			EMSG("Non-sec mem (%#" PRIxPA ":%#" PRIxPASZ
347 			     ") overlaps map (type %d %#" PRIxPA ":%#zx)",
348 			     start[n].addr, start[n].size,
349 			     map->type, map->pa, map->size);
350 			panic();
351 		}
352 	}
353 }
354 
355 static const struct core_mmu_phys_mem *discovered_nsec_ddr_start __nex_bss;
356 static size_t discovered_nsec_ddr_nelems __nex_bss;
357 
358 static int cmp_pmem_by_addr(const void *a, const void *b)
359 {
360 	const struct core_mmu_phys_mem *pmem_a = a;
361 	const struct core_mmu_phys_mem *pmem_b = b;
362 
363 	return CMP_TRILEAN(pmem_a->addr, pmem_b->addr);
364 }
365 
366 void core_mmu_set_discovered_nsec_ddr(struct core_mmu_phys_mem *start,
367 				      size_t nelems)
368 {
369 	struct core_mmu_phys_mem *m = start;
370 	size_t num_elems = nelems;
371 	struct tee_mmap_region *map = static_memory_map;
372 	const struct core_mmu_phys_mem __maybe_unused *pmem;
373 
374 	assert(!discovered_nsec_ddr_start);
375 	assert(m && num_elems);
376 
377 	qsort(m, num_elems, sizeof(*m), cmp_pmem_by_addr);
378 
379 	/*
380 	 * Non-secure shared memory and also secure data
381 	 * path memory are supposed to reside inside
382 	 * non-secure memory. Since NSEC_SHM and SDP_MEM
383 	 * are used for a specific purpose, carve out holes
384 	 * for those memories in the normal non-secure memory.
385 	 *
386 	 * This has to be done since, for instance, QEMU
387 	 * isn't aware of which range of the non-secure
388 	 * memory is used for NSEC_SHM.
389 	 */
390 
391 #ifdef CFG_SECURE_DATA_PATH
392 	for (pmem = phys_sdp_mem_begin; pmem < phys_sdp_mem_end; pmem++)
393 		carve_out_phys_mem(&m, &num_elems, pmem->addr, pmem->size);
394 #endif
395 
396 	carve_out_phys_mem(&m, &num_elems, TEE_RAM_START, TEE_RAM_PH_SIZE);
397 	carve_out_phys_mem(&m, &num_elems, TA_RAM_START, TA_RAM_SIZE);
398 
399 	for (map = static_memory_map; !core_mmap_is_end_of_table(map); map++) {
400 		switch (map->type) {
401 		case MEM_AREA_NSEC_SHM:
402 			carve_out_phys_mem(&m, &num_elems, map->pa, map->size);
403 			break;
404 		case MEM_AREA_EXT_DT:
405 		case MEM_AREA_RES_VASPACE:
406 		case MEM_AREA_SHM_VASPACE:
407 		case MEM_AREA_TS_VASPACE:
408 		case MEM_AREA_PAGER_VASPACE:
409 			break;
410 		default:
411 			check_phys_mem_is_outside(m, num_elems, map);
412 		}
413 	}
414 
415 	discovered_nsec_ddr_start = m;
416 	discovered_nsec_ddr_nelems = num_elems;
417 
418 	if (!core_mmu_check_end_pa(m[num_elems - 1].addr,
419 				   m[num_elems - 1].size))
420 		panic();
421 }
422 
423 static bool get_discovered_nsec_ddr(const struct core_mmu_phys_mem **start,
424 				    const struct core_mmu_phys_mem **end)
425 {
426 	if (!discovered_nsec_ddr_start)
427 		return false;
428 
429 	*start = discovered_nsec_ddr_start;
430 	*end = discovered_nsec_ddr_start + discovered_nsec_ddr_nelems;
431 
432 	return true;
433 }
434 
435 static bool pbuf_is_nsec_ddr(paddr_t pbuf, size_t len)
436 {
437 	const struct core_mmu_phys_mem *start;
438 	const struct core_mmu_phys_mem *end;
439 
440 	if (!get_discovered_nsec_ddr(&start, &end))
441 		return false;
442 
443 	return pbuf_is_special_mem(pbuf, len, start, end);
444 }
445 
446 bool core_mmu_nsec_ddr_is_defined(void)
447 {
448 	const struct core_mmu_phys_mem *start;
449 	const struct core_mmu_phys_mem *end;
450 
451 	if (!get_discovered_nsec_ddr(&start, &end))
452 		return false;
453 
454 	return start != end;
455 }
456 #else
457 static bool pbuf_is_nsec_ddr(paddr_t pbuf __unused, size_t len __unused)
458 {
459 	return false;
460 }
461 #endif /*CFG_CORE_DYN_SHM*/
462 
463 #define MSG_MEM_INSTERSECT(pa1, sz1, pa2, sz2) \
464 	EMSG("[%" PRIxPA " %" PRIx64 "] intersects [%" PRIxPA " %" PRIx64 "]", \
465 			pa1, (uint64_t)pa1 + sz1, pa2, (uint64_t)pa2 + sz2)
466 
467 #ifdef CFG_SECURE_DATA_PATH
468 static bool pbuf_is_sdp_mem(paddr_t pbuf, size_t len)
469 {
470 	return pbuf_is_special_mem(pbuf, len, phys_sdp_mem_begin,
471 				   phys_sdp_mem_end);
472 }
473 
474 struct mobj **core_sdp_mem_create_mobjs(void)
475 {
476 	const struct core_mmu_phys_mem *mem;
477 	struct mobj **mobj_base;
478 	struct mobj **mobj;
479 	int cnt = phys_sdp_mem_end - phys_sdp_mem_begin;
480 
481 	/* SDP mobjs table must end with a NULL entry */
482 	mobj_base = calloc(cnt + 1, sizeof(struct mobj *));
483 	if (!mobj_base)
484 		panic("Out of memory");
485 
486 	for (mem = phys_sdp_mem_begin, mobj = mobj_base;
487 	     mem < phys_sdp_mem_end; mem++, mobj++) {
488 		*mobj = mobj_phys_alloc(mem->addr, mem->size,
489 					TEE_MATTR_CACHE_CACHED,
490 					CORE_MEM_SDP_MEM);
491 		if (!*mobj)
492 			panic("can't create SDP physical memory object");
493 	}
494 	return mobj_base;
495 }
496 
497 #else /* CFG_SECURE_DATA_PATH */
498 static bool pbuf_is_sdp_mem(paddr_t pbuf __unused, size_t len __unused)
499 {
500 	return false;
501 }
502 
503 #endif /* CFG_SECURE_DATA_PATH */
504 
505 /* Check that special memories comply with registered memories */
506 static void verify_special_mem_areas(struct tee_mmap_region *mem_map,
507 				     size_t len,
508 				     const struct core_mmu_phys_mem *start,
509 				     const struct core_mmu_phys_mem *end,
510 				     const char *area_name __maybe_unused)
511 {
512 	const struct core_mmu_phys_mem *mem;
513 	const struct core_mmu_phys_mem *mem2;
514 	struct tee_mmap_region *mmap;
515 	size_t n;
516 
517 	if (start == end) {
518 		DMSG("No %s memory area defined", area_name);
519 		return;
520 	}
521 
522 	for (mem = start; mem < end; mem++)
523 		DMSG("%s memory [%" PRIxPA " %" PRIx64 "]",
524 		     area_name, mem->addr, (uint64_t)mem->addr + mem->size);
525 
526 	/* Check memories do not intersect each other */
527 	for (mem = start; mem + 1 < end; mem++) {
528 		for (mem2 = mem + 1; mem2 < end; mem2++) {
529 			if (core_is_buffer_intersect(mem2->addr, mem2->size,
530 						     mem->addr, mem->size)) {
531 				MSG_MEM_INSTERSECT(mem2->addr, mem2->size,
532 						   mem->addr, mem->size);
533 				panic("Special memory intersection");
534 			}
535 		}
536 	}
537 
538 	/*
539 	 * Check memories do not intersect any mapped memory.
540 	 * This is called before reserved VA space is loaded in mem_map.
541 	 */
542 	for (mem = start; mem < end; mem++) {
543 		for (mmap = mem_map, n = 0; n < len; mmap++, n++) {
544 			if (core_is_buffer_intersect(mem->addr, mem->size,
545 						     mmap->pa, mmap->size)) {
546 				MSG_MEM_INSTERSECT(mem->addr, mem->size,
547 						   mmap->pa, mmap->size);
548 				panic("Special memory intersection");
549 			}
550 		}
551 	}
552 }
553 
554 static void add_phys_mem(struct tee_mmap_region *memory_map, size_t num_elems,
555 			 const struct core_mmu_phys_mem *mem, size_t *last)
556 {
557 	size_t n = 0;
558 	paddr_t pa;
559 	paddr_size_t size;
560 
561 	/*
562 	 * If some ranges of memory of the same type overlap
563 	 * each other they are coalesced into one entry. To help this,
564 	 * added entries are sorted by increasing physical address.
565 	 *
566 	 * Note that it's valid to have the same physical memory as several
567 	 * different memory types, for instance the same device memory
568 	 * mapped as both secure and non-secure. This will probably not
569 	 * happen often in practice.
570 	 */
571 	DMSG("%s type %s 0x%08" PRIxPA " size 0x%08" PRIxPASZ,
572 	     mem->name, teecore_memtype_name(mem->type), mem->addr, mem->size);
573 	while (true) {
574 		if (n >= (num_elems - 1)) {
575 			EMSG("Out of entries (%zu) in memory_map", num_elems);
576 			panic();
577 		}
578 		if (n == *last)
579 			break;
580 		pa = memory_map[n].pa;
581 		size = memory_map[n].size;
582 		if (mem->type == memory_map[n].type &&
583 		    ((pa <= (mem->addr + (mem->size - 1))) &&
584 		    (mem->addr <= (pa + (size - 1))))) {
585 			DMSG("Physical mem map overlaps 0x%" PRIxPA, mem->addr);
586 			memory_map[n].pa = MIN(pa, mem->addr);
587 			memory_map[n].size = MAX(size, mem->size) +
588 					     (pa - memory_map[n].pa);
589 			return;
590 		}
591 		if (mem->type < memory_map[n].type ||
592 		    (mem->type == memory_map[n].type && mem->addr < pa))
593 			break; /* found the spot where to insert this memory */
594 		n++;
595 	}
596 
597 	memmove(memory_map + n + 1, memory_map + n,
598 		sizeof(struct tee_mmap_region) * (*last - n));
599 	(*last)++;
600 	memset(memory_map + n, 0, sizeof(memory_map[0]));
601 	memory_map[n].type = mem->type;
602 	memory_map[n].pa = mem->addr;
603 	memory_map[n].size = mem->size;
604 }
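/*
 * Worked example (addresses are made up for illustration): if a
 * MEM_AREA_IO_SEC range 0x10000000..0x1001ffff is added while
 * 0x10010000..0x1002ffff of the same type is already in the map, the two
 * ranges overlap and the merge branch above coalesces them into a single
 * entry covering 0x10000000..0x1002ffff. Non-overlapping ranges are kept
 * as separate entries, sorted by increasing physical address.
 */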
605 
606 static void add_va_space(struct tee_mmap_region *memory_map, size_t num_elems,
607 			 enum teecore_memtypes type, size_t size, size_t *last)
608 {
609 	size_t n = 0;
610 
611 	DMSG("type %s size 0x%08zx", teecore_memtype_name(type), size);
612 	while (true) {
613 		if (n >= (num_elems - 1)) {
614 			EMSG("Out of entries (%zu) in memory_map", num_elems);
615 			panic();
616 		}
617 		if (n == *last)
618 			break;
619 		if (type < memory_map[n].type)
620 			break;
621 		n++;
622 	}
623 
624 	memmove(memory_map + n + 1, memory_map + n,
625 		sizeof(struct tee_mmap_region) * (*last - n));
626 	(*last)++;
627 	memset(memory_map + n, 0, sizeof(memory_map[0]));
628 	memory_map[n].type = type;
629 	memory_map[n].size = size;
630 }
631 
632 uint32_t core_mmu_type_to_attr(enum teecore_memtypes t)
633 {
634 	const uint32_t attr = TEE_MATTR_VALID_BLOCK;
635 	const uint32_t cached = TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT;
636 	const uint32_t noncache = TEE_MATTR_CACHE_NONCACHE <<
637 				  TEE_MATTR_CACHE_SHIFT;
638 
639 	switch (t) {
640 	case MEM_AREA_TEE_RAM:
641 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRWX | cached;
642 	case MEM_AREA_TEE_RAM_RX:
643 	case MEM_AREA_INIT_RAM_RX:
644 	case MEM_AREA_IDENTITY_MAP_RX:
645 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRX | cached;
646 	case MEM_AREA_TEE_RAM_RO:
647 	case MEM_AREA_INIT_RAM_RO:
648 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | cached;
649 	case MEM_AREA_TEE_RAM_RW:
650 	case MEM_AREA_NEX_RAM_RO: /* This has to be r/w during init runtime */
651 	case MEM_AREA_NEX_RAM_RW:
652 	case MEM_AREA_TEE_ASAN:
653 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | cached;
654 	case MEM_AREA_TEE_COHERENT:
655 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRWX | noncache;
656 	case MEM_AREA_TA_RAM:
657 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | cached;
658 	case MEM_AREA_NSEC_SHM:
659 		return attr | TEE_MATTR_PRW | cached;
660 	case MEM_AREA_EXT_DT:
661 	case MEM_AREA_IO_NSEC:
662 		return attr | TEE_MATTR_PRW | noncache;
663 	case MEM_AREA_IO_SEC:
664 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | noncache;
665 	case MEM_AREA_RAM_NSEC:
666 		return attr | TEE_MATTR_PRW | cached;
667 	case MEM_AREA_RAM_SEC:
668 	case MEM_AREA_SEC_RAM_OVERALL:
669 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | cached;
670 	case MEM_AREA_RES_VASPACE:
671 	case MEM_AREA_SHM_VASPACE:
672 		return 0;
673 	case MEM_AREA_PAGER_VASPACE:
674 		return TEE_MATTR_SECURE;
675 	default:
676 		panic("invalid type");
677 	}
678 }
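/*
 * Example (follows directly from the switch above): secure device
 * mappings come out non-cached, read/write and never executable:
 *
 *	uint32_t a = core_mmu_type_to_attr(MEM_AREA_IO_SEC);
 *
 *	// a == TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE | TEE_MATTR_PRW |
 *	//      (TEE_MATTR_CACHE_NONCACHE << TEE_MATTR_CACHE_SHIFT)
 */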
679 
680 static bool __maybe_unused map_is_tee_ram(const struct tee_mmap_region *mm)
681 {
682 	switch (mm->type) {
683 	case MEM_AREA_TEE_RAM:
684 	case MEM_AREA_TEE_RAM_RX:
685 	case MEM_AREA_TEE_RAM_RO:
686 	case MEM_AREA_TEE_RAM_RW:
687 	case MEM_AREA_INIT_RAM_RX:
688 	case MEM_AREA_INIT_RAM_RO:
689 	case MEM_AREA_NEX_RAM_RW:
690 	case MEM_AREA_NEX_RAM_RO:
691 	case MEM_AREA_TEE_ASAN:
692 		return true;
693 	default:
694 		return false;
695 	}
696 }
697 
698 static bool __maybe_unused map_is_secure(const struct tee_mmap_region *mm)
699 {
700 	return !!(core_mmu_type_to_attr(mm->type) & TEE_MATTR_SECURE);
701 }
702 
703 static bool __maybe_unused map_is_pgdir(const struct tee_mmap_region *mm)
704 {
705 	return mm->region_size == CORE_MMU_PGDIR_SIZE;
706 }
707 
708 static int cmp_mmap_by_lower_va(const void *a, const void *b)
709 {
710 	const struct tee_mmap_region *mm_a = a;
711 	const struct tee_mmap_region *mm_b = b;
712 
713 	return CMP_TRILEAN(mm_a->va, mm_b->va);
714 }
715 
716 static void dump_mmap_table(struct tee_mmap_region *memory_map)
717 {
718 	struct tee_mmap_region *map;
719 
720 	for (map = memory_map; !core_mmap_is_end_of_table(map); map++) {
721 		vaddr_t __maybe_unused vstart;
722 
723 		vstart = map->va + ((vaddr_t)map->pa & (map->region_size - 1));
724 		DMSG("type %-12s va 0x%08" PRIxVA "..0x%08" PRIxVA
725 		     " pa 0x%08" PRIxPA "..0x%08" PRIxPA " size 0x%08zx (%s)",
726 		     teecore_memtype_name(map->type), vstart,
727 		     vstart + map->size - 1, map->pa,
728 		     (paddr_t)(map->pa + map->size - 1), map->size,
729 		     map->region_size == SMALL_PAGE_SIZE ? "smallpg" : "pgdir");
730 	}
731 }
732 
733 #if DEBUG_XLAT_TABLE
734 
735 static void dump_xlat_table(vaddr_t va, int level)
736 {
737 	struct core_mmu_table_info tbl_info;
738 	unsigned int idx = 0;
739 	paddr_t pa;
740 	uint32_t attr;
741 
742 	core_mmu_find_table(NULL, va, level, &tbl_info);
743 	va = tbl_info.va_base;
744 	for (idx = 0; idx < tbl_info.num_entries; idx++) {
745 		core_mmu_get_entry(&tbl_info, idx, &pa, &attr);
746 		if (attr || level > CORE_MMU_BASE_TABLE_LEVEL) {
747 			if (attr & TEE_MATTR_TABLE) {
748 #ifdef CFG_WITH_LPAE
749 				DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA
750 					" TBL:0x%010" PRIxPA "\n",
751 					level * 2, "", level, va, pa);
752 #else
753 				DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA
754 					" TBL:0x%010" PRIxPA " %s\n",
755 					level * 2, "", level, va, pa,
756 					attr & TEE_MATTR_SECURE ? " S" : "NS");
757 #endif
758 				dump_xlat_table(va, level + 1);
759 			} else if (attr) {
760 				DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA
761 					" PA:0x%010" PRIxPA " %s-%s-%s-%s",
762 					level * 2, "", level, va, pa,
763 					attr & (TEE_MATTR_CACHE_CACHED <<
764 					TEE_MATTR_CACHE_SHIFT) ? "MEM" : "DEV",
765 					attr & TEE_MATTR_PW ? "RW" : "RO",
766 					attr & TEE_MATTR_PX ? "X " : "XN",
767 					attr & TEE_MATTR_SECURE ? " S" : "NS");
768 			} else {
769 				DMSG_RAW("%*s [LVL%d] VA:0x%010" PRIxVA
770 					    " INVALID\n",
771 					    level * 2, "", level, va);
772 			}
773 		}
774 		va += BIT64(tbl_info.shift);
775 	}
776 }
777 
778 #else
779 
780 static void dump_xlat_table(vaddr_t va __unused, int level __unused)
781 {
782 }
783 
784 #endif
785 
786 /*
787  * Reserves virtual memory space for pager usage.
788  *
789  * TEE_RAM_VA_SIZE bytes from the start of the first memory used by the
790  * link script should be covered, either with a direct mapping or an empty
791  * mapping for pager usage. This adds translation tables as needed for the
792  * pager to operate.
793  */
794 static void add_pager_vaspace(struct tee_mmap_region *mmap, size_t num_elems,
795 			      size_t *last)
796 {
797 	paddr_t begin = 0;
798 	paddr_t end = 0;
799 	size_t size = 0;
800 	size_t pos = 0;
801 	size_t n = 0;
802 
803 	if (*last >= (num_elems - 1)) {
804 		EMSG("Out of entries (%zu) in memory map", num_elems);
805 		panic();
806 	}
807 
808 	for (n = 0; !core_mmap_is_end_of_table(mmap + n); n++) {
809 		if (map_is_tee_ram(mmap + n)) {
810 			if (!begin)
811 				begin = mmap[n].pa;
812 			pos = n + 1;
813 		}
814 	}
815 
816 	end = mmap[pos - 1].pa + mmap[pos - 1].size;
817 	size = TEE_RAM_VA_SIZE - (end - begin);
818 	if (!size)
819 		return;
820 
821 	assert(pos <= *last);
822 	memmove(mmap + pos + 1, mmap + pos,
823 		sizeof(struct tee_mmap_region) * (*last - pos));
824 	(*last)++;
825 	memset(mmap + pos, 0, sizeof(mmap[0]));
826 	mmap[pos].type = MEM_AREA_PAGER_VASPACE;
827 	mmap[pos].va = 0;
828 	mmap[pos].size = size;
829 	mmap[pos].region_size = SMALL_PAGE_SIZE;
830 	mmap[pos].attr = core_mmu_type_to_attr(MEM_AREA_PAGER_VASPACE);
831 }
832 
833 static void check_sec_nsec_mem_config(void)
834 {
835 	size_t n = 0;
836 
837 	for (n = 0; n < ARRAY_SIZE(secure_only); n++) {
838 		if (pbuf_intersects(nsec_shared, secure_only[n].paddr,
839 				    secure_only[n].size))
840 			panic("Invalid memory access config: sec/nsec");
841 	}
842 }
843 
844 static size_t collect_mem_ranges(struct tee_mmap_region *memory_map,
845 				 size_t num_elems)
846 {
847 	const struct core_mmu_phys_mem *mem = NULL;
848 	size_t last = 0;
849 
850 	for (mem = phys_mem_map_begin; mem < phys_mem_map_end; mem++) {
851 		struct core_mmu_phys_mem m = *mem;
852 
853 		/* Discard null size entries */
854 		if (!m.size)
855 			continue;
856 
857 		/* Only unmapped virtual range may have a null phys addr */
858 		assert(m.addr || !core_mmu_type_to_attr(m.type));
859 
860 		add_phys_mem(memory_map, num_elems, &m, &last);
861 	}
862 
863 	if (IS_ENABLED(CFG_SECURE_DATA_PATH))
864 		verify_special_mem_areas(memory_map, num_elems,
865 					 phys_sdp_mem_begin,
866 					 phys_sdp_mem_end, "SDP");
867 
868 	add_va_space(memory_map, num_elems, MEM_AREA_RES_VASPACE,
869 		     CFG_RESERVED_VASPACE_SIZE, &last);
870 
871 	add_va_space(memory_map, num_elems, MEM_AREA_SHM_VASPACE,
872 		     SHM_VASPACE_SIZE, &last);
873 
874 	memory_map[last].type = MEM_AREA_END;
875 
876 	return last;
877 }
878 
879 static void assign_mem_granularity(struct tee_mmap_region *memory_map)
880 {
881 	struct tee_mmap_region *map = NULL;
882 
883 	/*
884 	 * Assign region sizes, note that MEM_AREA_TEE_RAM always uses
885 	 * SMALL_PAGE_SIZE.
886 	 */
887 	for (map = memory_map; !core_mmap_is_end_of_table(map); map++) {
888 		paddr_t mask = map->pa | map->size;
889 
890 		if (!(mask & CORE_MMU_PGDIR_MASK))
891 			map->region_size = CORE_MMU_PGDIR_SIZE;
892 		else if (!(mask & SMALL_PAGE_MASK))
893 			map->region_size = SMALL_PAGE_SIZE;
894 		else
895 			panic("Impossible memory alignment");
896 
897 		if (map_is_tee_ram(map))
898 			map->region_size = SMALL_PAGE_SIZE;
899 	}
900 }
901 
902 static unsigned int get_va_width(void)
903 {
904 	if (IS_ENABLED(ARM64)) {
905 		COMPILE_TIME_ASSERT(CFG_LPAE_ADDR_SPACE_BITS >= 32);
906 		COMPILE_TIME_ASSERT(CFG_LPAE_ADDR_SPACE_BITS <= 48);
907 		return CFG_LPAE_ADDR_SPACE_BITS;
908 	}
909 	return 32;
910 }
911 
912 static bool assign_mem_va(vaddr_t tee_ram_va,
913 			  struct tee_mmap_region *memory_map)
914 {
915 	struct tee_mmap_region *map = NULL;
916 	vaddr_t va = tee_ram_va;
917 	bool va_is_secure = true;
918 
919 	/*
920 	 * Check that we're not overlapping with the user VA range.
921 	 */
922 	if (IS_ENABLED(CFG_WITH_LPAE)) {
923 		/*
924 		 * User VA range is supposed to be defined after these
925 		 * mappings have been established.
926 		 */
927 		assert(!core_mmu_user_va_range_is_defined());
928 	} else {
929 		vaddr_t user_va_base = 0;
930 		size_t user_va_size = 0;
931 
932 		assert(core_mmu_user_va_range_is_defined());
933 		core_mmu_get_user_va_range(&user_va_base, &user_va_size);
934 		if (tee_ram_va < (user_va_base + user_va_size))
935 			return false;
936 	}
937 
938 	/* Clear any previous assignments */
939 	for (map = memory_map; !core_mmap_is_end_of_table(map); map++)
940 		map->va = 0;
941 
942 	/*
943 	 * TEE RAM regions are always aligned with region_size.
944 	 *
945 	 * Note that MEM_AREA_PAGER_VASPACE also counts as TEE RAM here
946 	 * since it handles virtual memory which covers the part of the ELF
947 	 * that cannot fit directly into memory.
948 	 */
949 	for (map = memory_map; !core_mmap_is_end_of_table(map); map++) {
950 		if (map_is_tee_ram(map) ||
951 		    map->type == MEM_AREA_PAGER_VASPACE) {
952 			assert(!(va & (map->region_size - 1)));
953 			assert(!(map->size & (map->region_size - 1)));
954 			map->va = va;
955 			if (ADD_OVERFLOW(va, map->size, &va))
956 				return false;
957 			if (IS_ENABLED(ARM64) && va >= BIT64(get_va_width()))
958 				return false;
959 		}
960 	}
961 
962 	if (core_mmu_place_tee_ram_at_top(tee_ram_va)) {
963 		/*
964 		 * Map non-tee ram regions at addresses lower than the tee
965 		 * ram region.
966 		 */
967 		va = tee_ram_va;
968 		for (map = memory_map; !core_mmap_is_end_of_table(map); map++) {
969 			map->attr = core_mmu_type_to_attr(map->type);
970 			if (map->va)
971 				continue;
972 
973 			if (!IS_ENABLED(CFG_WITH_LPAE) &&
974 			    va_is_secure != map_is_secure(map)) {
975 				va_is_secure = !va_is_secure;
976 				va = ROUNDDOWN(va, CORE_MMU_PGDIR_SIZE);
977 			}
978 
979 			if (SUB_OVERFLOW(va, map->size, &va))
980 				return false;
981 			va = ROUNDDOWN(va, map->region_size);
982 			/*
983 			 * Make sure that va is aligned with pa for
984 			 * efficient pgdir mapping. Basically, pa &
985 			 * pgdir_mask should equal va & pgdir_mask.
986 			 */
987 			if (map->size > 2 * CORE_MMU_PGDIR_SIZE) {
988 				if (SUB_OVERFLOW(va, CORE_MMU_PGDIR_SIZE, &va))
989 					return false;
990 				va += (map->pa - va) & CORE_MMU_PGDIR_MASK;
991 			}
992 			map->va = va;
993 		}
994 	} else {
995 		/*
996 		 * Map non-tee ram regions at addresses higher than the tee
997 		 * ram region.
998 		 */
999 		for (map = memory_map; !core_mmap_is_end_of_table(map); map++) {
1000 			map->attr = core_mmu_type_to_attr(map->type);
1001 			if (map->va)
1002 				continue;
1003 
1004 			if (!IS_ENABLED(CFG_WITH_LPAE) &&
1005 			    va_is_secure != map_is_secure(map)) {
1006 				va_is_secure = !va_is_secure;
1007 				if (ROUNDUP_OVERFLOW(va, CORE_MMU_PGDIR_SIZE,
1008 						     &va))
1009 					return false;
1010 			}
1011 
1012 			if (ROUNDUP_OVERFLOW(va, map->region_size, &va))
1013 				return false;
1014 			/*
1015 			 * Make sure that va is aligned with pa for
1016 			 * efficient pgdir mapping. Basically, pa &
1017 			 * pgdir_mask should equal va & pgdir_mask.
1018 			 */
1019 			if (map->size > 2 * CORE_MMU_PGDIR_SIZE) {
1020 				vaddr_t offs = (map->pa - va) &
1021 					       CORE_MMU_PGDIR_MASK;
1022 
1023 				if (ADD_OVERFLOW(va, offs, &va))
1024 					return false;
1025 			}
1026 
1027 			map->va = va;
1028 			if (ADD_OVERFLOW(va, map->size, &va))
1029 				return false;
1030 			if (IS_ENABLED(ARM64) && va >= BIT64(get_va_width()))
1031 				return false;
1032 		}
1033 	}
1034 
1035 	return true;
1036 }
1037 
1038 static int cmp_init_mem_map(const void *a, const void *b)
1039 {
1040 	const struct tee_mmap_region *mm_a = a;
1041 	const struct tee_mmap_region *mm_b = b;
1042 	int rc = 0;
1043 
1044 	rc = CMP_TRILEAN(mm_a->region_size, mm_b->region_size);
1045 	if (!rc)
1046 		rc = CMP_TRILEAN(mm_a->pa, mm_b->pa);
1047 	/*
1048 	 * 32-bit MMU descriptors cannot mix secure and non-secure mappings in
1049 	 * the same level-2 table. Hence secure mappings are sorted apart from
1050 	 * non-secure mappings.
1051 	 */
1052 	if (!rc && !IS_ENABLED(CFG_WITH_LPAE))
1053 		rc = CMP_TRILEAN(map_is_secure(mm_a), map_is_secure(mm_b));
1054 
1055 	return rc;
1056 }
1057 
1058 static bool mem_map_add_id_map(struct tee_mmap_region *memory_map,
1059 			       size_t num_elems, size_t *last,
1060 			       vaddr_t id_map_start, vaddr_t id_map_end)
1061 {
1062 	struct tee_mmap_region *map = NULL;
1063 	vaddr_t start = ROUNDDOWN(id_map_start, SMALL_PAGE_SIZE);
1064 	vaddr_t end = ROUNDUP(id_map_end, SMALL_PAGE_SIZE);
1065 	size_t len = end - start;
1066 
1067 	if (*last >= num_elems - 1) {
1068 		EMSG("Out of entries (%zu) in memory map", num_elems);
1069 		panic();
1070 	}
1071 
1072 	for (map = memory_map; !core_mmap_is_end_of_table(map); map++)
1073 		if (core_is_buffer_intersect(map->va, map->size, start, len))
1074 			return false;
1075 
1076 	*map = (struct tee_mmap_region){
1077 		.type = MEM_AREA_IDENTITY_MAP_RX,
1078 		/*
1079 		 * Could use CORE_MMU_PGDIR_SIZE to potentially save a
1080 		 * translation table, at the increased risk of clashes with
1081 		 * the rest of the memory map.
1082 		 */
1083 		.region_size = SMALL_PAGE_SIZE,
1084 		.pa = start,
1085 		.va = start,
1086 		.size = len,
1087 		.attr = core_mmu_type_to_attr(MEM_AREA_IDENTITY_MAP_RX),
1088 	};
1089 
1090 	(*last)++;
1091 
1092 	return true;
1093 }
1094 
1095 static unsigned long init_mem_map(struct tee_mmap_region *memory_map,
1096 				  size_t num_elems, unsigned long seed)
1097 {
1098 	/*
1099 	 * @id_map_start and @id_map_end describe a physical memory range
1100 	 * that must be mapped Read-Only eXecutable at identical virtual
1101 	 * addresses.
1102 	 */
1103 	vaddr_t id_map_start = (vaddr_t)__identity_map_init_start;
1104 	vaddr_t id_map_end = (vaddr_t)__identity_map_init_end;
1105 	unsigned long offs = 0;
1106 	size_t last = 0;
1107 
1108 	last = collect_mem_ranges(memory_map, num_elems);
1109 	assign_mem_granularity(memory_map);
1110 
1111 	/*
1112 	 * To ease mapping and lower the use of xlat tables, sort the mapping
1113 	 * descriptions, moving small-page regions after the pgdir regions.
1114 	 */
1115 	qsort(memory_map, last, sizeof(struct tee_mmap_region),
1116 	      cmp_init_mem_map);
1117 
1118 	add_pager_vaspace(memory_map, num_elems, &last);
1119 	if (IS_ENABLED(CFG_CORE_ASLR) && seed) {
1120 		vaddr_t base_addr = TEE_RAM_START + seed;
1121 		const unsigned int va_width = get_va_width();
1122 		const vaddr_t va_mask = GENMASK_64(va_width - 1,
1123 						   SMALL_PAGE_SHIFT);
1124 		vaddr_t ba = base_addr;
1125 		size_t n = 0;
1126 
1127 		for (n = 0; n < 3; n++) {
1128 			if (n)
1129 				ba = base_addr ^ BIT64(va_width - n);
1130 			ba &= va_mask;
1131 			if (assign_mem_va(ba, memory_map) &&
1132 			    mem_map_add_id_map(memory_map, num_elems, &last,
1133 					       id_map_start, id_map_end)) {
1134 				offs = ba - TEE_RAM_START;
1135 				DMSG("Mapping core at %#"PRIxVA" offs %#lx",
1136 				     ba, offs);
1137 				goto out;
1138 			} else {
1139 				DMSG("Failed to map core at %#"PRIxVA, ba);
1140 			}
1141 		}
1142 		EMSG("Failed to map core with seed %#lx", seed);
1143 	}
1144 
1145 	if (!assign_mem_va(TEE_RAM_START, memory_map))
1146 		panic();
1147 
1148 out:
1149 	qsort(memory_map, last, sizeof(struct tee_mmap_region),
1150 	      cmp_mmap_by_lower_va);
1151 
1152 	dump_mmap_table(memory_map);
1153 
1154 	return offs;
1155 }
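/*
 * Illustrative sketch of the ASLR retry loop above (seed value made up):
 * with seed = 0x3d5000 the first candidate base is TEE_RAM_START + 0x3d5000,
 * masked down to a small-page aligned address within the usable VA width.
 * If that base cannot be used (for instance, it collides with the identity
 * map), the next candidates are derived by flipping bit (va_width - 1) and
 * then bit (va_width - 2) of the base before masking. After three failed
 * attempts the code falls back to mapping the core at TEE_RAM_START.
 */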
1156 
1157 static void check_mem_map(struct tee_mmap_region *map)
1158 {
1159 	struct tee_mmap_region *m = NULL;
1160 
1161 	for (m = map; !core_mmap_is_end_of_table(m); m++) {
1162 		switch (m->type) {
1163 		case MEM_AREA_TEE_RAM:
1164 		case MEM_AREA_TEE_RAM_RX:
1165 		case MEM_AREA_TEE_RAM_RO:
1166 		case MEM_AREA_TEE_RAM_RW:
1167 		case MEM_AREA_INIT_RAM_RX:
1168 		case MEM_AREA_INIT_RAM_RO:
1169 		case MEM_AREA_NEX_RAM_RW:
1170 		case MEM_AREA_NEX_RAM_RO:
1171 		case MEM_AREA_IDENTITY_MAP_RX:
1172 			if (!pbuf_is_inside(secure_only, m->pa, m->size))
1173 				panic("TEE_RAM can't fit in secure_only");
1174 			break;
1175 		case MEM_AREA_TA_RAM:
1176 			if (!pbuf_is_inside(secure_only, m->pa, m->size))
1177 				panic("TA_RAM can't fit in secure_only");
1178 			break;
1179 		case MEM_AREA_NSEC_SHM:
1180 			if (!pbuf_is_inside(nsec_shared, m->pa, m->size))
1181 				panic("NS_SHM can't fit in nsec_shared");
1182 			break;
1183 		case MEM_AREA_SEC_RAM_OVERALL:
1184 		case MEM_AREA_TEE_COHERENT:
1185 		case MEM_AREA_TEE_ASAN:
1186 		case MEM_AREA_IO_SEC:
1187 		case MEM_AREA_IO_NSEC:
1188 		case MEM_AREA_EXT_DT:
1189 		case MEM_AREA_RAM_SEC:
1190 		case MEM_AREA_RAM_NSEC:
1191 		case MEM_AREA_RES_VASPACE:
1192 		case MEM_AREA_SHM_VASPACE:
1193 		case MEM_AREA_PAGER_VASPACE:
1194 			break;
1195 		default:
1196 			EMSG("Unhandled memtype %d", m->type);
1197 			panic();
1198 		}
1199 	}
1200 }
1201 
1202 static struct tee_mmap_region *get_tmp_mmap(void)
1203 {
1204 	struct tee_mmap_region *tmp_mmap = (void *)__heap1_start;
1205 
1206 #ifdef CFG_WITH_PAGER
1207 	if (__heap1_end - __heap1_start < (ptrdiff_t)sizeof(static_memory_map))
1208 		tmp_mmap = (void *)__heap2_start;
1209 #endif
1210 
1211 	memset(tmp_mmap, 0, sizeof(static_memory_map));
1212 
1213 	return tmp_mmap;
1214 }
1215 
1216 /*
1217  * core_init_mmu_map() - init tee core default memory mapping
1218  *
1219  * This routine sets the static default TEE core mapping. If @seed is > 0
1220  * and CFG_CORE_ASLR is enabled, it will map the TEE core at a location
1221  * based on the seed and return the offset from the link address.
1222  *
1223  * If an error occurs, core_init_mmu_map() is expected to panic.
1224  *
1225  * Note: this function is weak just to make it possible to exclude it from
1226  * the unpaged area.
1227  */
1228 void __weak core_init_mmu_map(unsigned long seed, struct core_mmu_config *cfg)
1229 {
1230 #ifndef CFG_VIRTUALIZATION
1231 	vaddr_t start = ROUNDDOWN((vaddr_t)__nozi_start, SMALL_PAGE_SIZE);
1232 #else
1233 	vaddr_t start = ROUNDDOWN((vaddr_t)__vcore_nex_rw_start,
1234 				  SMALL_PAGE_SIZE);
1235 #endif
1236 	vaddr_t len = ROUNDUP((vaddr_t)__nozi_end, SMALL_PAGE_SIZE) - start;
1237 	struct tee_mmap_region *tmp_mmap = get_tmp_mmap();
1238 	unsigned long offs = 0;
1239 
1240 	check_sec_nsec_mem_config();
1241 
1242 	/*
1243 	 * Add an entry covering the translation tables which will be
1244 	 * involved in some virt_to_phys() and phys_to_virt() conversions.
1245 	 */
1246 	static_memory_map[0] = (struct tee_mmap_region){
1247 		.type = MEM_AREA_TEE_RAM,
1248 		.region_size = SMALL_PAGE_SIZE,
1249 		.pa = start,
1250 		.va = start,
1251 		.size = len,
1252 		.attr = core_mmu_type_to_attr(MEM_AREA_IDENTITY_MAP_RX),
1253 	};
1254 
1255 	COMPILE_TIME_ASSERT(CFG_MMAP_REGIONS >= 13);
1256 	offs = init_mem_map(tmp_mmap, ARRAY_SIZE(static_memory_map), seed);
1257 
1258 	check_mem_map(tmp_mmap);
1259 	core_init_mmu(tmp_mmap);
1260 	dump_xlat_table(0x0, CORE_MMU_BASE_TABLE_LEVEL);
1261 	core_init_mmu_regs(cfg);
1262 	cfg->load_offset = offs;
1263 	memcpy(static_memory_map, tmp_mmap, sizeof(static_memory_map));
1264 }
1265 
1266 bool core_mmu_mattr_is_ok(uint32_t mattr)
1267 {
1268 	/*
1269 	 * Keep in sync with core_mmu_lpae.c:mattr_to_desc and
1270 	 * core_mmu_v7.c:mattr_to_texcb
1271 	 */
1272 
1273 	switch ((mattr >> TEE_MATTR_CACHE_SHIFT) & TEE_MATTR_CACHE_MASK) {
1274 	case TEE_MATTR_CACHE_NONCACHE:
1275 	case TEE_MATTR_CACHE_CACHED:
1276 		return true;
1277 	default:
1278 		return false;
1279 	}
1280 }
1281 
1282 /*
1283  * test attributes of target physical buffer
1284  *
1285  * Flags: pbuf_is(SECURE, NOT_SECURE, RAM, IOMEM, KEYVAULT).
1286  *
1287  */
1288 bool core_pbuf_is(uint32_t attr, paddr_t pbuf, size_t len)
1289 {
1290 	struct tee_mmap_region *map;
1291 
1292 	/* Empty buffers comply with anything */
1293 	if (len == 0)
1294 		return true;
1295 
1296 	switch (attr) {
1297 	case CORE_MEM_SEC:
1298 		return pbuf_is_inside(secure_only, pbuf, len);
1299 	case CORE_MEM_NON_SEC:
1300 		return pbuf_is_inside(nsec_shared, pbuf, len) ||
1301 			pbuf_is_nsec_ddr(pbuf, len);
1302 	case CORE_MEM_TEE_RAM:
1303 		return core_is_buffer_inside(pbuf, len, TEE_RAM_START,
1304 							TEE_RAM_PH_SIZE);
1305 	case CORE_MEM_TA_RAM:
1306 		return core_is_buffer_inside(pbuf, len, TA_RAM_START,
1307 							TA_RAM_SIZE);
1308 #ifdef CFG_CORE_RESERVED_SHM
1309 	case CORE_MEM_NSEC_SHM:
1310 		return core_is_buffer_inside(pbuf, len, TEE_SHMEM_START,
1311 							TEE_SHMEM_SIZE);
1312 #endif
1313 	case CORE_MEM_SDP_MEM:
1314 		return pbuf_is_sdp_mem(pbuf, len);
1315 	case CORE_MEM_CACHED:
1316 		map = find_map_by_pa(pbuf);
1317 		if (map == NULL || !pbuf_inside_map_area(pbuf, len, map))
1318 			return false;
1319 		return map->attr >> TEE_MATTR_CACHE_SHIFT ==
1320 		       TEE_MATTR_CACHE_CACHED;
1321 	default:
1322 		return false;
1323 	}
1324 }
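/*
 * Usage sketch (illustrative only): a typical caller validates that a
 * physical buffer supplied by the normal world really is non-secure
 * before touching it, e.g.:
 *
 *	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, size))
 *		return TEE_ERROR_SECURITY;
 */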
1325 
1326 /* test attributes of target virtual buffer (in core mapping) */
1327 bool core_vbuf_is(uint32_t attr, const void *vbuf, size_t len)
1328 {
1329 	paddr_t p;
1330 
1331 	/* Empty buffers comply with anything */
1332 	if (len == 0)
1333 		return true;
1334 
1335 	p = virt_to_phys((void *)vbuf);
1336 	if (!p)
1337 		return false;
1338 
1339 	return core_pbuf_is(attr, p, len);
1340 }
1341 
1342 /* core_va2pa - teecore exported service */
1343 static int __maybe_unused core_va2pa_helper(void *va, paddr_t *pa)
1344 {
1345 	struct tee_mmap_region *map;
1346 
1347 	map = find_map_by_va(va);
1348 	if (!va_is_in_map(map, (vaddr_t)va))
1349 		return -1;
1350 
1351 	/*
1352 	 * We can calculate the PA for a static map. Virtual address ranges
1353 	 * reserved for core dynamic mapping return a 'match' (return 0;)
1354 	 * together with an invalid null physical address.
1355 	 */
1356 	if (map->pa)
1357 		*pa = map->pa + (vaddr_t)va  - map->va;
1358 	else
1359 		*pa = 0;
1360 
1361 	return 0;
1362 }
1363 
1364 static void *map_pa2va(struct tee_mmap_region *map, paddr_t pa, size_t len)
1365 {
1366 	if (!pa_is_in_map(map, pa, len))
1367 		return NULL;
1368 
1369 	return (void *)(vaddr_t)(map->va + pa - map->pa);
1370 }
1371 
1372 /*
1373  * teecore gets some memory area definitions
1374  */
1375 void core_mmu_get_mem_by_type(unsigned int type, vaddr_t *s, vaddr_t *e)
1376 {
1377 	struct tee_mmap_region *map = find_map_by_type(type);
1378 
1379 	if (map) {
1380 		*s = map->va;
1381 		*e = map->va + map->size;
1382 	} else {
1383 		*s = 0;
1384 		*e = 0;
1385 	}
1386 }
1387 
1388 enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa)
1389 {
1390 	struct tee_mmap_region *map = find_map_by_pa(pa);
1391 
1392 	if (!map)
1393 		return MEM_AREA_MAXTYPE;
1394 	return map->type;
1395 }
1396 
1397 void tlbi_mva_range(vaddr_t va, size_t len, size_t granule)
1398 {
1399 	assert(granule == CORE_MMU_PGDIR_SIZE || granule == SMALL_PAGE_SIZE);
1400 	assert(!(va & (granule - 1)) && !(len & (granule - 1)));
1401 
1402 	dsb_ishst();
1403 	while (len) {
1404 		tlbi_mva_allasid_nosync(va);
1405 		len -= granule;
1406 		va += granule;
1407 	}
1408 	dsb_ish();
1409 	isb();
1410 }
1411 
1412 void tlbi_mva_range_asid(vaddr_t va, size_t len, size_t granule, uint32_t asid)
1413 {
1414 	assert(granule == CORE_MMU_PGDIR_SIZE || granule == SMALL_PAGE_SIZE);
1415 	assert(!(va & (granule - 1)) && !(len & (granule - 1)));
1416 
1417 	dsb_ishst();
1418 	while (len) {
1419 		tlbi_mva_asid_nosync(va, asid);
1420 		len -= granule;
1421 		va += granule;
1422 	}
1423 	dsb_ish();
1424 	isb();
1425 }
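/*
 * Usage sketch (illustrative only): after removing a small-page mapping,
 * the stale translations for that range must be invalidated, e.g.:
 *
 *	// unmap [va, va + len) by clearing the entries, then:
 *	tlbi_mva_range(va, len, SMALL_PAGE_SIZE);
 *
 * Both va and len must be multiples of the granule, as the asserts above
 * enforce.
 */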
1426 
1427 TEE_Result cache_op_inner(enum cache_op op, void *va, size_t len)
1428 {
1429 	switch (op) {
1430 	case DCACHE_CLEAN:
1431 		dcache_op_all(DCACHE_OP_CLEAN);
1432 		break;
1433 	case DCACHE_AREA_CLEAN:
1434 		dcache_clean_range(va, len);
1435 		break;
1436 	case DCACHE_INVALIDATE:
1437 		dcache_op_all(DCACHE_OP_INV);
1438 		break;
1439 	case DCACHE_AREA_INVALIDATE:
1440 		dcache_inv_range(va, len);
1441 		break;
1442 	case ICACHE_INVALIDATE:
1443 		icache_inv_all();
1444 		break;
1445 	case ICACHE_AREA_INVALIDATE:
1446 		icache_inv_range(va, len);
1447 		break;
1448 	case DCACHE_CLEAN_INV:
1449 		dcache_op_all(DCACHE_OP_CLEAN_INV);
1450 		break;
1451 	case DCACHE_AREA_CLEAN_INV:
1452 		dcache_cleaninv_range(va, len);
1453 		break;
1454 	default:
1455 		return TEE_ERROR_NOT_IMPLEMENTED;
1456 	}
1457 	return TEE_SUCCESS;
1458 }
1459 
1460 #ifdef CFG_PL310
1461 TEE_Result cache_op_outer(enum cache_op op, paddr_t pa, size_t len)
1462 {
1463 	TEE_Result ret = TEE_SUCCESS;
1464 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
1465 
1466 	tee_l2cc_mutex_lock();
1467 	switch (op) {
1468 	case DCACHE_INVALIDATE:
1469 		arm_cl2_invbyway(pl310_base());
1470 		break;
1471 	case DCACHE_AREA_INVALIDATE:
1472 		if (len)
1473 			arm_cl2_invbypa(pl310_base(), pa, pa + len - 1);
1474 		break;
1475 	case DCACHE_CLEAN:
1476 		arm_cl2_cleanbyway(pl310_base());
1477 		break;
1478 	case DCACHE_AREA_CLEAN:
1479 		if (len)
1480 			arm_cl2_cleanbypa(pl310_base(), pa, pa + len - 1);
1481 		break;
1482 	case DCACHE_CLEAN_INV:
1483 		arm_cl2_cleaninvbyway(pl310_base());
1484 		break;
1485 	case DCACHE_AREA_CLEAN_INV:
1486 		if (len)
1487 			arm_cl2_cleaninvbypa(pl310_base(), pa, pa + len - 1);
1488 		break;
1489 	default:
1490 		ret = TEE_ERROR_NOT_IMPLEMENTED;
1491 	}
1492 
1493 	tee_l2cc_mutex_unlock();
1494 	thread_set_exceptions(exceptions);
1495 	return ret;
1496 }
1497 #endif /*CFG_PL310*/
1498 
1499 void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
1500 			paddr_t pa, uint32_t attr)
1501 {
1502 	assert(idx < tbl_info->num_entries);
1503 	core_mmu_set_entry_primitive(tbl_info->table, tbl_info->level,
1504 				     idx, pa, attr);
1505 }
1506 
1507 void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
1508 			paddr_t *pa, uint32_t *attr)
1509 {
1510 	assert(idx < tbl_info->num_entries);
1511 	core_mmu_get_entry_primitive(tbl_info->table, tbl_info->level,
1512 				     idx, pa, attr);
1513 }
1514 
1515 static void clear_region(struct core_mmu_table_info *tbl_info,
1516 			 struct tee_mmap_region *region)
1517 {
1518 	unsigned int end = 0;
1519 	unsigned int idx = 0;
1520 
1521 	/* va, len and pa should be block aligned */
1522 	assert(!core_mmu_get_block_offset(tbl_info, region->va));
1523 	assert(!core_mmu_get_block_offset(tbl_info, region->size));
1524 	assert(!core_mmu_get_block_offset(tbl_info, region->pa));
1525 
1526 	idx = core_mmu_va2idx(tbl_info, region->va);
1527 	end = core_mmu_va2idx(tbl_info, region->va + region->size);
1528 
1529 	while (idx < end) {
1530 		core_mmu_set_entry(tbl_info, idx, 0, 0);
1531 		idx++;
1532 	}
1533 }
1534 
1535 static void set_region(struct core_mmu_table_info *tbl_info,
1536 		struct tee_mmap_region *region)
1537 {
1538 	unsigned end;
1539 	unsigned idx;
1540 	paddr_t pa;
1541 
1542 	/* va, len and pa should be block aligned */
1543 	assert(!core_mmu_get_block_offset(tbl_info, region->va));
1544 	assert(!core_mmu_get_block_offset(tbl_info, region->size));
1545 	assert(!core_mmu_get_block_offset(tbl_info, region->pa));
1546 
1547 	idx = core_mmu_va2idx(tbl_info, region->va);
1548 	end = core_mmu_va2idx(tbl_info, region->va + region->size);
1549 	pa = region->pa;
1550 
1551 	while (idx < end) {
1552 		core_mmu_set_entry(tbl_info, idx, pa, region->attr);
1553 		idx++;
1554 		pa += BIT64(tbl_info->shift);
1555 	}
1556 }
1557 
1558 static void set_pg_region(struct core_mmu_table_info *dir_info,
1559 			struct vm_region *region, struct pgt **pgt,
1560 			struct core_mmu_table_info *pg_info)
1561 {
1562 	struct tee_mmap_region r = {
1563 		.va = region->va,
1564 		.size = region->size,
1565 		.attr = region->attr,
1566 	};
1567 	vaddr_t end = r.va + r.size;
1568 	uint32_t pgt_attr = (r.attr & TEE_MATTR_SECURE) | TEE_MATTR_TABLE;
1569 
1570 	while (r.va < end) {
1571 		if (!pg_info->table ||
1572 		     r.va >= (pg_info->va_base + CORE_MMU_PGDIR_SIZE)) {
1573 			/*
1574 			 * We're assigning a new translation table.
1575 			 */
1576 			unsigned int idx;
1577 
1578 			/* Virtual addresses must grow */
1579 			assert(r.va > pg_info->va_base);
1580 
1581 			idx = core_mmu_va2idx(dir_info, r.va);
1582 			pg_info->va_base = core_mmu_idx2va(dir_info, idx);
1583 
1584 #ifdef CFG_PAGED_USER_TA
1585 			/*
1586 			 * Advance pgt to va_base, note that we may need to
1587 			 * skip multiple page tables if there are large
1588 			 * holes in the vm map.
1589 			 */
1590 			while ((*pgt)->vabase < pg_info->va_base) {
1591 				*pgt = SLIST_NEXT(*pgt, link);
1592 				/* We should have alloced enough */
1593 				assert(*pgt);
1594 			}
1595 			assert((*pgt)->vabase == pg_info->va_base);
1596 			pg_info->table = (*pgt)->tbl;
1597 #else
1598 			assert(*pgt); /* We should have alloced enough */
1599 			pg_info->table = (*pgt)->tbl;
1600 			*pgt = SLIST_NEXT(*pgt, link);
1601 #endif
1602 
1603 			core_mmu_set_entry(dir_info, idx,
1604 					   virt_to_phys(pg_info->table),
1605 					   pgt_attr);
1606 		}
1607 
1608 		r.size = MIN(CORE_MMU_PGDIR_SIZE - (r.va - pg_info->va_base),
1609 			     end - r.va);
1610 
1611 		if (!mobj_is_paged(region->mobj)) {
1612 			size_t granule = BIT(pg_info->shift);
1613 			size_t offset = r.va - region->va + region->offset;
1614 
1615 			r.size = MIN(r.size,
1616 				     mobj_get_phys_granule(region->mobj));
1617 			r.size = ROUNDUP(r.size, SMALL_PAGE_SIZE);
1618 
1619 			if (mobj_get_pa(region->mobj, offset, granule,
1620 					&r.pa) != TEE_SUCCESS)
1621 				panic("Failed to get PA of unpaged mobj");
1622 			set_region(pg_info, &r);
1623 		}
1624 		r.va += r.size;
1625 	}
1626 }
1627 
1628 static bool can_map_at_level(paddr_t paddr, vaddr_t vaddr,
1629 			     size_t size_left, paddr_t block_size,
1630 			     struct tee_mmap_region *mm __maybe_unused)
1631 {
1632 
1633 	/* VA and PA are aligned to block size at current level */
1634 	if ((vaddr | paddr) & (block_size - 1))
1635 		return false;
1636 
1637 	/* Remainder fits into block at current level */
1638 	if (size_left < block_size)
1639 		return false;
1640 
1641 #ifdef CFG_WITH_PAGER
1642 	/*
1643 	 * If pager is enabled, we need to map tee ram
1644 	 * regions with small pages only
1645 	 */
1646 	if (map_is_tee_ram(mm) && block_size != SMALL_PAGE_SIZE)
1647 		return false;
1648 #endif
1649 
1650 	return true;
1651 }
1652 
1653 void core_mmu_map_region(struct mmu_partition *prtn, struct tee_mmap_region *mm)
1654 {
1655 	struct core_mmu_table_info tbl_info;
1656 	unsigned int idx;
1657 	vaddr_t vaddr = mm->va;
1658 	paddr_t paddr = mm->pa;
1659 	ssize_t size_left = mm->size;
1660 	unsigned int level;
1661 	bool table_found;
1662 	uint32_t old_attr;
1663 
1664 	assert(!((vaddr | paddr) & SMALL_PAGE_MASK));
1665 
1666 	while (size_left > 0) {
1667 		level = CORE_MMU_BASE_TABLE_LEVEL;
1668 
1669 		while (true) {
1670 			paddr_t block_size = 0;
1671 
1672 			assert(level <= CORE_MMU_PGDIR_LEVEL);
1673 
1674 			table_found = core_mmu_find_table(prtn, vaddr, level,
1675 							  &tbl_info);
1676 			if (!table_found)
1677 				panic("can't find table for mapping");
1678 
1679 			block_size = BIT64(tbl_info.shift);
1680 
1681 			idx = core_mmu_va2idx(&tbl_info, vaddr);
1682 			if (!can_map_at_level(paddr, vaddr, size_left,
1683 					      block_size, mm)) {
1684 				/*
1685 				 * This part of the region can't be mapped at
1686 				 * this level. Need to go deeper.
1687 				 */
1688 				if (!core_mmu_entry_to_finer_grained(&tbl_info,
1689 					      idx, mm->attr & TEE_MATTR_SECURE))
1690 					panic("Can't divide MMU entry");
1691 				level++;
1692 				continue;
1693 			}
1694 
1695 			/* We can map part of the region at current level */
1696 			core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr);
1697 			if (old_attr)
1698 				panic("Page is already mapped");
1699 
1700 			core_mmu_set_entry(&tbl_info, idx, paddr, mm->attr);
1701 			paddr += block_size;
1702 			vaddr += block_size;
1703 			size_left -= block_size;
1704 
1705 			break;
1706 		}
1707 	}
1708 }
1709 
1710 TEE_Result core_mmu_map_pages(vaddr_t vstart, paddr_t *pages, size_t num_pages,
1711 			      enum teecore_memtypes memtype)
1712 {
1713 	TEE_Result ret;
1714 	struct core_mmu_table_info tbl_info;
1715 	struct tee_mmap_region *mm;
1716 	unsigned int idx;
1717 	uint32_t old_attr;
1718 	uint32_t exceptions;
1719 	vaddr_t vaddr = vstart;
1720 	size_t i;
1721 	bool secure;
1722 
1723 	assert(!(core_mmu_type_to_attr(memtype) & TEE_MATTR_PX));
1724 
1725 	secure = core_mmu_type_to_attr(memtype) & TEE_MATTR_SECURE;
1726 
1727 	if (vaddr & SMALL_PAGE_MASK)
1728 		return TEE_ERROR_BAD_PARAMETERS;
1729 
1730 	exceptions = mmu_lock();
1731 
1732 	mm = find_map_by_va((void *)vaddr);
1733 	if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1))
1734 		panic("VA does not belong to any known mm region");
1735 
1736 	if (!core_mmu_is_dynamic_vaspace(mm))
1737 		panic("Trying to map into static region");
1738 
1739 	for (i = 0; i < num_pages; i++) {
1740 		if (pages[i] & SMALL_PAGE_MASK) {
1741 			ret = TEE_ERROR_BAD_PARAMETERS;
1742 			goto err;
1743 		}
1744 
1745 		while (true) {
1746 			if (!core_mmu_find_table(NULL, vaddr, UINT_MAX,
1747 						 &tbl_info))
1748 				panic("Can't find pagetable for vaddr ");
1749 
1750 			idx = core_mmu_va2idx(&tbl_info, vaddr);
1751 			if (tbl_info.shift == SMALL_PAGE_SHIFT)
1752 				break;
1753 
1754 			/* This is supertable. Need to divide it. */
1755 			if (!core_mmu_entry_to_finer_grained(&tbl_info, idx,
1756 							     secure))
1757 				panic("Failed to spread pgdir on small tables");
1758 		}
1759 
1760 		core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr);
1761 		if (old_attr)
1762 			panic("Page is already mapped");
1763 
1764 		core_mmu_set_entry(&tbl_info, idx, pages[i],
1765 				   core_mmu_type_to_attr(memtype));
1766 		vaddr += SMALL_PAGE_SIZE;
1767 	}
1768 
1769 	/*
1770 	 * Make sure all the changes to translation tables are visible
1771 	 * before returning. TLB doesn't need to be invalidated as we are
1772 	 * guaranteed that there's no valid mapping in this range.
1773 	 */
1774 	dsb_ishst();
1775 	mmu_unlock(exceptions);
1776 
1777 	return TEE_SUCCESS;
1778 err:
1779 	mmu_unlock(exceptions);
1780 
1781 	if (i)
1782 		core_mmu_unmap_pages(vstart, i);
1783 
1784 	return ret;
1785 }
1786 
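/*
 * Same as core_mmu_map_pages() but for a physically contiguous range
 * starting at @pstart. Once the arguments have been validated there is
 * no error path: any inconsistency found while walking the tables causes
 * a panic.
 */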
TEE_Result core_mmu_map_contiguous_pages(vaddr_t vstart, paddr_t pstart,
					 size_t num_pages,
					 enum teecore_memtypes memtype)
{
	struct core_mmu_table_info tbl_info = { };
	struct tee_mmap_region *mm = NULL;
	unsigned int idx = 0;
	uint32_t old_attr = 0;
	uint32_t exceptions = 0;
	vaddr_t vaddr = vstart;
	paddr_t paddr = pstart;
	size_t i = 0;
	bool secure = false;

	assert(!(core_mmu_type_to_attr(memtype) & TEE_MATTR_PX));

	secure = core_mmu_type_to_attr(memtype) & TEE_MATTR_SECURE;

	if ((vaddr | paddr) & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	exceptions = mmu_lock();

	mm = find_map_by_va((void *)vaddr);
	if (!mm || !va_is_in_map(mm, vaddr + num_pages * SMALL_PAGE_SIZE - 1))
		panic("VA does not belong to any known mm region");

	if (!core_mmu_is_dynamic_vaspace(mm))
		panic("Trying to map into static region");

	for (i = 0; i < num_pages; i++) {
		while (true) {
			if (!core_mmu_find_table(NULL, vaddr, UINT_MAX,
						 &tbl_info))
				panic("Can't find pagetable for vaddr ");

			idx = core_mmu_va2idx(&tbl_info, vaddr);
			if (tbl_info.shift == SMALL_PAGE_SHIFT)
				break;

			/* This is a supertable. Need to divide it. */
			if (!core_mmu_entry_to_finer_grained(&tbl_info, idx,
							     secure))
				panic("Failed to spread pgdir on small tables");
		}

		core_mmu_get_entry(&tbl_info, idx, NULL, &old_attr);
		if (old_attr)
			panic("Page is already mapped");

		core_mmu_set_entry(&tbl_info, idx, paddr,
				   core_mmu_type_to_attr(memtype));
		paddr += SMALL_PAGE_SIZE;
		vaddr += SMALL_PAGE_SIZE;
	}

	/*
	 * Make sure all the changes to translation tables are visible
	 * before returning. TLB doesn't need to be invalidated as we are
	 * guaranteed that there's no valid mapping in this range.
	 */
	dsb_ishst();
	mmu_unlock(exceptions);

	return TEE_SUCCESS;
}

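/*
 * Removes @num_pages small-page mappings starting at @vstart from a
 * dynamic VA-space region and invalidates the TLB. Panics if the range
 * is unknown or wasn't mapped with small pages.
 */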
void core_mmu_unmap_pages(vaddr_t vstart, size_t num_pages)
{
	struct core_mmu_table_info tbl_info;
	struct tee_mmap_region *mm;
	size_t i;
	unsigned int idx;
	uint32_t exceptions;

	exceptions = mmu_lock();

	mm = find_map_by_va((void *)vstart);
	if (!mm || !va_is_in_map(mm, vstart + num_pages * SMALL_PAGE_SIZE - 1))
		panic("VA does not belong to any known mm region");

	if (!core_mmu_is_dynamic_vaspace(mm))
		panic("Trying to unmap static region");

	for (i = 0; i < num_pages; i++, vstart += SMALL_PAGE_SIZE) {
		if (!core_mmu_find_table(NULL, vstart, UINT_MAX, &tbl_info))
			panic("Can't find pagetable");

		if (tbl_info.shift != SMALL_PAGE_SHIFT)
			panic("Invalid pagetable level");

		idx = core_mmu_va2idx(&tbl_info, vstart);
		core_mmu_set_entry(&tbl_info, idx, 0, 0);
	}
	tlbi_all();

	mmu_unlock(exceptions);
}

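/*
 * Fills in the user-mode mapping of @uctx below the directory described
 * by @dir_info: page tables covering the whole VA range are allocated
 * from the per-thread pgt cache up front, then each VM region is
 * installed with set_pg_region().
 */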
void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
				struct user_mode_ctx *uctx)
{
	struct core_mmu_table_info pg_info = { };
	struct pgt_cache *pgt_cache = &thread_get_tsd()->pgt_cache;
	struct pgt *pgt = NULL;
	struct vm_region *r = NULL;
	struct vm_region *r_last = NULL;

	/* Find the first and last valid entry */
	r = TAILQ_FIRST(&uctx->vm_info.regions);
	if (!r)
		return; /* Nothing to map */
	r_last = TAILQ_LAST(&uctx->vm_info.regions, vm_region_head);

	/*
	 * Allocate all page tables in advance.
	 */
	pgt_alloc(pgt_cache, uctx->ts_ctx, r->va,
		  r_last->va + r_last->size - 1);
	pgt = SLIST_FIRST(pgt_cache);

	core_mmu_set_info_table(&pg_info, dir_info->level + 1, 0, NULL);

	TAILQ_FOREACH(r, &uctx->vm_info.regions, link)
		set_pg_region(dir_info, r, &pgt, &pg_info);
}

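/*
 * Undoes a mapping previously added with core_mmu_add_mapping(): the
 * match must cover the whole granule-aligned entry, the translation
 * table entries are cleared and the entry is dropped from
 * static_memory_map. When the range ends right where the reserved VA
 * space begins, the VA range is handed back to MEM_AREA_RES_VASPACE.
 */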
TEE_Result core_mmu_remove_mapping(enum teecore_memtypes type, void *addr,
				   size_t len)
{
	struct core_mmu_table_info tbl_info = { };
	struct tee_mmap_region *res_map = NULL;
	struct tee_mmap_region *map = NULL;
	paddr_t pa = virt_to_phys(addr);
	size_t granule = 0;
	ptrdiff_t i = 0;
	paddr_t p = 0;
	size_t l = 0;

	map = find_map_by_type_and_pa(type, pa, len);
	if (!map)
		return TEE_ERROR_GENERIC;

	res_map = find_map_by_type(MEM_AREA_RES_VASPACE);
	if (!res_map)
		return TEE_ERROR_GENERIC;
	if (!core_mmu_find_table(NULL, res_map->va, UINT_MAX, &tbl_info))
		return TEE_ERROR_GENERIC;
	granule = BIT(tbl_info.shift);

	if (map < static_memory_map ||
	    map >= static_memory_map + ARRAY_SIZE(static_memory_map))
		return TEE_ERROR_GENERIC;
	i = map - static_memory_map;

	/* Check that we have a full match */
	p = ROUNDDOWN(pa, granule);
	l = ROUNDUP(len + pa - p, granule);
	if (map->pa != p || map->size != l)
		return TEE_ERROR_GENERIC;

	clear_region(&tbl_info, map);
	tlbi_all();

	/* If possible remove the va range from res_map */
	if (res_map->va - map->size == map->va) {
		res_map->va -= map->size;
		res_map->size += map->size;
	}

	/* Remove the entry. */
	memmove(map, map + 1,
		(ARRAY_SIZE(static_memory_map) - i - 1) * sizeof(*map));

	/* Clear the last new entry in case it was used */
	memset(static_memory_map + ARRAY_SIZE(static_memory_map) - 1,
	       0, sizeof(*map));

	return TEE_SUCCESS;
}

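/*
 * Returns the only mapping of @type provided there is exactly one and it
 * is at least @len bytes large, otherwise NULL.
 */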
struct tee_mmap_region *
core_mmu_find_mapping_exclusive(enum teecore_memtypes type, size_t len)
{
	struct tee_mmap_region *map = NULL;
	struct tee_mmap_region *map_found = NULL;

	if (!len)
		return NULL;

	for (map = get_memory_map(); !core_mmap_is_end_of_table(map); map++) {
		if (map->type != type)
			continue;

		if (map_found)
			return NULL;

		map_found = map;
	}

	if (!map_found || map_found->size < len)
		return NULL;

	return map_found;
}

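/*
 * Maps @len bytes of physical memory at @addr as @type and returns the
 * corresponding virtual address, or NULL on failure. If the range is
 * already covered by a suitable mapping that VA is reused, otherwise a
 * new granule-aligned entry is carved out of MEM_AREA_RES_VASPACE.
 *
 * Illustrative use, with a hypothetical device base address:
 *
 *	va = core_mmu_add_mapping(MEM_AREA_IO_SEC, MY_DEV_PA, MY_DEV_SIZE);
 */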
void *core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len)
{
	struct core_mmu_table_info tbl_info;
	struct tee_mmap_region *map;
	size_t n;
	size_t granule;
	paddr_t p;
	size_t l;

	if (!len)
		return NULL;

	if (!core_mmu_check_end_pa(addr, len))
		return NULL;

	/* Check if the memory is already mapped */
	map = find_map_by_type_and_pa(type, addr, len);
	if (map && pbuf_inside_map_area(addr, len, map))
		return (void *)(vaddr_t)(map->va + addr - map->pa);

	/* Find the reserved va space used for late mappings */
	map = find_map_by_type(MEM_AREA_RES_VASPACE);
	if (!map)
		return NULL;

	if (!core_mmu_find_table(NULL, map->va, UINT_MAX, &tbl_info))
		return NULL;

	granule = BIT64(tbl_info.shift);
	p = ROUNDDOWN(addr, granule);
	l = ROUNDUP(len + addr - p, granule);

	/* Ban overflowing virtual addresses */
	if (map->size < l)
		return NULL;

	/*
	 * Something is wrong, we can't fit the va range into the selected
	 * table. The reserved va range is possibly misaligned with the
	 * granule.
	 */
	if (core_mmu_va2idx(&tbl_info, map->va + len) >= tbl_info.num_entries)
		return NULL;

	/* Find end of the memory map */
	n = 0;
	while (!core_mmap_is_end_of_table(static_memory_map + n))
		n++;

	if (n < (ARRAY_SIZE(static_memory_map) - 1)) {
		/* There's room for another entry */
		static_memory_map[n].va = map->va;
		static_memory_map[n].size = l;
		static_memory_map[n + 1].type = MEM_AREA_END;
		map->va += l;
		map->size -= l;
		map = static_memory_map + n;
	} else {
		/*
		 * There isn't room for another entry, steal the reserved
		 * entry as it's not useful for anything else any longer.
		 */
		map->size = l;
	}
	map->type = type;
	map->region_size = granule;
	map->attr = core_mmu_type_to_attr(type);
	map->pa = p;

	set_region(&tbl_info, map);

	/* Make sure the new entry is visible before continuing. */
	dsb_ishst();

	return (void *)(vaddr_t)(map->va + addr - map->pa);
}

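/*
 * ASIDs are handed out in pairs: asid_alloc() reserves bit i in g_asid
 * and returns the even value (i + 1) * 2, so the caller owns that ASID
 * and the following odd one. A return value of 0 means the pool is
 * exhausted. asid_free() takes the even value and releases the pair.
 */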
unsigned int asid_alloc(void)
{
	uint32_t exceptions = cpu_spin_lock_xsave(&g_asid_spinlock);
	unsigned int r;
	int i;

	bit_ffc(g_asid, MMU_NUM_ASID_PAIRS, &i);
	if (i == -1) {
		r = 0;
	} else {
		bit_set(g_asid, i);
		r = (i + 1) * 2;
	}

	cpu_spin_unlock_xrestore(&g_asid_spinlock, exceptions);
	return r;
}

void asid_free(unsigned int asid)
{
	uint32_t exceptions = cpu_spin_lock_xsave(&g_asid_spinlock);

	/* Only even ASIDs are supposed to be allocated */
	assert(!(asid & 1));

	if (asid) {
		int i = (asid - 1) / 2;

		assert(i < MMU_NUM_ASID_PAIRS && bit_test(g_asid, i));
		bit_clear(g_asid, i);
	}

	cpu_spin_unlock_xrestore(&g_asid_spinlock, exceptions);
}

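/*
 * Translates @va using the hardware address translation instruction
 * (ATS1CPR on ARM32, AT S1E1R on ARM64) and reads the result back from
 * the PAR register. Returns false if the translation aborted.
 */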
static bool arm_va2pa_helper(void *va, paddr_t *pa)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	paddr_t par = 0;
	paddr_t par_pa_mask = 0;
	bool ret = false;

#ifdef ARM32
	write_ats1cpr((vaddr_t)va);
	isb();
#ifdef CFG_WITH_LPAE
	par = read_par64();
	par_pa_mask = PAR64_PA_MASK;
#else
	par = read_par32();
	par_pa_mask = PAR32_PA_MASK;
#endif
#endif /*ARM32*/

#ifdef ARM64
	write_at_s1e1r((vaddr_t)va);
	isb();
	par = read_par_el1();
	par_pa_mask = PAR_PA_MASK;
#endif
	if (par & PAR_F)
		goto out;
	*pa = (par & (par_pa_mask << PAR_PA_SHIFT)) |
		((vaddr_t)va & (BIT64(PAR_PA_SHIFT) - 1));

	ret = true;
out:
	thread_unmask_exceptions(exceptions);
	return ret;
}

#ifdef CFG_WITH_PAGER
static vaddr_t get_linear_map_end(void)
{
	/* this is synced with the generic linker file kern.ld.S */
	return (vaddr_t)__heap2_end;
}
#endif

#if defined(CFG_TEE_CORE_DEBUG)
static void check_pa_matches_va(void *va, paddr_t pa)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	vaddr_t v = (vaddr_t)va;
	paddr_t p = 0;
	struct core_mmu_table_info ti __maybe_unused = { };

	if (core_mmu_user_va_range_is_defined()) {
		vaddr_t user_va_base = 0;
		size_t user_va_size = 0;

		core_mmu_get_user_va_range(&user_va_base, &user_va_size);
		if (v >= user_va_base &&
		    v <= (user_va_base - 1 + user_va_size)) {
			if (!core_mmu_user_mapping_is_active()) {
				if (pa)
					panic("issue in linear address space");
				return;
			}

			res = vm_va2pa(to_user_mode_ctx(thread_get_tsd()->ctx),
				       va, &p);
			if (res == TEE_ERROR_NOT_SUPPORTED)
				return;
			if (res == TEE_SUCCESS && pa != p)
				panic("bad pa");
			if (res != TEE_SUCCESS && pa)
				panic("false pa");
			return;
		}
	}
#ifdef CFG_WITH_PAGER
	if (is_unpaged(va)) {
		if (v - boot_mmu_config.load_offset != pa)
			panic("issue in linear address space");
		return;
	}

	if (tee_pager_get_table_info(v, &ti)) {
		uint32_t a;

		/*
		 * Lookups in the page table managed by the pager are
		 * dangerous for addresses in the paged area as those pages
		 * change all the time. But some ranges are safe, for
		 * instance rw-locked areas while the page is populated.
		 */
		core_mmu_get_entry(&ti, core_mmu_va2idx(&ti, v), &p, &a);
		if (a & TEE_MATTR_VALID_BLOCK) {
			paddr_t mask = BIT64(ti.shift) - 1;

			p |= v & mask;
			if (pa != p)
				panic();
		} else
			if (pa)
				panic();
		return;
	}
#endif

	if (!core_va2pa_helper(va, &p)) {
		/* Verify only the static mapping (non-null phys addr case) */
		if (p && pa != p) {
			DMSG("va %p maps 0x%" PRIxPA ", expect 0x%" PRIxPA,
					va, p, pa);
			panic();
		}
	} else {
		if (pa) {
			DMSG("va %p unmapped, expect 0x%" PRIxPA, va, pa);
			panic();
		}
	}
}
#else
static void check_pa_matches_va(void *va __unused, paddr_t pa __unused)
{
}
#endif

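/*
 * Returns the physical address backing @va, or 0 if the address isn't
 * currently mapped. With CFG_TEE_CORE_DEBUG the result is cross-checked
 * against the software view of the translation tables.
 */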
paddr_t virt_to_phys(void *va)
{
	paddr_t pa = 0;

	if (!arm_va2pa_helper(va, &pa))
		pa = 0;
	check_pa_matches_va(va, pa);
	return pa;
}

#if defined(CFG_TEE_CORE_DEBUG)
static void check_va_matches_pa(paddr_t pa, void *va)
{
	paddr_t p = 0;

	if (!va)
		return;

	p = virt_to_phys(va);
	if (p != pa) {
		DMSG("va %p maps 0x%" PRIxPA " expect 0x%" PRIxPA, va, p, pa);
		panic();
	}
}
#else
static void check_va_matches_pa(paddr_t pa __unused, void *va __unused)
{
}
#endif

static void *phys_to_virt_ts_vaspace(paddr_t pa, size_t len)
{
	if (!core_mmu_user_mapping_is_active())
		return NULL;

	return vm_pa2va(to_user_mode_ctx(thread_get_tsd()->ctx), pa, len);
}

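/*
 * Resolves a physical address inside TEE RAM to a virtual address. With
 * the pager the linear map is used when possible and paged addresses are
 * delegated to tee_pager_phys_to_virt(); without the pager the static
 * memory map is searched directly.
 */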
#ifdef CFG_WITH_PAGER
static void *phys_to_virt_tee_ram(paddr_t pa, size_t len)
{
	paddr_t end_pa = 0;

	if (SUB_OVERFLOW(len, 1, &end_pa) || ADD_OVERFLOW(pa, end_pa, &end_pa))
		return NULL;

	if (pa >= TEE_LOAD_ADDR && pa < get_linear_map_end()) {
		if (end_pa > get_linear_map_end())
			return NULL;
		return (void *)(vaddr_t)(pa + boot_mmu_config.load_offset);
	}

	return tee_pager_phys_to_virt(pa, len);
}
#else
static void *phys_to_virt_tee_ram(paddr_t pa, size_t len)
{
	struct tee_mmap_region *mmap = NULL;

	mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM, pa, len);
	if (!mmap)
		mmap = find_map_by_type_and_pa(MEM_AREA_NEX_RAM_RW, pa, len);
	if (!mmap)
		mmap = find_map_by_type_and_pa(MEM_AREA_NEX_RAM_RO, pa, len);
	if (!mmap)
		mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RW, pa, len);
	if (!mmap)
		mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RO, pa, len);
	if (!mmap)
		mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RX, pa, len);
	/*
	 * Note that MEM_AREA_INIT_RAM_RO and MEM_AREA_INIT_RAM_RX are only
	 * used with pager and not needed here.
	 */
	return map_pa2va(mmap, pa, len);
}
#endif

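/*
 * Generic PA-to-VA lookup for core mappings of memory type @m, returning
 * NULL when no mapping of at least @len bytes covers @pa.
 *
 * Illustrative use (the physical address is hypothetical):
 *
 *	regs = phys_to_virt(REGS_PA, MEM_AREA_IO_SEC, sizeof(*regs));
 */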
void *phys_to_virt(paddr_t pa, enum teecore_memtypes m, size_t len)
{
	void *va = NULL;

	switch (m) {
	case MEM_AREA_TS_VASPACE:
		va = phys_to_virt_ts_vaspace(pa, len);
		break;
	case MEM_AREA_TEE_RAM:
	case MEM_AREA_TEE_RAM_RX:
	case MEM_AREA_TEE_RAM_RO:
	case MEM_AREA_TEE_RAM_RW:
	case MEM_AREA_NEX_RAM_RO:
	case MEM_AREA_NEX_RAM_RW:
		va = phys_to_virt_tee_ram(pa, len);
		break;
	case MEM_AREA_SHM_VASPACE:
		/* Finding a VA from a PA in dynamic SHM is not yet supported */
		va = NULL;
		break;
	default:
		va = map_pa2va(find_map_by_type_and_pa(m, pa, len), pa, len);
	}
	if (m != MEM_AREA_SEC_RAM_OVERALL)
		check_va_matches_pa(pa, va);
	return va;
}

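/*
 * Like phys_to_virt() but tries the secure and then the non-secure I/O
 * mappings.
 */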
void *phys_to_virt_io(paddr_t pa, size_t len)
{
	struct tee_mmap_region *map = NULL;
	void *va = NULL;

	map = find_map_by_type_and_pa(MEM_AREA_IO_SEC, pa, len);
	if (!map)
		map = find_map_by_type_and_pa(MEM_AREA_IO_NSEC, pa, len);
	if (!map)
		return NULL;
	va = map_pa2va(map, pa, len);
	check_va_matches_pa(pa, va);
	return va;
}

bool cpu_mmu_enabled(void)
{
	uint32_t sctlr;

#ifdef ARM32
	sctlr = read_sctlr();
#else
	sctlr = read_sctlr_el1();
#endif

	return sctlr & SCTLR_M ? true : false;
}

vaddr_t core_mmu_get_va(paddr_t pa, enum teecore_memtypes type, size_t len)
{
	if (cpu_mmu_enabled())
		return (vaddr_t)phys_to_virt(pa, type, len);

	return (vaddr_t)pa;
}

#ifdef CFG_WITH_PAGER
bool is_unpaged(void *va)
{
	vaddr_t v = (vaddr_t)va;

	return v >= VCORE_START_VA && v < get_linear_map_end();
}
#else
bool is_unpaged(void *va __unused)
{
	return true;
}
#endif

void core_mmu_init_virtualization(void)
{
	virt_init_memory(static_memory_map);
}

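/*
 * The io_pa_or_va*() helpers return a virtual address once the MMU is
 * enabled and the plain physical address before that, caching the
 * translated VA in the structure. Typical driver use, with a
 * hypothetical register base:
 *
 *	static struct io_pa_va base = { .pa = UART0_PA };
 *
 *	vaddr_t va = io_pa_or_va_secure(&base, UART0_REG_SIZE);
 */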
vaddr_t io_pa_or_va(struct io_pa_va *p, size_t len)
{
	assert(p->pa);
	if (cpu_mmu_enabled()) {
		if (!p->va)
			p->va = (vaddr_t)phys_to_virt_io(p->pa, len);
		assert(p->va);
		return p->va;
	}
	return p->pa;
}

vaddr_t io_pa_or_va_secure(struct io_pa_va *p, size_t len)
{
	assert(p->pa);
	if (cpu_mmu_enabled()) {
		if (!p->va)
			p->va = (vaddr_t)phys_to_virt(p->pa, MEM_AREA_IO_SEC,
						      len);
		assert(p->va);
		return p->va;
	}
	return p->pa;
}

vaddr_t io_pa_or_va_nsec(struct io_pa_va *p, size_t len)
{
	assert(p->pa);
	if (cpu_mmu_enabled()) {
		if (!p->va)
			p->va = (vaddr_t)phys_to_virt(p->pa, MEM_AREA_IO_NSEC,
						      len);
		assert(p->va);
		return p->va;
	}
	return p->pa;
}

#ifdef CFG_CORE_RESERVED_SHM
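/*
 * Records the reserved non-secure shared memory range in
 * default_nsec_shm_paddr/default_nsec_shm_size and, with CFG_PL310,
 * carves out the statically allocated L2 cache controller mutex at the
 * start of that range.
 */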
static TEE_Result teecore_init_pub_ram(void)
{
	vaddr_t s = 0;
	vaddr_t e = 0;

	/* get virtual addr/size of NSec shared mem allocated from teecore */
	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &s, &e);

	if (s >= e || s & SMALL_PAGE_MASK || e & SMALL_PAGE_MASK)
		panic("invalid PUB RAM");

	/* extra check: we could rely on core_mmu_get_mem_by_type() */
	if (!tee_vbuf_is_non_sec(s, e - s))
		panic("PUB RAM is not non-secure");

#ifdef CFG_PL310
	/* Allocate statically the l2cc mutex */
	tee_l2cc_store_mutex_boot_pa(virt_to_phys((void *)s));
	s += sizeof(uint32_t);			/* size of a pl310 mutex */
	s = ROUNDUP(s, SMALL_PAGE_SIZE);	/* keep required alignment */
#endif

	default_nsec_shm_paddr = virt_to_phys((void *)s);
	default_nsec_shm_size = e - s;

	return TEE_SUCCESS;
}
early_init(teecore_init_pub_ram);
#endif /*CFG_CORE_RESERVED_SHM*/

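/*
 * Initializes the tee_mm_sec_ddr pool backing TA load/execute memory
 * after checking that the range is secure and properly aligned. With
 * CFG_VIRTUALIZATION the range is provided by virt_get_ta_ram() rather
 * than looked up in the static memory map.
 */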
void core_mmu_init_ta_ram(void)
{
	vaddr_t s = 0;
	vaddr_t e = 0;
	paddr_t ps = 0;
	size_t size = 0;

	/*
	 * Get the virtual address range of the RAM where TAs are loaded
	 * and executed.
	 */
	if (IS_ENABLED(CFG_VIRTUALIZATION))
		virt_get_ta_ram(&s, &e);
	else
		core_mmu_get_mem_by_type(MEM_AREA_TA_RAM, &s, &e);

	ps = virt_to_phys((void *)s);
	size = e - s;

	if (!ps || (ps & CORE_MMU_USER_CODE_MASK) ||
	    !size || (size & CORE_MMU_USER_CODE_MASK))
		panic("invalid TA RAM");

	/* extra check: we could rely on core_mmu_get_mem_by_type() */
	if (!tee_pbuf_is_sec(ps, size))
		panic("TA RAM is not secure");

	if (!tee_mm_is_empty(&tee_mm_sec_ddr))
		panic("TA RAM pool is not empty");

	/* remove previous config and init TA ddr memory pool */
	tee_mm_final(&tee_mm_sec_ddr);
	tee_mm_init(&tee_mm_sec_ddr, ps, size, CORE_MMU_USER_CODE_SHIFT,
		    TEE_MM_POOL_NO_FLAGS);
}
