// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm.h>
#include <assert.h>
#include <keep.h>
#include <kernel/cache_helpers.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/tlb_helpers.h>
#include <kernel/thread.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/pgt_cache.h>
#include <platform_config.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>
#include <util.h>

#include "core_mmu_private.h"

#ifdef CFG_WITH_LPAE
#error This file is not to be used with LPAE
#endif

#ifdef CFG_VIRTUALIZATION
#error Currently V7 MMU code does not support virtualization
#endif

#ifndef DEBUG_XLAT_TABLE
#define DEBUG_XLAT_TABLE 0
#endif

#if DEBUG_XLAT_TABLE
#define debug_print(...) DMSG_RAW(__VA_ARGS__)
#else
#define debug_print(...) ((void)0)
#endif

/*
 * MMU related values
 */

/* Sharable */
#define TEE_MMU_TTB_S           (1 << 1)

/* Not Outer Sharable */
#define TEE_MMU_TTB_NOS         (1 << 5)

/* Normal memory, Inner Non-cacheable */
#define TEE_MMU_TTB_IRGN_NC     0

/* Normal memory, Inner Write-Back Write-Allocate Cacheable */
#define TEE_MMU_TTB_IRGN_WBWA   (1 << 6)

/* Normal memory, Inner Write-Through Cacheable */
#define TEE_MMU_TTB_IRGN_WT     1

/* Normal memory, Inner Write-Back no Write-Allocate Cacheable */
#define TEE_MMU_TTB_IRGN_WB     (1 | (1 << 6))

/* Normal memory, Outer Write-Back Write-Allocate Cacheable */
#define TEE_MMU_TTB_RNG_WBWA    (1 << 3)

/* Normal memory, Outer Write-Back no Write-Allocate Cacheable */
#define TEE_MMU_TTB_RNG_WB      (3 << 3)

#ifndef CFG_NO_SMP
#define TEE_MMU_DEFAULT_ATTRS \
		(TEE_MMU_TTB_S | TEE_MMU_TTB_NOS | \
		 TEE_MMU_TTB_IRGN_WBWA | TEE_MMU_TTB_RNG_WBWA)
#else
#define TEE_MMU_DEFAULT_ATTRS (TEE_MMU_TTB_IRGN_WB | TEE_MMU_TTB_RNG_WB)
#endif


#define INVALID_DESC		0x0

#define SECTION_SHIFT		20
#define SECTION_MASK		0x000fffff
#define SECTION_SIZE		0x00100000

/* armv7 memory mapping attributes: section mapping */
#define SECTION_SECURE			(0 << 19)
#define SECTION_NOTSECURE		(1 << 19)
#define SECTION_SHARED			(1 << 16)
#define SECTION_NOTGLOBAL		(1 << 17)
#define SECTION_ACCESS_FLAG		(1 << 10)
#define SECTION_UNPRIV			(1 << 11)
#define SECTION_RO			(1 << 15)
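
/*
 * SECTION_TEXCB() takes a concatenated {TEX, C, B} value and spreads it
 * over the section descriptor bit positions: TEX into bits [14:12], C into
 * bit 3 and B into bit 2. SMALL_PAGE_TEXCB() below does the same for small
 * page descriptors, where TEX sits in bits [8:6].
 */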
#define SECTION_TEXCB(texcb)		((((texcb) >> 2) << 12) | \
					 ((((texcb) >> 1) & 0x1) << 3) | \
					 (((texcb) & 0x1) << 2))
#define SECTION_DEVICE			SECTION_TEXCB(ATTR_DEVICE_INDEX)
#define SECTION_NORMAL			SECTION_TEXCB(ATTR_DEVICE_INDEX)
#define SECTION_NORMAL_CACHED		SECTION_TEXCB(ATTR_NORMAL_CACHED_INDEX)

#define SECTION_XN			(1 << 4)
#define SECTION_PXN			(1 << 0)
#define SECTION_SECTION			(2 << 0)

#define SECTION_PT_NOTSECURE		(1 << 3)
#define SECTION_PT_PT			(1 << 0)

#define SECTION_PT_ATTR_MASK		~((1 << 10) - 1)

#define SMALL_PAGE_SMALL_PAGE		(1 << 1)
#define SMALL_PAGE_SHARED		(1 << 10)
#define SMALL_PAGE_NOTGLOBAL		(1 << 11)
#define SMALL_PAGE_TEXCB(texcb)		((((texcb) >> 2) << 6) | \
					 ((((texcb) >> 1) & 0x1) << 3) | \
					 (((texcb) & 0x1) << 2))
#define SMALL_PAGE_DEVICE		SMALL_PAGE_TEXCB(ATTR_DEVICE_INDEX)
#define SMALL_PAGE_NORMAL		SMALL_PAGE_TEXCB(ATTR_DEVICE_INDEX)
#define SMALL_PAGE_NORMAL_CACHED	SMALL_PAGE_TEXCB(ATTR_NORMAL_CACHED_INDEX)
#define SMALL_PAGE_ACCESS_FLAG		(1 << 4)
#define SMALL_PAGE_UNPRIV		(1 << 5)
#define SMALL_PAGE_RO			(1 << 9)
#define SMALL_PAGE_XN			(1 << 0)


/* The TEX, C and B bits concatenated */
#define ATTR_DEVICE_INDEX		0x0
#define ATTR_NORMAL_CACHED_INDEX	0x1

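/*
 * PRRR_IDX() builds the 2-bit memory type field TRn for remap index n
 * (0b00 Strongly-ordered, 0b01 Device, 0b10 Normal) together with the
 * NOSn bit at bit (24 + n). NMRR_IDX() builds the inner/outer cacheability
 * fields IRn/ORn for the same index (0b00 Non-cacheable, 0b01 Write-Back
 * Write-Allocate, 0b10 Write-Through, 0b11 Write-Back no Write-Allocate).
 * These registers only take effect with TEX remap (SCTLR.TRE) enabled,
 * which is assumed to be done by the MMU enable code.
 */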
#define PRRR_IDX(idx, tr, nos)		(((tr) << (2 * (idx))) | \
					 ((uint32_t)(nos) << ((idx) + 24)))
#define NMRR_IDX(idx, ir, or)		(((ir) << (2 * (idx))) | \
					 ((uint32_t)(or) << (2 * (idx) + 16)))
#define PRRR_DS0			(1 << 16)
#define PRRR_DS1			(1 << 17)
#define PRRR_NS0			(1 << 18)
#define PRRR_NS1			(1 << 19)

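/*
 * Resulting remap attributes: index 0 is Device memory (non-cacheable),
 * index 1 is Normal memory, either inner/outer Write-Back Write-Allocate
 * and marked Not-Outer-Shareable in the SMP case, or inner/outer
 * Write-Back no Write-Allocate when CFG_NO_SMP is set, matching
 * TEE_MMU_DEFAULT_ATTRS above.
 */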
#define ATTR_DEVICE_PRRR		PRRR_IDX(ATTR_DEVICE_INDEX, 1, 0)
#define ATTR_DEVICE_NMRR		NMRR_IDX(ATTR_DEVICE_INDEX, 0, 0)

#ifndef CFG_NO_SMP
#define ATTR_NORMAL_CACHED_PRRR		PRRR_IDX(ATTR_NORMAL_CACHED_INDEX, 2, 1)
#define ATTR_NORMAL_CACHED_NMRR		NMRR_IDX(ATTR_NORMAL_CACHED_INDEX, 1, 1)
#else
#define ATTR_NORMAL_CACHED_PRRR		PRRR_IDX(ATTR_NORMAL_CACHED_INDEX, 2, 0)
#define ATTR_NORMAL_CACHED_NMRR		NMRR_IDX(ATTR_NORMAL_CACHED_INDEX, 3, 3)
#endif

#define NUM_L1_ENTRIES		4096
#define NUM_L2_ENTRIES		256

#define L1_TBL_SIZE		(NUM_L1_ENTRIES * 4)
#define L2_TBL_SIZE		(NUM_L2_ENTRIES * 4)
#define L1_ALIGNMENT		L1_TBL_SIZE
#define L2_ALIGNMENT		L2_TBL_SIZE

/* Defined to the smallest possible secondary L1 MMU table */
#define TTBCR_N_VALUE		7
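/*
 * With TTBCR.N == 7 the translation table pointed to by TTBR0 is
 * 2^(14 - 7) = 128 bytes, i.e. 32 entries covering the first 32 MiB of
 * the virtual address space; everything at or above 32 MiB is translated
 * via the main table in TTBR1.
 */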

/* Number of sections in ttbr0 when user mapping activated */
#define NUM_UL1_ENTRIES         (1 << (12 - TTBCR_N_VALUE))
#define UL1_ALIGNMENT		(NUM_UL1_ENTRIES * 4)
/* TTB attributes */

/* TTB0 of TTBR0 (depends on TTBCR_N_VALUE) */
#define TTB_UL1_MASK		(~(UL1_ALIGNMENT - 1))
/* TTB1 of TTBR1 */
#define TTB_L1_MASK		(~(L1_ALIGNMENT - 1))

#ifndef MAX_XLAT_TABLES
#ifdef CFG_CORE_ASLR
#	define XLAT_TABLE_ASLR_EXTRA 2
#else
#	define XLAT_TABLE_ASLR_EXTRA 0
#endif
#define MAX_XLAT_TABLES		(4 + XLAT_TABLE_ASLR_EXTRA)
#endif /*!MAX_XLAT_TABLES*/

enum desc_type {
	DESC_TYPE_PAGE_TABLE,
	DESC_TYPE_SECTION,
	DESC_TYPE_SUPER_SECTION,
	DESC_TYPE_LARGE_PAGE,
	DESC_TYPE_SMALL_PAGE,
	DESC_TYPE_INVALID,
};

typedef uint32_t l1_xlat_tbl_t[NUM_L1_ENTRIES];
typedef uint32_t l2_xlat_tbl_t[NUM_L2_ENTRIES];
typedef uint32_t ul1_xlat_tbl_t[NUM_UL1_ENTRIES];

static l1_xlat_tbl_t main_mmu_l1_ttb
		__aligned(L1_ALIGNMENT) __section(".nozi.mmu.l1");

/* L2 MMU tables */
static l2_xlat_tbl_t main_mmu_l2_ttb[MAX_XLAT_TABLES]
		__aligned(L2_ALIGNMENT) __section(".nozi.mmu.l2");

/* MMU L1 table for TAs, one for each thread */
static ul1_xlat_tbl_t main_mmu_ul1_ttb[CFG_NUM_THREADS]
		__aligned(UL1_ALIGNMENT) __section(".nozi.mmu.ul1");

struct mmu_partition {
	l1_xlat_tbl_t *l1_table;
	l2_xlat_tbl_t *l2_tables;
	ul1_xlat_tbl_t *ul1_tables;
	uint32_t tables_used;
};

static struct mmu_partition default_partition = {
	.l1_table = &main_mmu_l1_ttb,
	.l2_tables = main_mmu_l2_ttb,
	.ul1_tables = main_mmu_ul1_ttb,
	.tables_used = 0,
};

#ifdef CFG_VIRTUALIZATION
static struct mmu_partition *current_prtn[CFG_TEE_CORE_NB_CORE];

void core_mmu_set_default_prtn_tbl(void)
{
	size_t n = 0;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
		current_prtn[n] = &default_partition;
}
#endif

static struct mmu_partition *get_prtn(void)
{
#ifdef CFG_VIRTUALIZATION
	return current_prtn[get_core_pos()];
#else
	return &default_partition;
#endif
}

static vaddr_t core_mmu_get_main_ttb_va(struct mmu_partition *prtn)
{
	return (vaddr_t)prtn->l1_table;
}

static paddr_t core_mmu_get_main_ttb_pa(struct mmu_partition *prtn)
{
	paddr_t pa = virt_to_phys((void *)core_mmu_get_main_ttb_va(prtn));

	if (pa & ~TTB_L1_MASK)
		panic("invalid core l1 table");
	return pa;
}

static vaddr_t core_mmu_get_ul1_ttb_va(struct mmu_partition *prtn)
{
	return (vaddr_t)prtn->ul1_tables[thread_get_id()];
}

static paddr_t core_mmu_get_ul1_ttb_pa(struct mmu_partition *prtn)
{
	paddr_t pa = virt_to_phys((void *)core_mmu_get_ul1_ttb_va(prtn));

	if (pa & ~TTB_UL1_MASK)
		panic("invalid user l1 table");
	return pa;
}

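/*
 * Hands out L2 translation tables from the statically reserved
 * main_mmu_l2_ttb[] pool, bump-allocator style: "size" is the virtual
 * address span to cover and is rounded up to whole tables (each table
 * maps NUM_L2_ENTRIES * SMALL_PAGE_SIZE = 1 MiB of VA). Returns NULL
 * when the pool (MAX_XLAT_TABLES) is exhausted; nothing in this file
 * ever frees the tables.
 */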
static void *core_mmu_alloc_l2(struct mmu_partition *prtn, size_t size)
{
	uint32_t to_alloc = ROUNDUP(size, NUM_L2_ENTRIES * SMALL_PAGE_SIZE) /
		(NUM_L2_ENTRIES * SMALL_PAGE_SIZE);

	DMSG("L2 table used: %d/%d", prtn->tables_used + to_alloc,
	     MAX_XLAT_TABLES);
	if (prtn->tables_used + to_alloc > MAX_XLAT_TABLES)
		return NULL;

	memset(prtn->l2_tables[prtn->tables_used], 0,
		sizeof(l2_xlat_tbl_t) * to_alloc);
	prtn->tables_used += to_alloc;
	return prtn->l2_tables[prtn->tables_used - to_alloc];
}

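/*
 * Decodes the ARMv7 short-descriptor type bits: at level 1,
 * bits [1:0] == 0b01 is a page table and 0b1x is a section (bit 18 set
 * means supersection); at level 2, 0b01 is a large page and 0b1x is a
 * small page (bit 0 then carries XN rather than a type bit).
 */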
static enum desc_type get_desc_type(unsigned level, uint32_t desc)
{
	assert(level >= 1 && level <= 2);

	if (level == 1) {
		if ((desc & 0x3) == 0x1)
			return DESC_TYPE_PAGE_TABLE;

		if ((desc & 0x2) == 0x2) {
			if (desc & (1 << 18))
				return DESC_TYPE_SUPER_SECTION;
			return DESC_TYPE_SECTION;
		}
	} else {
		if ((desc & 0x3) == 0x1)
			return DESC_TYPE_LARGE_PAGE;

		if ((desc & 0x2) == 0x2)
			return DESC_TYPE_SMALL_PAGE;
	}

	return DESC_TYPE_INVALID;
}

static uint32_t texcb_to_mattr(uint32_t texcb)
{
	COMPILE_TIME_ASSERT(ATTR_DEVICE_INDEX == TEE_MATTR_CACHE_NONCACHE);
	COMPILE_TIME_ASSERT(ATTR_NORMAL_CACHED_INDEX == TEE_MATTR_CACHE_CACHED);

	return texcb << TEE_MATTR_CACHE_SHIFT;
}

static uint32_t mattr_to_texcb(uint32_t attr)
{
	/* Keep in sync with core_mmu.c:core_mmu_mattr_is_ok */
	return (attr >> TEE_MATTR_CACHE_SHIFT) & TEE_MATTR_CACHE_MASK;
}


static uint32_t desc_to_mattr(unsigned level, uint32_t desc)
{
	uint32_t a;

	switch (get_desc_type(level, desc)) {
	case DESC_TYPE_PAGE_TABLE:
		a = TEE_MATTR_TABLE;
		if (!(desc & SECTION_PT_NOTSECURE))
			a |= TEE_MATTR_SECURE;
		break;
	case DESC_TYPE_SECTION:
		a = TEE_MATTR_VALID_BLOCK;
		if (desc & SECTION_ACCESS_FLAG)
			a |= TEE_MATTR_PRX | TEE_MATTR_URX;

		if (!(desc & SECTION_RO))
			a |= TEE_MATTR_PW | TEE_MATTR_UW;

		if (desc & SECTION_XN)
			a &= ~(TEE_MATTR_PX | TEE_MATTR_UX);

		if (desc & SECTION_PXN)
			a &= ~TEE_MATTR_PX;

		a |= texcb_to_mattr(((desc >> 12) & 0x7) | ((desc >> 2) & 0x3));

		if (!(desc & SECTION_NOTGLOBAL))
			a |= TEE_MATTR_GLOBAL;

		if (!(desc & SECTION_NOTSECURE))
			a |= TEE_MATTR_SECURE;

		break;
	case DESC_TYPE_SMALL_PAGE:
		a = TEE_MATTR_VALID_BLOCK;
		if (desc & SMALL_PAGE_ACCESS_FLAG)
			a |= TEE_MATTR_PRX | TEE_MATTR_URX;

		if (!(desc & SMALL_PAGE_RO))
			a |= TEE_MATTR_PW | TEE_MATTR_UW;

		if (desc & SMALL_PAGE_XN)
			a &= ~(TEE_MATTR_PX | TEE_MATTR_UX);

		a |= texcb_to_mattr(((desc >> 6) & 0x7) | ((desc >> 2) & 0x3));

		if (!(desc & SMALL_PAGE_NOTGLOBAL))
			a |= TEE_MATTR_GLOBAL;
		break;
	default:
		return 0;
	}

	return a;
}

static uint32_t mattr_to_desc(unsigned level, uint32_t attr)
{
	uint32_t desc;
	uint32_t a = attr;
	unsigned texcb;

	if (level == 1 && (a & TEE_MATTR_TABLE)) {
		desc = SECTION_PT_PT;
		if (!(a & TEE_MATTR_SECURE))
			desc |= SECTION_PT_NOTSECURE;
		return desc;
	}

	if (!(a & TEE_MATTR_VALID_BLOCK))
		return INVALID_DESC;

	if (a & (TEE_MATTR_PX | TEE_MATTR_PW))
		a |= TEE_MATTR_PR;
	if (a & (TEE_MATTR_UX | TEE_MATTR_UW))
		a |= TEE_MATTR_UR;
	if (a & TEE_MATTR_UR)
		a |= TEE_MATTR_PR;
	if (a & TEE_MATTR_UW)
		a |= TEE_MATTR_PW;


	texcb = mattr_to_texcb(a);

	if (level == 1) {	/* Section */
#ifndef CFG_NO_SMP
		desc = SECTION_SECTION | SECTION_SHARED;
#else
		desc = SECTION_SECTION;
#endif

		if (!(a & (TEE_MATTR_PX | TEE_MATTR_UX)))
			desc |= SECTION_XN;

#ifdef CFG_HWSUPP_MEM_PERM_PXN
		if (!(a & TEE_MATTR_PX))
			desc |= SECTION_PXN;
#endif

		if (a & TEE_MATTR_UR)
			desc |= SECTION_UNPRIV;

		if (!(a & TEE_MATTR_PW))
			desc |= SECTION_RO;

		if (a & (TEE_MATTR_UR | TEE_MATTR_PR))
			desc |= SECTION_ACCESS_FLAG;

		if (!(a & TEE_MATTR_GLOBAL))
			desc |= SECTION_NOTGLOBAL;

		if (!(a & TEE_MATTR_SECURE))
			desc |= SECTION_NOTSECURE;

		desc |= SECTION_TEXCB(texcb);
	} else {
#ifndef CFG_NO_SMP
		desc = SMALL_PAGE_SMALL_PAGE | SMALL_PAGE_SHARED;
#else
		desc = SMALL_PAGE_SMALL_PAGE;
#endif

		if (!(a & (TEE_MATTR_PX | TEE_MATTR_UX)))
			desc |= SMALL_PAGE_XN;

		if (a & TEE_MATTR_UR)
			desc |= SMALL_PAGE_UNPRIV;

		if (!(a & TEE_MATTR_PW))
			desc |= SMALL_PAGE_RO;

		if (a & (TEE_MATTR_UR | TEE_MATTR_PR))
			desc |= SMALL_PAGE_ACCESS_FLAG;

		if (!(a & TEE_MATTR_GLOBAL))
			desc |= SMALL_PAGE_NOTGLOBAL;

		desc |= SMALL_PAGE_TEXCB(texcb);
	}

	return desc;
}

void core_mmu_set_info_table(struct core_mmu_table_info *tbl_info,
		unsigned level, vaddr_t va_base, void *table)
{
	tbl_info->level = level;
	tbl_info->table = table;
	tbl_info->va_base = va_base;
	assert(level <= 2);
	if (level == 1) {
		tbl_info->shift = SECTION_SHIFT;
		tbl_info->num_entries = NUM_L1_ENTRIES;
	} else {
		tbl_info->shift = SMALL_PAGE_SHIFT;
		tbl_info->num_entries = NUM_L2_ENTRIES;
	}
}

void core_mmu_get_user_pgdir(struct core_mmu_table_info *pgd_info)
{
	void *tbl = (void *)core_mmu_get_ul1_ttb_va(get_prtn());

	core_mmu_set_info_table(pgd_info, 1, 0, tbl);
	pgd_info->num_entries = NUM_UL1_ENTRIES;
}

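/*
 * Builds the per-thread user mapping: the 32-entry TTBR0 table for the
 * current thread is cleared, populated from the user mode context, and
 * the resulting TTBR0 value (table PA plus TEE_MMU_DEFAULT_ATTRS) and
 * ASID are returned in @map, ready to be installed with
 * core_mmu_set_user_map().
 */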
void core_mmu_create_user_map(struct user_mode_ctx *uctx,
			      struct core_mmu_user_map *map)
{
	struct core_mmu_table_info dir_info = { };

	COMPILE_TIME_ASSERT(L2_TBL_SIZE == PGT_SIZE);

	core_mmu_get_user_pgdir(&dir_info);
	memset(dir_info.table, 0, dir_info.num_entries * sizeof(uint32_t));
	core_mmu_populate_user_map(&dir_info, uctx);
	map->ttbr0 = core_mmu_get_ul1_ttb_pa(get_prtn()) |
		     TEE_MMU_DEFAULT_ATTRS;
	map->ctxid = uctx->vm_info.asid;
}

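/*
 * Returns the table info for the table translating @va: the L1 table,
 * unless @max_level allows level 2 and the L1 entry is a page table
 * descriptor, in which case the corresponding L2 table is looked up
 * through phys_to_virt(). Only fails if that virtual mapping is missing.
 */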
bool core_mmu_find_table(struct mmu_partition *prtn, vaddr_t va,
			 unsigned max_level,
			 struct core_mmu_table_info *tbl_info)
{
	uint32_t *tbl;
	unsigned n = va >> SECTION_SHIFT;

	if (!prtn)
		prtn = get_prtn();
	tbl = (uint32_t *)core_mmu_get_main_ttb_va(prtn);

	if (max_level == 1 || (tbl[n] & 0x3) != 0x1) {
		core_mmu_set_info_table(tbl_info, 1, 0, tbl);
	} else {
		paddr_t ntbl = tbl[n] & ~((1 << 10) - 1);
		void *l2tbl = phys_to_virt(ntbl, MEM_AREA_TEE_RAM_RW_DATA,
					   L2_TBL_SIZE);

		if (!l2tbl)
			return false;

		core_mmu_set_info_table(tbl_info, 2, n << SECTION_SHIFT, l2tbl);
	}
	return true;
}

void core_mmu_set_entry_primitive(void *table, size_t level, size_t idx,
				  paddr_t pa, uint32_t attr)
{
	uint32_t *tbl = table;
	uint32_t desc = mattr_to_desc(level, attr);

	tbl[idx] = desc | pa;
}

static paddr_t desc_to_pa(unsigned level, uint32_t desc)
{
	unsigned shift_mask;

	switch (get_desc_type(level, desc)) {
	case DESC_TYPE_PAGE_TABLE:
		shift_mask = 10;
		break;
	case DESC_TYPE_SECTION:
		shift_mask = 20;
		break;
	case DESC_TYPE_SUPER_SECTION:
		shift_mask = 24; /* We're ignoring bits 32 and above. */
		break;
	case DESC_TYPE_LARGE_PAGE:
		shift_mask = 16;
		break;
	case DESC_TYPE_SMALL_PAGE:
		shift_mask = 12;
		break;
	default:
		/* Invalid section */
		shift_mask = 4;
	}

	return desc & ~((1 << shift_mask) - 1);
}

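/*
 * Replaces a 1 MiB L1 entry with a freshly allocated L2 table so the
 * region can be mapped with 4 KiB granularity. An existing section
 * mapping is re-created as 256 small-page entries with the same
 * attributes; an existing page table is kept as-is, provided its
 * security attribute matches @secure.
 */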
bool core_mmu_entry_to_finer_grained(struct core_mmu_table_info *tbl_info,
				     unsigned int idx, bool secure)
{
	uint32_t *new_table;
	uint32_t *entry;
	uint32_t new_table_desc;
	uint32_t attr;
	uint32_t desc;
	paddr_t pa;
	int i;

	if (tbl_info->level != 1)
		return false;

	if (idx >= NUM_L1_ENTRIES)
		return false;

	entry = (uint32_t *)tbl_info->table + idx;
	attr = desc_to_mattr(1, *entry);

	if (*entry && get_desc_type(1, *entry) == DESC_TYPE_PAGE_TABLE) {
		/*
		 * If there already is a page table,
		 * check that the secure attribute fits
		 */
		return secure == (bool)(attr & TEE_MATTR_SECURE);
	}

	/* If there is something mapped, check the secure access flag */
	if (attr && secure != (bool)(attr & TEE_MATTR_SECURE))
		return false;

	new_table = core_mmu_alloc_l2(get_prtn(),
				      NUM_L2_ENTRIES * SMALL_PAGE_SIZE);

	if (!new_table)
		return false;

	new_table_desc = SECTION_PT_PT | virt_to_phys(new_table);

	if (!secure)
		new_table_desc |= SECTION_PT_NOTSECURE;

	if (*entry) {
		pa = desc_to_pa(1, *entry);
		desc = mattr_to_desc(2, attr);
		for (i = 0; i < NUM_L2_ENTRIES; i++, pa += SMALL_PAGE_SIZE)
			new_table[i] = desc | pa;
	}

	/* Update descriptor at current level */
	*entry = new_table_desc;

	return true;
}

void core_mmu_get_entry_primitive(const void *table, size_t level,
				  size_t idx, paddr_t *pa, uint32_t *attr)
{
	const uint32_t *tbl = table;

	if (pa)
		*pa = desc_to_pa(level, tbl[idx]);

	if (attr)
		*attr = desc_to_mattr(level, tbl[idx]);
}

void core_mmu_get_user_va_range(vaddr_t *base, size_t *size)
{
	if (base) {
		/* Leaving the first entry unmapped to make NULL unmapped */
		*base = 1 << SECTION_SHIFT;
	}

	if (size)
		*size = (NUM_UL1_ENTRIES - 1) << SECTION_SHIFT;
}

void core_mmu_get_user_map(struct core_mmu_user_map *map)
{
	map->ttbr0 = read_ttbr0();
	map->ctxid = read_contextidr();
}

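/*
 * Switches the user mapping: CONTEXTIDR is first set to the reserved
 * ASID 0 before TTBR0 is changed, so that speculative walks cannot pair
 * the old ASID with the new tables (or vice versa), then the new TTBR0
 * and ASID are written. With a NULL @map, TTBR0 is simply restored to
 * the kernel tables (TTBR1). TLBs and the instruction cache are
 * invalidated afterwards.
 */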
void core_mmu_set_user_map(struct core_mmu_user_map *map)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	/*
	 * Update the reserved Context ID and TTBR0
	 */

	dsb();  /* ARM erratum 754322 */
	write_contextidr(0);
	isb();

	if (map) {
		write_ttbr0(map->ttbr0);
		isb();
		write_contextidr(map->ctxid);
		isb();
	} else {
		write_ttbr0(read_ttbr1());
		isb();
	}

	tlbi_all();
	icache_inv_all();

	/* Restore interrupts */
	thread_unmask_exceptions(exceptions);
}

bool core_mmu_user_mapping_is_active(void)
{
	bool ret;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	ret = read_ttbr0() != read_ttbr1();
	thread_unmask_exceptions(exceptions);

	return ret;
}

static void print_mmap_area(const struct tee_mmap_region *mm __maybe_unused,
				const char *str __maybe_unused)
{
	if (!(mm->attr & TEE_MATTR_VALID_BLOCK))
		debug_print("%s [%08" PRIxVA " %08" PRIxVA "] not mapped",
				str, mm->va, mm->va + mm->size);
	else
		debug_print("%s [%08" PRIxVA " %08" PRIxVA "] %s-%s-%s-%s",
				str, mm->va, mm->va + mm->size,
				mm->attr & (TEE_MATTR_CACHE_CACHED <<
					TEE_MATTR_CACHE_SHIFT) ? "MEM" : "DEV",
				mm->attr & TEE_MATTR_PW ? "RW" : "RO",
				mm->attr & TEE_MATTR_PX ? "X" : "XN",
				mm->attr & TEE_MATTR_SECURE ? "S" : "NS");
}

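/*
 * Maps a region with 1 MiB section descriptors directly in the given L1
 * table. The assert checks that any entry already present agrees with
 * the descriptor about to be written.
 */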
void map_memarea_sections(const struct tee_mmap_region *mm, uint32_t *ttb)
{
	uint32_t attr = mattr_to_desc(1, mm->attr);
	size_t idx = mm->va >> SECTION_SHIFT;
	paddr_t pa = 0;
	size_t n;

	if (core_mmap_is_end_of_table(mm))
		return;

	print_mmap_area(mm, "section map");

	attr = mattr_to_desc(1, mm->attr);
	if (attr != INVALID_DESC)
		pa = mm->pa;

	n = ROUNDUP(mm->size, SECTION_SIZE) >> SECTION_SHIFT;
	while (n--) {
		assert(!attr || !ttb[idx] || ttb[idx] == (pa | attr));

		ttb[idx] = pa | attr;
		idx++;
		pa += SECTION_SIZE;
	}
}

void core_init_mmu_prtn(struct mmu_partition *prtn, struct tee_mmap_region *mm)
{
	void *ttb1 = (void *)core_mmu_get_main_ttb_va(prtn);
	size_t n;

	/* reset L1 table */
	memset(ttb1, 0, L1_TBL_SIZE);

	for (n = 0; !core_mmap_is_end_of_table(mm + n); n++)
		if (!core_mmu_is_dynamic_vaspace(mm + n))
			core_mmu_map_region(prtn, mm + n);
}

bool core_mmu_place_tee_ram_at_top(paddr_t paddr)
{
	return paddr > 0x80000000;
}

void core_init_mmu(struct tee_mmap_region *mm)
{
	/* Initialize default pagetables */
	core_init_mmu_prtn(&default_partition, mm);
}

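/*
 * Computes the values later written to the MMU registers (TTBR, the
 * PRRR/NMRR TEX remap contents, DACR and TTBCR) by the boot code before
 * the MMU is enabled. PRRR_NS1/PRRR_DS1 make regions whose descriptor
 * S bit is set shareable, for Normal and Device memory respectively.
 */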
void core_init_mmu_regs(struct core_mmu_config *cfg)
{
	cfg->ttbr = core_mmu_get_main_ttb_pa(&default_partition) |
		    TEE_MMU_DEFAULT_ATTRS;

	cfg->prrr = ATTR_DEVICE_PRRR | ATTR_NORMAL_CACHED_PRRR;
	cfg->nmrr = ATTR_DEVICE_NMRR | ATTR_NORMAL_CACHED_NMRR;

	cfg->prrr |= PRRR_NS1 | PRRR_DS1;

	/*
	 * Program Domain access control register with two domains:
	 * domain 0: teecore
	 * domain 1: TA
	 */
	cfg->dacr = DACR_DOMAIN(0, DACR_DOMAIN_PERM_CLIENT) |
		    DACR_DOMAIN(1, DACR_DOMAIN_PERM_CLIENT);

	/*
	 * Enable lookups using TTBR0 and TTBR1 with the split of addresses
	 * defined by TTBCR_N_VALUE.
	 */
	cfg->ttbcr = TTBCR_N_VALUE;
}
DECLARE_KEEP_PAGER(core_init_mmu_regs);

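/*
 * Classifies a short-descriptor format FSR: the fault status is the
 * five-bit field {FS[4] (bit 10), FS[3:0] (bits 3:0)}, matching the
 * DFSR encodings listed in the case comments below.
 */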
enum core_mmu_fault core_mmu_get_fault_type(uint32_t fsr)
{
	assert(!(fsr & FSR_LPAE));

	switch (fsr & FSR_FS_MASK) {
	case 0x1: /* DFSR[10,3:0] 0b00001 Alignment fault (DFSR only) */
		return CORE_MMU_FAULT_ALIGNMENT;
	case 0x2: /* DFSR[10,3:0] 0b00010 Debug event */
		return CORE_MMU_FAULT_DEBUG_EVENT;
	case 0x4: /* DFSR[10,3:0] 0b00100 Fault on instr cache maintenance */
	case 0x5: /* DFSR[10,3:0] 0b00101 Translation fault first level */
	case 0x7: /* DFSR[10,3:0] 0b00111 Translation fault second level */
		return CORE_MMU_FAULT_TRANSLATION;
	case 0xd: /* DFSR[10,3:0] 0b01101 Permission fault first level */
	case 0xf: /* DFSR[10,3:0] 0b01111 Permission fault second level */
		if (fsr & FSR_WNR)
			return CORE_MMU_FAULT_WRITE_PERMISSION;
		else
			return CORE_MMU_FAULT_READ_PERMISSION;
	case 0x3: /* DFSR[10,3:0] 0b00011 access bit fault on section */
	case 0x6: /* DFSR[10,3:0] 0b00110 access bit fault on page */
		return CORE_MMU_FAULT_ACCESS_BIT;
	case (1 << 10) | 0x6:
		/* DFSR[10,3:0] 0b10110 Async external abort (DFSR only) */
		return CORE_MMU_FAULT_ASYNC_EXTERNAL;

	default:
		return CORE_MMU_FAULT_OTHER;
	}
}