1 // SPDX-License-Identifier: (BSD-2-Clause AND BSD-3-Clause)
2 /*
3 * Copyright (c) 2015-2016, Linaro Limited
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
11 *
12 * 2. Redistributions in binary form must reproduce the above copyright notice,
13 * this list of conditions and the following disclaimer in the documentation
14 * and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*
30 * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions are met:
34 *
35 * Redistributions of source code must retain the above copyright notice, this
36 * list of conditions and the following disclaimer.
37 *
38 * Redistributions in binary form must reproduce the above copyright notice,
39 * this list of conditions and the following disclaimer in the documentation
40 * and/or other materials provided with the distribution.
41 *
42 * Neither the name of ARM nor the names of its contributors may be used
43 * to endorse or promote products derived from this software without specific
44 * prior written permission.
45 *
46 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
47 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
50 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
56 * POSSIBILITY OF SUCH DAMAGE.
57 */
58 #include <platform_config.h>
59
60 #include <arm.h>
61 #include <assert.h>
62 #include <compiler.h>
63 #include <config.h>
64 #include <inttypes.h>
65 #include <keep.h>
66 #include <kernel/cache_helpers.h>
67 #include <kernel/linker.h>
68 #include <kernel/misc.h>
69 #include <kernel/panic.h>
70 #include <kernel/thread.h>
71 #include <kernel/tlb_helpers.h>
72 #include <mm/core_memprot.h>
74 #include <mm/pgt_cache.h>
75 #include <string.h>
76 #include <trace.h>
77 #include <types_ext.h>
78 #include <util.h>
79
80 #include "core_mmu_private.h"
81
82 #ifndef DEBUG_XLAT_TABLE
83 #define DEBUG_XLAT_TABLE 0
84 #endif
85
86 #if DEBUG_XLAT_TABLE
87 #define debug_print(...) DMSG_RAW(__VA_ARGS__)
88 #else
89 #define debug_print(...) ((void)0)
90 #endif
91
92
93 /*
94 * Miscellaneous MMU related constants
95 */
96
97 #define INVALID_DESC 0x0
98 #define BLOCK_DESC 0x1
99 #define L3_BLOCK_DESC 0x3
100 #define TABLE_DESC 0x3
101 #define DESC_ENTRY_TYPE_MASK 0x3
102
103 #define XN (1ull << 2)
104 #define PXN (1ull << 1)
105 #define CONT_HINT (1ull << 0)
106
107 #define UPPER_ATTRS(x) (((x) & 0x7) << 52)
108 #define GP			BIT64(50)   /* Guarded Page, AArch64 FEAT_BTI */
109 #define NON_GLOBAL (1ull << 9)
110 #define ACCESS_FLAG (1ull << 8)
111 #define NSH (0x0 << 6)
112 #define OSH (0x2 << 6)
113 #define ISH (0x3 << 6)
114
115 #define AP_RO (0x1 << 5)
116 #define AP_RW (0x0 << 5)
117 #define AP_UNPRIV (0x1 << 4)
118
119 #define NS (0x1 << 3)
120 #define LOWER_ATTRS_SHIFT 2
121 #define LOWER_ATTRS(x) (((x) & 0xfff) << LOWER_ATTRS_SHIFT)
122
123 #define ATTR_DEVICE_INDEX 0x0
124 #define ATTR_IWBWA_OWBWA_NTR_INDEX 0x1
125 #define ATTR_INDEX_MASK 0x7
126
127 #define ATTR_DEVICE (0x4)
128 #define ATTR_IWBWA_OWBWA_NTR (0xff)
129
130 #define MAIR_ATTR_SET(attr, index) (((uint64_t)attr) << ((index) << 3))
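/*
 * Illustration: with the attribute values defined above, MAIR ends up as
 * MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX) |
 * MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX)
 * = 0x04 | (0xff << 8) = 0xff04, i.e. Device-nGnRE at index 0 and
 * Normal Write-Back Write-Allocate at index 1.
 */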
131
132 #define OUTPUT_ADDRESS_MASK (0x0000FFFFFFFFF000ULL)
133
134 /* (internal) physical address size bits in EL3/EL1 */
135 #define TCR_PS_BITS_4GB (0x0)
136 #define TCR_PS_BITS_64GB (0x1)
137 #define TCR_PS_BITS_1TB (0x2)
138 #define TCR_PS_BITS_4TB (0x3)
139 #define TCR_PS_BITS_16TB (0x4)
140 #define TCR_PS_BITS_256TB (0x5)
141
142 #define UNSET_DESC ((uint64_t)-1)
143
144 #define FOUR_KB_SHIFT 12
145 #define PAGE_SIZE_SHIFT FOUR_KB_SHIFT
146 #define PAGE_SIZE (1 << PAGE_SIZE_SHIFT)
147 #define PAGE_SIZE_MASK (PAGE_SIZE - 1)
148 #define IS_PAGE_ALIGNED(addr) (((addr) & PAGE_SIZE_MASK) == 0)
149
150 #define XLAT_ENTRY_SIZE_SHIFT 3 /* Each MMU table entry is 8 bytes (1 << 3) */
151 #define XLAT_ENTRY_SIZE (1 << XLAT_ENTRY_SIZE_SHIFT)
152
153 #define XLAT_TABLE_SIZE_SHIFT PAGE_SIZE_SHIFT
154 #define XLAT_TABLE_SIZE (1 << XLAT_TABLE_SIZE_SHIFT)
155
156 #define XLAT_TABLE_LEVEL_MAX U(3)
157
158 /* Values for number of entries in each MMU translation table */
159 #define XLAT_TABLE_ENTRIES_SHIFT (XLAT_TABLE_SIZE_SHIFT - XLAT_ENTRY_SIZE_SHIFT)
160 #define XLAT_TABLE_ENTRIES (1 << XLAT_TABLE_ENTRIES_SHIFT)
161 #define XLAT_TABLE_ENTRIES_MASK (XLAT_TABLE_ENTRIES - 1)
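/*
 * Example: with 4 KiB tables and 8-byte descriptors this is
 * 4096 / 8 = 512 entries per table, i.e. 9 bits of VA resolved per level.
 */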
162
163 /* Values to convert a memory address to an index into a translation table */
164 #define L3_XLAT_ADDRESS_SHIFT PAGE_SIZE_SHIFT
165 #define L2_XLAT_ADDRESS_SHIFT (L3_XLAT_ADDRESS_SHIFT + \
166 XLAT_TABLE_ENTRIES_SHIFT)
167 #define L1_XLAT_ADDRESS_SHIFT (L2_XLAT_ADDRESS_SHIFT + \
168 XLAT_TABLE_ENTRIES_SHIFT)
169 #define L0_XLAT_ADDRESS_SHIFT (L1_XLAT_ADDRESS_SHIFT + \
170 XLAT_TABLE_ENTRIES_SHIFT)
171 #define XLAT_ADDR_SHIFT(level) (PAGE_SIZE_SHIFT + \
172 ((XLAT_TABLE_LEVEL_MAX - (level)) * \
173 XLAT_TABLE_ENTRIES_SHIFT))
174
175 #define XLAT_BLOCK_SIZE(level) (UL(1) << XLAT_ADDR_SHIFT(level))
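/*
 * With a 4 KiB granule, XLAT_ADDR_SHIFT(level) = 12 + (3 - level) * 9, so
 * the shifts and block sizes are: L3 -> 12 (4 KiB), L2 -> 21 (2 MiB),
 * L1 -> 30 (1 GiB) and L0 -> 39 (512 GiB).
 */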
176
177 /* Base table */
178 #define BASE_XLAT_ADDRESS_SHIFT XLAT_ADDR_SHIFT(CORE_MMU_BASE_TABLE_LEVEL)
179 #define BASE_XLAT_BLOCK_SIZE XLAT_BLOCK_SIZE(CORE_MMU_BASE_TABLE_LEVEL)
180
181 #define NUM_BASE_LEVEL_ENTRIES \
182 BIT(CFG_LPAE_ADDR_SPACE_BITS - BASE_XLAT_ADDRESS_SHIFT)
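/*
 * Example (assuming CORE_MMU_BASE_TABLE_LEVEL == 1 and
 * CFG_LPAE_ADDR_SPACE_BITS == 32): BASE_XLAT_ADDRESS_SHIFT is 30, so the
 * base table holds BIT(32 - 30) = 4 entries of 1 GiB each.
 */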
183
184 /*
185 * MMU L1 table, one for each core
186 *
187 * With CFG_CORE_UNMAP_CORE_AT_EL0, each core has one table to be used
188 * while in kernel mode and one to be used while in user mode.
189 */
190 #ifdef CFG_CORE_UNMAP_CORE_AT_EL0
191 #define NUM_BASE_TABLES 2
192 #else
193 #define NUM_BASE_TABLES 1
194 #endif
195
196 #ifndef MAX_XLAT_TABLES
197 #ifdef CFG_VIRTUALIZATION
198 # define XLAT_TABLE_VIRTUALIZATION_EXTRA 3
199 #else
200 # define XLAT_TABLE_VIRTUALIZATION_EXTRA 0
201 #endif
202 #ifdef CFG_CORE_ASLR
203 # define XLAT_TABLE_ASLR_EXTRA 3
204 #else
205 # define XLAT_TABLE_ASLR_EXTRA 0
206 #endif
207 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
208 # define XLAT_TABLE_TEE_EXTRA 8
209 # define XLAT_TABLE_USER_EXTRA (NUM_BASE_TABLES * CFG_TEE_CORE_NB_CORE)
210 #else
211 # define XLAT_TABLE_TEE_EXTRA 5
212 # define XLAT_TABLE_USER_EXTRA 0
213 #endif
214 #define MAX_XLAT_TABLES (XLAT_TABLE_TEE_EXTRA + \
215 XLAT_TABLE_VIRTUALIZATION_EXTRA + \
216 XLAT_TABLE_ASLR_EXTRA + \
217 XLAT_TABLE_USER_EXTRA)
218 #endif /*!MAX_XLAT_TABLES*/
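/*
 * Example: with CORE_MMU_BASE_TABLE_LEVEL == 1, CFG_CORE_ASLR enabled and
 * CFG_VIRTUALIZATION disabled, the default above works out to
 * 5 + 0 + 3 + 0 = 8 translation tables.
 */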
219
220 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
221 #if (MAX_XLAT_TABLES <= UINT8_MAX)
222 typedef uint8_t l1_idx_t;
223 #elif (MAX_XLAT_TABLES <= UINT16_MAX)
224 typedef uint16_t l1_idx_t;
225 #else
226 #error MAX_XLAT_TABLES is suspiciously large, please check
227 #endif
228 #endif
229
230 typedef uint64_t base_xlat_tbls_t[CFG_TEE_CORE_NB_CORE][NUM_BASE_LEVEL_ENTRIES];
231 typedef uint64_t xlat_tbl_t[XLAT_TABLE_ENTRIES];
232
233 static base_xlat_tbls_t base_xlation_table[NUM_BASE_TABLES]
234 __aligned(NUM_BASE_LEVEL_ENTRIES * XLAT_ENTRY_SIZE)
235 __section(".nozi.mmu.base_table");
236
237 static xlat_tbl_t xlat_tables[MAX_XLAT_TABLES]
238 __aligned(XLAT_TABLE_SIZE) __section(".nozi.mmu.l2");
239
240 #define XLAT_TABLES_SIZE (sizeof(xlat_tbl_t) * MAX_XLAT_TABLES)
241
242 /* MMU L2 table for TAs, one for each thread */
243 static xlat_tbl_t xlat_tables_ul1[CFG_NUM_THREADS]
244 __aligned(XLAT_TABLE_SIZE) __section(".nozi.mmu.l2");
245
246 /*
247 * TAs page table entry inside a level 1 page table.
248 *
249 * TAs mapping is expected to start from level 2.
250 *
251 * If base level is 1 then this is the index of a level 1 entry,
252 * that will point directly into TA mapping table.
253 *
254 * If base level is 0 then entry 0 in the base table is always used, and
255 * then we fall back to the "base level == 1" scenario.
256 */
257 static int user_va_idx __nex_data = -1;
258
259 struct mmu_partition {
260 base_xlat_tbls_t *base_tables;
261 xlat_tbl_t *xlat_tables;
262 xlat_tbl_t *l2_ta_tables;
263 unsigned int xlat_tables_used;
264 unsigned int asid;
265
266 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
267 /*
268 * Indexes of the L1 table from 'xlat_tables'
269 * that points to the user mappings.
270 */
271 l1_idx_t user_l1_table_idx[NUM_BASE_TABLES][CFG_TEE_CORE_NB_CORE];
272 #endif
273 };
274
275 static struct mmu_partition default_partition __nex_data = {
276 .base_tables = base_xlation_table,
277 .xlat_tables = xlat_tables,
278 .l2_ta_tables = xlat_tables_ul1,
279 .xlat_tables_used = 0,
280 .asid = 0
281 };
282
283 #ifdef CFG_VIRTUALIZATION
284 static struct mmu_partition *current_prtn[CFG_TEE_CORE_NB_CORE] __nex_bss;
285 #endif
286
287 static struct mmu_partition *get_prtn(void)
288 {
289 #ifdef CFG_VIRTUALIZATION
290 struct mmu_partition *ret;
291 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
292
293 ret = current_prtn[get_core_pos()];
294
295 thread_unmask_exceptions(exceptions);
296 return ret;
297 #else
298 return &default_partition;
299 #endif
300 }
301
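/*
 * Translate a hardware descriptor at the given table level back into
 * TEE_MATTR_* flags: 0 is returned for invalid descriptors and
 * TEE_MATTR_TABLE for table descriptors above the last level.
 */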
302 static uint32_t desc_to_mattr(unsigned level, uint64_t desc)
303 {
304 uint32_t a;
305
306 if (!(desc & 1))
307 return 0;
308
309 if (level == XLAT_TABLE_LEVEL_MAX) {
310 if ((desc & DESC_ENTRY_TYPE_MASK) != L3_BLOCK_DESC)
311 return 0;
312 } else {
313 if ((desc & DESC_ENTRY_TYPE_MASK) == TABLE_DESC)
314 return TEE_MATTR_TABLE;
315 }
316
317 a = TEE_MATTR_VALID_BLOCK;
318
319 if (desc & LOWER_ATTRS(ACCESS_FLAG))
320 a |= TEE_MATTR_PRX | TEE_MATTR_URX;
321
322 if (!(desc & LOWER_ATTRS(AP_RO)))
323 a |= TEE_MATTR_PW | TEE_MATTR_UW;
324
325 if (!(desc & LOWER_ATTRS(AP_UNPRIV)))
326 a &= ~TEE_MATTR_URWX;
327
328 if (desc & UPPER_ATTRS(XN))
329 a &= ~(TEE_MATTR_PX | TEE_MATTR_UX);
330
331 if (desc & UPPER_ATTRS(PXN))
332 a &= ~TEE_MATTR_PX;
333
334 COMPILE_TIME_ASSERT(ATTR_DEVICE_INDEX == TEE_MATTR_CACHE_NONCACHE);
335 COMPILE_TIME_ASSERT(ATTR_IWBWA_OWBWA_NTR_INDEX ==
336 TEE_MATTR_CACHE_CACHED);
337
338 a |= ((desc & LOWER_ATTRS(ATTR_INDEX_MASK)) >> LOWER_ATTRS_SHIFT) <<
339 TEE_MATTR_CACHE_SHIFT;
340
341 if (!(desc & LOWER_ATTRS(NON_GLOBAL)))
342 a |= TEE_MATTR_GLOBAL;
343
344 if (!(desc & LOWER_ATTRS(NS)))
345 a |= TEE_MATTR_SECURE;
346
347 if (desc & GP)
348 a |= TEE_MATTR_GUARDED;
349
350 return a;
351 }
352
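/*
 * Translate TEE_MATTR_* flags into a block/page descriptor for the given
 * level. As an illustration, a secure, global, cached, read/write,
 * non-executable kernel-only mapping at level 3 becomes L3_BLOCK_DESC with
 * XN and PXN set, AP_RO and AP_UNPRIV clear, the cached MAIR index with
 * inner-shareable attributes, and the access flag set.
 */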
353 static uint64_t mattr_to_desc(unsigned level, uint32_t attr)
354 {
355 uint64_t desc;
356 uint32_t a = attr;
357
358 if (a & TEE_MATTR_TABLE)
359 return TABLE_DESC;
360
361 if (!(a & TEE_MATTR_VALID_BLOCK))
362 return 0;
363
364 if (a & (TEE_MATTR_PX | TEE_MATTR_PW))
365 a |= TEE_MATTR_PR;
366 if (a & (TEE_MATTR_UX | TEE_MATTR_UW))
367 a |= TEE_MATTR_UR;
368 if (a & TEE_MATTR_UR)
369 a |= TEE_MATTR_PR;
370 if (a & TEE_MATTR_UW)
371 a |= TEE_MATTR_PW;
372
373 if (IS_ENABLED(CFG_CORE_BTI) && (a & TEE_MATTR_PX))
374 a |= TEE_MATTR_GUARDED;
375
376 if (level == XLAT_TABLE_LEVEL_MAX)
377 desc = L3_BLOCK_DESC;
378 else
379 desc = BLOCK_DESC;
380
381 if (!(a & (TEE_MATTR_PX | TEE_MATTR_UX)))
382 desc |= UPPER_ATTRS(XN);
383 if (!(a & TEE_MATTR_PX))
384 desc |= UPPER_ATTRS(PXN);
385
386 if (a & TEE_MATTR_UR)
387 desc |= LOWER_ATTRS(AP_UNPRIV);
388
389 if (!(a & TEE_MATTR_PW))
390 desc |= LOWER_ATTRS(AP_RO);
391
392 if (feat_bti_is_implemented() && (a & TEE_MATTR_GUARDED))
393 desc |= GP;
394
395 /* Keep in sync with core_mmu.c:core_mmu_mattr_is_ok */
396 switch ((a >> TEE_MATTR_CACHE_SHIFT) & TEE_MATTR_CACHE_MASK) {
397 case TEE_MATTR_CACHE_NONCACHE:
398 desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
399 break;
400 case TEE_MATTR_CACHE_CACHED:
401 desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
402 break;
403 default:
404 /*
405 * "Can't happen": the attribute is supposed to have been checked
406 * with core_mmu_mattr_is_ok() before this point.
407 */
408 panic();
409 }
410
411 if (a & (TEE_MATTR_UR | TEE_MATTR_PR))
412 desc |= LOWER_ATTRS(ACCESS_FLAG);
413
414 if (!(a & TEE_MATTR_GLOBAL))
415 desc |= LOWER_ATTRS(NON_GLOBAL);
416
417 desc |= a & TEE_MATTR_SECURE ? 0 : LOWER_ATTRS(NS);
418
419 return desc;
420 }
421
422 #ifdef CFG_VIRTUALIZATION
423 size_t core_mmu_get_total_pages_size(void)
424 {
425 return ROUNDUP(sizeof(base_xlation_table), SMALL_PAGE_SIZE) +
426 sizeof(xlat_tables) + sizeof(xlat_tables_ul1);
427 }
428
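/*
 * The caller provides a page-aligned buffer of at least
 * core_mmu_get_total_pages_size() bytes, laid out as: the base tables
 * (rounded up to a small page), then the intermediate translation tables,
 * then the per-thread TA L2 tables.
 */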
429 struct mmu_partition *core_alloc_mmu_prtn(void *tables)
430 {
431 struct mmu_partition *prtn;
432 uint8_t *tbl = tables;
433 unsigned int asid = asid_alloc();
434
435 assert(((vaddr_t)tbl) % SMALL_PAGE_SIZE == 0);
436
437 if (!asid)
438 return NULL;
439
440 prtn = nex_malloc(sizeof(*prtn));
441 if (!prtn) {
442 asid_free(asid);
443 return NULL;
444 }
445
446 prtn->base_tables = (void *)tbl;
447 COMPILE_TIME_ASSERT(sizeof(base_xlation_table) <= SMALL_PAGE_SIZE);
448 memset(prtn->base_tables, 0, SMALL_PAGE_SIZE);
449 tbl += ROUNDUP(sizeof(base_xlation_table), SMALL_PAGE_SIZE);
450
451 prtn->xlat_tables = (void *)tbl;
452 memset(prtn->xlat_tables, 0, XLAT_TABLES_SIZE);
453 tbl += XLAT_TABLES_SIZE;
454 assert(((vaddr_t)tbl) % SMALL_PAGE_SIZE == 0);
455
456 prtn->l2_ta_tables = (void *)tbl;
457 prtn->xlat_tables_used = 0;
458 prtn->asid = asid;
459
460 return prtn;
461 }
462
463 void core_free_mmu_prtn(struct mmu_partition *prtn)
464 {
465 asid_free(prtn->asid);
466 nex_free(prtn);
467 }
468
469 void core_mmu_set_prtn(struct mmu_partition *prtn)
470 {
471 uint64_t ttbr;
472 /*
473 * We are changing mappings for current CPU,
474 * so make sure that we will not be rescheduled
475 */
476 assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
477
478 current_prtn[get_core_pos()] = prtn;
479
480 ttbr = virt_to_phys(prtn->base_tables[0][get_core_pos()]);
481
482 write_ttbr0_el1(ttbr | ((paddr_t)prtn->asid << TTBR_ASID_SHIFT));
483 isb();
484 tlbi_all();
485 }
486
487 void core_mmu_set_default_prtn(void)
488 {
489 core_mmu_set_prtn(&default_partition);
490 }
491
492 void core_mmu_set_default_prtn_tbl(void)
493 {
494 size_t n = 0;
495
496 for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
497 current_prtn[n] = &default_partition;
498 }
499 #endif
500
501 static uint64_t *core_mmu_xlat_table_alloc(struct mmu_partition *prtn)
502 {
503 uint64_t *new_table = NULL;
504
505 if (prtn->xlat_tables_used >= MAX_XLAT_TABLES) {
506 EMSG("%u xlat tables exhausted", MAX_XLAT_TABLES);
507
508 return NULL;
509 }
510
511 new_table = prtn->xlat_tables[prtn->xlat_tables_used++];
512
513 DMSG("xlat tables used %u / %u",
514 prtn->xlat_tables_used, MAX_XLAT_TABLES);
515
516 return new_table;
517 }
518
519 /*
520 * Given an entry that points to a table, returns the virtual address
521 * of that table, or NULL otherwise.
522 */
523 static void *core_mmu_xlat_table_entry_pa2va(struct mmu_partition *prtn,
524 unsigned int level,
525 uint64_t entry)
526 {
527 paddr_t pa = 0;
528 void *va = NULL;
529
530 if ((entry & DESC_ENTRY_TYPE_MASK) != TABLE_DESC ||
531 level >= XLAT_TABLE_LEVEL_MAX)
532 return NULL;
533
534 pa = entry & OUTPUT_ADDRESS_MASK;
535
536 if (!IS_ENABLED(CFG_VIRTUALIZATION) || prtn == &default_partition)
537 va = phys_to_virt(pa, MEM_AREA_TEE_RAM_RW_DATA,
538 XLAT_TABLE_SIZE);
539 else
540 va = phys_to_virt(pa, MEM_AREA_SEC_RAM_OVERALL,
541 XLAT_TABLE_SIZE);
542
543 return va;
544 }
545
546 /*
547 * For a table entry that points to a table: allocate a new table and
548 * copy the original contents into it. This is done for the requested
549 * entry only, without going deeper into the entries of the pointed-to table.
550 *
551 * Success is returned for non-table entries, as there is nothing to do there.
552 */
553 __maybe_unused
554 static bool core_mmu_entry_copy(struct core_mmu_table_info *tbl_info,
555 unsigned int idx)
556 {
557 uint64_t *orig_table = NULL;
558 uint64_t *new_table = NULL;
559 uint64_t *entry = NULL;
560 struct mmu_partition *prtn = NULL;
561
562 #ifdef CFG_VIRTUALIZATION
563 prtn = tbl_info->prtn;
564 #else
565 prtn = &default_partition;
566 #endif
567 assert(prtn);
568
569 if (idx >= tbl_info->num_entries)
570 return false;
571
572 entry = (uint64_t *)tbl_info->table + idx;
573
574 /* Nothing to do for non-table entries */
575 if ((*entry & DESC_ENTRY_TYPE_MASK) != TABLE_DESC ||
576 tbl_info->level >= XLAT_TABLE_LEVEL_MAX)
577 return true;
578
579 new_table = core_mmu_xlat_table_alloc(prtn);
580 if (!new_table)
581 return false;
582
583 orig_table = core_mmu_xlat_table_entry_pa2va(prtn, tbl_info->level,
584 *entry);
585
586 /* Copy original table content to new table */
587 memcpy(new_table, orig_table, XLAT_TABLE_ENTRIES * XLAT_ENTRY_SIZE);
588
589 /* Point to the new table */
590 *entry = virt_to_phys(new_table) | (*entry & ~OUTPUT_ADDRESS_MASK);
591
592 return true;
593 }
594
595 static void core_init_mmu_prtn_tee(struct mmu_partition *prtn,
596 struct tee_mmap_region *mm)
597 {
598 size_t n;
599
600 assert(prtn && mm);
601
602 for (n = 0; !core_mmap_is_end_of_table(mm + n); n++) {
603 debug_print(" %010" PRIxVA " %010" PRIxPA " %10zx %x",
604 mm[n].va, mm[n].pa, mm[n].size, mm[n].attr);
605
606 if (!IS_PAGE_ALIGNED(mm[n].pa) || !IS_PAGE_ALIGNED(mm[n].size))
607 panic("unaligned region");
608 }
609
610 /* Clear table before use */
611 memset(prtn->base_tables, 0, sizeof(base_xlation_table));
612
613 for (n = 0; !core_mmap_is_end_of_table(mm + n); n++)
614 if (!core_mmu_is_dynamic_vaspace(mm + n))
615 core_mmu_map_region(prtn, mm + n);
616
617 /*
618 * The primary mapping table is ready at index get_core_pos(), whose
619 * value may not be zero. Use that index as the copy source.
620 */
621 for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
622 if (n == get_core_pos())
623 continue;
624
625 memcpy(prtn->base_tables[0][n],
626 prtn->base_tables[0][get_core_pos()],
627 XLAT_ENTRY_SIZE * NUM_BASE_LEVEL_ENTRIES);
628 }
629 }
630
631 /*
632 * In order to support 32-bit TAs we will have to find
633 * a user VA base in the region [1GB, 4GB[.
634 * Due to OP-TEE design limitation, TAs page table should be an entry
635 * inside a level 1 page table.
636 *
637 * Available options are only these:
638 * - base level 0 entry 0 - [0GB, 512GB[
639 * - level 1 entry 0 - [0GB, 1GB[
640 * - level 1 entry 1 - [1GB, 2GB[ <----
641 * - level 1 entry 2 - [2GB, 3GB[ <----
642 * - level 1 entry 3 - [3GB, 4GB[ <----
643 * - level 1 entry 4 - [4GB, 5GB[
644 * - ...
645 * - ...
646 *
647 * - base level 1 entry 0 - [0GB, 1GB[
648 * - base level 1 entry 1 - [1GB, 2GB[ <----
649 * - base level 1 entry 2 - [2GB, 3GB[ <----
650 * - base level 1 entry 3 - [3GB, 4GB[ <----
651 * - base level 1 entry 4 - [4GB, 5GB[
652 * - ...
653 */
654 static void set_user_va_idx(struct mmu_partition *prtn)
655 {
656 uint64_t *tbl = NULL;
657 unsigned int n = 0;
658
659 assert(prtn);
660
661 tbl = prtn->base_tables[0][get_core_pos()];
662
663 /*
664 * If base level is 0, then we must use its entry 0.
665 */
666 if (CORE_MMU_BASE_TABLE_LEVEL == 0) {
667 /*
668 * If base level 0 entry 0 is not used then
669 * it's clear that we can use level 1 entry 1 inside it.
670 * (will be allocated later).
671 */
672 if ((tbl[0] & DESC_ENTRY_TYPE_MASK) == INVALID_DESC) {
673 user_va_idx = 1;
674
675 return;
676 }
677
678 assert((tbl[0] & DESC_ENTRY_TYPE_MASK) == TABLE_DESC);
679
680 tbl = core_mmu_xlat_table_entry_pa2va(prtn, 0, tbl[0]);
681 assert(tbl);
682 }
683
684 /*
685 * Search level 1 table (i.e. 1GB mapping per entry) for
686 * an empty entry in the range [1GB, 4GB[.
687 */
688 for (n = 1; n < 4; n++) {
689 if ((tbl[n] & DESC_ENTRY_TYPE_MASK) == INVALID_DESC) {
690 user_va_idx = n;
691 break;
692 }
693 }
694
695 assert(user_va_idx != -1);
696 }
697
698 /*
699 * Setup an entry inside a core level 1 page table for TAs memory mapping
700 *
701 * If base table level is 1 - user_va_idx is already the index,
702 * so nothing to do.
703 * If base table level is 0 - we might need to allocate entry 0 of base table,
704 * as TAs page table is an entry inside a level 1
705 * page table.
706 */
707 static void core_init_mmu_prtn_ta_core(struct mmu_partition *prtn
708 __maybe_unused,
709 unsigned int base_idx __maybe_unused,
710 unsigned int core __maybe_unused)
711 {
712 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
713 struct core_mmu_table_info tbl_info = { };
714 uint64_t *tbl = NULL;
715 uintptr_t idx = 0;
716
717 assert(user_va_idx != -1);
718 COMPILE_TIME_ASSERT(MAX_XLAT_TABLES <
719 (1 << (8 * sizeof(prtn->user_l1_table_idx[0][0]))));
720
721 tbl = prtn->base_tables[base_idx][core];
722
723 /*
724 * If base level is 0, then user_va_idx refers to
725 * level 1 page table that's in base level 0 entry 0.
726 */
727 core_mmu_set_info_table(&tbl_info, 0, 0, tbl);
728 #ifdef CFG_VIRTUALIZATION
729 tbl_info.prtn = prtn;
730 #endif
731
732 /*
733 * If this isn't the core that created the initial tables
734 * mappings, then the level 1 table must be copied,
735 * as it will hold pointer to the user mapping table
736 * that changes per core.
737 */
738 if (core != get_core_pos()) {
739 if (!core_mmu_entry_copy(&tbl_info, 0))
740 panic();
741 }
742
743 if (!core_mmu_entry_to_finer_grained(&tbl_info, 0, true))
744 panic();
745
746 /*
747 * Now base level table should be ready with a table descriptor
748 */
749 assert((tbl[0] & DESC_ENTRY_TYPE_MASK) == TABLE_DESC);
750
751 tbl = core_mmu_xlat_table_entry_pa2va(prtn, 0, tbl[0]);
752 assert(tbl);
753
754 idx = ((uintptr_t)&tbl[user_va_idx] - (uintptr_t)prtn->xlat_tables) /
755 sizeof(xlat_tbl_t);
756 assert(idx < prtn->xlat_tables_used);
757
758 prtn->user_l1_table_idx[base_idx][core] = idx;
759 #endif
760 }
761
762 static void core_init_mmu_prtn_ta(struct mmu_partition *prtn)
763 {
764 unsigned int base_idx = 0;
765 unsigned int core = 0;
766
767 assert(user_va_idx != -1);
768
769 for (base_idx = 0; base_idx < NUM_BASE_TABLES; base_idx++)
770 for (core = 0; core < CFG_TEE_CORE_NB_CORE; core++)
771 core_init_mmu_prtn_ta_core(prtn, base_idx, core);
772 }
773
774 void core_init_mmu_prtn(struct mmu_partition *prtn, struct tee_mmap_region *mm)
775 {
776 core_init_mmu_prtn_tee(prtn, mm);
777 core_init_mmu_prtn_ta(prtn);
778 }
779
780 void core_init_mmu(struct tee_mmap_region *mm)
781 {
782 uint64_t max_va = 0;
783 size_t n;
784
785 COMPILE_TIME_ASSERT(CORE_MMU_BASE_TABLE_SHIFT ==
786 XLAT_ADDR_SHIFT(CORE_MMU_BASE_TABLE_LEVEL));
787 #ifdef CFG_CORE_UNMAP_CORE_AT_EL0
788 COMPILE_TIME_ASSERT(CORE_MMU_BASE_TABLE_OFFSET ==
789 sizeof(base_xlation_table) / 2);
790 #endif
791 COMPILE_TIME_ASSERT(XLAT_TABLES_SIZE == sizeof(xlat_tables));
792
793 /* Initialize default pagetables */
794 core_init_mmu_prtn_tee(&default_partition, mm);
795
796 for (n = 0; !core_mmap_is_end_of_table(mm + n); n++) {
797 vaddr_t va_end = mm[n].va + mm[n].size - 1;
798
799 if (va_end > max_va)
800 max_va = va_end;
801 }
802
803 set_user_va_idx(&default_partition);
804
805 core_init_mmu_prtn_ta(&default_partition);
806
807 COMPILE_TIME_ASSERT(CFG_LPAE_ADDR_SPACE_BITS > L1_XLAT_ADDRESS_SHIFT);
808 assert(max_va < BIT64(CFG_LPAE_ADDR_SPACE_BITS));
809 }
810
811 bool core_mmu_place_tee_ram_at_top(paddr_t paddr)
812 {
813 size_t base_level_size = BASE_XLAT_BLOCK_SIZE;
814 paddr_t base_level_mask = base_level_size - 1;
815
816 return (paddr & base_level_mask) > (base_level_size / 2);
817 }
818
819 #ifdef ARM32
820 void core_init_mmu_regs(struct core_mmu_config *cfg)
821 {
822 uint32_t ttbcr = 0;
823 uint32_t mair = 0;
824
825 cfg->ttbr0_base = virt_to_phys(base_xlation_table[0][0]);
826 cfg->ttbr0_core_offset = sizeof(base_xlation_table[0][0]);
827
828 mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
829 mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
830 cfg->mair0 = mair;
831
832 ttbcr = TTBCR_EAE;
833 ttbcr |= TTBCR_XRGNX_WBWA << TTBCR_IRGN0_SHIFT;
834 ttbcr |= TTBCR_XRGNX_WBWA << TTBCR_ORGN0_SHIFT;
835 ttbcr |= TTBCR_SHX_ISH << TTBCR_SH0_SHIFT;
836 ttbcr |= TTBCR_EPD1; /* Disable the use of TTBR1 */
837
838 /* TTBCR.A1 = 0 => ASID is stored in TTBR0 */
839 cfg->ttbcr = ttbcr;
840 }
841 #endif /*ARM32*/
842
843 #ifdef ARM64
844 static unsigned int get_physical_addr_size_bits(void)
845 {
846 /*
847 * Intermediate Physical Address Size.
848 * 0b000 32 bits, 4GB.
849 * 0b001 36 bits, 64GB.
850 * 0b010 40 bits, 1TB.
851 * 0b011 42 bits, 4TB.
852 * 0b100 44 bits, 16TB.
853 * 0b101 48 bits, 256TB.
854 * 0b110 52 bits, 4PB (not supported)
855 */
856
857 COMPILE_TIME_ASSERT(CFG_CORE_ARM64_PA_BITS >= 32);
858
859 if (CFG_CORE_ARM64_PA_BITS <= 32)
860 return TCR_PS_BITS_4GB;
861
862 if (CFG_CORE_ARM64_PA_BITS <= 36)
863 return TCR_PS_BITS_64GB;
864
865 if (CFG_CORE_ARM64_PA_BITS <= 40)
866 return TCR_PS_BITS_1TB;
867
868 if (CFG_CORE_ARM64_PA_BITS <= 42)
869 return TCR_PS_BITS_4TB;
870
871 if (CFG_CORE_ARM64_PA_BITS <= 44)
872 return TCR_PS_BITS_16TB;
873
874 /* Physical address can't exceed 48 bits */
875 COMPILE_TIME_ASSERT(CFG_CORE_ARM64_PA_BITS <= 48);
876 /* CFG_CORE_ARM64_PA_BITS <= 48 */
877 return TCR_PS_BITS_256TB;
878 }
879
880 void core_init_mmu_regs(struct core_mmu_config *cfg)
881 {
882 uint64_t ips = get_physical_addr_size_bits();
883 uint64_t mair = 0;
884 uint64_t tcr = 0;
885
886 cfg->ttbr0_el1_base = virt_to_phys(base_xlation_table[0][0]);
887 cfg->ttbr0_core_offset = sizeof(base_xlation_table[0][0]);
888
889 mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
890 mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
891 cfg->mair_el1 = mair;
892
893 tcr = TCR_RES1;
894 tcr |= TCR_XRGNX_WBWA << TCR_IRGN0_SHIFT;
895 tcr |= TCR_XRGNX_WBWA << TCR_ORGN0_SHIFT;
896 tcr |= TCR_SHX_ISH << TCR_SH0_SHIFT;
897 tcr |= ips << TCR_EL1_IPS_SHIFT;
898 tcr |= 64 - CFG_LPAE_ADDR_SPACE_BITS;
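/*
 * The line above sets T0SZ; for example, CFG_LPAE_ADDR_SPACE_BITS == 32
 * gives T0SZ = 32, i.e. TTBR0_EL1 covers a 4 GiB virtual address range.
 */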
899
900 /* Disable the use of TTBR1 */
901 tcr |= TCR_EPD1;
902
903 /*
904 * TCR.A1 = 0 => ASID is stored in TTBR0
905 * TCR.AS = 0 => Same ASID size as in Aarch32/ARMv7
906 */
907 cfg->tcr_el1 = tcr;
908 }
909 #endif /*ARM64*/
910
911 void core_mmu_set_info_table(struct core_mmu_table_info *tbl_info,
912 unsigned level, vaddr_t va_base, void *table)
913 {
914 tbl_info->level = level;
915 tbl_info->table = table;
916 tbl_info->va_base = va_base;
917 tbl_info->shift = XLAT_ADDR_SHIFT(level);
918
919 #if (CORE_MMU_BASE_TABLE_LEVEL > 0)
920 assert(level >= CORE_MMU_BASE_TABLE_LEVEL);
921 #endif
922 assert(level <= XLAT_TABLE_LEVEL_MAX);
923
924 if (level == CORE_MMU_BASE_TABLE_LEVEL)
925 tbl_info->num_entries = NUM_BASE_LEVEL_ENTRIES;
926 else
927 tbl_info->num_entries = XLAT_TABLE_ENTRIES;
928 }
929
930 void core_mmu_get_user_pgdir(struct core_mmu_table_info *pgd_info)
931 {
932 vaddr_t va_range_base;
933 void *tbl = get_prtn()->l2_ta_tables[thread_get_id()];
934
935 core_mmu_get_user_va_range(&va_range_base, NULL);
936 core_mmu_set_info_table(pgd_info, 2, va_range_base, tbl);
937 }
938
939 void core_mmu_create_user_map(struct user_mode_ctx *uctx,
940 struct core_mmu_user_map *map)
941 {
942 struct core_mmu_table_info dir_info;
943
944 COMPILE_TIME_ASSERT(sizeof(uint64_t) * XLAT_TABLE_ENTRIES == PGT_SIZE);
945
946 core_mmu_get_user_pgdir(&dir_info);
947 memset(dir_info.table, 0, PGT_SIZE);
948 core_mmu_populate_user_map(&dir_info, uctx);
949 map->user_map = virt_to_phys(dir_info.table) | TABLE_DESC;
950 map->asid = uctx->vm_info.asid;
951 }
952
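/*
 * Usage sketch (not from this file): to locate the translation table entry
 * covering a virtual address down to the deepest level, a caller can do
 *
 *	struct core_mmu_table_info tbl_info = { };
 *
 *	if (core_mmu_find_table(NULL, va, UINT_MAX, &tbl_info))
 *		n = core_mmu_va2idx(&tbl_info, va);
 *
 * Passing a NULL partition selects the active one via get_prtn().
 */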
953 bool core_mmu_find_table(struct mmu_partition *prtn, vaddr_t va,
954 unsigned max_level,
955 struct core_mmu_table_info *tbl_info)
956 {
957 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
958 unsigned int num_entries = NUM_BASE_LEVEL_ENTRIES;
959 unsigned int level = CORE_MMU_BASE_TABLE_LEVEL;
960 vaddr_t va_base = 0;
961 bool ret = false;
962 uint64_t *tbl;
963
964 if (!prtn)
965 prtn = get_prtn();
966 tbl = prtn->base_tables[0][get_core_pos()];
967
968 while (true) {
969 unsigned int level_size_shift = XLAT_ADDR_SHIFT(level);
970 unsigned int n = (va - va_base) >> level_size_shift;
971
972 if (n >= num_entries)
973 goto out;
974
975 if (level == max_level || level == XLAT_TABLE_LEVEL_MAX ||
976 (tbl[n] & TABLE_DESC) != TABLE_DESC) {
977 /*
978 * We've either reached max_level, a block
979 * mapping entry or an "invalid" mapping entry.
980 */
981
982 /*
983 * Base level is the CPU specific translation table.
984 * It doesn't make sense to return anything based
985 * on that unless foreign interrupts already are
986 * masked.
987 */
988 if (level == CORE_MMU_BASE_TABLE_LEVEL &&
989 !(exceptions & THREAD_EXCP_FOREIGN_INTR))
990 goto out;
991
992 tbl_info->table = tbl;
993 tbl_info->va_base = va_base;
994 tbl_info->level = level;
995 tbl_info->shift = level_size_shift;
996 tbl_info->num_entries = num_entries;
997 #ifdef CFG_VIRTUALIZATION
998 tbl_info->prtn = prtn;
999 #endif
1000 ret = true;
1001 goto out;
1002 }
1003
1004 tbl = core_mmu_xlat_table_entry_pa2va(prtn, level, tbl[n]);
1005
1006 if (!tbl)
1007 goto out;
1008
1009 va_base += (vaddr_t)n << level_size_shift;
1010 level++;
1011 num_entries = XLAT_TABLE_ENTRIES;
1012 }
1013 out:
1014 thread_unmask_exceptions(exceptions);
1015 return ret;
1016 }
1017
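/*
 * Split a block (or invalid) entry into a next-level table: a new table is
 * allocated and filled with next-level block descriptors covering the same
 * range and attributes (or zeroed if the entry was invalid), then the
 * original entry is replaced by a table descriptor pointing to it.
 */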
1018 bool core_mmu_entry_to_finer_grained(struct core_mmu_table_info *tbl_info,
1019 unsigned int idx, bool secure __unused)
1020 {
1021 uint64_t *new_table;
1022 uint64_t *entry;
1023 int i;
1024 paddr_t pa;
1025 uint64_t attr;
1026 paddr_t block_size_on_next_lvl = XLAT_BLOCK_SIZE(tbl_info->level + 1);
1027 struct mmu_partition *prtn;
1028
1029 #ifdef CFG_VIRTUALIZATION
1030 prtn = tbl_info->prtn;
1031 #else
1032 prtn = &default_partition;
1033 #endif
1034 assert(prtn);
1035
1036 if (tbl_info->level >= XLAT_TABLE_LEVEL_MAX ||
1037 idx >= tbl_info->num_entries)
1038 return false;
1039
1040 entry = (uint64_t *)tbl_info->table + idx;
1041
1042 if ((*entry & DESC_ENTRY_TYPE_MASK) == TABLE_DESC)
1043 return true;
1044
1045 new_table = core_mmu_xlat_table_alloc(prtn);
1046 if (!new_table)
1047 return false;
1048
1049 if (*entry) {
1050 pa = *entry & OUTPUT_ADDRESS_MASK;
1051 attr = *entry & ~(OUTPUT_ADDRESS_MASK | DESC_ENTRY_TYPE_MASK);
1052 for (i = 0; i < XLAT_TABLE_ENTRIES; i++) {
1053 new_table[i] = pa | attr | BLOCK_DESC;
1054 pa += block_size_on_next_lvl;
1055 }
1056 } else {
1057 memset(new_table, 0, XLAT_TABLE_ENTRIES * XLAT_ENTRY_SIZE);
1058 }
1059
1060 *entry = virt_to_phys(new_table) | TABLE_DESC;
1061
1062 return true;
1063 }
1064
1065 void core_mmu_set_entry_primitive(void *table, size_t level, size_t idx,
1066 paddr_t pa, uint32_t attr)
1067 {
1068 uint64_t *tbl = table;
1069 uint64_t desc = mattr_to_desc(level, attr);
1070
1071 tbl[idx] = desc | pa;
1072 }
1073
1074 void core_mmu_get_entry_primitive(const void *table, size_t level,
1075 size_t idx, paddr_t *pa, uint32_t *attr)
1076 {
1077 const uint64_t *tbl = table;
1078
1079 if (pa)
1080 *pa = tbl[idx] & GENMASK_64(47, 12);
1081
1082 if (attr)
1083 *attr = desc_to_mattr(level, tbl[idx]);
1084 }
1085
1086 bool core_mmu_user_va_range_is_defined(void)
1087 {
1088 return user_va_idx != -1;
1089 }
1090
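/*
 * Example: with L1_XLAT_ADDRESS_SHIFT == 30, user_va_idx == 1 yields the
 * 1 GiB user VA range starting at 0x40000000.
 */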
1091 void core_mmu_get_user_va_range(vaddr_t *base, size_t *size)
1092 {
1093 assert(user_va_idx != -1);
1094
1095 if (base)
1096 *base = (vaddr_t)user_va_idx << L1_XLAT_ADDRESS_SHIFT;
1097 if (size)
1098 *size = BIT64(L1_XLAT_ADDRESS_SHIFT);
1099 }
1100
1101 static uint64_t *core_mmu_get_user_mapping_entry(struct mmu_partition *prtn,
1102 unsigned int base_idx)
1103 {
1104 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
1105 uint8_t idx = 0;
1106 uint64_t *tbl = NULL;
1107 #endif
1108
1109 assert(user_va_idx != -1);
1110
1111 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
1112 idx = prtn->user_l1_table_idx[base_idx][get_core_pos()];
1113 tbl = prtn->xlat_tables[idx];
1114
1115 return &tbl[user_va_idx];
1116 #else
1117 return &prtn->base_tables[base_idx][get_core_pos()][user_va_idx];
1118 #endif
1119 }
1120
1121 bool core_mmu_user_mapping_is_active(void)
1122 {
1123 bool ret = false;
1124 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
1125 uint64_t *entry = NULL;
1126
1127 entry = core_mmu_get_user_mapping_entry(get_prtn(), 0);
1128 ret = (*entry != 0);
1129
1130 thread_unmask_exceptions(exceptions);
1131
1132 return ret;
1133 }
1134
1135 #ifdef ARM32
1136 void core_mmu_get_user_map(struct core_mmu_user_map *map)
1137 {
1138 struct mmu_partition *prtn = get_prtn();
1139 uint64_t *entry = NULL;
1140
1141 entry = core_mmu_get_user_mapping_entry(prtn, 0);
1142
1143 map->user_map = *entry;
1144 if (map->user_map) {
1145 map->asid = (read_ttbr0_64bit() >> TTBR_ASID_SHIFT) &
1146 TTBR_ASID_MASK;
1147 } else {
1148 map->asid = 0;
1149 }
1150 }
1151
1152 void core_mmu_set_user_map(struct core_mmu_user_map *map)
1153 {
1154 uint64_t ttbr = 0;
1155 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
1156 struct mmu_partition *prtn = get_prtn();
1157 uint64_t *entries[NUM_BASE_TABLES] = { };
1158 unsigned int i = 0;
1159
1160 ttbr = read_ttbr0_64bit();
1161 /* Clear ASID */
1162 ttbr &= ~((uint64_t)TTBR_ASID_MASK << TTBR_ASID_SHIFT);
1163 write_ttbr0_64bit(ttbr);
1164 isb();
1165
1166 for (i = 0; i < NUM_BASE_TABLES; i++)
1167 entries[i] = core_mmu_get_user_mapping_entry(prtn, i);
1168
1169 /* Set the new map */
1170 if (map && map->user_map) {
1171 for (i = 0; i < NUM_BASE_TABLES; i++)
1172 *entries[i] = map->user_map;
1173
1174 dsb(); /* Make sure the write above is visible */
1175 ttbr |= ((uint64_t)map->asid << TTBR_ASID_SHIFT);
1176 write_ttbr0_64bit(ttbr);
1177 isb();
1178 } else {
1179 for (i = 0; i < NUM_BASE_TABLES; i++)
1180 *entries[i] = INVALID_DESC;
1181
1182 dsb(); /* Make sure the write above is visible */
1183 }
1184
1185 tlbi_all();
1186 icache_inv_all();
1187
1188 thread_unmask_exceptions(exceptions);
1189 }
1190
1191 enum core_mmu_fault core_mmu_get_fault_type(uint32_t fault_descr)
1192 {
1193 assert(fault_descr & FSR_LPAE);
1194
1195 switch (fault_descr & FSR_STATUS_MASK) {
1196 case 0x21: /* b100001 Alignment fault */
1197 return CORE_MMU_FAULT_ALIGNMENT;
1198 case 0x11: /* b010001 Asynchronous external abort (DFSR only) */
1199 return CORE_MMU_FAULT_ASYNC_EXTERNAL;
1200 case 0x22: /* b100010 Debug event */
1201 return CORE_MMU_FAULT_DEBUG_EVENT;
1202 default:
1203 break;
1204 }
1205
1206 switch ((fault_descr & FSR_STATUS_MASK) >> 2) {
1207 case 0x1: /* b0001LL Translation fault */
1208 return CORE_MMU_FAULT_TRANSLATION;
1209 case 0x2: /* b0010LL Access flag fault */
1210 case 0x3: /* b0011LL Permission fault */
1211 if (fault_descr & FSR_WNR)
1212 return CORE_MMU_FAULT_WRITE_PERMISSION;
1213 else
1214 return CORE_MMU_FAULT_READ_PERMISSION;
1215 default:
1216 return CORE_MMU_FAULT_OTHER;
1217 }
1218 }
1219 #endif /*ARM32*/
1220
1221 #ifdef ARM64
1222 void core_mmu_get_user_map(struct core_mmu_user_map *map)
1223 {
1224 struct mmu_partition *prtn = get_prtn();
1225 uint64_t *entry = NULL;
1226
1227 entry = core_mmu_get_user_mapping_entry(prtn, 0);
1228
1229 map->user_map = *entry;
1230 if (map->user_map) {
1231 map->asid = (read_ttbr0_el1() >> TTBR_ASID_SHIFT) &
1232 TTBR_ASID_MASK;
1233 } else {
1234 map->asid = 0;
1235 }
1236 }
1237
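/*
 * The ASID field of TTBR0_EL1 is cleared before the base-table entries are
 * updated, and the caller's ASID is only programmed (for a non-empty map)
 * once the new entries are visible; TLB and instruction cache maintenance
 * follows in both cases.
 */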
1238 void core_mmu_set_user_map(struct core_mmu_user_map *map)
1239 {
1240 uint64_t ttbr = 0;
1241 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
1242 struct mmu_partition *prtn = get_prtn();
1243 uint64_t *entries[NUM_BASE_TABLES] = { };
1244 unsigned int i = 0;
1245
1246 ttbr = read_ttbr0_el1();
1247 /* Clear ASID */
1248 ttbr &= ~((uint64_t)TTBR_ASID_MASK << TTBR_ASID_SHIFT);
1249 write_ttbr0_el1(ttbr);
1250 isb();
1251
1252 for (i = 0; i < NUM_BASE_TABLES; i++)
1253 entries[i] = core_mmu_get_user_mapping_entry(prtn, i);
1254
1255 /* Set the new map */
1256 if (map && map->user_map) {
1257 for (i = 0; i < NUM_BASE_TABLES; i++)
1258 *entries[i] = map->user_map;
1259
1260 dsb(); /* Make sure the write above is visible */
1261 ttbr |= ((uint64_t)map->asid << TTBR_ASID_SHIFT);
1262 write_ttbr0_el1(ttbr);
1263 isb();
1264 } else {
1265 for (i = 0; i < NUM_BASE_TABLES; i++)
1266 *entries[i] = INVALID_DESC;
1267
1268 dsb(); /* Make sure the write above is visible */
1269 }
1270
1271 tlbi_all();
1272 icache_inv_all();
1273
1274 thread_unmask_exceptions(exceptions);
1275 }
1276
1277 enum core_mmu_fault core_mmu_get_fault_type(uint32_t fault_descr)
1278 {
1279 switch ((fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
1280 case ESR_EC_SP_ALIGN:
1281 case ESR_EC_PC_ALIGN:
1282 return CORE_MMU_FAULT_ALIGNMENT;
1283 case ESR_EC_IABT_EL0:
1284 case ESR_EC_DABT_EL0:
1285 case ESR_EC_IABT_EL1:
1286 case ESR_EC_DABT_EL1:
1287 switch (fault_descr & ESR_FSC_MASK) {
1288 case ESR_FSC_SIZE_L0:
1289 case ESR_FSC_SIZE_L1:
1290 case ESR_FSC_SIZE_L2:
1291 case ESR_FSC_SIZE_L3:
1292 case ESR_FSC_TRANS_L0:
1293 case ESR_FSC_TRANS_L1:
1294 case ESR_FSC_TRANS_L2:
1295 case ESR_FSC_TRANS_L3:
1296 return CORE_MMU_FAULT_TRANSLATION;
1297 case ESR_FSC_ACCF_L1:
1298 case ESR_FSC_ACCF_L2:
1299 case ESR_FSC_ACCF_L3:
1300 case ESR_FSC_PERMF_L1:
1301 case ESR_FSC_PERMF_L2:
1302 case ESR_FSC_PERMF_L3:
1303 if (fault_descr & ESR_ABT_WNR)
1304 return CORE_MMU_FAULT_WRITE_PERMISSION;
1305 else
1306 return CORE_MMU_FAULT_READ_PERMISSION;
1307 case ESR_FSC_ALIGN:
1308 return CORE_MMU_FAULT_ALIGNMENT;
1309 default:
1310 return CORE_MMU_FAULT_OTHER;
1311 }
1312 default:
1313 return CORE_MMU_FAULT_OTHER;
1314 }
1315 }
1316 #endif /*ARM64*/
1317