/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */
#ifndef CORE_MMU_H
#define CORE_MMU_H

#ifndef __ASSEMBLER__
#include <assert.h>
#include <compiler.h>
#include <kernel/user_ta.h>
#include <mm/tee_mmu_types.h>
#include <types_ext.h>
#include <util.h>
#endif

#include <platform_config.h>

/* A small page is the smallest unit of memory that can be mapped */
#define SMALL_PAGE_SHIFT U(12)
#define SMALL_PAGE_SIZE BIT(SMALL_PAGE_SHIFT)
#define SMALL_PAGE_MASK ((paddr_t)SMALL_PAGE_SIZE - 1)

/*
 * A PGDIR is the translation table one level above the translation table
 * that holds the small pages.
 */
#ifdef CFG_WITH_LPAE
#define CORE_MMU_PGDIR_SHIFT U(21)
#define CORE_MMU_PGDIR_LEVEL U(3)
#else
#define CORE_MMU_PGDIR_SHIFT U(20)
#define CORE_MMU_PGDIR_LEVEL U(2)
#endif
#define CORE_MMU_PGDIR_SIZE BIT(CORE_MMU_PGDIR_SHIFT)
#define CORE_MMU_PGDIR_MASK ((paddr_t)CORE_MMU_PGDIR_SIZE - 1)

/* TA user space code, data, stack and heap are mapped using this granularity */
#define CORE_MMU_USER_CODE_SHIFT SMALL_PAGE_SHIFT
#define CORE_MMU_USER_CODE_SIZE BIT(CORE_MMU_USER_CODE_SHIFT)
#define CORE_MMU_USER_CODE_MASK ((paddr_t)CORE_MMU_USER_CODE_SIZE - 1)

/* TA user space parameters are mapped using this granularity */
#define CORE_MMU_USER_PARAM_SHIFT SMALL_PAGE_SHIFT
#define CORE_MMU_USER_PARAM_SIZE BIT(CORE_MMU_USER_PARAM_SHIFT)
#define CORE_MMU_USER_PARAM_MASK ((paddr_t)CORE_MMU_USER_PARAM_SIZE - 1)
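
/*
 * Illustrative sketch (not part of the API): the SHIFT/SIZE/MASK triplets
 * above are typically combined to align addresses and sizes to the mapping
 * granularity, for instance when preparing a user parameter buffer. All
 * values below are hypothetical.
 *
 *   paddr_t pa = 0x40001234;
 *   size_t len = 100;
 *   paddr_t page_pa = pa & ~CORE_MMU_USER_PARAM_MASK;
 *   size_t page_off = pa & CORE_MMU_USER_PARAM_MASK;
 *   size_t map_len = ROUNDUP(page_off + len, CORE_MMU_USER_PARAM_SIZE);
 */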

/*
 * Level of base table (i.e. first level of page table),
 * depending on address space
 */
#if !defined(CFG_WITH_LPAE) || (CFG_LPAE_ADDR_SPACE_BITS < 40)
#define CORE_MMU_BASE_TABLE_SHIFT U(30)
#define CORE_MMU_BASE_TABLE_LEVEL U(1)
#elif (CFG_LPAE_ADDR_SPACE_BITS <= 48)
#define CORE_MMU_BASE_TABLE_SHIFT U(39)
#define CORE_MMU_BASE_TABLE_LEVEL U(0)
#else /* (CFG_LPAE_ADDR_SPACE_BITS > 48) */
#error "CFG_WITH_LPAE with CFG_LPAE_ADDR_SPACE_BITS > 48 isn't supported!"
#endif

#ifdef CFG_WITH_LPAE
/*
 * CORE_MMU_BASE_TABLE_OFFSET is used when switching to/from reduced kernel
 * mapping. The actual value depends on internals in core_mmu_lpae.c which
 * we'd rather not expose here. There's a compile time assertion to check
 * that these magic numbers are correct.
 */
#define CORE_MMU_BASE_TABLE_OFFSET \
        (CFG_TEE_CORE_NB_CORE * \
         BIT(CFG_LPAE_ADDR_SPACE_BITS - CORE_MMU_BASE_TABLE_SHIFT) * \
         U(8))
#endif
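
/*
 * Worked example (illustrative only, hypothetical configuration): with
 * CFG_LPAE_ADDR_SPACE_BITS = 32, CORE_MMU_BASE_TABLE_SHIFT = 30 and
 * CFG_TEE_CORE_NB_CORE = 4, each per-core base table holds
 * BIT(32 - 30) = 4 entries of 8 bytes each, so the offset between the two
 * sets of base tables is 4 * 4 * 8 = 128 bytes.
 */
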
/*
 * TEE_RAM_VA_START: The start virtual address of the TEE RAM
 * TEE_TEXT_VA_START: The start virtual address of the OP-TEE text
 */

/*
 * Identity mapping constraint: the virtual base address equals the physical
 * start address. If the platform did not set some of these macros, they get
 * default values.
 */
#ifndef TEE_RAM_VA_SIZE
#define TEE_RAM_VA_SIZE CORE_MMU_PGDIR_SIZE
#endif

#ifndef TEE_LOAD_ADDR
#define TEE_LOAD_ADDR TEE_RAM_START
#endif

#define TEE_RAM_VA_START TEE_RAM_START
#define TEE_TEXT_VA_START (TEE_RAM_VA_START + \
                           (TEE_LOAD_ADDR - TEE_RAM_START))

#ifndef STACK_ALIGNMENT
#define STACK_ALIGNMENT (sizeof(long) * U(2))
#endif

#ifndef __ASSEMBLER__
/*
 * Memory area type:
 * MEM_AREA_END: Reserved, marks the end of a table of mapping areas.
 * MEM_AREA_TEE_RAM: core RAM (read/write/executable, secure, reserved to TEE)
 * MEM_AREA_TEE_RAM_RX: core private read-only/executable memory (secure)
 * MEM_AREA_TEE_RAM_RO: core private read-only/non-executable memory (secure)
 * MEM_AREA_TEE_RAM_RW: core private read/write/non-executable memory (secure)
 * MEM_AREA_INIT_RAM_RO: init private read-only/non-executable memory (secure)
 * MEM_AREA_INIT_RAM_RX: init private read-only/executable memory (secure)
 * MEM_AREA_NEX_RAM_RO: nexus private read-only/non-executable memory (secure)
 * MEM_AREA_NEX_RAM_RW: nexus private r/w/non-executable memory (secure)
 * MEM_AREA_TEE_COHERENT: teecore coherent RAM (secure, reserved to TEE)
 * MEM_AREA_TEE_ASAN: core address sanitizer RAM (secure, reserved to TEE)
 * MEM_AREA_IDENTITY_MAP_RX: core identity mapped r/o executable memory (secure)
 * MEM_AREA_TA_RAM: Secure RAM where teecore loads/executes TA instances.
 * MEM_AREA_NSEC_SHM: NonSecure shared RAM between NSec and TEE.
 * MEM_AREA_RAM_NSEC: NonSecure RAM storing data
 * MEM_AREA_RAM_SEC: Secure RAM storing some secrets
 * MEM_AREA_IO_NSEC: NonSecure HW mapped registers
 * MEM_AREA_IO_SEC: Secure HW mapped registers
 * MEM_AREA_EXT_DT: Memory used to map the external device tree
 * MEM_AREA_RES_VASPACE: Reserved virtual memory space
 * MEM_AREA_SHM_VASPACE: Virtual memory space for dynamic shared memory buffers
 * MEM_AREA_TS_VASPACE: TS va space, only used with phys_to_virt()
 * MEM_AREA_PAGER_VASPACE: Virtual memory space for the pager
 * MEM_AREA_SDP_MEM: Memory reserved for the Secure Data Path
 * MEM_AREA_DDR_OVERALL: Overall DDR address range, candidate to dynamic shm.
 * MEM_AREA_SEC_RAM_OVERALL: Whole secure RAM
 * MEM_AREA_MAXTYPE: lowest invalid 'type' value
 */
enum teecore_memtypes {
        MEM_AREA_END = 0,
        MEM_AREA_TEE_RAM,
        MEM_AREA_TEE_RAM_RX,
        MEM_AREA_TEE_RAM_RO,
        MEM_AREA_TEE_RAM_RW,
        MEM_AREA_INIT_RAM_RO,
        MEM_AREA_INIT_RAM_RX,
        MEM_AREA_NEX_RAM_RO,
        MEM_AREA_NEX_RAM_RW,
        MEM_AREA_TEE_COHERENT,
        MEM_AREA_TEE_ASAN,
        MEM_AREA_IDENTITY_MAP_RX,
        MEM_AREA_TA_RAM,
        MEM_AREA_NSEC_SHM,
        MEM_AREA_RAM_NSEC,
        MEM_AREA_RAM_SEC,
        MEM_AREA_IO_NSEC,
        MEM_AREA_IO_SEC,
        MEM_AREA_EXT_DT,
        MEM_AREA_RES_VASPACE,
        MEM_AREA_SHM_VASPACE,
        MEM_AREA_TS_VASPACE,
        MEM_AREA_PAGER_VASPACE,
        MEM_AREA_SDP_MEM,
        MEM_AREA_DDR_OVERALL,
        MEM_AREA_SEC_RAM_OVERALL,
        MEM_AREA_MAXTYPE
};

static inline const char *teecore_memtype_name(enum teecore_memtypes type)
{
        static const char * const names[] = {
                [MEM_AREA_END] = "END",
                [MEM_AREA_TEE_RAM] = "TEE_RAM_RWX",
                [MEM_AREA_TEE_RAM_RX] = "TEE_RAM_RX",
                [MEM_AREA_TEE_RAM_RO] = "TEE_RAM_RO",
                [MEM_AREA_TEE_RAM_RW] = "TEE_RAM_RW",
                [MEM_AREA_INIT_RAM_RO] = "INIT_RAM_RO",
                [MEM_AREA_INIT_RAM_RX] = "INIT_RAM_RX",
                [MEM_AREA_NEX_RAM_RO] = "NEX_RAM_RO",
                [MEM_AREA_NEX_RAM_RW] = "NEX_RAM_RW",
                [MEM_AREA_TEE_ASAN] = "TEE_ASAN",
                [MEM_AREA_IDENTITY_MAP_RX] = "IDENTITY_MAP_RX",
                [MEM_AREA_TEE_COHERENT] = "TEE_COHERENT",
                [MEM_AREA_TA_RAM] = "TA_RAM",
                [MEM_AREA_NSEC_SHM] = "NSEC_SHM",
                [MEM_AREA_RAM_NSEC] = "RAM_NSEC",
                [MEM_AREA_RAM_SEC] = "RAM_SEC",
                [MEM_AREA_IO_NSEC] = "IO_NSEC",
                [MEM_AREA_IO_SEC] = "IO_SEC",
                [MEM_AREA_EXT_DT] = "EXT_DT",
                [MEM_AREA_RES_VASPACE] = "RES_VASPACE",
                [MEM_AREA_SHM_VASPACE] = "SHM_VASPACE",
                [MEM_AREA_TS_VASPACE] = "TS_VASPACE",
                [MEM_AREA_PAGER_VASPACE] = "PAGER_VASPACE",
                [MEM_AREA_SDP_MEM] = "SDP_MEM",
                [MEM_AREA_DDR_OVERALL] = "DDR_OVERALL",
                [MEM_AREA_SEC_RAM_OVERALL] = "SEC_RAM_OVERALL",
        };

        COMPILE_TIME_ASSERT(ARRAY_SIZE(names) == MEM_AREA_MAXTYPE);
        return names[type];
}

#ifdef CFG_CORE_RWDATA_NOEXEC
#define MEM_AREA_TEE_RAM_RW_DATA MEM_AREA_TEE_RAM_RW
#else
#define MEM_AREA_TEE_RAM_RW_DATA MEM_AREA_TEE_RAM
#endif

struct core_mmu_phys_mem {
        const char *name;
        enum teecore_memtypes type;
        __extension__ union {
#if __SIZEOF_LONG__ != __SIZEOF_PADDR__
                struct {
                        uint32_t lo_addr;
                        uint32_t hi_addr;
                };
#endif
                paddr_t addr;
        };
        __extension__ union {
#if __SIZEOF_LONG__ != __SIZEOF_PADDR__
                struct {
                        uint32_t lo_size;
                        uint32_t hi_size;
                };
#endif
                paddr_size_t size;
        };
};

#define __register_memory(_name, _type, _addr, _size, _section) \
        SCATTERED_ARRAY_DEFINE_ITEM(_section, struct core_mmu_phys_mem) = \
                { .name = (_name), .type = (_type), .addr = (_addr), \
                  .size = (_size) }

#if __SIZEOF_LONG__ != __SIZEOF_PADDR__
#define __register_memory_ul(_name, _type, _addr, _size, _section) \
        SCATTERED_ARRAY_DEFINE_ITEM(_section, struct core_mmu_phys_mem) = \
                { .name = (_name), .type = (_type), .lo_addr = (_addr), \
                  .lo_size = (_size) }
#else
#define __register_memory_ul(_name, _type, _addr, _size, _section) \
        __register_memory(_name, _type, _addr, _size, _section)
#endif

#define register_phys_mem(type, addr, size) \
        __register_memory(#addr, (type), (addr), (size), \
                          phys_mem_map)

#define register_phys_mem_ul(type, addr, size) \
        __register_memory_ul(#addr, (type), (addr), (size), \
                             phys_mem_map)

/* Same as register_phys_mem() but with PGDIR_SIZE granularity */
#define register_phys_mem_pgdir(type, addr, size) \
        register_phys_mem(type, ROUNDDOWN(addr, CORE_MMU_PGDIR_SIZE), \
                          ROUNDUP(size + addr - \
                                  ROUNDDOWN(addr, CORE_MMU_PGDIR_SIZE), \
                                  CORE_MMU_PGDIR_SIZE))
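
/*
 * Illustrative example (hypothetical platform values): a platform source
 * file typically registers device memory like this and lets the macro above
 * round the range to CORE_MMU_PGDIR_SIZE granularity:
 *
 *   register_phys_mem_pgdir(MEM_AREA_IO_SEC, CONSOLE_UART_BASE,
 *                           CONSOLE_UART_SIZE);
 *   register_phys_mem_pgdir(MEM_AREA_IO_NSEC, GIC_BASE, GIC_SIZE);
 *
 * CONSOLE_UART_BASE/SIZE and GIC_BASE/SIZE stand for whatever the platform
 * defines in its platform_config.h.
 */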

#ifdef CFG_SECURE_DATA_PATH
#define register_sdp_mem(addr, size) \
        __register_memory(#addr, MEM_AREA_SDP_MEM, (addr), (size), \
                          phys_sdp_mem)
#else
#define register_sdp_mem(addr, size) \
        static int CONCAT(__register_sdp_mem_unused, __COUNTER__) \
                __unused
#endif

/* register_dynamic_shm() is deprecated, please use register_ddr() instead */
#define register_dynamic_shm(addr, size) \
        __register_memory(#addr, MEM_AREA_DDR_OVERALL, (addr), (size), \
                          phys_ddr_overall_compat)

/*
 * register_ddr() - Define a memory range
 * @addr: Base address
 * @size: Length
 *
 * This macro can be used multiple times to define disjoint ranges. During
 * initialization, holes are carved out of these ranges where they overlap
 * with special memory, for instance memory registered with
 * register_sdp_mem().
 *
 * The memory that remains is accepted as non-secure shared memory when
 * communicating with normal world.
 *
 * This macro is an alternative to supplying the memory description in a
 * devicetree blob.
 */
#define register_ddr(addr, size) \
        __register_memory(#addr, MEM_AREA_DDR_OVERALL, (addr), \
                          (size), phys_ddr_overall)
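
/*
 * Illustrative example (hypothetical addresses): a platform with two DRAM
 * banks could describe them with:
 *
 *   register_ddr(0x80000000, 0x40000000);
 *   register_ddr(0x880000000, 0x80000000);
 *
 * Anything in these ranges that is not carved out as secure or special
 * memory becomes a candidate for dynamic shared memory with normal world.
 */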

#define phys_ddr_overall_begin \
        SCATTERED_ARRAY_BEGIN(phys_ddr_overall, struct core_mmu_phys_mem)

#define phys_ddr_overall_end \
        SCATTERED_ARRAY_END(phys_ddr_overall, struct core_mmu_phys_mem)

#define phys_ddr_overall_compat_begin \
        SCATTERED_ARRAY_BEGIN(phys_ddr_overall_compat, struct core_mmu_phys_mem)

#define phys_ddr_overall_compat_end \
        SCATTERED_ARRAY_END(phys_ddr_overall_compat, struct core_mmu_phys_mem)

#define phys_sdp_mem_begin \
        SCATTERED_ARRAY_BEGIN(phys_sdp_mem, struct core_mmu_phys_mem)

#define phys_sdp_mem_end \
        SCATTERED_ARRAY_END(phys_sdp_mem, struct core_mmu_phys_mem)

#define phys_mem_map_begin \
        SCATTERED_ARRAY_BEGIN(phys_mem_map, struct core_mmu_phys_mem)

#define phys_mem_map_end \
        SCATTERED_ARRAY_END(phys_mem_map, struct core_mmu_phys_mem)

#ifdef CFG_CORE_RESERVED_SHM
/* Default NSec shared memory allocated from NSec world */
extern unsigned long default_nsec_shm_paddr;
extern unsigned long default_nsec_shm_size;
#endif

/*
 * Assembly code in enable_mmu() depends on the layout of this struct.
 */
struct core_mmu_config {
#if defined(ARM64)
        uint64_t tcr_el1;
        uint64_t mair_el1;
        uint64_t ttbr0_el1_base;
        uint64_t ttbr0_core_offset;
        uint64_t load_offset;
#elif defined(CFG_WITH_LPAE)
        uint32_t ttbcr;
        uint32_t mair0;
        uint32_t ttbr0_base;
        uint32_t ttbr0_core_offset;
        uint32_t load_offset;
#else
        uint32_t prrr;
        uint32_t nmrr;
        uint32_t dacr;
        uint32_t ttbcr;
        uint32_t ttbr;
        uint32_t load_offset;
#endif
};

void core_init_mmu_map(unsigned long seed, struct core_mmu_config *cfg);
void core_init_mmu_regs(struct core_mmu_config *cfg);

bool core_mmu_place_tee_ram_at_top(paddr_t paddr);

#ifdef CFG_WITH_LPAE
/*
 * struct core_mmu_user_map - current user mapping register state
 * @user_map: physical address of user map translation table
 * @asid: ASID for the user map
 *
 * Note that this struct should be treated as an opaque struct since
 * the content depends on descriptor table format.
 */
struct core_mmu_user_map {
        uint64_t user_map;
        uint32_t asid;
};
#else
/*
 * struct core_mmu_user_map - current user mapping register state
 * @ttbr0: content of ttbr0
 * @ctxid: content of contextidr
 *
 * Note that this struct should be treated as an opaque struct since
 * the content depends on descriptor table format.
 */
struct core_mmu_user_map {
        uint32_t ttbr0;
        uint32_t ctxid;
};
#endif

#ifdef CFG_WITH_LPAE
bool core_mmu_user_va_range_is_defined(void);
#else
static inline bool __noprof core_mmu_user_va_range_is_defined(void)
{
        return true;
}
#endif

/*
 * struct mmu_partition - stores MMU partition.
 *
 * Basically it represents the whole MMU mapping. It is possible
 * to create multiple partitions and change them at runtime,
 * effectively changing how OP-TEE sees memory.
 * This is an opaque struct which is defined differently for
 * v7 and LPAE MMUs.
 *
 * This structure is mostly used when virtualization is enabled.
 * When CFG_VIRTUALIZATION == n, only the default partition exists.
 */
struct mmu_partition;

/*
 * core_mmu_get_user_va_range() - Return range of user va space
 * @base: Lowest user virtual address
 * @size: Size in bytes of user address space
 */
void core_mmu_get_user_va_range(vaddr_t *base, size_t *size);

/*
 * enum core_mmu_fault - different kinds of faults
 * @CORE_MMU_FAULT_ALIGNMENT: alignment fault
 * @CORE_MMU_FAULT_DEBUG_EVENT: debug event
 * @CORE_MMU_FAULT_TRANSLATION: translation fault
 * @CORE_MMU_FAULT_WRITE_PERMISSION: Permission fault during write
 * @CORE_MMU_FAULT_READ_PERMISSION: Permission fault during read
 * @CORE_MMU_FAULT_ASYNC_EXTERNAL: asynchronous external abort
 * @CORE_MMU_FAULT_ACCESS_BIT: access bit fault
 * @CORE_MMU_FAULT_OTHER: Other/unknown fault
 */
enum core_mmu_fault {
        CORE_MMU_FAULT_ALIGNMENT,
        CORE_MMU_FAULT_DEBUG_EVENT,
        CORE_MMU_FAULT_TRANSLATION,
        CORE_MMU_FAULT_WRITE_PERMISSION,
        CORE_MMU_FAULT_READ_PERMISSION,
        CORE_MMU_FAULT_ASYNC_EXTERNAL,
        CORE_MMU_FAULT_ACCESS_BIT,
        CORE_MMU_FAULT_OTHER,
};

/*
 * core_mmu_get_fault_type() - get fault type
 * @fault_descr: Content of fault status or exception syndrome register
 * @returns an enum describing the content of fault status register.
 */
enum core_mmu_fault core_mmu_get_fault_type(uint32_t fault_descr);

/*
 * core_mmu_type_to_attr() - convert memory type to attribute
 * @t: memory type
 * @returns an attribute that can be passed to core_mmu_set_entry() and friends
 */
uint32_t core_mmu_type_to_attr(enum teecore_memtypes t);

/*
 * core_mmu_create_user_map() - Create user mode mapping
 * @uctx: Pointer to user mode context
 * @map: MMU configuration to use when activating this VA space
 */
void core_mmu_create_user_map(struct user_mode_ctx *uctx,
                              struct core_mmu_user_map *map);
/*
 * core_mmu_get_user_map() - Reads current MMU configuration for user VA space
 * @map: MMU configuration for current user VA space.
 */
void core_mmu_get_user_map(struct core_mmu_user_map *map);

/*
 * core_mmu_set_user_map() - Set new MMU configuration for user VA space
 * @map: User context MMU configuration or NULL to set core VA space
 *
 * Activate user VA space mapping and set its ASID if @map is not NULL,
 * otherwise activate core mapping and set ASID to 0.
 */
void core_mmu_set_user_map(struct core_mmu_user_map *map);
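
/*
 * Illustrative sketch (not taken from the implementation): a common pattern
 * is to save the active user mapping, temporarily switch to the core-only
 * mapping, and restore the user mapping afterwards:
 *
 *   struct core_mmu_user_map map = { };
 *
 *   core_mmu_get_user_map(&map);
 *   core_mmu_set_user_map(NULL);   // core VA space only
 *   // ... work that must not run with a user VA space active ...
 *   core_mmu_set_user_map(&map);   // restore the previous user VA space
 */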

/*
 * struct core_mmu_table_info - Properties for a translation table
 * @table: Pointer to translation table
 * @va_base: VA base address of the translation table
 * @level: Translation table level
 * @shift: The shift of each entry in the table
 * @num_entries: Number of entries in this table.
 */
struct core_mmu_table_info {
        void *table;
        vaddr_t va_base;
        unsigned level;
        unsigned shift;
        unsigned num_entries;
#ifdef CFG_VIRTUALIZATION
        struct mmu_partition *prtn;
#endif
};

/*
 * core_mmu_find_table() - Locates a translation table
 * @prtn: MMU partition where search should be performed
 * @va: Virtual address for the table to cover
 * @max_level: Don't traverse beyond this level
 * @tbl_info: Pointer to where to store properties.
 * @return true if a translation table was found, false on error
 */
bool core_mmu_find_table(struct mmu_partition *prtn, vaddr_t va,
                         unsigned max_level,
                         struct core_mmu_table_info *tbl_info);

/*
 * core_mmu_entry_to_finer_grained() - divide mapping at current level into
 *     smaller ones so memory can be mapped with finer granularity
 * @tbl_info: table where the target record is located
 * @idx: index of the record for which a pgdir must be set up.
 * @secure: true/false if pgdir maps secure/non-secure memory (32bit mmu)
 * @return true on success, false on error
 */
bool core_mmu_entry_to_finer_grained(struct core_mmu_table_info *tbl_info,
                                     unsigned int idx, bool secure);

void core_mmu_set_entry_primitive(void *table, size_t level, size_t idx,
                                  paddr_t pa, uint32_t attr);

void core_mmu_get_user_pgdir(struct core_mmu_table_info *pgd_info);

/*
 * core_mmu_set_entry() - Set entry in translation table
 * @tbl_info: Translation table properties
 * @idx: Index of entry to update
 * @pa: Physical address to assign entry
 * @attr: Attributes to assign entry
 */
void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
                        paddr_t pa, uint32_t attr);

void core_mmu_get_entry_primitive(const void *table, size_t level, size_t idx,
                                  paddr_t *pa, uint32_t *attr);

/*
 * core_mmu_get_entry() - Get entry from translation table
 * @tbl_info: Translation table properties
 * @idx: Index of entry to read
 * @pa: Physical address is returned here if pa is not NULL
 * @attr: Attributes are returned here if attr is not NULL
 */
void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
                        paddr_t *pa, uint32_t *attr);

/*
 * core_mmu_va2idx() - Translate from virtual address to table index
 * @tbl_info: Translation table properties
 * @va: Virtual address to translate
 * @returns index in translation table
 */
static inline unsigned core_mmu_va2idx(struct core_mmu_table_info *tbl_info,
                                       vaddr_t va)
{
        return (va - tbl_info->va_base) >> tbl_info->shift;
}

/*
 * core_mmu_idx2va() - Translate from table index to virtual address
 * @tbl_info: Translation table properties
 * @idx: Index to translate
 * @returns Virtual address
 */
static inline vaddr_t core_mmu_idx2va(struct core_mmu_table_info *tbl_info,
                                      unsigned idx)
{
        return (idx << tbl_info->shift) + tbl_info->va_base;
}

/*
 * core_mmu_get_block_offset() - Get offset inside a block/page
 * @tbl_info: Translation table properties
 * @pa: Physical address
 * @returns offset within one block of the translation table
 */
static inline size_t core_mmu_get_block_offset(
                struct core_mmu_table_info *tbl_info, paddr_t pa)
{
        return pa & ((1 << tbl_info->shift) - 1);
}
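
/*
 * Illustrative sketch (assumptions, not a drop-in helper): combining
 * core_mmu_find_table(), core_mmu_va2idx() and core_mmu_get_entry() to look
 * up the physical address backing a virtual address. This assumes that
 * passing a NULL partition selects the currently active one:
 *
 *   struct core_mmu_table_info tbl_info = { };
 *   paddr_t pa = 0;
 *   uint32_t attr = 0;
 *
 *   if (core_mmu_find_table(NULL, va, UINT_MAX, &tbl_info)) {
 *           unsigned int idx = core_mmu_va2idx(&tbl_info, va);
 *
 *           core_mmu_get_entry(&tbl_info, idx, &pa, &attr);
 *           if (attr & TEE_MATTR_VALID_BLOCK)
 *                   pa += va & ((1 << tbl_info.shift) - 1);
 *   }
 */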

/*
 * core_mmu_is_dynamic_vaspace() - Check if memory region belongs to
 *     empty virtual address space that is used for dynamic mappings
 * @mm: memory region to be checked
 * @returns result of the check
 */
static inline bool core_mmu_is_dynamic_vaspace(struct tee_mmap_region *mm)
{
        return mm->type == MEM_AREA_RES_VASPACE ||
               mm->type == MEM_AREA_SHM_VASPACE;
}

/*
 * core_mmu_map_pages() - map list of pages at given virtual address
 * @vstart: Virtual address where mapping begins
 * @pages: Array of page addresses
 * @num_pages: Number of pages
 * @memtype: Type of memory to be mapped
 *
 * Note: This function asserts that pages are not mapped executable for
 * kernel (privileged) mode.
 *
 * @returns: TEE_SUCCESS on success, TEE_ERROR_XXX on error
 */
TEE_Result core_mmu_map_pages(vaddr_t vstart, paddr_t *pages, size_t num_pages,
                              enum teecore_memtypes memtype);

/*
 * core_mmu_map_contiguous_pages() - map range of pages at given virtual
 *     address
 * @vstart: Virtual address where mapping begins
 * @pstart: Physical address of the first page
 * @num_pages: Number of pages
 * @memtype: Type of memory to be mapped
 *
 * Note: This function asserts that pages are not mapped executable for
 * kernel (privileged) mode.
 *
 * @returns: TEE_SUCCESS on success, TEE_ERROR_XXX on error
 */
TEE_Result core_mmu_map_contiguous_pages(vaddr_t vstart, paddr_t pstart,
                                         size_t num_pages,
                                         enum teecore_memtypes memtype);

/*
 * core_mmu_unmap_pages() - remove mapping at given virtual address
 * @vstart: Virtual address where mapping begins
 * @num_pages: Number of pages to unmap
 */
void core_mmu_unmap_pages(vaddr_t vstart, size_t num_pages);
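
/*
 * Illustrative sketch (hypothetical values): mapping a physically contiguous
 * buffer into the reserved VA space and removing the mapping again:
 *
 *   TEE_Result res = TEE_SUCCESS;
 *   size_t num_pages = num_bytes / SMALL_PAGE_SIZE;
 *
 *   res = core_mmu_map_contiguous_pages(vstart, pstart, num_pages,
 *                                       MEM_AREA_RES_VASPACE);
 *   if (res)
 *           return res;
 *   // ... use the buffer at vstart ...
 *   core_mmu_unmap_pages(vstart, num_pages);
 *
 * vstart, pstart and num_bytes stand for page-aligned values obtained
 * elsewhere, for instance from a MEM_AREA_RES_VASPACE allocation.
 */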

/*
 * core_mmu_user_mapping_is_active() - Report if user mapping is active
 * @returns true if a user VA space is active, false if user VA space is
 *          inactive.
 */
bool core_mmu_user_mapping_is_active(void);

/*
 * core_mmu_mattr_is_ok() - Check that supplied mem attributes can be used
 * @returns true if the attributes can be used, false if not.
 */
bool core_mmu_mattr_is_ok(uint32_t mattr);

void core_mmu_get_mem_by_type(enum teecore_memtypes type, vaddr_t *s,
                              vaddr_t *e);

enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa);

/* Routines to retrieve shared memory configuration */
static inline bool core_mmu_is_shm_cached(void)
{
        return core_mmu_type_to_attr(MEM_AREA_NSEC_SHM) &
               (TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT);
}

TEE_Result core_mmu_remove_mapping(enum teecore_memtypes type, void *addr,
                                   size_t len);
void *core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr,
                           size_t len);
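
/*
 * Illustrative sketch (hypothetical device values): mapping a secure device
 * at runtime and tearing the mapping down when done:
 *
 *   vaddr_t regs = (vaddr_t)core_mmu_add_mapping(MEM_AREA_IO_SEC,
 *                                                DEVICE_BASE, DEVICE_SIZE);
 *
 *   if (!regs)
 *           return TEE_ERROR_GENERIC;
 *   // ... access the device registers through regs ...
 *   core_mmu_remove_mapping(MEM_AREA_IO_SEC, (void *)regs, DEVICE_SIZE);
 *
 * DEVICE_BASE and DEVICE_SIZE are placeholders for platform-specific values.
 */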

/*
 * core_mmu_find_mapping_exclusive() - Find mapping of specified type and
 *                                     length. If more than one mapping of
 *                                     specified type is present, NULL will be
 *                                     returned.
 * @type: memory type
 * @len: length in bytes
 */
struct tee_mmap_region *
core_mmu_find_mapping_exclusive(enum teecore_memtypes type, size_t len);

/*
 * tlbi_mva_range() - Invalidate TLB for virtual address range
 * @va: start virtual address, must be a multiple of @granule
 * @len: length in bytes of range, must be a multiple of @granule
 * @granule: granularity of mapping, supported values are
 *           CORE_MMU_PGDIR_SIZE or SMALL_PAGE_SIZE. This value must
 *           match the actual mappings.
 */
void tlbi_mva_range(vaddr_t va, size_t len, size_t granule);

/*
 * tlbi_mva_range_asid() - Invalidate TLB for virtual address range for
 *                         a specific ASID
 * @va: start virtual address, must be a multiple of @granule
 * @len: length in bytes of range, must be a multiple of @granule
 * @granule: granularity of mapping, supported values are
 *           CORE_MMU_PGDIR_SIZE or SMALL_PAGE_SIZE. This value must
 *           match the actual mappings.
 * @asid: Address space identifier
 */
void tlbi_mva_range_asid(vaddr_t va, size_t len, size_t granule, uint32_t asid);
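
/*
 * Illustrative sketch: after entries covering a small-page mapped range have
 * been changed with core_mmu_set_entry(), stale translations for that range
 * can be removed with:
 *
 *   tlbi_mva_range(vstart, num_pages * SMALL_PAGE_SIZE, SMALL_PAGE_SIZE);
 *
 * vstart and num_pages are placeholders for the range that was changed.
 */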

/* Cache maintenance operation type */
enum cache_op {
        DCACHE_CLEAN,
        DCACHE_AREA_CLEAN,
        DCACHE_INVALIDATE,
        DCACHE_AREA_INVALIDATE,
        ICACHE_INVALIDATE,
        ICACHE_AREA_INVALIDATE,
        DCACHE_CLEAN_INV,
        DCACHE_AREA_CLEAN_INV,
};

/* L1/L2 cache maintenance */
TEE_Result cache_op_inner(enum cache_op op, void *va, size_t len);
#ifdef CFG_PL310
TEE_Result cache_op_outer(enum cache_op op, paddr_t pa, size_t len);
#else
static inline TEE_Result cache_op_outer(enum cache_op op __unused,
                                        paddr_t pa __unused,
                                        size_t len __unused)
{
        /* Nothing to do about L2 Cache Maintenance when no PL310 */
        return TEE_SUCCESS;
}
#endif
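
/*
 * Illustrative sketch (hypothetical buffer): cleaning a buffer before
 * handing it to a non-coherent bus master, inner caches by virtual address
 * and the outer PL310 (when present) by physical address:
 *
 *   cache_op_inner(DCACHE_AREA_CLEAN, buf, len);
 *   cache_op_outer(DCACHE_AREA_CLEAN, buf_pa, len);
 *
 * buf is the virtual address of the buffer and buf_pa its physical address,
 * e.g. obtained with virt_to_phys().
 */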

/* Check whether the CPU MMU is enabled or not */
bool cpu_mmu_enabled(void);

/* Do section mapping, not supported on LPAE */
void map_memarea_sections(const struct tee_mmap_region *mm, uint32_t *ttb);

#ifdef CFG_CORE_DYN_SHM
/*
 * Check if platform defines nsec DDR range(s).
 * Static SHM (MEM_AREA_NSEC_SHM) is not covered by this API as it is
 * always present.
 */
bool core_mmu_nsec_ddr_is_defined(void);

void core_mmu_set_discovered_nsec_ddr(struct core_mmu_phys_mem *start,
                                      size_t nelems);
#endif

/* Initialize MMU partition */
void core_init_mmu_prtn(struct mmu_partition *prtn, struct tee_mmap_region *mm);

unsigned int asid_alloc(void);
void asid_free(unsigned int asid);

#ifdef CFG_SECURE_DATA_PATH
/* Alloc and fill SDP memory objects table - table is NULL terminated */
struct mobj **core_sdp_mem_create_mobjs(void);
#endif

#ifdef CFG_VIRTUALIZATION
size_t core_mmu_get_total_pages_size(void);
struct mmu_partition *core_alloc_mmu_prtn(void *tables);
void core_free_mmu_prtn(struct mmu_partition *prtn);
void core_mmu_set_prtn(struct mmu_partition *prtn);
void core_mmu_set_default_prtn(void);
void core_mmu_set_default_prtn_tbl(void);
#endif

void core_mmu_init_virtualization(void);

/* init some allocation pools */
void core_mmu_init_ta_ram(void);

#endif /*__ASSEMBLER__*/

#endif /* CORE_MMU_H */