/*
 * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#include <arch_features.h>
#include <common/debug.h>
#include <lib/utils_def.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include "xlat_mpu_private.h"

#include <fvp_r_arch_helpers.h>
#include <platform_def.h>

#warning "xlat_mpu library is currently experimental and its API may change in future."


/* Helper function that cleans the data cache only if it is enabled. */
static inline __attribute__((unused))
	void xlat_clean_dcache_range(uintptr_t addr, size_t size)
{
	if (is_dcache_enabled()) {
		clean_dcache_range(addr, size);
	}
}


/* Calculate region-attributes byte for PRBAR part of MPU-region descriptor: */
uint64_t prbar_attr_value(uint32_t attr)
{
	uint64_t retValue = UL(0);
	uint64_t extract;  /* temp var holding bit extracted from attr */

	/* Extract and stuff SH: */
	extract = (uint64_t) ((attr >> MT_SHAREABILITY_SHIFT)
				& MT_SHAREABILITY_MASK);
	retValue |= (extract << PRBAR_SH_SHIFT);

	/* Extract and stuff AP: */
	extract = (uint64_t) ((attr >> MT_PERM_SHIFT) & MT_PERM_MASK);
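	/*
	 * Note: in the PRBAR_EL2 AP[2:1] encoding, 0b10 is read-only and
	 * 0b00 is read/write (EL2 access only in both cases), so MT_RO
	 * selects the read-only encoding below and MT_RW the read/write one.
	 */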
	if (extract == 0U) {
		retValue |= (UL(2) << PRBAR_AP_SHIFT);
	} else /* extract == 1 */ {
		retValue |= (UL(0) << PRBAR_AP_SHIFT);
	}

	/* Extract and stuff XN: */
	extract = (uint64_t) ((attr >> MT_EXECUTE_SHIFT) & MT_EXECUTE_MASK);
	retValue |= (extract << PRBAR_XN_SHIFT);
	/* However, also don't execute in peripheral space: */
	extract = (uint64_t) ((attr >> MT_TYPE_SHIFT) & MT_TYPE_MASK);
	if (extract == 0U) {
		retValue |= (UL(1) << PRBAR_XN_SHIFT);
	}
	return retValue;
}

/* Calculate region-attributes byte for PRLAR part of MPU-region descriptor: */
uint64_t prlar_attr_value(uint32_t attr)
{
	uint64_t retValue = UL(0);
	uint64_t extract;  /* temp var holding bit extracted from attr */

	/* Extract and stuff AttrIndx: */
	extract = (uint64_t) ((attr >> MT_TYPE_SHIFT)
				& MT_TYPE_MASK);
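	/*
	 * AttrIndx selects one of the memory-attribute fields that
	 * init_xlat_tables_ctx() programs into MAIR_EL2, so the mapping
	 * below must stay consistent with the ATTR_*_INDEX values used
	 * there.
	 */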
	switch (extract) {
	case UL(0):
		retValue |= (UL(1) << PRLAR_ATTR_SHIFT);
		break;
	case UL(2):
		/* 0, so OR in nothing */
		break;
	case UL(3):
		retValue |= (UL(2) << PRLAR_ATTR_SHIFT);
		break;
	default:
		retValue |= (extract << PRLAR_ATTR_SHIFT);
		break;
	}

	/* Stuff EN: */
	retValue |= (UL(1) << PRLAR_EN_SHIFT);

	/* Force NS to 0 (Secure);  v8-R64 only supports Secure: */
	extract = ~(1U << PRLAR_NS_SHIFT);
	retValue &= extract;

	return retValue;
}

/*
 * Function that writes an MPU "translation" into the MPU registers. If that
 * is not possible (e.g., no more MPU regions are available), boot is aborted.
 */
static void mpu_map_region(mmap_region_t *mm)
{
	uint64_t prenr_el2_value = 0UL;
	uint64_t prbar_attrs = 0UL;
	uint64_t prlar_attrs = 0UL;
	int region_to_use = 0;

	/* If all MPU regions in use, then abort boot: */
	prenr_el2_value = read_prenr_el2();
	assert(prenr_el2_value != 0xffffffff);

	/* Find and select first-available MPU region (PRENR has an enable bit
	 * for each MPU region, 1 for in-use or 0 for unused):
	 */
	for (region_to_use = 0;  region_to_use < N_MPU_REGIONS;
	     region_to_use++) {
		if (((prenr_el2_value >> region_to_use) & 1) == 0) {
			break;
		}
	}
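	/* PRSELR_EL2 selects which region the PRBAR_EL2/PRLAR_EL2 accesses
	 * below refer to:
	 */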
	write_prselr_el2((uint64_t) (region_to_use));
	isb();

	/* Set base and limit addresses (the limit is inclusive, hence
	 * base + size - 1):
	 */
	write_prbar_el2(mm->base_pa & PRBAR_PRLAR_ADDR_MASK);
	write_prlar_el2((mm->base_pa + mm->size - 1UL)
			& PRBAR_PRLAR_ADDR_MASK);
	dsbsy();
	isb();

	/* Set attributes: */
	prbar_attrs = prbar_attr_value(mm->attr);
	write_prbar_el2(read_prbar_el2() | prbar_attrs);
	prlar_attrs = prlar_attr_value(mm->attr);
	write_prlar_el2(read_prlar_el2() | prlar_attrs);
	dsbsy();
	isb();

	/* Mark this MPU region as used: */
	prenr_el2_value |= (1 << region_to_use);
	write_prenr_el2(prenr_el2_value);
	isb();
}

/*
 * Function that verifies that a region can be mapped.
 * Returns:
 *        0: Success, the mapping is allowed.
 *   EINVAL: Invalid values were used as arguments.
 *   ERANGE: The memory limits were surpassed.
 *   ENOMEM: There is not enough memory in the mmap array.
 *    EPERM: Region overlaps another one in an invalid way.
 */
static int mmap_add_region_check(const xlat_ctx_t *ctx, const mmap_region_t *mm)
{
	unsigned long long base_pa = mm->base_pa;
	uintptr_t base_va = mm->base_va;
	size_t size = mm->size;

	unsigned long long end_pa = base_pa + size - 1U;
	uintptr_t end_va = base_va + size - 1U;

	if (base_pa != base_va) {
		return -EINVAL;  /* MPU does not perform address translation */
	}
	if ((base_pa % 64ULL) != 0ULL) {
		return -EINVAL;  /* MPU requires 64-byte alignment */
	}
	/* Check for overflows */
	if ((base_pa > end_pa) || (base_va > end_va)) {
		return -ERANGE;
	}
	if (end_pa > ctx->pa_max_address) {
		return -ERANGE;
	}
	/* Check that there is space in the ctx->mmap array */
	if (ctx->mmap[ctx->mmap_num - 1].size != 0U) {
		return -ENOMEM;
	}
	/* Check for PAs and VAs overlaps with all other regions */
	for (const mmap_region_t *mm_cursor = ctx->mmap;
	     mm_cursor->size != 0U; ++mm_cursor) {

		uintptr_t mm_cursor_end_va =
			mm_cursor->base_va + mm_cursor->size - 1U;

		/*
		 * Check if one of the regions is completely inside the other
		 * one.
		 */
		bool fully_overlapped_va =
			((base_va >= mm_cursor->base_va) &&
					(end_va <= mm_cursor_end_va)) ||
			((mm_cursor->base_va >= base_va) &&
						(mm_cursor_end_va <= end_va));

		/*
		 * Full VA overlaps are only allowed if both regions are
		 * identity mapped (zero offset) or have the same VA to PA
		 * offset. Also, make sure that it's not the exact same area.
		 * This can only be done with static regions.
		 */
		if (fully_overlapped_va) {

#if PLAT_XLAT_TABLES_DYNAMIC
			if (((mm->attr & MT_DYNAMIC) != 0U) ||
			    ((mm_cursor->attr & MT_DYNAMIC) != 0U)) {
				return -EPERM;
			}
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
			if ((mm_cursor->base_va - mm_cursor->base_pa)
					!= (base_va - base_pa)) {
				return -EPERM;
			}
			if ((base_va == mm_cursor->base_va) &&
					(size == mm_cursor->size)) {
				return -EPERM;
			}
		} else {
			/*
			 * If the regions do not have fully overlapping VAs,
			 * then they must have fully separated VAs and PAs.
			 * Partial overlaps are not allowed
			 */

			unsigned long long mm_cursor_end_pa =
				     mm_cursor->base_pa + mm_cursor->size - 1U;

			bool separated_pa = (end_pa < mm_cursor->base_pa) ||
				(base_pa > mm_cursor_end_pa);
			bool separated_va = (end_va < mm_cursor->base_va) ||
				(base_va > mm_cursor_end_va);

			if (!separated_va || !separated_pa) {
				return -EPERM;
			}
		}
	}

	return 0;
}

void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
{
	mmap_region_t *mm_cursor = ctx->mmap, *mm_destination;
	const mmap_region_t *mm_end = ctx->mmap + ctx->mmap_num;
	const mmap_region_t *mm_last;
	unsigned long long end_pa = mm->base_pa + mm->size - 1U;
	uintptr_t end_va = mm->base_va + mm->size - 1U;
	int ret;

	/* Ignore empty regions */
	if (mm->size == 0U) {
		return;
	}

	/* Static regions must be added before initializing the xlat tables. */
	assert(!ctx->initialized);

	ret = mmap_add_region_check(ctx, mm);
	if (ret != 0) {
		ERROR("mmap_add_region_check() failed. error %d\n", ret);
		assert(false);
		return;
	}

	/*
	 * Find the last entry marker in the mmap
	 */
	mm_last = ctx->mmap;
	while ((mm_last->size != 0U) && (mm_last < mm_end)) {
		++mm_last;
	}

	/*
	 * Check if we have enough space in the memory mapping table.
	 * This shouldn't happen as we have checked in mmap_add_region_check
	 * that there is free space.
	 */
	assert(mm_last->size == 0U);

	/* Make room for new region by moving other regions up by one place */
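	/*
	 * (mm_cursor still points at the start of the array, so the new
	 * region is inserted at index 0; regions are not kept sorted here.)
	 */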
	mm_destination = mm_cursor + 1;
	(void)memmove(mm_destination, mm_cursor,
		(uintptr_t)mm_last - (uintptr_t)mm_cursor);

	/*
	 * Check we haven't lost the empty sentinel from the end of the array.
	 * This shouldn't happen as we have checked in mmap_add_region_check
	 * that there is free space.
	 */
	assert(mm_end->size == 0U);

	*mm_cursor = *mm;

	if (end_pa > ctx->max_pa) {
		ctx->max_pa = end_pa;
	}
	if (end_va > ctx->max_va) {
		ctx->max_va = end_va;
	}
}

void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
{
	const mmap_region_t *mm_cursor = mm;

	while (mm_cursor->granularity != 0U) {
		mmap_add_region_ctx(ctx, mm_cursor);
		mm_cursor++;
	}
}
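
/*
 * Typical usage from platform code is sketched below. This is only an
 * illustration: the context, region names, and sizes are hypothetical and
 * not part of this library.
 *
 *	static const mmap_region_t plat_r_mmap[] = {
 *		MAP_REGION_FLAT(DEVICE0_BASE, DEVICE0_SIZE,
 *				MT_DEVICE | MT_RW | MT_SECURE),
 *		MAP_REGION_FLAT(DRAM0_BASE, DRAM0_SIZE,
 *				MT_MEMORY | MT_RW | MT_SECURE),
 *		{0}
 *	};
 *
 *	mmap_add_ctx(&plat_ctx, plat_r_mmap);
 *	init_xlat_tables_ctx(&plat_ctx);
 */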

void __init init_xlat_tables_ctx(xlat_ctx_t *ctx)
{
	uint64_t mair = UL(0);

	assert(ctx != NULL);
	assert(!ctx->initialized);
	assert((ctx->xlat_regime == EL2_REGIME) ||
		(ctx->xlat_regime == EL1_EL0_REGIME));
	/* Note:  Add EL3_REGIME if EL3 is supported in future v8-R64 cores. */
	assert(!is_mpu_enabled_ctx(ctx));

	mmap_region_t *mm = ctx->mmap;

	assert(ctx->va_max_address >=
		(xlat_get_min_virt_addr_space_size() - 1U));
	assert(ctx->va_max_address <= (MAX_VIRT_ADDR_SPACE_SIZE - 1U));
	assert(IS_POWER_OF_TWO(ctx->va_max_address + 1U));

	xlat_mmap_print(mm);

	/* All tables must be zeroed before mapping any region. */

	for (unsigned int i = 0U; i < ctx->base_table_entries; i++) {
		ctx->base_table[i] = INVALID_DESC;
	}

	/* Also mark all MPU regions as invalid in the MPU hardware itself
	 * (writing PRENR_EL2 is sufficient for current, max-32-region
	 * implementations):
	 */
	write_prenr_el2(0);
	dsbsy();
	isb();
	while (mm->size != 0U) {
		if (read_prenr_el2() == ALL_MPU_EL2_REGIONS_USED) {
			ERROR("Not enough MPU regions to map region:\n"
				" VA:0x%lx  PA:0x%llx  size:0x%zx  attr:0x%x\n",
				mm->base_va, mm->base_pa, mm->size, mm->attr);
			panic();
		} else {
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
			xlat_clean_dcache_range((uintptr_t)mm->base_va,
				mm->size);
#endif
			mpu_map_region(mm);
		}
		mm++;
	}

	ctx->initialized = true;

	xlat_tables_print(ctx);

	/* Set attributes in the right indices of the MAIR */
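	/* (these index assignments must line up with the AttrIndx mapping
	 * in prlar_attr_value())
	 */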
	mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
			ATTR_IWBWA_OWBWA_NTR_INDEX);
	mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE,
			ATTR_NON_CACHEABLE_INDEX);
	write_mair_el2(mair);
	dsbsy();
	isb();
}

/*
 * Function to wipe clean and disable all MPU regions.  This function expects
 * that the MPU has already been turned off, and caching concerns addressed,
 * but it nevertheless also explicitly turns off the MPU.
 */
void clear_all_mpu_regions(void)
{
	uint64_t sctlr_el2_value = 0UL;
	uint64_t region_n = 0UL;

	/*
	 * MPU should already be disabled, but explicitly disable it
	 * nevertheless, by clearing SCTLR_EL2.M (bit 0):
	 */
	sctlr_el2_value = read_sctlr_el2() & ~(1UL);
	write_sctlr_el2(sctlr_el2_value);

	/* Disable all regions: */
	write_prenr_el2(0UL);

	/* Sequence through all regions, zeroing them out and turning off: */
	for (region_n = 0UL;  region_n < N_MPU_REGIONS;  region_n++) {
		write_prselr_el2(region_n);
		isb();
		write_prbar_el2((uint64_t) 0);
		write_prlar_el2((uint64_t) 0);
		dsbsy();
		isb();
	}
}