1 /*
2  * Copyright (c) 2021, Arm Limited. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <errno.h>
9 #include <inttypes.h>
10 #include <limits.h>
11 #include <stdint.h>
12 
13 #include <arch.h>
14 #include <arch_helpers.h>
15 #include <common/debug.h>
16 #include "gpt_rme_private.h"
17 #include <lib/gpt_rme/gpt_rme.h>
18 #include <lib/smccc.h>
19 #include <lib/spinlock.h>
20 #include <lib/xlat_tables/xlat_tables_v2.h>
21 
22 #if !ENABLE_RME
23 #error "ENABLE_RME must be enabled to use the GPT library."
24 #endif
25 
26 /*
27  * Lookup T from PPS
28  *
29  *   PPS    Size    T
30  *   0b000  4GB     32
31  *   0b001  64GB    36
32  *   0b010  1TB     40
33  *   0b011  4TB     42
34  *   0b100  16TB    44
35  *   0b101  256TB   48
36  *   0b110  4PB     52
37  *
38  * See section 15.1.27 of the RME specification.
39  */
40 static const gpt_t_val_e gpt_t_lookup[] = {PPS_4GB_T, PPS_64GB_T,
41 					   PPS_1TB_T, PPS_4TB_T,
42 					   PPS_16TB_T, PPS_256TB_T,
43 					   PPS_4PB_T};
44 
45 /*
46  * Lookup P from PGS
47  *
48  *   PGS    Size    P
49  *   0b00   4KB     12
50  *   0b10   16KB    14
51  *   0b01   64KB    16
52  *
53  * Note that pgs=0b10 is 16KB and pgs=0b01 is 64KB, this is not a typo.
54  *
55  * See section 15.1.27 of the RME specification.
56  */
57 static const gpt_p_val_e gpt_p_lookup[] = {PGS_4KB_P, PGS_64KB_P, PGS_16KB_P};
58 
59 /*
60  * This structure contains GPT configuration data.
61  */
62 typedef struct {
63 	uintptr_t plat_gpt_l0_base;
64 	gpccr_pps_e pps;
65 	gpt_t_val_e t;
66 	gpccr_pgs_e pgs;
67 	gpt_p_val_e p;
68 } gpt_config_t;
69 
70 static gpt_config_t gpt_config;
71 
72 /* These variables are used during initialization of the L1 tables. */
73 static unsigned int gpt_next_l1_tbl_idx;
74 static uintptr_t gpt_l1_tbl;
75 
76 /*
77  * This function checks to see if a GPI value is valid.
78  *
79  * These are valid GPI values.
80  *   GPT_GPI_NO_ACCESS   U(0x0)
81  *   GPT_GPI_SECURE      U(0x8)
82  *   GPT_GPI_NS          U(0x9)
83  *   GPT_GPI_ROOT        U(0xA)
84  *   GPT_GPI_REALM       U(0xB)
85  *   GPT_GPI_ANY         U(0xF)
86  *
87  * Parameters
88  *   gpi		GPI to check for validity.
89  *
90  * Return
91  *   true for a valid GPI, false for an invalid one.
92  */
gpt_is_gpi_valid(unsigned int gpi)93 static bool gpt_is_gpi_valid(unsigned int gpi)
94 {
95 	if ((gpi == GPT_GPI_NO_ACCESS) || (gpi == GPT_GPI_ANY) ||
96 	    ((gpi >= GPT_GPI_SECURE) && (gpi <= GPT_GPI_REALM))) {
97 		return true;
98 	} else {
99 		return false;
100 	}
101 }
102 
103 /*
104  * This function checks to see if two PAS regions overlap.
105  *
106  * Parameters
107  *   base_1: base address of first PAS
108  *   size_1: size of first PAS
109  *   base_2: base address of second PAS
110  *   size_2: size of second PAS
111  *
112  * Return
113  *   True if PAS regions overlap, false if they do not.
114  */
static bool gpt_check_pas_overlap(uintptr_t base_1, size_t size_1,
				  uintptr_t base_2, size_t size_2)
{
	/*
	 * Two half-open ranges [base, base + size) intersect if and only
	 * if each range starts strictly before the other one ends.
	 */
	uintptr_t end_1 = base_1 + size_1;
	uintptr_t end_2 = base_2 + size_2;

	return (end_1 > base_2) && (end_2 > base_1);
}
124 
125 /*
126  * This helper function checks to see if a PAS region from index 0 to
127  * (pas_idx - 1) occupies the L0 region at index l0_idx in the L0 table.
128  *
129  * Parameters
130  *   l0_idx:      Index of the L0 entry to check
131  *   pas_regions: PAS region array
132  *   pas_idx:     Upper bound of the PAS array index.
133  *
134  * Return
135  *   True if a PAS region occupies the L0 region in question, false if not.
136  */
static bool gpt_does_previous_pas_exist_here(unsigned int l0_idx,
					     pas_region_t *pas_regions,
					     unsigned int pas_idx)
{
	/* Physical range covered by the L0 entry at l0_idx. */
	uintptr_t l0_base = GPT_L0GPTSZ_ACTUAL_SIZE * l0_idx;

	/* Compare that range against every PAS entry before pas_idx. */
	for (unsigned int i = 0U; i < pas_idx; i++) {
		if (gpt_check_pas_overlap(l0_base, GPT_L0GPTSZ_ACTUAL_SIZE,
					  pas_regions[i].base_pa,
					  pas_regions[i].size)) {
			return true;
		}
	}

	return false;
}
151 
152 /*
153  * This function iterates over all of the PAS regions and checks them to ensure
154  * proper alignment of base and size, that the GPI is valid, and that no regions
155  * overlap. As a part of the overlap checks, this function checks existing L0
156  * mappings against the new PAS regions in the event that gpt_init_pas_l1_tables
157  * is called multiple times to place L1 tables in different areas of memory. It
158  * also counts the number of L1 tables needed and returns it on success.
159  *
160  * Parameters
161  *   *pas_regions	Pointer to array of PAS region structures.
162  *   pas_region_cnt	Total number of PAS regions in the array.
163  *
164  * Return
165  *   Negative Linux error code in the event of a failure, number of L1 regions
166  *   required when successful.
167  */
static int gpt_validate_pas_mappings(pas_region_t *pas_regions,
				     unsigned int pas_region_cnt)
{
	unsigned int idx;
	unsigned int l1_cnt = 0U;
	unsigned int pas_l1_cnt;
	uint64_t *l0_desc = (uint64_t *)gpt_config.plat_gpt_l0_base;

	assert(pas_regions != NULL);
	assert(pas_region_cnt != 0U);

	for (idx = 0U; idx < pas_region_cnt; idx++) {
		/* Check for arithmetic overflow in region. */
		if ((ULONG_MAX - pas_regions[idx].base_pa) <
		    pas_regions[idx].size) {
			ERROR("[GPT] Address overflow in PAS[%u]!\n", idx);
			return -EOVERFLOW;
		}

		/*
		 * Initial checks for PAS validity: the region must fit in
		 * the protected physical space and carry a valid GPI.
		 */
		if (((pas_regions[idx].base_pa + pas_regions[idx].size) >
		    GPT_PPS_ACTUAL_SIZE(gpt_config.t)) ||
		    !gpt_is_gpi_valid(GPT_PAS_ATTR_GPI(pas_regions[idx].attrs))) {
			ERROR("[GPT] PAS[%u] is invalid!\n", idx);
			return -EFAULT;
		}

		/*
		 * Make sure this PAS does not overlap with another one. We
		 * start from idx + 1 instead of 0 since prior PAS mappings will
		 * have already checked themselves against this one.
		 */
		for (unsigned int i = idx + 1; i < pas_region_cnt; i++) {
			if (gpt_check_pas_overlap(pas_regions[idx].base_pa,
			    pas_regions[idx].size,
			    pas_regions[i].base_pa,
			    pas_regions[i].size)) {
				ERROR("[GPT] PAS[%u] overlaps with PAS[%u]\n",
					i, idx);
				return -EFAULT;
			}
		}

		/*
		 * Since this function can be called multiple times with
		 * separate L1 tables we need to check the existing L0 mapping
		 * to see if this PAS would fall into one that has already been
		 * initialized.
		 */
		for (unsigned int i = GPT_L0_IDX(pas_regions[idx].base_pa);
		     i <= GPT_L0_IDX(pas_regions[idx].base_pa + pas_regions[idx].size - 1);
		     i++) {
			if ((GPT_L0_TYPE(l0_desc[i]) == GPT_L0_TYPE_BLK_DESC) &&
			    (GPT_L0_BLKD_GPI(l0_desc[i]) == GPT_GPI_ANY)) {
				/* This descriptor is unused so continue. */
				continue;
			}

			/*
			 * This descriptor has been initialized in a previous
			 * call to this function so cannot be initialized again.
			 * Use %u here: i is unsigned.
			 */
			ERROR("[GPT] PAS[%u] overlaps with previous L0[%u]!\n",
			      idx, i);
			return -EFAULT;
		}

		/* Check for block mapping (L0) type. */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
			/* Make sure base and size are block-aligned. */
			if (!GPT_IS_L0_ALIGNED(pas_regions[idx].base_pa) ||
			    !GPT_IS_L0_ALIGNED(pas_regions[idx].size)) {
				ERROR("[GPT] PAS[%u] is not block-aligned!\n",
				      idx);
				return -EFAULT;
			}

			continue;
		}

		/* Check for granule mapping (L1) type. */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_GRANULE) {
			/* Make sure base and size are granule-aligned. */
			if (!GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].base_pa) ||
			    !GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].size)) {
				ERROR("[GPT] PAS[%u] is not granule-aligned!\n",
				      idx);
				return -EFAULT;
			}

			/* Find how many L1 tables this PAS occupies. */
			pas_l1_cnt = (GPT_L0_IDX(pas_regions[idx].base_pa +
				     pas_regions[idx].size - 1) -
				     GPT_L0_IDX(pas_regions[idx].base_pa) + 1);

			/*
			 * This creates a situation where, if multiple PAS
			 * regions occupy the same table descriptor, we can get
			 * an artificially high total L1 table count. The way we
			 * handle this is by checking each PAS against those
			 * before it in the array, and if they both occupy the
			 * same PAS we subtract from pas_l1_cnt and only the
			 * first PAS in the array gets to count it.
			 */

			/*
			 * If L1 count is greater than 1 we know the start and
			 * end PAs are in different L0 regions so we must check
			 * both for overlap against other PAS.
			 */
			if (pas_l1_cnt > 1) {
				if (gpt_does_previous_pas_exist_here(
				    GPT_L0_IDX(pas_regions[idx].base_pa +
				    pas_regions[idx].size - 1),
				    pas_regions, idx)) {
					pas_l1_cnt = pas_l1_cnt - 1;
				}
			}

			if (gpt_does_previous_pas_exist_here(
			    GPT_L0_IDX(pas_regions[idx].base_pa),
			    pas_regions, idx)) {
				pas_l1_cnt = pas_l1_cnt - 1;
			}

			l1_cnt += pas_l1_cnt;
			continue;
		}

		/* If execution reaches this point, mapping type is invalid. */
		ERROR("[GPT] PAS[%u] has invalid mapping type 0x%x.\n", idx,
		      GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));
		return -EINVAL;
	}

	return l1_cnt;
}
307 
308 /*
309  * This function validates L0 initialization parameters.
310  *
 * Parameters
 *   pps		PPS value selecting the protected physical address size.
 *   l0_mem_base	Base address of memory used for L0 tables.
 *   l0_mem_size	Size of memory available for L0 tables.
314  *
315  * Return
316  *   Negative Linux error code in the event of a failure, 0 for success.
317  */
static int gpt_validate_l0_params(gpccr_pps_e pps, uintptr_t l0_mem_base,
				  size_t l0_mem_size)
{
	size_t l0_alignment;

	/*
	 * Make sure PPS is valid and then store it since macros need this value
	 * to work.
	 */
	if (pps > GPT_PPS_MAX) {
		ERROR("[GPT] Invalid PPS: 0x%x\n", pps);
		return -EINVAL;
	}
	gpt_config.pps = pps;
	gpt_config.t = gpt_t_lookup[pps];

	/* Alignment must be the greater of 4k or l0 table size. */
	l0_alignment = PAGE_SIZE_4KB;
	if (l0_alignment < GPT_L0_TABLE_SIZE(gpt_config.t)) {
		l0_alignment = GPT_L0_TABLE_SIZE(gpt_config.t);
	}

	/* Check base address: must be non-NULL and suitably aligned. */
	if ((l0_mem_base == 0U) || ((l0_mem_base & (l0_alignment - 1)) != 0U)) {
		ERROR("[GPT] Invalid L0 base address: 0x%lx\n", l0_mem_base);
		return -EFAULT;
	}

	/*
	 * Check size. Note: error message previously carried a stray
	 * unbalanced ')'; fixed here.
	 */
	if (l0_mem_size < GPT_L0_TABLE_SIZE(gpt_config.t)) {
		ERROR("[GPT] Inadequate L0 memory: need 0x%lx, have 0x%lx\n",
		      GPT_L0_TABLE_SIZE(gpt_config.t),
		      l0_mem_size);
		return -ENOMEM;
	}

	return 0;
}
356 
357 /*
358  * In the event that L1 tables are needed, this function validates
359  * the L1 table generation parameters.
360  *
361  * Parameters
362  *   l1_mem_base	Base address of memory used for L1 table allocation.
363  *   l1_mem_size	Total size of memory available for L1 tables.
364  *   l1_gpt_cnt		Number of L1 tables needed.
365  *
366  * Return
367  *   Negative Linux error code in the event of a failure, 0 for success.
368  */
gpt_validate_l1_params(uintptr_t l1_mem_base,size_t l1_mem_size,unsigned int l1_gpt_cnt)369 static int gpt_validate_l1_params(uintptr_t l1_mem_base, size_t l1_mem_size,
370 				  unsigned int l1_gpt_cnt)
371 {
372 	size_t l1_gpt_mem_sz;
373 
374 	/* Check if the granularity is supported */
375 	if (!xlat_arch_is_granule_size_supported(
376 	    GPT_PGS_ACTUAL_SIZE(gpt_config.p))) {
377 		return -EPERM;
378 	}
379 
380 	/* Make sure L1 tables are aligned to their size. */
381 	if ((l1_mem_base & (GPT_L1_TABLE_SIZE(gpt_config.p) - 1)) != 0U) {
382 		ERROR("[GPT] Unaligned L1 GPT base address: 0x%lx\n",
383 		      l1_mem_base);
384 		return -EFAULT;
385 	}
386 
387 	/* Get total memory needed for L1 tables. */
388 	l1_gpt_mem_sz = l1_gpt_cnt * GPT_L1_TABLE_SIZE(gpt_config.p);
389 
390 	/* Check for overflow. */
391 	if ((l1_gpt_mem_sz / GPT_L1_TABLE_SIZE(gpt_config.p)) != l1_gpt_cnt) {
392 		ERROR("[GPT] Overflow calculating L1 memory size.\n");
393 		return -ENOMEM;
394 	}
395 
396 	/* Make sure enough space was supplied. */
397 	if (l1_mem_size < l1_gpt_mem_sz) {
398 		ERROR("[GPT] Inadequate memory for L1 GPTs. ");
399 		ERROR("      Expected 0x%lx bytes. Got 0x%lx bytes\n",
400 		      l1_gpt_mem_sz, l1_mem_size);
401 		return -ENOMEM;
402 	}
403 
404 	VERBOSE("[GPT] Requested 0x%lx bytes for L1 GPTs.\n", l1_gpt_mem_sz);
405 	return 0;
406 }
407 
408 /*
409  * This function initializes L0 block descriptors (regions that cannot be
410  * transitioned at the granule level) according to the provided PAS.
411  *
412  * Parameters
413  *   *pas		Pointer to the structure defining the PAS region to
414  *			initialize.
415  */
static void gpt_generate_l0_blk_desc(pas_region_t *pas)
{
	uint64_t *l0_gpt_arr;
	uint64_t gpt_desc;
	unsigned int idx;
	unsigned int end_idx;

	assert(gpt_config.plat_gpt_l0_base != 0U);
	assert(pas != NULL);

	/* PAS parameters were already vetted by gpt_validate_pas_mappings. */
	l0_gpt_arr = (uint64_t *)gpt_config.plat_gpt_l0_base;

	/* Block descriptor carrying this PAS region's GPI. */
	gpt_desc = GPT_L0_BLK_DESC(GPT_PAS_ATTR_GPI(pas->attrs));

	/* First and one-past-last L0 indices covered by this region. */
	idx = pas->base_pa >> GPT_L0_IDX_SHIFT;
	end_idx = (pas->base_pa + pas->size) >> GPT_L0_IDX_SHIFT;

	/* Populate every covered L0 entry with the block descriptor. */
	while (idx < end_idx) {
		l0_gpt_arr[idx] = gpt_desc;
		VERBOSE("[GPT] L0 entry (BLOCK) index %u [%p]: GPI = 0x%" PRIx64 " (0x%" PRIx64 ")\n",
			idx, &l0_gpt_arr[idx],
			(gpt_desc >> GPT_L0_BLK_DESC_GPI_SHIFT) &
			GPT_L0_BLK_DESC_GPI_MASK, l0_gpt_arr[idx]);
		idx++;
	}
}
455 
456 /*
457  * Helper function to determine if the end physical address lies in the same L0
458  * region as the current physical address. If true, the end physical address is
459  * returned else, the start address of the next region is returned.
460  *
461  * Parameters
462  *   cur_pa		Physical address of the current PA in the loop through
463  *			the range.
464  *   end_pa		Physical address of the end PA in a PAS range.
465  *
466  * Return
467  *   The PA of the end of the current range.
468  */
gpt_get_l1_end_pa(uintptr_t cur_pa,uintptr_t end_pa)469 static uintptr_t gpt_get_l1_end_pa(uintptr_t cur_pa, uintptr_t end_pa)
470 {
471 	uintptr_t cur_idx;
472 	uintptr_t end_idx;
473 
474 	cur_idx = cur_pa >> GPT_L0_IDX_SHIFT;
475 	end_idx = end_pa >> GPT_L0_IDX_SHIFT;
476 
477 	assert(cur_idx <= end_idx);
478 
479 	if (cur_idx == end_idx) {
480 		return end_pa;
481 	}
482 
483 	return (cur_idx + 1U) << GPT_L0_IDX_SHIFT;
484 }
485 
486 /*
487  * Helper function to fill out GPI entries in a single L1 table. This function
488  * fills out entire L1 descriptors at a time to save memory writes.
489  *
490  * Parameters
491  *   gpi		GPI to set this range to
492  *   l1			Pointer to L1 table to fill out
493  *   first		Address of first granule in range.
494  *   last		Address of last granule in range (inclusive).
495  */
static void gpt_fill_l1_tbl(uint64_t gpi, uint64_t *l1, uintptr_t first,
			    uintptr_t last)
{
	/* Descriptor value with the target GPI replicated in every slot. */
	uint64_t gpi_field = GPT_BUILD_L1_DESC(gpi);
	/* Mask selecting which 4-bit GPI slots of an entry get written. */
	uint64_t gpi_mask = 0xFFFFFFFFFFFFFFFF;

	assert(first <= last);
	/* Both ends must be granule-aligned. */
	assert((first & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) == 0U);
	assert((last & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) == 0U);
	/* The whole range must be covered by a single L1 table. */
	assert(GPT_L0_IDX(first) == GPT_L0_IDX(last));
	assert(l1 != NULL);

	/* Shift the mask if we're starting in the middle of an L1 entry. */
	gpi_mask = gpi_mask << (GPT_L1_GPI_IDX(gpt_config.p, first) << 2);

	/* Fill out each L1 entry for this region. */
	for (unsigned int i = GPT_L1_IDX(gpt_config.p, first);
	     i <= GPT_L1_IDX(gpt_config.p, last); i++) {
		/*
		 * Account for stopping in the middle of an L1 entry: clear
		 * the mask bits above the last GPI slot (slots are 4 bits
		 * wide, hence the "<< 2" scaling).
		 */
		if (i == GPT_L1_IDX(gpt_config.p, last)) {
			gpi_mask &= (gpi_mask >> ((15 -
				    GPT_L1_GPI_IDX(gpt_config.p, last)) << 2));
		}

		/*
		 * Write GPI values. The selected slots must still hold
		 * GPT_GPI_ANY, i.e. not yet claimed by an earlier PAS.
		 */
		assert((l1[i] & gpi_mask) ==
		       (GPT_BUILD_L1_DESC(GPT_GPI_ANY) & gpi_mask));
		l1[i] = (l1[i] & ~gpi_mask) | (gpi_mask & gpi_field);

		/* Reset mask so interior entries are written in full. */
		gpi_mask = 0xFFFFFFFFFFFFFFFF;
	}
}
529 
530 /*
531  * This function finds the next available unused L1 table and initializes all
532  * granules descriptor entries to GPI_ANY. This ensures that there are no chunks
533  * of GPI_NO_ACCESS (0b0000) memory floating around in the system in the
534  * event that a PAS region stops midway through an L1 table, thus guaranteeing
535  * that all memory not explicitly assigned is GPI_ANY. This function does not
536  * check for overflow conditions, that should be done by the caller.
537  *
538  * Return
539  *   Pointer to the next available L1 table.
540  */
gpt_get_new_l1_tbl(void)541 static uint64_t *gpt_get_new_l1_tbl(void)
542 {
543 	/* Retrieve the next L1 table. */
544 	uint64_t *l1 = (uint64_t *)((uint64_t)(gpt_l1_tbl) +
545 		       (GPT_L1_TABLE_SIZE(gpt_config.p) *
546 		       gpt_next_l1_tbl_idx));
547 
548 	/* Increment L1 counter. */
549 	gpt_next_l1_tbl_idx++;
550 
551 	/* Initialize all GPIs to GPT_GPI_ANY */
552 	for (unsigned int i = 0U; i < GPT_L1_ENTRY_COUNT(gpt_config.p); i++) {
553 		l1[i] = GPT_BUILD_L1_DESC(GPT_GPI_ANY);
554 	}
555 
556 	return l1;
557 }
558 
559 /*
560  * When L1 tables are needed, this function creates the necessary L0 table
561  * descriptors and fills out the L1 table entries according to the supplied
562  * PAS range.
563  *
564  * Parameters
565  *   *pas		Pointer to the structure defining the PAS region.
566  */
static void gpt_generate_l0_tbl_desc(pas_region_t *pas)
{
	uintptr_t end_pa;
	uintptr_t cur_pa;
	uintptr_t last_gran_pa;
	uint64_t *l0_gpt_base;
	uint64_t *l1_gpt_arr;
	unsigned int l0_idx;

	assert(gpt_config.plat_gpt_l0_base != 0U);
	assert(pas != NULL);

	/*
	 * Checking of PAS parameters has already been done in
	 * gpt_validate_pas_mappings so no need to check the same things again.
	 */

	/* end_pa is exclusive: the first address past the PAS region. */
	end_pa = pas->base_pa + pas->size;
	l0_gpt_base = (uint64_t *)gpt_config.plat_gpt_l0_base;

	/* We start working from the granule at base PA */
	cur_pa = pas->base_pa;

	/* Iterate over each L0 region in this memory range. */
	for (l0_idx = GPT_L0_IDX(pas->base_pa);
	     l0_idx <= GPT_L0_IDX(end_pa - 1U);
	     l0_idx++) {

		/*
		 * See if the L0 entry is already a table descriptor or if we
		 * need to create one.
		 */
		if (GPT_L0_TYPE(l0_gpt_base[l0_idx]) == GPT_L0_TYPE_TBL_DESC) {
			/* Get the L1 array from the L0 entry. */
			l1_gpt_arr = GPT_L0_TBLD_ADDR(l0_gpt_base[l0_idx]);
		} else {
			/* Get a new L1 table from the L1 memory space. */
			l1_gpt_arr = gpt_get_new_l1_tbl();

			/*
			 * Fill out the L0 descriptor; the modified L0 range
			 * is flushed later by flush_l0_for_pas_array().
			 */
			l0_gpt_base[l0_idx] = GPT_L0_TBL_DESC(l1_gpt_arr);
		}

		VERBOSE("[GPT] L0 entry (TABLE) index %u [%p] ==> L1 Addr 0x%llx (0x%" PRIx64 ")\n",
			l0_idx, &l0_gpt_base[l0_idx],
			(unsigned long long)(l1_gpt_arr),
			l0_gpt_base[l0_idx]);

		/*
		 * Determine the PA of the last granule in this L0 descriptor:
		 * gpt_get_l1_end_pa() yields the exclusive end of the current
		 * chunk, so step back one granule.
		 */
		last_gran_pa = gpt_get_l1_end_pa(cur_pa, end_pa) -
			       GPT_PGS_ACTUAL_SIZE(gpt_config.p);

		/*
		 * Fill up L1 GPT entries between these two addresses. This
		 * function needs the addresses of the first granule and last
		 * granule in the range.
		 */
		gpt_fill_l1_tbl(GPT_PAS_ATTR_GPI(pas->attrs), l1_gpt_arr,
				cur_pa, last_gran_pa);

		/* Advance cur_pa to first granule in next L0 region. */
		cur_pa = gpt_get_l1_end_pa(cur_pa, end_pa);
	}
}
633 
634 /*
635  * This function flushes a range of L0 descriptors used by a given PAS region
636  * array. There is a chance that some unmodified L0 descriptors would be flushed
637  * in the case that there are "holes" in an array of PAS regions but overall
638  * this should be faster than individually flushing each modified L0 descriptor
639  * as they are created.
640  *
641  * Parameters
642  *   *pas		Pointer to an array of PAS regions.
643  *   pas_count		Number of entries in the PAS array.
644  */
static void flush_l0_for_pas_array(pas_region_t *pas, unsigned int pas_count)
{
	uint64_t *l0 = (uint64_t *)gpt_config.plat_gpt_l0_base;
	unsigned int start_idx;
	unsigned int end_idx;

	assert(pas != NULL);
	assert(pas_count > 0);

	/* Seed the index range with the first PAS entry. */
	start_idx = GPT_L0_IDX(pas[0].base_pa);
	end_idx = GPT_L0_IDX(pas[0].base_pa + pas[0].size - 1);

	/* Widen the range so that it covers every remaining PAS entry. */
	for (unsigned int idx = 1U; idx < pas_count; idx++) {
		unsigned int lo = GPT_L0_IDX(pas[idx].base_pa);
		unsigned int hi = GPT_L0_IDX(pas[idx].base_pa +
					     pas[idx].size - 1);

		if (lo < start_idx) {
			start_idx = lo;
		}
		if (hi > end_idx) {
			end_idx = hi;
		}
	}

	/* Flush the covered span; "+ 1" makes end_idx inclusive. */
	flush_dcache_range((uintptr_t)&l0[start_idx],
			   ((end_idx + 1) - start_idx) * sizeof(uint64_t));
}
676 
677 /*
678  * Public API to enable granule protection checks once the tables have all been
679  * initialized. This function is called at first initialization and then again
680  * later during warm boots of CPU cores.
681  *
682  * Return
683  *   Negative Linux error code in the event of a failure, 0 for success.
684  */
int gpt_enable(void)
{
	u_register_t gpccr_el3;

	/*
	 * Granule tables must be initialised before enabling
	 * granule protection.
	 */
	if (gpt_config.plat_gpt_l0_base == 0U) {
		ERROR("[GPT] Tables have not been initialized!\n");
		return -EPERM;
	}

	/* Invalidate any stale TLB entries */
	tlbipaallos();
	dsb();

	/*
	 * Write the base address of the L0 tables into GPTBR. The stored
	 * address is pre-shifted into the BADDR field format.
	 */
	write_gptbr_el3(((gpt_config.plat_gpt_l0_base >> GPTBR_BADDR_VAL_SHIFT)
			>> GPTBR_BADDR_SHIFT) & GPTBR_BADDR_MASK);

	/* GPCCR_EL3.PPS: protected physical address size. */
	gpccr_el3 = SET_GPCCR_PPS(gpt_config.pps);

	/* GPCCR_EL3.PGS: physical granule size. */
	gpccr_el3 |= SET_GPCCR_PGS(gpt_config.pgs);

	/*
	 * Since EL3 maps the L1 region as Inner shareable, use the same
	 * shareability attribute for GPC as well so that
	 * GPC fetches are visible to PEs
	 */
	gpccr_el3 |= SET_GPCCR_SH(GPCCR_SH_IS);

	/* Outer and Inner cacheability set to Normal memory, WB, RA, WA. */
	gpccr_el3 |= SET_GPCCR_ORGN(GPCCR_ORGN_WB_RA_WA);
	gpccr_el3 |= SET_GPCCR_IRGN(GPCCR_IRGN_WB_RA_WA);

	/* Enable GPT */
	gpccr_el3 |= GPCCR_GPC_BIT;

	/* TODO: Configure GPCCR_EL3_GPCP for Fault control. */
	write_gpccr_el3(gpccr_el3);
	isb();
	/* Invalidate again after enabling, then synchronize. */
	tlbipaallos();
	dsb();
	isb();

	return 0;
}
735 
736 /*
737  * Public API to disable granule protection checks.
738  */
gpt_disable(void)739 void gpt_disable(void)
740 {
741 	u_register_t gpccr_el3 = read_gpccr_el3();
742 
743 	write_gpccr_el3(gpccr_el3 & ~GPCCR_GPC_BIT);
744 	dsbsy();
745 	isb();
746 }
747 
748 /*
749  * Public API that initializes the entire protected space to GPT_GPI_ANY using
750  * the L0 tables (block descriptors). Ideally, this function is invoked prior
751  * to DDR discovery and initialization. The MMU must be initialized before
752  * calling this function.
753  *
754  * Parameters
755  *   pps		PPS value to use for table generation
756  *   l0_mem_base	Base address of L0 tables in memory.
757  *   l0_mem_size	Total size of memory available for L0 tables.
758  *
759  * Return
760  *   Negative Linux error code in the event of a failure, 0 for success.
761  */
gpt_init_l0_tables(unsigned int pps,uintptr_t l0_mem_base,size_t l0_mem_size)762 int gpt_init_l0_tables(unsigned int pps, uintptr_t l0_mem_base,
763 		       size_t l0_mem_size)
764 {
765 	int ret;
766 	uint64_t gpt_desc;
767 
768 	/* Ensure that MMU and Data caches are enabled. */
769 	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);
770 
771 	/* Validate other parameters. */
772 	ret = gpt_validate_l0_params(pps, l0_mem_base, l0_mem_size);
773 	if (ret < 0) {
774 		return ret;
775 	}
776 
777 	/* Create the descriptor to initialize L0 entries with. */
778 	gpt_desc = GPT_L0_BLK_DESC(GPT_GPI_ANY);
779 
780 	/* Iterate through all L0 entries */
781 	for (unsigned int i = 0U; i < GPT_L0_REGION_COUNT(gpt_config.t); i++) {
782 		((uint64_t *)l0_mem_base)[i] = gpt_desc;
783 	}
784 
785 	/* Flush updated L0 tables to memory. */
786 	flush_dcache_range((uintptr_t)l0_mem_base,
787 			   (size_t)GPT_L0_TABLE_SIZE(gpt_config.t));
788 
789 	/* Stash the L0 base address once initial setup is complete. */
790 	gpt_config.plat_gpt_l0_base = l0_mem_base;
791 
792 	return 0;
793 }
794 
795 /*
796  * Public API that carves out PAS regions from the L0 tables and builds any L1
797  * tables that are needed. This function ideally is run after DDR discovery and
798  * initialization. The L0 tables must have already been initialized to GPI_ANY
799  * when this function is called.
800  *
801  * This function can be called multiple times with different L1 memory ranges
802  * and PAS regions if it is desirable to place L1 tables in different locations
803  * in memory. (ex: you have multiple DDR banks and want to place the L1 tables
804  * in the DDR bank that they control)
805  *
806  * Parameters
807  *   pgs		PGS value to use for table generation.
808  *   l1_mem_base	Base address of memory used for L1 tables.
809  *   l1_mem_size	Total size of memory available for L1 tables.
810  *   *pas_regions	Pointer to PAS regions structure array.
811  *   pas_count		Total number of PAS regions.
812  *
813  * Return
814  *   Negative Linux error code in the event of a failure, 0 for success.
815  */
int gpt_init_pas_l1_tables(gpccr_pgs_e pgs, uintptr_t l1_mem_base,
			   size_t l1_mem_size, pas_region_t *pas_regions,
			   unsigned int pas_count)
{
	int ret;
	int l1_gpt_cnt;

	/* Ensure that MMU and Data caches are enabled. */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);

	/* PGS is needed for gpt_validate_pas_mappings so check it now. */
	if (pgs > GPT_PGS_MAX) {
		ERROR("[GPT] Invalid PGS: 0x%x\n", pgs);
		return -EINVAL;
	}
	gpt_config.pgs = pgs;
	gpt_config.p = gpt_p_lookup[pgs];

	/* Make sure L0 tables have been initialized. */
	if (gpt_config.plat_gpt_l0_base == 0U) {
		ERROR("[GPT] L0 tables must be initialized first!\n");
		return -EPERM;
	}

	/*
	 * Check if L1 GPTs are required and how many. A negative value is
	 * an error code from validation; zero means only block (L0)
	 * mappings were requested.
	 */
	l1_gpt_cnt = gpt_validate_pas_mappings(pas_regions, pas_count);
	if (l1_gpt_cnt < 0) {
		return l1_gpt_cnt;
	}

	VERBOSE("[GPT] %u L1 GPTs requested.\n", l1_gpt_cnt);

	/* If L1 tables are needed then validate the L1 parameters. */
	if (l1_gpt_cnt > 0) {
		ret = gpt_validate_l1_params(l1_mem_base, l1_mem_size,
		      l1_gpt_cnt);
		if (ret < 0) {
			return ret;
		}

		/* Set up parameters for L1 table generation. */
		gpt_l1_tbl = l1_mem_base;
		gpt_next_l1_tbl_idx = 0U;
	}

	INFO("[GPT] Boot Configuration\n");
	INFO("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
	INFO("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
	INFO("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
	INFO("  PAS count: 0x%x\n", pas_count);
	INFO("  L0 base:   0x%lx\n", gpt_config.plat_gpt_l0_base);

	/* Generate the tables in memory. */
	for (unsigned int idx = 0U; idx < pas_count; idx++) {
		INFO("[GPT] PAS[%u]: base 0x%lx, size 0x%lx, GPI 0x%x, type 0x%x\n",
		     idx, pas_regions[idx].base_pa, pas_regions[idx].size,
		     GPT_PAS_ATTR_GPI(pas_regions[idx].attrs),
		     GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));

		/* Check if a block or table descriptor is required */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
			gpt_generate_l0_blk_desc(&pas_regions[idx]);

		} else {
			gpt_generate_l0_tbl_desc(&pas_regions[idx]);
		}
	}

	/* Flush modified L0 tables. */
	flush_l0_for_pas_array(pas_regions, pas_count);

	/* Flush L1 tables if needed. */
	if (l1_gpt_cnt > 0) {
		flush_dcache_range(l1_mem_base,
				   GPT_L1_TABLE_SIZE(gpt_config.p) *
				   l1_gpt_cnt);
	}

	/* Make sure that all the entries are written to the memory. */
	dsbishst();
	tlbipaallos();
	dsb();
	isb();

	return 0;
}
903 
904 /*
905  * Public API to initialize the runtime gpt_config structure based on the values
906  * present in the GPTBR_EL3 and GPCCR_EL3 registers. GPT initialization
907  * typically happens in a bootloader stage prior to setting up the EL3 runtime
908  * environment for the granule transition service so this function detects the
909  * initialization from a previous stage. Granule protection checks must be
910  * enabled already or this function will return an error.
911  *
912  * Return
913  *   Negative Linux error code in the event of a failure, 0 for success.
914  */
gpt_runtime_init(void)915 int gpt_runtime_init(void)
916 {
917 	u_register_t reg;
918 
919 	/* Ensure that MMU and Data caches are enabled. */
920 	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);
921 
922 	/* Ensure GPC are already enabled. */
923 	if ((read_gpccr_el3() & GPCCR_GPC_BIT) == 0U) {
924 		ERROR("[GPT] Granule protection checks are not enabled!\n");
925 		return -EPERM;
926 	}
927 
928 	/*
929 	 * Read the L0 table address from GPTBR, we don't need the L1 base
930 	 * address since those are included in the L0 tables as needed.
931 	 */
932 	reg = read_gptbr_el3();
933 	gpt_config.plat_gpt_l0_base = ((reg >> GPTBR_BADDR_SHIFT) &
934 				      GPTBR_BADDR_MASK) <<
935 				      GPTBR_BADDR_VAL_SHIFT;
936 
937 	/* Read GPCCR to get PGS and PPS values. */
938 	reg = read_gpccr_el3();
939 	gpt_config.pps = (reg >> GPCCR_PPS_SHIFT) & GPCCR_PPS_MASK;
940 	gpt_config.t = gpt_t_lookup[gpt_config.pps];
941 	gpt_config.pgs = (reg >> GPCCR_PGS_SHIFT) & GPCCR_PGS_MASK;
942 	gpt_config.p = gpt_p_lookup[gpt_config.pgs];
943 
944 	VERBOSE("[GPT] Runtime Configuration\n");
945 	VERBOSE("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
946 	VERBOSE("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
947 	VERBOSE("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
948 	VERBOSE("  L0 base:   0x%lx\n", gpt_config.plat_gpt_l0_base);
949 
950 	return 0;
951 }
952 
953 /*
954  * The L1 descriptors are protected by a spinlock to ensure that multiple
955  * CPUs do not attempt to change the descriptors at once. In the future it
956  * would be better to have separate spinlocks for each L1 descriptor.
957  */
958 static spinlock_t gpt_lock;
959 
960 /*
961  * Check if caller is allowed to transition a PAS.
962  *
963  * - Secure world caller can only request S <-> NS transitions on a
964  *   granule that is already in either S or NS PAS.
965  *
966  * - Realm world caller can only request R <-> NS transitions on a
967  *   granule that is already in either R or NS PAS.
968  *
969  * Parameters
970  *   src_sec_state	Security state of the caller.
971  *   current_gpi	Current GPI of the granule.
972  *   target_gpi		Requested new GPI for the granule.
973  *
974  * Return
975  *   Negative Linux error code in the event of a failure, 0 for success.
976  */
gpt_check_transition_gpi(unsigned int src_sec_state,unsigned int current_gpi,unsigned int target_gpi)977 static int gpt_check_transition_gpi(unsigned int src_sec_state,
978 				    unsigned int current_gpi,
979 				    unsigned int target_gpi)
980 {
981 	unsigned int check_gpi;
982 
983 	/* Cannot transition a granule to the state it is already in. */
984 	if (current_gpi == target_gpi) {
985 		return -EINVAL;
986 	}
987 
988 	/* Check security state, only secure and realm can transition. */
989 	if (src_sec_state == SMC_FROM_REALM) {
990 		check_gpi = GPT_GPI_REALM;
991 	} else if (src_sec_state == SMC_FROM_SECURE) {
992 		check_gpi = GPT_GPI_SECURE;
993 	} else {
994 		return -EINVAL;
995 	}
996 
997 	/* Make sure security state is allowed to make the transition. */
998 	if ((target_gpi != check_gpi) && (target_gpi != GPT_GPI_NS)) {
999 		return -EINVAL;
1000 	}
1001 	if ((current_gpi != check_gpi) && (current_gpi != GPT_GPI_NS)) {
1002 		return -EINVAL;
1003 	}
1004 
1005 	return 0;
1006 }
1007 
1008 /*
1009  * This function is the core of the granule transition service. When a granule
1010  * transition request occurs it is routed to this function where the request is
1011  * validated then fulfilled if possible.
1012  *
1013  * TODO: implement support for transitioning multiple granules at once.
1014  *
1015  * Parameters
1016  *   base		Base address of the region to transition, must be
1017  *			aligned to granule size.
1018  *   size		Size of region to transition, must be aligned to granule
1019  *			size.
1020  *   src_sec_state	Security state of the caller.
1021  *   target_pas		Target PAS of the specified memory region.
1022  *
1023  * Return
1024  *    Negative Linux error code in the event of a failure, 0 for success.
1025  */
int gpt_transition_pas(uint64_t base, size_t size, unsigned int src_sec_state,
	unsigned int target_pas)
{
	int idx;
	unsigned int gpi_shift;
	unsigned int gpi;
	uint64_t gpt_l0_desc;
	uint64_t gpt_l1_desc;
	uint64_t *gpt_l1_addr;
	uint64_t *gpt_l0_base;

	/* Ensure that the tables have been set up before taking requests. */
	assert(gpt_config.plat_gpt_l0_base != 0U);

	/* Ensure that MMU and data caches are enabled. */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);

	/* Check for address range overflow. */
	if ((ULONG_MAX - base) < size) {
		VERBOSE("[GPT] Transition request address overflow!\n");
		VERBOSE("      Base=0x%" PRIx64 "\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	/*
	 * Make sure base and size are granule-aligned, non-zero, and that
	 * the region lies within the protected physical address space.
	 *
	 * NOTE(review): the ">=" comparison also rejects a region whose
	 * exclusive end address equals GPT_PPS_ACTUAL_SIZE exactly (i.e. the
	 * last granule of the PPS); confirm whether ">" was intended.
	 */
	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0U) ||
	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0U) ||
	    (size == 0U) ||
	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
		VERBOSE("[GPT] Invalid granule transition address range!\n");
		VERBOSE("      Base=0x%" PRIx64 "\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	/* See if this is a single granule transition or a range of granules. */
	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
		/*
		 * TODO: Add support for transitioning multiple granules with a
		 * single call to this function.
		 */
		panic();
	}

	/* Get the L0 descriptor and make sure it is for a table. */
	gpt_l0_base = (uint64_t *)gpt_config.plat_gpt_l0_base;
	gpt_l0_desc = gpt_l0_base[GPT_L0_IDX(base)];
	if (GPT_L0_TYPE(gpt_l0_desc) != GPT_L0_TYPE_TBL_DESC) {
		VERBOSE("[GPT] Granule is not covered by a table descriptor!\n");
		VERBOSE("      Base=0x%" PRIx64 "\n", base);
		return -EINVAL;
	}

	/*
	 * Locate the L1 table through the L0 table descriptor, then derive
	 * the L1 entry index and the bit position of this granule's 4-bit
	 * GPI field within that 64-bit entry (16 GPIs per entry).
	 */
	gpt_l1_addr = GPT_L0_TBLD_ADDR(gpt_l0_desc);
	idx = GPT_L1_IDX(gpt_config.p, base);
	gpi_shift = GPT_L1_GPI_IDX(gpt_config.p, base) << 2;

	/*
	 * Access to L1 tables is controlled by a global lock to ensure
	 * that no more than one CPU is allowed to make changes at any
	 * given time.
	 */
	spin_lock(&gpt_lock);
	gpt_l1_desc = gpt_l1_addr[idx];
	gpi = (gpt_l1_desc >> gpi_shift) & GPT_L1_GRAN_DESC_GPI_MASK;

	/* Make sure caller state and source/target PAS are allowed. */
	if (gpt_check_transition_gpi(src_sec_state, gpi, target_pas) < 0) {
		spin_unlock(&gpt_lock);
		VERBOSE("[GPT] Invalid caller state and PAS combo!\n");
		VERBOSE("      Caller: %u, Current GPI: %u, Target GPI: %u\n",
			src_sec_state, gpi, target_pas);
		return -EPERM;
	}

	/* Clear existing GPI encoding and transition granule. */
	gpt_l1_desc &= ~(GPT_L1_GRAN_DESC_GPI_MASK << gpi_shift);
	gpt_l1_desc |= ((uint64_t)target_pas << gpi_shift);
	gpt_l1_addr[idx] = gpt_l1_desc;

	/* Ensure that the write operation will be observed by GPC */
	dsbishst();

	/* Unlock access to the L1 tables. */
	spin_unlock(&gpt_lock);

	/*
	 * Invalidate any TLB entries caching stale GPT information for this
	 * granule, then order the invalidation before returning.
	 */
	gpt_tlbi_by_pa(base, GPT_PGS_ACTUAL_SIZE(gpt_config.p));
	dsbishst();
	/*
	 * The isb() will be done as part of context
	 * synchronization when returning to lower EL
	 */
	VERBOSE("[GPT] Granule 0x%" PRIx64 ", GPI 0x%x->0x%x\n", base, gpi,
		target_pas);

	return 0;
}
1125