/*
 * Copyright 2018-2021 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */
7
8 #include <assert.h>
9
10 #include <arch.h>
11 #include <arch_helpers.h>
12 #include <common/debug.h>
13 #include <lib/mmio.h>
14 #include <lib/xlat_tables/xlat_tables_v2.h>
15 #include <mmu_def.h>
16 #include <plat/common/platform.h>
17
18 #include "plat_common.h"
19 #include "platform_def.h"
20
21 const mmap_region_t *plat_ls_get_mmap(void);
22
/*
 * Table of memory regions for various BL stages to map using the MMU.
 * This doesn't include Trusted SRAM, as ls_setup_page_tables() already
 * takes care of mapping it.
 *
 * The flash needs to be mapped as writable in order to erase the FIP's Table of
 * Contents in case of unrecoverable error (see plat_error_handler()).
 */
#ifdef IMAGE_BL2
/* BL2: only the CCSR (SoC configuration register) space is needed. */
const mmap_region_t plat_ls_mmap[] = {
	LS_MAP_CCSR,
	{0}			/* zero entry terminates the list */
};
#endif

#ifdef IMAGE_BL31
/*
 * BL31: CCSR plus OCRAM, and the DCSR space when the SoC provides
 * NXP_DCSR_ADDR.
 */
const mmap_region_t plat_ls_mmap[] = {
	LS_MAP_CCSR,
#ifdef NXP_DCSR_ADDR
	LS_MAP_DCSR,
#endif
	LS_MAP_OCRAM,
	{0}			/* zero entry terminates the list */
};
#endif
#ifdef IMAGE_BL32
/* BL32: CCSR plus the secure memory region the BL32 image runs from. */
const mmap_region_t plat_ls_mmap[] = {
	LS_MAP_CCSR,
	LS_MAP_BL32_SEC_MEM,
	{0}			/* zero entry terminates the list */
};
#endif
55
56 /* Weak definitions may be overridden in specific NXP SoC */
57 #pragma weak plat_get_ns_image_entrypoint
58 #pragma weak plat_ls_get_mmap
59
#if defined(IMAGE_BL31) || !defined(CONFIG_DDR_FIP_IMAGE)
/*
 * Add the DDR regions to the static memory map (identity VA == PA),
 * before the translation tables are generated by ls_setup_page_tables().
 *
 * DRAM region 0 is mapped Non-Secure; the Secure + Secure-Partition-shared
 * carve-out placed immediately above region 0 (at addr + size) is mapped
 * Secure.  Under BL31 the remaining DRAM regions are also mapped
 * Non-Secure.
 */
static void mmap_add_ddr_regions_statically(void)
{
	int i = 0;
	dram_regions_info_t *info_dram_regions = get_dram_regions_info();
	/* MMU map for Non-Secure DRAM Regions */
	VERBOSE("DRAM Region %d: %p - %p\n", i,
			(void *) info_dram_regions->region[i].addr,
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size
				- 1));
	mmap_add_region(info_dram_regions->region[i].addr,
			info_dram_regions->region[i].addr,
			info_dram_regions->region[i].size,
			MT_MEMORY | MT_RW | MT_NS);

	/* MMU map for Secure DDR Region on DRAM-0 */
	/*
	 * NOTE(review): the guard compares the carve-out size against
	 * region 0's size although the carve-out lives above region 0 —
	 * presumably a sanity check that DRAM-0 is populated and large
	 * enough; confirm against the SoC's DDR layout.
	 */
	if (info_dram_regions->region[i].size >
		(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE)) {
		VERBOSE("Secure DRAM Region %d: %p - %p\n", i,
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size
				+ NXP_SECURE_DRAM_SIZE
				+ NXP_SP_SHRD_DRAM_SIZE
				- 1));
		mmap_add_region((info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
				(info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
				(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE),
				MT_MEMORY | MT_RW | MT_SECURE);
	}

#ifdef IMAGE_BL31
	/* Map every further DRAM region; a zero size ends the list early. */
	for (i = 1; i < info_dram_regions->num_dram_regions; i++) {
		if (info_dram_regions->region[i].size == 0)
			break;
		VERBOSE("DRAM Region %d: %p - %p\n", i,
			(void *) info_dram_regions->region[i].addr,
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size
				- 1));
		mmap_add_region(info_dram_regions->region[i].addr,
				info_dram_regions->region[i].addr,
				info_dram_regions->region[i].size,
				MT_MEMORY | MT_RW | MT_NS);
	}
#endif
}
#endif
112
#if defined(PLAT_XLAT_TABLES_DYNAMIC)
/*
 * Map the DDR regions through dynamic xlat-v2 regions (identity VA == PA).
 * Mirrors mmap_add_ddr_regions_statically(), but usable once the MMU is
 * already enabled, since dynamic regions update the live tables.
 */
void mmap_add_ddr_region_dynamically(void)
{
	dram_regions_info_t *dram_info = get_dram_regions_info();

	/* DRAM region 0: always present, mapped Non-Secure. */
	VERBOSE("DRAM Region %d: %p - %p\n", 0,
		(void *) dram_info->region[0].addr,
		(void *) (dram_info->region[0].addr
			+ dram_info->region[0].size - 1));
	mmap_add_dynamic_region(dram_info->region[0].addr,
				dram_info->region[0].addr,
				dram_info->region[0].size,
				MT_MEMORY | MT_RW | MT_NS);

	/*
	 * Secure + SP-shared carve-out immediately above DRAM-0; mapped
	 * Secure only when DRAM-0 is large enough to host it.
	 */
	if (dram_info->region[0].size >
	    (NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE)) {
		VERBOSE("Secure DRAM Region %d: %p - %p\n", 0,
			(void *) (dram_info->region[0].addr
				+ dram_info->region[0].size),
			(void *) (dram_info->region[0].addr
				+ dram_info->region[0].size
				+ NXP_SECURE_DRAM_SIZE
				+ NXP_SP_SHRD_DRAM_SIZE - 1));
		mmap_add_dynamic_region((dram_info->region[0].addr
					+ dram_info->region[0].size),
					(dram_info->region[0].addr
					+ dram_info->region[0].size),
					(NXP_SECURE_DRAM_SIZE
					+ NXP_SP_SHRD_DRAM_SIZE),
					MT_MEMORY | MT_RW | MT_SECURE);
	}

#ifdef IMAGE_BL31
	{
		int idx;

		/* Remaining regions; a zero size terminates the list. */
		for (idx = 1; idx < dram_info->num_dram_regions; idx++) {
			if (dram_info->region[idx].size == 0) {
				break;
			}
			VERBOSE("DRAM Region %d: %p - %p\n", idx,
				(void *) dram_info->region[idx].addr,
				(void *) (dram_info->region[idx].addr
					+ dram_info->region[idx].size - 1));
			mmap_add_dynamic_region(dram_info->region[idx].addr,
						dram_info->region[idx].addr,
						dram_info->region[idx].size,
						MT_MEMORY | MT_RW | MT_NS);
		}
	}
#endif
}
#endif
166
/*
 * Set up the page tables for the generic and platform-specific memory regions.
 * The extents of the generic memory regions are specified by the function
 * arguments and consist of:
 * - Trusted SRAM seen by the BL image;
 * - Code section;
 * - Read-only data section;
 * - Coherent memory region, if applicable.
 *
 * total_base/total_size     : whole memory seen by this BL image, first
 *                             mapped RW Secure so data/BSS are covered.
 * code_start/code_limit     : re-mapped as code (read-only, executable).
 * rodata_start/rodata_limit : re-mapped as read-only data.
 * coh_start/coh_limit       : (USE_COHERENT_MEM builds only) re-mapped as
 *                             Device memory.
 *
 * All mappings are identity (VA == PA).  Ends the mmap phase by building
 * the translation tables with init_xlat_tables().
 */
void ls_setup_page_tables(uintptr_t total_base,
			  size_t total_size,
			  uintptr_t code_start,
			  uintptr_t code_limit,
			  uintptr_t rodata_start,
			  uintptr_t rodata_limit
#if USE_COHERENT_MEM
			  ,
			  uintptr_t coh_start,
			  uintptr_t coh_limit
#endif
			  )
{
	/*
	 * Map the Trusted SRAM with appropriate memory attributes.
	 * Subsequent mappings will adjust the attributes for specific regions.
	 * (Note this log prints the exclusive end, unlike the others below.)
	 */
	VERBOSE("Memory seen by this BL image: %p - %p\n",
		(void *) total_base, (void *) (total_base + total_size));
	mmap_add_region(total_base, total_base,
			total_size,
			MT_MEMORY | MT_RW | MT_SECURE);

	/* Re-map the code section */
	VERBOSE("Code region: %p - %p\n",
		(void *) code_start, (void *) code_limit);
	mmap_add_region(code_start, code_start,
			code_limit - code_start,
			MT_CODE | MT_SECURE);

	/* Re-map the read-only data section */
	VERBOSE("Read-only data region: %p - %p\n",
		(void *) rodata_start, (void *) rodata_limit);
	mmap_add_region(rodata_start, rodata_start,
			rodata_limit - rodata_start,
			MT_RO_DATA | MT_SECURE);

#if USE_COHERENT_MEM
	/* Re-map the coherent memory region as Device memory */
	VERBOSE("Coherent region: %p - %p\n",
		(void *) coh_start, (void *) coh_limit);
	mmap_add_region(coh_start, coh_start,
			coh_limit - coh_start,
			MT_DEVICE | MT_RW | MT_SECURE);
#endif

	/* Now (re-)map the platform-specific memory regions */
	mmap_add(plat_ls_get_mmap());


#if defined(IMAGE_BL31) || !defined(CONFIG_DDR_FIP_IMAGE)
	/* DDR known at this point: include it in the static map */
	mmap_add_ddr_regions_statically();
#endif

	/* Create the page tables to reflect the above mappings */
	init_xlat_tables();
}
233
234 /*******************************************************************************
235 * Returns NXP platform specific memory map regions.
236 ******************************************************************************/
plat_ls_get_mmap(void)237 const mmap_region_t *plat_ls_get_mmap(void)
238 {
239 return plat_ls_mmap;
240 }
241
242 /*
243 * This function get the number of clusters and cores count per cluster
244 * in the SoC.
245 */
get_cluster_info(const struct soc_type * soc_list,uint8_t ps_count,uint8_t * num_clusters,uint8_t * cores_per_cluster)246 void get_cluster_info(const struct soc_type *soc_list, uint8_t ps_count,
247 uint8_t *num_clusters, uint8_t *cores_per_cluster)
248 {
249 const soc_info_t *soc_info = get_soc_info();
250 *num_clusters = NUMBER_OF_CLUSTERS;
251 *cores_per_cluster = CORES_PER_CLUSTER;
252 unsigned int i;
253
254 for (i = 0U; i < ps_count; i++) {
255 if (soc_list[i].version == soc_info->svr_reg.bf_ver.version) {
256 *num_clusters = soc_list[i].num_clusters;
257 *cores_per_cluster = soc_list[i].cores_per_cluster;
258 break;
259 }
260 }
261
262 VERBOSE("NUM of cluster = 0x%x, Cores per cluster = 0x%x\n",
263 *num_clusters, *cores_per_cluster);
264 }
265