/*
 * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/bl31.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/pubsub.h>
#include <lib/gpt_rme/gpt_rme.h>

#include <lib/spinlock.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/gtsi_svc.h>
#include <services/rmi_svc.h>
#include <services/rmmd_svc.h>
#include <smccc_helpers.h>
#include "rmmd_initial_context.h"
#include "rmmd_private.h"

/*******************************************************************************
 * RMM context information.
 ******************************************************************************/
rmmd_rmm_context_t rmm_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * RMM entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *rmm_ep_info;

/*******************************************************************************
 * Static function declaration.
 ******************************************************************************/
static int32_t rmm_init(void);
static uint64_t rmmd_smc_forward(uint32_t smc_fid, uint32_t src_sec_state,
				 uint32_t dst_sec_state, uint64_t x1,
				 uint64_t x2, uint64_t x3, uint64_t x4,
				 void *handle);

/*******************************************************************************
 * This function takes an RMM context pointer and performs a synchronous entry
 * into it.
 ******************************************************************************/
uint64_t rmmd_rmm_sync_entry(rmmd_rmm_context_t *rmm_ctx)
{
	uint64_t rc;

	assert(rmm_ctx != NULL);

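	/* Make the RMM context the Realm world context for this CPU. */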
	cm_set_context(&(rmm_ctx->cpu_ctx), REALM);

	/* Save the current el1/el2 context before loading realm context. */
	cm_el1_sysregs_context_save(NON_SECURE);
	cm_el2_sysregs_context_save(NON_SECURE);

	/* Restore the realm context assigned above */
	cm_el1_sysregs_context_restore(REALM);
	cm_el2_sysregs_context_restore(REALM);
	cm_set_next_eret_context(REALM);

	/* Enter RMM */
	rc = rmmd_rmm_enter(&rmm_ctx->c_rt_ctx);

	/* Save realm context */
	cm_el1_sysregs_context_save(REALM);
	cm_el2_sysregs_context_save(REALM);

	/* Restore the el1/el2 context again. */
	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_el2_sysregs_context_restore(NON_SECURE);

	return rc;
}

/*******************************************************************************
 * This function returns to the place where rmmd_rmm_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void rmmd_rmm_sync_exit(uint64_t rc)
{
	rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];

	/* Get context of the RMM in use by this CPU. */
	assert(cm_get_context(REALM) == &(ctx->cpu_ctx));

	/*
	 * The RMMD must have initiated the original request through a
	 * synchronous entry into RMM. Jump back to the original C runtime
	 * context with the value of rc in x0.
	 */
	rmmd_rmm_exit(ctx->c_rt_ctx, rc);

	panic();
}

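/*******************************************************************************
 * Initialise the EL2 system register context for RMM: SPSR_EL2 and SCTLR_EL2
 * are given sane reset values before the first entry into R-EL2.
 ******************************************************************************/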
static void rmm_el2_context_init(el2_sysregs_t *regs)
{
	regs->ctx_regs[CTX_SPSR_EL2 >> 3] = REALM_SPSR_EL2;
	regs->ctx_regs[CTX_SCTLR_EL2 >> 3] = SCTLR_EL2_RES1;
}

/*******************************************************************************
 * Jump to the RMM for the first time.
 ******************************************************************************/
static int32_t rmm_init(void)
{
	uint64_t rc;

	rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];

	INFO("RMM init start.\n");
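	/*
	 * Keep the context in the RESET state until RMM signals boot
	 * completion through RMI_RMM_REQ_COMPLETE.
	 */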
	ctx->state = RMM_STATE_RESET;

	/* Initialize RMM EL2 context. */
	rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx);

	rc = rmmd_rmm_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("RMM initialisation failed 0x%" PRIx64 "\n", rc);
		panic();
	}

	ctx->state = RMM_STATE_IDLE;
	INFO("RMM init end.\n");

	return 1;
}

/*******************************************************************************
 * Load and read RMM manifest, setup RMM.
 ******************************************************************************/
int rmmd_setup(void)
{
	uint32_t ep_attr;
	unsigned int linear_id = plat_my_core_pos();
	rmmd_rmm_context_t *rmm_ctx = &rmm_context[linear_id];

	/* Make sure RME is supported. */
	assert(get_armv9_2_feat_rme_support() != 0U);

	rmm_ep_info = bl31_plat_get_next_image_ep_info(REALM);
	if (rmm_ep_info == NULL) {
		WARN("No RMM image provided by BL2 boot loader; booting "
		     "device without RMM initialization. SMCs destined for "
		     "RMM will return SMC_UNK\n");
		return -ENOENT;
	}

	/* Under no circumstances will this parameter be 0 */
	assert(rmm_ep_info->pc == RMM_BASE);

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = EP_REALM;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0U) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(rmm_ep_info, PARAM_EP, VERSION_1, ep_attr);
	rmm_ep_info->spsr = SPSR_64(MODE_EL2,
				    MODE_SP_ELX,
				    DISABLE_ALL_EXCEPTIONS);

	/* Initialise RMM context with this entry point information */
	cm_setup_context(&rmm_ctx->cpu_ctx, rmm_ep_info);

	INFO("RMM setup done.\n");

	/* Register init function for deferred init. */
	bl31_register_rmm_init(&rmm_init);

	return 0;
}

/*******************************************************************************
 * Forward SMC to the other security state
 ******************************************************************************/
static uint64_t rmmd_smc_forward(uint32_t smc_fid, uint32_t src_sec_state,
				 uint32_t dst_sec_state, uint64_t x1,
				 uint64_t x2, uint64_t x3, uint64_t x4,
				 void *handle)
{
	/* Save incoming security state */
	cm_el1_sysregs_context_save(src_sec_state);
	cm_el2_sysregs_context_save(src_sec_state);

	/* Restore outgoing security state */
	cm_el1_sysregs_context_restore(dst_sec_state);
	cm_el2_sysregs_context_restore(dst_sec_state);
	cm_set_next_eret_context(dst_sec_state);

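	/*
	 * Return to the destination world with the function ID in x0, the
	 * original arguments in x1-x4, and x5-x7 copied from the caller's
	 * saved GP register context.
	 */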
	SMC_RET8(cm_get_context(dst_sec_state), smc_fid, x1, x2, x3, x4,
		 SMC_GET_GP(handle, CTX_GPREG_X5),
		 SMC_GET_GP(handle, CTX_GPREG_X6),
		 SMC_GET_GP(handle, CTX_GPREG_X7));
}

/*******************************************************************************
 * This function handles all SMCs in the range reserved for RMI. Each call is
 * either forwarded to the other security state or handled by the RMM dispatcher
 ******************************************************************************/
uint64_t rmmd_rmi_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
			  uint64_t x3, uint64_t x4, void *cookie,
			  void *handle, uint64_t flags)
{
	rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];
	uint32_t src_sec_state;

	/* Determine which security state this SMC originated from */
	src_sec_state = caller_sec_state(flags);

	/* RMI must not be invoked by the Secure world */
	if (src_sec_state == SMC_FROM_SECURE) {
		WARN("RMM: RMI invoked by secure world.\n");
		SMC_RET1(handle, SMC_UNK);
	}

	/*
	 * Forward an RMI call from the Normal world to the Realm world as it
	 * is.
	 */
	if (src_sec_state == SMC_FROM_NON_SECURE) {
		VERBOSE("RMM: RMI call from non-secure world.\n");
		return rmmd_smc_forward(smc_fid, NON_SECURE, REALM,
					x1, x2, x3, x4, handle);
	}

	assert(src_sec_state == SMC_FROM_REALM);

	switch (smc_fid) {
	case RMI_RMM_REQ_COMPLETE:
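		/*
		 * The first RMI_RMM_REQ_COMPLETE after boot ends the RMM
		 * initialisation sequence: return to the synchronous entry
		 * made by rmmd_rmm_sync_entry() instead of forwarding the
		 * call to the Normal world.
		 */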
		if (ctx->state == RMM_STATE_RESET) {
			VERBOSE("RMM: running rmmd_rmm_sync_exit\n");
			rmmd_rmm_sync_exit(x1);
		}

		return rmmd_smc_forward(x1, REALM, NON_SECURE,
					x2, x3, x4, 0, handle);

	default:
		WARN("RMM: Unsupported RMM call 0x%08x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}

/*******************************************************************************
 * This cpu has been turned on. Enter RMM to initialise R-EL2. Entry into RMM
 * is done after initialising minimal architectural state that guarantees safe
 * execution.
 ******************************************************************************/
static void *rmmd_cpu_on_finish_handler(const void *arg)
{
	int32_t rc;
	uint32_t linear_id = plat_my_core_pos();
	rmmd_rmm_context_t *ctx = &rmm_context[linear_id];

	ctx->state = RMM_STATE_RESET;

	/* Initialise RMM context with this entry point information */
	cm_setup_context(&ctx->cpu_ctx, rmm_ep_info);

	/* Initialize RMM EL2 context. */
	rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx);

	rc = rmmd_rmm_sync_entry(ctx);
	if (rc != 0) {
		ERROR("RMM initialisation failed (%d) on CPU%u\n", rc,
		      linear_id);
		panic();
	}

	ctx->state = RMM_STATE_IDLE;
	return NULL;
}

/* Subscribe to PSCI CPU on to initialize RMM on secondary cores. */
SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, rmmd_cpu_on_finish_handler);

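/*******************************************************************************
 * Transition a single granule to the target physical address space on behalf
 * of RMM, mapping TF-A error codes onto GTSI return codes.
 ******************************************************************************/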
static int gtsi_transition_granule(uint64_t pa,
				   unsigned int src_sec_state,
				   unsigned int target_pas)
{
	int ret;

	ret = gpt_transition_pas(pa, PAGE_SIZE_4KB, src_sec_state, target_pas);

	/* Convert TF-A error codes into GTSI error codes */
	if (ret == -EINVAL) {
		ERROR("[GTSI] Transition failed: invalid %s\n", "address");
		ERROR("  PA: 0x%" PRIx64 ", SRC: %u, PAS: %u\n", pa,
		      src_sec_state, target_pas);
		ret = GRAN_TRANS_RET_BAD_ADDR;
	} else if (ret == -EPERM) {
		ERROR("[GTSI] Transition failed: invalid %s\n", "caller/PAS");
		ERROR("  PA: 0x%" PRIx64 ", SRC: %u, PAS: %u\n", pa,
		      src_sec_state, target_pas);
		ret = GRAN_TRANS_RET_BAD_PAS;
	}

	return ret;
}

/*******************************************************************************
 * This function handles all SMCs in the range reserved for GTF.
 ******************************************************************************/
uint64_t rmmd_gtsi_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
			   uint64_t x3, uint64_t x4, void *cookie,
			   void *handle, uint64_t flags)
{
	uint32_t src_sec_state;

	/* Determine which security state this SMC originated from */
	src_sec_state = caller_sec_state(flags);

	if (src_sec_state != SMC_FROM_REALM) {
		WARN("RMM: GTF call originated from secure or normal world\n");
		SMC_RET1(handle, SMC_UNK);
	}

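	/* x1 holds the PA of the 4KB granule whose PAS is to be changed. */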
	switch (smc_fid) {
	case SMC_ASC_MARK_REALM:
		SMC_RET1(handle, gtsi_transition_granule(x1, SMC_FROM_REALM,
							 GPT_GPI_REALM));
	case SMC_ASC_MARK_NONSECURE:
		SMC_RET1(handle, gtsi_transition_granule(x1, SMC_FROM_REALM,
							 GPT_GPI_NS));
	default:
		WARN("RMM: Unsupported GTF call 0x%08x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}