// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015-2021, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <config.h>
#include <kernel/boot.h>
#include <kernel/misc.h>
#include <kernel/notif.h>
#include <kernel/tee_l2cc_mutex.h>
#include <kernel/virtualization.h>
#include <mm/core_mmu.h>
#include <optee_msg.h>
#include <sm/optee_smc.h>
#include <tee/entry_fast.h>

#ifdef CFG_CORE_RESERVED_SHM
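/*
 * OPTEE_SMC_GET_SHM_CONFIG: report the physical address and size of the
 * reserved non-secure shared memory pool and whether it is mapped cached.
 */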
static void tee_entry_get_shm_config(struct thread_smc_args *args)
{
	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = default_nsec_shm_paddr;
	args->a2 = default_nsec_shm_size;
	/* Should this be TEESMC cache attributes instead? */
	args->a3 = core_mmu_is_shm_cached();
}
#endif

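/*
 * OPTEE_SMC_L2CC_MUTEX: get or set the physical address of the L2 cache
 * controller mutex, or enable/disable its use. Only implemented for ARM32
 * builds; otherwise the call is reported as an unknown function.
 */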
static void tee_entry_fastcall_l2cc_mutex(struct thread_smc_args *args)
{
	TEE_Result ret;
#ifdef ARM32
	paddr_t pa = 0;

	switch (args->a1) {
	case OPTEE_SMC_L2CC_MUTEX_GET_ADDR:
		ret = tee_get_l2cc_mutex(&pa);
		reg_pair_from_64(pa, &args->a2, &args->a3);
		break;
	case OPTEE_SMC_L2CC_MUTEX_SET_ADDR:
		pa = reg_pair_to_64(args->a2, args->a3);
		ret = tee_set_l2cc_mutex(&pa);
		break;
	case OPTEE_SMC_L2CC_MUTEX_ENABLE:
		ret = tee_enable_l2cc_mutex();
		break;
	case OPTEE_SMC_L2CC_MUTEX_DISABLE:
		ret = tee_disable_l2cc_mutex();
		break;
	default:
		args->a0 = OPTEE_SMC_RETURN_EBADCMD;
		return;
	}
#else
	ret = TEE_ERROR_NOT_SUPPORTED;
#endif
	if (ret == TEE_ERROR_NOT_SUPPORTED)
		args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
	else if (ret)
		args->a0 = OPTEE_SMC_RETURN_EBADADDR;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
}

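/*
 * OPTEE_SMC_EXCHANGE_CAPABILITIES: check the capabilities announced by the
 * non-secure world and report the secure world capabilities (reserved and
 * dynamic shared memory, virtualization, NULL memory references and
 * asynchronous notifications).
 */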
static void tee_entry_exchange_capabilities(struct thread_smc_args *args)
{
	bool dyn_shm_en __maybe_unused = false;

	/*
	 * Currently we ignore OPTEE_SMC_NSEC_CAP_UNIPROCESSOR.
	 *
	 * The memory mapping of shared memory is defined as normal
	 * shared memory for SMP systems and normal memory for UP
	 * systems. Currently we map all memory as shared in secure
	 * world.
	 *
	 * When translation tables are created with the shared bit
	 * cleared for uniprocessor systems we'll need to check
	 * OPTEE_SMC_NSEC_CAP_UNIPROCESSOR.
	 */

	if (args->a1 & ~OPTEE_SMC_NSEC_CAP_UNIPROCESSOR) {
		/* Unknown capability. */
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = 0;
#ifdef CFG_CORE_RESERVED_SHM
	args->a1 |= OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM;
#endif
	if (IS_ENABLED(CFG_VIRTUALIZATION))
		args->a1 |= OPTEE_SMC_SEC_CAP_VIRTUALIZATION;
	args->a1 |= OPTEE_SMC_SEC_CAP_MEMREF_NULL;
	if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF)) {
		args->a1 |= OPTEE_SMC_SEC_CAP_ASYNC_NOTIF;
		args->a2 = NOTIF_VALUE_MAX;
	}
	DMSG("Asynchronous notifications are %sabled",
	     IS_ENABLED(CFG_CORE_ASYNC_NOTIF) ? "en" : "dis");

#if defined(CFG_CORE_DYN_SHM)
	dyn_shm_en = core_mmu_nsec_ddr_is_defined();
	if (dyn_shm_en)
		args->a1 |= OPTEE_SMC_SEC_CAP_DYNAMIC_SHM;
#endif

	DMSG("Dynamic shared memory is %sabled", dyn_shm_en ? "en" : "dis");
}

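/*
 * OPTEE_SMC_DISABLE_SHM_CACHE: disable the cache of preallocated RPC shared
 * memory objects and return one cached object's cookie (split over a1/a2)
 * per call, or ENOTAVAIL once the cache is empty.
 */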
static void tee_entry_disable_shm_cache(struct thread_smc_args *args)
{
	uint64_t cookie;

	if (!thread_disable_prealloc_rpc_cache(&cookie)) {
		args->a0 = OPTEE_SMC_RETURN_EBUSY;
		return;
	}

	if (!cookie) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = cookie >> 32;
	args->a2 = cookie;
}

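/*
 * OPTEE_SMC_ENABLE_SHM_CACHE: re-enable the cache of preallocated RPC shared
 * memory objects, or report EBUSY if that is currently not possible.
 */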
static void tee_entry_enable_shm_cache(struct thread_smc_args *args)
{
	if (thread_enable_prealloc_rpc_cache())
		args->a0 = OPTEE_SMC_RETURN_OK;
	else
		args->a0 = OPTEE_SMC_RETURN_EBUSY;
}

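/*
 * OPTEE_SMC_BOOT_SECONDARY: release the secondary core selected by a1 so it
 * enters the normal world at the entry address passed in a3, when the
 * platform supports secondary boot requests.
 */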
static void tee_entry_boot_secondary(struct thread_smc_args *args)
{
#if defined(CFG_BOOT_SECONDARY_REQUEST)
	if (!boot_core_release(args->a1, (paddr_t)(args->a3)))
		args->a0 = OPTEE_SMC_RETURN_OK;
	else
		args->a0 = OPTEE_SMC_RETURN_EBADCMD;
#else
	args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
#endif
}

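/*
 * OPTEE_SMC_GET_THREAD_COUNT: report the number of secure world threads,
 * that is, the maximum number of standard calls that can be in progress at
 * the same time.
 */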
static void tee_entry_get_thread_count(struct thread_smc_args *args)
{
	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = CFG_NUM_THREADS;
}

#if defined(CFG_VIRTUALIZATION)
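/*
 * OPTEE_SMC_VM_CREATED: issued by the hypervisor when a new guest is
 * created so a matching partition can be set up in OP-TEE.
 */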
static void tee_entry_vm_created(struct thread_smc_args *args)
{
	uint16_t guest_id = args->a1;

	/* Only the hypervisor can issue this request */
	if (args->a7 != HYP_CLNT_ID) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	if (virt_guest_created(guest_id))
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
}

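/*
 * OPTEE_SMC_VM_DESTROYED: issued by the hypervisor when a guest is
 * destroyed so its partition and resources can be released.
 */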
static void tee_entry_vm_destroyed(struct thread_smc_args *args)
{
	uint16_t guest_id = args->a1;

	/* Only the hypervisor can issue this request */
	if (args->a7 != HYP_CLNT_ID) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		return;
	}

	if (virt_guest_destroyed(guest_id))
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
	else
		args->a0 = OPTEE_SMC_RETURN_OK;
}
#endif

/* Note: this function is weak to let platforms add special handling */
void __weak tee_entry_fast(struct thread_smc_args *args)
{
	__tee_entry_fast(args);
}

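/*
 * OPTEE_SMC_GET_ASYNC_NOTIF_VALUE: retrieve one pending notification value
 * and flag in a2 whether it is valid and whether more values are pending.
 */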
static void get_async_notif_value(struct thread_smc_args *args)
{
	bool value_valid = false;
	bool value_pending = false;

	args->a0 = OPTEE_SMC_RETURN_OK;
	args->a1 = notif_get_value(&value_valid, &value_pending);
	args->a2 = 0;
	if (value_valid)
		args->a2 |= OPTEE_SMC_ASYNC_NOTIF_VALID;
	if (value_pending)
		args->a2 |= OPTEE_SMC_ASYNC_NOTIF_PENDING;
}

/*
 * If tee_entry_fast() is overridden, it's still supposed to call this
 * function.
 */
void __tee_entry_fast(struct thread_smc_args *args)
{
	switch (args->a0) {

	/* Generic functions */
	case OPTEE_SMC_CALLS_COUNT:
		tee_entry_get_api_call_count(args);
		break;
	case OPTEE_SMC_CALLS_UID:
		tee_entry_get_api_uuid(args);
		break;
	case OPTEE_SMC_CALLS_REVISION:
		tee_entry_get_api_revision(args);
		break;
	case OPTEE_SMC_CALL_GET_OS_UUID:
		tee_entry_get_os_uuid(args);
		break;
	case OPTEE_SMC_CALL_GET_OS_REVISION:
		tee_entry_get_os_revision(args);
		break;

	/* OP-TEE specific SMC functions */
#ifdef CFG_CORE_RESERVED_SHM
	case OPTEE_SMC_GET_SHM_CONFIG:
		tee_entry_get_shm_config(args);
		break;
#endif
	case OPTEE_SMC_L2CC_MUTEX:
		tee_entry_fastcall_l2cc_mutex(args);
		break;
	case OPTEE_SMC_EXCHANGE_CAPABILITIES:
		tee_entry_exchange_capabilities(args);
		break;
	case OPTEE_SMC_DISABLE_SHM_CACHE:
		tee_entry_disable_shm_cache(args);
		break;
	case OPTEE_SMC_ENABLE_SHM_CACHE:
		tee_entry_enable_shm_cache(args);
		break;
	case OPTEE_SMC_BOOT_SECONDARY:
		tee_entry_boot_secondary(args);
		break;
	case OPTEE_SMC_GET_THREAD_COUNT:
		tee_entry_get_thread_count(args);
		break;

#if defined(CFG_VIRTUALIZATION)
	case OPTEE_SMC_VM_CREATED:
		tee_entry_vm_created(args);
		break;
	case OPTEE_SMC_VM_DESTROYED:
		tee_entry_vm_destroyed(args);
		break;
#endif

	case OPTEE_SMC_ENABLE_ASYNC_NOTIF:
		if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF)) {
			notif_deliver_atomic_event(NOTIF_EVENT_STARTED);
			args->a0 = OPTEE_SMC_RETURN_OK;
		} else {
			args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
		}
		break;
	case OPTEE_SMC_GET_ASYNC_NOTIF_VALUE:
		if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF))
			get_async_notif_value(args);
		else
			args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
		break;

	default:
		args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
		break;
	}
}

size_t tee_entry_generic_get_api_call_count(void)
{
	/*
	 * All the different calls handled in this file. If the specific
	 * target has additional calls it will call this function and
	 * add the number of calls the target has added.
	 */
	size_t ret = 12;

	if (IS_ENABLED(CFG_VIRTUALIZATION))
		ret += 2;

	return ret;
}

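/*
 * Weak default handlers below report the API call count, the OP-TEE Message
 * protocol UID and revision, and the OS UUID and revision. Platforms may
 * override them.
 */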
void __weak tee_entry_get_api_call_count(struct thread_smc_args *args)
{
	args->a0 = tee_entry_generic_get_api_call_count();
}

void __weak tee_entry_get_api_uuid(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_UID_0;
	args->a1 = OPTEE_MSG_UID_1;
	args->a2 = OPTEE_MSG_UID_2;
	args->a3 = OPTEE_MSG_UID_3;
}

void __weak tee_entry_get_api_revision(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_REVISION_MAJOR;
	args->a1 = OPTEE_MSG_REVISION_MINOR;
}

void __weak tee_entry_get_os_uuid(struct thread_smc_args *args)
{
	args->a0 = OPTEE_MSG_OS_OPTEE_UUID_0;
	args->a1 = OPTEE_MSG_OS_OPTEE_UUID_1;
	args->a2 = OPTEE_MSG_OS_OPTEE_UUID_2;
	args->a3 = OPTEE_MSG_OS_OPTEE_UUID_3;
}

void __weak tee_entry_get_os_revision(struct thread_smc_args *args)
{
	args->a0 = CFG_OPTEE_REVISION_MAJOR;
	args->a1 = CFG_OPTEE_REVISION_MINOR;
	args->a2 = TEE_IMPL_GIT_SHA1;
}