1/* SPDX-License-Identifier: BSD-2-Clause */
2/*
3 * Copyright (c) 2019, Linaro Limited
4 */
5
6#include <arm.h>
7#include <asm.S>
8#include <generated/asm-defines.h>
9#include <keep.h>
10#include <kernel/thread_defs.h>
11#include <sm/optee_smc.h>
12#include <sm/teesmc_opteed.h>
13#include <sm/teesmc_opteed_macros.h>
14
15.arch_extension sec
16
17/*
18 * If ASLR is configured the identity mapped code may be mapped at two
19 * locations, the identity location where virtual and physical address is
20 * the same and at the runtime selected location to which OP-TEE has been
21 * relocated.  Code executing at a location different compared to the
22 * runtime selected location works OK as long as it doesn't do relative
23 * addressing outside the identity mapped range. To allow relative
24 * addressing this macro jumps to the runtime selected location.
25 *
26 * Note that the identity mapped range and the runtime selected range can
27 * only differ if ASLR is configured.
28 */
	/* See the comment above: branch to the runtime selected location */
	.macro readjust_pc
#ifdef CFG_CORE_ASLR
	ldr	r12, =1111f	/* Absolute (runtime mapping) address of 1111 */
	bx	r12		/* Continue executing at the runtime location */
1111:
#endif
	.endm
36
FUNC vector_std_smc_entry , : , .identity_map
UNWIND(	.cantunwind)
	readjust_pc
	/* Pass r4-r7 on the stack as extra arguments per the AAPCS */
	push	{r4-r7}
	bl	thread_handle_std_smc
	add	sp, sp, #(4 * 4)	/* Drop the four stacked arguments */
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit(), thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (error detected) it will do a normal "C"
	 * return.
	 */
	mov	r1, r0	/* Hand the return value to normal world in r1 */
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_std_smc_entry
54
FUNC vector_fast_smc_entry , : , .identity_map
UNWIND(	.cantunwind)
	readjust_pc
	push	{r0-r7}		/* Save the SMC arguments r0-r7 on the stack */
	mov	r0, sp		/* Pass a pointer to the saved arguments */
	bl	thread_handle_fast_smc
	pop	{r1-r8}		/* Pop the handler's results into r1-r8 */
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fast_smc_entry
66
FUNC vector_fiq_entry , : , .identity_map
UNWIND(	.cantunwind)
	readjust_pc
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries	/* Verify stack canaries first */
	bl	itr_core_handler	/* Dispatch the secure interrupt */
	ldr	r0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fiq_entry
77
78#if defined(CFG_WITH_ARM_TRUSTED_FW)
LOCAL_FUNC vector_cpu_on_entry , : , .identity_map
UNWIND(	.cantunwind)
	/*
	 * NOTE(review): no readjust_pc here — the MMU is only enabled
	 * once cpu_on_handler() returns (see the comment below), so the
	 * runtime selected mapping presumably cannot be reached yet.
	 */
	bl	cpu_on_handler
	/* When cpu_on_handler() returns mmu is enabled */
	mov	r1, r0	/* Hand the return value to normal world in r1 */
	ldr	r0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_on_entry
88
LOCAL_FUNC vector_cpu_off_entry , : , .identity_map
UNWIND(	.cantunwind)
	readjust_pc
	bl	thread_cpu_off_handler
	mov	r1, r0	/* Hand the return value to normal world in r1 */
	ldr	r0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_off_entry
98
LOCAL_FUNC vector_cpu_suspend_entry , : , .identity_map
UNWIND(	.cantunwind)
	readjust_pc
	bl	thread_cpu_suspend_handler
	mov	r1, r0	/* Hand the return value to normal world in r1 */
	ldr	r0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_suspend_entry
108
LOCAL_FUNC vector_cpu_resume_entry , : , .identity_map
UNWIND(	.cantunwind)
	readjust_pc
	bl	thread_cpu_resume_handler
	mov	r1, r0	/* Hand the return value to normal world in r1 */
	ldr	r0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_resume_entry
118
LOCAL_FUNC vector_system_off_entry , : , .identity_map
UNWIND(	.cantunwind)
	readjust_pc
	bl	thread_system_off_handler
	mov	r1, r0	/* Hand the return value to normal world in r1 */
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_off_entry
128
LOCAL_FUNC vector_system_reset_entry , : , .identity_map
UNWIND(	.cantunwind)
	readjust_pc
	bl	thread_system_reset_handler
	mov	r1, r0	/* Hand the return value to normal world in r1 */
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_reset_entry
138
139/*
140 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
141 * initialization.  Also used when compiled with the internal monitor, but
142 * the cpu_*_entry and system_*_entry are not used then.
143 *
144 * Note that ARM-TF depends on the layout of this vector table, any change
145 * in layout has to be synced with ARM-TF.
146 */
FUNC thread_vector_table , : , .identity_map
UNWIND(	.cantunwind)
	/* Layout is an ABI with ARM-TF, see the comment above */
	b	vector_std_smc_entry		/* Yielding (std) SMC */
	b	vector_fast_smc_entry		/* Fast SMC */
	b	vector_cpu_on_entry		/* CPU on */
	b	vector_cpu_off_entry		/* CPU off */
	b	vector_cpu_resume_entry		/* CPU resume */
	b	vector_cpu_suspend_entry	/* CPU suspend */
	b	vector_fiq_entry		/* FIQ (secure interrupt) */
	b	vector_system_off_entry		/* System off */
	b	vector_system_reset_entry	/* System reset */
END_FUNC thread_vector_table
DECLARE_KEEP_PAGER thread_vector_table
160#endif /*if defined(CFG_WITH_ARM_TRUSTED_FW)*/
161
FUNC thread_std_smc_entry , :
UNWIND(	.cantunwind)
	push	{r4, r5} /* Pass these following the arm32 calling convention */
	bl	__thread_std_smc_entry
	add	sp, sp, #8 /* There's nothing to return, just restore the sp */
	mov	r4, r0	/* Save return value for later */

	/* Disable interrupts before switching to temporary stack */
	cpsid	aif
	bl	thread_get_tmp_sp
	mov	sp, r0	/* Switch to the temporary stack */

	bl	thread_state_free	/* Release the thread context */

	/* Report the completed call to the secure monitor */
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	r1, r4	/* Return value saved above */
	mov	r2, #0	/* Zero the remaining argument registers */
	mov	r3, #0
	mov	r4, #0
	smc	#0
	b	.	/* SMC should not return */
END_FUNC thread_std_smc_entry
184
/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
	push	{r0, lr}	/* Save pointer to rv[] and return address */
UNWIND(	.save	{r0, lr})

	bl	thread_save_state
	mov	r4, r0			/* Save original CPSR */

	/*
	 * Switch to temporary stack and SVC mode. Save CPSR to resume into.
	 */
	bl	thread_get_tmp_sp
	ldr	r5, [sp]		/* Get pointer to rv[] */
	cps	#CPSR_MODE_SVC		/* Change to SVC mode */
	mov	sp, r0			/* Switch to tmp stack */

	mov	r0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	mov	r1, r4			/* CPSR to restore */
	ldr	r2, =.thread_rpc_return	/* PC to resume at */
	bl	thread_state_suspend
	mov	r4, r0			/* Supply thread index */
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldm	r5, {r1-r3}		/* Load rv[] into r1-r3 */
	smc	#0
	b	.	/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * it had when thread_save_state() was called above.
	 *
	 * Execution resumes here when the RPC has returned (this is the
	 * resume PC supplied to thread_state_suspend() above). The IRQ
	 * and FIQ bits are restored to what they were when this function
	 * was originally entered.
	 */
	pop	{r12, lr}		/* Get pointer to rv[] */
	stm	r12, {r0-r3}		/* Store r0-r3 into rv[] */
	bx	lr
END_FUNC thread_rpc
DECLARE_KEEP_PAGER thread_rpc
225
226/*
227 * void thread_foreign_intr_exit(uint32_t thread_index)
228 *
229 * This function is jumped to at the end of macro foreign_intr_handler().
230 * The current thread as indicated by @thread_index has just been
231 * suspended.  The job here is just to inform normal world the thread id to
232 * resume when returning.
233 */
FUNC thread_foreign_intr_exit , :
	mov	r4, r0	/* Supply thread index to resume (see comment above) */
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	r1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
	mov	r2, #0	/* Zero the remaining argument registers */
	mov	r3, #0
	smc	#0
	b	.	/* SMC should not return */
END_FUNC thread_foreign_intr_exit
243