1/* SPDX-License-Identifier: BSD-2-Clause */
2/*
3 * Copyright (c) 2019, Linaro Limited
4 */
5
6#include <arm64_macros.S>
7#include <arm.h>
8#include <asm.S>
9#include <generated/asm-defines.h>
10#include <keep.h>
11#include <kernel/thread_defs.h>
12#include <sm/optee_smc.h>
13#include <sm/teesmc_opteed.h>
14#include <sm/teesmc_opteed_macros.h>
15
16/*
17 * If ASLR is configured the identity mapped code may be mapped at two
18 * locations, the identity location where virtual and physical address is
19 * the same and at the runtime selected location to which OP-TEE has been
20 * relocated.  Code executing at a location different compared to the
21 * runtime selected location works OK as long as it doesn't do relative
22 * addressing outside the identity mapped range. To allow relative
23 * addressing this macro jumps to the runtime selected location.
24 *
25 * Note that the identity mapped range and the runtime selected range can
26 * only differ if ASLR is configured.
27 */
	/*
	 * Jump, via an absolute address loaded from the literal pool, to
	 * the runtime-selected location of label 1111 so that subsequent
	 * PC-relative addressing resolves inside the relocated range.
	 * See the comment block above for the full rationale.  No-op
	 * unless CFG_CORE_ASLR is enabled.
	 */
	.macro readjust_pc
#ifdef CFG_CORE_ASLR
	/* x16 (IP0) is a scratch/veneer register, safe to clobber here */
	ldr	x16, =1111f
	br	x16
1111:
	/* Landing pad for the indirect branch above when BTI is enabled */
BTI(	bti	j)
#endif
	.endm
36
/*
 * Entered from the secure monitor for a standard (yielding) SMC, see the
 * thread_vector_table layout below.
 */
LOCAL_FUNC vector_std_smc_entry , : , .identity_map
	readjust_pc
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit(), thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (error detected) it will do a normal "C"
	 * return.
	 */
	mov	w1, w0	/* w1 = status from thread_handle_std_smc() */
	/* x0 = SMC function ID reporting completion to the monitor */
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_std_smc_entry
51
/*
 * Entered from the secure monitor for a fast SMC, see the
 * thread_vector_table layout below.  Handled directly on this stack, no
 * thread context is allocated.
 */
LOCAL_FUNC vector_fast_smc_entry , : , .identity_map
	readjust_pc
	/* Reserve an SMC argument struct on the stack and save x0-x7 */
	sub	sp, sp, #THREAD_SMC_ARGS_SIZE
	store_xregs sp, THREAD_SMC_ARGS_X0, 0, 7
	mov	x0, sp	/* x0 = pointer to the saved SMC arguments */
	bl	thread_handle_fast_smc
	/*
	 * Load the handler's result values into x1-x8 (not x0-x7) since
	 * x0 must carry TEESMC_OPTEED_RETURN_CALL_DONE back to the
	 * monitor.
	 */
	load_xregs sp, THREAD_SMC_ARGS_X0, 1, 8
	add	sp, sp, #THREAD_SMC_ARGS_SIZE
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fast_smc_entry
64
LOCAL_FUNC vector_fiq_entry , : , .identity_map
	readjust_pc
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	bl	itr_core_handler
	/* Report FIQ handling complete back to the secure monitor */
	ldr	x0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fiq_entry
74
LOCAL_FUNC vector_cpu_on_entry , : , .identity_map
	/*
	 * NOTE(review): unlike the other vectors there's no readjust_pc
	 * here — presumably this path must keep executing from the
	 * identity mapping until cpu_on_handler() has initialized the
	 * CPU; confirm before adding one.
	 */
	bl	cpu_on_handler
	mov	x1, x0	/* x1 = return value from cpu_on_handler() */
	ldr	x0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_on_entry
82
LOCAL_FUNC vector_cpu_off_entry , : , .identity_map
	readjust_pc
	bl	thread_cpu_off_handler
	mov	x1, x0	/* x1 = return value from thread_cpu_off_handler() */
	ldr	x0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_off_entry
91
LOCAL_FUNC vector_cpu_suspend_entry , : , .identity_map
	readjust_pc
	bl	thread_cpu_suspend_handler
	mov	x1, x0	/* x1 = return value from thread_cpu_suspend_handler() */
	ldr	x0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_suspend_entry
100
LOCAL_FUNC vector_cpu_resume_entry , : , .identity_map
	readjust_pc
	bl	thread_cpu_resume_handler
	mov	x1, x0	/* x1 = return value from thread_cpu_resume_handler() */
	ldr	x0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_resume_entry
109
LOCAL_FUNC vector_system_off_entry , : , .identity_map
	readjust_pc
	bl	thread_system_off_handler
	mov	x1, x0	/* x1 = return value from thread_system_off_handler() */
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_off_entry
118
LOCAL_FUNC vector_system_reset_entry , : , .identity_map
	readjust_pc
	bl	thread_system_reset_handler
	mov	x1, x0	/* x1 = return value from thread_system_reset_handler() */
	ldr	x0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_reset_entry
127
/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization.
 *
 * Note that ARM-TF depends on the layout of this vector table, any change
 * in layout has to be synced with ARM-TF.
 *
 * Marked nobti: ARM-TF branches here directly, each slot is a plain
 * branch, not an indirect-branch target.
 */
FUNC thread_vector_table , : , .identity_map, , nobti
	b	vector_std_smc_entry		/* Yielding (standard) SMC */
	b	vector_fast_smc_entry		/* Fast SMC */
	b	vector_cpu_on_entry		/* CPU turned on */
	b	vector_cpu_off_entry		/* CPU about to turn off */
	b	vector_cpu_resume_entry		/* CPU resumed from suspend */
	b	vector_cpu_suspend_entry	/* CPU about to suspend */
	b	vector_fiq_entry		/* Secure interrupt (FIQ) */
	b	vector_system_off_entry		/* System off requested */
	b	vector_system_reset_entry	/* System reset requested */
END_FUNC thread_vector_table
DECLARE_KEEP_PAGER thread_vector_table
147
/*
 * Entry point executed on a thread stack for a standard SMC.  Runs the
 * call via __thread_std_smc_entry(), then frees the thread state and
 * reports the result to the secure monitor.  Does not return.
 */
FUNC thread_std_smc_entry , :
	bl	__thread_std_smc_entry
	/* w20 is callee-saved, so the value survives the calls below */
	mov	w20, w0	/* Save return value for later */

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	bl	thread_get_tmp_sp
	mov	sp, x0	/* Switch to this core's temporary stack */

	bl	thread_state_free

	/* Report completion and the saved return value to the monitor */
	ldr	x0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	w1, w20	/* w1 = result of __thread_std_smc_entry() */
	mov	x2, #0
	mov	x3, #0
	mov	x4, #0
	smc	#0
	b	.	/* SMC should not return */
END_FUNC thread_std_smc_entry
167
/*
 * void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS])
 *
 * Suspends the current thread and performs an RPC to normal world via
 * the secure monitor.  rv[] supplies the outgoing RPC arguments and
 * receives the returned values when the thread is resumed.
 */
FUNC thread_rpc , :
	/* Read daif and create an SPSR */
	mrs	x1, daif
	orr	x1, x1, #(SPSR_64_MODE_EL1 << SPSR_64_MODE_EL_SHIFT)

	/* Mask all maskable exceptions before switching to temporary stack */
	msr	daifset, #DAIFBIT_ALL
	push	x0, xzr		/* Save pointer to rv[] */
	push	x1, x30		/* Save SPSR and return address */
	bl	thread_get_ctx_regs
	ldr	x30, [sp, #8]	/* Reload return address saved above */
	/* Save callee-saved x19-x30 into the thread context (x0) */
	store_xregs x0, THREAD_CTX_REGS_X19, 19, 30
	mov	x19, x0		/* x19 = ctx regs, survives the calls below */

	bl	thread_get_tmp_sp
	pop	x1, xzr		/* Match "push x1, x30" above */
	mov	x2, sp
	str	x2, [x19, #THREAD_CTX_REGS_SP]	/* SP to restore on resume */
	ldr	x20, [sp]	/* Get pointer to rv[] */
	mov	sp, x0		/* Switch to tmp stack */
	/*
	 * We need to read rv[] early, because thread_state_suspend
	 * can invoke virt_unset_guest() which will unmap pages,
	 * where rv[] resides
	 */
	load_wregs x20, 0, 21, 23	/* Load rv[] into w21-w23 */

	adr	x2, .thread_rpc_return	/* PC to resume at after the RPC */
	mov	w0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	bl	thread_state_suspend
	mov	x4, x0		/* Supply thread index */
	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	x1, x21		/* x1-x3 = rv[0..2] read above */
	mov	x2, x22
	mov	x3, x23
	smc	#0
	b	.		/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * stored in THREAD_CTX_REGS_SP above.
	 *
	 * Jumps here from thread_resume above when RPC has returned. The
	 * IRQ and FIQ bits are restored to what they were when this
	 * function was originally entered.
	 */
	pop	x16, xzr	/* Get pointer to rv[] */
	store_wregs x16, 0, 0, 3	/* Store w0-w3 into rv[] */
	ret
END_FUNC thread_rpc
DECLARE_KEEP_PAGER thread_rpc
221
222/*
223 * void thread_foreign_intr_exit(uint32_t thread_index)
224 *
225 * This function is jumped to at the end of macro foreign_intr_handler().
226 * The current thread as indicated by @thread_index has just been
227 * suspended.  The job here is just to inform normal world the thread id to
228 * resume when returning.
229 */
FUNC thread_foreign_intr_exit , :
	mov	w4, w0	/* w4 = thread index to resume later */
	ldr	w0, =TEESMC_OPTEED_RETURN_CALL_DONE
	/* w1 = RPC code telling normal world a foreign interrupt is pending */
	ldr	w1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
	mov	w2, #0
	mov	w3, #0
	smc	#0
	b	.	/* SMC should not return */
END_FUNC thread_foreign_intr_exit
239
240BTI(emit_aarch64_feature_1_and     GNU_PROPERTY_AARCH64_FEATURE_1_BTI)
241