/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016-2020, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <sm/optee_smc.h>
#include <sm/sm.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>
#include <util.h>

#define SM_CTX_SEC_END	(SM_CTX_SEC + SM_CTX_SEC_SIZE)

	.macro save_regs mode
	cps	\mode
	mrs	r2, spsr
	str	r2, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4
	.endm

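/*
 * Saves the "unbanked" registers: the sp and lr of System/User mode and
 * the spsr, sp and lr of IRQ, FIQ, SVC, ABT and UND mode. These registers
 * are banked per CPU mode but shared between the secure and non-secure
 * worlds, which is why the monitor has to save and restore them on a
 * world switch. r0 points to the save area and is advanced past it, r2 is
 * clobbered. Called from and returns in Monitor mode.
 */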
FUNC sm_save_unbanked_regs , :
UNWIND(	.cantunwind)
	/* User mode registers have to be saved from System mode */
	cps	#CPSR_MODE_SYS
	str	sp, [r0], #4
	str	lr, [r0], #4

	save_regs	#CPSR_MODE_IRQ
	save_regs	#CPSR_MODE_FIQ
	save_regs	#CPSR_MODE_SVC
	save_regs	#CPSR_MODE_ABT
	save_regs	#CPSR_MODE_UND

#ifdef CFG_SM_NO_CYCLE_COUNTING
	read_pmcr r2
	stm	r0!, {r2}
#endif

#ifdef CFG_FTRACE_SUPPORT
	read_cntkctl r2
	stm	r0!, {r2}
#endif
	cps	#CPSR_MODE_MON
	bx	lr
END_FUNC sm_save_unbanked_regs

	.macro restore_regs mode
	cps	\mode
	ldr	r2, [r0], #4
	ldr	sp, [r0], #4
	ldr	lr, [r0], #4
	msr	spsr_fsxc, r2
	.endm

/* Restores the mode-specific (unbanked) registers saved above */
FUNC sm_restore_unbanked_regs , :
UNWIND(	.cantunwind)
	/* User mode registers have to be restored from System mode */
	cps	#CPSR_MODE_SYS
	ldr	sp, [r0], #4
	ldr	lr, [r0], #4

	restore_regs	#CPSR_MODE_IRQ
	restore_regs	#CPSR_MODE_FIQ
	restore_regs	#CPSR_MODE_SVC
	restore_regs	#CPSR_MODE_ABT
	restore_regs	#CPSR_MODE_UND

#ifdef CFG_SM_NO_CYCLE_COUNTING
	ldm	r0!, {r2}
	write_pmcr r2
#endif

#ifdef CFG_FTRACE_SUPPORT
	ldm	r0!, {r2}
	write_cntkctl r2
#endif
	cps	#CPSR_MODE_MON
	bx	lr
END_FUNC sm_restore_unbanked_regs

/*
 * stack_tmp is used as the stack; the top of the stack is reserved to
 * hold struct sm_ctx and everything below is for normal stack usage. As
 * several different CPU modes use the same stack it is important that a
 * switch of CPU mode isn't done until the current mode is done with it.
 * This means FIQ, IRQ and asynchronous aborts have to be masked while
 * using stack_tmp.
 */
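/*
 * Rough picture of that reserved area, with field names as generated
 * into asm-defines.h from struct sm_ctx in <sm/sm.h> (see that header
 * for the authoritative layout):
 *
 *	sm_ctx.sec:	unbanked regs, r0-r7, mon_lr, mon_spsr
 *	sm_ctx.nsec:	unbanked regs, r8-r12, r0-r7, mon_lr, mon_spsr
 *
 * The Monitor mode SP also records which world is currently executing:
 * it points just past sm_ctx.sec while the secure world runs and just
 * past sm_ctx.nsec while the non-secure world runs. That way the
 * srsdb/push pair at the entry points below stores mon_spsr, mon_lr and
 * r0-r7 directly into the context of the interrupted world.
 */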
LOCAL_FUNC sm_smc_entry , :
UNWIND(	.cantunwind)
	srsdb	sp!, #CPSR_MODE_MON
	push	{r0-r7}

	clrex		/* Clear the exclusive monitor */

	/* Find out if we're doing a secure or non-secure entry */
	read_scr r1
	tst	r1, #SCR_NS
	bne	.smc_from_nsec

	/*
	 * As we're coming from the secure world (NS bit cleared) the stack
	 * pointer points to sm_ctx.sec.r0 at this stage. After the
	 * instruction below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

	/* Save secure context */
	add	r0, sp, #SM_CTX_SEC
	bl	sm_save_unbanked_regs

	/*
	 * On FIQ exit we're restoring the non-secure context unchanged;
	 * on all other exits we're shifting r1-r4 from the secure context
	 * into r0-r3 in the non-secure context.
	 */
	add	r8, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)
	ldm	r8, {r0-r4}
	mov_imm	r9, TEESMC_OPTEED_RETURN_FIQ_DONE
	cmp	r0, r9
	addne	r8, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
	stmne	r8, {r1-r4}

	/* Restore non-secure context */
	add	r0, sp, #SM_CTX_NSEC
	bl	sm_restore_unbanked_regs

.sm_ret_to_nsec:
	/*
	 * Return to non-secure world
	 */
	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	ldm	r0, {r8-r12}

#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
	/*
	 * Prevent leaking information about which code has been executed.
	 * This is required to be used together with
	 * CFG_CORE_WORKAROUND_SPECTRE_BP to protect Cortex-A15 CPUs too.
	 *
	 * CFG_CORE_WORKAROUND_SPECTRE_BP also invalidates the branch
	 * predictor on affected CPUs. In the cases where an alternative
	 * vector has been installed the branch predictor is already
	 * invalidated, so invalidating here again would be redundant, but
	 * testing for that is more trouble than it's worth.
	 */
	write_bpiall
#endif

	/* Update SCR */
	read_scr r0
	orr	r0, r0, #(SCR_NS | SCR_FIQ) /* Set NS and FIQ bits in SCR */
	write_scr r0
	/*
	 * An isb is not needed here since we're doing an exception return
	 * below and nothing before it depends on the SCR changes having
	 * taken effect.
	 */

	add	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
	b	.sm_exit

.smc_from_nsec:
	/*
	 * As we're coming from the non-secure world (NS bit set) the stack
	 * pointer points to sm_ctx.nsec.r0 at this stage. After the
	 * instruction below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bits in SCR */
	write_scr r1
	isb

	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	stm	r0, {r8-r12}

	mov	r0, sp
	bl	sm_from_nsec
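	/*
	 * sm_from_nsec() (in sm.c) decides how to handle the call: when it
	 * returns SM_EXIT_TO_NON_SECURE the call has been handled here in
	 * Monitor mode and we go straight back to the normal world,
	 * otherwise the secure context has been prepared and we continue
	 * into the secure world.
	 */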
	cmp	r0, #SM_EXIT_TO_NON_SECURE
	beq	.sm_ret_to_nsec

	/*
	 * Continue into secure world
	 */
	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

.sm_exit:
	pop	{r0-r7}
	rfefd	sp!
END_FUNC sm_smc_entry

/*
 * FIQ handling
 *
 * Saves the CPU context in the same way as sm_smc_entry() above. The CPU
 * context will later be restored by sm_smc_entry() when handling a return
 * from FIQ.
 */
LOCAL_FUNC sm_fiq_entry , :
UNWIND(	.cantunwind)
	/* FIQ has a +4 offset for lr compared to the preferred return address */
	sub	lr, lr, #4
	/* sp points just past struct sm_nsec_ctx */
	srsdb	sp!, #CPSR_MODE_MON
	push	{r0-r7}

	clrex		/* Clear the exclusive monitor */

	/*
	 * As we're coming from the non-secure world the stack pointer
	 * points to sm_ctx.nsec.r0 at this stage. After the instruction
	 * below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

	/* Update SCR */
	read_scr r1
	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bits in SCR */
	write_scr r1
	isb

	/* Save non-secure context */
	add	r0, sp, #SM_CTX_NSEC
	bl	sm_save_unbanked_regs
	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	stm	r0!, {r8-r12}

	/* Set FIQ entry */
	ldr	r0, =vector_fiq_entry
	str	r0, [sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)]
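	/*
	 * vector_fiq_entry is the secure world's FIQ handler entry point.
	 * Storing it in sm_ctx.sec.mon_lr makes the rfefd below "return"
	 * into the secure world at that handler, using the mon_spsr saved
	 * when the secure world was last left. When the handler is done it
	 * issues an SMC with TEESMC_OPTEED_RETURN_FIQ_DONE, which
	 * sm_smc_entry above uses to restore the non-secure context
	 * unchanged.
	 */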

	/* Restore secure context */
	add	r0, sp, #SM_CTX_SEC
	bl	sm_restore_unbanked_regs

	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)

	rfefd	sp!
END_FUNC sm_fiq_entry

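/*
 * Monitor vector table, installed in MVBAR by sm_init() below (or one of
 * the Spectre workaround variants further down). Only SMC and FIQ are
 * expected to be routed to Monitor mode; the other entries spin so that a
 * debugger can tell where we got stuck.
 */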
LOCAL_FUNC sm_vect_table , :, align=32
UNWIND(	.cantunwind)
	b	.		/* Reset			*/
	b	.		/* Undefined instruction	*/
	b	sm_smc_entry	/* Secure monitor call		*/
	b	.		/* Prefetch abort		*/
	b	.		/* Data abort			*/
	b	.		/* Reserved			*/
	b	.		/* IRQ				*/
	b	sm_fiq_entry	/* FIQ				*/
END_FUNC sm_vect_table

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP
	.macro vector_prologue_spectre
		/*
		 * This depends on SP being 8 byte aligned, that is, the
		 * lowest three bits in SP are zero.
		 *
		 * The idea is to form a specific bit pattern in the lowest
		 * three bits of SP depending on which entry in the vector
		 * we enter via.  This is done by adding 1 to SP in each
		 * entry but the last.
		 */
		add	sp, sp, #1	/* 7:111 Reset			*/
		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
		add	sp, sp, #1	/* 3:011 Data abort		*/
		add	sp, sp, #1	/* 2:010 Reserved		*/
		add	sp, sp, #1	/* 1:001 IRQ			*/
		nop			/* 0:000 FIQ			*/
	.endm

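/*
 * Both vector tables below start with vector_prologue_spectre and share
 * the dispatch code at the local label 1: further down: once the branch
 * predictor has been invalidated, the low bits of SP identify which
 * vector entry was taken, the original SP value is recovered and the
 * exception is routed to sm_fiq_entry or sm_smc_entry.
 */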
LOCAL_FUNC sm_vect_table_a15 , :, align=32
UNWIND(	.cantunwind)
	vector_prologue_spectre
	/*
	 * Invalidate the branch predictor for the current processor.
	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
	 * effective.
	 * Note that the BPIALL instruction is not effective in
	 * invalidating the branch predictor on Cortex-A15. For that CPU,
	 * set ACTLR[0] to 1 during early processor initialisation, and
	 * invalidate the branch predictor by performing an ICIALLU
	 * instruction. See also:
	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
	 */
	write_iciallu
	isb
	b	1f
END_FUNC sm_vect_table_a15

LOCAL_FUNC sm_vect_table_bpiall , :, align=32
UNWIND(	.cantunwind)
	vector_prologue_spectre
	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

1:
	/*
	 * Only two exceptions normally occur, SMC and FIQ. With all other
	 * exceptions it's good enough to just spin; the lowest bits of SP
	 * still tell which exception we're stuck in when attaching a
	 * debugger.
	 */

	/* Test for FIQ, all the lowest bits of SP are supposed to be 0 */
	tst	sp, #(BIT(0) | BIT(1) | BIT(2))
	beq	sm_fiq_entry

	/*
	 * Test for SMC: the eor clears the lowest bits of SP if we entered
	 * via the SMC slot, restoring the original SP value.
	 */
	eor	sp, sp, #(BIT(0) | BIT(2))
	tst	sp, #(BIT(0) | BIT(1) | BIT(2))
	beq	sm_smc_entry

	/* unhandled exception */
	b	.
END_FUNC sm_vect_table_bpiall
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP*/

/* void sm_init(vaddr_t stack_pointer); */
FUNC sm_init , :
	/* Set monitor stack */
	mrs	r1, cpsr
	cps	#CPSR_MODE_MON
	/* Point just beyond sm_ctx.sec */
	sub	sp, r0, #(SM_CTX_SIZE - SM_CTX_SEC_END)
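	/*
	 * r0 is the top of stack_tmp and struct sm_ctx occupies the
	 * SM_CTX_SIZE bytes below it. The secure world is the one
	 * executing at this point, so the Monitor SP is parked in the
	 * "secure world running" position, just past sm_ctx.sec.
	 */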

#ifdef CFG_INIT_CNTVOFF
	read_scr r0
	orr	r0, r0, #SCR_NS /* Set NS bit in SCR */
	write_scr r0
	isb

	/*
	 * Accessing CNTVOFF:
	 * If the implementation includes the Virtualization Extensions
	 * this is a RW register, accessible from Hyp mode, and
	 * from Monitor mode when SCR.NS is set to 1.
	 * If the implementation includes the Security Extensions
	 * but not the Virtualization Extensions, an MCRR or MRRC to
	 * the CNTVOFF encoding is UNPREDICTABLE if executed in Monitor
	 * mode, regardless of the value of SCR.NS.
	 */
	read_id_pfr1 r2
	mov	r3, r2
	ands	r3, r3, #IDPFR1_GENTIMER_MASK
	beq	.no_gentimer
	ands	r2, r2, #IDPFR1_VIRT_MASK
	beq	.no_gentimer
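	/*
	 * Both the generic timer and the Virtualization Extensions are
	 * present: zero CNTVOFF so that the virtual counter matches the
	 * physical counter for the normal world, which is what it expects
	 * when no hypervisor manages the offset.
	 */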
	mov	r2, #0
	write_cntvoff r2, r2

.no_gentimer:
	bic	r0, r0, #SCR_NS /* Clear NS bit in SCR */
	write_scr r0
	isb
#endif
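	/*
	 * PMCR.DP stops the cycle counter while event counting is
	 * prohibited, which normally covers execution in the secure state,
	 * so the normal world cannot use PMCCNTR to time what the secure
	 * world is doing.
	 */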
#ifdef CFG_SM_NO_CYCLE_COUNTING
	read_pmcr r0
	orr	r0, #PMCR_DP
	write_pmcr r0
#endif
	msr	cpsr, r1

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP
	/*
	 * For unrecognized CPUs we fall back to the vector used for
	 * unaffected CPUs. Cortex-A15 has special treatment compared to
	 * the other affected Cortex CPUs.
	 */
	read_midr r1
	ubfx	r2, r1, #MIDR_IMPLEMENTER_SHIFT, #MIDR_IMPLEMENTER_WIDTH
	cmp	r2, #MIDR_IMPLEMENTER_ARM
	bne	1f

	ubfx	r2, r1, #MIDR_PRIMARY_PART_NUM_SHIFT, \
			#MIDR_PRIMARY_PART_NUM_WIDTH

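	/*
	 * Cortex-A8, Cortex-A9 and Cortex-A17 can invalidate the branch
	 * predictor with BPIALL; Cortex-A15 needs the ICIALLU based
	 * variant instead (see sm_vect_table_a15 above).
	 */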
	movw	r3, #CORTEX_A8_PART_NUM
	cmp	r2, r3
	movwne	r3, #CORTEX_A9_PART_NUM
	cmpne	r2, r3
	movwne	r3, #CORTEX_A17_PART_NUM
	cmpne	r2, r3
	ldreq	r0, =sm_vect_table_bpiall
	beq	2f

	movw	r3, #CORTEX_A15_PART_NUM
	cmp	r2, r3
	ldreq	r0, =sm_vect_table_a15
	beq	2f
#endif
	/* Set monitor vector (MVBAR) */
1:	ldr	r0, =sm_vect_table
2:	write_mvbar r0

	bx	lr
END_FUNC sm_init
DECLARE_KEEP_PAGER sm_init


/* struct sm_nsec_ctx *sm_get_nsec_ctx(void); */
FUNC sm_get_nsec_ctx , :
	mrs	r1, cpsr
	cps	#CPSR_MODE_MON
	/*
	 * Since we're called from the secure world, mon_sp points just
	 * beyond sm_ctx.sec, which allows us to calculate the address of
	 * sm_ctx.nsec.
	 */
	add	r0, sp, #(SM_CTX_NSEC - SM_CTX_SEC_END)
	msr	cpsr, r1

	bx	lr
END_FUNC sm_get_nsec_ctx
