1 /* SPDX-License-Identifier: BSD-2-Clause */
2 /*
3  * Copyright (c) 2016, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #ifndef THREAD_PRIVATE_H
8 #define THREAD_PRIVATE_H
9 
10 #ifndef __ASSEMBLER__
11 
12 #include <mm/core_mmu.h>
13 #include <mm/pgt_cache.h>
14 #include <kernel/vfp.h>
15 #include <kernel/mutex.h>
16 #include <kernel/thread.h>
17 
/*
 * Life cycle of one entry in the global threads[] array: a slot is
 * free until allocated, active while executing, and suspended while
 * waiting to be resumed (see thread_state_suspend()/thread_resume()).
 */
enum thread_state {
	THREAD_STATE_FREE,	/* Slot unused, available for allocation */
	THREAD_STATE_SUSPENDED,	/* Set by thread_state_suspend() */
	THREAD_STATE_ACTIVE,	/* Thread currently executing on a core */
};
23 
#ifdef ARM64
/*
 * Kernel-side record kept while a thread executes in user mode on
 * ARM64 (see __thread_enter_user_mode()).
 * NOTE(review): the fixed-width fields and explicit pad suggest this
 * layout is accessed from assembly -- confirm against the .S offsets
 * before reordering or resizing any field.
 */
struct thread_user_mode_rec {
	uint64_t ctx_regs_ptr;		/* -> struct thread_ctx_regs to restore */
	uint64_t exit_status0_ptr;	/* -> exit_status0 out-parameter */
	uint64_t exit_status1_ptr;	/* -> exit_status1 out-parameter */
	uint64_t pad;			/* Padding, presumably for 16-byte alignment -- TODO confirm */
	uint64_t x[31 - 19]; /* x19..x30 */
};
#endif /*ARM64*/
33 
#ifdef CFG_WITH_VFP
/*
 * Per-thread VFP (floating point) context, holding separate register
 * banks for normal world (ns) and secure world (sec) together with
 * flags tracking which banks currently hold saved state.
 */
struct thread_vfp_state {
	bool ns_saved;		/* Normal world state saved in @ns */
	bool sec_saved;		/* Secure world state saved in @sec */
	bool sec_lazy_saved;	/* Secure save deferred -- presumably lazy
				   save/restore; confirm in vfp handling code */
	struct vfp_state ns;	/* Normal world VFP registers */
	struct vfp_state sec;	/* Secure world VFP registers */
	struct thread_user_vfp_state *uvfp;	/* User mode (TA) VFP state, if any */
};

#endif /*CFG_WITH_VFP*/
45 
/*
 * One cached shared-memory allocation used for RPC with normal world,
 * freed via thread_rpc_shm_cache_clear() below.
 */
struct thread_shm_cache_entry {
	struct mobj *mobj;	/* Memory object backing the allocation */
	size_t size;		/* Size of the allocation */
	enum thread_shm_type type;	/* How/where the memory was allocated */
	enum thread_shm_cache_user user;	/* Which subsystem the entry belongs to */
	SLIST_ENTRY(thread_shm_cache_entry) link;	/* List linkage */
};

/* Head of a singly linked list of cached RPC shared-memory entries */
SLIST_HEAD(thread_shm_cache, thread_shm_cache_entry);
55 
/*
 * Context of one thread, one entry per slot in the global threads[]
 * array declared below.
 */
struct thread_ctx {
	struct thread_ctx_regs regs;	/* Registers saved while suspended */
	enum thread_state state;	/* Free/suspended/active, see enum above */
	vaddr_t stack_va_end;	/* End address of this thread's stack */
	uint32_t flags;		/* THREAD_FLAGS_*, e.g. copy-args-on-return
				   (see thread_resume() comment) */
	struct core_mmu_user_map user_map;	/* Saved user mode mapping */
	bool have_user_map;	/* True if @user_map holds a valid mapping */
#ifdef ARM64
	vaddr_t kern_sp;	/* Saved kernel SP during user TA execution */
#endif
#ifdef CFG_WITH_VFP
	struct thread_vfp_state vfp_state;	/* Per-thread VFP context */
#endif
	void *rpc_arg;		/* RPC argument buffer for this thread */
	struct mobj *rpc_mobj;	/* Memory object backing @rpc_arg */
	struct thread_shm_cache shm_cache;	/* Cached RPC shared memory */
	struct thread_specific_data tsd;	/* Thread specific data (TEE core) */
};
74 #endif /*__ASSEMBLER__*/
75 
#ifdef ARM64
#ifdef CFG_WITH_VFP
/*
 * Bytes reserved for a struct thread_vfp_state on ARM64: roughly the
 * leading bool flags, two struct vfp_state (32 x 16-byte registers
 * plus status words each) and the trailing pointer, all with padding.
 * NOTE(review): must remain >= sizeof(struct thread_vfp_state) --
 * confirm against kernel/vfp.h when changing either side.
 */
#define THREAD_VFP_STATE_SIZE				\
	(16 + (16 * 32 + 16) * 2 + 16)
#else
/* Without VFP support there is no state to reserve space for */
#define THREAD_VFP_STATE_SIZE				0
#endif
#endif /*ARM64*/
84 
/*
 * Describes the flags field of struct thread_core_local.
 *
 * The field holds two THREAD_CLF_MASK-wide (4-bit) groups: the current
 * contexts at THREAD_CLF_CURR_SHIFT and the previously saved contexts
 * at THREAD_CLF_SAVED_SHIFT. Within a group, one bit per context kind:
 * temporary stack, abort, IRQ and FIQ.
 */
#define THREAD_CLF_SAVED_SHIFT			4
#define THREAD_CLF_CURR_SHIFT			0
#define THREAD_CLF_MASK				0xf
#define THREAD_CLF_TMP_SHIFT			0
#define THREAD_CLF_ABORT_SHIFT			1
#define THREAD_CLF_IRQ_SHIFT			2
#define THREAD_CLF_FIQ_SHIFT			3

/* Bit values of one group, shift with THREAD_CLF_{CURR,SAVED}_SHIFT */
#define THREAD_CLF_TMP				(1 << THREAD_CLF_TMP_SHIFT)
#define THREAD_CLF_ABORT			(1 << THREAD_CLF_ABORT_SHIFT)
#define THREAD_CLF_IRQ				(1 << THREAD_CLF_IRQ_SHIFT)
#define THREAD_CLF_FIQ				(1 << THREAD_CLF_FIQ_SHIFT)
98 
99 #ifndef __ASSEMBLER__
100 extern const void *stack_tmp_export;
101 extern const uint32_t stack_tmp_stride;
102 extern struct thread_ctx threads[];
103 
104 /*
105  * During boot note the part of code and data that needs to be mapped while
106  * in user mode. The provided address and size have to be page aligned.
107  * Note that the code and data will be mapped at the lowest possible
108  * addresses available for user space (see core_mmu_get_user_va_range()).
109  */
110 extern long thread_user_kcode_offset;
111 
/*
 * Initializes VBAR for current CPU (called by thread_init_per_cpu())
 */
115 void thread_init_vbar(vaddr_t addr);
116 
117 void thread_excp_vect(void);
118 void thread_excp_vect_workaround(void);
119 void thread_excp_vect_workaround_a15(void);
120 void thread_excp_vect_end(void);
121 
122 /*
123  * Assembly function as the first function in a thread.  Handles a stdcall,
124  * a0-a3 holds the parameters. Hands over to __thread_std_smc_entry() when
125  * everything is set up and does some post processing once
126  * __thread_std_smc_entry() returns.
127  */
128 void thread_std_smc_entry(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3,
129 			  uint32_t a4, uint32_t a5);
130 uint32_t __thread_std_smc_entry(uint32_t a0, uint32_t a1, uint32_t a2,
131 				uint32_t a3, uint32_t a4, uint32_t a5);
132 
133 void thread_sp_alloc_and_run(struct thread_smc_args *args);
134 
135 /*
136  * Resumes execution of currently active thread by restoring context and
137  * jumping to the instruction where to continue execution.
138  *
139  * Arguments supplied by non-secure world will be copied into the saved
140  * context of the current thread if THREAD_FLAGS_COPY_ARGS_ON_RETURN is set
141  * in the flags field in the thread context.
142  */
143 void thread_resume(struct thread_ctx_regs *regs);
144 
145 uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
146 				  uint32_t *exit_status0,
147 				  uint32_t *exit_status1);
148 
149 /*
150  * Private functions made available for thread_asm.S
151  */
152 
153 /* Returns the temp stack for current CPU */
154 void *thread_get_tmp_sp(void);
155 
/*
 * Marks the current thread as suspended and updates the flags
 * for the thread context (see thread_resume() for use of the flags).
 * Returns thread index of the thread that was suspended.
 */
161 int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc);
162 
163 /*
164  * Marks the current thread as free.
165  */
166 void thread_state_free(void);
167 
168 /* Returns a pointer to the saved registers in current thread context. */
169 struct thread_ctx_regs *thread_get_ctx_regs(void);
170 
171 #ifdef ARM32
172 /* Sets sp for abort mode */
173 void thread_set_abt_sp(vaddr_t sp);
174 
175 /* Sets sp for undefined mode */
176 void thread_set_und_sp(vaddr_t sp);
177 
178 /* Sets sp for irq mode */
179 void thread_set_irq_sp(vaddr_t sp);
180 
181 /* Sets sp for fiq mode */
182 void thread_set_fiq_sp(vaddr_t sp);
183 
184 /* Read usr_sp banked CPU register */
185 uint32_t thread_get_usr_sp(void);
186 #endif /*ARM32*/
187 
188 /* Checks stack canaries */
189 void thread_check_canaries(void);
190 
191 void thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3,
192 			  uint32_t a4, uint32_t a5);
193 void thread_resume_from_rpc(uint32_t thread_id, uint32_t a0, uint32_t a1,
194 			    uint32_t a2, uint32_t a3);
195 void thread_lock_global(void);
196 void thread_unlock_global(void);
197 
198 
/*
 * Suspends current thread and temporarily exits to non-secure world.
 * This function returns later when non-secure world returns.
 *
 * The purpose of this function is to request services from non-secure
 * world.
 */
#define THREAD_RPC_NUM_ARGS     4
#ifdef CFG_CORE_FFA
/*
 * RPC argument words when using FF-A: @call holds the words passed on
 * the way out to normal world and @ret the words received when the
 * thread is resumed. The field names (w1, w4..w6) presumably mirror
 * the FF-A registers of the same names -- confirm against the FF-A
 * message handling code. @pad fixes the union at
 * THREAD_RPC_NUM_ARGS 32-bit words.
 */
struct thread_rpc_arg {
	union {
		struct {
			uint32_t w1;
			uint32_t w4;
			uint32_t w5;
			uint32_t w6;
		} call;
		struct {
			uint32_t w4;
			uint32_t w5;
			uint32_t w6;
		} ret;
		uint32_t pad[THREAD_RPC_NUM_ARGS];
	};
};

void thread_rpc(struct thread_rpc_arg *rpc_arg);
#else
/* @rv holds the arguments on the call and the return values on resume */
void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]);
#endif
229 
230 /*
231  * Called from assembly only, vector_fast_smc_entry(). Handles a fast SMC
232  * by dispatching it to the registered fast SMC handler.
233  */
234 void thread_handle_fast_smc(struct thread_smc_args *args);
235 
236 /*
237  * Called from assembly only, vector_std_smc_entry().  Handles a std SMC by
238  * dispatching it to the registered std SMC handler.
239  */
240 uint32_t thread_handle_std_smc(uint32_t a0, uint32_t a1, uint32_t a2,
241 			       uint32_t a3, uint32_t a4, uint32_t a5,
242 			       uint32_t a6, uint32_t a7);
243 
244 /* Called from assembly only. Handles a SVC from user mode. */
245 void thread_svc_handler(struct thread_svc_regs *regs);
246 
247 /* Frees the cache of allocated FS RPC memory */
248 void thread_rpc_shm_cache_clear(struct thread_shm_cache *cache);
249 #endif /*__ASSEMBLER__*/
250 #endif /*THREAD_PRIVATE_H*/
251