/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2016-2017, Linaro Limited
 * Copyright (c) 2020-2021, Arm Limited
 */

#ifndef KERNEL_THREAD_H
#define KERNEL_THREAD_H

#ifndef __ASSEMBLER__
#include <arm.h>
#include <compiler.h>
#include <kernel/mutex.h>
#include <kernel/vfp.h>
#include <mm/pgt_cache.h>
#include <types_ext.h>
#endif

#define THREAD_ID_0		0
#define THREAD_ID_INVALID	-1

#define THREAD_RPC_MAX_NUM_PARAMS	U(4)

#ifndef __ASSEMBLER__

#ifdef ARM64
/*
 * struct thread_core_local needs to have alignment suitable for a stack
 * pointer since SP_EL1 points to this
 */
#define THREAD_CORE_LOCAL_ALIGNED __aligned(16)
#else
#define THREAD_CORE_LOCAL_ALIGNED __aligned(8)
#endif

struct thread_core_local {
#ifdef ARM32
	uint32_t r[2];
	paddr_t sm_pm_ctx_phys;
#endif
#ifdef ARM64
	uint64_t x[4];
#endif
	vaddr_t tmp_stack_va_end;
	short int curr_thread;
	uint32_t flags;
	vaddr_t abt_stack_va_end;
#ifdef CFG_TEE_CORE_DEBUG
	unsigned int locked_count; /* Number of spinlocks held */
#endif
#ifdef CFG_CORE_DEBUG_CHECK_STACKS
	bool stackcheck_recursion;
#endif
} THREAD_CORE_LOCAL_ALIGNED;

struct thread_vector_table {
	uint32_t std_smc_entry;
	uint32_t fast_smc_entry;
	uint32_t cpu_on_entry;
	uint32_t cpu_off_entry;
	uint32_t cpu_resume_entry;
	uint32_t cpu_suspend_entry;
	uint32_t fiq_entry;
	uint32_t system_off_entry;
	uint32_t system_reset_entry;
};
extern struct thread_vector_table thread_vector_table;

struct thread_user_vfp_state {
	struct vfp_state vfp;
	bool lazy_saved;
	bool saved;
};

#ifdef ARM32
struct thread_smc_args {
	uint32_t a0;	/* SMC function ID */
	uint32_t a1;	/* Parameter */
	uint32_t a2;	/* Parameter */
	uint32_t a3;	/* Thread ID when returning from RPC */
	uint32_t a4;	/* Not used */
	uint32_t a5;	/* Not used */
	uint32_t a6;	/* Not used */
	uint32_t a7;	/* Hypervisor Client ID */
};
#endif /*ARM32*/
#ifdef ARM64
struct thread_smc_args {
	uint64_t a0;	/* SMC function ID */
	uint64_t a1;	/* Parameter */
	uint64_t a2;	/* Parameter */
	uint64_t a3;	/* Thread ID when returning from RPC */
	uint64_t a4;	/* Not used */
	uint64_t a5;	/* Not used */
	uint64_t a6;	/* Not used */
	uint64_t a7;	/* Hypervisor Client ID */
};
#endif /*ARM64*/

#ifdef ARM32
struct thread_abort_regs {
	uint32_t usr_sp;
	uint32_t usr_lr;
	uint32_t pad;
	uint32_t spsr;
	uint32_t elr;
	uint32_t r0;
	uint32_t r1;
	uint32_t r2;
	uint32_t r3;
	uint32_t r4;
	uint32_t r5;
	uint32_t r6;
	uint32_t r7;
	uint32_t r8;
	uint32_t r9;
	uint32_t r10;
	uint32_t r11;
	uint32_t ip;
};
#endif /*ARM32*/
#ifdef ARM64
struct thread_abort_regs {
	uint64_t x0;	/* r0_usr */
	uint64_t x1;	/* r1_usr */
	uint64_t x2;	/* r2_usr */
	uint64_t x3;	/* r3_usr */
	uint64_t x4;	/* r4_usr */
	uint64_t x5;	/* r5_usr */
	uint64_t x6;	/* r6_usr */
	uint64_t x7;	/* r7_usr */
	uint64_t x8;	/* r8_usr */
	uint64_t x9;	/* r9_usr */
	uint64_t x10;	/* r10_usr */
	uint64_t x11;	/* r11_usr */
	uint64_t x12;	/* r12_usr */
	uint64_t x13;	/* r13/sp_usr */
	uint64_t x14;	/* r14/lr_usr */
	uint64_t x15;
	uint64_t x16;
	uint64_t x17;
	uint64_t x18;
	uint64_t x19;
	uint64_t x20;
	uint64_t x21;
	uint64_t x22;
	uint64_t x23;
	uint64_t x24;
	uint64_t x25;
	uint64_t x26;
	uint64_t x27;
	uint64_t x28;
	uint64_t x29;
	uint64_t x30;
	uint64_t elr;
	uint64_t spsr;
	uint64_t sp_el0;
};
#endif /*ARM64*/

#ifdef ARM32
struct thread_svc_regs {
	uint32_t spsr;
	uint32_t r0;
	uint32_t r1;
	uint32_t r2;
	uint32_t r3;
	uint32_t r4;
	uint32_t r5;
	uint32_t r6;
	uint32_t r7;
	uint32_t lr;
};
#endif /*ARM32*/
#ifdef ARM64
struct thread_svc_regs {
	uint64_t elr;
	uint64_t spsr;
	uint64_t x0;	/* r0_usr */
	uint64_t x1;	/* r1_usr */
	uint64_t x2;	/* r2_usr */
	uint64_t x3;	/* r3_usr */
	uint64_t x4;	/* r4_usr */
	uint64_t x5;	/* r5_usr */
	uint64_t x6;	/* r6_usr */
	uint64_t x7;	/* r7_usr */
	uint64_t x8;	/* r8_usr */
	uint64_t x9;	/* r9_usr */
	uint64_t x10;	/* r10_usr */
	uint64_t x11;	/* r11_usr */
	uint64_t x12;	/* r12_usr */
	uint64_t x13;	/* r13/sp_usr */
	uint64_t x14;	/* r14/lr_usr */
	uint64_t x30;
	uint64_t sp_el0;
#ifdef CFG_SECURE_PARTITION
	uint64_t x15;
	uint64_t x16;
	uint64_t x17;
	uint64_t x18;
	uint64_t x19;
	uint64_t x20;
	uint64_t x21;
	uint64_t x22;
	uint64_t x23;
	uint64_t x24;
	uint64_t x25;
	uint64_t x26;
	uint64_t x27;
	uint64_t x28;
	uint64_t x29;
#endif
	uint64_t pad;
} __aligned(16);
#endif /*ARM64*/

#ifdef ARM32
struct thread_ctx_regs {
	uint32_t r0;
	uint32_t r1;
	uint32_t r2;
	uint32_t r3;
	uint32_t r4;
	uint32_t r5;
	uint32_t r6;
	uint32_t r7;
	uint32_t r8;
	uint32_t r9;
	uint32_t r10;
	uint32_t r11;
	uint32_t r12;
	uint32_t usr_sp;
	uint32_t usr_lr;
	uint32_t svc_spsr;
	uint32_t svc_sp;
	uint32_t svc_lr;
	uint32_t pc;
	uint32_t cpsr;
};
#endif /*ARM32*/

#ifdef ARM64
struct thread_ctx_regs {
	uint64_t sp;
	uint64_t pc;
	uint64_t cpsr;
	uint64_t x[31];
	uint64_t tpidr_el0;
};
#endif /*ARM64*/

struct thread_specific_data {
	TAILQ_HEAD(, ts_session) sess_stack;
	struct ts_ctx *ctx;
	struct pgt_cache pgt_cache;
#ifdef CFG_CORE_FFA
	uint32_t rpc_target_info;
#endif
	uint32_t abort_type;
	uint32_t abort_descr;
	vaddr_t abort_va;
	unsigned int abort_core;
	struct thread_abort_regs abort_regs;
#ifdef CFG_CORE_DEBUG_CHECK_STACKS
	bool stackcheck_recursion;
#endif
	unsigned int syscall_recursion;
};

struct user_mode_ctx;
#ifdef CFG_WITH_ARM_TRUSTED_FW
/*
 * These five functions have a __weak default implementation which does
 * nothing. Platforms are expected to override them if needed.
 */
unsigned long thread_cpu_off_handler(unsigned long a0, unsigned long a1);
unsigned long thread_cpu_suspend_handler(unsigned long a0, unsigned long a1);
unsigned long thread_cpu_resume_handler(unsigned long a0, unsigned long a1);
unsigned long thread_system_off_handler(unsigned long a0, unsigned long a1);
unsigned long thread_system_reset_handler(unsigned long a0, unsigned long a1);
#endif /*CFG_WITH_ARM_TRUSTED_FW*/

void thread_init_primary(void);
void thread_init_per_cpu(void);

struct thread_core_local *thread_get_core_local(void);

/*
 * Sets the stacks to be used by the different threads. Use THREAD_ID_0 for
 * the first stack, THREAD_ID_0 + 1 for the next, and so on.
 *
 * Returns true on success and false on errors.
 */
bool thread_init_stack(uint32_t stack_id, vaddr_t sp);

/*
 * Initializes thread contexts. Called in thread_init_boot_thread() if
 * virtualization is disabled. Otherwise, the virtualization subsystem
 * calls it for every new guest.
 */
void thread_init_threads(void);

/*
 * Called by the init CPU. Sets temporary stack mode for all CPUs
 * (curr_thread = -1 and THREAD_CLF_TMP) and sets the temporary stack limit for
 * the init CPU.
 */
void thread_init_thread_core_local(void);

/*
 * Initializes a thread to be used during boot.
 */
void thread_init_boot_thread(void);

/*
 * Clears the current thread id.
 * Only supposed to be used during initialization.
 */
void thread_clr_boot_thread(void);

/*
 * Returns the current thread id.
 */
short int thread_get_id(void);

/*
 * Returns the current thread id, or -1 on failure.
 */
short int thread_get_id_may_fail(void);

/* Returns Thread Specific Data (TSD) pointer. */
struct thread_specific_data *thread_get_tsd(void);

/*
 * Sets the foreign interrupt status for the current thread; must only be
 * called from an active thread context.
 *
 * enable == true  -> enable foreign interrupts
 * enable == false -> disable foreign interrupts
 */
void thread_set_foreign_intr(bool enable);

/*
 * Restores the foreign interrupt status (in CPSR) for the current thread;
 * must only be called from an active thread context.
 */
void thread_restore_foreign_intr(void);

/*
 * Defines the bits for the exception mask used by the
 * thread_*_exceptions() functions below.
 * These definitions are compatible with both ARM32 and ARM64.
 */
#if defined(CFG_ARM_GICV3)
#define THREAD_EXCP_FOREIGN_INTR	(ARM32_CPSR_F >> ARM32_CPSR_F_SHIFT)
#define THREAD_EXCP_NATIVE_INTR		(ARM32_CPSR_I >> ARM32_CPSR_F_SHIFT)
#else
#define THREAD_EXCP_FOREIGN_INTR	(ARM32_CPSR_I >> ARM32_CPSR_F_SHIFT)
#define THREAD_EXCP_NATIVE_INTR		(ARM32_CPSR_F >> ARM32_CPSR_F_SHIFT)
#endif
#define THREAD_EXCP_ALL			(THREAD_EXCP_FOREIGN_INTR	\
					 | THREAD_EXCP_NATIVE_INTR	\
					 | (ARM32_CPSR_A >> ARM32_CPSR_F_SHIFT))

/*
 * thread_get_exceptions() - return current exception mask
 */
uint32_t thread_get_exceptions(void);

/*
 * thread_set_exceptions() - set exception mask
 * @exceptions: exception mask to set
 *
 * Any previous exception mask is replaced by this exception mask, that is,
 * old bits are cleared and replaced by these.
 */
void thread_set_exceptions(uint32_t exceptions);

/*
 * thread_mask_exceptions() - Masks (disables) specified asynchronous exceptions
 * @exceptions	exceptions to mask
 * @returns old exception state
 */
uint32_t thread_mask_exceptions(uint32_t exceptions);

/*
 * thread_unmask_exceptions() - Unmasks asynchronous exceptions
 * @state	Old asynchronous exception state to restore (returned by
 *		thread_mask_exceptions())
 */
void thread_unmask_exceptions(uint32_t state);
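
/*
 * Example (illustrative sketch only, not part of this interface): a
 * typical pattern for protecting a critical section from foreign
 * interrupts by saving and restoring the exception mask.
 *
 *	uint32_t excep = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
 *
 *	... critical section ...
 *
 *	thread_unmask_exceptions(excep);
 */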

static inline bool __nostackcheck thread_foreign_intr_disabled(void)
{
	return !!(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
}

#ifdef CFG_WITH_VFP
/*
 * thread_kernel_enable_vfp() - Temporarily enables usage of VFP
 *
 * Foreign interrupts are masked while VFP is enabled. User space must not be
 * entered before thread_kernel_disable_vfp() has been called to disable VFP
 * and restore the foreign interrupt status.
 *
 * This function may only be called from an active thread context and may
 * not be called again before thread_kernel_disable_vfp() has been called.
 *
 * VFP state is saved as needed.
 *
 * Returns a state variable that should be passed to
 * thread_kernel_disable_vfp().
 */
uint32_t thread_kernel_enable_vfp(void);

/*
 * thread_kernel_disable_vfp() - Disables usage of VFP
 * @state:	state variable returned by thread_kernel_enable_vfp()
 *
 * Disables usage of VFP and restores foreign interrupt status after a call to
 * thread_kernel_enable_vfp().
 *
 * This function may only be called after a call to
 * thread_kernel_enable_vfp().
 */
void thread_kernel_disable_vfp(uint32_t state);
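
/*
 * Example (illustrative sketch only): kernel mode VFP usage is
 * bracketed by the two calls above, and user space must not be entered
 * in between.
 *
 *	uint32_t vfp_state = thread_kernel_enable_vfp();
 *
 *	... code using VFP/SIMD registers ...
 *
 *	thread_kernel_disable_vfp(vfp_state);
 */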

/*
 * thread_kernel_save_vfp() - Saves kernel vfp state if enabled
 */
void thread_kernel_save_vfp(void);

/*
 * thread_kernel_restore_vfp() - Restores kernel vfp state
 */
void thread_kernel_restore_vfp(void);

/*
 * thread_user_enable_vfp() - Enables vfp for user mode usage
 * @uvfp:	pointer to where to save the vfp state if needed
 */
void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp);
#else /*CFG_WITH_VFP*/
static inline void thread_kernel_save_vfp(void)
{
}

static inline void thread_kernel_restore_vfp(void)
{
}
#endif /*CFG_WITH_VFP*/

/*
 * thread_user_save_vfp() - Saves the user vfp state if enabled
 */
#ifdef CFG_WITH_VFP
void thread_user_save_vfp(void);
#else
static inline void thread_user_save_vfp(void)
{
}
#endif

/*
 * thread_user_clear_vfp() - Clears the vfp state
 * @uctx:	pointer to user mode context containing the saved state to clear
 */
#ifdef CFG_WITH_VFP
void thread_user_clear_vfp(struct user_mode_ctx *uctx);
#else
static inline void thread_user_clear_vfp(struct user_mode_ctx *uctx __unused)
{
}
#endif

/*
 * thread_enter_user_mode() - Enters user mode
 * @a0:		Passed in r/x0 for entry_func
 * @a1:		Passed in r/x1 for entry_func
 * @a2:		Passed in r/x2 for entry_func
 * @a3:		Passed in r/x3 for entry_func
 * @user_sp:	Assigned sp value in user mode
 * @entry_func:	Function to execute in user mode
 * @is_32bit:	True if TA should execute in Aarch32, false if Aarch64
 * @exit_status0: Pointer to opaque exit status 0
 * @exit_status1: Pointer to opaque exit status 1
 *
 * This function enters user mode with the arguments described above.
 * @exit_status0 and @exit_status1 are filled in by thread_unwind_user_mode()
 * when returning back to the caller of this function through an exception
 * handler.
 *
 * @Returns what's passed in "ret" to thread_unwind_user_mode()
 */
uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
				unsigned long a2, unsigned long a3,
				unsigned long user_sp,
				unsigned long entry_func, bool is_32bit,
				uint32_t *exit_status0, uint32_t *exit_status1);

/*
 * thread_unwind_user_mode() - Unwinds kernel stack from user entry
 * @ret:	Value to return from thread_enter_user_mode()
 * @exit_status0: Exit status 0
 * @exit_status1: Exit status 1
 *
 * This is the function that exception handlers can return into
 * to resume execution in kernel mode instead of user mode.
 *
 * This function is closely coupled with thread_enter_user_mode() since it
 * needs to restore registers saved by thread_enter_user_mode() and when it
 * returns make it look like thread_enter_user_mode() just returned. It is
 * expected that the stack pointer is where thread_enter_user_mode() left
 * it. The stack will be unwound and the function will return to where
 * thread_enter_user_mode() was called from. @exit_status0 and @exit_status1
 * are filled in via the corresponding pointers supplied to
 * thread_enter_user_mode().
 */
void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
			     uint32_t exit_status1);

#ifdef ARM64
/*
 * thread_get_saved_thread_sp() - Returns the saved sp of current thread
 *
 * When switching from the thread stack pointer the value is stored
 * separately in the current thread context. This function returns this
 * saved value.
 *
 * @returns stack pointer
 */
vaddr_t thread_get_saved_thread_sp(void);
#endif /*ARM64*/

/*
 * Provides addresses and size of kernel code that must be mapped while in
 * user mode.
 */
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
void thread_get_user_kcode(struct mobj **mobj, size_t *offset,
			   vaddr_t *va, size_t *sz);
#else
static inline void thread_get_user_kcode(struct mobj **mobj, size_t *offset,
					 vaddr_t *va, size_t *sz)
{
	*mobj = NULL;
	*offset = 0;
	*va = 0;
	*sz = 0;
}
#endif

/*
 * Provides addresses and size of kernel (rw) data that must be mapped
 * while in user mode.
 */
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
void thread_get_user_kdata(struct mobj **mobj, size_t *offset,
			   vaddr_t *va, size_t *sz);
#else
static inline void thread_get_user_kdata(struct mobj **mobj, size_t *offset,
					 vaddr_t *va, size_t *sz)
{
	*mobj = NULL;
	*offset = 0;
	*va = 0;
	*sz = 0;
}
#endif

/*
 * Returns the start address (bottom) of the stack for the current thread,
 * zero if there is no current thread.
 */
vaddr_t thread_stack_start(void);

/* Returns the stack size for the current thread */
size_t thread_stack_size(void);

/*
 * Returns the start (top, lowest address) and end (bottom, highest address) of
 * the current stack (thread, temporary or abort stack).
 * When CFG_CORE_DEBUG_CHECK_STACKS=y, the @hard parameter tells if the hard or
 * soft limits are queried. The difference between soft and hard is that for
 * the latter, the stack start includes some additional space to let any
 * function overflow the soft limit and still be able to print a stack dump in
 * this case.
 */
bool get_stack_limits(vaddr_t *start, vaddr_t *end, bool hard);

static inline bool __nostackcheck get_stack_soft_limits(vaddr_t *start,
							vaddr_t *end)
{
	return get_stack_limits(start, end, false);
}

static inline bool __nostackcheck get_stack_hard_limits(vaddr_t *start,
							vaddr_t *end)
{
	return get_stack_limits(start, end, true);
}
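
/*
 * Example (illustrative sketch only, where @p is a hypothetical pointer
 * to test): checking whether an address lies within the current stack
 * using the soft limits.
 *
 *	vaddr_t start = 0;
 *	vaddr_t end = 0;
 *
 *	if (get_stack_soft_limits(&start, &end) &&
 *	    (vaddr_t)p >= start && (vaddr_t)p < end)
 *		... @p points into the current stack ...
 */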

bool thread_is_in_normal_mode(void);

/*
 * Returns true if the previous exception was also taken in abort mode.
 *
 * Note: it's only valid to call this function from an abort exception
 * handler before interrupts have been re-enabled.
 */
bool thread_is_from_abort_mode(void);

/*
 * Disables and empties the prealloc RPC cache one reference at a time. If
 * all threads are idle this function returns true and @cookie is set to
 * the cookie of one shm object that was removed from the cache. Once the
 * cache is empty, *cookie is set to 0 and the cache is disabled. If any
 * thread is busy this function returns false.
 */
bool thread_disable_prealloc_rpc_cache(uint64_t *cookie);

/*
 * Enables the prealloc RPC cache. If all threads are idle the cache is
 * enabled and this function returns true. If any thread is busy this
 * function returns false.
 */
bool thread_enable_prealloc_rpc_cache(void);

/**
 * Allocates data for payload buffers.
 *
 * @size:	size in bytes of payload buffer
 *
 * @returns mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_payload(size_t size);

/**
 * Free physical memory previously allocated with thread_rpc_alloc_payload()
 *
 * @mobj:	mobj that describes the buffer
 */
void thread_rpc_free_payload(struct mobj *mobj);

/**
 * Allocate data for payload buffers only shared with the non-secure kernel
 *
 * @size:	size in bytes of payload buffer
 *
 * @returns mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_kernel_payload(size_t size);

/**
 * Free physical memory previously allocated with
 * thread_rpc_alloc_kernel_payload()
 *
 * @mobj:	mobj that describes the buffer
 */
void thread_rpc_free_kernel_payload(struct mobj *mobj);

struct thread_param_memref {
	size_t offs;
	size_t size;
	struct mobj *mobj;
};

struct thread_param_value {
	uint64_t a;
	uint64_t b;
	uint64_t c;
};

/*
 * Note that some arithmetic is done on the values, so it's important to
 * keep them in IN, OUT, INOUT order.
 */
enum thread_param_attr {
	THREAD_PARAM_ATTR_NONE = 0,
	THREAD_PARAM_ATTR_VALUE_IN,
	THREAD_PARAM_ATTR_VALUE_OUT,
	THREAD_PARAM_ATTR_VALUE_INOUT,
	THREAD_PARAM_ATTR_MEMREF_IN,
	THREAD_PARAM_ATTR_MEMREF_OUT,
	THREAD_PARAM_ATTR_MEMREF_INOUT,
};

struct thread_param {
	enum thread_param_attr attr;
	union {
		struct thread_param_memref memref;
		struct thread_param_value value;
	} u;
};

#define THREAD_PARAM_MEMREF(_direction, _mobj, _offs, _size) \
	(struct thread_param){ \
		.attr = THREAD_PARAM_ATTR_MEMREF_ ## _direction, .u.memref = { \
			.mobj = (_mobj), .offs = (_offs), .size = (_size) } \
	}

#define THREAD_PARAM_VALUE(_direction, _a, _b, _c) \
	(struct thread_param){ \
		.attr = THREAD_PARAM_ATTR_VALUE_ ## _direction, .u.value = { \
			.a = (_a), .b = (_b), .c = (_c) } \
	}

/**
 * Does an RPC using a preallocated argument buffer
 * @cmd:	RPC cmd
 * @num_params:	number of parameters
 * @params:	RPC parameters
 * @returns RPC return value
 */
uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
			struct thread_param *params);
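
/*
 * Example (illustrative sketch only, with a hypothetical command id
 * MY_RPC_CMD and buffer size sz): allocate a payload buffer, pass it to
 * normal world together with a value parameter, then free it.
 *
 *	struct mobj *mobj = thread_rpc_alloc_payload(sz);
 *	struct thread_param params[2] = {
 *		THREAD_PARAM_MEMREF(INOUT, mobj, 0, sz),
 *		THREAD_PARAM_VALUE(IN, 0, 0, 0),
 *	};
 *	uint32_t res = thread_rpc_cmd(MY_RPC_CMD, 2, params);
 *
 *	... inspect res and the returned memref contents ...
 *
 *	thread_rpc_free_payload(mobj);
 */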

unsigned long thread_smc(unsigned long func_id, unsigned long a1,
			 unsigned long a2, unsigned long a3);
void thread_smccc(struct thread_smc_args *arg_res);

/**
 * Allocate data for payload buffers.
 * Buffer is exported to user mode applications.
 *
 * @size:	size in bytes of payload buffer
 *
 * @returns mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_global_payload(size_t size);

/**
 * Free physical memory previously allocated with
 * thread_rpc_alloc_global_payload()
 *
 * @mobj:	mobj that describes the buffer
 */
void thread_rpc_free_global_payload(struct mobj *mobj);

/*
 * enum thread_shm_type - type of non-secure shared memory
 * @THREAD_SHM_TYPE_APPLICATION - user space application shared memory
 * @THREAD_SHM_TYPE_KERNEL_PRIVATE - kernel private shared memory
 * @THREAD_SHM_TYPE_GLOBAL - user space and kernel shared memory
 */
enum thread_shm_type {
	THREAD_SHM_TYPE_APPLICATION,
	THREAD_SHM_TYPE_KERNEL_PRIVATE,
	THREAD_SHM_TYPE_GLOBAL,
};

/*
 * enum thread_shm_cache_user - user of a cache allocation
 * @THREAD_SHM_CACHE_USER_SOCKET - socket communication
 * @THREAD_SHM_CACHE_USER_FS - filesystem access
 * @THREAD_SHM_CACHE_USER_I2C - I2C communication
 *
 * To ensure that users of the shared memory cache don't interfere with
 * each other, a unique ID per user is used.
 */
enum thread_shm_cache_user {
	THREAD_SHM_CACHE_USER_SOCKET,
	THREAD_SHM_CACHE_USER_FS,
	THREAD_SHM_CACHE_USER_I2C,
};

/*
 * Returns a pointer to the cached RPC memory. Each thread and @user tuple
 * has a unique cache. The pointer is guaranteed to point to a large enough
 * area or to be NULL.
 */
void *thread_rpc_shm_cache_alloc(enum thread_shm_cache_user user,
				 enum thread_shm_type shm_type,
				 size_t size, struct mobj **mobj);
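
/*
 * Example (illustrative sketch only, where bufsize is a hypothetical
 * size): fetching a cached buffer for filesystem RPC.
 *
 *	struct mobj *mobj = NULL;
 *	void *va = thread_rpc_shm_cache_alloc(THREAD_SHM_CACHE_USER_FS,
 *					      THREAD_SHM_TYPE_APPLICATION,
 *					      bufsize, &mobj);
 *
 *	if (!va)
 *		... allocation failed ...
 */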

#endif /*__ASSEMBLER__*/

#endif /*KERNEL_THREAD_H*/