/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/processor.h
 *
 *  Copyright (C) 1995-1999 Russell King
 */

#ifndef __ASM_ARM_PROCESSOR_H
#define __ASM_ARM_PROCESSOR_H

#ifdef __KERNEL__

#include <asm/hw_breakpoint.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/unified.h>
#include <asm/vdso/processor.h>

#ifdef __KERNEL__
#define STACK_TOP	((current->personality & ADDR_LIMIT_32BIT) ? \
			 TASK_SIZE : TASK_SIZE_26)
#define STACK_TOP_MAX	TASK_SIZE
#endif

struct debug_info {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	struct perf_event	*hbp[ARM_MAX_HBP_SLOTS];
#endif
};

struct thread_struct {
						/* fault info	  */
	unsigned long		address;
	unsigned long		trap_no;
	unsigned long		error_code;
						/* debugging	  */
	struct debug_info	debug;
};

/*
 * Everything usercopied to/from thread_struct is statically-sized, so
 * no hardened usercopy whitelist is needed.
 */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	*offset = *size = 0;
}
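
/*
 * Illustrative sketch only, not part of this header: if thread_struct
 * ever gained a dynamically-sized member that is copied to/from user
 * space, the helper above would have to describe that region for
 * hardened usercopy instead of reporting it empty, roughly (the member
 * name "fpbuf" is hypothetical):
 *
 *	*offset = offsetof(struct thread_struct, fpbuf);
 *	*size = sizeof_field(struct thread_struct, fpbuf);
 */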

#define INIT_THREAD	{ }

#define start_thread(regs,pc,sp)					\
({									\
	unsigned long r7, r8, r9;					\
									\
	if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC)) {			\
		r7 = regs->ARM_r7;					\
		r8 = regs->ARM_r8;					\
		r9 = regs->ARM_r9;					\
	}								\
	memset(regs->uregs, 0, sizeof(regs->uregs));			\
	if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) &&			\
	    current->personality & FDPIC_FUNCPTRS) {			\
		regs->ARM_r7 = r7;					\
		regs->ARM_r8 = r8;					\
		regs->ARM_r9 = r9;					\
		regs->ARM_r10 = current->mm->start_data;		\
	} else if (!IS_ENABLED(CONFIG_MMU))				\
		regs->ARM_r10 = current->mm->start_data;		\
	if (current->personality & ADDR_LIMIT_32BIT)			\
		regs->ARM_cpsr = USR_MODE;				\
	else								\
		regs->ARM_cpsr = USR26_MODE;				\
	if (elf_hwcap & HWCAP_THUMB && pc & 1)				\
		regs->ARM_cpsr |= PSR_T_BIT;				\
	regs->ARM_cpsr |= PSR_ENDSTATE;					\
	regs->ARM_pc = pc & ~1;		/* pc */			\
	regs->ARM_sp = sp;		/* sp */			\
})
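
/*
 * Usage sketch (illustrative, not code from this file): a binfmt
 * loader hands the freshly built user context to start_thread() with
 * the new program's entry point and initial stack pointer, roughly:
 *
 *	struct pt_regs *regs = current_pt_regs();
 *
 *	start_thread(regs, elf_entry, stack_pointer);
 *
 * "elf_entry" and "stack_pointer" are placeholder names here.  Note
 * that a Thumb entry point is signalled by bit 0 of pc: the macro
 * above moves it into PSR_T_BIT and clears it from ARM_pc.
 */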

/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long __get_wchan(struct task_struct *p);

#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)

#define KSTK_EIP(tsk)	task_pt_regs(tsk)->ARM_pc
#define KSTK_ESP(tsk)	task_pt_regs(tsk)->ARM_sp
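
/*
 * Example (illustrative): sampling another task's saved user-mode
 * program counter and stack pointer, as /proc/<pid>/stat does:
 *
 *	unsigned long pc = KSTK_EIP(task);
 *	unsigned long sp = KSTK_ESP(task);
 *
 * task_pt_regs() points at the pt_regs frame saved at the top of the
 * task's kernel stack on entry from user space.
 */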

#ifdef CONFIG_SMP
#define __ALT_SMP_ASM(smp, up)						\
	"9998:	" smp "\n"						\
	"	.pushsection \".alt.smp.init\", \"a\"\n"		\
	"	.long	9998b - .\n"					\
	"	" up "\n"						\
	"	.popsection\n"
#else
#define __ALT_SMP_ASM(smp, up)	up
#endif
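
/*
 * __ALT_SMP_ASM() emits the "smp" instruction inline and records its
 * address in the .alt.smp.init section; an SMP-capable kernel that
 * finds itself booting on a uniprocessor patches each recorded site
 * over to the "up" variant.  Illustrative use (a sketch, not code from
 * this header) wrapping an SMP-only hint with a UP fallback:
 *
 *	asm volatile(__ALT_SMP_ASM("sev", "nop"));
 *
 * prefetchw() below uses the same mechanism to fall back from "pldw"
 * to plain "pld".
 */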

/*
 * Prefetching support - ARMv5 and later.
 */
#if __LINUX_ARM_ARCH__ >= 5

#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *ptr)
{
	__asm__ __volatile__(
		"pld\t%a0"
		:: "p" (ptr));
}
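
/*
 * Illustrative use (hypothetical structure and helper names): issue a
 * read prefetch for the next node while the current one is processed,
 * so the PLD latency overlaps with useful work:
 *
 *	for (p = head; p; p = p->next) {
 *		prefetch(p->next);
 *		do_work(p);
 *	}
 */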

#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define ARCH_HAS_PREFETCHW
static inline void prefetchw(const void *ptr)
{
	__asm__ __volatile__(
		".arch_extension mp\n"
		__ALT_SMP_ASM(
			"pldw\t%a0",
			"pld\t%a0"
		)
		:: "p" (ptr));
}
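
/*
 * Illustrative use (hypothetical names): when a line is about to be
 * written, PLDW requests it in an exclusive state up front so the
 * later stores do not stall on coherency traffic:
 *
 *	prefetchw(&stats->counter);
 *	...
 *	stats->counter += delta;
 */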
#endif
#endif

#endif

#endif /* __ASM_ARM_PROCESSOR_H */