/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_STACKTRACE_H
#define __ASM_STACKTRACE_H

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/types.h>
#include <linux/llist.h>

#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/sdei.h>

enum stack_type {
	STACK_TYPE_UNKNOWN,
	STACK_TYPE_TASK,
	STACK_TYPE_IRQ,
	STACK_TYPE_OVERFLOW,
	STACK_TYPE_SDEI_NORMAL,
	STACK_TYPE_SDEI_CRITICAL,
	__NR_STACK_TYPES
};

struct stack_info {
	unsigned long low;
	unsigned long high;
	enum stack_type type;
};

/*
 * A snapshot of a frame record or fp/lr register values, along with some
 * accounting information necessary for robust unwinding.
 *
 * @fp:          The fp value in the frame record (or the real fp)
 * @pc:          The lr value in the frame record (or the real lr)
 *
 * @stacks_done: Stacks which have been entirely unwound, and to which it is
 *               no longer valid to unwind.
 *
 * @prev_fp:     The fp that pointed to this frame record, or a synthetic
 *               value of 0. This is used to ensure that within a stack,
 *               each subsequent frame record is at an increasing address.
 * @prev_type:   The type of stack this frame record was on, or a synthetic
 *               value of STACK_TYPE_UNKNOWN. This is used to detect a
 *               transition from one stack to another.
 */
struct stackframe {
	unsigned long fp;
	unsigned long pc;
	DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
	unsigned long prev_fp;
	enum stack_type prev_type;
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
#endif
};

extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
			    bool (*fn)(void *, unsigned long), void *data);
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
			   const char *loglvl);
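
/*
 * Illustrative sketch, not part of the original header: one plausible
 * consumer callback for walk_stackframe(). The callback is invoked once per
 * unwound PC and returns true to continue the walk, false to stop it. All
 * "example_" names are hypothetical.
 */
struct example_trace {
	unsigned long entries[16];
	unsigned int nr;
};

static inline bool example_save_entry(void *data, unsigned long pc)
{
	struct example_trace *trace = data;

	if (trace->nr >= 16)
		return false;		/* buffer full: stop unwinding */

	trace->entries[trace->nr++] = pc;
	return true;			/* keep walking the stack */
}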

DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);

static inline bool on_stack(unsigned long sp, unsigned long size,
			    unsigned long low, unsigned long high,
			    enum stack_type type, struct stack_info *info)
{
	if (!low)
		return false;

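	/*
	 * The sp + size < sp test rejects unsigned wrap-around, so a bogus
	 * (sp, size) pair cannot wrap past the top of the address space and
	 * appear to fit below @high.
	 */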
	if (sp < low || sp + size < sp || sp + size > high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = type;
	}
	return true;
}

static inline bool on_irq_stack(unsigned long sp, unsigned long size,
				struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
	unsigned long high = low + IRQ_STACK_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_IRQ, info);
}

static inline bool on_task_stack(const struct task_struct *tsk,
				 unsigned long sp, unsigned long size,
				 struct stack_info *info)
{
	unsigned long low = (unsigned long)task_stack_page(tsk);
	unsigned long high = low + THREAD_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_TASK, info);
}
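
/*
 * Illustrative sketch, not part of the original header: the kind of check a
 * caller might make with on_task_stack(), e.g. verifying that a saved
 * pt_regs at @sp lies entirely within @tsk's task stack. The "example_"
 * helper is hypothetical.
 */
static inline bool example_regs_on_task_stack(const struct task_struct *tsk,
					      unsigned long sp)
{
	return on_task_stack(tsk, sp, sizeof(struct pt_regs), NULL);
}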

#ifdef CONFIG_VMAP_STACK
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);

static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
				     struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
	unsigned long high = low + OVERFLOW_STACK_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
}
#else
static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
				     struct stack_info *info) { return false; }
#endif

/*
 * We can only safely access per-cpu stacks from current in a non-preemptible
 * context.
 */
static inline bool on_accessible_stack(const struct task_struct *tsk,
				       unsigned long sp, unsigned long size,
				       struct stack_info *info)
{
	if (info)
		info->type = STACK_TYPE_UNKNOWN;

	if (on_task_stack(tsk, sp, size, info))
		return true;
	if (tsk != current || preemptible())
		return false;
	if (on_irq_stack(sp, size, info))
		return true;
	if (on_overflow_stack(sp, size, info))
		return true;
	if (on_sdei_stack(sp, size, info))
		return true;

	return false;
}
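
/*
 * Illustrative sketch, not part of the original header: the sort of
 * validation an unwinder step performs before dereferencing a frame pointer.
 * A frame record is two 8-byte words (fp, lr), so 16 bytes must be
 * accessible and fp must be at least 8-byte aligned. The "example_" helper
 * is hypothetical and simplified.
 */
static inline bool example_fp_is_accessible(struct task_struct *tsk,
					    unsigned long fp,
					    struct stack_info *info)
{
	if (fp & 0x7)
		return false;	/* misaligned frame pointer */

	return on_accessible_stack(tsk, fp, 16, info);
}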

void start_backtrace(struct stackframe *frame, unsigned long fp,
		     unsigned long pc);
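
/*
 * Illustrative sketch, not part of the original header: one plausible way to
 * drive the unwinder declared above, counting frames by stepping
 * unwind_frame() until it reports an error (e.g. -ENOENT at the final
 * frame). The "example_" helper is hypothetical.
 */
static inline int example_count_frames(struct task_struct *tsk,
				       unsigned long fp, unsigned long pc)
{
	struct stackframe frame;
	int count = 0;

	start_backtrace(&frame, fp, pc);

	while (unwind_frame(tsk, &frame) == 0)
		count++;

	return count;
}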

#endif /* __ASM_STACKTRACE_H */