/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_IRQ_STACK_H
#define _ASM_X86_IRQ_STACK_H

#include <linux/ptrace.h>

#include <asm/processor.h>

#ifdef CONFIG_X86_64

/*
 * Macro to inline switching to an interrupt stack and invoking function
 * calls from there. The following rules apply:
 *
 * - Ordering:
 *
 *   1. Write the stack pointer into the topmost place of the irq
 *      stack. This ensures that the various unwinders can link back to the
 *      original stack.
 *
 *   2. Switch the stack pointer to the top of the irq stack.
 *
 *   3. Invoke whatever needs to be done (@asm_call argument)
 *
 *   4. Pop the original stack pointer from the top of the irq stack
 *      which brings it back to the original stack where it left off.
 *
 * - Function invocation:
 *
 *   To allow flexible usage of the macro, the actual function code including
 *   the store of the arguments in the call ABI registers is handed in via
 *   the @asm_call argument.
 *
 * - Local variables:
 *
 *   @tos:
 *      The @tos variable holds a pointer to the top of the irq stack and
 *      _must_ be allocated in a non-callee saved register as this is a
 *      restriction coming from objtool.
 *
 *      Note that @tos appears in both the input and output constraints to
 *      ensure that the compiler does not assume that R11 is left untouched
 *      in case this macro is used in some place where the per-CPU interrupt
 *      stack pointer is used again afterwards.
 *
 * - Function arguments:
 *      The function argument(s), if any, have to be defined in register
 *      variables at the place where this is invoked. Storing the
 *      argument(s) in the proper register(s) is part of @asm_call.
 *
 * - Constraints:
 *
 *   The constraints have to be done very carefully because the compiler
 *   does not know about the assembly call.
 *
 *   output:
 *      As documented above, the @tos variable is required to be in the
 *      output constraints to make the compiler aware that R11 cannot be
 *      reused after the asm() statement.
 *
 *      For builds with CONFIG_UNWINDER_FRAME_POINTER, ASM_CALL_CONSTRAINT
 *      is required as well, as it prevents certain creative GCC variants
 *      from misplacing the asm code.
 *
 *   input:
 *    - func:
 *      Immediate, which tells the compiler that the function is referenced.
 *
 *    - tos:
 *      Register. The actual register is defined by the variable declaration.
 *
 *    - function arguments:
 *      The constraints are handed in via the 'argconstr' argument list.
 *      They describe the register arguments which are used in @asm_call.
 *
 *   clobbers:
 *      Function calls can clobber anything except the callee-saved
 *      registers. Tell the compiler.
 */
#define call_on_stack(stack, func, asm_call, argconstr...)		\
{									\
	register void *tos asm("r11");					\
									\
	tos = ((void *)(stack));					\
									\
	asm_inline volatile(						\
	"movq	%%rsp, (%[tos])				\n"		\
	"movq	%[tos], %%rsp				\n"		\
									\
	asm_call							\
									\
	"popq	%%rsp					\n"		\
									\
	: "+r" (tos), ASM_CALL_CONSTRAINT				\
	: [__func] "i" (func), [tos] "r" (tos) argconstr		\
	: "cc", "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10",	\
	  "memory"							\
	);								\
}
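
/*
 * Illustrative sketch of how the pieces compose (kept in a comment, not
 * compiled). example_stack, example_func() and @data are made-up names;
 * ASM_CALL_ARG1 is defined below. With @data in scope:
 *
 *	#define EXAMPLE_CONSTRAINTS	, [arg1] "r" (data)
 *
 *	call_on_stack(example_stack, example_func, ASM_CALL_ARG1,
 *		      EXAMPLE_CONSTRAINTS);
 *
 * Note that the constraint macro carries the leading comma, because
 * @argconstr is spliced verbatim after the [tos] input operand.
 */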
#define ASM_CALL_ARG0							\
	"call %P[__func]				\n"

#define ASM_CALL_ARG1							\
	"movq	%[arg1], %%rdi				\n"		\
	ASM_CALL_ARG0

#define ASM_CALL_ARG2							\
	"movq	%[arg2], %%rsi				\n"		\
	ASM_CALL_ARG1

#define ASM_CALL_ARG3							\
	"movq	%[arg3], %%rdx				\n"		\
	ASM_CALL_ARG2

#define call_on_irqstack(func, asm_call, argconstr...)			\
	call_on_stack(__this_cpu_read(hardirq_stack_ptr),		\
		      func, asm_call, argconstr)

/* Macros to assert type correctness for run_*_on_irqstack macros */
#define assert_function_type(func, proto)				\
	static_assert(__builtin_types_compatible_p(typeof(&func), proto))

#define assert_arg_type(arg, proto)					\
	static_assert(__builtin_types_compatible_p(typeof(arg), proto))

/*
 * Macro to invoke system vector and device interrupt C handlers.
 */
#define call_on_irqstack_cond(func, regs, asm_call, constr, c_args...)	\
{									\
	/*								\
	 * User mode entry and interrupt on the irq stack do not	\
	 * switch stacks. If the entry came from user mode, the	\
	 * task stack is empty.						\
	 */								\
	if (user_mode(regs) || __this_cpu_read(hardirq_stack_inuse)) {	\
		irq_enter_rcu();					\
		func(c_args);						\
		irq_exit_rcu();						\
	} else {							\
		/*							\
		 * Mark the irq stack in use _before_ and unmark it	\
		 * _after_ switching stacks. Interrupts are disabled	\
		 * in both places. Invoke the stack switch macro with	\
		 * the call sequence which matches the above direct	\
		 * invocation.						\
		 */							\
		__this_cpu_write(hardirq_stack_inuse, true);		\
		call_on_irqstack(func, asm_call, constr);		\
		__this_cpu_write(hardirq_stack_inuse, false);		\
	}								\
}

/*
 * Function call sequence for call_on_irqstack() for system vectors.
 *
 * Note that irq_enter_rcu() and irq_exit_rcu() do not use the input
 * mechanism because these functions are global and cannot be optimized out
 * when compiling a particular source file which uses one of these macros.
 *
 * The argument (regs) does not need to be pushed or stashed in a
 * callee-saved register to be safe vs. the irq_enter_rcu() call because
 * the clobbers already prevent the compiler from storing it in a
 * callee-clobbered register. As the compiler has to preserve @regs for the
 * final call to irqentry_exit() anyway, it's likely that this asm magic
 * does not cause extra effort.
 */
#define ASM_CALL_SYSVEC							\
	"call irq_enter_rcu				\n"		\
	ASM_CALL_ARG1							\
	"call irq_exit_rcu				\n"

#define SYSVEC_CONSTRAINTS	, [arg1] "r" (regs)

#define run_sysvec_on_irqstack_cond(func, regs)				\
{									\
	assert_function_type(func, void (*)(struct pt_regs *));	\
	assert_arg_type(regs, struct pt_regs *);			\
									\
	call_on_irqstack_cond(func, regs, ASM_CALL_SYSVEC,		\
			      SYSVEC_CONSTRAINTS, regs);		\
}

/*
 * As in ASM_CALL_SYSVEC above, the clobbers force the compiler to store
 * @regs and @vector in callee-saved registers.
 */
#define ASM_CALL_IRQ							\
	"call irq_enter_rcu				\n"		\
	ASM_CALL_ARG2							\
	"call irq_exit_rcu				\n"

#define IRQ_CONSTRAINTS	, [arg1] "r" (regs), [arg2] "r" ((unsigned long)vector)

#define run_irq_on_irqstack_cond(func, regs, vector)			\
{									\
	assert_function_type(func, void (*)(struct pt_regs *, u32));	\
	assert_arg_type(regs, struct pt_regs *);			\
	assert_arg_type(vector, u32);					\
									\
	call_on_irqstack_cond(func, regs, ASM_CALL_IRQ,			\
			      IRQ_CONSTRAINTS, regs, vector);		\
}
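
/*
 * A simplified sketch of a typical call site (from memory; the
 * authoritative wiring is the DEFINE_IDTENTRY_SYSVEC and
 * DEFINE_IDTENTRY_IRQ glue in <asm/idtentry.h>). sysvec_example and
 * __sysvec_example are made-up names:
 *
 *	__visible noinstr void sysvec_example(struct pt_regs *regs)
 *	{
 *		irqentry_state_t state = irqentry_enter(regs);
 *
 *		instrumentation_begin();
 *		run_sysvec_on_irqstack_cond(__sysvec_example, regs);
 *		instrumentation_end();
 *		irqentry_exit(regs, state);
 *	}
 *
 * Device interrupt entry points do the same with the vector number as
 * second argument, e.g. common_interrupt() invoking:
 *
 *	run_irq_on_irqstack_cond(__common_interrupt, regs, vector);
 *
 * The !PREEMPT_RT do_softirq_own_stack() below is reached in the same
 * spirit from do_softirq() in kernel/softirq.c.
 */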
#ifndef CONFIG_PREEMPT_RT
/*
 * Macro to invoke __do_softirq on the irq stack. This is only called from
 * task context when bottom halves are about to be reenabled and soft
 * interrupts are pending to be processed. The interrupt stack cannot be
 * in use here.
 */
#define do_softirq_own_stack()						\
{									\
	__this_cpu_write(hardirq_stack_inuse, true);			\
	call_on_irqstack(__do_softirq, ASM_CALL_ARG0);			\
	__this_cpu_write(hardirq_stack_inuse, false);			\
}

#endif /* !CONFIG_PREEMPT_RT */

#else /* CONFIG_X86_64 */
/* System vector handlers always run on the stack they interrupted. */
#define run_sysvec_on_irqstack_cond(func, regs)				\
{									\
	irq_enter_rcu();						\
	func(regs);							\
	irq_exit_rcu();							\
}

/* Switches to the irq stack within func() */
#define run_irq_on_irqstack_cond(func, regs, vector)			\
{									\
	irq_enter_rcu();						\
	func(regs, vector);						\
	irq_exit_rcu();							\
}

#endif /* !CONFIG_X86_64 */

#endif /* _ASM_X86_IRQ_STACK_H */