/* From Linux arch/arm/include/asm/assembler.h */
/*
 *  arch/arm/include/asm/assembler.h
 *
 *  Copyright (C) 1996-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This file contains arm architecture specific defines
 *  for the different processors.
 *
 *  Do not include any C declarations in this file - it is included by
 *  assembler source.
 */
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

// No Thumb, hence:
#define W(instr)	instr
#define ARM(instr...)	instr
#define THUMB(instr...)

#ifdef CONFIG_ARM_UNWIND
#define UNWIND(code...)		code
#else
#define UNWIND(code...)
#endif

/*
 * Endian independent macros for shifting bytes within registers.
 */
#ifndef __ARMEB__
#define lspull		lsr
#define lspush		lsl
#define get_byte_0	lsl #0
#define get_byte_1	lsr #8
#define get_byte_2	lsr #16
#define get_byte_3	lsr #24
#define put_byte_0	lsl #0
#define put_byte_1	lsl #8
#define put_byte_2	lsl #16
#define put_byte_3	lsl #24
#else
#define lspull		lsl
#define lspush		lsr
#define get_byte_0	lsr #24
#define get_byte_1	lsr #16
#define get_byte_2	lsr #8
#define get_byte_3	lsl #0
#define put_byte_0	lsl #24
#define put_byte_1	lsl #16
#define put_byte_2	lsl #8
#define put_byte_3	lsl #0
#endif

/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif

/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this a worthwhile thing to do when the cache is not
 * set to write-allocate (this would need further testing on XScale when WA
 * is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...)	code
#else
#define CALGN(code...)
#endif

/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq_notrace
	cpsid	i
	.endm

	.macro	enable_irq_notrace
	cpsie	i
	.endm
#else
	.macro	disable_irq_notrace
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq_notrace
	msr	cpsr_c, #SVC_MODE
	.endm
#endif

	.macro	asm_trace_hardirqs_off
#if defined(CONFIG_TRACE_IRQFLAGS)
	stmdb	sp!, {r0-r3, ip, lr}
	bl	trace_hardirqs_off
	ldmia	sp!, {r0-r3, ip, lr}
#endif
	.endm

	.macro	asm_trace_hardirqs_on_cond, cond
#if defined(CONFIG_TRACE_IRQFLAGS)
	/*
	 * actually the registers should be pushed and pop'd conditionally, but
	 * after bl the flags are certainly clobbered
	 */
	stmdb	sp!, {r0-r3, ip, lr}
	bl\cond	trace_hardirqs_on
	ldmia	sp!, {r0-r3, ip, lr}
#endif
	.endm

	.macro	asm_trace_hardirqs_on
	asm_trace_hardirqs_on_cond al
	.endm

	.macro	disable_irq
	disable_irq_notrace
	asm_trace_hardirqs_off
	.endm

	.macro	enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm
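
/*
 * Illustrative sketch only, not part of the original header: callers such
 * as the copy/checksum templates use the endian independent macros above to
 * pick bytes out of a loaded word and to join misaligned words.  The
 * register choices below are arbitrary assumptions.
 *
 *	ldr	r7, [r1], #4		@ load a word from the source
 *	mov	r3, r7, get_byte_0	@ low byte of r3 = byte at offset 0,
 *	strb	r3, [r0], #1		@ whichever endianness is in use
 *	mov	r3, r7, get_byte_1	@ then the byte at offset 1, and so on
 *	strb	r3, [r0], #1
 *
 *	mov	r4, r5, lspull #8	@ for word-at-a-time copies, lspull and
 *	orr	r4, r4, r6, lspush #24	@ lspush join two misaligned source words
 */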

/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro	save_and_disable_irqs, oldcpsr
	mrs	\oldcpsr, cpsr
	disable_irq
	.endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs_notrace, oldcpsr
	msr	cpsr_c, \oldcpsr
	.endm

	.macro	restore_irqs, oldcpsr
	tst	\oldcpsr, #PSR_I_BIT
	asm_trace_hardirqs_on_cond eq
	restore_irqs_notrace \oldcpsr
	.endm

#define USER(x...)				\
9999:	x;					\
	.pushsection __ex_table,"a";		\
	.align	3;				\
	.long	9999b,9001f;			\
	.popsection

#ifdef CONFIG_SMP
#define ALT_SMP(instr...)			\
9998:	instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)					\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
9997:	instr							;\
	.if . - 9997b != 4					;\
		.error "ALT_UP() content must assemble to exactly 4 bytes";\
	.endif							;\
	.popsection
#define ALT_UP_B(label)						\
	.equ	up_b_offset, label - 9998b			;\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
	W(b)	. + up_b_offset					;\
	.popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif

/*
 * Instruction barrier
 */
	.macro	instr_sync
#if __LINUX_ARM_ARCH__ >= 7
	isb
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c5, 4
#endif
	.endm

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	ALT_SMP(dmb)
	.else
	ALT_SMP(W(dmb))
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
#else
#error Incompatible SMP platform
#endif
	.ifeqs "\mode","arm"
	ALT_UP(nop)
	.else
	ALT_UP(W(nop))
	.endif
#endif
	.endm

#ifdef CONFIG_THUMB2_KERNEL
	.macro	setmode, mode, reg
	mov	\reg, #\mode
	msr	cpsr_c, \reg
	.endm
#else
	.macro	setmode, mode, reg
	msr	cpsr_c, #\mode
	.endm
#endif

/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants
 */
#ifdef CONFIG_THUMB2_KERNEL

	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=T()
9999:
	.if	\inc == 1
	\instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
	.elseif	\inc == 4
	\instr\cond\()\t\().w \reg, [\ptr, #\off]
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endm

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
	@ explicit IT instruction needed because of the label
	@ introduced by the USER macro
	.ifnc	\cond,al
	.if	\rept == 1
	itt	\cond
	.elseif	\rept == 2
	ittt	\cond
	.else
	.error	"Unsupported rept macro argument"
	.endif
	.endif

	@ Slightly optimised to avoid incrementing the pointer twice
	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
	.if	\rept == 2
	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
	.endif

	add\cond \ptr, #\rept * \inc
	.endm

#else	/* !CONFIG_THUMB2_KERNEL */

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=T()
	.rept	\rept
9999:
	.if	\inc == 1
	\instr\cond\()b\()\t \reg, [\ptr], #\inc
	.elseif	\inc == 4
	\instr\cond\()\t \reg, [\ptr], #\inc
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endr
	.endm

#endif	/* CONFIG_THUMB2_KERNEL */

	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm
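
/*
 * Illustrative sketch only, not part of the original header: a hypothetical
 * routine using ldrusr to read one word from a user pointer in r1 into a
 * kernel buffer at r0.  The local label 9001 is the fault fixup target,
 * matching the default abort=9001f argument and the USER() macro's
 * convention.  Register choices and the routine itself are assumptions.
 *
 *	get_word_from_user:
 *		ldrusr	r3, r1, 4	@ user load of [r1], #4, plus an
 *					@ __ex_table entry pointing at 9001
 *		str	r3, [r0]
 *		mov	r0, #0		@ success
 *		mov	pc, lr
 *	9001:	mvn	r0, #13		@ fault path: return -EFAULT (-14)
 *		mov	pc, lr
 */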

/* Utility macro for declaring string literals */
	.macro	string name:req, string
	.type \name , #object
\name:
	.asciz "\string"
	.size \name , . - \name
	.endm

#endif /* __ASM_ASSEMBLER_H__ */
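
/*
 * Illustrative sketch only, not part of the original header: the string
 * macro declares a sized, NUL-terminated literal, e.g.
 *
 *	string	panic_msg, "kernel panic"
 *
 * which emits panic_msg: .asciz "kernel panic" with .type and .size set so
 * the symbol appears correctly in the symbol table.  The name panic_msg and
 * its contents are made up for illustration.
 */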