/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_IRQFLAGS_H
#define __ASM_IRQFLAGS_H

#include <asm/alternative.h>
#include <asm/barrier.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>

/*
 * AArch64 has flags for masking: Debug, Asynchronous (SError), Interrupts and
 * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'daif'
 * order:
 * Masking debug exceptions causes all other exceptions to be masked too.
 * Masking SError masks IRQ/FIQ, but not debug exceptions. IRQ and FIQ are
 * always masked and unmasked together, and have no side effects for other
 * flags. Keeping to this order makes it easier for entry.S to know which
 * exceptions should be unmasked.
 */
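
/*
 * In the 'msr daifset, #imm' / 'msr daifclr, #imm' instructions used below,
 * the 4-bit immediate encodes D:A:I:F in bits 3:0, so '#3' operates on the
 * I and F flags only: IRQ and FIQ are masked and unmasked together, as
 * described above, while Debug and SError are left untouched.
 */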
23 
24 /*
25  * CPU interrupt mask handling.
26  */
arch_local_irq_enable(void)27 static inline void arch_local_irq_enable(void)
28 {
29 	if (system_has_prio_mask_debugging()) {
30 		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
31 
32 		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
33 	}
34 
35 	asm volatile(ALTERNATIVE(
36 		"msr	daifclr, #3		// arch_local_irq_enable",
37 		__msr_s(SYS_ICC_PMR_EL1, "%0"),
38 		ARM64_HAS_IRQ_PRIO_MASKING)
39 		:
40 		: "r" ((unsigned long) GIC_PRIO_IRQON)
41 		: "memory");
42 
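	/*
	 * When unmasking via the PMR, make sure the GIC CPU interface
	 * observes the new, more permissive priority mask before the
	 * following instructions, so that any pending interrupt it now
	 * allows is delivered promptly (see pmr_sync() in <asm/barrier.h>
	 * for the barrier used).
	 */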
	pmr_sync();
}

static inline void arch_local_irq_disable(void)
{
	if (system_has_prio_mask_debugging()) {
		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);

		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
	}

	asm volatile(ALTERNATIVE(
		"msr	daifset, #3		// arch_local_irq_disable",
		__msr_s(SYS_ICC_PMR_EL1, "%0"),
		ARM64_HAS_IRQ_PRIO_MASKING)
		:
		: "r" ((unsigned long) GIC_PRIO_IRQOFF)
		: "memory");
}

/*
 * Save the current interrupt enable state. Depending on the alternative
 * patched in, the returned value is either the DAIF flags or the
 * ICC_PMR_EL1 priority mask.
 */
static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;

	asm volatile(ALTERNATIVE(
		"mrs	%0, daif",
		__mrs_s("%0", SYS_ICC_PMR_EL1),
		ARM64_HAS_IRQ_PRIO_MASKING)
		: "=&r" (flags)
		:
		: "memory");

	return flags;
}

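/*
 * Test whether 'flags' (as returned by arch_local_save_flags()) has
 * interrupts masked: with DAIF, the AND with PSR_I_BIT is non-zero iff
 * IRQs are masked; with priority masking, the EOR with GIC_PRIO_IRQON is
 * non-zero iff the PMR no longer allows normal interrupts. Either way, a
 * non-zero result means "disabled".
 */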
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
	int res;

	asm volatile(ALTERNATIVE(
		"and	%w0, %w1, #" __stringify(PSR_I_BIT),
		"eor	%w0, %w1, #" __stringify(GIC_PRIO_IRQON),
		ARM64_HAS_IRQ_PRIO_MASKING)
		: "=&r" (res)
		: "r" ((int) flags)
		: "memory");

	return res;
}

static inline int arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags;

	flags = arch_local_save_flags();

	/*
	 * There are too many states with IRQs disabled; just keep the
	 * current state if interrupts are already disabled/masked.
	 */
	if (!arch_irqs_disabled_flags(flags))
		arch_local_irq_disable();

	return flags;
}
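
/*
 * Typical usage, via the generic local_irq_save()/local_irq_restore()
 * wrappers:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// calls arch_local_irq_save()
 *	... code that must run with IRQs masked ...
 *	local_irq_restore(flags);	// calls arch_local_irq_restore()
 */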

/*
 * Restore the saved IRQ state.
 */
static inline void arch_local_irq_restore(unsigned long flags)
{
	asm volatile(ALTERNATIVE(
		"msr	daif, %0",
		__msr_s(SYS_ICC_PMR_EL1, "%0"),
		ARM64_HAS_IRQ_PRIO_MASKING)
		:
		: "r" (flags)
		: "memory");

	pmr_sync();
}
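
/*
 * Note that arch_local_irq_restore() expects a value obtained from
 * arch_local_save_flags() or arch_local_irq_save(): depending on the
 * alternative patched in, 'flags' is either a DAIF value or an
 * ICC_PMR_EL1 priority mask, and is written back to the matching register.
 */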

#endif /* __ASM_IRQFLAGS_H */