#ifndef _ASM_HW_IRQ_H
#define _ASM_HW_IRQ_H

/* (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar */

#include <asm/atomic.h>
#include <asm/numa.h>
#include <xen/cpumask.h>
#include <xen/percpu.h>
#include <xen/smp.h>
#include <asm/hvm/irq.h>

extern unsigned int nr_irqs_gsi;
extern unsigned int nr_irqs;
#define nr_static_irqs nr_irqs_gsi

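/*
 * IRQ number space layout, as implied by the checks below: IRQs below
 * nr_irqs_gsi are GSIs (the first 16 of them being the legacy/ISA range),
 * while IRQs in [nr_irqs_gsi, nr_irqs) are dynamically allocated (MSIs).
 */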
#define IO_APIC_IRQ(irq) (platform_legacy_irq(irq) ?    \
                          (1 << (irq)) & io_apic_irqs : \
                          (irq) < nr_irqs_gsi)

#define MSI_IRQ(irq) ((irq) >= nr_irqs_gsi && (irq) < nr_irqs)

#define LEGACY_VECTOR(irq) ((irq) + FIRST_LEGACY_VECTOR)

typedef struct {
    DECLARE_BITMAP(_bits, X86_NR_VECTORS);
} vmask_t;

struct irq_desc;

struct arch_irq_desc {
    s16 vector;      /* vector itself is only 8 bits, */
    s16 old_vector;  /* but we use -1 for unassigned  */
    /*
     * Except for high priority interrupts @cpu_mask may have bits set for
     * offline CPUs.  Consumers need to be careful to mask this down to
     * online ones as necessary.  There is, however, supposed to always be
     * a non-empty intersection with cpu_online_map (see the illustrative
     * sketch following this structure).
     */
    cpumask_var_t cpu_mask;
    cpumask_var_t old_cpu_mask;
    cpumask_var_t pending_mask;
    vmask_t *used_vectors;
    unsigned move_cleanup_count;
    u8 move_in_progress : 1;
    s8 used;
    /*
     * Weak reference to the domain having permission over this IRQ (which
     * can be different from the domain actually having the IRQ assigned).
     */
    domid_t creator_domid;
};
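
/*
 * Illustrative sketch only (not part of this interface): a consumer needing
 * the online subset of ->arch.cpu_mask can narrow it against cpu_online_map
 * before use.  "desc" below is assumed to be a suitably locked irq_desc:
 *
 *     cpumask_t affinity;
 *
 *     cpumask_and(&affinity, desc->arch.cpu_mask, &cpu_online_map);
 *     ASSERT(!cpumask_empty(&affinity));
 */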

/* For use with irq_desc.arch.used */
#define IRQ_UNUSED      (0)
#define IRQ_USED        (1)
#define IRQ_RESERVED    (-1)

#define IRQ_VECTOR_UNASSIGNED (-1)

typedef int vector_irq_t[X86_NR_VECTORS];
DECLARE_PER_CPU(vector_irq_t, vector_irq);

extern bool opt_noirqbalance;

#define OPT_IRQ_VECTOR_MAP_DEFAULT 0 /* Do the default thing */
#define OPT_IRQ_VECTOR_MAP_NONE    1 /* None */
#define OPT_IRQ_VECTOR_MAP_GLOBAL  2 /* One global vector map (no vector sharing) */
#define OPT_IRQ_VECTOR_MAP_PERDEV  3 /* Per-device vector map (no vector sharing within a device) */

extern int opt_irq_vector_map;

/*
 * Per-cpu current frame pointer - the location of the last exception frame
 * on the stack.
 */
DECLARE_PER_CPU(struct cpu_user_regs *, __irq_regs);

static inline struct cpu_user_regs *get_irq_regs(void)
{
    return this_cpu(__irq_regs);
}

static inline struct cpu_user_regs *set_irq_regs(struct cpu_user_regs *new_regs)
{
    struct cpu_user_regs *old_regs, **pp_regs = &this_cpu(__irq_regs);

    old_regs = *pp_regs;
    *pp_regs = new_regs;
    return old_regs;
}
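
/*
 * Illustrative usage (a sketch, not a requirement of this interface): an
 * interrupt entry path typically saves and restores the per-CPU pointer
 * around handler dispatch:
 *
 *     struct cpu_user_regs *old_regs = set_irq_regs(regs);
 *
 *     ... dispatch the interrupt ...
 *
 *     set_irq_regs(old_regs);
 */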

#define platform_legacy_irq(irq) ((irq) < 16)

void event_check_interrupt(struct cpu_user_regs *regs);
void invalidate_interrupt(struct cpu_user_regs *regs);
void call_function_interrupt(struct cpu_user_regs *regs);
void apic_timer_interrupt(struct cpu_user_regs *regs);
void error_interrupt(struct cpu_user_regs *regs);
void pmu_apic_interrupt(struct cpu_user_regs *regs);
void spurious_interrupt(struct cpu_user_regs *regs);
void irq_move_cleanup_interrupt(struct cpu_user_regs *regs);

uint8_t alloc_hipriority_vector(void);

void set_direct_apic_vector(
    uint8_t vector, void (*handler)(struct cpu_user_regs *));
void alloc_direct_apic_vector(
    uint8_t *vector, void (*handler)(struct cpu_user_regs *));
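
/*
 * Illustrative usage (a sketch; my_vector/my_handler are hypothetical
 * names): a subsystem wanting a directly delivered vector of its own can
 * allocate one and install its handler in a single call:
 *
 *     static uint8_t my_vector;
 *
 *     alloc_direct_apic_vector(&my_vector, my_handler);
 */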

void do_IRQ(struct cpu_user_regs *regs);

void disable_8259A_irq(struct irq_desc *);
void enable_8259A_irq(struct irq_desc *);
int i8259A_irq_pending(unsigned int irq);
void mask_8259A(void);
void unmask_8259A(void);
void init_8259A(int aeoi);
void make_8259A_irq(unsigned int irq);
bool bogus_8259A_irq(unsigned int irq);
int i8259A_suspend(void);
int i8259A_resume(void);

void setup_IO_APIC(void);
void disable_IO_APIC(void);
void setup_ioapic_dest(void);
vmask_t *io_apic_get_used_vector_map(unsigned int irq);

extern unsigned int io_apic_irqs;

DECLARE_PER_CPU(unsigned int, irq_count);

struct pirq;
struct arch_pirq {
    int irq;
    union {
        struct hvm_pirq {
            int emuirq;
            struct hvm_pirq_dpci dpci;
        } hvm;
    };
};

#define pirq_dpci(pirq) ((pirq) ? &(pirq)->arch.hvm.dpci : NULL)
#define dpci_pirq(pd) container_of(pd, struct pirq, arch.hvm.dpci)

int pirq_shared(struct domain *d, int irq);

int map_domain_pirq(struct domain *d, int pirq, int irq, int type,
                    void *data);
int unmap_domain_pirq(struct domain *d, int pirq);
int get_free_pirq(struct domain *d, int type);
int get_free_pirqs(struct domain *, unsigned int nr);
void free_domain_pirqs(struct domain *d);
int map_domain_emuirq_pirq(struct domain *d, int pirq, int irq);
int unmap_domain_pirq_emuirq(struct domain *d, int pirq);

/* Reset irq affinities to match the given CPU mask. */
void fixup_irqs(const cpumask_t *mask, bool verbose);
void fixup_eoi(void);

int init_irq_data(void);

void clear_irq_vector(int irq);

int irq_to_vector(int irq);
/*
 * If grant_access is set the current domain is given permissions over
 * the created IRQ.
 */
int create_irq(nodeid_t node, bool grant_access);
void destroy_irq(unsigned int irq);
int assign_irq_vector(int irq, const cpumask_t *);

extern void irq_complete_move(struct irq_desc *);

extern struct irq_desc *irq_desc;

void lock_vector_lock(void);
void unlock_vector_lock(void);

void setup_vector_irq(unsigned int cpu);

void move_native_irq(struct irq_desc *);
void move_masked_irq(struct irq_desc *);

int bind_irq_vector(int irq, int vector, const cpumask_t *);

void end_nonmaskable_irq(struct irq_desc *, uint8_t vector);
void irq_set_affinity(struct irq_desc *, const cpumask_t *mask);

int init_domain_irq_mapping(struct domain *);
void cleanup_domain_irq_mapping(struct domain *);

#define domain_pirq_to_irq(d, pirq) pirq_field(d, pirq, arch.irq, 0)
#define domain_irq_to_pirq(d, irq) ({                                   \
    void *__ret = radix_tree_lookup(&(d)->arch.irq_pirq, irq);          \
    __ret ? radix_tree_ptr_to_int(__ret) : 0;                           \
})
#define PIRQ_ALLOCATED (-1)
#define domain_pirq_to_emuirq(d, pirq) pirq_field(d, pirq,              \
                                                  arch.hvm.emuirq, IRQ_UNBOUND)
#define domain_emuirq_to_pirq(d, emuirq) ({                             \
    void *__ret = radix_tree_lookup(&(d)->arch.hvm.emuirq_pirq, emuirq);\
    __ret ? radix_tree_ptr_to_int(__ret) : IRQ_UNBOUND;                 \
})
#define IRQ_UNBOUND (-1)
#define IRQ_PT (-2)
#define IRQ_MSI_EMU (-3)
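
/*
 * Illustrative usage (a sketch only): translating a guest pirq back to the
 * host IRQ it is bound to, with non-positive results (unbound or merely
 * allocated) treated as "no host IRQ":
 *
 *     int irq = domain_pirq_to_irq(d, pirq);
 *
 *     if ( irq <= 0 )
 *         ... pirq is not bound to a host IRQ ...
 */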

bool cpu_has_pending_apic_eoi(void);

static inline void arch_move_irqs(struct vcpu *v) { }

struct msi_info;
int allocate_and_map_gsi_pirq(struct domain *d, int index, int *pirq_p);
int allocate_and_map_msi_pirq(struct domain *d, int index, int *pirq_p,
                              int type, struct msi_info *msi);

#endif /* _ASM_HW_IRQ_H */