// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support of MSI, HPET and DMAR interrupts.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 * Moved from arch/x86/kernel/apic/io_apic.c.
 * Jiang Liu <jiang.liu@linux.intel.com>
 *	Convert to hierarchical irqdomain
 */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/hpet.h>
#include <linux/msi.h>
#include <asm/irqdomain.h>
#include <asm/hpet.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/irq_remapping.h>

struct irq_domain *x86_pci_msi_default_domain __ro_after_init;

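/*
 * Compose the MSI message for the vector/destination held in @cfg and have
 * the irq chip of @irqd write it out to the device.
 */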
static void irq_msi_update_msg(struct irq_data *irqd, struct irq_cfg *cfg)
{
        struct msi_msg msg[2] = { [1] = { }, };

        __irq_msi_compose_msg(cfg, msg, false);
        irq_data_get_irq_chip(irqd)->irq_write_msi_msg(irqd, msg);
}

static int
msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force)
{
        struct irq_cfg old_cfg, *cfg = irqd_cfg(irqd);
        struct irq_data *parent = irqd->parent_data;
        unsigned int cpu;
        int ret;

        /* Save the current configuration */
        cpu = cpumask_first(irq_data_get_effective_affinity_mask(irqd));
        old_cfg = *cfg;

        /* Allocate a new target vector */
        ret = parent->chip->irq_set_affinity(parent, mask, force);
        if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
                return ret;

        /*
         * For non-maskable and non-remapped MSI interrupts the migration
         * to a different destination CPU and a different vector has to be
         * done carefully to handle the possible stray interrupt which can be
         * caused by the non-atomic update of the address/data pair.
         *
         * Direct update is possible when:
         * - The MSI is maskable (remapped MSI does not use this code path).
         *   The quirk bit is not set in this case.
         * - The new vector is the same as the old vector
         * - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up)
         * - The interrupt is not yet started up
         * - The new destination CPU is the same as the old destination CPU
         */
        if (!irqd_msi_nomask_quirk(irqd) ||
            cfg->vector == old_cfg.vector ||
            old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR ||
            !irqd_is_started(irqd) ||
            cfg->dest_apicid == old_cfg.dest_apicid) {
                irq_msi_update_msg(irqd, cfg);
                return ret;
        }

        /*
         * Paranoia: Validate that the interrupt target is the local
         * CPU.
         */
        if (WARN_ON_ONCE(cpu != smp_processor_id())) {
                irq_msi_update_msg(irqd, cfg);
                return ret;
        }

        /*
         * Redirect the interrupt to the new vector on the current CPU
         * first. This might cause a spurious interrupt on this vector if
         * the device raises an interrupt right between this update and the
         * update to the final destination CPU.
         *
         * If the vector is in use then the installed device handler will
         * denote it as spurious, which is no harm as this is a rare event
         * and interrupt handlers have to cope with spurious interrupts
         * anyway. If the vector is unused, then it is marked so it won't
         * trigger the 'No irq handler for vector' warning in
         * common_interrupt().
         *
         * This requires holding the vector lock to prevent concurrent
         * updates to the affected vector.
         */
        lock_vector_lock();

        /*
         * Mark the new target vector on the local CPU if it is currently
         * unused. Reuse the VECTOR_RETRIGGERED state which is also used in
         * the CPU hotplug path for a similar purpose. This cannot be
         * undone here as the current CPU has interrupts disabled and
         * cannot handle the interrupt before the whole set_affinity()
         * section is done. In the CPU unplug case, the current CPU is
         * about to vanish and will not handle any interrupts anymore. The
         * vector is cleaned up when the CPU comes online again.
         */
        if (IS_ERR_OR_NULL(this_cpu_read(vector_irq[cfg->vector])))
                this_cpu_write(vector_irq[cfg->vector], VECTOR_RETRIGGERED);

        /* Redirect it to the new vector on the local CPU temporarily */
        old_cfg.vector = cfg->vector;
        irq_msi_update_msg(irqd, &old_cfg);

        /* Now transition it to the target CPU */
        irq_msi_update_msg(irqd, cfg);

        /*
         * All interrupts after this point are now targeted at the new
         * vector/CPU.
         *
         * Drop vector lock before testing whether the temporary assignment
         * to the local CPU was hit by an interrupt raised in the device,
         * because the retrigger function acquires vector lock again.
         */
        unlock_vector_lock();

        /*
         * Check whether the transition raced with a device interrupt and
         * is pending in the local APIC's IRR. It is safe to do this outside
         * of vector lock as the irq_desc::lock of this interrupt is still
         * held and interrupts are disabled: The check is not accessing the
         * underlying vector store. It's just checking the local APIC's
         * IRR.
         */
        if (lapic_vector_set_in_irr(cfg->vector))
                irq_data_get_irq_chip(irqd)->irq_retrigger(irqd);

        return ret;
}

/*
 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI or MSI-X Capability Structure.
 */
static struct irq_chip pci_msi_controller = {
        .name                   = "PCI-MSI",
        .irq_unmask             = pci_msi_unmask_irq,
        .irq_mask               = pci_msi_mask_irq,
        .irq_ack                = irq_chip_ack_parent,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .irq_set_affinity       = msi_set_affinity,
        .flags                  = IRQCHIP_SKIP_SET_WAKE |
                                  IRQCHIP_AFFINITY_PRE_STARTUP,
};

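/*
 * Fill in the allocation info for a PCI device: select MSI-X vs. MSI from
 * the first MSI descriptor and, for multi-message MSI, request contiguous
 * vectors as the device encodes the entry index in the low bits of the
 * message data.
 */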
int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec,
                    msi_alloc_info_t *arg)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct msi_desc *desc = first_pci_msi_entry(pdev);

        init_irq_alloc_info(arg, NULL);
        if (desc->msi_attrib.is_msix) {
                arg->type = X86_IRQ_ALLOC_TYPE_PCI_MSIX;
        } else {
                arg->type = X86_IRQ_ALLOC_TYPE_PCI_MSI;
                arg->flags |= X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(pci_msi_prepare);

static struct msi_domain_ops pci_msi_domain_ops = {
        .msi_prepare    = pci_msi_prepare,
};

static struct msi_domain_info pci_msi_domain_info = {
        .flags          = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                          MSI_FLAG_PCI_MSIX,
        .ops            = &pci_msi_domain_ops,
        .chip           = &pci_msi_controller,
        .handler        = handle_edge_irq,
        .handler_name   = "edge",
};

struct irq_domain * __init native_create_pci_msi_domain(void)
{
        struct fwnode_handle *fn;
        struct irq_domain *d;

        if (disable_apic)
                return NULL;

        fn = irq_domain_alloc_named_fwnode("PCI-MSI");
        if (!fn)
                return NULL;

        d = pci_msi_create_irq_domain(fn, &pci_msi_domain_info,
                                      x86_vector_domain);
        if (!d) {
                irq_domain_free_fwnode(fn);
                pr_warn("Failed to initialize PCI-MSI irqdomain.\n");
        } else {
                d->flags |= IRQ_DOMAIN_MSI_NOMASK_QUIRK;
        }
        return d;
}

void __init x86_create_pci_msi_domain(void)
{
        x86_pci_msi_default_domain = x86_init.irqs.create_pci_msi_domain();
}

#ifdef CONFIG_IRQ_REMAP
static struct irq_chip pci_msi_ir_controller = {
        .name                   = "IR-PCI-MSI",
        .irq_unmask             = pci_msi_unmask_irq,
        .irq_mask               = pci_msi_mask_irq,
        .irq_ack                = irq_chip_ack_parent,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .flags                  = IRQCHIP_SKIP_SET_WAKE |
                                  IRQCHIP_AFFINITY_PRE_STARTUP,
};

static struct msi_domain_info pci_msi_ir_domain_info = {
        .flags          = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                          MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
        .ops            = &pci_msi_domain_ops,
        .chip           = &pci_msi_ir_controller,
        .handler        = handle_edge_irq,
        .handler_name   = "edge",
};

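/*
 * Create a remapped PCI/MSI irqdomain with the given interrupt remapping
 * unit as parent. The fwnode is named after the remapping unit and is freed
 * again if domain creation fails.
 */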
struct irq_domain *arch_create_remap_msi_irq_domain(struct irq_domain *parent,
                                                    const char *name, int id)
{
        struct fwnode_handle *fn;
        struct irq_domain *d;

        fn = irq_domain_alloc_named_id_fwnode(name, id);
        if (!fn)
                return NULL;
        d = pci_msi_create_irq_domain(fn, &pci_msi_ir_domain_info, parent);
        if (!d)
                irq_domain_free_fwnode(fn);
        return d;
}
#endif

#ifdef CONFIG_DMAR_TABLE
/*
 * The Intel IOMMU (ab)uses the high bits of the MSI address to contain the
 * high bits of the destination APIC ID. This can't be done in the general
 * case for MSIs as it would be targeting real memory above 4GiB not the
 * APIC.
 */
static void dmar_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
        __irq_msi_compose_msg(irqd_cfg(data), msg, true);
}

static void dmar_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
{
        dmar_msi_write(data->irq, msg);
}

static struct irq_chip dmar_msi_controller = {
        .name                   = "DMAR-MSI",
        .irq_unmask             = dmar_msi_unmask,
        .irq_mask               = dmar_msi_mask,
        .irq_ack                = irq_chip_ack_parent,
        .irq_set_affinity       = msi_domain_set_affinity,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .irq_compose_msi_msg    = dmar_msi_compose_msg,
        .irq_write_msi_msg      = dmar_msi_write_msg,
        .flags                  = IRQCHIP_SKIP_SET_WAKE |
                                  IRQCHIP_AFFINITY_PRE_STARTUP,
};

static int dmar_msi_init(struct irq_domain *domain,
                         struct msi_domain_info *info, unsigned int virq,
                         irq_hw_number_t hwirq, msi_alloc_info_t *arg)
{
        irq_domain_set_info(domain, virq, arg->devid, info->chip, NULL,
                            handle_edge_irq, arg->data, "edge");

        return 0;
}

static struct msi_domain_ops dmar_msi_domain_ops = {
        .msi_init       = dmar_msi_init,
};

static struct msi_domain_info dmar_msi_domain_info = {
        .ops            = &dmar_msi_domain_ops,
        .chip           = &dmar_msi_controller,
        .flags          = MSI_FLAG_USE_DEF_DOM_OPS,
};

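/*
 * Create the DMAR-MSI irqdomain lazily on first use. The static domain
 * pointer and the mutex make this safe against concurrent callers; the
 * fwnode is freed again if domain creation fails.
 */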
static struct irq_domain *dmar_get_irq_domain(void)
{
        static struct irq_domain *dmar_domain;
        static DEFINE_MUTEX(dmar_lock);
        struct fwnode_handle *fn;

        mutex_lock(&dmar_lock);
        if (dmar_domain)
                goto out;

        fn = irq_domain_alloc_named_fwnode("DMAR-MSI");
        if (fn) {
                dmar_domain = msi_create_irq_domain(fn, &dmar_msi_domain_info,
                                                    x86_vector_domain);
                if (!dmar_domain)
                        irq_domain_free_fwnode(fn);
        }
out:
        mutex_unlock(&dmar_lock);
        return dmar_domain;
}

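/*
 * Allocate a single Linux interrupt for a DMAR unit. @id identifies the
 * remapping unit, @node is the NUMA node for the allocation and @arg is
 * stored as the interrupt's handler data. Returns the virq on success or
 * a negative value if no DMAR-MSI domain is available.
 */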
int dmar_alloc_hwirq(int id, int node, void *arg)
{
        struct irq_domain *domain = dmar_get_irq_domain();
        struct irq_alloc_info info;

        if (!domain)
                return -1;

        init_irq_alloc_info(&info, NULL);
        info.type       = X86_IRQ_ALLOC_TYPE_DMAR;
        info.devid      = id;
        info.hwirq      = id;
        info.data       = arg;

        return irq_domain_alloc_irqs(domain, 1, node, &info);
}

void dmar_free_hwirq(int irq)
{
        irq_domain_free_irqs(irq, 1);
}
#endif