// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994, 1995, 1996, 1997, 1998 Ralf Baechle
 * Copyright (C) 1999 SuSE GmbH (Philipp Rumpf, prumpf@tux.org)
 * Copyright (C) 1999-2000 Grant Grundler
 * Copyright (c) 2005 Matthew Wilcox
 */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/types.h>
#include <linux/sched/task_stack.h>
#include <asm/io.h>

#include <asm/softirq_stack.h>
#include <asm/smp.h>
#include <asm/ldcw.h>

#undef PARISC_IRQ_CR16_COUNTS

extern irqreturn_t timer_interrupt(int, void *);
extern irqreturn_t ipi_interrupt(int, void *);

#define EIEM_MASK(irq)  (1UL<<(CPU_IRQ_MAX - irq))

/* Bits in EIEM correlate with cpu_irq_action[].
** Numbered *Big Endian*! (ie bit 0 is MSB)
*/
static volatile unsigned long cpu_eiem = 0;

/*
** local ACK bitmap ... habitually set to 1, but reset to zero
** between ->ack() and ->end() of the interrupt to prevent
** re-interruption of a processing interrupt.
*/
static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;
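/*
** The effective enable mask written to the EIEM is always the AND of the
** global cpu_eiem (which IRQs are enabled at all) and this CPU's
** local_ack_eiem (which IRQs are not currently being processed here),
** i.e. set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu)) as done in
** cpu_ack_irq()/cpu_eoi_irq() and at the end of do_cpu_irq_mask().
*/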

static void cpu_mask_irq(struct irq_data *d)
{
        unsigned long eirr_bit = EIEM_MASK(d->irq);

        cpu_eiem &= ~eirr_bit;
        /* Do nothing on the other CPUs. If they get this interrupt,
         * the & cpu_eiem in do_cpu_irq_mask() ensures they won't
         * handle it, and the set_eiem() at the bottom will ensure it
         * then gets disabled */
}

static void __cpu_unmask_irq(unsigned int irq)
{
        unsigned long eirr_bit = EIEM_MASK(irq);

        cpu_eiem |= eirr_bit;

        /* This is just a simple NOP IPI. But what it does is cause
         * all the other CPUs to do a set_eiem(cpu_eiem) at the end
         * of the interrupt handler */
        smp_send_all_nop();
}

static void cpu_unmask_irq(struct irq_data *d)
{
        __cpu_unmask_irq(d->irq);
}

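/*
** ->ack() runs before the handler: drop the IRQ's bit from this CPU's
** local_ack_eiem, reprogram the EIEM so it can't re-trigger here, and
** write the bit to CR 23 (EIRR) to clear the pending interrupt.
** ->eoi() undoes this once the handler has finished.
*/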
void cpu_ack_irq(struct irq_data *d)
{
        unsigned long mask = EIEM_MASK(d->irq);
        int cpu = smp_processor_id();

        /* Clear in EIEM so we can no longer process */
        per_cpu(local_ack_eiem, cpu) &= ~mask;

        /* disable the interrupt */
        set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));

        /* and now ack it */
        mtctl(mask, 23);
}

void cpu_eoi_irq(struct irq_data *d)
{
        unsigned long mask = EIEM_MASK(d->irq);
        int cpu = smp_processor_id();

        /* set it in the eiems---it's no longer in process */
        per_cpu(local_ack_eiem, cpu) |= mask;

        /* enable the interrupt */
        set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
}

#ifdef CONFIG_SMP
int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
{
        int cpu_dest;

        /* timer and ipi have to always be received on all CPUs */
        if (irqd_is_per_cpu(d))
                return -EINVAL;

        /* whatever mask they set, we just allow one CPU */
        cpu_dest = cpumask_next_and(d->irq & (num_online_cpus()-1),
                                    dest, cpu_online_mask);
        if (cpu_dest >= nr_cpu_ids)
                cpu_dest = cpumask_first_and(dest, cpu_online_mask);

        return cpu_dest;
}

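/*
** Note: this only records the new affinity mask. Redirection of an
** interrupt that arrives on the "wrong" CPU is handled later, in
** do_cpu_irq_mask(), by writing the IRQ number to the target CPU's HPA.
*/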
static int cpu_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
                                bool force)
{
        int cpu_dest;

        cpu_dest = cpu_check_affinity(d, dest);
        if (cpu_dest < 0)
                return -1;

        cpumask_copy(irq_data_get_affinity_mask(d), dest);

        return 0;
}
#endif

static struct irq_chip cpu_interrupt_type = {
        .name                   = "CPU",
        .irq_mask               = cpu_mask_irq,
        .irq_unmask             = cpu_unmask_irq,
        .irq_ack                = cpu_ack_irq,
        .irq_eoi                = cpu_eoi_irq,
#ifdef CONFIG_SMP
        .irq_set_affinity       = cpu_set_affinity_irq,
#endif
        /* XXX: Needs to be written. We managed without it so far, but
         * we really ought to write it.
         */
        .irq_retrigger          = NULL,
};

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
#define irq_stats(x)    (&per_cpu(irq_stat, x))

/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
        int j;

#ifdef CONFIG_DEBUG_STACKOVERFLOW
        seq_printf(p, "%*s: ", prec, "STK");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
        seq_puts(p, " Kernel stack usage\n");
# ifdef CONFIG_IRQSTACKS
        seq_printf(p, "%*s: ", prec, "IST");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
        seq_puts(p, " Interrupt stack usage\n");
# endif
#endif
#ifdef CONFIG_SMP
        if (num_online_cpus() > 1) {
                seq_printf(p, "%*s: ", prec, "RES");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
                seq_puts(p, " Rescheduling interrupts\n");
                seq_printf(p, "%*s: ", prec, "CAL");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
                seq_puts(p, " Function call interrupts\n");
        }
#endif
        seq_printf(p, "%*s: ", prec, "UAH");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_unaligned_count);
        seq_puts(p, " Unaligned access handler traps\n");
        seq_printf(p, "%*s: ", prec, "FPA");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_fpassist_count);
        seq_puts(p, " Floating point assist traps\n");
        seq_printf(p, "%*s: ", prec, "TLB");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
        seq_puts(p, " TLB shootdowns\n");
        return 0;
}

int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *) v, j;
        unsigned long flags;

        if (i == 0) {
                seq_puts(p, " ");
                for_each_online_cpu(j)
                        seq_printf(p, " CPU%d", j);

#ifdef PARISC_IRQ_CR16_COUNTS
                seq_printf(p, " [min/avg/max] (CPU cycle counts)");
#endif
                seq_putc(p, '\n');
        }

        if (i < NR_IRQS) {
                struct irq_desc *desc = irq_to_desc(i);
                struct irqaction *action;

                raw_spin_lock_irqsave(&desc->lock, flags);
                action = desc->action;
                if (!action)
                        goto skip;
                seq_printf(p, "%3d: ", i);

                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", irq_desc_kstat_cpu(desc, j));

                seq_printf(p, " %14s", irq_desc_get_chip(desc)->name);
#ifndef PARISC_IRQ_CR16_COUNTS
                seq_printf(p, " %s", action->name);

                while ((action = action->next))
                        seq_printf(p, ", %s", action->name);
#else
                for ( ; action; action = action->next) {
                        unsigned int k, avg, min, max;

                        min = max = action->cr16_hist[0];

                        for (avg = k = 0; k < PARISC_CR16_HIST_SIZE; k++) {
                                int hist = action->cr16_hist[k];

                                if (hist) {
                                        avg += hist;
                                } else
                                        break;

                                if (hist > max) max = hist;
                                if (hist < min) min = hist;
                        }

                        avg /= k;
                        seq_printf(p, " %s[%d/%d/%d]", action->name,
                                   min, avg, max);
                }
#endif

                seq_putc(p, '\n');
 skip:
                raw_spin_unlock_irqrestore(&desc->lock, flags);
        }

        if (i == NR_IRQS)
                arch_show_interrupts(p, 3);

        return 0;
}


/*
** The following form a "set": Virtual IRQ, Transaction Address, Trans Data.
** Respectively, these map to IRQ region+EIRR, Processor HPA, EIRR bit.
**
** To use txn_XXX() interfaces, get a Virtual IRQ first.
** Then use that to get the Transaction address and data.
*/

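/*
** cpu_claim_irq() - claim a CPU interrupt line for a driver/chip.
** Fails with -EBUSY if the IRQ already has an action or is no longer
** owned by cpu_interrupt_type. When a chip is given (the iosapic case),
** the IRQ is rebound to that chip with handle_percpu_irq() and unmasked.
*/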
int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
{
        if (irq_has_action(irq))
                return -EBUSY;
        if (irq_get_chip(irq) != &cpu_interrupt_type)
                return -EBUSY;

        /* for iosapic interrupts */
        if (type) {
                irq_set_chip_and_handler(irq, type, handle_percpu_irq);
                irq_set_chip_data(irq, data);
                __cpu_unmask_irq(irq);
        }
        return 0;
}

int txn_claim_irq(int irq)
{
        return cpu_claim_irq(irq, NULL, NULL) ? -1 : irq;
}

/*
 * The bits_wide parameter accommodates the limitations of the HW/SW which
 * use these bits:
 * Legacy PA I/O (GSC/NIO): 5 bits (architected EIM register)
 * V-class (EPIC): 6 bits
 * N/L/A-class (iosapic): 8 bits
 * PCI 2.2 MSI: 16 bits
 * Some PCI devices: 32 bits (Symbios SCSI/ATM/HyperFabric)
 *
 * On the service provider side:
 * o PA 1.1 (and PA2.0 narrow mode) 5-bits (width of EIR register)
 * o PA 2.0 wide mode 6-bits (per processor)
 * o IA64 8-bits (0-256 total)
 *
 * So a Legacy PA I/O device on a PA 2.0 box can't use all the bits supported
 * by the processor...and the N/L-class I/O subsystem supports more bits than
 * PA2.0 has. The first case is the problem.
 */
int txn_alloc_irq(unsigned int bits_wide)
{
        int irq;

        /* never return irq 0 cause that's the interval timer */
        for (irq = CPU_IRQ_BASE + 1; irq <= CPU_IRQ_MAX; irq++) {
                if (cpu_claim_irq(irq, NULL, NULL) < 0)
                        continue;
                if ((irq - CPU_IRQ_BASE) >= (1 << bits_wide))
                        continue;
                return irq;
        }

        /* unlikely, but be prepared */
        return -1;
}


unsigned long txn_affinity_addr(unsigned int irq, int cpu)
{
#ifdef CONFIG_SMP
        struct irq_data *d = irq_get_irq_data(irq);
        cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(cpu));
#endif

        return per_cpu(cpu_data, cpu).txn_addr;
}

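/*
** Pick a CPU for a newly allocated transaction address. CPUs are handed
** out round-robin, skipping any that are offline or have no txn_addr;
** if none qualifies we fall back to CPU 0 (the monarch).
*/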
unsigned long txn_alloc_addr(unsigned int virt_irq)
{
        static int next_cpu = -1;

        next_cpu++; /* assign to "next" CPU we want this bugger on */

        /* validate entry */
        while ((next_cpu < nr_cpu_ids) &&
               (!per_cpu(cpu_data, next_cpu).txn_addr ||
                !cpu_online(next_cpu)))
                next_cpu++;

        if (next_cpu >= nr_cpu_ids)
                next_cpu = 0;   /* nothing else, assign monarch */

        return txn_affinity_addr(virt_irq, next_cpu);
}


unsigned int txn_alloc_data(unsigned int virt_irq)
{
        return virt_irq - CPU_IRQ_BASE;
}

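/*
** Convert a pending EIRR value to a Linux IRQ number. EIRR bits are
** numbered from the MSB down, so the highest set bit (found with
** fls_long()) is the highest-priority pending interrupt; bit 0 (the MSB)
** corresponds to TIMER_IRQ.
*/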
static inline int eirr_to_irq(unsigned long eirr)
{
        int bit = fls_long(eirr);
        return (BITS_PER_LONG - bit) + TIMER_IRQ;
}

#ifdef CONFIG_IRQSTACKS
/*
 * IRQ STACK - used for irq handler
 */
#ifdef CONFIG_64BIT
#define IRQ_STACK_SIZE      (4096 << 4) /* 64k irq stack size */
#else
#define IRQ_STACK_SIZE      (4096 << 3) /* 32k irq stack size */
#endif

union irq_stack_union {
        unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
        volatile unsigned int slock[4];
        volatile unsigned int lock[1];
};

DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
                .slock = { 1,1,1,1 },
        };
#endif


int sysctl_panic_on_stackoverflow = 1;

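/*
** Record the deepest kernel/irq stack usage seen so far in irq_stat and
** warn (or panic, depending on sysctl_panic_on_stackoverflow) once the
** stack pointer gets within STACK_MARGIN bytes of the end of the stack
** area. Compiled out unless CONFIG_DEBUG_STACKOVERFLOW is set.
*/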
static inline void stack_overflow_check(struct pt_regs *regs)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
#define STACK_MARGIN    (256*6)

        unsigned long stack_start = (unsigned long) task_stack_page(current);
        unsigned long sp = regs->gr[30];
        unsigned long stack_usage;
        unsigned int *last_usage;
        int cpu = smp_processor_id();

        /* if sr7 != 0, we interrupted a userspace process which we do not want
         * to check for stack overflow. We will only check the kernel stack. */
        if (regs->sr[7])
                return;

        /* exit if already in panic */
        if (sysctl_panic_on_stackoverflow < 0)
                return;

        /* calculate kernel stack usage */
        stack_usage = sp - stack_start;
#ifdef CONFIG_IRQSTACKS
        if (likely(stack_usage <= THREAD_SIZE))
                goto check_kernel_stack; /* found kernel stack */

        /* check irq stack usage */
        stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
        stack_usage = sp - stack_start;

        last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
        if (unlikely(stack_usage > *last_usage))
                *last_usage = stack_usage;

        if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN)))
                return;

        pr_emerg("stackcheck: %s will most likely overflow irq stack "
                 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
                current->comm, sp, stack_start, stack_start + IRQ_STACK_SIZE);
        goto panic_check;

check_kernel_stack:
#endif

        /* check kernel stack usage */
        last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);

        if (unlikely(stack_usage > *last_usage))
                *last_usage = stack_usage;

        if (likely(stack_usage < (THREAD_SIZE - STACK_MARGIN)))
                return;

        pr_emerg("stackcheck: %s will most likely overflow kernel stack "
                 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
                current->comm, sp, stack_start, stack_start + THREAD_SIZE);

#ifdef CONFIG_IRQSTACKS
panic_check:
#endif
        if (sysctl_panic_on_stackoverflow) {
                sysctl_panic_on_stackoverflow = -1; /* disable further checks */
                panic("low stack detected by irq handler - check messages\n");
        }
#endif
}

#ifdef CONFIG_IRQSTACKS
/* in entry.S: */
void call_on_stack(unsigned long p1, void *func, unsigned long new_stack);

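/*
** Run func(param1) on this CPU's dedicated IRQ stack. The ldcw-aligned
** slock word at the start of irq_stack_union serves as a "stack already in
** use" flag: if the ldcw reads zero we are nested inside an earlier switch,
** so func is simply called on the current stack instead. The flag is set
** back to 1 once call_on_stack() returns.
*/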
static void execute_on_irq_stack(void *func, unsigned long param1)
{
        union irq_stack_union *union_ptr;
        unsigned long irq_stack;
        volatile unsigned int *irq_stack_in_use;

        union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
        irq_stack = (unsigned long) &union_ptr->stack;
        irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.slock),
                        FRAME_ALIGN); /* align for stack frame usage */

        /* We may be called recursively. If we are already using the irq
         * stack, just continue to use it. Use spinlocks to serialize
         * the irq stack usage.
         */
        irq_stack_in_use = (volatile unsigned int *)__ldcw_align(union_ptr);
        if (!__ldcw(irq_stack_in_use)) {
                void (*direct_call)(unsigned long p1) = func;

                /* We are using the IRQ stack already.
                 * Do direct call on current stack. */
                direct_call(param1);
                return;
        }

        /* This is where we switch to the IRQ stack. */
        call_on_stack(param1, func, irq_stack);

        /* free up irq stack usage. */
        *irq_stack_in_use = 1;
}

void do_softirq_own_stack(void)
{
        execute_on_irq_stack(__do_softirq, 0);
}
#endif /* CONFIG_IRQSTACKS */

/* ONLY called from entry.S:intr_extint() */
void do_cpu_irq_mask(struct pt_regs *regs)
{
        struct pt_regs *old_regs;
        unsigned long eirr_val;
        int irq, cpu = smp_processor_id();
        struct irq_data *irq_data;
#ifdef CONFIG_SMP
        cpumask_t dest;
#endif

        old_regs = set_irq_regs(regs);
        local_irq_disable();
        irq_enter();

        eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
        if (!eirr_val)
                goto set_out;
        irq = eirr_to_irq(eirr_val);

        irq_data = irq_get_irq_data(irq);

        /* Filter out spurious interrupts, mostly from serial port at bootup */
        if (unlikely(!irq_desc_has_action(irq_data_to_desc(irq_data))))
                goto set_out;

#ifdef CONFIG_SMP
        cpumask_copy(&dest, irq_data_get_affinity_mask(irq_data));
        if (irqd_is_per_cpu(irq_data) &&
            !cpumask_test_cpu(smp_processor_id(), &dest)) {
                int cpu = cpumask_first(&dest);

                printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
                       irq, smp_processor_id(), cpu);
                gsc_writel(irq + CPU_IRQ_BASE,
                           per_cpu(cpu_data, cpu).hpa);
                goto set_out;
        }
#endif
        stack_overflow_check(regs);

#ifdef CONFIG_IRQSTACKS
        execute_on_irq_stack(&generic_handle_irq, irq);
#else
        generic_handle_irq(irq);
#endif /* CONFIG_IRQSTACKS */

 out:
        irq_exit();
        set_irq_regs(old_regs);
        return;

 set_out:
        set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
        goto out;
}

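/*
** Bind every CPU-region IRQ to cpu_interrupt_type and register the
** timer (and, on SMP, the IPI) handlers that always live on these lines.
*/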
static void claim_cpu_irqs(void)
{
        unsigned long flags = IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL;
        int i;

        for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
                irq_set_chip_and_handler(i, &cpu_interrupt_type,
                                         handle_percpu_irq);
        }

        irq_set_handler(TIMER_IRQ, handle_percpu_irq);
        if (request_irq(TIMER_IRQ, timer_interrupt, flags, "timer", NULL))
                pr_err("Failed to register timer interrupt\n");
#ifdef CONFIG_SMP
        irq_set_handler(IPI_IRQ, handle_percpu_irq);
        if (request_irq(IPI_IRQ, ipi_interrupt, IRQF_PERCPU, "IPI", NULL))
                pr_err("Failed to register IPI interrupt\n");
#endif
}

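/*
** Early boot IRQ setup: clear any pending bits in the EIRR, claim the
** CPU IRQs, and enable the timer (plus the IPI on SMP) in the EIEM.
*/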
void __init init_IRQ(void)
{
        local_irq_disable();    /* PARANOID - should already be disabled */
        mtctl(~0UL, 23);        /* EIRR : clear all pending external intr */
#ifdef CONFIG_SMP
        if (!cpu_eiem) {
                claim_cpu_irqs();
                cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
        }
#else
        claim_cpu_irqs();
        cpu_eiem = EIEM_MASK(TIMER_IRQ);
#endif
        set_eiem(cpu_eiem);     /* EIEM : enable all external intr */
}