/*
 * xen/arch/arm/irq.c
 *
 * ARM Interrupt support
 *
 * Ian Campbell <ian.campbell@citrix.com>
 * Copyright (c) 2011 Citrix Systems.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <xen/lib.h>
#include <xen/spinlock.h>
#include <xen/irq.h>
#include <xen/init.h>
#include <xen/errno.h>
#include <xen/sched.h>

#include <asm/gic.h>
#include <asm/vgic.h>

const unsigned int nr_irqs = NR_IRQS;

static unsigned int local_irqs_type[NR_LOCAL_IRQS];
static DEFINE_SPINLOCK(local_irqs_type_lock);

/* Describe an IRQ assigned to a guest */
struct irq_guest
{
    struct domain *d;
    unsigned int virq;
};

static void ack_none(struct irq_desc *irq)
{
    printk("unexpected IRQ trap at irq %02x\n", irq->irq);
}

static void end_none(struct irq_desc *irq)
{
    /*
     * Still allow a CPU to end an interrupt if we receive a spurious
     * interrupt. This prevents the CPU from losing the interrupt forever.
     */
    gic_hw_ops->gic_host_irq_type->end(irq);
}

hw_irq_controller no_irq_type = {
    .typename = "none",
    .startup = irq_startup_none,
    .shutdown = irq_shutdown_none,
    .enable = irq_enable_none,
    .disable = irq_disable_none,
    .ack = ack_none,
    .end = end_none
};

static irq_desc_t irq_desc[NR_IRQS];
static DEFINE_PER_CPU(irq_desc_t[NR_LOCAL_IRQS], local_irq_desc);

irq_desc_t *__irq_to_desc(int irq)
{
    if ( irq < NR_LOCAL_IRQS )
        return &this_cpu(local_irq_desc)[irq];

    return &irq_desc[irq-NR_LOCAL_IRQS];
}

int arch_init_one_irq_desc(struct irq_desc *desc)
{
    desc->arch.type = IRQ_TYPE_INVALID;
    return 0;
}


static int __init init_irq_data(void)
{
    int irq;

    for ( irq = NR_LOCAL_IRQS; irq < NR_IRQS; irq++ )
    {
        struct irq_desc *desc = irq_to_desc(irq);
        init_one_irq_desc(desc);
        desc->irq = irq;
        desc->action  = NULL;
    }

    return 0;
}

static int init_local_irq_data(void)
{
    int irq;

    spin_lock(&local_irqs_type_lock);

    for ( irq = 0; irq < NR_LOCAL_IRQS; irq++ )
    {
        struct irq_desc *desc = irq_to_desc(irq);
        init_one_irq_desc(desc);
        desc->irq = irq;
        desc->action  = NULL;

        /* PPIs are included in local_irqs; we copy the IRQ type from
         * local_irqs_type when bringing up the local IRQs for this CPU in
         * order to pick up any configuration done before this CPU came
         * up. For interrupts configured after this point, this is done in
         * irq_set_type.
         */
        desc->arch.type = local_irqs_type[irq];
    }

    spin_unlock(&local_irqs_type_lock);

    return 0;
}

void __init init_IRQ(void)
{
    int irq;

    spin_lock(&local_irqs_type_lock);
    for ( irq = 0; irq < NR_LOCAL_IRQS; irq++ )
        local_irqs_type[irq] = IRQ_TYPE_INVALID;
    spin_unlock(&local_irqs_type_lock);

    BUG_ON(init_local_irq_data() < 0);
    BUG_ON(init_irq_data() < 0);
}

void init_secondary_IRQ(void)
{
    BUG_ON(init_local_irq_data() < 0);
}

static inline struct irq_guest *irq_get_guest_info(struct irq_desc *desc)
{
    ASSERT(spin_is_locked(&desc->lock));
    ASSERT(test_bit(_IRQ_GUEST, &desc->status));
    ASSERT(desc->action != NULL);

    return desc->action->dev_id;
}

static inline struct domain *irq_get_domain(struct irq_desc *desc)
{
    return irq_get_guest_info(desc)->d;
}

void irq_set_affinity(struct irq_desc *desc, const cpumask_t *cpu_mask)
{
    if ( desc != NULL )
        desc->handler->set_affinity(desc, cpu_mask);
}

int request_irq(unsigned int irq, unsigned int irqflags,
                void (*handler)(int, void *, struct cpu_user_regs *),
                const char *devname, void *dev_id)
{
    struct irqaction *action;
    int retval;

    /*
     * Sanity-check: shared interrupts must pass in a real dev-ID,
     * otherwise we'll have trouble later trying to figure out
     * which interrupt is which (messes up the interrupt freeing
     * logic etc).
     */
    if ( irq >= nr_irqs )
        return -EINVAL;
    if ( !handler )
        return -EINVAL;

    action = xmalloc(struct irqaction);
    if ( !action )
        return -ENOMEM;

    action->handler = handler;
    action->name = devname;
    action->dev_id = dev_id;
    action->free_on_release = 1;

    retval = setup_irq(irq, irqflags, action);
    if ( retval )
        xfree(action);

    return retval;
}

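/*
 * Example usage (illustrative sketch): a caller would normally bind a
 * handler with request_irq() and undo the binding with release_irq().
 * The handler, device name, dev_id and IRQ number below are hypothetical
 * and only illustrate the calling convention of this file's API.
 *
 *     static void example_uart_irq(int irq, void *dev_id,
 *                                  struct cpu_user_regs *regs)
 *     {
 *         struct example_uart *uart = dev_id;
 *
 *         example_uart_handle_interrupt(uart);
 *     }
 *
 *     if ( request_irq(uart->spi, 0, example_uart_irq,
 *                      "example-uart", uart) )
 *         printk(XENLOG_ERR "example-uart: failed to request IRQ\n");
 *
 *     release_irq(uart->spi, uart);
 */
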
/* Dispatch an interrupt */
void do_IRQ(struct cpu_user_regs *regs, unsigned int irq, int is_fiq)
{
    struct irq_desc *desc = irq_to_desc(irq);
    struct irqaction *action;

    perfc_incr(irqs);

    ASSERT(irq >= 16); /* SGIs do not come down this path */

    if ( irq < 32 )
        perfc_incr(ppis);
    else
        perfc_incr(spis);

    /* TODO: this_cpu(irq_count)++; */

    irq_enter();

    spin_lock(&desc->lock);
    desc->handler->ack(desc);

#ifndef NDEBUG
    if ( !desc->action )
    {
        printk("Unknown %s %#3.3x\n",
               is_fiq ? "FIQ" : "IRQ", irq);
        goto out;
    }
#endif

    if ( test_bit(_IRQ_GUEST, &desc->status) )
    {
        struct irq_guest *info = irq_get_guest_info(desc);

        perfc_incr(guest_irqs);
        desc->handler->end(desc);

        set_bit(_IRQ_INPROGRESS, &desc->status);

        /*
         * The IRQ cannot be a PPI: we only support delivery of SPIs to
         * guests.
         */
        vgic_inject_irq(info->d, NULL, info->virq, true);
        goto out_no_end;
    }

    if ( test_bit(_IRQ_DISABLED, &desc->status) )
        goto out;

    set_bit(_IRQ_INPROGRESS, &desc->status);

    action = desc->action;

    spin_unlock_irq(&desc->lock);

    do
    {
        action->handler(irq, action->dev_id, regs);
        action = action->next;
    } while ( action );

    spin_lock_irq(&desc->lock);

    clear_bit(_IRQ_INPROGRESS, &desc->status);

out:
    desc->handler->end(desc);
out_no_end:
    spin_unlock(&desc->lock);
    irq_exit();
}

void release_irq(unsigned int irq, const void *dev_id)
{
    struct irq_desc *desc;
    unsigned long flags;
    struct irqaction *action, **action_ptr;

    desc = irq_to_desc(irq);

    spin_lock_irqsave(&desc->lock, flags);

    action_ptr = &desc->action;
    for ( ;; )
    {
        action = *action_ptr;
        if ( !action )
        {
            printk(XENLOG_WARNING "Trying to free already-free IRQ %u\n", irq);
            spin_unlock_irqrestore(&desc->lock, flags);
            return;
        }

        if ( action->dev_id == dev_id )
            break;

        action_ptr = &action->next;
    }

    /* Found it - remove it from the action list */
    *action_ptr = action->next;

    /* If this was the last action, shut down the IRQ */
    if ( !desc->action )
    {
        desc->handler->shutdown(desc);
        clear_bit(_IRQ_GUEST, &desc->status);
    }

    spin_unlock_irqrestore(&desc->lock, flags);

    /* Wait to make sure it's not being used on another CPU */
    do { smp_mb(); } while ( test_bit(_IRQ_INPROGRESS, &desc->status) );

    if ( action->free_on_release )
        xfree(action);
}

static int __setup_irq(struct irq_desc *desc, unsigned int irqflags,
                       struct irqaction *new)
{
    bool shared = irqflags & IRQF_SHARED;

    ASSERT(new != NULL);

    /* Sanity checks:
     *  - an IRQ that already has an action can only be appended to when
     *    both the existing registration and the new one are shared
     *  - dev_id is not NULL when IRQF_SHARED is set
     */
    if ( desc->action != NULL &&
         (!test_bit(_IRQF_SHARED, &desc->status) || !shared) )
        return -EINVAL;
    if ( shared && new->dev_id == NULL )
        return -EINVAL;

    if ( shared )
        set_bit(_IRQF_SHARED, &desc->status);

    /* Make sure the new action is fully written before it is linked in. */
    new->next = desc->action;
    dsb(ish);
    desc->action = new;
    dsb(ish);

    return 0;
}

int setup_irq(unsigned int irq, unsigned int irqflags, struct irqaction *new)
{
    int rc;
    unsigned long flags;
    struct irq_desc *desc;
    bool disabled;

    desc = irq_to_desc(irq);

    spin_lock_irqsave(&desc->lock, flags);

    if ( test_bit(_IRQ_GUEST, &desc->status) )
    {
        struct domain *d = irq_get_domain(desc);

        spin_unlock_irqrestore(&desc->lock, flags);
        printk(XENLOG_ERR "ERROR: IRQ %u is already in use by domain %u\n",
               irq, d->domain_id);
        return -EBUSY;
    }

    disabled = (desc->action == NULL);

    rc = __setup_irq(desc, irqflags, new);
    if ( rc )
        goto err;

    /* First time the IRQ is set up */
    if ( disabled )
    {
        gic_route_irq_to_xen(desc, GIC_PRI_IRQ);
        /* It's fine to use smp_processor_id() because:
         * For PPI: irq_desc is banked
         * For SPI: we don't care for now which CPU will receive the
         * interrupt
         * TODO: Handle the case where the SPI is set up on a different
         * CPU than the targeted one, and take the priority into account.
         */
        irq_set_affinity(desc, cpumask_of(smp_processor_id()));
        desc->handler->startup(desc);
    }

err:
    spin_unlock_irqrestore(&desc->lock, flags);

    return rc;
}

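/*
 * Example usage (illustrative sketch): code that wants to keep ownership
 * of the irqaction, for instance early boot code, can hand a statically
 * allocated structure to setup_irq() instead of going through
 * request_irq(). The handler, IRQ number and names below are
 * hypothetical.
 *
 *     static void example_timer_irq(int irq, void *dev_id,
 *                                   struct cpu_user_regs *regs);
 *
 *     static struct irqaction example_timer_irqaction = {
 *         .handler = example_timer_irq,
 *         .name = "example-timer",
 *         .dev_id = NULL,
 *     };
 *
 *     if ( setup_irq(example_timer_ppi, 0, &example_timer_irqaction) )
 *         panic("example-timer: failed to set up IRQ\n");
 *
 * Because free_on_release is left at 0, release_irq() will not try to
 * xfree() the static structure.
 */
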
bool is_assignable_irq(unsigned int irq)
{
    /* For now, we can only route SPIs to the guest */
    return (irq >= NR_LOCAL_IRQS) && (irq < gic_number_lines());
}

/*
 * Only the hardware domain is allowed to configure the interrupt
 * type for now.
 *
 * XXX: See whether it is possible to let any domain configure the type.
 */
bool irq_type_set_by_domain(const struct domain *d)
{
    return (d == hardware_domain);
}

/*
 * Route an IRQ to a specific guest.
 * For now only SPIs are assignable to the guest.
 */
int route_irq_to_guest(struct domain *d, unsigned int virq,
                       unsigned int irq, const char *devname)
{
    struct irqaction *action;
    struct irq_guest *info;
    struct irq_desc *desc;
    unsigned long flags;
    int retval = 0;

    if ( virq >= vgic_num_irqs(d) )
    {
        printk(XENLOG_G_ERR
               "the vIRQ number %u is too high for domain %u (max = %u)\n",
               virq, d->domain_id, vgic_num_irqs(d));
        return -EINVAL;
    }

    /* Only routing to virtual SPIs is supported */
    if ( virq < NR_LOCAL_IRQS )
    {
        printk(XENLOG_G_ERR "IRQ can only be routed to an SPI\n");
        return -EINVAL;
    }

    if ( !is_assignable_irq(irq) )
    {
        printk(XENLOG_G_ERR "the IRQ %u is not routable\n", irq);
        return -EINVAL;
    }
    desc = irq_to_desc(irq);

    action = xmalloc(struct irqaction);
    if ( !action )
        return -ENOMEM;

    info = xmalloc(struct irq_guest);
    if ( !info )
    {
        xfree(action);
        return -ENOMEM;
    }

    info->d = d;
    info->virq = virq;

    action->dev_id = info;
    action->name = devname;
    action->free_on_release = 1;

    spin_lock_irqsave(&desc->lock, flags);

    if ( !irq_type_set_by_domain(d) && desc->arch.type == IRQ_TYPE_INVALID )
    {
        printk(XENLOG_G_ERR "IRQ %u has not been configured\n", irq);
        retval = -EIO;
        goto out;
    }

    /*
     * If the IRQ is already used by someone
     *  - If it's the same domain -> Xen doesn't need to update the IRQ desc.
     *    For safety, check that we are not trying to assign the IRQ to a
     *    different vIRQ.
     *  - Otherwise -> For now, don't allow the IRQ to be shared between
     *    Xen and domains.
     */
    if ( desc->action != NULL )
    {
        if ( test_bit(_IRQ_GUEST, &desc->status) )
        {
            struct domain *ad = irq_get_domain(desc);

            if ( d != ad )
            {
                printk(XENLOG_G_ERR "IRQ %u is already used by domain %u\n",
                       irq, ad->domain_id);
                retval = -EBUSY;
            }
            else if ( irq_get_guest_info(desc)->virq != virq )
            {
                printk(XENLOG_G_ERR
                       "d%u: IRQ %u is already assigned to vIRQ %u\n",
                       d->domain_id, irq, irq_get_guest_info(desc)->virq);
                retval = -EBUSY;
            }
        }
        else
        {
            printk(XENLOG_G_ERR "IRQ %u is already used by Xen\n", irq);
            retval = -EBUSY;
        }
        goto out;
    }

    retval = __setup_irq(desc, 0, action);
    if ( retval )
        goto out;

    retval = gic_route_irq_to_guest(d, virq, desc, GIC_PRI_IRQ);

    spin_unlock_irqrestore(&desc->lock, flags);

    if ( retval )
    {
        release_irq(desc->irq, info);
        goto free_info;
    }

    return 0;

out:
    spin_unlock_irqrestore(&desc->lock, flags);
    xfree(action);
free_info:
    xfree(info);

    return retval;
}

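/*
 * Example usage (illustrative sketch): passthrough setup code could route
 * a physical SPI to a guest, typically at the same vIRQ number, before
 * handing the device over. The IRQ number and device name below are
 * hypothetical.
 *
 *     int rc = route_irq_to_guest(d, 68, 68, "example-passthrough");
 *
 *     if ( rc )
 *         printk(XENLOG_G_ERR "d%u: cannot route IRQ 68 (rc = %d)\n",
 *                d->domain_id, rc);
 *
 * The routing is torn down again with release_guest_irq(d, 68) when the
 * device is removed from the guest.
 */
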
int release_guest_irq(struct domain *d, unsigned int virq)
{
    struct irq_desc *desc;
    struct irq_guest *info;
    unsigned long flags;
    int ret;

    /* Only SPIs are supported */
    if ( virq < NR_LOCAL_IRQS || virq >= vgic_num_irqs(d) )
        return -EINVAL;

    desc = vgic_get_hw_irq_desc(d, NULL, virq);
    if ( !desc )
        return -EINVAL;

    spin_lock_irqsave(&desc->lock, flags);

    ret = -EINVAL;
    if ( !test_bit(_IRQ_GUEST, &desc->status) )
        goto unlock;

    info = irq_get_guest_info(desc);
    ret = -EINVAL;
    if ( d != info->d )
        goto unlock;

    ret = gic_remove_irq_from_guest(d, virq, desc);
    if ( ret )
        goto unlock;

    spin_unlock_irqrestore(&desc->lock, flags);

    release_irq(desc->irq, info);
    xfree(info);

    return 0;

unlock:
    spin_unlock_irqrestore(&desc->lock, flags);

    return ret;
}

/*
 * pirq event channels. We don't use these on ARM; instead we use the
 * features of the GIC to inject virtualised normal interrupts.
 */
struct pirq *alloc_pirq_struct(struct domain *d)
{
    return NULL;
}

/*
 * These are all unreachable given an alloc_pirq_struct
 * which returns NULL: all callers try to look up the struct pirq
 * first, which will fail.
 */
int pirq_guest_bind(struct vcpu *v, struct pirq *pirq, int will_share)
{
    BUG();
}

void pirq_guest_unbind(struct domain *d, struct pirq *pirq)
{
    BUG();
}

void pirq_set_affinity(struct domain *d, int pirq, const cpumask_t *mask)
{
    BUG();
}

static bool irq_validate_new_type(unsigned int curr, unsigned int new)
{
    return (curr == IRQ_TYPE_INVALID || curr == new);
}

int irq_set_spi_type(unsigned int spi, unsigned int type)
{
    unsigned long flags;
    struct irq_desc *desc = irq_to_desc(spi);
    int ret = -EBUSY;

    /* This function should not be used for other than SPIs */
    if ( spi < NR_LOCAL_IRQS )
        return -EINVAL;

    spin_lock_irqsave(&desc->lock, flags);

    if ( !irq_validate_new_type(desc->arch.type, type) )
        goto err;

    desc->arch.type = type;

    ret = 0;

err:
    spin_unlock_irqrestore(&desc->lock, flags);
    return ret;
}

static int irq_local_set_type(unsigned int irq, unsigned int type)
{
    unsigned int cpu;
    unsigned int old_type;
    unsigned long flags;
    int ret = -EBUSY;
    struct irq_desc *desc;

    ASSERT(irq < NR_LOCAL_IRQS);

    spin_lock(&local_irqs_type_lock);

    old_type = local_irqs_type[irq];

    if ( !irq_validate_new_type(old_type, type) )
        goto unlock;

    ret = 0;
    /* We don't need to reconfigure if the type is correctly set */
    if ( old_type == type )
        goto unlock;

    local_irqs_type[irq] = type;

    for_each_cpu( cpu, &cpu_online_map )
    {
        desc = &per_cpu(local_irq_desc, cpu)[irq];
        spin_lock_irqsave(&desc->lock, flags);
        desc->arch.type = type;
        spin_unlock_irqrestore(&desc->lock, flags);
    }

unlock:
    spin_unlock(&local_irqs_type_lock);
    return ret;
}

int irq_set_type(unsigned int irq, unsigned int type)
{
    int res;

    /* Set up the IRQ type */
    if ( irq < NR_LOCAL_IRQS )
        res = irq_local_set_type(irq, type);
    else
        res = irq_set_spi_type(irq, type);

    return res;
}

int platform_get_irq(const struct dt_device_node *device, int index)
{
    struct dt_irq dt_irq;
    unsigned int type, irq;

    if ( dt_device_get_irq(device, index, &dt_irq) )
        return -1;

    irq = dt_irq.irq;
    type = dt_irq.type;

    if ( irq_set_type(irq, type) )
        return -1;

    return irq;
}

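/*
 * Example usage (illustrative sketch): a device-tree aware driver could
 * combine platform_get_irq() and request_irq() to bind the interrupt
 * described by its node. The node, handler and names below are
 * hypothetical.
 *
 *     int irq = platform_get_irq(node, 0);
 *
 *     if ( irq < 0 )
 *         return -ENODEV;
 *
 *     if ( request_irq(irq, 0, example_dev_irq, "example-dev", dev) )
 *         return -EIO;
 *
 * platform_get_irq() applies the trigger type from the device tree via
 * irq_set_type(), so the caller does not need to configure it separately.
 */
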
/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */