/******************************************************************************
 * irq.c
 *
 * Interrupt distribution and delivery logic.
 *
 * Copyright (c) 2006, K A Fraser, XenSource Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/types.h>
#include <xen/event.h>
#include <xen/sched.h>
#include <xen/irq.h>
#include <xen/keyhandler.h>
#include <asm/hvm/domain.h>
#include <asm/hvm/support.h>
#include <asm/msi.h>
#include <public/hvm/params.h>

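/*
 * A PV-aware HVM guest can have an emulated IRQ bound to an event-channel
 * pirq (see map_domain_emuirq_pirq()). While such a binding exists, the
 * interrupt is delivered as an event channel rather than through the
 * emulated vIOAPIC/vPIC.
 */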
bool hvm_domain_use_pirq(const struct domain *d, const struct pirq *pirq)
{
    return is_hvm_domain(d) && pirq && pirq->arch.hvm.emuirq != IRQ_UNBOUND;
}

/* Must be called with hvm_domain->irq_lock held */
static void assert_gsi(struct domain *d, unsigned ioapic_gsi)
{
    struct pirq *pirq =
        pirq_info(d, domain_emuirq_to_pirq(d, ioapic_gsi));

    if ( hvm_domain_use_pirq(d, pirq) )
    {
        send_guest_pirq(d, pirq);
        return;
    }
    vioapic_irq_positive_edge(d, ioapic_gsi);
}

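/*
 * Assert a GSI directly at the vIOAPIC, bypassing the PCI link logic. For
 * level-triggered interrupts the assert count is bumped so that a later
 * hvm_ioapic_deassert() can lower the line. Returns the vector the vIOAPIC
 * would deliver for this GSI, or -1 on error.
 */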
int hvm_ioapic_assert(struct domain *d, unsigned int gsi, bool level)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    int vector;

    if ( gsi >= hvm_irq->nr_gsis )
    {
        ASSERT_UNREACHABLE();
        return -1;
    }

    spin_lock(&d->arch.hvm.irq_lock);
    if ( !level || hvm_irq->gsi_assert_count[gsi]++ == 0 )
        assert_gsi(d, gsi);
    vector = vioapic_get_vector(d, gsi);
    spin_unlock(&d->arch.hvm.irq_lock);

    return vector;
}

void hvm_ioapic_deassert(struct domain *d, unsigned int gsi)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);

    if ( gsi >= hvm_irq->nr_gsis )
    {
        ASSERT_UNREACHABLE();
        return;
    }

    spin_lock(&d->arch.hvm.irq_lock);
    hvm_irq->gsi_assert_count[gsi]--;
    spin_unlock(&d->arch.hvm.irq_lock);
}

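/*
 * Raise an interrupt on both emulated controllers: the vIOAPIC pin for
 * ioapic_gsi and the vPIC pin for pic_irq.
 * Must be called with hvm_domain->irq_lock held.
 */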
static void assert_irq(struct domain *d, unsigned ioapic_gsi, unsigned pic_irq)
{
    assert_gsi(d, ioapic_gsi);
    vpic_irq_positive_edge(d, pic_irq);
}

/* Must be called with hvm_domain->irq_lock held */
static void deassert_irq(struct domain *d, unsigned isa_irq)
{
    struct pirq *pirq =
        pirq_info(d, domain_emuirq_to_pirq(d, isa_irq));

    if ( !hvm_domain_use_pirq(d, pirq) )
        vpic_irq_negative_edge(d, isa_irq);
}

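/*
 * Each of the 32 emulated PCI devices has four INTx lines (INTA#-INTD#),
 * tracked as bit device*4 + intx in pci_intx.i. A line maps to a GSI via
 * hvm_pci_intx_gsi() and to one of the four PCI-ISA link devices via
 * hvm_pci_intx_link() (see asm/hvm/irq.h); the link in turn routes to the
 * ISA IRQ currently programmed in pci_link.route[]. Reference counts allow
 * several INTx lines to share one GSI or link without losing the line level.
 */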
static void __hvm_pci_intx_assert(
    struct domain *d, unsigned int device, unsigned int intx)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    unsigned int gsi, link, isa_irq;

    ASSERT((device <= 31) && (intx <= 3));

    if ( __test_and_set_bit(device*4 + intx, &hvm_irq->pci_intx.i) )
        return;

    gsi = hvm_pci_intx_gsi(device, intx);
    if ( gsi >= hvm_irq->nr_gsis )
    {
        ASSERT_UNREACHABLE();
        return;
    }
    if ( hvm_irq->gsi_assert_count[gsi]++ == 0 )
        assert_gsi(d, gsi);

    link    = hvm_pci_intx_link(device, intx);
    isa_irq = hvm_irq->pci_link.route[link];
    if ( (hvm_irq->pci_link_assert_count[link]++ == 0) && isa_irq &&
         (hvm_irq->gsi_assert_count[isa_irq]++ == 0) )
        assert_irq(d, isa_irq, isa_irq);
}

void hvm_pci_intx_assert(
    struct domain *d, unsigned int device, unsigned int intx)
{
    spin_lock(&d->arch.hvm.irq_lock);
    __hvm_pci_intx_assert(d, device, intx);
    spin_unlock(&d->arch.hvm.irq_lock);
}

static void __hvm_pci_intx_deassert(
    struct domain *d, unsigned int device, unsigned int intx)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    unsigned int gsi, link, isa_irq;

    ASSERT((device <= 31) && (intx <= 3));

    if ( !__test_and_clear_bit(device*4 + intx, &hvm_irq->pci_intx.i) )
        return;

    gsi = hvm_pci_intx_gsi(device, intx);
    if ( gsi >= hvm_irq->nr_gsis )
    {
        ASSERT_UNREACHABLE();
        return;
    }
    --hvm_irq->gsi_assert_count[gsi];

    link    = hvm_pci_intx_link(device, intx);
    isa_irq = hvm_irq->pci_link.route[link];
    if ( (--hvm_irq->pci_link_assert_count[link] == 0) && isa_irq &&
         (--hvm_irq->gsi_assert_count[isa_irq] == 0) )
        deassert_irq(d, isa_irq);
}

void hvm_pci_intx_deassert(
    struct domain *d, unsigned int device, unsigned int intx)
{
    spin_lock(&d->arch.hvm.irq_lock);
    __hvm_pci_intx_deassert(d, device, intx);
    spin_unlock(&d->arch.hvm.irq_lock);
}

void hvm_gsi_assert(struct domain *d, unsigned int gsi)
{
    int trig = vioapic_get_trigger_mode(d, gsi);
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);

    if ( gsi >= hvm_irq->nr_gsis || trig < 0 )
    {
        ASSERT_UNREACHABLE();
        return;
    }

    /*
     * __hvm_pci_intx_{de}assert uses a bitfield in pci_intx.i to track the
     * status of each interrupt line, and Xen does the routing and GSI
     * assertion based on that. The value of the pci_intx.i bitmap prevents
     * the same line from triggering multiple times. As we don't use that
     * bitmap for the hardware domain, Xen needs to rely on gsi_assert_count
     * in order to know if the GSI is pending or not.
     */
    spin_lock(&d->arch.hvm.irq_lock);
    if ( trig == VIOAPIC_EDGE_TRIG || !hvm_irq->gsi_assert_count[gsi] )
    {
        if ( trig == VIOAPIC_LEVEL_TRIG )
            hvm_irq->gsi_assert_count[gsi] = 1;
        assert_gsi(d, gsi);
    }
    spin_unlock(&d->arch.hvm.irq_lock);
}

void hvm_gsi_deassert(struct domain *d, unsigned int gsi)
{
    int trig = vioapic_get_trigger_mode(d, gsi);
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);

    if ( trig != VIOAPIC_LEVEL_TRIG || gsi >= hvm_irq->nr_gsis )
    {
        ASSERT(trig == VIOAPIC_EDGE_TRIG && gsi < hvm_irq->nr_gsis);
        return;
    }

    spin_lock(&d->arch.hvm.irq_lock);
    hvm_irq->gsi_assert_count[gsi] = 0;
    spin_unlock(&d->arch.hvm.irq_lock);
}

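/*
 * Assert an ISA IRQ on both the vPIC and, via its GSI, the vIOAPIC. ISA
 * IRQ 0 is routed to GSI 2 (hvm_isa_irq_to_gsi()); all other ISA IRQs map
 * to the identically numbered GSI. If a get_vector callback is supplied it
 * is invoked under the lock and its result returned.
 */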
int hvm_isa_irq_assert(struct domain *d, unsigned int isa_irq,
                       int (*get_vector)(const struct domain *d,
                                         unsigned int gsi))
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);
    int vector = -1;

    ASSERT(isa_irq <= 15);

    spin_lock(&d->arch.hvm.irq_lock);

    if ( !__test_and_set_bit(isa_irq, &hvm_irq->isa_irq.i) &&
         (hvm_irq->gsi_assert_count[gsi]++ == 0) )
        assert_irq(d, gsi, isa_irq);

    if ( get_vector )
        vector = get_vector(d, gsi);

    spin_unlock(&d->arch.hvm.irq_lock);

    return vector;
}

void hvm_isa_irq_deassert(
    struct domain *d, unsigned int isa_irq)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);

    ASSERT(isa_irq <= 15);

    spin_lock(&d->arch.hvm.irq_lock);

    if ( __test_and_clear_bit(isa_irq, &hvm_irq->isa_irq.i) &&
         (--hvm_irq->gsi_assert_count[gsi] == 0) )
        deassert_irq(d, isa_irq);

    spin_unlock(&d->arch.hvm.irq_lock);
}

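/*
 * Recompute the level of the event-channel callback "via" from vCPU0's
 * evtchn_upcall_pending flag, and propagate any change through whichever
 * mechanism (GSI or PCI INTx) the guest selected with
 * HVM_PARAM_CALLBACK_IRQ.
 */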
static void hvm_set_callback_irq_level(struct vcpu *v)
{
    struct domain *d = v->domain;
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    unsigned int gsi, pdev, pintx, asserted;

    ASSERT(v->vcpu_id == 0);

    spin_lock(&d->arch.hvm.irq_lock);

    /* NB. Do not check the evtchn_upcall_mask. It is not used in HVM mode. */
    asserted = !!vcpu_info(v, evtchn_upcall_pending);
    if ( hvm_irq->callback_via_asserted == asserted )
        goto out;
    hvm_irq->callback_via_asserted = asserted;

    /* Callback status has changed. Update the callback via. */
    switch ( hvm_irq->callback_via_type )
    {
    case HVMIRQ_callback_gsi:
        gsi = hvm_irq->callback_via.gsi;
        if ( asserted && (hvm_irq->gsi_assert_count[gsi]++ == 0) )
        {
            vioapic_irq_positive_edge(d, gsi);
            if ( gsi <= 15 )
                vpic_irq_positive_edge(d, gsi);
        }
        else if ( !asserted && (--hvm_irq->gsi_assert_count[gsi] == 0) )
        {
            if ( gsi <= 15 )
                vpic_irq_negative_edge(d, gsi);
        }
        break;
    case HVMIRQ_callback_pci_intx:
        pdev  = hvm_irq->callback_via.pci.dev;
        pintx = hvm_irq->callback_via.pci.intx;
        if ( asserted )
            __hvm_pci_intx_assert(d, pdev, pintx);
        else
            __hvm_pci_intx_deassert(d, pdev, pintx);
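        /* Fall through to the (empty) default case. */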
    default:
        break;
    }

 out:
    spin_unlock(&d->arch.hvm.irq_lock);
}

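/*
 * If the callback via is still asserted but vCPU0 no longer has a pending
 * event-channel upcall, lower the line again.
 */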
void hvm_maybe_deassert_evtchn_irq(void)
{
    struct domain *d = current->domain;
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);

    if ( hvm_irq->callback_via_asserted &&
         !vcpu_info(d->vcpu[0], evtchn_upcall_pending) )
        hvm_set_callback_irq_level(d->vcpu[0]);
}

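/*
 * Deliver an event-channel upcall notification to a vCPU. A per-vCPU
 * upcall vector, if set, is injected directly via the vLAPIC; domains
 * using the vector callback via only need a kick, as the pending vector is
 * picked up in hvm_vcpu_has_pending_irq(); otherwise fall back to the
 * domain-wide callback via, which is wired to vCPU0 only.
 */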
void hvm_assert_evtchn_irq(struct vcpu *v)
{
    if ( unlikely(in_irq() || !local_irq_is_enabled()) )
    {
        tasklet_schedule(&v->arch.hvm.assert_evtchn_irq_tasklet);
        return;
    }

    if ( v->arch.hvm.evtchn_upcall_vector != 0 )
    {
        uint8_t vector = v->arch.hvm.evtchn_upcall_vector;

        vlapic_set_irq(vcpu_vlapic(v), vector, 0);
    }
    else if ( is_hvm_pv_evtchn_vcpu(v) )
        vcpu_kick(v);
    else if ( v->vcpu_id == 0 )
        hvm_set_callback_irq_level(v);
}

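/*
 * Reroute one of the four PCI-ISA links to a new ISA IRQ. Any lines
 * currently asserted through the link are migrated: the old ISA IRQ is
 * lowered if this was its last reference, and the new one is raised.
 */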
int hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    u8 old_isa_irq;
    int i;

    if ( (link > 3) || (isa_irq > 15) )
        return -EINVAL;

    spin_lock(&d->arch.hvm.irq_lock);

    old_isa_irq = hvm_irq->pci_link.route[link];
    if ( old_isa_irq == isa_irq )
        goto out;
    hvm_irq->pci_link.route[link] = isa_irq;

    /* PCI pass-through fixup. */
    if ( hvm_irq->dpci )
    {
        if ( old_isa_irq )
            clear_bit(old_isa_irq, &hvm_irq->dpci->isairq_map);

        for ( i = 0; i < NR_LINK; i++ )
            if ( hvm_irq->dpci->link_cnt[i] && hvm_irq->pci_link.route[i] )
                set_bit(hvm_irq->pci_link.route[i],
                        &hvm_irq->dpci->isairq_map);
    }

    if ( hvm_irq->pci_link_assert_count[link] == 0 )
        goto out;

    if ( old_isa_irq && (--hvm_irq->gsi_assert_count[old_isa_irq] == 0) )
        vpic_irq_negative_edge(d, old_isa_irq);

    if ( isa_irq && (hvm_irq->gsi_assert_count[isa_irq]++ == 0) )
    {
        vioapic_irq_positive_edge(d, isa_irq);
        vpic_irq_positive_edge(d, isa_irq);
    }

 out:
    spin_unlock(&d->arch.hvm.irq_lock);

    dprintk(XENLOG_G_INFO, "Dom%u PCI link %u changed %u -> %u\n",
            d->domain_id, link, old_isa_irq, isa_irq);

    return 0;
}

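/*
 * Inject an MSI with the given address/data pair. A zero vector is not a
 * deliverable MSI; it is used as an escape to deliver (and on first use,
 * bind) an event-channel pirq whose number is encoded in addr[63:32]
 * combined with the destination ID field.
 */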
int hvm_inject_msi(struct domain *d, uint64_t addr, uint32_t data)
{
    uint32_t tmp = (uint32_t) addr;
    uint8_t  dest = (tmp & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
    uint8_t  dest_mode = !!(tmp & MSI_ADDR_DESTMODE_MASK);
    uint8_t  delivery_mode = (data & MSI_DATA_DELIVERY_MODE_MASK)
        >> MSI_DATA_DELIVERY_MODE_SHIFT;
    uint8_t trig_mode = (data & MSI_DATA_TRIGGER_MASK)
        >> MSI_DATA_TRIGGER_SHIFT;
    uint8_t vector = data & MSI_DATA_VECTOR_MASK;

    if ( !vector )
    {
        int pirq = ((addr >> 32) & 0xffffff00) | dest;

        if ( pirq > 0 )
        {
            struct pirq *info = pirq_info(d, pirq);

            /* If this is the first use, allocate the pirq. */
            if ( !info || info->arch.hvm.emuirq == IRQ_UNBOUND )
            {
                int rc;

                spin_lock(&d->event_lock);
                rc = map_domain_emuirq_pirq(d, pirq, IRQ_MSI_EMU);
                spin_unlock(&d->event_lock);
                if ( rc )
                    return rc;
                info = pirq_info(d, pirq);
                if ( !info )
                    return -EBUSY;
            }
            else if ( info->arch.hvm.emuirq != IRQ_MSI_EMU )
                return -EINVAL;
            send_guest_pirq(d, info);
            return 0;
        }
        return -ERANGE;
    }

    return vmsi_deliver(d, vector, dest, dest_mode, delivery_mode, trig_mode);
}

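/*
 * Change the callback via. The encoding of 'via' follows
 * HVM_PARAM_CALLBACK_IRQ (see public/hvm/params.h): bits [63:56] select
 * the type (GSI, PCI INTx or direct vector) and the low bits carry the
 * GSI number, device/INTx pair or vector number respectively.
 */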
void hvm_set_callback_via(struct domain *d, uint64_t via)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    unsigned int gsi = 0, pdev = 0, pintx = 0;
    uint8_t via_type;
    struct vcpu *v;

    via_type = (uint8_t)MASK_EXTR(via, HVM_PARAM_CALLBACK_IRQ_TYPE_MASK) + 1;
    if ( ((via_type == HVMIRQ_callback_gsi) && (via == 0)) ||
         (via_type > HVMIRQ_callback_vector) )
        via_type = HVMIRQ_callback_none;

    if ( via_type != HVMIRQ_callback_vector &&
         (!has_vlapic(d) || !has_vioapic(d) || !has_vpic(d)) )
        return;

    spin_lock(&d->arch.hvm.irq_lock);

    /* Tear down old callback via. */
    if ( hvm_irq->callback_via_asserted )
    {
        switch ( hvm_irq->callback_via_type )
        {
        case HVMIRQ_callback_gsi:
            gsi = hvm_irq->callback_via.gsi;
            if ( (--hvm_irq->gsi_assert_count[gsi] == 0) && (gsi <= 15) )
                vpic_irq_negative_edge(d, gsi);
            break;
        case HVMIRQ_callback_pci_intx:
            pdev  = hvm_irq->callback_via.pci.dev;
            pintx = hvm_irq->callback_via.pci.intx;
            __hvm_pci_intx_deassert(d, pdev, pintx);
            break;
        default:
            break;
        }
    }

    /* Set up new callback via. */
    switch ( hvm_irq->callback_via_type = via_type )
    {
    case HVMIRQ_callback_gsi:
        gsi = hvm_irq->callback_via.gsi = (uint8_t)via;
        if ( (gsi == 0) || (gsi >= hvm_irq->nr_gsis) )
            hvm_irq->callback_via_type = HVMIRQ_callback_none;
        else if ( hvm_irq->callback_via_asserted &&
                  (hvm_irq->gsi_assert_count[gsi]++ == 0) )
        {
            vioapic_irq_positive_edge(d, gsi);
            if ( gsi <= 15 )
                vpic_irq_positive_edge(d, gsi);
        }
        break;
    case HVMIRQ_callback_pci_intx:
        pdev  = hvm_irq->callback_via.pci.dev  = (uint8_t)(via >> 11) & 31;
        pintx = hvm_irq->callback_via.pci.intx = (uint8_t)via & 3;
        if ( hvm_irq->callback_via_asserted )
            __hvm_pci_intx_assert(d, pdev, pintx);
        break;
    case HVMIRQ_callback_vector:
        hvm_irq->callback_via.vector = (uint8_t)via;
        break;
    default:
        break;
    }

    spin_unlock(&d->arch.hvm.irq_lock);

    for_each_vcpu ( d, v )
        if ( is_vcpu_online(v) )
            hvm_assert_evtchn_irq(v);

#ifndef NDEBUG
    printk(XENLOG_G_INFO "Dom%u callback via changed to ", d->domain_id);
    switch ( via_type )
    {
    case HVMIRQ_callback_gsi:
        printk("GSI %u\n", gsi);
        break;
    case HVMIRQ_callback_pci_intx:
        printk("PCI INTx Dev 0x%02x Int%c\n", pdev, 'A' + pintx);
        break;
    case HVMIRQ_callback_vector:
        printk("Direct Vector 0x%02x\n", (uint8_t)via);
        break;
    default:
        printk("None\n");
        break;
    }
#endif
}

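/*
 * Return the highest-priority pending interrupt for a vCPU. The checks
 * below form a fixed priority order: NMI, then MCE, then the event-channel
 * callback vector, then the vPIC (if the vLAPIC accepts ExtINT), and
 * finally the vLAPIC itself.
 */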
struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v)
{
    struct hvm_domain *plat = &v->domain->arch.hvm;
    int vector;

    /*
     * Always call vlapic_sync_pir_to_irr so that PIR is synced into IRR when
     * using posted interrupts. Note this is also done by
     * vlapic_has_pending_irq but depending on which interrupts are pending
     * hvm_vcpu_has_pending_irq will return early without calling
     * vlapic_has_pending_irq.
     */
    vlapic_sync_pir_to_irr(v);

    if ( unlikely(v->arch.nmi_pending) )
        return hvm_intack_nmi;

    if ( unlikely(v->arch.mce_pending) )
        return hvm_intack_mce;

    if ( (plat->irq->callback_via_type == HVMIRQ_callback_vector)
         && vcpu_info(v, evtchn_upcall_pending) )
        return hvm_intack_vector(plat->irq->callback_via.vector);

    if ( vlapic_accept_pic_intr(v) && plat->vpic[0].int_output )
        return hvm_intack_pic(0);

    vector = vlapic_has_pending_irq(v);
    if ( vector != -1 )
        return hvm_intack_lapic(vector);

    return hvm_intack_none;
}

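/*
 * Commit to delivering the interrupt found by hvm_vcpu_has_pending_irq().
 * The acknowledge step may yield hvm_intack_none if the source was
 * retracted in the meantime, e.g. the NMI/MCE flag was already consumed or
 * the vPIC withdrew its request.
 */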
struct hvm_intack hvm_vcpu_ack_pending_irq(
    struct vcpu *v, struct hvm_intack intack)
{
    int vector;

    switch ( intack.source )
    {
    case hvm_intsrc_nmi:
        if ( !test_and_clear_bool(v->arch.nmi_pending) )
            intack = hvm_intack_none;
        break;
    case hvm_intsrc_mce:
        if ( !test_and_clear_bool(v->arch.mce_pending) )
            intack = hvm_intack_none;
        break;
    case hvm_intsrc_pic:
        if ( (vector = vpic_ack_pending_irq(v)) == -1 )
            intack = hvm_intack_none;
        else
            intack.vector = (uint8_t)vector;
        break;
    case hvm_intsrc_lapic:
        if ( !vlapic_ack_pending_irq(v, intack.vector, 0) )
            intack = hvm_intack_none;
        break;
    case hvm_intsrc_vector:
        break;
    default:
        intack = hvm_intack_none;
        break;
    }

    return intack;
}

int hvm_local_events_need_delivery(struct vcpu *v)
{
    struct hvm_intack intack = hvm_vcpu_has_pending_irq(v);

    if ( likely(intack.source == hvm_intsrc_none) )
        return 0;

    return !hvm_interrupt_blocked(v, intack);
}

static void irq_dump(struct domain *d)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    int i;

    printk("Domain %d:\n", d->domain_id);
    printk("PCI 0x%16.16"PRIx64"%16.16"PRIx64
           " ISA 0x%8.8"PRIx32" ROUTE %u %u %u %u\n",
           hvm_irq->pci_intx.pad[0],  hvm_irq->pci_intx.pad[1],
           (uint32_t) hvm_irq->isa_irq.pad[0],
           hvm_irq->pci_link.route[0], hvm_irq->pci_link.route[1],
           hvm_irq->pci_link.route[2], hvm_irq->pci_link.route[3]);
    for ( i = 0; i < hvm_irq->nr_gsis && i + 8 <= hvm_irq->nr_gsis; i += 8 )
        printk("GSI [%x - %x] %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8
               " %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n",
               i, i+7,
               hvm_irq->gsi_assert_count[i+0],
               hvm_irq->gsi_assert_count[i+1],
               hvm_irq->gsi_assert_count[i+2],
               hvm_irq->gsi_assert_count[i+3],
               hvm_irq->gsi_assert_count[i+4],
               hvm_irq->gsi_assert_count[i+5],
               hvm_irq->gsi_assert_count[i+6],
               hvm_irq->gsi_assert_count[i+7]);
    if ( i != hvm_irq->nr_gsis )
    {
        printk("GSI [%x - %x]", i, hvm_irq->nr_gsis - 1);
        for ( ; i < hvm_irq->nr_gsis; i++ )
            printk(" %2"PRIu8, hvm_irq->gsi_assert_count[i]);
        printk("\n");
    }
    printk("Link %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n",
           hvm_irq->pci_link_assert_count[0],
           hvm_irq->pci_link_assert_count[1],
           hvm_irq->pci_link_assert_count[2],
           hvm_irq->pci_link_assert_count[3]);
    printk("Callback via %i:%#"PRIx32",%s asserted\n",
           hvm_irq->callback_via_type, hvm_irq->callback_via.gsi,
           hvm_irq->callback_via_asserted ? "" : " not");
}

static void dump_irq_info(unsigned char key)
{
    struct domain *d;

    printk("'%c' pressed -> dumping HVM irq info\n", key);

    rcu_read_lock(&domlist_read_lock);

    for_each_domain ( d )
        if ( is_hvm_domain(d) )
            irq_dump(d);

    rcu_read_unlock(&domlist_read_lock);
}

static int __init dump_irq_info_key_init(void)
{
    register_keyhandler('I', dump_irq_info, "dump HVM irq info", 1);
    return 0;
}
__initcall(dump_irq_info_key_init);

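/*
 * Save/restore handlers. Only the raw line state (PCI INTx bits, ISA IRQ
 * bits and the PCI-ISA link routing) is migrated; the assert counts are
 * recomputed from it on load, in the order PCI, ISA, link.
 */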
static int irq_save_pci(struct vcpu *v, hvm_domain_context_t *h)
{
    struct domain *d = v->domain;
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    unsigned int asserted, pdev, pintx;
    int rc;

    spin_lock(&d->arch.hvm.irq_lock);

    pdev  = hvm_irq->callback_via.pci.dev;
    pintx = hvm_irq->callback_via.pci.intx;
    asserted = (hvm_irq->callback_via_asserted &&
                (hvm_irq->callback_via_type == HVMIRQ_callback_pci_intx));

    /*
     * Deassert the virtual interrupt via the PCI INTx line. The virtual
     * interrupt status is not saved/restored, so the INTx line must be
     * deasserted in the restore context.
     */
    if ( asserted )
        __hvm_pci_intx_deassert(d, pdev, pintx);

    /* Save PCI IRQ lines */
    rc = hvm_save_entry(PCI_IRQ, 0, h, &hvm_irq->pci_intx);

    if ( asserted )
        __hvm_pci_intx_assert(d, pdev, pintx);

    spin_unlock(&d->arch.hvm.irq_lock);

    return rc;
}

static int irq_save_isa(struct vcpu *v, hvm_domain_context_t *h)
{
    const struct domain *d = v->domain;
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);

    /* Save ISA IRQ lines */
    return hvm_save_entry(ISA_IRQ, 0, h, &hvm_irq->isa_irq);
}

static int irq_save_link(struct vcpu *v, hvm_domain_context_t *h)
{
    const struct domain *d = v->domain;
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);

    /* Save PCI-ISA link state */
    return hvm_save_entry(PCI_LINK, 0, h, &hvm_irq->pci_link);
}

static int irq_load_pci(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    int link, dev, intx, gsi;

    /* Load the PCI IRQ lines */
    if ( hvm_load_entry(PCI_IRQ, h, &hvm_irq->pci_intx) != 0 )
        return -EINVAL;

    /* Clear the PCI link assert counts */
    for ( link = 0; link < 4; link++ )
        hvm_irq->pci_link_assert_count[link] = 0;

    /* Clear the GSI link assert counts */
    for ( gsi = 0; gsi < hvm_irq->nr_gsis; gsi++ )
        hvm_irq->gsi_assert_count[gsi] = 0;

    /* Recalculate the counts from the IRQ line state */
    for ( dev = 0; dev < 32; dev++ )
        for ( intx = 0; intx < 4; intx++ )
            if ( test_bit(dev*4 + intx, &hvm_irq->pci_intx.i) )
            {
                /* Direct GSI assert */
                gsi = hvm_pci_intx_gsi(dev, intx);
                hvm_irq->gsi_assert_count[gsi]++;
                /* PCI-ISA bridge assert */
                link = hvm_pci_intx_link(dev, intx);
                hvm_irq->pci_link_assert_count[link]++;
            }

    return 0;
}

static int irq_load_isa(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    int irq;

    /* Load the ISA IRQ lines */
    if ( hvm_load_entry(ISA_IRQ, h, &hvm_irq->isa_irq) != 0 )
        return -EINVAL;

    /* Adjust the GSI assert counts for the ISA IRQ line state.
     * This relies on the PCI IRQ state being loaded first. */
    for ( irq = 0; platform_legacy_irq(irq); irq++ )
        if ( test_bit(irq, &hvm_irq->isa_irq.i) )
            hvm_irq->gsi_assert_count[hvm_isa_irq_to_gsi(irq)]++;

    return 0;
}

static int irq_load_link(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    int link, gsi;

    /* Load the PCI-ISA IRQ link routing table */
    if ( hvm_load_entry(PCI_LINK, h, &hvm_irq->pci_link) != 0 )
        return -EINVAL;

    /* Sanity check */
    for ( link = 0; link < 4; link++ )
        if ( hvm_irq->pci_link.route[link] > 15 )
        {
            gdprintk(XENLOG_ERR,
                     "HVM restore: PCI-ISA link %u out of range (%u)\n",
                     link, hvm_irq->pci_link.route[link]);
            return -EINVAL;
        }

    /* Adjust the GSI assert counts for the link outputs.
     * This relies on the PCI and ISA IRQ state being loaded first. */
    for ( link = 0; link < 4; link++ )
    {
        if ( hvm_irq->pci_link_assert_count[link] != 0 )
        {
            gsi = hvm_irq->pci_link.route[link];
            if ( gsi != 0 )
                hvm_irq->gsi_assert_count[gsi]++;
        }
    }

    return 0;
}

HVM_REGISTER_SAVE_RESTORE(PCI_IRQ, irq_save_pci, irq_load_pci,
                          1, HVMSR_PER_DOM);
HVM_REGISTER_SAVE_RESTORE(ISA_IRQ, irq_save_isa, irq_load_isa,
                          1, HVMSR_PER_DOM);
HVM_REGISTER_SAVE_RESTORE(PCI_LINK, irq_save_link, irq_load_link,
                          1, HVMSR_PER_DOM);