/*
 * Copyright (C) 2001 MandrakeSoft S.A.
 *
 * MandrakeSoft S.A.
 * 43, rue d'Aboukir
 * 75002 Paris - France
 * http://www.linux-mandrake.com/
 * http://www.mandrakesoft.com/
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; If not, see <http://www.gnu.org/licenses/>.
 *
 * Yunhong Jiang <yunhong.jiang@intel.com>
 * Ported to xen by using virtual IRQ line.
 */

#include <xen/types.h>
#include <xen/mm.h>
#include <xen/xmalloc.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/nospec.h>
#include <public/hvm/ioreq.h>
#include <asm/hvm/io.h>
#include <asm/hvm/vpic.h>
#include <asm/hvm/vlapic.h>
#include <asm/hvm/support.h>
#include <asm/current.h>
#include <asm/event.h>
#include <asm/io_apic.h>

/* HACK: Route IRQ0 only to VCPU0 to prevent time jumps. */
#define IRQ0_SPECIAL_ROUTING 1

static void vioapic_deliver(struct hvm_vioapic *vioapic, unsigned int irq);

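/* Find the vIO-APIC, if any, whose MMIO window covers @addr. */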
static struct hvm_vioapic *addr_vioapic(const struct domain *d,
                                        unsigned long addr)
{
    unsigned int i;

    for ( i = 0; i < d->arch.hvm.nr_vioapics; i++ )
    {
        struct hvm_vioapic *vioapic = domain_vioapic(d, i);

        if ( addr >= vioapic->base_address &&
             addr < vioapic->base_address + VIOAPIC_MEM_LENGTH )
            return vioapic;
    }

    return NULL;
}

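/*
 * Find the vIO-APIC covering @gsi and store the pin offset within that
 * vIO-APIC in @pin.  Returns NULL (leaving *pin untouched) if no vIO-APIC
 * covers the GSI.
 */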
static struct hvm_vioapic *gsi_vioapic(const struct domain *d,
                                       unsigned int gsi, unsigned int *pin)
{
    unsigned int i;

    /*
     * Make sure the compiler does not optimize away the initialization done by
     * callers
     */
    OPTIMIZER_HIDE_VAR(*pin);

    for ( i = 0; i < d->arch.hvm.nr_vioapics; i++ )
    {
        struct hvm_vioapic *vioapic = domain_vioapic(d, i);

        if ( gsi >= vioapic->base_gsi &&
             gsi < vioapic->base_gsi + vioapic->nr_pins )
        {
            *pin = gsi - vioapic->base_gsi;
            return vioapic;
        }
    }

    return NULL;
}

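/* Read the register currently selected by IOREGSEL through the data window. */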
static uint32_t vioapic_read_indirect(const struct hvm_vioapic *vioapic)
{
    uint32_t result = 0;

    switch ( vioapic->ioregsel )
    {
    case VIOAPIC_REG_VERSION:
        result = ((union IO_APIC_reg_01){
                  .bits = { .version = VIOAPIC_VERSION_ID,
                            .entries = vioapic->nr_pins - 1 }
                  }).raw;
        break;

    case VIOAPIC_REG_APIC_ID:
        /*
         * Using union IO_APIC_reg_02 for the ID register too, as
         * union IO_APIC_reg_00's ID field is 8 bits wide for some reason.
         */
    case VIOAPIC_REG_ARB_ID:
        result = ((union IO_APIC_reg_02){
                  .bits = { .arbitration = vioapic->id }
                  }).raw;
        break;

    default:
    {
        uint32_t redir_index = (vioapic->ioregsel - VIOAPIC_REG_RTE0) >> 1;
        uint64_t redir_content;

        if ( redir_index >= vioapic->nr_pins )
        {
            gdprintk(XENLOG_WARNING, "apic_mem_readl:undefined ioregsel %x\n",
                     vioapic->ioregsel);
            break;
        }

        redir_content = vioapic->redirtbl[array_index_nospec(redir_index,
                                                             vioapic->nr_pins)].bits;
        result = (vioapic->ioregsel & 1) ? (redir_content >> 32)
                                         : redir_content;
        break;
    }
    }

    return result;
}

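/* MMIO read handler: dispatch on the register offset within the window. */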
static int vioapic_read(
    struct vcpu *v, unsigned long addr,
    unsigned int length, unsigned long *pval)
{
    const struct hvm_vioapic *vioapic;
    uint32_t result;

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "addr %lx", addr);

    vioapic = addr_vioapic(v->domain, addr);
    ASSERT(vioapic);

    switch ( addr & 0xff )
    {
    case VIOAPIC_REG_SELECT:
        result = vioapic->ioregsel;
        break;

    case VIOAPIC_REG_WINDOW:
        result = vioapic_read_indirect(vioapic);
        break;

    default:
        result = 0;
        break;
    }

    *pval = result;
    return X86EMUL_OKAY;
}

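/*
 * Register, map and bind a GSI on behalf of the hardware domain when it
 * unmasks the corresponding redirection entry.
 */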
static int vioapic_hwdom_map_gsi(unsigned int gsi, unsigned int trig,
                                 unsigned int pol)
{
    struct domain *currd = current->domain;
    struct xen_domctl_bind_pt_irq pt_irq_bind = {
        .irq_type = PT_IRQ_TYPE_PCI,
        .machine_irq = gsi,
    };
    int ret, pirq = gsi;

    ASSERT(is_hardware_domain(currd));

    /* Interrupt has been unmasked, bind it now. */
    ret = mp_register_gsi(gsi, trig, pol);
    if ( ret == -EEXIST )
        return 0;
    if ( ret )
    {
        gprintk(XENLOG_WARNING, "vioapic: error registering GSI %u: %d\n",
                gsi, ret);
        return ret;
    }

    ret = allocate_and_map_gsi_pirq(currd, pirq, &pirq);
    if ( ret )
    {
        gprintk(XENLOG_WARNING, "vioapic: error mapping GSI %u: %d\n",
                gsi, ret);
        return ret;
    }

    pcidevs_lock();
    ret = pt_irq_create_bind(currd, &pt_irq_bind);
    if ( ret )
    {
        gprintk(XENLOG_WARNING, "vioapic: error binding GSI %u: %d\n",
                gsi, ret);
        spin_lock(&currd->event_lock);
        unmap_domain_pirq(currd, pirq);
        spin_unlock(&currd->event_lock);
    }
    pcidevs_unlock();

    return ret;
}

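/*
 * Update one 32-bit half of a redirection entry.  @top_word selects the
 * high (destination) half; the low half carries the mask, trigger-mode and
 * delivery fields, so writes to it may trigger a pending delivery.
 */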
static void vioapic_write_redirent(
    struct hvm_vioapic *vioapic, unsigned int idx,
    int top_word, uint32_t val)
{
    struct domain *d = vioapic_domain(vioapic);
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    union vioapic_redir_entry *pent, ent;
    int unmasked = 0;
    unsigned int gsi;

    /* Callers of this function should make sure idx is bounded appropriately */
    ASSERT(idx < vioapic->nr_pins);

    /* Make sure no out-of-bounds value for idx can be used */
    idx = array_index_nospec(idx, vioapic->nr_pins);

    gsi = vioapic->base_gsi + idx;

    spin_lock(&d->arch.hvm.irq_lock);

    pent = &vioapic->redirtbl[idx];
    ent = *pent;

    if ( top_word )
    {
        /* Contains only the dest_id. */
        ent.bits = (uint32_t)ent.bits | ((uint64_t)val << 32);
    }
    else
    {
        unmasked = ent.fields.mask;
        /* Remote IRR and Delivery Status are read-only. */
        ent.bits = ((ent.bits >> 32) << 32) | val;
        ent.fields.delivery_status = 0;
        ent.fields.remote_irr = pent->fields.remote_irr;
        unmasked = unmasked && !ent.fields.mask;
    }

    *pent = ent;

    if ( gsi == 0 )
    {
        vlapic_adjust_i8259_target(d);
    }
    else if ( ent.fields.trig_mode == VIOAPIC_EDGE_TRIG )
        pent->fields.remote_irr = 0;
    else if ( !ent.fields.mask &&
              !ent.fields.remote_irr &&
              hvm_irq->gsi_assert_count[gsi] )
    {
        pent->fields.remote_irr = 1;
        vioapic_deliver(vioapic, idx);
    }

    spin_unlock(&d->arch.hvm.irq_lock);

    if ( is_hardware_domain(d) && unmasked )
    {
        /*
         * NB: don't call vioapic_hwdom_map_gsi while holding hvm.irq_lock
         * since it can cause deadlocks as event_lock is taken by
         * allocate_and_map_gsi_pirq, and that will invert the locking order
         * used by other parts of the code.
         */
        int ret = vioapic_hwdom_map_gsi(gsi, ent.fields.trig_mode,
                                        ent.fields.polarity);
        if ( ret )
        {
            gprintk(XENLOG_ERR,
                    "unable to bind gsi %u to hardware domain: %d\n", gsi, ret);
            unmasked = 0;
        }
    }

    if ( gsi == 0 || unmasked )
        pt_may_unmask_irq(d, NULL);
}

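/* Write the register currently selected by IOREGSEL through the data window. */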
static void vioapic_write_indirect(
    struct hvm_vioapic *vioapic, uint32_t val)
{
    switch ( vioapic->ioregsel )
    {
    case VIOAPIC_REG_VERSION:
        /* Writes are ignored. */
        break;

    case VIOAPIC_REG_APIC_ID:
        /*
         * Presumably because we emulate an Intel IOAPIC which only has a
         * 4 bit ID field (compared to 8 for AMD), using union IO_APIC_reg_02
         * for the ID register (union IO_APIC_reg_00's ID field is 8 bits).
         */
        vioapic->id = ((union IO_APIC_reg_02){ .raw = val }).bits.arbitration;
        break;

    case VIOAPIC_REG_ARB_ID:
        break;

    default:
    {
        uint32_t redir_index = (vioapic->ioregsel - VIOAPIC_REG_RTE0) >> 1;

        HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "rte[%02x].%s = %08x",
                    redir_index, vioapic->ioregsel & 1 ? "hi" : "lo", val);

        if ( redir_index >= vioapic->nr_pins )
        {
            gdprintk(XENLOG_WARNING, "vioapic_write_indirect "
                     "error register %x\n", vioapic->ioregsel);
            break;
        }

        vioapic_write_redirent(
            vioapic, redir_index, vioapic->ioregsel&1, val);
        break;
    }
    }
}

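/* MMIO write handler: dispatch on the register offset within the window. */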
static int vioapic_write(
    struct vcpu *v, unsigned long addr,
    unsigned int length, unsigned long val)
{
    struct hvm_vioapic *vioapic;

    vioapic = addr_vioapic(v->domain, addr);
    ASSERT(vioapic);

    switch ( addr & 0xff )
    {
    case VIOAPIC_REG_SELECT:
        vioapic->ioregsel = val;
        break;

    case VIOAPIC_REG_WINDOW:
        vioapic_write_indirect(vioapic, val);
        break;

#if VIOAPIC_VERSION_ID >= 0x20
    case VIOAPIC_REG_EOI:
        vioapic_update_EOI(v->domain, val);
        break;
#endif

    default:
        break;
    }

    return X86EMUL_OKAY;
}

static int vioapic_range(struct vcpu *v, unsigned long addr)
{
    return !!addr_vioapic(v->domain, addr);
}

static const struct hvm_mmio_ops vioapic_mmio_ops = {
    .check = vioapic_range,
    .read = vioapic_read,
    .write = vioapic_write
};

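/* Inject @vector into @target's vLAPIC; only Fixed and LowestPrio end up here. */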
static void ioapic_inj_irq(
    struct hvm_vioapic *vioapic,
    struct vlapic *target,
    uint8_t vector,
    uint8_t trig_mode,
    uint8_t delivery_mode)
{
    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "irq %d trig %d deliv %d",
                vector, trig_mode, delivery_mode);

    ASSERT((delivery_mode == dest_Fixed) ||
           (delivery_mode == dest_LowestPrio));

    vlapic_set_irq(target, vector, trig_mode);
}

static inline int pit_channel0_enabled(void)
{
    return pt_active(&current->domain->arch.vpit.pt0);
}

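/*
 * Deliver the interrupt for @pin according to its redirection entry.  The
 * caller must hold the domain's irq_lock.
 */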
static void vioapic_deliver(struct hvm_vioapic *vioapic, unsigned int pin)
{
    uint16_t dest = vioapic->redirtbl[pin].fields.dest_id;
    uint8_t dest_mode = vioapic->redirtbl[pin].fields.dest_mode;
    uint8_t delivery_mode = vioapic->redirtbl[pin].fields.delivery_mode;
    uint8_t vector = vioapic->redirtbl[pin].fields.vector;
    uint8_t trig_mode = vioapic->redirtbl[pin].fields.trig_mode;
    struct domain *d = vioapic_domain(vioapic);
    struct vlapic *target;
    struct vcpu *v;
    unsigned int irq = vioapic->base_gsi + pin;

    ASSERT(spin_is_locked(&d->arch.hvm.irq_lock));

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC,
                "dest=%x dest_mode=%x delivery_mode=%x "
                "vector=%x trig_mode=%x",
                dest, dest_mode, delivery_mode, vector, trig_mode);

    switch ( delivery_mode )
    {
    case dest_LowestPrio:
    {
#ifdef IRQ0_SPECIAL_ROUTING
        /* Force round-robin to pick VCPU 0 */
        if ( (irq == hvm_isa_irq_to_gsi(0)) && pit_channel0_enabled() )
        {
            v = d->vcpu ? d->vcpu[0] : NULL;
            target = v ? vcpu_vlapic(v) : NULL;
        }
        else
#endif
            target = vlapic_lowest_prio(d, NULL, 0, dest, dest_mode);
        if ( target != NULL )
        {
            ioapic_inj_irq(vioapic, target, vector, trig_mode, delivery_mode);
        }
        else
        {
            HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "null round robin: "
                        "vector=%x delivery_mode=%x",
                        vector, dest_LowestPrio);
        }
        break;
    }

    case dest_Fixed:
    {
#ifdef IRQ0_SPECIAL_ROUTING
        /* Do not deliver timer interrupts to VCPU != 0 */
        if ( (irq == hvm_isa_irq_to_gsi(0)) && pit_channel0_enabled() )
        {
            if ( (v = d->vcpu ? d->vcpu[0] : NULL) != NULL )
                ioapic_inj_irq(vioapic, vcpu_vlapic(v), vector,
                               trig_mode, delivery_mode);
        }
        else
#endif
        {
            for_each_vcpu ( d, v )
                if ( vlapic_match_dest(vcpu_vlapic(v), NULL,
                                       0, dest, dest_mode) )
                    ioapic_inj_irq(vioapic, vcpu_vlapic(v), vector,
                                   trig_mode, delivery_mode);
        }
        break;
    }

    case dest_NMI:
    {
        for_each_vcpu ( d, v )
            if ( vlapic_match_dest(vcpu_vlapic(v), NULL,
                                   0, dest, dest_mode) &&
                 !test_and_set_bool(v->arch.nmi_pending) )
                vcpu_kick(v);
        break;
    }

    default:
        gdprintk(XENLOG_WARNING, "Unsupported delivery mode %d\n",
                 delivery_mode);
        break;
    }
}

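/*
 * Signal a rising edge on @irq: deliver immediately for edge-triggered
 * entries, and latch Remote IRR first for level-triggered ones.  Masked
 * pins are ignored.
 */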
void vioapic_irq_positive_edge(struct domain *d, unsigned int irq)
{
    unsigned int pin = 0; /* See gsi_vioapic */
    struct hvm_vioapic *vioapic = gsi_vioapic(d, irq, &pin);
    union vioapic_redir_entry *ent;

    if ( !vioapic )
    {
        ASSERT_UNREACHABLE();
        return;
    }

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "irq %x", irq);

    ASSERT(pin < vioapic->nr_pins);
    ASSERT(spin_is_locked(&d->arch.hvm.irq_lock));

    ent = &vioapic->redirtbl[pin];
    if ( ent->fields.mask )
        return;

    if ( ent->fields.trig_mode == VIOAPIC_EDGE_TRIG )
    {
        vioapic_deliver(vioapic, pin);
    }
    else if ( !ent->fields.remote_irr )
    {
        ent->fields.remote_irr = 1;
        vioapic_deliver(vioapic, pin);
    }
}

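/*
 * Handle an EOI for @vector: clear Remote IRR on every matching entry and
 * re-deliver any level-triggered interrupt that is still asserted.
 */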
void vioapic_update_EOI(struct domain *d, u8 vector)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    union vioapic_redir_entry *ent;
    unsigned int i;

    ASSERT(has_vioapic(d));

    spin_lock(&d->arch.hvm.irq_lock);

    for ( i = 0; i < d->arch.hvm.nr_vioapics; i++ )
    {
        struct hvm_vioapic *vioapic = domain_vioapic(d, i);
        unsigned int pin;

        for ( pin = 0; pin < vioapic->nr_pins; pin++ )
        {
            ent = &vioapic->redirtbl[pin];
            if ( ent->fields.vector != vector )
                continue;

            ent->fields.remote_irr = 0;

            if ( is_iommu_enabled(d) )
            {
                spin_unlock(&d->arch.hvm.irq_lock);
                hvm_dpci_eoi(d, vioapic->base_gsi + pin, ent);
                spin_lock(&d->arch.hvm.irq_lock);
            }

            if ( (ent->fields.trig_mode == VIOAPIC_LEVEL_TRIG) &&
                 !ent->fields.mask &&
                 hvm_irq->gsi_assert_count[vioapic->base_gsi + pin] )
            {
                ent->fields.remote_irr = 1;
                vioapic_deliver(vioapic, pin);
            }
        }
    }

    spin_unlock(&d->arch.hvm.irq_lock);
}

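/*
 * Accessors for individual redirection entry fields; each returns -EINVAL
 * if @gsi is not covered by any vIO-APIC.
 */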
int vioapic_get_mask(const struct domain *d, unsigned int gsi)
{
    unsigned int pin = 0; /* See gsi_vioapic */
    const struct hvm_vioapic *vioapic = gsi_vioapic(d, gsi, &pin);

    if ( !vioapic )
        return -EINVAL;

    return vioapic->redirtbl[pin].fields.mask;
}

int vioapic_get_vector(const struct domain *d, unsigned int gsi)
{
    unsigned int pin = 0; /* See gsi_vioapic */
    const struct hvm_vioapic *vioapic = gsi_vioapic(d, gsi, &pin);

    if ( !vioapic )
        return -EINVAL;

    return vioapic->redirtbl[pin].fields.vector;
}

int vioapic_get_trigger_mode(const struct domain *d, unsigned int gsi)
{
    unsigned int pin = 0; /* See gsi_vioapic */
    const struct hvm_vioapic *vioapic = gsi_vioapic(d, gsi, &pin);

    if ( !vioapic )
        return -EINVAL;

    return vioapic->redirtbl[pin].fields.trig_mode;
}

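/* Save/restore only supports the single-vIO-APIC (domU) layout. */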
static int ioapic_save(struct vcpu *v, hvm_domain_context_t *h)
{
    const struct domain *d = v->domain;
    struct hvm_vioapic *s;

    if ( !has_vioapic(d) )
        return 0;

    s = domain_vioapic(d, 0);

    if ( s->nr_pins != ARRAY_SIZE(s->domU.redirtbl) ||
         d->arch.hvm.nr_vioapics != 1 )
        return -EOPNOTSUPP;

    return hvm_save_entry(IOAPIC, 0, h, &s->domU);
}

static int ioapic_load(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_vioapic *s;

    if ( !has_vioapic(d) )
        return -ENODEV;

    s = domain_vioapic(d, 0);

    if ( s->nr_pins != ARRAY_SIZE(s->domU.redirtbl) ||
         d->arch.hvm.nr_vioapics != 1 )
        return -EOPNOTSUPP;

    return hvm_load_entry(IOAPIC, h, &s->domU);
}

HVM_REGISTER_SAVE_RESTORE(IOAPIC, ioapic_save, ioapic_load, 1, HVMSR_PER_DOM);

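/* Reset all vIO-APICs: mask every pin and restore the base address and ID. */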
void vioapic_reset(struct domain *d)
{
    unsigned int i;

    if ( !has_vioapic(d) )
    {
        ASSERT(!d->arch.hvm.nr_vioapics);
        return;
    }

    for ( i = 0; i < d->arch.hvm.nr_vioapics; i++ )
    {
        struct hvm_vioapic *vioapic = domain_vioapic(d, i);
        unsigned int nr_pins = vioapic->nr_pins, base_gsi = vioapic->base_gsi;
        unsigned int pin;

        memset(vioapic, 0, hvm_vioapic_size(nr_pins));
        for ( pin = 0; pin < nr_pins; pin++ )
            vioapic->redirtbl[pin].fields.mask = 1;

        if ( !is_hardware_domain(d) )
        {
            ASSERT(!i && !base_gsi);
            vioapic->base_address = VIOAPIC_DEFAULT_BASE_ADDRESS;
            vioapic->id = 0;
        }
        else
        {
            vioapic->base_address = mp_ioapics[i].mpc_apicaddr;
            vioapic->id = mp_ioapics[i].mpc_apicid;
        }
        vioapic->base_gsi = base_gsi;
        vioapic->nr_pins = nr_pins;
        vioapic->domain = d;
    }
}

static void vioapic_free(const struct domain *d, unsigned int nr_vioapics)
{
    unsigned int i;

    for ( i = 0; i < nr_vioapics; i++)
        xfree(domain_vioapic(d, i));
    xfree(d->arch.hvm.vioapic);
}

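/*
 * Allocate and initialize the domain's vIO-APICs: one per physical IO-APIC
 * for the hardware domain, a single one for other domains.
 */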
int vioapic_init(struct domain *d)
{
    unsigned int i, nr_vioapics, nr_gsis = 0;

    if ( !has_vioapic(d) )
    {
        ASSERT(!d->arch.hvm.nr_vioapics);
        return 0;
    }

    nr_vioapics = is_hardware_domain(d) ? nr_ioapics : 1;

    if ( (d->arch.hvm.vioapic == NULL) &&
         ((d->arch.hvm.vioapic =
           xzalloc_array(struct hvm_vioapic *, nr_vioapics)) == NULL) )
        return -ENOMEM;

    for ( i = 0; i < nr_vioapics; i++ )
    {
        unsigned int nr_pins, base_gsi;

        if ( is_hardware_domain(d) )
        {
            nr_pins = nr_ioapic_entries[i];
            base_gsi = io_apic_gsi_base(i);
        }
        else
        {
            nr_pins = ARRAY_SIZE(domain_vioapic(d, 0)->domU.redirtbl);
            base_gsi = 0;
        }

        if ( (domain_vioapic(d, i) =
              xmalloc_bytes(hvm_vioapic_size(nr_pins))) == NULL )
        {
            vioapic_free(d, nr_vioapics);
            return -ENOMEM;
        }
        domain_vioapic(d, i)->nr_pins = nr_pins;
        domain_vioapic(d, i)->base_gsi = base_gsi;
        nr_gsis = max(nr_gsis, base_gsi + nr_pins);
    }

    /*
     * NB: hvm_domain_irq(d)->nr_gsis is actually the highest GSI + 1, but
     * there might be holes in this range (ie: GSIs that don't belong to any
     * vIO APIC).
     */
    ASSERT(hvm_domain_irq(d)->nr_gsis >= nr_gsis);

    d->arch.hvm.nr_vioapics = nr_vioapics;
    vioapic_reset(d);

    register_mmio_handler(d, &vioapic_mmio_ops);

    return 0;
}

void vioapic_deinit(struct domain *d)
{
    if ( !has_vioapic(d) )
    {
        ASSERT(!d->arch.hvm.nr_vioapics);
        return;
    }

    vioapic_free(d, d->arch.hvm.nr_vioapics);
}