/*
 * xen/arch/arm/vgic.c
 *
 * ARM Virtual Generic Interrupt Controller support
 *
 * Ian Campbell <ian.campbell@citrix.com>
 * Copyright (c) 2011 Citrix Systems.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <xen/bitops.h>
#include <xen/lib.h>
#include <xen/init.h>
#include <xen/domain_page.h>
#include <xen/softirq.h>
#include <xen/irq.h>
#include <xen/sched.h>
#include <xen/perfc.h>

#include <asm/event.h>
#include <asm/current.h>

#include <asm/mmio.h>
#include <asm/gic.h>
#include <asm/vgic.h>

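/*
 * Return the rank covering the given rank index: rank 0 holds this vCPU's
 * private SGIs/PPIs, ranks 1..DOMAIN_NR_RANKS map to the domain's shared
 * SPI ranks. Returns NULL for an out-of-range rank.
 */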
static inline struct vgic_irq_rank *vgic_get_rank(struct vcpu *v, int rank)
{
    if ( rank == 0 )
        return v->arch.vgic.private_irqs;
    else if ( rank <= DOMAIN_NR_RANKS(v->domain) )
        return &v->domain->arch.vgic.shared_irqs[rank - 1];
    else
        return NULL;
}

/*
 * Returns rank corresponding to a GICD_<FOO><n> register for
 * GICD_<FOO> with <b>-bits-per-interrupt.
 */
struct vgic_irq_rank *vgic_rank_offset(struct vcpu *v, int b, int n,
                                       int s)
{
    int rank = REG_RANK_NR(b, (n >> s));

    return vgic_get_rank(v, rank);
}

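/* Returns the rank containing the given vIRQ (32 interrupts per rank). */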
struct vgic_irq_rank *vgic_rank_irq(struct vcpu *v, unsigned int irq)
{
    int rank = irq/32;

    return vgic_get_rank(v, rank);
}

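/*
 * Reset a pending_irq to a known state: empty inflight and lr_queue lists,
 * the given vIRQ number and no LPI vCPU assigned.
 */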
void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq)
{
    /* The lpi_vcpu_id field must be big enough to hold a VCPU ID. */
    BUILD_BUG_ON(BIT(sizeof(p->lpi_vcpu_id) * 8, UL) < MAX_VIRT_CPUS);

    memset(p, 0, sizeof(*p));
    INIT_LIST_HEAD(&p->inflight);
    INIT_LIST_HEAD(&p->lr_queue);
    p->irq = virq;
    p->lpi_vcpu_id = INVALID_VCPU_ID;
}

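/*
 * Initialise a rank: record its index and route all of its interrupts to
 * the given vCPU by default.
 */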
static void vgic_rank_init(struct vgic_irq_rank *rank, uint8_t index,
                           unsigned int vcpu)
{
    unsigned int i;

    /*
     * Make sure that the type chosen to store the target is able to
     * store a VCPU ID between 0 and the maximum number of virtual CPUs
     * supported.
     */
    BUILD_BUG_ON((1 << (sizeof(rank->vcpu[0]) * 8)) < MAX_VIRT_CPUS);

    spin_lock_init(&rank->lock);

    rank->index = index;

    for ( i = 0; i < NR_INTERRUPT_PER_RANK; i++ )
        write_atomic(&rank->vcpu[i], vcpu);
}

int domain_vgic_register(struct domain *d, int *mmio_count)
{
    switch ( d->arch.vgic.version )
    {
#ifdef CONFIG_GICV3
    case GIC_V3:
        if ( vgic_v3_init(d, mmio_count) )
            return -ENODEV;
        break;
#endif
    case GIC_V2:
        if ( vgic_v2_init(d, mmio_count) )
            return -ENODEV;
        break;
    default:
        printk(XENLOG_G_ERR "d%d: Unknown vGIC version %u\n",
               d->domain_id, d->arch.vgic.version);
        return -ENODEV;
    }

    return 0;
}

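/*
 * Allocate and initialise the domain-wide vGIC state: the shared SPI ranks,
 * the pending_irq array for SPIs and the bitmap tracking allocated vIRQs.
 * nr_spis is rounded up to a multiple of 32, as each rank covers 32
 * interrupts.
 */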
int domain_vgic_init(struct domain *d, unsigned int nr_spis)
{
    int i;
    int ret;

    d->arch.vgic.ctlr = 0;

    /*
     * The vGIC relies on having a pending_irq available for every IRQ
     * described in the ranks. As each rank describes 32 interrupts, we
     * need to make sure the number of SPIs is a multiple of 32.
     */
    nr_spis = ROUNDUP(nr_spis, 32);

    /* Limit the number of virtual SPIs supported to (1020 - 32) = 988 */
    if ( nr_spis > (1020 - NR_LOCAL_IRQS) )
        return -EINVAL;

    d->arch.vgic.nr_spis = nr_spis;

    spin_lock_init(&d->arch.vgic.lock);

    d->arch.vgic.shared_irqs =
        xzalloc_array(struct vgic_irq_rank, DOMAIN_NR_RANKS(d));
    if ( d->arch.vgic.shared_irqs == NULL )
        return -ENOMEM;

    d->arch.vgic.pending_irqs =
        xzalloc_array(struct pending_irq, d->arch.vgic.nr_spis);
    if ( d->arch.vgic.pending_irqs == NULL )
        return -ENOMEM;

    for ( i = 0; i < d->arch.vgic.nr_spis; i++ )
        vgic_init_pending_irq(&d->arch.vgic.pending_irqs[i], i + 32);

    /* SPIs are routed to VCPU0 by default */
    for ( i = 0; i < DOMAIN_NR_RANKS(d); i++ )
        vgic_rank_init(&d->arch.vgic.shared_irqs[i], i + 1, 0);

    ret = d->arch.vgic.handler->domain_init(d);
    if ( ret )
        return ret;

    d->arch.vgic.allocated_irqs =
        xzalloc_array(unsigned long, BITS_TO_LONGS(vgic_num_irqs(d)));
    if ( !d->arch.vgic.allocated_irqs )
        return -ENOMEM;

    /* vIRQ0-15 (SGIs) are reserved */
    for ( i = 0; i < NR_GIC_SGI; i++ )
        set_bit(i, d->arch.vgic.allocated_irqs);

    return 0;
}

void register_vgic_ops(struct domain *d, const struct vgic_ops *ops)
{
    d->arch.vgic.handler = ops;
}

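/*
 * Tear down the domain-wide vGIC state: release any physical IRQs still
 * routed to the guest and free the arrays allocated at init time.
 */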
void domain_vgic_free(struct domain *d)
{
    int i;
    int ret;

    for ( i = 0; i < (d->arch.vgic.nr_spis); i++ )
    {
        struct pending_irq *p = spi_to_pending(d, i + 32);

        if ( p->desc )
        {
            ret = release_guest_irq(d, p->irq);
            if ( ret )
                dprintk(XENLOG_G_WARNING, "d%u: Failed to release virq %u ret = %d\n",
                        d->domain_id, p->irq, ret);
        }
    }

    if ( d->arch.vgic.handler )
        d->arch.vgic.handler->domain_free(d);
    xfree(d->arch.vgic.shared_irqs);
    xfree(d->arch.vgic.pending_irqs);
    xfree(d->arch.vgic.allocated_irqs);
}

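/*
 * Initialise the per-vCPU vGIC state: the private SGI/PPI rank, the
 * per-vCPU pending_irq entries and the inflight/lr_pending queues.
 */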
int vcpu_vgic_init(struct vcpu *v)
{
    int i;

    v->arch.vgic.private_irqs = xzalloc(struct vgic_irq_rank);
    if ( v->arch.vgic.private_irqs == NULL )
        return -ENOMEM;

    /* SGIs/PPIs are always routed to this VCPU */
    vgic_rank_init(v->arch.vgic.private_irqs, 0, v->vcpu_id);

    v->domain->arch.vgic.handler->vcpu_init(v);

    memset(&v->arch.vgic.pending_irqs, 0, sizeof(v->arch.vgic.pending_irqs));
    for ( i = 0; i < 32; i++ )
        vgic_init_pending_irq(&v->arch.vgic.pending_irqs[i], i);

    INIT_LIST_HEAD(&v->arch.vgic.inflight_irqs);
    INIT_LIST_HEAD(&v->arch.vgic.lr_pending);
    spin_lock_init(&v->arch.vgic.lock);

    return 0;
}

int vcpu_vgic_free(struct vcpu *v)
{
    xfree(v->arch.vgic.private_irqs);
    return 0;
}

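/* Return the vCPU the given vIRQ is currently routed to. */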
struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int virq)
{
    struct vgic_irq_rank *rank = vgic_rank_irq(v, virq);
    int target = read_atomic(&rank->vcpu[virq & INTERRUPT_RANK_MASK]);
    return v->domain->vcpu[target];
}

static int vgic_get_virq_priority(struct vcpu *v, unsigned int virq)
{
    struct vgic_irq_rank *rank;

    /* LPIs don't have a rank; their priority is stored separately. */
    if ( is_lpi(virq) )
        return v->domain->arch.vgic.handler->lpi_get_priority(v->domain, virq);

    rank = vgic_rank_irq(v, virq);
    return ACCESS_ONCE(rank->priority[virq & INTERRUPT_RANK_MASK]);
}

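/*
 * Migrate an interrupt backed by a physical IRQ so that it targets 'new'
 * instead of 'old'. Returns false if a migration for this IRQ is already
 * in progress.
 */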
bool vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
{
    unsigned long flags;
    struct pending_irq *p;

    /* This will never be called for an LPI, as we don't migrate them. */
    ASSERT(!is_lpi(irq));

    spin_lock_irqsave(&old->arch.vgic.lock, flags);

    p = irq_to_pending(old, irq);

    /* nothing to do for virtual interrupts */
    if ( p->desc == NULL )
    {
        spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
        return true;
    }

    /* migration already in progress, no need to do anything */
    if ( test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
    {
        gprintk(XENLOG_WARNING, "irq %u migration failed: requested while in progress\n", irq);
        spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
        return false;
    }

    perfc_incr(vgic_irq_migrates);

    if ( list_empty(&p->inflight) )
    {
        irq_set_affinity(p->desc, cpumask_of(new->processor));
        spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
        return true;
    }
    /* If the IRQ is still lr_pending, re-inject it to the new vcpu */
    if ( !list_empty(&p->lr_queue) )
    {
        vgic_remove_irq_from_queues(old, p);
        irq_set_affinity(p->desc, cpumask_of(new->processor));
        spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
        vgic_inject_irq(new->domain, new, irq, true);
        return true;
    }
    /* if the IRQ is in a GICH_LR register, set GIC_IRQ_GUEST_MIGRATING
     * and wait for the EOI */
    if ( !list_empty(&p->inflight) )
        set_bit(GIC_IRQ_GUEST_MIGRATING, &p->status);

    spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
    return true;
}

void arch_move_irqs(struct vcpu *v)
{
    const cpumask_t *cpu_mask = cpumask_of(v->processor);
    struct domain *d = v->domain;
    struct pending_irq *p;
    struct vcpu *v_target;
    int i;

    /*
     * We don't migrate LPIs at the moment.
     * If we ever do, we must make sure that the struct pending_irq does
     * not go away, as there is no lock preventing this here.
     * To ensure this, we check if the loop below ever touches LPIs.
     * At the moment vgic_num_irqs() just covers SPIs, as it's mostly used
     * for allocating the pending_irq and irq_desc array, in which LPIs
     * don't participate.
     */
    ASSERT(!is_lpi(vgic_num_irqs(d) - 1));

    for ( i = 32; i < vgic_num_irqs(d); i++ )
    {
        v_target = vgic_get_target_vcpu(v, i);
        p = irq_to_pending(v_target, i);

        if ( v_target == v && !test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
            irq_set_affinity(p->desc, cpu_mask);
    }
}

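/*
 * Disable the interrupts selected by bitmask r within rank n: clear
 * GIC_IRQ_GUEST_ENABLED, remove each vIRQ from its lr_pending queue and
 * disable the underlying physical IRQ, if any.
 */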
void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n)
{
    const unsigned long mask = r;
    struct pending_irq *p;
    struct irq_desc *desc;
    unsigned int irq;
    unsigned long flags;
    int i = 0;
    struct vcpu *v_target;

    /* LPIs will never be disabled via this function. */
    ASSERT(!is_lpi(32 * n + 31));

    while ( (i = find_next_bit(&mask, 32, i)) < 32 ) {
        irq = i + (32 * n);
        v_target = vgic_get_target_vcpu(v, irq);

        spin_lock_irqsave(&v_target->arch.vgic.lock, flags);
        p = irq_to_pending(v_target, irq);
        clear_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
        gic_remove_from_lr_pending(v_target, p);
        desc = p->desc;
        spin_unlock_irqrestore(&v_target->arch.vgic.lock, flags);

        if ( desc != NULL )
        {
            spin_lock_irqsave(&desc->lock, flags);
            desc->handler->disable(desc);
            spin_unlock_irqrestore(&desc->lock, flags);
        }
        i++;
    }
}

#define VGIC_ICFG_MASK(intr) (1 << ((2 * ((intr) % 16)) + 1))

/* The function should be called with the rank lock taken */
static inline unsigned int vgic_get_virq_type(struct vcpu *v, int n, int index)
{
    struct vgic_irq_rank *r = vgic_get_rank(v, n);
    uint32_t tr = r->icfg[index >> 4];

    ASSERT(spin_is_locked(&r->lock));

    if ( tr & VGIC_ICFG_MASK(index) )
        return IRQ_TYPE_EDGE_RISING;
    else
        return IRQ_TYPE_LEVEL_HIGH;
}

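/*
 * Enable the interrupts selected by bitmask r within rank n: set
 * GIC_IRQ_GUEST_ENABLED, re-raise any vIRQ that is already inflight and,
 * for hardware-backed interrupts, configure and enable the physical IRQ.
 */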
void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n)
{
    const unsigned long mask = r;
    struct pending_irq *p;
    unsigned int irq;
    unsigned long flags;
    int i = 0;
    struct vcpu *v_target;
    struct domain *d = v->domain;

    /* LPIs will never be enabled via this function. */
    ASSERT(!is_lpi(32 * n + 31));

    while ( (i = find_next_bit(&mask, 32, i)) < 32 ) {
        irq = i + (32 * n);
        v_target = vgic_get_target_vcpu(v, irq);
        spin_lock_irqsave(&v_target->arch.vgic.lock, flags);
        p = irq_to_pending(v_target, irq);
        set_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
        if ( !list_empty(&p->inflight) && !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) )
            gic_raise_guest_irq(v_target, irq, p->priority);
        spin_unlock_irqrestore(&v_target->arch.vgic.lock, flags);
        if ( p->desc != NULL )
        {
            irq_set_affinity(p->desc, cpumask_of(v_target->processor));
            spin_lock_irqsave(&p->desc->lock, flags);
            /*
             * The irq cannot be a PPI, we only support delivery of SPIs
             * to guests.
             */
            ASSERT(irq >= 32);
            if ( irq_type_set_by_domain(d) )
                gic_set_irq_type(p->desc, vgic_get_virq_type(v, n, i));
            p->desc->handler->enable(p->desc);
            spin_unlock_irqrestore(&p->desc->lock, flags);
        }
        i++;
    }
}

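/*
 * Deliver a virtual SGI to the vCPUs selected by irqmode: an explicit
 * target list, all other online vCPUs, or the requesting vCPU itself.
 * Returns false for an unhandled mode.
 */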
bool vgic_to_sgi(struct vcpu *v, register_t sgir, enum gic_sgi_mode irqmode,
                 int virq, const struct sgi_target *target)
{
    struct domain *d = v->domain;
    int vcpuid;
    int i;
    unsigned int base;
    unsigned long int bitmap;

    ASSERT( virq < 16 );

    switch ( irqmode )
    {
    case SGI_TARGET_LIST:
        perfc_incr(vgic_sgi_list);
        base = target->aff1 << 4;
        bitmap = target->list;
        for_each_set_bit( i, &bitmap, sizeof(target->list) * 8 )
        {
            vcpuid = base + i;
            if ( vcpuid >= d->max_vcpus || d->vcpu[vcpuid] == NULL ||
                 !is_vcpu_online(d->vcpu[vcpuid]) )
            {
                gprintk(XENLOG_WARNING, "VGIC: write r=%"PRIregister" \
                        target->list=%hx, wrong CPUTargetList \n",
                        sgir, target->list);
                continue;
            }
            vgic_inject_irq(d, d->vcpu[vcpuid], virq, true);
        }
        break;
    case SGI_TARGET_OTHERS:
        perfc_incr(vgic_sgi_others);
        for ( i = 0; i < d->max_vcpus; i++ )
        {
            if ( i != current->vcpu_id && d->vcpu[i] != NULL &&
                 is_vcpu_online(d->vcpu[i]) )
                vgic_inject_irq(d, d->vcpu[i], virq, true);
        }
        break;
    case SGI_TARGET_SELF:
        perfc_incr(vgic_sgi_self);
        vgic_inject_irq(d, current, virq, true);
        break;
    default:
        gprintk(XENLOG_WARNING,
                "vGICD:unhandled GICD_SGIR write %"PRIregister" \
                with wrong mode\n", sgir);
        return false;
    }

    return true;
}

/*
 * Returns the pointer to the struct pending_irq belonging to the given
 * interrupt.
 * This can return NULL if called for an LPI which has been unmapped
 * meanwhile.
 */
struct pending_irq *irq_to_pending(struct vcpu *v, unsigned int irq)
{
    struct pending_irq *n;
    /* Pending irqs allocation strategy: the first vgic.nr_spis irqs
     * are used for SPIs; the rest are used for per-cpu irqs */
    if ( irq < 32 )
        n = &v->arch.vgic.pending_irqs[irq];
    else if ( is_lpi(irq) )
        n = v->domain->arch.vgic.handler->lpi_to_pending(v->domain, irq);
    else
        n = &v->domain->arch.vgic.pending_irqs[irq - 32];
    return n;
}

struct pending_irq *spi_to_pending(struct domain *d, unsigned int irq)
{
    ASSERT(irq >= NR_LOCAL_IRQS);

    return &d->arch.vgic.pending_irqs[irq - 32];
}

void vgic_clear_pending_irqs(struct vcpu *v)
{
    struct pending_irq *p, *t;
    unsigned long flags;

    spin_lock_irqsave(&v->arch.vgic.lock, flags);
    list_for_each_entry_safe ( p, t, &v->arch.vgic.inflight_irqs, inflight )
        list_del_init(&p->inflight);
    gic_clear_pending_irqs(v);
    spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
}

void vgic_remove_irq_from_queues(struct vcpu *v, struct pending_irq *p)
{
    ASSERT(spin_is_locked(&v->arch.vgic.lock));

    clear_bit(GIC_IRQ_GUEST_QUEUED, &p->status);
    list_del_init(&p->inflight);
    gic_remove_from_lr_pending(v, p);
}

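/*
 * Inject a virtual interrupt into a vCPU. If v is NULL the vIRQ must be an
 * SPI and is delivered to its configured target vCPU. The vIRQ is queued on
 * the inflight list in priority order and the vCPU is kicked.
 */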
void vgic_inject_irq(struct domain *d, struct vcpu *v, unsigned int virq,
                     bool level)
{
    uint8_t priority;
    struct pending_irq *iter, *n;
    unsigned long flags;

    /*
     * For edge triggered interrupts we always ignore a "falling edge".
     * For level triggered interrupts we shouldn't, but do anyway.
     */
    if ( !level )
        return;

    if ( !v )
    {
        /* The IRQ needs to be an SPI if no vCPU is specified. */
        ASSERT(virq >= 32 && virq <= vgic_num_irqs(d));

        v = vgic_get_target_vcpu(d->vcpu[0], virq);
    }

    spin_lock_irqsave(&v->arch.vgic.lock, flags);

    n = irq_to_pending(v, virq);
    /* If an LPI has been removed, there is nothing to inject here. */
    if ( unlikely(!n) )
    {
        spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
        return;
    }

    /* vcpu offline */
    if ( test_bit(_VPF_down, &v->pause_flags) )
    {
        spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
        return;
    }

    set_bit(GIC_IRQ_GUEST_QUEUED, &n->status);

    if ( !list_empty(&n->inflight) )
    {
        gic_raise_inflight_irq(v, virq);
        goto out;
    }

    priority = vgic_get_virq_priority(v, virq);
    n->priority = priority;

    /* the irq is enabled */
    if ( test_bit(GIC_IRQ_GUEST_ENABLED, &n->status) )
        gic_raise_guest_irq(v, virq, priority);

    list_for_each_entry ( iter, &v->arch.vgic.inflight_irqs, inflight )
    {
        if ( iter->priority > priority )
        {
            list_add_tail(&n->inflight, &iter->inflight);
            goto out;
        }
    }
    list_add_tail(&n->inflight, &v->arch.vgic.inflight_irqs);
out:
    spin_unlock_irqrestore(&v->arch.vgic.lock, flags);

    /* we have a new higher priority irq, inject it into the guest */
    vcpu_kick(v);

    return;
}

bool vgic_evtchn_irq_pending(struct vcpu *v)
{
    struct pending_irq *p;

    p = irq_to_pending(v, v->domain->arch.evtchn_irq);
    /* Does not work for LPIs. */
    ASSERT(!is_lpi(v->domain->arch.evtchn_irq));

    return list_empty(&p->inflight);
}

bool vgic_emulate(struct cpu_user_regs *regs, union hsr hsr)
{
    struct vcpu *v = current;

    ASSERT(v->domain->arch.vgic.handler->emulate_reg != NULL);

    return v->domain->arch.vgic.handler->emulate_reg(regs, hsr);
}

bool vgic_reserve_virq(struct domain *d, unsigned int virq)
{
    if ( virq >= vgic_num_irqs(d) )
        return false;

    return !test_and_set_bit(virq, d->arch.vgic.allocated_irqs);
}

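/*
 * Allocate a free vIRQ: a PPI (16-31) when spi is false, otherwise an SPI.
 * Returns the vIRQ number, or -1 if none is available.
 */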
int vgic_allocate_virq(struct domain *d, bool spi)
{
    int first, end;
    unsigned int virq;

    if ( !spi )
    {
        /* We only allocate PPIs. SGIs are all reserved */
        first = 16;
        end = 32;
    }
    else
    {
        first = 32;
        end = vgic_num_irqs(d);
    }

    /*
     * There is no spinlock to protect allocated_irqs, therefore
     * test_and_set_bit may fail. If so retry it.
     */
    do
    {
        virq = find_next_zero_bit(d->arch.vgic.allocated_irqs, end, first);
        if ( virq >= end )
            return -1;
    }
    while ( test_and_set_bit(virq, d->arch.vgic.allocated_irqs) );

    return virq;
}

void vgic_free_virq(struct domain *d, unsigned int virq)
{
    clear_bit(virq, d->arch.vgic.allocated_irqs);
}

unsigned int vgic_max_vcpus(unsigned int domctl_vgic_version)
{
    switch ( domctl_vgic_version )
    {
    case XEN_DOMCTL_CONFIG_GIC_V2:
        return 8;

#ifdef CONFIG_GICV3
    case XEN_DOMCTL_CONFIG_GIC_V3:
        return 4096;
#endif

    default:
        return 0;
    }
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */