/*
 * xen/arch/arm/gic-vgic.c
 *
 * ARM Generic Interrupt Controller virtualization support
 *
 * Tim Deegan <tim@xen.org>
 * Copyright (c) 2011 Citrix Systems.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <xen/errno.h>
#include <xen/irq.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <asm/domain.h>
#include <asm/gic.h>
#include <asm/vgic.h>

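/* True if every list register (LR) on this pCPU is currently in use. */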
#define lr_all_full() (this_cpu(lr_mask) == ((1 << gic_get_nr_lrs()) - 1))

#undef GIC_DEBUG

static void gic_update_one_lr(struct vcpu *v, int i);

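/*
 * Program the given pending_irq into list register @lr and mark the
 * interrupt as visible to the guest. Called with interrupts disabled.
 */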
static inline void gic_set_lr(int lr, struct pending_irq *p,
                              unsigned int state)
{
    ASSERT(!local_irq_is_enabled());

    clear_bit(GIC_IRQ_GUEST_PRISTINE_LPI, &p->status);

    gic_hw_ops->update_lr(lr, p->irq, p->priority,
                          p->desc ? p->desc->irq : INVALID_IRQ, state);

    set_bit(GIC_IRQ_GUEST_VISIBLE, &p->status);
    clear_bit(GIC_IRQ_GUEST_QUEUED, &p->status);
    p->lr = lr;
}

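/*
 * Insert @n into the vCPU's lr_pending queue, keeping the queue ordered by
 * ascending priority value (on the GIC a lower value means a higher
 * priority). Does nothing if the IRQ is already queued.
 */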
static inline void gic_add_to_lr_pending(struct vcpu *v, struct pending_irq *n)
{
    struct pending_irq *iter;

    ASSERT(spin_is_locked(&v->arch.vgic.lock));

    if ( !list_empty(&n->lr_queue) )
        return;

    list_for_each_entry ( iter, &v->arch.vgic.lr_pending, lr_queue )
    {
        if ( iter->priority > n->priority )
        {
            list_add_tail(&n->lr_queue, &iter->lr_queue);
            return;
        }
    }
    list_add_tail(&n->lr_queue, &v->arch.vgic.lr_pending);
}

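/*
 * Remove @p from the lr_pending queue. Thanks to list_del_init() this is
 * safe to call even when @p is not currently queued.
 */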
void gic_remove_from_lr_pending(struct vcpu *v, struct pending_irq *p)
{
    ASSERT(spin_is_locked(&v->arch.vgic.lock));

    list_del_init(&p->lr_queue);
}

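/*
 * Raise an IRQ that is already inflight on @v: if the IRQ is in an LR of
 * the current vCPU (i.e. it is not waiting on the lr_pending queue),
 * resample that LR so the guest sees the new pending state.
 */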
void gic_raise_inflight_irq(struct vcpu *v, unsigned int virtual_irq)
{
    struct pending_irq *n = irq_to_pending(v, virtual_irq);

    /* If an LPI has been removed meanwhile, there is nothing left to raise. */
    if ( unlikely(!n) )
        return;

    ASSERT(spin_is_locked(&v->arch.vgic.lock));

    /* Don't try to update the LR if the interrupt is disabled. */
    if ( !test_bit(GIC_IRQ_GUEST_ENABLED, &n->status) )
        return;

    if ( list_empty(&n->lr_queue) )
    {
        if ( v == current )
            gic_update_one_lr(v, n->lr);
    }
#ifdef GIC_DEBUG
    else
        gdprintk(XENLOG_DEBUG, "trying to inject irq=%u into %pv, when it is still lr_pending\n",
                 virtual_irq, v);
#endif
}

/*
 * Find an unused LR to insert an IRQ into, starting with the LR given
 * by @lr. If this new interrupt is a PRISTINE LPI, scan the other LRs to
 * avoid inserting the same IRQ twice. This situation can occur when an
 * event gets discarded while the LPI is in an LR, and a new LPI with the
 * same number gets mapped quickly afterwards.
 */
static unsigned int gic_find_unused_lr(struct vcpu *v,
                                       struct pending_irq *p,
                                       unsigned int lr)
{
    unsigned int nr_lrs = gic_get_nr_lrs();
    unsigned long *lr_mask = (unsigned long *) &this_cpu(lr_mask);
    struct gic_lr lr_val;

    ASSERT(spin_is_locked(&v->arch.vgic.lock));

    if ( unlikely(test_bit(GIC_IRQ_GUEST_PRISTINE_LPI, &p->status)) )
    {
        unsigned int used_lr;

        for_each_set_bit(used_lr, lr_mask, nr_lrs)
        {
            gic_hw_ops->read_lr(used_lr, &lr_val);
            if ( lr_val.virq == p->irq )
                return used_lr;
        }
    }

    lr = find_next_zero_bit(lr_mask, nr_lrs, lr);

    return lr;
}

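/*
 * Raise @virtual_irq on @v: if we are running on the target vCPU and no
 * other IRQs are already waiting, program a free LR directly; otherwise
 * queue the IRQ on lr_pending to be picked up on the next guest entry.
 */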
void gic_raise_guest_irq(struct vcpu *v, unsigned int virtual_irq,
                         unsigned int priority)
{
    int i;
    unsigned int nr_lrs = gic_get_nr_lrs();
    struct pending_irq *p = irq_to_pending(v, virtual_irq);

    ASSERT(spin_is_locked(&v->arch.vgic.lock));

    if ( unlikely(!p) )
        /* An unmapped LPI does not need to be raised. */
        return;

    if ( v == current && list_empty(&v->arch.vgic.lr_pending) )
    {
        i = gic_find_unused_lr(v, p, 0);

        if ( i < nr_lrs )
        {
            set_bit(i, &this_cpu(lr_mask));
            gic_set_lr(i, p, GICH_LR_PENDING);
            return;
        }
    }

    gic_add_to_lr_pending(v, p);
}

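/*
 * Sync the state of one LR back into the corresponding pending_irq:
 * re-raise interrupts that became pending again while still active, and
 * retire those the guest has EOIed, freeing up the LR.
 */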
static void gic_update_one_lr(struct vcpu *v, int i)
{
    struct pending_irq *p;
    int irq;
    struct gic_lr lr_val;

    ASSERT(spin_is_locked(&v->arch.vgic.lock));
    ASSERT(!local_irq_is_enabled());

    gic_hw_ops->read_lr(i, &lr_val);
    irq = lr_val.virq;
    p = irq_to_pending(v, irq);
    /*
     * An LPI might have been unmapped, in which case we just clean up here.
     * If that LPI is marked as PRISTINE, the information in the LR is bogus,
     * as it belongs to a previous, already unmapped LPI. So we discard it
     * here as well.
     */
    if ( unlikely(!p ||
                  test_and_clear_bit(GIC_IRQ_GUEST_PRISTINE_LPI, &p->status)) )
    {
        ASSERT(is_lpi(irq));

        gic_hw_ops->clear_lr(i);
        clear_bit(i, &this_cpu(lr_mask));

        return;
    }

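    /*
     * The LR can be in one of three states: still active (possibly with a
     * new instance pending), merely pending (not yet taken by the guest),
     * or invalid (the guest has EOIed the interrupt).
     */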
    if ( lr_val.active )
    {
        set_bit(GIC_IRQ_GUEST_ACTIVE, &p->status);
        if ( test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) &&
             test_and_clear_bit(GIC_IRQ_GUEST_QUEUED, &p->status) )
        {
            if ( p->desc == NULL )
            {
                lr_val.pending = true;
                gic_hw_ops->write_lr(i, &lr_val);
            }
            else
                gdprintk(XENLOG_WARNING, "unable to inject hw irq=%d into %pv: already active in LR%d\n",
                         irq, v, i);
        }
    }
    else if ( lr_val.pending )
    {
        int q __attribute__ ((unused)) = test_and_clear_bit(GIC_IRQ_GUEST_QUEUED, &p->status);
#ifdef GIC_DEBUG
        if ( q )
            gdprintk(XENLOG_DEBUG, "trying to inject irq=%d into %pv, when it is already pending in LR%d\n",
                     irq, v, i);
#endif
    }
    else
    {
#ifndef NDEBUG
        gic_hw_ops->clear_lr(i);
#endif
        clear_bit(i, &this_cpu(lr_mask));

        if ( p->desc != NULL )
            clear_bit(_IRQ_INPROGRESS, &p->desc->status);
        clear_bit(GIC_IRQ_GUEST_VISIBLE, &p->status);
        clear_bit(GIC_IRQ_GUEST_ACTIVE, &p->status);
        p->lr = GIC_INVALID_LR;
        if ( test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) &&
             test_bit(GIC_IRQ_GUEST_QUEUED, &p->status) &&
             !test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
            gic_raise_guest_irq(v, irq, p->priority);
        else
        {
            list_del_init(&p->inflight);
            /*
             * Remove from inflight, then change the physical affinity. This
             * makes sure that, when a new interrupt is received on the next
             * pCPU, inflight is already cleared, so there are no concurrent
             * accesses to it.
             */
            smp_wmb();
            if ( test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
            {
                struct vcpu *v_target = vgic_get_target_vcpu(v, irq);

                irq_set_affinity(p->desc, cpumask_of(v_target->processor));
                clear_bit(GIC_IRQ_GUEST_MIGRATING, &p->status);
            }
        }
    }
}

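/*
 * Fold the state of all in-use LRs on this pCPU back into the vgic,
 * releasing the LRs of any interrupts the guest has finished handling.
 */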
void vgic_sync_from_lrs(struct vcpu *v)
{
    int i = 0;
    unsigned long flags;
    unsigned int nr_lrs = gic_get_nr_lrs();

    /*
     * The idle domain has no LRs to be cleared. Since gic_restore_state
     * doesn't write any LR registers for the idle domain, they could be
     * non-zero.
     */
    if ( is_idle_vcpu(v) )
        return;

    gic_hw_ops->update_hcr_status(GICH_HCR_UIE, false);

    spin_lock_irqsave(&v->arch.vgic.lock, flags);

    while ( (i = find_next_bit((const unsigned long *) &this_cpu(lr_mask),
                               nr_lrs, i)) < nr_lrs )
    {
        gic_update_one_lr(v, i);
        i++;
    }

    spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
}

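/*
 * Move as many IRQs as possible from the lr_pending queue into free LRs.
 * When all LRs are in use, evict lower-priority interrupts that are still
 * pending (but not yet active) in an LR to make room.
 */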
static void gic_restore_pending_irqs(struct vcpu *v)
{
    int lr = 0;
    struct pending_irq *p, *t, *p_r;
    struct list_head *inflight_r;
    unsigned int nr_lrs = gic_get_nr_lrs();
    int lrs = nr_lrs;

    ASSERT(!local_irq_is_enabled());

    spin_lock(&v->arch.vgic.lock);

    if ( list_empty(&v->arch.vgic.lr_pending) )
        goto out;

    inflight_r = &v->arch.vgic.inflight_irqs;
    list_for_each_entry_safe ( p, t, &v->arch.vgic.lr_pending, lr_queue )
    {
        lr = gic_find_unused_lr(v, p, lr);
        if ( lr >= nr_lrs )
        {
            /* No more free LRs: find a lower priority irq to evict. */
            list_for_each_entry_reverse ( p_r, inflight_r, inflight )
            {
                if ( p_r->priority == p->priority )
                    goto out;
                if ( test_bit(GIC_IRQ_GUEST_VISIBLE, &p_r->status) &&
                     !test_bit(GIC_IRQ_GUEST_ACTIVE, &p_r->status) )
                    goto found;
            }
            /*
             * We didn't find a victim this time, and we won't next
             * time, so quit.
             */
            goto out;

found:
            lr = p_r->lr;
            p_r->lr = GIC_INVALID_LR;
            set_bit(GIC_IRQ_GUEST_QUEUED, &p_r->status);
            clear_bit(GIC_IRQ_GUEST_VISIBLE, &p_r->status);
            gic_add_to_lr_pending(v, p_r);
            inflight_r = &p_r->inflight;
        }

        gic_set_lr(lr, p, GICH_LR_PENDING);
        list_del_init(&p->lr_queue);
        set_bit(lr, &this_cpu(lr_mask));

        /* We can only evict nr_lrs entries. */
        lrs--;
        if ( lrs == 0 )
            break;
    }

out:
    spin_unlock(&v->arch.vgic.lock);
}

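/*
 * Clear the vCPU's saved LR allocation mask and drop every IRQ still
 * waiting on the lr_pending queue.
 */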
void gic_clear_pending_irqs(struct vcpu *v)
{
    struct pending_irq *p, *t;

    ASSERT(spin_is_locked(&v->arch.vgic.lock));

    v->arch.lr_mask = 0;
    list_for_each_entry_safe ( p, t, &v->arch.vgic.lr_pending, lr_queue )
        gic_remove_from_lr_pending(v, p);
}

/**
 * vgic_vcpu_pending_irq() - determine if interrupts need to be injected
 * @v: The vCPU on which to check for interrupts.
 *
 * Checks whether there is an interrupt on the given VCPU which needs
 * handling in the guest. This requires at least one IRQ to be pending
 * and enabled.
 *
 * Returns: 1 if the guest should run to handle interrupts, 0 otherwise.
 */
int vgic_vcpu_pending_irq(struct vcpu *v)
{
    struct pending_irq *p;
    unsigned long flags;
    const unsigned long apr = gic_hw_ops->read_apr(0);
    int mask_priority;
    int active_priority;
    int rc = 0;

    /* We rely on reading the VMCR, which is only accessible locally. */
    ASSERT(v == current);

    mask_priority = gic_hw_ops->read_vmcr_priority();
    active_priority = find_first_bit(&apr, 32);

    spin_lock_irqsave(&v->arch.vgic.lock, flags);

    /*
     * TODO: We order the guest irqs by priority, but we don't change
     * the priority of host irqs.
     */

    /*
     * Find the first enabled, non-active irq; the queue is already
     * ordered by priority.
     */
    list_for_each_entry ( p, &v->arch.vgic.inflight_irqs, inflight )
    {
        if ( GIC_PRI_TO_GUEST(p->priority) >= mask_priority )
            goto out;
        if ( GIC_PRI_TO_GUEST(p->priority) >= active_priority )
            goto out;
        if ( test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) )
        {
            rc = 1;
            goto out;
        }
    }

out:
    spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
    return rc;
}

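/*
 * Fill the LRs with pending interrupts before entering the guest. If more
 * interrupts are queued than fit into the LRs, enable the maintenance
 * (underflow) interrupt so that we are notified when LRs free up again.
 */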
void vgic_sync_to_lrs(void)
{
    ASSERT(!local_irq_is_enabled());

    gic_restore_pending_irqs(current);

    if ( !list_empty(&current->arch.vgic.lr_pending) && lr_all_full() )
        gic_hw_ops->update_hcr_status(GICH_HCR_UIE, true);
}

void gic_dump_vgic_info(struct vcpu *v)
{
    struct pending_irq *p;

    list_for_each_entry ( p, &v->arch.vgic.inflight_irqs, inflight )
        printk("Inflight irq=%u lr=%u\n", p->irq, p->lr);

    list_for_each_entry ( p, &v->arch.vgic.lr_pending, lr_queue )
        printk("Pending irq=%u\n", p->irq);
}

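/*
 * Look up the hardware irq_desc connected to @virq. Only valid for SPIs
 * (virq >= 32); callers so far never pass a specific vCPU (hence the
 * ASSERT below).
 */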
struct irq_desc *vgic_get_hw_irq_desc(struct domain *d, struct vcpu *v,
                                      unsigned int virq)
{
    struct pending_irq *p;

    ASSERT(!v && virq >= 32);

    if ( !v )
        v = d->vcpu[0];

    p = irq_to_pending(v, virq);
    if ( !p )
        return NULL;

    return p->desc;
}

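/*
 * Connect (@connect == true) or disconnect the hardware IRQ @desc to/from
 * the virtual IRQ @virq of domain @d. Returns -EBUSY when connecting an
 * already connected or guest-enabled virtual IRQ.
 */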
int vgic_connect_hw_irq(struct domain *d, struct vcpu *v, unsigned int virq,
                        struct irq_desc *desc, bool connect)
{
    unsigned long flags;
    /*
     * Use vcpu0 to retrieve the pending_irq struct. Given that we only
     * route SPIs to guests, it doesn't make any difference.
     */
    struct vcpu *v_target = vgic_get_target_vcpu(d->vcpu[0], virq);
    struct vgic_irq_rank *rank = vgic_rank_irq(v_target, virq);
    struct pending_irq *p = irq_to_pending(v_target, virq);
    int ret = 0;

    /* "desc" is optional when we disconnect an IRQ. */
    ASSERT(!connect || desc);

    /* We are taking the rank lock to prevent parallel connections. */
    vgic_lock_rank(v_target, rank, flags);

    if ( connect )
    {
        /* The VIRQ should not be already enabled by the guest. */
        if ( !p->desc &&
             !test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) )
            p->desc = desc;
        else
            ret = -EBUSY;
    }
    else
    {
        if ( desc && p->desc != desc )
            ret = -EINVAL;
        else
            p->desc = NULL;
    }

    vgic_unlock_rank(v_target, rank, flags);

    return ret;
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */