/*
 * ARM Virtual Generic Interrupt Controller support
 *
 * Ian Campbell <ian.campbell@citrix.com>
 * Copyright (c) 2011 Citrix Systems.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef __ASM_ARM_VGIC_H__
#define __ASM_ARM_VGIC_H__

#ifdef CONFIG_NEW_VGIC
#include <asm/new_vgic.h>
#else

#include <xen/radix-tree.h>
#include <xen/rbtree.h>

struct pending_irq
{
    /*
     * The following two states track the lifecycle of the guest irq.
     * However, because we do not want to track whether an irq added to
     * an LR register is PENDING or ACTIVE, the following states are
     * only an approximation.
     *
     * GIC_IRQ_GUEST_QUEUED: the irq is asserted and queued for
     * injection into the guest's LRs.
     *
     * GIC_IRQ_GUEST_VISIBLE: the irq has been added to an LR register,
     * therefore the guest is aware of it. From the guest point of view
     * the irq can be pending (if the guest has not acked the irq yet)
     * or active (after acking the irq).
     *
     * In order for the state machine to be fully accurate, for level
     * interrupts, we should keep the interrupt's pending state until
     * the guest deactivates the irq. However, because we do not know
     * when that happens, we instead track whether there is an interrupt
     * queued using GIC_IRQ_GUEST_QUEUED. We clear it when we add it to
     * an LR register. We set it when we receive another interrupt
     * notification.  Therefore it is possible to set
     * GIC_IRQ_GUEST_QUEUED while the irq is GIC_IRQ_GUEST_VISIBLE. We
     * could also change the state of the guest irq in the LR register
     * from active to active and pending, but for simplicity we simply
     * inject a second irq after the guest EOIs the first one.
     *
     * An additional state is used to keep track of whether the guest
     * irq is enabled at the vgicd level:
     *
     * GIC_IRQ_GUEST_ENABLED: the guest IRQ is enabled at the VGICD
     * level (GICD_ICENABLER/GICD_ISENABLER).
     *
     * GIC_IRQ_GUEST_MIGRATING: the irq is being migrated to a different
     * vcpu while it is still inflight and on a GICH_LR register on the
     * old vcpu.
     *
     * GIC_IRQ_GUEST_PRISTINE_LPI: the IRQ is a newly mapped LPI, which
     * has never been in an LR before. This means that any trace of an
     * LPI with the same number in an LR must be from an older LPI, which
     * has been unmapped before.
     */
#define GIC_IRQ_GUEST_QUEUED   0
#define GIC_IRQ_GUEST_ACTIVE   1
#define GIC_IRQ_GUEST_VISIBLE  2
#define GIC_IRQ_GUEST_ENABLED  3
#define GIC_IRQ_GUEST_MIGRATING   4
#define GIC_IRQ_GUEST_PRISTINE_LPI  5
    unsigned long status;
    struct irq_desc *desc; /* only set if the irq corresponds to a physical irq */
    unsigned int irq;
#define GIC_INVALID_LR         (uint8_t)~0
    uint8_t lr;
    uint8_t priority;
    uint8_t lpi_priority;       /* Caches the priority if this is an LPI. */
    uint8_t lpi_vcpu_id;        /* The VCPU for an LPI. */
    /* inflight is used to append instances of pending_irq to
     * vgic.inflight_irqs */
    struct list_head inflight;
    /* lr_queue is used to append instances of pending_irq to
     * lr_pending. lr_pending is a per vcpu queue, therefore lr_queue
     * accesses are protected with the vgic lock.
     * TODO: when implementing irq migration, taking only the current
     * vgic lock is not going to be enough. */
    struct list_head lr_queue;
};
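
/*
 * Illustrative sketch only (the example_* helpers are hypothetical; the
 * real transitions live in the GIC/vGIC code): typical manipulation of
 * the status bits above using the standard bitops.
 *
 *   static void example_mark_queued(struct pending_irq *p)
 *   {
 *       // Another interrupt notification arrived: remember that an
 *       // instance must (still) be injected, even if the irq is
 *       // already GIC_IRQ_GUEST_VISIBLE.
 *       set_bit(GIC_IRQ_GUEST_QUEUED, &p->status);
 *   }
 *
 *   static void example_mark_in_lr(struct pending_irq *p, uint8_t lr)
 *   {
 *       // The irq has been written to an LR: it is now visible to the
 *       // guest and no longer merely queued.
 *       p->lr = lr;
 *       clear_bit(GIC_IRQ_GUEST_QUEUED, &p->status);
 *       set_bit(GIC_IRQ_GUEST_VISIBLE, &p->status);
 *   }
 */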

#define NR_INTERRUPT_PER_RANK   32
#define INTERRUPT_RANK_MASK (NR_INTERRUPT_PER_RANK - 1)

/* Represents state corresponding to a block of 32 interrupts */
struct vgic_irq_rank {
    spinlock_t lock; /* Covers access to all other members of this struct */

    uint8_t index;

    uint32_t ienable;
    uint32_t icfg[2];

    /*
     * Provide efficient access to the priority of a vIRQ while keeping
     * the emulation simple.
     * Note: this only works as long as Xen runs little-endian.
     */
    union {
        uint8_t priority[32];
        uint32_t ipriorityr[8];
    };

    /*
     * It's more convenient to store a target VCPU per vIRQ
     * than the register ITARGETSR/IROUTER itself.
     * Use atomic operations to read/write the vcpu fields to avoid
     * taking the rank lock.
     */
    uint8_t vcpu[32];
};
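
/*
 * Illustrative sketch only (hypothetical helpers): thanks to the union
 * above, a vIRQ's priority can be read byte-wise, and the target vCPU
 * can be read without taking the rank lock, assuming the usual Xen
 * read_atomic() helper:
 *
 *   static uint8_t example_rank_priority(const struct vgic_irq_rank *rank,
 *                                        unsigned int virq)
 *   {
 *       return rank->priority[virq & INTERRUPT_RANK_MASK];
 *   }
 *
 *   static uint8_t example_rank_vcpu(struct vgic_irq_rank *rank,
 *                                    unsigned int virq)
 *   {
 *       return read_atomic(&rank->vcpu[virq & INTERRUPT_RANK_MASK]);
 *   }
 */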

struct vgic_dist {
    /* Version of the vGIC */
    enum gic_version version;
    /* GIC HW version specific vGIC driver handler */
    const struct vgic_ops *handler;
    /*
     * Covers access to other members of this struct _except_ for
     * shared_irqs, where each member contains its own locking.
     *
     * If both classes of lock are required then this lock must be
     * taken first. If multiple rank locks are required (including
     * the per-vcpu private_irqs rank) then they must be taken in
     * rank order.
     */
    spinlock_t lock;
    uint32_t ctlr;
    int nr_spis; /* Number of SPIs */
    unsigned long *allocated_irqs; /* bitmap of IRQs allocated */
    struct vgic_irq_rank *shared_irqs;
    /*
     * SPIs are domain global, SGIs and PPIs are per-VCPU and stored in
     * struct arch_vcpu.
     */
    struct pending_irq *pending_irqs;
    /* Base address for guest GIC */
    paddr_t dbase; /* Distributor base address */
#ifdef CONFIG_GICV3
    /* GIC V3 addressing */
    /* List of contiguous regions occupied by the redistributors */
    struct vgic_rdist_region {
        paddr_t base;                   /* Base address */
        paddr_t size;                   /* Size */
        unsigned int first_cpu;         /* First CPU handled */
    } *rdist_regions;
    int nr_regions;                     /* Number of rdist regions */
    unsigned long int nr_lpis;
    uint64_t rdist_propbase;
    struct rb_root its_devices;         /* Devices mapped to an ITS */
    spinlock_t its_devices_lock;        /* Protects the its_devices tree */
    struct radix_tree_root pend_lpi_tree; /* Stores struct pending_irq's */
    rwlock_t pend_lpi_tree_lock;        /* Protects the pend_lpi_tree */
    struct list_head vits_list;         /* List of virtual ITSes */
    unsigned int intid_bits;
    /*
     * TODO: if there are more bool's being added below, consider
     * a flags variable instead.
     */
    bool rdists_enabled;                /* Is any redistributor enabled? */
    bool has_its;
#endif
};

struct vgic_cpu {
    /*
     * SGIs and PPIs are per-VCPU, SPIs are domain global and in
     * struct arch_domain.
     */
    struct pending_irq pending_irqs[32];
    struct vgic_irq_rank *private_irqs;

    /* This list is ordered by IRQ priority and it is used to keep
     * track of the IRQs that the VGIC injected into the guest.
     * Depending on the availability of LR registers, the IRQs might
     * actually be in an LR, and therefore injected into the guest,
     * or queued in gic.lr_pending.
     * As soon as an IRQ is EOI'd by the guest and removed from the
     * corresponding LR it is also removed from this list. */
    struct list_head inflight_irqs;
    /* lr_pending is used to queue IRQs (struct pending_irq) that the
     * vgic tried to inject in the guest (calling gic_set_guest_irq) but
     * no LRs were available at the time.
     * As soon as an LR is freed we remove the first IRQ from this
     * list and write it to the LR register.
     * lr_pending is a subset of vgic.inflight_irqs. */
    struct list_head lr_pending;
    spinlock_t lock;

    /* GICv3: redistributor base and flags for this vCPU */
    paddr_t rdist_base;
    uint64_t rdist_pendbase;
#define VGIC_V3_RDIST_LAST      (1 << 0)        /* last vCPU of the rdist */
#define VGIC_V3_LPIS_ENABLED    (1 << 1)
    uint8_t flags;
};
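
/*
 * Orientation note (the helper below is hypothetical): every pending_irq
 * on lr_pending is also on inflight_irqs, so an irq that is inflight but
 * still waiting for a free LR sits on both lists:
 *
 *   static bool example_waiting_for_lr(const struct pending_irq *p)
 *   {
 *       return !list_empty(&p->inflight) && !list_empty(&p->lr_queue);
 *   }
 */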

struct sgi_target {
    uint8_t aff1;
    uint16_t list;
};

static inline void sgi_target_init(struct sgi_target *sgi_target)
{
    sgi_target->aff1 = 0;
    sgi_target->list = 0;
}

struct vgic_ops {
    /* Initialize vGIC */
    int (*vcpu_init)(struct vcpu *v);
    /* Domain specific initialization of vGIC */
    int (*domain_init)(struct domain *d);
    /* Release resources that were allocated by domain_init */
    void (*domain_free)(struct domain *d);
    /* vGIC sysreg/cpregs emulate */
    bool (*emulate_reg)(struct cpu_user_regs *regs, union hsr hsr);
    /* lookup the struct pending_irq for a given LPI interrupt */
    struct pending_irq *(*lpi_to_pending)(struct domain *d, unsigned int vlpi);
    int (*lpi_get_priority)(struct domain *d, uint32_t vlpi);
};
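
/*
 * Illustrative sketch (all example_* names are hypothetical): a vGIC
 * driver fills in a struct vgic_ops and registers it during domain
 * initialisation via register_vgic_ops(), declared below.
 *
 *   static const struct vgic_ops example_vgic_ops = {
 *       .vcpu_init   = example_vcpu_init,
 *       .domain_init = example_domain_init,
 *       .domain_free = example_domain_free,
 *       .emulate_reg = example_emulate_reg,
 *   };
 *
 *   register_vgic_ops(d, &example_vgic_ops);
 */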

/* Number of ranks of interrupt registers for a domain */
#define DOMAIN_NR_RANKS(d) (((d)->arch.vgic.nr_spis+31)/32)
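/*
 * Worked example: a domain with nr_spis == 96 has DOMAIN_NR_RANKS == 3
 * shared ranks, covering SPIs 32-127; SGIs and PPIs live in the separate
 * per-vCPU private rank.
 */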

#define vgic_lock(v)   spin_lock_irq(&(v)->domain->arch.vgic.lock)
#define vgic_unlock(v) spin_unlock_irq(&(v)->domain->arch.vgic.lock)

#define vgic_lock_rank(v, r, flags)   spin_lock_irqsave(&(r)->lock, flags)
#define vgic_unlock_rank(v, r, flags) spin_unlock_irqrestore(&(r)->lock, flags)
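
/*
 * Illustrative sketch of the locking order documented in struct vgic_dist
 * (hypothetical code path): when both the domain-wide vgic lock and a
 * rank lock are needed, the vgic lock is taken first.
 *
 *   unsigned long flags;
 *
 *   vgic_lock(v);                      // domain-wide vgic lock first
 *   vgic_lock_rank(v, rank, flags);    // then the per-rank lock
 *   ...
 *   vgic_unlock_rank(v, rank, flags);
 *   vgic_unlock(v);
 */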

/*
 * Rank containing GICD_<FOO><n> for GICD_<FOO> with
 * <b>-bits-per-interrupt
 */
static inline int REG_RANK_NR(int b, uint32_t n)
{
    switch ( b )
    {
    /*
     * IRQ ranks are of size 32, so n is shifted right by at most 5
     * (for 32 bits per interrupt and above). For the 64-bit case n has
     * already been shifted by DABT_DOUBLE_WORD by the caller.
     */
    case 64:
    case 32: return n >> 5;
    case 16: return n >> 4;
    case 8: return n >> 3;
    case 4: return n >> 2;
    case 2: return n >> 1;
    case 1: return n;
    default: BUG();
    }
}
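
/*
 * Worked example: GICD_IPRIORITYR<n> uses 8 bits per interrupt, so each
 * 32-bit register covers 4 interrupts.  GICD_IPRIORITYR8 therefore covers
 * vIRQs 32-35, and REG_RANK_NR(8, 8) == 1, i.e. the first shared rank.
 */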

enum gic_sgi_mode;

/*
 * Offset of GICD_<FOO><n> within its rank, for GICD_<FOO> size <s> with
 * <b>-bits-per-interrupt.
 */
#define REG_RANK_INDEX(b, n, s) ((((n) >> s) & ((b)-1)) % 32)
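/*
 * Worked example (assuming the usual DABT_WORD == 2 access size): a
 * 32-bit access at byte offset 0x24 into the GICD_IPRIORITYR range gives
 * REG_RANK_INDEX(8, 0x24, DABT_WORD), which evaluates to (0x24 >> 2) & 7,
 * i.e. 1: the second ipriorityr[] entry of the rank returned by
 * vgic_rank_offset().
 */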


extern struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int virq);
extern void vgic_remove_irq_from_queues(struct vcpu *v, struct pending_irq *p);
extern void gic_remove_from_lr_pending(struct vcpu *v, struct pending_irq *p);
extern void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq);
extern struct pending_irq *irq_to_pending(struct vcpu *v, unsigned int irq);
extern struct pending_irq *spi_to_pending(struct domain *d, unsigned int irq);
extern struct vgic_irq_rank *vgic_rank_offset(struct vcpu *v, int b, int n, int s);
extern struct vgic_irq_rank *vgic_rank_irq(struct vcpu *v, unsigned int irq);
extern void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n);
extern void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n);
extern void register_vgic_ops(struct domain *d, const struct vgic_ops *ops);
int vgic_v2_init(struct domain *d, int *mmio_count);
int vgic_v3_init(struct domain *d, int *mmio_count);

extern bool vgic_to_sgi(struct vcpu *v, register_t sgir,
                        enum gic_sgi_mode irqmode, int virq,
                        const struct sgi_target *target);
extern bool vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq);

#endif /* !CONFIG_NEW_VGIC */

/*** Common VGIC functions used by Xen arch code ****/

/*
 * At the moment vgic_num_irqs() just covers SPIs and the private IRQs,
 * as it's mostly used for allocating the pending_irq and irq_desc arrays,
 * in which LPIs don't participate.
 */
#define vgic_num_irqs(d)        ((d)->arch.vgic.nr_spis + 32)

/*
 * Allocate a guest VIRQ
 *  - spi == 0 => allocate a PPI. It will be the same on every vCPU
 *  - spi == 1 => allocate an SPI
 */
extern int vgic_allocate_virq(struct domain *d, bool spi);
/* Reserve a specific guest vIRQ */
extern bool vgic_reserve_virq(struct domain *d, unsigned int virq);
extern void vgic_free_virq(struct domain *d, unsigned int virq);

static inline int vgic_allocate_ppi(struct domain *d)
{
    return vgic_allocate_virq(d, false /* ppi */);
}

static inline int vgic_allocate_spi(struct domain *d)
{
    return vgic_allocate_virq(d, true /* spi */);
}
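
/*
 * Usage sketch (hypothetical call site; a negative return value is
 * assumed to indicate that no vIRQ of the requested class is free):
 *
 *   int virq = vgic_allocate_spi(d);   // domain-global SPI
 *
 *   if ( virq < 0 )
 *       ...                            // no free SPI left
 *
 * A specific vIRQ number can instead be pinned with
 * vgic_reserve_virq(d, virq), declared above.
 */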

struct irq_desc *vgic_get_hw_irq_desc(struct domain *d, struct vcpu *v,
                                      unsigned int virq);
int vgic_connect_hw_irq(struct domain *d, struct vcpu *v, unsigned int virq,
                        struct irq_desc *desc, bool connect);

bool vgic_evtchn_irq_pending(struct vcpu *v);

int domain_vgic_register(struct domain *d, int *mmio_count);
int domain_vgic_init(struct domain *d, unsigned int nr_spis);
void domain_vgic_free(struct domain *d);
int vcpu_vgic_init(struct vcpu *vcpu);
int vcpu_vgic_free(struct vcpu *vcpu);

void vgic_inject_irq(struct domain *d, struct vcpu *v, unsigned int virq,
                     bool level);

extern void vgic_clear_pending_irqs(struct vcpu *v);

extern bool vgic_emulate(struct cpu_user_regs *regs, union hsr hsr);

/* Maximum vCPUs for a specific vGIC version, or 0 for unsupported. */
unsigned int vgic_max_vcpus(unsigned int domctl_vgic_version);

void vgic_v2_setup_hw(paddr_t dbase, paddr_t cbase, paddr_t csize,
                      paddr_t vbase, uint32_t aliased_offset);

#ifdef CONFIG_GICV3
struct rdist_region;
void vgic_v3_setup_hw(paddr_t dbase,
                      unsigned int nr_rdist_regions,
                      const struct rdist_region *regions,
                      unsigned int intid_bits);
#endif

void vgic_sync_to_lrs(void);
void vgic_sync_from_lrs(struct vcpu *v);

int vgic_vcpu_pending_irq(struct vcpu *v);

#endif /* __ASM_ARM_VGIC_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */