/xen/xen/arch/arm/vgic/
vgic.c
     79  irq = NULL;                                    in vgic_get_lpi()
     84  return irq;                                    in vgic_get_lpi()
    151  xfree(irq);                                    in vgic_put_irq()
    173  return irq->vcpu ? : irq->target_vcpu;         in vgic_target_oracle()
    181  if ( irq->enabled && irq_is_pending(irq) )     in vgic_target_oracle()
    668  ret = irq_is_pending(irq) && irq->enabled;     in vgic_vcpu_pending_irq()
    817  if ( irq->hw && irq->target_vcpu == v)         in arch_move_irqs()
    880  if ( !irq->hw && !irq->enabled )               in vgic_connect_hw_irq()
    883  irq->hwintid = desc->irq;                      in vgic_connect_hw_irq()
    890  if ( desc && irq->hwintid != desc->irq )       in vgic_connect_hw_irq()
    [all …]
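The vgic_target_oracle() lines above hint at the delivery rule the new vgic uses: prefer the vCPU an interrupt is already queued on, otherwise fall back to its configured target, and only consider the interrupt deliverable when it is both enabled and pending. Below is a rough standalone restatement of that rule; the types and the way the two checks are combined are simplifications for the example, not Xen's actual vgic_target_oracle().

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Simplified stand-ins for Xen's struct vcpu / struct vgic_irq. */
    struct vcpu { int id; };

    struct vgic_irq {
        struct vcpu *vcpu;        /* vCPU the IRQ is currently queued on, if any */
        struct vcpu *target_vcpu; /* configured delivery target */
        bool enabled;
        bool pending;
    };

    /* Prefer the vCPU the IRQ already sits on, else the configured target,
     * and report no target at all when the IRQ cannot fire. */
    static struct vcpu *target_oracle(const struct vgic_irq *irq)
    {
        if ( irq->enabled && irq->pending )
            return irq->vcpu ? irq->vcpu : irq->target_vcpu;

        return NULL;
    }

    int main(void)
    {
        struct vcpu v0 = { 0 }, v1 = { 1 };
        struct vgic_irq irq = { .target_vcpu = &v1,
                                .enabled = true, .pending = true };
        struct vcpu *t;

        t = target_oracle(&irq);
        printf("deliver to vCPU %d\n", t ? t->id : -1);  /* vCPU 1 */

        irq.vcpu = &v0;                  /* already in flight on vCPU 0 */
        t = target_oracle(&irq);
        printf("deliver to vCPU %d\n", t ? t->id : -1);  /* vCPU 0 */
        return 0;
    }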
vgic-mmio.c
     58  if ( irq->enabled )                             in vgic_mmio_read_enable()
     89  irq->enabled = true;                            in vgic_mmio_write_senable()
     90  if ( irq->hw )                                  in vgic_mmio_write_senable()
    137  if ( irq->hw )                                  in vgic_mmio_write_cenable()
    197  if ( irq->hw )                                  in vgic_mmio_write_spending()
    216  ASSERT(irq->hw && desc->irq == irq->hwintid);   in vgic_mmio_write_spending()
    245  if ( irq->hw )                                  in vgic_mmio_write_cpending()
    269  ASSERT(irq->hw && desc->irq == irq->hwintid);   in vgic_mmio_write_cpending()
    308  if ( irq->active )                              in vgic_mmio_read_active()
    340  if ( irq->active || irq->vcpu )                 in vgic_mmio_write_cactive()
    [all …]
vgic-v2.c
     73  struct vgic_irq *irq;                           in vgic_v2_fold_lr_state() local
    105  if ( irq->hw )                                  in vgic_v2_fold_lr_state()
    111  spin_lock(&irq->irq_lock);                      in vgic_v2_fold_lr_state()
    158  if ( !irq->line_level )                         in vgic_v2_fold_lr_state()
    196  lr_val.virq = irq->intid;                       in vgic_v2_populate_lr()
    198  if ( irq_is_pending(irq) )                      in vgic_v2_populate_lr()
    212  if ( irq->source )                              in vgic_v2_populate_lr()
    217  lr_val.active = irq->active;                    in vgic_v2_populate_lr()
    219  if ( irq->hw )                                  in vgic_v2_populate_lr()
    228  if ( irq->active && irq_is_pending(irq) )       in vgic_v2_populate_lr()
    [all …]
vgic-mmio-v2.c
    119  irq->pending_latch = true;                                       in vgic_mmio_write_sgir()
    123  vgic_put_irq(d, irq);                                            in vgic_mmio_write_sgir()
    140  vgic_put_irq(vcpu->domain, irq);                                 in vgic_mmio_read_target()
    166  if ( irq->targets )                                              in vgic_mmio_write_target()
    168  irq->target_vcpu = vcpu->domain->vcpu[ffs(irq->targets) - 1];    in vgic_mmio_write_target()
    169  if ( irq->hw )                                                   in vgic_mmio_write_target()
    177  irq->target_vcpu = NULL;                                         in vgic_mmio_write_target()
    222  if ( !irq->source )                                              in vgic_mmio_write_sgipendc()
    223  irq->pending_latch = false;                                      in vgic_mmio_write_sgipendc()
    248  if ( irq->source )                                               in vgic_mmio_write_sgipends()
    [all …]
vgic.h
     35  static inline bool irq_is_pending(struct vgic_irq *irq)             in irq_is_pending() argument
     37  if ( irq->config == VGIC_CONFIG_EDGE )                              in irq_is_pending()
     38  return irq->pending_latch;                                          in irq_is_pending()
     40  return irq->pending_latch || irq->line_level;                       in irq_is_pending()
     43  static inline bool vgic_irq_is_mapped_level(struct vgic_irq *irq)   in vgic_irq_is_mapped_level() argument
     45  return irq->config == VGIC_CONFIG_LEVEL && irq->hw;                 in vgic_irq_is_mapped_level()
     50  void vgic_put_irq(struct domain *d, struct vgic_irq *irq);
     51  void vgic_queue_irq_unlock(struct domain *d, struct vgic_irq *irq,
     55  static inline void vgic_get_irq_kref(struct vgic_irq *irq)          in vgic_get_irq_kref() argument
     57  if ( irq->intid < VGIC_MIN_LPI )                                    in vgic_get_irq_kref()
    [all …]
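The vgic.h hits show the small predicates the rest of the new vgic is built on: an edge-triggered interrupt is pending only through its pending latch, a level-triggered one is pending when either the latch or the modelled line level is high, and a "mapped level" interrupt is a level-triggered one backed by hardware. The following standalone restatement keeps the same shape as the two inline helpers above; the struct is a trimmed stand-in, not Xen's struct vgic_irq.

    #include <stdbool.h>
    #include <stdio.h>

    enum vgic_irq_config { VGIC_CONFIG_EDGE, VGIC_CONFIG_LEVEL };

    /* Trimmed stand-in for the fields the two predicates look at. */
    struct vgic_irq {
        enum vgic_irq_config config;
        bool pending_latch;   /* latched by an edge or a "set pending" write */
        bool line_level;      /* modelled state of a level-triggered line    */
        bool hw;              /* backed by a physical interrupt              */
    };

    /* Same shape as irq_is_pending() in vgic.h. */
    static bool irq_is_pending(const struct vgic_irq *irq)
    {
        if ( irq->config == VGIC_CONFIG_EDGE )
            return irq->pending_latch;

        return irq->pending_latch || irq->line_level;
    }

    /* Same shape as vgic_irq_is_mapped_level(). */
    static bool vgic_irq_is_mapped_level(const struct vgic_irq *irq)
    {
        return irq->config == VGIC_CONFIG_LEVEL && irq->hw;
    }

    int main(void)
    {
        struct vgic_irq spi = { .config = VGIC_CONFIG_LEVEL,
                                .line_level = true, .hw = true };

        printf("pending=%d mapped-level=%d\n",
               irq_is_pending(&spi), vgic_irq_is_mapped_level(&spi));
        return 0;
    }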
vgic-init.c
     74  INIT_LIST_HEAD(&irq->ap_list);       in vgic_vcpu_early_init()
     76  irq->intid = i;                      in vgic_vcpu_early_init()
     77  irq->vcpu = NULL;                    in vgic_vcpu_early_init()
     78  irq->target_vcpu = vcpu;             in vgic_vcpu_early_init()
     80  atomic_set(&irq->refcount, 0);       in vgic_vcpu_early_init()
     84  irq->enabled = 1;                    in vgic_vcpu_early_init()
    162  irq->vcpu = NULL;                    in domain_vgic_init()
    163  irq->target_vcpu = NULL;             in domain_vgic_init()
    166  irq->targets = 0;                    in domain_vgic_init()
    168  irq->mpidr = 0;                      in domain_vgic_init()
    [all …]
/xen/xen/arch/x86/
i8259.c
    103  if (irq & 8)                                                      in _disable_8259A_irq()
    107  per_cpu(vector_irq, 0)[LEGACY_VECTOR(irq)] = ~irq;                in _disable_8259A_irq()
    123  per_cpu(vector_irq, 0)[LEGACY_VECTOR(desc->irq)] = desc->irq;     in enable_8259A_irq()
    124  if (desc->irq & 8)                                                in enable_8259A_irq()
    133  unsigned int mask = 1<<irq;                                       in i8259A_irq_pending()
    138  if (irq < 8)                                                      in i8259A_irq_pending()
    176  int irqmask = 1<<irq;                                             in i8259A_irq_real()
    178  if (irq < 8) {                                                    in i8259A_irq_real()
    237  if (irq & 8) {                                                    in _mask_and_ack_8259A_irq()
    345  for (irq = 0; platform_legacy_irq(irq); irq++) {                  in init_IRQ()
    [all …]
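Most of the i8259.c hits rest on the same two facts about the legacy PIC pair: IRQs 0-7 are serviced by the master 8259A and IRQs 8-15 by the slave, so tests like "irq & 8" and "irq < 8" select the chip, while the low three bits select the mask/ISR bit within it. A small illustrative helper for that arithmetic (the names here are invented for the example):

    #include <stdbool.h>
    #include <stdio.h>

    /* The 8259A pair splits the 16 legacy IRQs between a master (IRQ 0-7)
     * and a slave (IRQ 8-15), which is why the excerpts above test
     * "irq & 8" to pick the chip and use the low bits for the mask. */
    struct pic_target {
        bool on_slave;       /* true when the line is handled by the slave PIC */
        unsigned int bit;    /* bit position inside that chip's 8-bit IMR/IRR  */
    };

    static struct pic_target legacy_irq_to_pic(unsigned int irq)
    {
        struct pic_target t = {
            .on_slave = (irq & 8) != 0,
            .bit      = irq & 7,
        };
        return t;
    }

    int main(void)
    {
        for ( unsigned int irq = 0; irq < 16; irq++ )
        {
            struct pic_target t = legacy_irq_to_pic(irq);
            printf("IRQ %2u -> %s PIC, mask bit %u\n",
                   irq, t.on_slave ? "slave" : "master", t.bit);
        }
        return 0;
    }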
irq.c
    132  .irq = irq,                                      in _trace_irq_mask()
    265  for (irq = nr_irqs_gsi; irq < nr_irqs; irq++)    in create_irq()
    429  for ( irq = 0; irq < nr_irqs_gsi; irq++ )        in init_irq_data()
    432  desc->irq = irq;                                 in init_irq_data()
    436  irq_to_desc(irq)->irq = irq;                     in init_irq_data()
    677  for ( irq = 0; irq < nr_irqs; ++irq )            in setup_vector_irq()
   1242  pirq->arch.irq = irq;                            in set_domain_irq_pirq()
   2469  for ( irq = 0; irq < nr_irqs; irq++ )            in dump_irqs()
   2551  for ( irq = 0; irq < nr_irqs; irq++ )            in fixup_irqs()
   2673  for ( irq = 0; irq < nr_irqs; irq++ )            in fixup_eoi()
    [all …]
io_apic.c
     86  if (irq < 0)                                      in share_vector_maps()
    558  int pin, irq;                                     in set_ioapic_affinity_irq() local
    561  irq = desc->irq;                                  in set_ioapic_affinity_irq()
    903  int irq, i;                                       in pin_2_irq() local
    936  irq = 0;                                          in pin_2_irq()
    941  return irq;                                       in pin_2_irq()
   1787  int irq;                                          in init_IO_APIC_traps() local
   1789  for (irq = 0; platform_legacy_irq(irq); irq++)    in init_IO_APIC_traps()
   1790  if (IO_APIC_IRQ(irq) && !irq_to_vector(irq))      in init_IO_APIC_traps()
   2373  pirq = (irq >= 256) ? irq : rte.vector;           in ioapic_guest_write()
    [all …]
/xen/xen/arch/arm/
irq.c
     44  printk("unexpected IRQ trap at irq %02x\n", irq->irq);   in ack_none()
     86  int irq;                                                 in init_irq_data() local
     88  for ( irq = NR_LOCAL_IRQS; irq < NR_IRQS; irq++ )        in init_irq_data()
     92  desc->irq = irq;                                         in init_irq_data()
    101  int irq;                                                 in init_local_irq_data() local
    105  for ( irq = 0; irq < NR_LOCAL_IRQS; irq++ )              in init_local_irq_data()
    109  desc->irq = irq;                                         in init_local_irq_data()
    128  int irq;                                                 in init_IRQ() local
    131  for ( irq = 0; irq < NR_LOCAL_IRQS; irq++ )              in init_IRQ()
    692  irq = dt_irq.irq;                                        in platform_get_irq()
    [all …]
vgic.c
     60  int rank = irq/32;               in vgic_rank_irq()
     73  p->irq = virq;                   in vgic_init_pending_irq()
    260  ASSERT(!is_lpi(irq));            in vgic_migrate_irq()
    264  p = irq_to_pending(old, irq);    in vgic_migrate_irq()
    341  unsigned int irq;                in vgic_disable_irqs() local
    350  irq = i + (32 * n);              in vgic_disable_irqs()
    390  unsigned int irq;                in vgic_enable_irqs() local
    400  irq = i + (32 * n);              in vgic_enable_irqs()
    416  ASSERT(irq >= 32);               in vgic_enable_irqs()
    491  if ( irq < 32 )                  in irq_to_pending()
    [all …]
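The older ARM vgic keeps per-interrupt distributor state in banks of 32-bit registers, one bit per interrupt, which is what "rank = irq/32" and "irq = i + (32 * n)" above encode: divide by 32 for the register (the "rank"), take the remainder for the bit, and invert the mapping when walking a register's bitmask. A tiny sketch of that mapping (helper names are illustrative, not Xen's):

    #include <stdio.h>

    /* One bit per interrupt, 32 interrupts per distributor register. */
    static unsigned int vgic_rank(unsigned int irq)   { return irq / 32; }
    static unsigned int vgic_bit(unsigned int irq)    { return irq % 32; }

    /* Inverse mapping: bit i of register n names interrupt i + 32 * n,
     * the "irq = i + (32 * n)" pattern in vgic_enable_irqs(). */
    static unsigned int vgic_irq_of(unsigned int n, unsigned int i)
    {
        return i + 32 * n;
    }

    int main(void)
    {
        unsigned int irq = 45;                    /* an SPI, for example */

        printf("IRQ %u -> rank %u, bit %u\n", irq, vgic_rank(irq), vgic_bit(irq));
        printf("rank 1, bit 13 -> IRQ %u\n", vgic_irq_of(1, 13));
        return 0;
    }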
domctl.c
     76  uint32_t irq = bind->u.spi.spi;                       in arch_do_domctl() local
     87  if ( irq != virq )                                    in arch_do_domctl()
     98  rc = xsm_map_domain_irq(XSM_HOOK, d, irq, NULL);      in arch_do_domctl()
    106  if ( !irq_access_permitted(current->domain, irq) )    in arch_do_domctl()
    112  rc = route_irq_to_guest(d, virq, irq, "routed IRQ");  in arch_do_domctl()
    122  uint32_t irq = bind->u.spi.spi;                       in arch_do_domctl() local
    130  if ( irq != virq )                                    in arch_do_domctl()
    137  if ( !irq_access_permitted(current->domain, irq) )    in arch_do_domctl()
gic.c
    123  ASSERT(desc->irq < gic_number_lines());/* Can't route interrupts that don't exist */   in gic_route_irq_to_xen()
    381  unsigned int irq;                                     in gic_interrupt() local
    385  irq = gic_hw_ops->read_irq();                         in gic_interrupt()
    387  if ( likely(irq >= 16 && irq < 1020) )                in gic_interrupt()
    390  do_IRQ(regs, irq, is_fiq);                            in gic_interrupt()
    392  else if ( is_lpi(irq) )                               in gic_interrupt()
    395  gic_hw_ops->do_LPI(irq);                              in gic_interrupt()
    397  else if ( unlikely(irq < 16) )                        in gic_interrupt()
    399  do_sgi(regs, irq);                                    in gic_interrupt()
    409  static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)   in maintenance_interrupt() argument
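gic_interrupt() classifies whatever INTID the CPU interface acknowledges: IDs 16-1019 are ordinary PPIs/SPIs handed to do_IRQ(), LPIs go to the LPI handler, IDs below 16 are SGIs, and the remaining special IDs (1020-1023) are effectively ignored. A standalone sketch of that classification; the LPI_OFFSET value of 8192 is the usual GICv3 first-LPI number and is assumed here rather than taken from the excerpt.

    #include <stdio.h>

    #define LPI_OFFSET 8192u   /* assumed: first LPI INTID per the GICv3 spec */

    /* Rough restatement of the dispatch in gic_interrupt(): SGIs (0-15) go to
     * do_sgi(), ordinary PPIs/SPIs (16-1019) to do_IRQ(), LPIs to the LPI
     * handler, and the special INTIDs fall through. */
    static const char *classify_intid(unsigned int irq)
    {
        if ( irq >= 16 && irq < 1020 )
            return "do_IRQ";
        else if ( irq >= LPI_OFFSET )
            return "do_LPI";
        else if ( irq < 16 )
            return "do_sgi";
        return "special/ignored";
    }

    int main(void)
    {
        unsigned int ids[] = { 7, 27, 1021, 8193 };

        for ( unsigned int i = 0; i < sizeof(ids) / sizeof(ids[0]); i++ )
            printf("INTID %4u -> %s\n", ids[i], classify_intid(ids[i]));
        return 0;
    }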
time.c
    223  static void timer_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)    in timer_interrupt() argument
    225  if ( irq == (timer_irq[TIMER_HYP_PPI]) &&                                         in timer_interrupt()
    235  if ( irq == (timer_irq[TIMER_PHYS_NONSECURE_PPI]) &&                              in timer_interrupt()
    246  static void vtimer_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)   in vtimer_interrupt() argument
    265  vgic_inject_irq(current->domain, current, current->arch.virt_timer.irq, true);    in vtimer_interrupt()
    279  static void check_timer_irq_cfg(unsigned int irq, const char *which)              in check_timer_irq_cfg() argument
    281  struct irq_desc *desc = irq_to_desc(irq);                                         in check_timer_irq_cfg()
    291  "WARNING: %s-timer IRQ%u is not level triggered.\n", which, irq);                 in check_timer_irq_cfg()
gic-vgic.c
     41  gic_hw_ops->update_lr(lr, p->irq, p->priority,        in gic_set_lr()
    126  if ( lr_val.virq == p->irq )                          in gic_find_unused_lr()
    166  int irq;                                              in gic_update_one_lr() local
    173  irq = lr_val.virq;                                    in gic_update_one_lr()
    174  p = irq_to_pending(v, irq);                           in gic_update_one_lr()
    184  ASSERT(is_lpi(irq));                                  in gic_update_one_lr()
    205  irq, v, i);                                           in gic_update_one_lr()
    214  irq, v, i);                                           in gic_update_one_lr()
    232  gic_raise_guest_irq(v, irq, p->priority);             in gic_update_one_lr()
    410  printk("Inflight irq=%u lr=%u\n", p->irq, p->lr);     in gic_dump_vgic_info()
    [all …]
/xen/xen/arch/x86/hvm/
vpic.c
     97  int irq;              in vpic_update_int_output() local
    103  irq);                 in vpic_update_int_output()
    159  int irq = -1;         in vpic_intack() local
    167  BUG_ON(irq < 0);      in vpic_intack()
    176  irq += 8;             in vpic_intack()
    181  return irq;           in vpic_intack()
    459  if ( irq == 2 )       in vpic_irq_positive_edge()
    477  if ( irq == 2 )       in vpic_irq_negative_edge()
    487  int irq;              in vpic_ack_pending_irq() local
    498  if ( irq == -1 )      in vpic_ack_pending_irq()
    [all …]
vpt.c
     86  return pt->irq;                              in pt_irq_vector()
     88  isa_irq = pt->irq;                           in pt_irq_vector()
    111  unsigned int gsi = pt->irq;                  in pt_irq_masked()
    131  gsi = hvm_isa_irq_to_gsi(pt->irq);           in pt_irq_masked()
    318  int irq, pt_vector = -1;                     in pt_update_irq() local
    356  irq = earliest_pt->irq;                      in pt_update_irq()
    369  vlapic_set_irq(vcpu_vlapic(v), irq, 0);      in pt_update_irq()
    370  pt_vector = irq;                             in pt_update_irq()
    374  hvm_isa_irq_deassert(v->domain, irq);        in pt_update_irq()
    377  hvm_isa_irq_assert(v->domain, irq, NULL);    in pt_update_irq()
    [all …]
/xen/xen/include/asm-x86/
irq.h
     17  #define IO_APIC_IRQ(irq) (platform_legacy_irq(irq) ? \             argument
     18  (1 << (irq)) & io_apic_irqs : \
     19  (irq) < nr_irqs_gsi)
     21  #define MSI_IRQ(irq) ((irq) >= nr_irqs_gsi && (irq) < nr_irqs)     argument
     23  #define LEGACY_VECTOR(irq) ((irq) + FIRST_LEGACY_VECTOR)           argument
     94  #define platform_legacy_irq(irq) ((irq) < 16)                      argument
    120  void make_8259A_irq(unsigned int irq);
    136  int irq;                                                           member
    165  void clear_irq_vector(int irq);
    167  int irq_to_vector(int irq);
    [all …]
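The asm-x86 header hits show how an IRQ number is classified on x86: IRQs below 16 are legacy 8259 lines (platform_legacy_irq), a legacy IRQ is routed through the IO-APIC only when its bit is set in io_apic_irqs, non-legacy GSIs run up to nr_irqs_gsi, and the range from nr_irqs_gsi to nr_irqs is used for MSI. A standalone restatement of those predicates; the three globals below are example values standing in for what Xen computes at boot.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for Xen's globals; the real values are set up at boot. */
    static unsigned int io_apic_irqs = 0xfffb;   /* example: every legacy IRQ but 2 */
    static unsigned int nr_irqs_gsi  = 48;       /* example GSI count */
    static unsigned int nr_irqs      = 96;       /* example total IRQ count */

    #define platform_legacy_irq(irq) ((irq) < 16)

    /* Same shape as the IO_APIC_IRQ() / MSI_IRQ() macros in asm-x86/irq.h. */
    #define IO_APIC_IRQ(irq) (platform_legacy_irq(irq) ?        \
                              (1u << (irq)) & io_apic_irqs :    \
                              (irq) < nr_irqs_gsi)
    #define MSI_IRQ(irq)     ((irq) >= nr_irqs_gsi && (irq) < nr_irqs)

    int main(void)
    {
        unsigned int samples[] = { 2, 9, 40, 60 };

        for ( unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++ )
        {
            unsigned int irq = samples[i];

            printf("IRQ %2u: legacy=%d io-apic=%d msi=%d\n", irq,
                   platform_legacy_irq(irq), !!IO_APIC_IRQ(irq), MSI_IRQ(irq));
        }
        return 0;
    }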
/xen/xen/include/asm-arm/
irq.h
     60  struct irq_desc *__irq_to_desc(int irq);
     62  #define irq_to_desc(irq) __irq_to_desc(irq)                          argument
     64  void do_IRQ(struct cpu_user_regs *regs, unsigned int irq, int is_fiq);
     66  static inline bool is_lpi(unsigned int irq)                         in is_lpi() argument
     68  return irq >= LPI_OFFSET;                                           in is_lpi()
     73  bool is_assignable_irq(unsigned int irq);
     79  unsigned int irq, const char *devname);
     80  int release_guest_irq(struct domain *d, unsigned int irq);
     89  int irq_set_type(unsigned int irq, unsigned int type);
new_vgic.h
     35  #define irq_is_ppi(irq) ((irq) >= VGIC_NR_SGIS && (irq) < VGIC_NR_PRIVATE_IRQS)   argument
     36  #define irq_is_spi(irq) ((irq) >= VGIC_NR_PRIVATE_IRQS && \                        argument
     37  (irq) <= VGIC_MAX_SPI)
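irq_is_ppi() and irq_is_spi() encode the standard GIC split of interrupt IDs: SGIs first, PPIs completing the per-CPU (private) range, then SPIs. A sketch with the conventional constants; the numeric values (16, 32, 1019) are the usual GIC figures and the irq_is_sgi() helper is added for completeness, so treat both as assumptions rather than quotes from the header.

    #include <stdio.h>

    /* Conventional GICv2/v3 ID ranges; assumed values, not copied from new_vgic.h. */
    #define VGIC_NR_SGIS          16    /* software-generated interrupts: IDs 0-15 */
    #define VGIC_NR_PRIVATE_IRQS  32    /* SGIs + PPIs: IDs 0-31 are per-CPU       */
    #define VGIC_MAX_SPI          1019  /* last shared peripheral interrupt ID     */

    #define irq_is_sgi(irq) ((irq) < VGIC_NR_SGIS)
    #define irq_is_ppi(irq) ((irq) >= VGIC_NR_SGIS && (irq) < VGIC_NR_PRIVATE_IRQS)
    #define irq_is_spi(irq) ((irq) >= VGIC_NR_PRIVATE_IRQS && (irq) <= VGIC_MAX_SPI)

    int main(void)
    {
        unsigned int ids[] = { 3, 27, 45, 1020 };

        for ( unsigned int i = 0; i < sizeof(ids) / sizeof(ids[0]); i++ )
            printf("INTID %4u: sgi=%d ppi=%d spi=%d\n", ids[i],
                   irq_is_sgi(ids[i]), irq_is_ppi(ids[i]), irq_is_spi(ids[i]));
        return 0;
    }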
/xen/xen/drivers/char/
cadence-uart.c
     33  unsigned int irq;                                                         member
     43  static void cuart_interrupt(int irq, void *data, struct cpu_user_regs *regs)   in cuart_interrupt() argument
     75  if ( uart->irq > 0 )                                                      in cuart_init_postirq()
     80  if ( (rc = setup_irq(uart->irq, 0, &uart->irqaction)) != 0 )              in cuart_init_postirq()
     81  printk("ERROR: Failed to allocate cadence-uart IRQ %d\n", uart->irq);     in cuart_init_postirq()
    133  return ( (uart->irq > 0) ? uart->irq : -1 );                              in cuart_irq()
    152  .irq = cuart_irq,
    182  uart->irq = res;                                                          in cuart_init()
pl011.c
     34  unsigned int irq;                                                         member
     63  static void pl011_interrupt(int irq, void *data, struct cpu_user_regs *regs)   in pl011_interrupt() argument
    131  if ( uart->irq > 0 )                                                      in pl011_init_postirq()
    136  if ( (rc = setup_irq(uart->irq, 0, &uart->irqaction)) != 0 )              in pl011_init_postirq()
    137  printk("ERROR: Failed to allocate pl011 IRQ %d\n", uart->irq);            in pl011_init_postirq()
    186  return ((uart->irq > 0) ? uart->irq : -1);                                in pl011_irq()
    219  .irq = pl011_irq,
    225  static int __init pl011_uart_init(int irq, u64 addr, u64 size, bool sbsa) in pl011_uart_init() argument
    230  uart->irq = irq;                                                          in pl011_uart_init()
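Both UART drivers follow the same post-IRQ initialisation shape visible above: register a handler only when the probed interrupt number is valid (uart->irq > 0), hand a struct irqaction to setup_irq(), and merely warn (falling back to polling) if that fails. A hedged sketch of that pattern; the structs and the setup_irq() stub below are simplified stand-ins so the example is self-contained, not the real Xen interfaces.

    #include <stdio.h>

    /* Stand-ins so the pattern compiles on its own; in Xen these are the
     * real struct irqaction and setup_irq() from xen/irq.h. */
    struct irqaction {
        void (*handler)(int irq, void *dev_id, void *regs);
        const char *name;
        void *dev_id;
    };

    static int setup_irq(unsigned int irq, unsigned int irqflags,
                         struct irqaction *action)
    {
        (void)irqflags; (void)action;
        return irq ? 0 : -1;                   /* stub: pretend registration worked */
    }

    struct my_uart {
        unsigned int irq;                      /* 0 means "no IRQ, poll instead" */
        struct irqaction irqaction;
    };

    static void my_uart_interrupt(int irq, void *data, void *regs)
    {
        (void)irq; (void)data; (void)regs;     /* a real handler drains the FIFOs */
    }

    /* Same shape as pl011_init_postirq() / cuart_init_postirq() above. */
    static void my_uart_init_postirq(struct my_uart *uart)
    {
        int rc;

        if ( uart->irq > 0 )
        {
            uart->irqaction.handler = my_uart_interrupt;
            uart->irqaction.name    = "my-uart";
            uart->irqaction.dev_id  = uart;

            if ( (rc = setup_irq(uart->irq, 0, &uart->irqaction)) != 0 )
                printf("ERROR: Failed to allocate my-uart IRQ %d\n", uart->irq);
        }
    }

    int main(void)
    {
        struct my_uart uart = { .irq = 37 };   /* hypothetical SPI number */

        my_uart_init_postirq(&uart);
        return 0;
    }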
/xen/xen/include/xen/
irq.h
     91  int irq;                                                                  member
    103  #define irq_to_desc(irq) (&irq_desc[irq])                                 argument
    111  extern int setup_irq(unsigned int irq, unsigned int irqflags,
    113  extern void release_irq(unsigned int irq, const void *dev_id);
    114  extern int request_irq(unsigned int irq, unsigned int irqflags,
    161  extern void pirq_set_affinity(struct domain *d, int irq, const cpumask_t *);
    163  struct domain *d, int irq, unsigned long *pflags);
device_tree.h
    131  unsigned int irq;                                                         member
    136  static inline bool_t dt_irq_is_level_triggered(const struct dt_irq *irq)  in dt_irq_is_level_triggered() argument
    138  unsigned int type = irq->type;                                            in dt_irq_is_level_triggered()
    158  #define dt_irq(irq) ((irq)->irq)                                          argument
    159  #define dt_irq_flags(irq) ((irq)->flags)                                  argument
    518  struct dt_irq *irq);
    531  struct dt_raw_irq *irq);
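struct dt_irq pairs the interrupt number with the trigger flags parsed from the device tree, and dt_irq_is_level_triggered() decides purely from the type field. The sketch below shows that split; the DT_IRQ_TYPE_* values follow the common device-tree binding and the predicate body is a plausible reconstruction (the excerpt only shows it reading irq->type), so treat both as assumptions.

    #include <stdbool.h>
    #include <stdio.h>

    /* Usual device-tree trigger encoding (assumed, mirrors the common binding). */
    #define DT_IRQ_TYPE_EDGE_RISING   0x1
    #define DT_IRQ_TYPE_EDGE_FALLING  0x2
    #define DT_IRQ_TYPE_LEVEL_HIGH    0x4
    #define DT_IRQ_TYPE_LEVEL_LOW     0x8

    /* Same split as struct dt_irq in device_tree.h: number plus trigger type. */
    struct dt_irq {
        unsigned int irq;
        unsigned int type;
    };

    /* One plausible body for dt_irq_is_level_triggered(); the mask below is
     * an assumption, not copied from the header. */
    static bool dt_irq_is_level_triggered(const struct dt_irq *irq)
    {
        unsigned int type = irq->type;

        return (type & (DT_IRQ_TYPE_LEVEL_HIGH | DT_IRQ_TYPE_LEVEL_LOW)) != 0;
    }

    int main(void)
    {
        struct dt_irq timer = { .irq = 27, .type = DT_IRQ_TYPE_LEVEL_LOW };

        printf("IRQ %u level-triggered: %d\n", timer.irq,
               dt_irq_is_level_triggered(&timer));
        return 0;
    }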
/xen/xen/include/asm-x86/hvm/
vpic.h
     33  void vpic_irq_positive_edge(struct domain *d, int irq);
     34  void vpic_irq_negative_edge(struct domain *d, int irq);
     38  int is_periodic_irq(struct vcpu *v, int irq, int type);