Lines matching refs: irq_data (all hits below are in kernel/irq/manage.c)

40 struct irq_data *irqd = irq_desc_get_irq_data(desc); in __synchronize_hardirq()
50 while (irqd_irq_inprogress(&desc->irq_data)) in __synchronize_hardirq()
55 inprogress = irqd_irq_inprogress(&desc->irq_data); in __synchronize_hardirq()
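The __synchronize_hardirq() hits above are the core's wait for an in-flight hard IRQ handler: IRQD_IRQ_INPROGRESS is polled lock-free, then re-read under the descriptor lock. A minimal sketch of that loop, leaving out the pending-move and thread bookkeeping of the real function (the helper name is illustrative):

    #include <linux/irq.h>
    #include <linux/irqdesc.h>
    #include <linux/spinlock.h>

    /* Simplified sketch: wait until no hard IRQ handler is in progress. */
    static void example_wait_hardirq_done(struct irq_desc *desc)
    {
            unsigned long flags;
            bool inprogress;

            do {
                    /* Poll without the lock so the handler can finish. */
                    while (irqd_irq_inprogress(&desc->irq_data))
                            cpu_relax();

                    /* Re-check under the descriptor lock for a stable view. */
                    raw_spin_lock_irqsave(&desc->lock, flags);
                    inprogress = irqd_irq_inprogress(&desc->irq_data);
                    raw_spin_unlock_irqrestore(&desc->lock, flags);
            } while (inprogress);
    }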
148 if (!desc || !irqd_can_balance(&desc->irq_data) || in __irq_can_set_affinity()
149 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) in __irq_can_set_affinity()
176 !irqd_affinity_is_managed(&desc->irq_data); in irq_can_set_affinity_usr()
198 static void irq_validate_effective_affinity(struct irq_data *data) in irq_validate_effective_affinity()
209 static inline void irq_init_effective_affinity(struct irq_data *data, in irq_init_effective_affinity()
215 static inline void irq_validate_effective_affinity(struct irq_data *data) { } in irq_validate_effective_affinity()
216 static inline void irq_init_effective_affinity(struct irq_data *data, in irq_init_effective_affinity()
220 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, in irq_do_set_affinity()
284 static inline int irq_set_affinity_pending(struct irq_data *data, in irq_set_affinity_pending()
294 static inline int irq_set_affinity_pending(struct irq_data *data, in irq_set_affinity_pending()
301 static int irq_try_set_affinity(struct irq_data *data, in irq_try_set_affinity()
316 static bool irq_set_affinity_deactivated(struct irq_data *data, in irq_set_affinity_deactivated()
340 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, in irq_set_affinity_locked()
408 if (irqd_is_started(&desc->irq_data)) { in irq_update_affinity_desc()
414 if (irqd_affinity_is_managed(&desc->irq_data)) { in irq_update_affinity_desc()
423 activated = irqd_is_activated(&desc->irq_data); in irq_update_affinity_desc()
425 irq_domain_deactivate_irq(&desc->irq_data); in irq_update_affinity_desc()
428 irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED); in irq_update_affinity_desc()
429 irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN); in irq_update_affinity_desc()
436 irq_domain_activate_irq(&desc->irq_data, false); in irq_update_affinity_desc()
517 if (irq_move_pending(&desc->irq_data)) in irq_affinity_notify()
598 if (irqd_affinity_is_managed(&desc->irq_data) || in irq_setup_affinity()
599 irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { in irq_setup_affinity()
604 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); in irq_setup_affinity()
618 ret = irq_do_set_affinity(&desc->irq_data, &mask, false); in irq_setup_affinity()
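The affinity references above converge on irq_do_set_affinity(), which hands the requested mask to the chip's irq_set_affinity() callback and, depending on the return value, records it as the interrupt's affinity mask. Driver code normally reaches this path through the public irq_set_affinity() helper; a small usage sketch (the CPU choice and function name are illustrative):

    #include <linux/interrupt.h>
    #include <linux/cpumask.h>
    #include <linux/printk.h>

    /* Hypothetical example: steer an interrupt to CPU 2. */
    static int example_pin_irq_to_cpu2(unsigned int irq)
    {
            /* Ends up in irq_set_affinity_locked() -> irq_do_set_affinity(). */
            int ret = irq_set_affinity(irq, cpumask_of(2));

            if (ret)
                    pr_warn("IRQ %u: affinity update failed (%d)\n", irq, ret);
            return ret;
    }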
647 struct irq_data *data; in irq_set_vcpu_affinity()
815 if (WARN(!desc->irq_data.chip, in enable_irq()
847 if (desc->irq_data.chip->irq_set_wake) in set_irq_wake_real()
848 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); in set_irq_wake_real()
896 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); in irq_set_irq_wake()
906 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); in irq_set_irq_wake()
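set_irq_wake_real() calls the chip's irq_set_wake() only when the chip provides it, and irq_set_irq_wake() tracks the armed state in IRQD_WAKEUP_STATE alongside the descriptor's wake_depth count. From a driver, the usual pattern is to arm the line as a wake source on suspend and disarm it on resume; a hypothetical sketch:

    #include <linux/interrupt.h>

    /* Hypothetical suspend/resume hooks around a wake-capable interrupt. */
    static int example_suspend(unsigned int irq)
    {
            /* Sets IRQD_WAKEUP_STATE via irq_set_irq_wake() on success. */
            return irq_set_irq_wake(irq, 1);
    }

    static int example_resume(unsigned int irq)
    {
            /* Clears IRQD_WAKEUP_STATE once wake_depth drops back to zero. */
            return irq_set_irq_wake(irq, 0);
    }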
941 struct irq_chip *chip = desc->irq_data.chip; in __irq_set_trigger()
956 if (!irqd_irq_masked(&desc->irq_data)) in __irq_set_trigger()
958 if (!irqd_irq_disabled(&desc->irq_data)) in __irq_set_trigger()
964 ret = chip->irq_set_type(&desc->irq_data, flags); in __irq_set_trigger()
969 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); in __irq_set_trigger()
970 irqd_set(&desc->irq_data, flags); in __irq_set_trigger()
974 flags = irqd_get_trigger_type(&desc->irq_data); in __irq_set_trigger()
976 irqd_clear(&desc->irq_data, IRQD_LEVEL); in __irq_set_trigger()
980 irqd_set(&desc->irq_data, IRQD_LEVEL); in __irq_set_trigger()
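__irq_set_trigger() is where IRQF_TRIGGER_* flags reach the hardware: the chip's irq_set_type() callback runs (with the line masked around the call for chips that request it), and on success the resulting type is cached in the irq_data's IRQD_TRIGGER_MASK bits, with IRQD_LEVEL tracking level vs. edge. The consumer side of that contract is the irqchip callback; a hypothetical, simplified one:

    #include <linux/irq.h>
    #include <linux/errno.h>

    /* Hypothetical irqchip callback: program the trigger type in hardware.
     * On IRQ_SET_MASK_OK the core caches the type in the irq_data
     * (IRQD_TRIGGER_MASK / IRQD_LEVEL). */
    static int example_chip_set_type(struct irq_data *d, unsigned int type)
    {
            switch (type & IRQ_TYPE_SENSE_MASK) {
            case IRQ_TYPE_EDGE_RISING:
            case IRQ_TYPE_EDGE_FALLING:
            case IRQ_TYPE_EDGE_BOTH:
                    /* ... write edge configuration to the controller ... */
                    irq_set_handler_locked(d, handle_edge_irq);
                    return IRQ_SET_MASK_OK;
            case IRQ_TYPE_LEVEL_HIGH:
            case IRQ_TYPE_LEVEL_LOW:
                    /* ... write level configuration to the controller ... */
                    irq_set_handler_locked(d, handle_level_irq);
                    return IRQ_SET_MASK_OK;
            default:
                    return -EINVAL;
            }
    }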
1091 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) { in irq_finalize_oneshot()
1108 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && in irq_finalize_oneshot()
1109 irqd_irq_masked(&desc->irq_data)) in irq_finalize_oneshot()
1147 m = irq_data_get_effective_affinity_mask(&desc->irq_data); in irq_thread_check_affinity()
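irq_thread_check_affinity() is the threaded-handler side of the affinity machinery: it reads the effective affinity mask out of desc->irq_data and moves the IRQ thread onto it. A simplified sketch of that step, without the descriptor locking and IRQTF_AFFINITY bookkeeping of the real function:

    #include <linux/irq.h>
    #include <linux/irqdesc.h>
    #include <linux/cpumask.h>
    #include <linux/sched.h>
    #include <linux/gfp.h>

    /* Simplified sketch: follow the interrupt's effective affinity with the
     * handling thread ('current' is assumed to be the IRQ thread). */
    static void example_follow_irq_affinity(struct irq_desc *desc)
    {
            const struct cpumask *m;
            cpumask_var_t mask;

            if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                    return;

            m = irq_data_get_effective_affinity_mask(&desc->irq_data);
            cpumask_copy(mask, m);
            set_cpus_allowed_ptr(current, mask);
            free_cpumask_var(mask);
    }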
1365 struct irq_data *d = &desc->irq_data; in irq_request_resources()
1373 struct irq_data *d = &desc->irq_data; in irq_release_resources()
1382 struct irq_data *d = irq_desc_get_irq_data(desc); in irq_supports_nmi()
1398 struct irq_data *d = irq_desc_get_irq_data(desc); in irq_nmi_setup()
1406 struct irq_data *d = irq_desc_get_irq_data(desc); in irq_nmi_teardown()
1472 if (desc->irq_data.chip == &no_irq_chip) in __setup_irq()
1484 new->flags |= irqd_get_trigger_type(&desc->irq_data); in __setup_irq()
1535 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE) in __setup_irq()
1559 new->name, irq, desc->irq_data.chip->name); in __setup_irq()
1586 new->name, irq, desc->irq_data.chip->name); in __setup_irq()
1595 if (irqd_trigger_type_was_set(&desc->irq_data)) { in __setup_irq()
1596 oldtype = irqd_get_trigger_type(&desc->irq_data); in __setup_irq()
1599 irqd_set_trigger_type(&desc->irq_data, oldtype); in __setup_irq()
1663 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) { in __setup_irq()
1714 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); in __setup_irq()
1717 irqd_set(&desc->irq_data, IRQD_PER_CPU); in __setup_irq()
1732 irqd_set(&desc->irq_data, IRQD_NO_BALANCING); in __setup_irq()
1752 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data); in __setup_irq()
1842 unsigned irq = desc->irq_data.irq; in __free_irq()
1960 irq_domain_deactivate_irq(&desc->irq_data); in __free_irq()
1970 irq_chip_pm_put(&desc->irq_data); in __free_irq()
2038 irq_chip_pm_put(&desc->irq_data); in __cleanup_nmi()
2166 retval = irq_chip_pm_get(&desc->irq_data); in request_threaded_irq()
2175 irq_chip_pm_put(&desc->irq_data); in request_threaded_irq()
2309 retval = irq_chip_pm_get(&desc->irq_data); in request_nmi()
2333 irq_chip_pm_put(&desc->irq_data); in request_nmi()
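request_threaded_irq() and request_nmi() both take a runtime PM reference on the irqchip with irq_chip_pm_get(&desc->irq_data) before calling __setup_irq(), and drop it with irq_chip_pm_put() if setup fails; the teardown references above (__free_irq(), __cleanup_nmi(), and later __free_percpu_irq()) drop it when the action is removed. From the caller's side this is hidden behind the request API; a hypothetical threaded-IRQ request for reference:

    #include <linux/interrupt.h>

    /* Hypothetical handlers for a threaded interrupt. */
    static irqreturn_t example_hardirq(int irq, void *dev_id)
    {
            /* Quick check in hard IRQ context, defer the real work. */
            return IRQ_WAKE_THREAD;
    }

    static irqreturn_t example_thread_fn(int irq, void *dev_id)
    {
            /* Heavy lifting runs in the IRQ thread. */
            return IRQ_HANDLED;
    }

    static int example_request(unsigned int irq, void *dev_id)
    {
            /* Internally: irq_chip_pm_get() -> __setup_irq(), with the PM
             * reference dropped again if __setup_irq() fails. */
            return request_threaded_irq(irq, example_hardirq, example_thread_fn,
                                        IRQF_ONESHOT, "example", dev_id);
    }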
2355 type = irqd_get_trigger_type(&desc->irq_data); in enable_percpu_irq()
2460 irq_chip_pm_put(&desc->irq_data); in __free_percpu_irq()
2537 retval = irq_chip_pm_get(&desc->irq_data); in setup_percpu_irq()
2544 irq_chip_pm_put(&desc->irq_data); in setup_percpu_irq()
2594 retval = irq_chip_pm_get(&desc->irq_data); in __request_percpu_irq()
2603 irq_chip_pm_put(&desc->irq_data); in __request_percpu_irq()
2665 retval = irq_chip_pm_get(&desc->irq_data); in request_percpu_nmi()
2680 irq_chip_pm_put(&desc->irq_data); in request_percpu_nmi()
2763 int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which, in __irq_get_irqchip_state()
2804 struct irq_data *data; in irq_get_irqchip_state()
2837 struct irq_data *data; in irq_set_irqchip_state()
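The final group implements the irqchip state accessors: irq_get_irqchip_state() and irq_set_irqchip_state() look up the irq_data for the line, and __irq_get_irqchip_state() steps toward parent irq_data (when hierarchical irq domains are enabled) until a chip provides the state callback. A short caller-side sketch (function name is illustrative):

    #include <linux/interrupt.h>
    #include <linux/irq.h>

    /* Ask the irqchip whether an interrupt is currently pending. */
    static bool example_irq_is_pending(unsigned int irq)
    {
            bool pending = false;

            /* Returns 0 on success; non-zero when the state cannot be read,
             * e.g. no chip in the hierarchy implements the callback. */
            if (irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending))
                    return false;

            return pending;
    }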