Lines Matching refs:pwq (kernel/workqueue.c)

377 static void show_pwq(struct pool_workqueue *pwq);
443 #define for_each_pwq(pwq, wq) \ argument
444 list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node, \
638 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq, in set_work_pwq() argument
641 set_work_data(work, (unsigned long)pwq, in set_work_pwq()
1106 static void get_pwq(struct pool_workqueue *pwq) in get_pwq() argument
1108 lockdep_assert_held(&pwq->pool->lock); in get_pwq()
1109 WARN_ON_ONCE(pwq->refcnt <= 0); in get_pwq()
1110 pwq->refcnt++; in get_pwq()
1120 static void put_pwq(struct pool_workqueue *pwq) in put_pwq() argument
1122 lockdep_assert_held(&pwq->pool->lock); in put_pwq()
1123 if (likely(--pwq->refcnt)) in put_pwq()
1125 if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND))) in put_pwq()
1135 schedule_work(&pwq->unbound_release_work); in put_pwq()
1144 static void put_pwq_unlocked(struct pool_workqueue *pwq) in put_pwq_unlocked() argument
1146 if (pwq) { in put_pwq_unlocked()
1151 raw_spin_lock_irq(&pwq->pool->lock); in put_pwq_unlocked()
1152 put_pwq(pwq); in put_pwq_unlocked()
1153 raw_spin_unlock_irq(&pwq->pool->lock); in put_pwq_unlocked()
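
The three helpers above implement the pwq reference count: it may only be touched while holding the owning pool's lock, and the final put defers destruction to a work item (schedule_work(&pwq->unbound_release_work)) rather than freeing inline. A minimal userspace sketch of that pattern, with hypothetical names (pwq_model, model_get, ...) and a pthread mutex standing in for the pool lock:

    /* Userspace model of get_pwq()/put_pwq(): refcount only touched
     * under the pool lock; the last put schedules deferred release
     * instead of freeing inline. All names here are hypothetical. */
    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    struct pool { pthread_mutex_t lock; };
    struct pwq_model {
        struct pool *pool;
        int refcnt;                 /* protected by pool->lock */
    };

    static void model_get(struct pwq_model *p)
    {
        /* caller must hold p->pool->lock, like lockdep_assert_held() */
        assert(p->refcnt > 0);
        p->refcnt++;
    }

    static void model_put(struct pwq_model *p)
    {
        if (--p->refcnt)
            return;
        /* last reference: defer the actual release, don't free here */
        printf("refcnt hit 0: scheduling deferred release\n");
    }

    static void model_put_unlocked(struct pwq_model *p)
    {
        if (p) {
            pthread_mutex_lock(&p->pool->lock);
            model_put(p);
            pthread_mutex_unlock(&p->pool->lock);
        }
    }

    int main(void)
    {
        struct pool pool = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct pwq_model p = { .pool = &pool, .refcnt = 1 };

        pthread_mutex_lock(&pool.lock);
        model_get(&p);
        model_put(&p);              /* back to 1, nothing happens */
        pthread_mutex_unlock(&pool.lock);

        model_put_unlocked(&p);     /* drops the base reference */
        return 0;
    }
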
1159 struct pool_workqueue *pwq = get_work_pwq(work); in pwq_activate_inactive_work() local
1162 if (list_empty(&pwq->pool->worklist)) in pwq_activate_inactive_work()
1163 pwq->pool->watchdog_ts = jiffies; in pwq_activate_inactive_work()
1164 move_linked_works(work, &pwq->pool->worklist, NULL); in pwq_activate_inactive_work()
1166 pwq->nr_active++; in pwq_activate_inactive_work()
1169 static void pwq_activate_first_inactive(struct pool_workqueue *pwq) in pwq_activate_first_inactive() argument
1171 struct work_struct *work = list_first_entry(&pwq->inactive_works, in pwq_activate_first_inactive()
1188 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data) in pwq_dec_nr_in_flight() argument
1193 pwq->nr_active--; in pwq_dec_nr_in_flight()
1194 if (!list_empty(&pwq->inactive_works)) { in pwq_dec_nr_in_flight()
1196 if (pwq->nr_active < pwq->max_active) in pwq_dec_nr_in_flight()
1197 pwq_activate_first_inactive(pwq); in pwq_dec_nr_in_flight()
1201 pwq->nr_in_flight[color]--; in pwq_dec_nr_in_flight()
1204 if (likely(pwq->flush_color != color)) in pwq_dec_nr_in_flight()
1208 if (pwq->nr_in_flight[color]) in pwq_dec_nr_in_flight()
1212 pwq->flush_color = -1; in pwq_dec_nr_in_flight()
1218 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush)) in pwq_dec_nr_in_flight()
1219 complete(&pwq->wq->first_flusher->done); in pwq_dec_nr_in_flight()
1221 put_pwq(pwq); in pwq_dec_nr_in_flight()
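
pwq_dec_nr_in_flight() is where flush accounting retires: each work item carries the color it was queued under, nr_in_flight[color] counts outstanding items per color, and a flusher completes once its chosen color drains to zero. A single-threaded sketch of that bookkeeping, assuming hypothetical names and omitting locking:

    /* Model of the flush-color accounting behind pwq_dec_nr_in_flight():
     * works keep the color current at queue time; a flusher waits for
     * nr_in_flight[flush_color] to reach zero. Hypothetical names. */
    #include <stdio.h>

    #define NR_COLORS 16

    struct pwq_model {
        int nr_in_flight[NR_COLORS];
        int work_color;             /* color given to newly queued work */
        int flush_color;            /* color being flushed, -1 if none */
    };

    static void model_queue(struct pwq_model *p)
    {
        p->nr_in_flight[p->work_color]++;
    }

    static void model_complete(struct pwq_model *p, int color)
    {
        if (--p->nr_in_flight[color])
            return;
        if (color == p->flush_color) {
            p->flush_color = -1;
            printf("flush of color %d complete\n", color);
        }
    }

    int main(void)
    {
        struct pwq_model p = { .work_color = 0, .flush_color = -1 };

        model_queue(&p);
        model_queue(&p);

        /* start a flush: freeze the old color, advance work_color */
        p.flush_color = p.work_color;
        p.work_color = (p.work_color + 1) % NR_COLORS;

        model_queue(&p);                    /* gets the new color */
        model_complete(&p, p.flush_color);  /* one old work retires */
        model_complete(&p, p.flush_color);  /* last one: flush done */
        return 0;
    }
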
1258 struct pool_workqueue *pwq; in try_to_grab_pending() local
1297 pwq = get_work_pwq(work); in try_to_grab_pending()
1298 if (pwq && pwq->pool == pool) { in try_to_grab_pending()
1316 pwq_dec_nr_in_flight(pwq, *work_data_bits(work)); in try_to_grab_pending()
1348 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, in insert_work() argument
1351 struct worker_pool *pool = pwq->pool; in insert_work()
1357 set_work_pwq(work, pwq, extra_flags); in insert_work()
1359 get_pwq(pwq); in insert_work()
1424 struct pool_workqueue *pwq; in __queue_work() local
1449 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); in __queue_work()
1453 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); in __queue_work()
1462 if (last_pool && last_pool != pwq->pool) { in __queue_work()
1470 pwq = worker->current_pwq; in __queue_work()
1474 raw_spin_lock(&pwq->pool->lock); in __queue_work()
1477 raw_spin_lock(&pwq->pool->lock); in __queue_work()
1488 if (unlikely(!pwq->refcnt)) { in __queue_work()
1490 raw_spin_unlock(&pwq->pool->lock); in __queue_work()
1500 trace_workqueue_queue_work(req_cpu, pwq, work); in __queue_work()
1505 pwq->nr_in_flight[pwq->work_color]++; in __queue_work()
1506 work_flags = work_color_to_flags(pwq->work_color); in __queue_work()
1508 if (likely(pwq->nr_active < pwq->max_active)) { in __queue_work()
1510 pwq->nr_active++; in __queue_work()
1511 worklist = &pwq->pool->worklist; in __queue_work()
1513 pwq->pool->watchdog_ts = jiffies; in __queue_work()
1516 worklist = &pwq->inactive_works; in __queue_work()
1520 insert_work(pwq, work, worklist, work_flags); in __queue_work()
1523 raw_spin_unlock(&pwq->pool->lock); in __queue_work()
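
__queue_work() is internal; work reaches it through the public API. A minimal kernel-module sketch of that path, assuming hypothetical names (my_wq, my_work_fn): INIT_WORK() initializes the item and queue_work() hands it to the pwq selected above.

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *my_wq;
    static struct work_struct my_work;

    static void my_work_fn(struct work_struct *work)
    {
        pr_info("my_work_fn ran\n");
    }

    static int __init my_init(void)
    {
        my_wq = alloc_workqueue("my_wq", 0, 0);  /* max_active 0 = default */
        if (!my_wq)
            return -ENOMEM;
        INIT_WORK(&my_work, my_work_fn);
        queue_work(my_wq, &my_work);             /* funnels into __queue_work() */
        return 0;
    }

    static void __exit my_exit(void)
    {
        cancel_work_sync(&my_work);
        destroy_workqueue(my_wq);
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");
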
2041 struct pool_workqueue *pwq = get_work_pwq(work); in send_mayday() local
2042 struct workqueue_struct *wq = pwq->wq; in send_mayday()
2050 if (list_empty(&pwq->mayday_node)) { in send_mayday()
2056 get_pwq(pwq); in send_mayday()
2057 list_add_tail(&pwq->mayday_node, &wq->maydays); in send_mayday()
2194 struct pool_workqueue *pwq = get_work_pwq(work); in process_one_work() local
2196 bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE; in process_one_work()
2232 worker->current_pwq = pwq; in process_one_work()
2240 strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN); in process_one_work()
2273 lock_map_acquire(&pwq->wq->lockdep_map); in process_one_work()
2305 lock_map_release(&pwq->wq->lockdep_map); in process_one_work()
2341 pwq_dec_nr_in_flight(pwq, work_data); in process_one_work()
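
The WQ_CPU_INTENSIVE test at line 2196 above comes from the workqueue's creation flags; such works are excluded from the per-cpu pool's concurrency management while they run. A hedged allocation sketch (queue name hypothetical):

    #include <linux/workqueue.h>

    static struct workqueue_struct *crunch_wq;

    static int setup_cpu_intensive(void)
    {
        /* long-running CPU-bound works shouldn't stall the pool */
        crunch_wq = alloc_workqueue("crunch_wq", WQ_CPU_INTENSIVE, 0);
        return crunch_wq ? 0 : -ENOMEM;
    }
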
2522 struct pool_workqueue *pwq = list_first_entry(&wq->maydays, in rescuer_thread() local
2524 struct worker_pool *pool = pwq->pool; in rescuer_thread()
2529 list_del_init(&pwq->mayday_node); in rescuer_thread()
2543 if (get_work_pwq(work) == pwq) { in rescuer_thread()
2563 if (pwq->nr_active && need_to_create_worker(pool)) { in rescuer_thread()
2569 if (wq->rescuer && list_empty(&pwq->mayday_node)) { in rescuer_thread()
2570 get_pwq(pwq); in rescuer_thread()
2571 list_add_tail(&pwq->mayday_node, &wq->maydays); in rescuer_thread()
2581 put_pwq(pwq); in rescuer_thread()
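
send_mayday() and the rescuer only matter for workqueues created with WQ_MEM_RECLAIM, which is what gives wq->rescuer a non-NULL value and guarantees forward progress when worker creation stalls under memory pressure. A hedged allocation sketch (name hypothetical):

    #include <linux/workqueue.h>

    static struct workqueue_struct *writeback_wq;

    static int setup_reclaim_wq(void)
    {
        /* WQ_MEM_RECLAIM creates the rescuer thread used above */
        writeback_wq = alloc_workqueue("writeback_wq", WQ_MEM_RECLAIM, 0);
        return writeback_wq ? 0 : -ENOMEM;
    }
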
2680 static void insert_wq_barrier(struct pool_workqueue *pwq, in insert_wq_barrier() argument
2721 pwq->nr_in_flight[work_color]++; in insert_wq_barrier()
2725 insert_work(pwq, &barr->work, head, work_flags); in insert_wq_barrier()
2763 struct pool_workqueue *pwq; in flush_workqueue_prep_pwqs() local
2770 for_each_pwq(pwq, wq) { in flush_workqueue_prep_pwqs()
2771 struct worker_pool *pool = pwq->pool; in flush_workqueue_prep_pwqs()
2776 WARN_ON_ONCE(pwq->flush_color != -1); in flush_workqueue_prep_pwqs()
2778 if (pwq->nr_in_flight[flush_color]) { in flush_workqueue_prep_pwqs()
2779 pwq->flush_color = flush_color; in flush_workqueue_prep_pwqs()
2786 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color)); in flush_workqueue_prep_pwqs()
2787 pwq->work_color = work_color; in flush_workqueue_prep_pwqs()
2971 struct pool_workqueue *pwq; in drain_workqueue() local
2987 for_each_pwq(pwq, wq) { in drain_workqueue()
2990 raw_spin_lock_irq(&pwq->pool->lock); in drain_workqueue()
2991 drained = !pwq->nr_active && list_empty(&pwq->inactive_works); in drain_workqueue()
2992 raw_spin_unlock_irq(&pwq->pool->lock); in drain_workqueue()
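
drain_workqueue() loops until every pwq shows no active and no inactive works (the condition at line 2991). A short sketch contrasting it with flush_workqueue(), assuming a hypothetical my_wq: flush waits for everything queued so far (one color cycle), drain also catches works that requeue themselves.

    #include <linux/workqueue.h>

    static void quiesce(struct workqueue_struct *my_wq)
    {
        flush_workqueue(my_wq);   /* wait for currently queued work */
        drain_workqueue(my_wq);   /* also catch self-requeueing work */
    }
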
3017 struct pool_workqueue *pwq; in start_flush_work() local
3030 pwq = get_work_pwq(work); in start_flush_work()
3031 if (pwq) { in start_flush_work()
3032 if (unlikely(pwq->pool != pool)) in start_flush_work()
3038 pwq = worker->current_pwq; in start_flush_work()
3041 check_flush_dependency(pwq->wq, work); in start_flush_work()
3043 insert_wq_barrier(pwq, barr, work, worker); in start_flush_work()
3056 (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) { in start_flush_work()
3057 lock_map_acquire(&pwq->wq->lockdep_map); in start_flush_work()
3058 lock_map_release(&pwq->wq->lockdep_map); in start_flush_work()
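
start_flush_work() backs the public flush_work(), inserting a barrier work after the target and waiting for it; the lockdep acquire/release at lines 3057-3058 flags flushing from within the same single-threaded or rescuer-equipped queue, which could deadlock. A hedged usage sketch (my_work hypothetical):

    #include <linux/printk.h>
    #include <linux/workqueue.h>

    static void wait_for(struct work_struct *my_work)
    {
        /* returns true if the work was pending/running and was waited on */
        if (flush_work(my_work))
            pr_info("work has now finished\n");
    }
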
3706 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, in pwq_unbound_release_workfn() local
3708 struct workqueue_struct *wq = pwq->wq; in pwq_unbound_release_workfn()
3709 struct worker_pool *pool = pwq->pool; in pwq_unbound_release_workfn()
3716 if (!list_empty(&pwq->pwqs_node)) { in pwq_unbound_release_workfn()
3721 list_del_rcu(&pwq->pwqs_node); in pwq_unbound_release_workfn()
3730 call_rcu(&pwq->rcu, rcu_free_pwq); in pwq_unbound_release_workfn()
3750 static void pwq_adjust_max_active(struct pool_workqueue *pwq) in pwq_adjust_max_active() argument
3752 struct workqueue_struct *wq = pwq->wq; in pwq_adjust_max_active()
3760 if (!freezable && pwq->max_active == wq->saved_max_active) in pwq_adjust_max_active()
3764 raw_spin_lock_irqsave(&pwq->pool->lock, flags); in pwq_adjust_max_active()
3774 pwq->max_active = wq->saved_max_active; in pwq_adjust_max_active()
3776 while (!list_empty(&pwq->inactive_works) && in pwq_adjust_max_active()
3777 pwq->nr_active < pwq->max_active) { in pwq_adjust_max_active()
3778 pwq_activate_first_inactive(pwq); in pwq_adjust_max_active()
3789 wake_up_worker(pwq->pool); in pwq_adjust_max_active()
3791 pwq->max_active = 0; in pwq_adjust_max_active()
3794 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); in pwq_adjust_max_active()
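
pwq_adjust_max_active() reconciles a pwq with wq->saved_max_active: thawing restores the limit and promotes inactive works up to it, freezing clamps max_active to 0 so nothing new starts. A single-threaded userspace sketch with hypothetical names:

    /* Model of pwq_adjust_max_active(); a counter stands in for the
     * inactive_works list, and no locking is shown. */
    #include <stdio.h>

    struct pwq_model {
        int nr_active;
        int max_active;
        int saved_max_active;
        int nr_inactive;            /* stand-in for inactive_works */
    };

    static void adjust(struct pwq_model *p, int frozen)
    {
        if (!frozen) {
            p->max_active = p->saved_max_active;
            while (p->nr_inactive && p->nr_active < p->max_active) {
                p->nr_inactive--;
                p->nr_active++;     /* pwq_activate_first_inactive() */
            }
        } else {
            p->max_active = 0;      /* freezing: admit nothing new */
        }
    }

    int main(void)
    {
        struct pwq_model p = { .nr_active = 0, .saved_max_active = 2,
                               .nr_inactive = 3 };
        adjust(&p, 0);
        printf("active=%d inactive=%d max=%d\n",
               p.nr_active, p.nr_inactive, p.max_active);  /* 2 1 2 */
        return 0;
    }
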
3798 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, in init_pwq() argument
3801 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK); in init_pwq()
3803 memset(pwq, 0, sizeof(*pwq)); in init_pwq()
3805 pwq->pool = pool; in init_pwq()
3806 pwq->wq = wq; in init_pwq()
3807 pwq->flush_color = -1; in init_pwq()
3808 pwq->refcnt = 1; in init_pwq()
3809 INIT_LIST_HEAD(&pwq->inactive_works); in init_pwq()
3810 INIT_LIST_HEAD(&pwq->pwqs_node); in init_pwq()
3811 INIT_LIST_HEAD(&pwq->mayday_node); in init_pwq()
3812 INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn); in init_pwq()
3816 static void link_pwq(struct pool_workqueue *pwq) in link_pwq() argument
3818 struct workqueue_struct *wq = pwq->wq; in link_pwq()
3823 if (!list_empty(&pwq->pwqs_node)) in link_pwq()
3827 pwq->work_color = wq->work_color; in link_pwq()
3830 pwq_adjust_max_active(pwq); in link_pwq()
3833 list_add_rcu(&pwq->pwqs_node, &wq->pwqs); in link_pwq()
3841 struct pool_workqueue *pwq; in alloc_unbound_pwq() local
3849 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node); in alloc_unbound_pwq()
3850 if (!pwq) { in alloc_unbound_pwq()
3855 init_pwq(pwq, wq, pool); in alloc_unbound_pwq()
3856 return pwq; in alloc_unbound_pwq()
3914 struct pool_workqueue *pwq) in numa_pwq_tbl_install() argument
3922 link_pwq(pwq); in numa_pwq_tbl_install()
3925 rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq); in numa_pwq_tbl_install()
4147 struct pool_workqueue *old_pwq = NULL, *pwq; in wq_update_unbound_numa() local
4166 pwq = unbound_pwq_by_node(wq, node); in wq_update_unbound_numa()
4175 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask)) in wq_update_unbound_numa()
4182 pwq = alloc_unbound_pwq(wq, target_attrs); in wq_update_unbound_numa()
4183 if (!pwq) { in wq_update_unbound_numa()
4191 old_pwq = numa_pwq_tbl_install(wq, node, pwq); in wq_update_unbound_numa()
4216 struct pool_workqueue *pwq = in alloc_and_link_pwqs() local
4221 init_pwq(pwq, wq, &cpu_pools[highpri]); in alloc_and_link_pwqs()
4224 link_pwq(pwq); in alloc_and_link_pwqs()
4296 struct pool_workqueue *pwq; in alloc_workqueue() local
4363 for_each_pwq(pwq, wq) in alloc_workqueue()
4364 pwq_adjust_max_active(pwq); in alloc_workqueue()
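
alloc_and_link_pwqs() decides which pwqs exist from the alloc_workqueue() flags: the default yields one pwq per CPU (per_cpu_ptr(wq->cpu_pwqs, cpu)), WQ_UNBOUND yields per-NUMA-node pwqs, and WQ_HIGHPRI selects the high-priority per-cpu pool. A hedged sketch (queue names hypothetical):

    #include <linux/workqueue.h>

    static struct workqueue_struct *percpu_wq, *unbound_wq, *highpri_wq;

    static int make_queues(void)
    {
        percpu_wq  = alloc_workqueue("percpu_wq", 0, 0);
        unbound_wq = alloc_workqueue("unbound_wq", WQ_UNBOUND, 0);
        highpri_wq = alloc_workqueue("highpri_wq", WQ_HIGHPRI, 0);
        if (!percpu_wq || !unbound_wq || !highpri_wq)
            return -ENOMEM;
        return 0;
    }
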
4386 static bool pwq_busy(struct pool_workqueue *pwq) in pwq_busy() argument
4391 if (pwq->nr_in_flight[i]) in pwq_busy()
4394 if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1)) in pwq_busy()
4396 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) in pwq_busy()
4410 struct pool_workqueue *pwq; in destroy_workqueue() local
4442 for_each_pwq(pwq, wq) { in destroy_workqueue()
4443 raw_spin_lock_irq(&pwq->pool->lock); in destroy_workqueue()
4444 if (WARN_ON(pwq_busy(pwq))) { in destroy_workqueue()
4447 show_pwq(pwq); in destroy_workqueue()
4448 raw_spin_unlock_irq(&pwq->pool->lock); in destroy_workqueue()
4454 raw_spin_unlock_irq(&pwq->pool->lock); in destroy_workqueue()
4479 pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]); in destroy_workqueue()
4481 put_pwq_unlocked(pwq); in destroy_workqueue()
4488 pwq = wq->dfl_pwq; in destroy_workqueue()
4490 put_pwq_unlocked(pwq); in destroy_workqueue()
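
destroy_workqueue() drains the queue and then WARNs through pwq_busy() if anything is still in flight, so callers must silence every requeue source first. A hedged teardown sketch with hypothetical names:

    #include <linux/workqueue.h>

    static void teardown(struct workqueue_struct *my_wq,
                         struct delayed_work *my_dwork)
    {
        cancel_delayed_work_sync(my_dwork); /* stop requeue sources first */
        destroy_workqueue(my_wq);           /* drains, then frees pwqs */
    }
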
4507 struct pool_workqueue *pwq; in workqueue_set_max_active() local
4520 for_each_pwq(pwq, wq) in workqueue_set_max_active()
4521 pwq_adjust_max_active(pwq); in workqueue_set_max_active()
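
workqueue_set_max_active() stores the new limit in saved_max_active and reruns pwq_adjust_max_active() on every pwq, as the two lines above show. A hedged usage sketch (my_wq hypothetical):

    #include <linux/workqueue.h>

    static void throttle(struct workqueue_struct *my_wq)
    {
        workqueue_set_max_active(my_wq, 1); /* at most one active work per pwq */
    }
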
4578 struct pool_workqueue *pwq; in workqueue_congested() local
4588 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); in workqueue_congested()
4590 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); in workqueue_congested()
4592 ret = !list_empty(&pwq->inactive_works); in workqueue_congested()
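
workqueue_congested() reports whether the pwq serving a CPU has works parked on inactive_works, i.e. it has hit max_active. A hedged sketch using the real WORK_CPU_UNBOUND selector (my_wq hypothetical):

    #include <linux/workqueue.h>

    static bool should_defer(struct workqueue_struct *my_wq)
    {
        /* true if the local pwq has backed-up inactive works */
        return workqueue_congested(WORK_CPU_UNBOUND, my_wq);
    }
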
4675 struct pool_workqueue *pwq = NULL; in print_worker_info() local
4693 copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq)); in print_worker_info()
4694 copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq)); in print_worker_info()
4728 static void show_pwq(struct pool_workqueue *pwq) in show_pwq() argument
4730 struct worker_pool *pool = pwq->pool; in show_pwq()
4740 pwq->nr_active, pwq->max_active, pwq->refcnt, in show_pwq()
4741 !list_empty(&pwq->mayday_node) ? " MAYDAY" : ""); in show_pwq()
4744 if (worker->current_pwq == pwq) { in show_pwq()
4754 if (worker->current_pwq != pwq) in show_pwq()
4769 if (get_work_pwq(work) == pwq) { in show_pwq()
4779 if (get_work_pwq(work) != pwq) in show_pwq()
4788 if (!list_empty(&pwq->inactive_works)) { in show_pwq()
4792 list_for_each_entry(work, &pwq->inactive_works, entry) { in show_pwq()
4806 struct pool_workqueue *pwq; in show_one_workqueue() local
4810 for_each_pwq(pwq, wq) { in show_one_workqueue()
4811 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { in show_one_workqueue()
4821 for_each_pwq(pwq, wq) { in show_one_workqueue()
4822 raw_spin_lock_irqsave(&pwq->pool->lock, flags); in show_one_workqueue()
4823 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { in show_one_workqueue()
4830 show_pwq(pwq); in show_one_workqueue()
4833 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); in show_one_workqueue()
5259 struct pool_workqueue *pwq; in freeze_workqueues_begin() local
5268 for_each_pwq(pwq, wq) in freeze_workqueues_begin()
5269 pwq_adjust_max_active(pwq); in freeze_workqueues_begin()
5293 struct pool_workqueue *pwq; in freeze_workqueues_busy() local
5307 for_each_pwq(pwq, wq) { in freeze_workqueues_busy()
5308 WARN_ON_ONCE(pwq->nr_active < 0); in freeze_workqueues_busy()
5309 if (pwq->nr_active) { in freeze_workqueues_busy()
5334 struct pool_workqueue *pwq; in thaw_workqueues() local
5346 for_each_pwq(pwq, wq) in thaw_workqueues()
5347 pwq_adjust_max_active(pwq); in thaw_workqueues()
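
Only WQ_FREEZABLE workqueues take part in the freeze cycle above: freeze_workqueues_begin() clamps their max_active to 0 through pwq_adjust_max_active(), freeze_workqueues_busy() polls nr_active, and thaw_workqueues() restores the limits. A hedged allocation sketch (name hypothetical):

    #include <linux/workqueue.h>

    static struct workqueue_struct *suspend_safe_wq;

    static int setup_freezable(void)
    {
        /* works here are held off while the system freezes for suspend */
        suspend_safe_wq = alloc_workqueue("suspend_safe_wq", WQ_FREEZABLE, 0);
        return suspend_safe_wq ? 0 : -ENOMEM;
    }
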