Lines matching refs:worker — cross-reference hits for struct io_worker in the kernel's io_uring worker-pool code (fs/io-wq.c). Each hit is prefixed with its line number in that file and suffixed with the enclosing function; the trailing argument/local tags are the indexer's classification of the matched symbol.
140 static void io_wqe_dec_running(struct io_worker *worker);
147 static bool io_worker_get(struct io_worker *worker) in io_worker_get() argument
149 return refcount_inc_not_zero(&worker->ref); in io_worker_get()
152 static void io_worker_release(struct io_worker *worker) in io_worker_release() argument
154 if (refcount_dec_and_test(&worker->ref)) in io_worker_release()
155 complete(&worker->ref_done); in io_worker_release()
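The two helpers above are the worker's lifetime primitives: io_worker_get() takes a reference only while the count is still non-zero, and io_worker_release() completes ref_done on the final put so the exit path can wait out any stragglers. A minimal userspace sketch of the same pattern, with C11 atomics and a POSIX semaphore standing in for refcount_t and struct completion (the struct layout and names below are illustrative, not the kernel's):

#include <semaphore.h>
#include <stdatomic.h>
#include <stdbool.h>

struct worker {
	atomic_uint ref;    /* stands in for the kernel's refcount_t */
	sem_t ref_done;     /* stands in for struct completion       */
};

/* Like io_worker_get(): succeed only while ref is non-zero. */
static bool worker_get(struct worker *w)
{
	unsigned int old = atomic_load(&w->ref);

	while (old != 0)
		if (atomic_compare_exchange_weak(&w->ref, &old, old + 1))
			return true;
	return false;
}

/* Like io_worker_release(): the final put wakes the exiting thread. */
static void worker_release(struct worker *w)
{
	if (atomic_fetch_sub(&w->ref, 1) == 1)
		sem_post(&w->ref_done);
}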
169 static inline struct io_wqe_acct *io_wqe_get_acct(struct io_worker *worker) in io_wqe_get_acct() argument
171 return io_get_acct(worker->wqe, worker->flags & IO_WORKER_F_BOUND); in io_wqe_get_acct()
180 static void io_worker_cancel_cb(struct io_worker *worker) in io_worker_cancel_cb() argument
182 struct io_wqe_acct *acct = io_wqe_get_acct(worker); in io_worker_cancel_cb()
183 struct io_wqe *wqe = worker->wqe; in io_worker_cancel_cb()
187 raw_spin_lock(&worker->wqe->lock); in io_worker_cancel_cb()
189 raw_spin_unlock(&worker->wqe->lock); in io_worker_cancel_cb()
191 clear_bit_unlock(0, &worker->create_state); in io_worker_cancel_cb()
192 io_worker_release(worker); in io_worker_cancel_cb()
197 struct io_worker *worker; in io_task_worker_match() local
201 worker = container_of(cb, struct io_worker, create_work); in io_task_worker_match()
202 return worker == data; in io_task_worker_match()
205 static void io_worker_exit(struct io_worker *worker) in io_worker_exit() argument
207 struct io_wqe *wqe = worker->wqe; in io_worker_exit()
212 io_task_worker_match, worker); in io_worker_exit()
216 io_worker_cancel_cb(worker); in io_worker_exit()
219 io_worker_release(worker); in io_worker_exit()
220 wait_for_completion(&worker->ref_done); in io_worker_exit()
223 if (worker->flags & IO_WORKER_F_FREE) in io_worker_exit()
224 hlist_nulls_del_rcu(&worker->nulls_node); in io_worker_exit()
225 list_del_rcu(&worker->all_list); in io_worker_exit()
227 io_wqe_dec_running(worker); in io_worker_exit()
228 worker->flags = 0; in io_worker_exit()
233 kfree_rcu(worker, rcu); in io_worker_exit()
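io_worker_exit() pairs with those primitives: it cancels any queued create callback, drops the reference the worker holds on itself, and waits until every transient io_worker_get() has been matched by a release before unlinking and freeing. Continuing the hypothetical sketch above:

/* Teardown in the style of io_worker_exit(): drop the self-reference,
 * then block until all transient worker_get() references are gone. */
static void worker_exit(struct worker *w)
{
	worker_release(w);       /* the reference taken at creation     */
	sem_wait(&w->ref_done);  /* like wait_for_completion(&ref_done) */
	/* no other thread can hold the pointer now: safe to free */
}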
255 struct io_worker *worker; in io_wqe_activate_free_worker() local
262 hlist_nulls_for_each_entry_rcu(worker, n, &wqe->free_list, nulls_node) { in io_wqe_activate_free_worker()
263 if (!io_worker_get(worker)) in io_wqe_activate_free_worker()
265 if (io_wqe_get_acct(worker) != acct) { in io_wqe_activate_free_worker()
266 io_worker_release(worker); in io_wqe_activate_free_worker()
269 if (wake_up_process(worker->task)) { in io_wqe_activate_free_worker()
270 io_worker_release(worker); in io_wqe_activate_free_worker()
273 io_worker_release(worker); in io_wqe_activate_free_worker()
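io_wqe_activate_free_worker() scans the free list with a pin/test/release discipline: each candidate is pinned via io_worker_get() before its fields are trusted, and released whichever way the test goes. A hedged sketch of that loop, with the RCU nulls-list replaced by a plain array, the acct check reduced to an int tag, and a condition variable standing in for wake_up_process() (all names invented):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct free_worker {
	atomic_uint ref;
	int acct;               /* accounting class (bound/unbound) tag */
	pthread_cond_t wake;    /* stands in for wake_up_process()      */
};

static bool fw_get(struct free_worker *w)
{
	unsigned int old = atomic_load(&w->ref);

	while (old != 0)
		if (atomic_compare_exchange_weak(&w->ref, &old, old + 1))
			return true;
	return false;
}

static void fw_put(struct free_worker *w)
{
	atomic_fetch_sub(&w->ref, 1);   /* final-put handling elided */
}

/* Pin, test, release: wake one free worker that serves 'acct'. */
static bool activate_free_worker(struct free_worker **list, size_t n,
				 int acct)
{
	for (size_t i = 0; i < n; i++) {
		struct free_worker *w = list[i];

		if (!fw_get(w))         /* already on its way out: skip */
			continue;
		if (w->acct != acct) {  /* wrong class: unpin, move on  */
			fw_put(w);
			continue;
		}
		pthread_cond_signal(&w->wake);
		fw_put(w);
		return true;
	}
	return false;
}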
304 static void io_wqe_inc_running(struct io_worker *worker) in io_wqe_inc_running() argument
306 struct io_wqe_acct *acct = io_wqe_get_acct(worker); in io_wqe_inc_running()
313 struct io_worker *worker; in create_worker_cb() local
319 worker = container_of(cb, struct io_worker, create_work); in create_worker_cb()
320 wqe = worker->wqe; in create_worker_cb()
322 acct = &wqe->acct[worker->create_index]; in create_worker_cb()
330 create_io_worker(wq, wqe, worker->create_index); in create_worker_cb()
335 clear_bit_unlock(0, &worker->create_state); in create_worker_cb()
336 io_worker_release(worker); in create_worker_cb()
339 static bool io_queue_worker_create(struct io_worker *worker, in io_queue_worker_create() argument
343 struct io_wqe *wqe = worker->wqe; in io_queue_worker_create()
349 if (!io_worker_get(worker)) in io_queue_worker_create()
357 if (test_bit(0, &worker->create_state) || in io_queue_worker_create()
358 test_and_set_bit_lock(0, &worker->create_state)) in io_queue_worker_create()
362 init_task_work(&worker->create_work, func); in io_queue_worker_create()
363 worker->create_index = acct->index; in io_queue_worker_create()
364 if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) { in io_queue_worker_create()
377 clear_bit_unlock(0, &worker->create_state); in io_queue_worker_create()
379 io_worker_release(worker); in io_queue_worker_create()
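io_queue_worker_create() allows only one in-flight creation request per worker: test_bit() is a cheap racy pre-check, and test_and_set_bit_lock() on bit 0 of create_state is the real admission control, undone later by clear_bit_unlock(). The same single-flight gate in userspace, with atomic_flag playing the bit (names illustrative):

#include <stdatomic.h>
#include <stdbool.h>

struct worker_create {
	atomic_flag create_pending;   /* initialise with ATOMIC_FLAG_INIT */
};

/* True if the caller won the right to queue a creation request;
 * acts like test_and_set_bit_lock(0, &worker->create_state). */
static bool create_state_trylock(struct worker_create *w)
{
	return !atomic_flag_test_and_set_explicit(&w->create_pending,
						  memory_order_acquire);
}

/* Pairs with clear_bit_unlock(): admit the next request. */
static void create_state_unlock(struct worker_create *w)
{
	atomic_flag_clear_explicit(&w->create_pending,
				   memory_order_release);
}

The kernel's extra test_bit() pre-check merely skips the locked read-modify-write when a request is already pending; the test-and-set is what decides the race.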
386 static void io_wqe_dec_running(struct io_worker *worker) in io_wqe_dec_running() argument
389 struct io_wqe_acct *acct = io_wqe_get_acct(worker); in io_wqe_dec_running()
390 struct io_wqe *wqe = worker->wqe; in io_wqe_dec_running()
392 if (!(worker->flags & IO_WORKER_F_UP)) in io_wqe_dec_running()
399 io_queue_worker_create(worker, acct, create_worker_cb); in io_wqe_dec_running()
408 static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker, in __io_worker_busy() argument
412 if (worker->flags & IO_WORKER_F_FREE) { in __io_worker_busy()
413 worker->flags &= ~IO_WORKER_F_FREE; in __io_worker_busy()
414 hlist_nulls_del_init_rcu(&worker->nulls_node); in __io_worker_busy()
425 static void __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker) in __io_worker_idle() argument
428 if (!(worker->flags & IO_WORKER_F_FREE)) { in __io_worker_idle()
429 worker->flags |= IO_WORKER_F_FREE; in __io_worker_idle()
430 hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list); in __io_worker_idle()
458 struct io_worker *worker) in io_get_next_work() argument
464 struct io_wqe *wqe = worker->wqe; in io_get_next_work()
524 static void io_assign_current_work(struct io_worker *worker, in io_assign_current_work() argument
532 spin_lock(&worker->lock); in io_assign_current_work()
533 worker->cur_work = work; in io_assign_current_work()
534 spin_unlock(&worker->lock); in io_assign_current_work()
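io_assign_current_work() publishes which item the worker is executing under worker->lock, so a concurrent cancellation pass (io_wq_worker_cancel(), further down) can inspect cur_work safely. The publishing half in miniature, with a pthread mutex standing in for the spinlock (types invented):

#include <pthread.h>
#include <stddef.h>

struct work;    /* opaque work item, like struct io_wq_work */

struct wstate {
	pthread_mutex_t lock;    /* stands in for worker->lock */
	struct work *cur_work;   /* item currently executing   */
};

/* Publish the in-flight item (NULL between items) under the lock
 * shared with the cancellation side. */
static void assign_current_work(struct wstate *w, struct work *work)
{
	pthread_mutex_lock(&w->lock);
	w->cur_work = work;
	pthread_mutex_unlock(&w->lock);
}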
539 static void io_worker_handle_work(struct io_worker *worker) in io_worker_handle_work() argument
542 struct io_wqe_acct *acct = io_wqe_get_acct(worker); in io_worker_handle_work()
543 struct io_wqe *wqe = worker->wqe; in io_worker_handle_work()
557 work = io_get_next_work(acct, worker); in io_worker_handle_work()
559 __io_worker_busy(wqe, worker, work); in io_worker_handle_work()
564 io_assign_current_work(worker, work); in io_worker_handle_work()
577 io_assign_current_work(worker, NULL); in io_worker_handle_work()
585 io_assign_current_work(worker, work); in io_worker_handle_work()
611 struct io_worker *worker = data; in io_wqe_worker() local
612 struct io_wqe_acct *acct = io_wqe_get_acct(worker); in io_wqe_worker()
613 struct io_wqe *wqe = worker->wqe; in io_wqe_worker()
618 worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING); in io_wqe_worker()
632 io_worker_handle_work(worker); in io_wqe_worker()
643 __io_worker_idle(wqe, worker); in io_wqe_worker()
660 io_worker_handle_work(worker); in io_wqe_worker()
664 io_worker_exit(worker); in io_wqe_worker()
673 struct io_worker *worker = tsk->pf_io_worker; in io_wq_worker_running() local
675 if (!worker) in io_wq_worker_running()
677 if (!(worker->flags & IO_WORKER_F_UP)) in io_wq_worker_running()
679 if (worker->flags & IO_WORKER_F_RUNNING) in io_wq_worker_running()
681 worker->flags |= IO_WORKER_F_RUNNING; in io_wq_worker_running()
682 io_wqe_inc_running(worker); in io_wq_worker_running()
691 struct io_worker *worker = tsk->pf_io_worker; in io_wq_worker_sleeping() local
693 if (!worker) in io_wq_worker_sleeping()
695 if (!(worker->flags & IO_WORKER_F_UP)) in io_wq_worker_sleeping()
697 if (!(worker->flags & IO_WORKER_F_RUNNING)) in io_wq_worker_sleeping()
700 worker->flags &= ~IO_WORKER_F_RUNNING; in io_wq_worker_sleeping()
702 raw_spin_lock(&worker->wqe->lock); in io_wq_worker_sleeping()
703 io_wqe_dec_running(worker); in io_wq_worker_sleeping()
704 raw_spin_unlock(&worker->wqe->lock); in io_wq_worker_sleeping()
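io_wq_worker_running() and io_wq_worker_sleeping() are the hooks invoked as the worker's task is scheduled in or prepares to block. The IO_WORKER_F_RUNNING transitions gate the accounting so each blocking episode is counted exactly once, and the decrement side (io_wqe_dec_running(), lines 386-399 above) is where a replacement worker gets queued if work is still pending. A toy version of the flag-gated counting (the wqe->lock taken around the kernel's decrement is omitted; all names invented):

#include <stdatomic.h>

#define WORKER_F_UP      (1u << 0)
#define WORKER_F_RUNNING (1u << 1)

struct run_acct { atomic_int nr_running; };

struct hook_worker {
	unsigned int flags;      /* flipped only by the owning task */
	struct run_acct *acct;
};

/* Called as the worker task is scheduled back in. */
static void worker_running(struct hook_worker *w)
{
	if (!(w->flags & WORKER_F_UP))
		return;
	if (w->flags & WORKER_F_RUNNING)
		return;                  /* already counted as running */
	w->flags |= WORKER_F_RUNNING;
	atomic_fetch_add(&w->acct->nr_running, 1);
}

/* Called as the worker task is about to block. */
static void worker_sleeping(struct hook_worker *w)
{
	if (!(w->flags & WORKER_F_UP))
		return;
	if (!(w->flags & WORKER_F_RUNNING))
		return;                  /* already counted as sleeping */
	w->flags &= ~WORKER_F_RUNNING;
	if (atomic_fetch_sub(&w->acct->nr_running, 1) == 1) {
		/* last running worker just blocked: this is the point
		 * where the kernel considers creating a replacement */
	}
}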
707 static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker, in io_init_new_worker() argument
710 tsk->pf_io_worker = worker; in io_init_new_worker()
711 worker->task = tsk; in io_init_new_worker()
716 hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list); in io_init_new_worker()
717 list_add_tail_rcu(&worker->all_list, &wqe->all_list); in io_init_new_worker()
718 worker->flags |= IO_WORKER_F_FREE; in io_init_new_worker()
750 struct io_worker *worker; in create_worker_cont() local
754 worker = container_of(cb, struct io_worker, create_work); in create_worker_cont()
755 clear_bit_unlock(0, &worker->create_state); in create_worker_cont()
756 wqe = worker->wqe; in create_worker_cont()
757 tsk = create_io_thread(io_wqe_worker, worker, wqe->node); in create_worker_cont()
759 io_init_new_worker(wqe, worker, tsk); in create_worker_cont()
760 io_worker_release(worker); in create_worker_cont()
763 struct io_wqe_acct *acct = io_wqe_get_acct(worker); in create_worker_cont()
779 kfree(worker); in create_worker_cont()
784 io_worker_release(worker); in create_worker_cont()
785 schedule_work(&worker->work); in create_worker_cont()
790 struct io_worker *worker = container_of(work, struct io_worker, work); in io_workqueue_create() local
791 struct io_wqe_acct *acct = io_wqe_get_acct(worker); in io_workqueue_create()
793 if (!io_queue_worker_create(worker, acct, create_worker_cont)) in io_workqueue_create()
794 kfree(worker); in io_workqueue_create()
800 struct io_worker *worker; in create_io_worker() local
805 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node); in create_io_worker()
806 if (!worker) { in create_io_worker()
816 refcount_set(&worker->ref, 1); in create_io_worker()
817 worker->wqe = wqe; in create_io_worker()
818 spin_lock_init(&worker->lock); in create_io_worker()
819 init_completion(&worker->ref_done); in create_io_worker()
822 worker->flags |= IO_WORKER_F_BOUND; in create_io_worker()
824 tsk = create_io_thread(io_wqe_worker, worker, wqe->node); in create_io_worker()
826 io_init_new_worker(wqe, worker, tsk); in create_io_worker()
828 kfree(worker); in create_io_worker()
831 INIT_WORK(&worker->work, io_workqueue_create); in create_io_worker()
832 schedule_work(&worker->work); in create_io_worker()
846 struct io_worker *worker; in io_wq_for_each_worker() local
849 list_for_each_entry_rcu(worker, &wqe->all_list, all_list) { in io_wq_for_each_worker()
850 if (io_worker_get(worker)) { in io_wq_for_each_worker()
852 if (worker->task) in io_wq_for_each_worker()
853 ret = func(worker, data); in io_wq_for_each_worker()
854 io_worker_release(worker); in io_wq_for_each_worker()
863 static bool io_wq_worker_wake(struct io_worker *worker, void *data) in io_wq_worker_wake() argument
865 set_notify_signal(worker->task); in io_wq_worker_wake()
866 wake_up_process(worker->task); in io_wq_worker_wake()
976 static bool io_wq_worker_cancel(struct io_worker *worker, void *data) in io_wq_worker_cancel() argument
984 spin_lock(&worker->lock); in io_wq_worker_cancel()
985 if (worker->cur_work && in io_wq_worker_cancel()
986 match->fn(worker->cur_work, match->data)) { in io_wq_worker_cancel()
987 set_notify_signal(worker->task); in io_wq_worker_cancel()
990 spin_unlock(&worker->lock); in io_wq_worker_cancel()
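io_wq_worker_cancel() is the per-worker half of cancellation: under worker->lock it compares cur_work against a caller-supplied match function and, on a hit, nudges the task with set_notify_signal(). A simplified self-contained sketch (the kernel also counts matches and may stop the scan early depending on the cancel-all mode; the notification is modeled as a plain flag, and all names are invented):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct work;    /* opaque work item */

struct cworker {
	pthread_mutex_t lock;
	struct work *cur_work;
	atomic_bool notify;     /* stands in for set_notify_signal() */
};

struct cancel_match {
	bool (*fn)(struct work *, void *);
	void *data;
};

/* Runs on the cancelling thread: if the in-flight item matches, ask
 * the worker to abort its current wait. */
static bool worker_cancel(struct cworker *w, struct cancel_match *m)
{
	pthread_mutex_lock(&w->lock);
	if (w->cur_work && m->fn(w->cur_work, m->data))
		atomic_store(&w->notify, true);
	pthread_mutex_unlock(&w->lock);
	return false;   /* keep scanning the remaining workers */
}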
1201 struct io_worker *worker; in io_task_work_match() local
1205 worker = container_of(cb, struct io_worker, create_work); in io_task_work_match()
1206 return worker->wqe->wq == data; in io_task_work_match()
1219 struct io_worker *worker; in io_wq_cancel_tw_create() local
1221 worker = container_of(cb, struct io_worker, create_work); in io_wq_cancel_tw_create()
1222 io_worker_cancel_cb(worker); in io_wq_cancel_tw_create()
1287 static bool io_wq_worker_affinity(struct io_worker *worker, void *data) in io_wq_worker_affinity() argument
1292 cpumask_set_cpu(od->cpu, worker->wqe->cpu_mask); in io_wq_worker_affinity()
1294 cpumask_clear_cpu(od->cpu, worker->wqe->cpu_mask); in io_wq_worker_affinity()
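io_wq_worker_affinity() is the io_wq_for_each_worker() callback run on CPU hotplug events: it sets or clears the affected CPU in the wqe's mask and returns false so the iteration visits every worker. A userspace analogue with cpu_set_t (pthread_setaffinity_np is a GNU extension; note the kernel callback only edits the mask, so actually applying it to the task, as done below, is an addition for illustration):

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

struct affinity_update {
	int cpu;
	bool online;    /* direction: CPU coming online or going away */
};

/* Per-worker callback: mirror one CPU's hotplug state into the mask. */
static bool worker_affinity(pthread_t task, cpu_set_t *mask,
			    struct affinity_update *od)
{
	if (od->online)
		CPU_SET(od->cpu, mask);
	else
		CPU_CLR(od->cpu, mask);
	pthread_setaffinity_np(task, sizeof(*mask), mask);
	return false;   /* like the kernel: keep iterating workers */
}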