Lines matching refs:wqe
48 struct io_wqe *wqe; member
139 static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index);
141 static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
158 static inline struct io_wqe_acct *io_get_acct(struct io_wqe *wqe, bool bound) in io_get_acct() argument
160 return &wqe->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND]; in io_get_acct()
163 static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe, in io_work_get_acct() argument
166 return io_get_acct(wqe, !(work->flags & IO_WQ_WORK_UNBOUND)); in io_work_get_acct()
171 return io_get_acct(worker->wqe, worker->flags & IO_WORKER_F_BOUND); in io_wqe_get_acct()
183 struct io_wqe *wqe = worker->wqe; in io_worker_cancel_cb() local
184 struct io_wq *wq = wqe->wq; in io_worker_cancel_cb()
187 raw_spin_lock(&worker->wqe->lock); in io_worker_cancel_cb()
189 raw_spin_unlock(&worker->wqe->lock); in io_worker_cancel_cb()
207 struct io_wqe *wqe = worker->wqe; in io_worker_exit() local
208 struct io_wq *wq = wqe->wq; in io_worker_exit()
222 raw_spin_lock(&wqe->lock); in io_worker_exit()
231 raw_spin_unlock(&wqe->lock); in io_worker_exit()
234 io_worker_ref_put(wqe->wq); in io_worker_exit()
250 static bool io_wqe_activate_free_worker(struct io_wqe *wqe, in io_wqe_activate_free_worker() argument
262 hlist_nulls_for_each_entry_rcu(worker, n, &wqe->free_list, nulls_node) { in io_wqe_activate_free_worker()
283 static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct) in io_wqe_create_worker() argument
292 raw_spin_lock(&wqe->lock); in io_wqe_create_worker()
294 raw_spin_unlock(&wqe->lock); in io_wqe_create_worker()
298 raw_spin_unlock(&wqe->lock); in io_wqe_create_worker()
300 atomic_inc(&wqe->wq->worker_refs); in io_wqe_create_worker()
301 return create_io_worker(wqe->wq, wqe, acct->index); in io_wqe_create_worker()
315 struct io_wqe *wqe; in create_worker_cb() local
320 wqe = worker->wqe; in create_worker_cb()
321 wq = wqe->wq; in create_worker_cb()
322 acct = &wqe->acct[worker->create_index]; in create_worker_cb()
323 raw_spin_lock(&wqe->lock); in create_worker_cb()
328 raw_spin_unlock(&wqe->lock); in create_worker_cb()
330 create_io_worker(wq, wqe, worker->create_index); in create_worker_cb()
343 struct io_wqe *wqe = worker->wqe; in io_queue_worker_create() local
344 struct io_wq *wq = wqe->wq; in io_queue_worker_create()
387 __must_hold(wqe->lock) in io_wqe_dec_running()
390 struct io_wqe *wqe = worker->wqe; in io_wqe_dec_running() local
397 atomic_inc(&wqe->wq->worker_refs); in io_wqe_dec_running()
398 raw_spin_unlock(&wqe->lock); in io_wqe_dec_running()
400 raw_spin_lock(&wqe->lock); in io_wqe_dec_running()
408 static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker, in __io_worker_busy() argument
410 __must_hold(wqe->lock) in __io_worker_busy()
425 static void __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker) in __io_worker_idle() argument
426 __must_hold(wqe->lock) in __io_worker_idle()
430 hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list); in __io_worker_idle()
439 static bool io_wait_on_hash(struct io_wqe *wqe, unsigned int hash) in io_wait_on_hash() argument
441 struct io_wq *wq = wqe->wq; in io_wait_on_hash()
445 if (list_empty(&wqe->wait.entry)) { in io_wait_on_hash()
446 __add_wait_queue(&wq->hash->wait, &wqe->wait); in io_wait_on_hash()
449 list_del_init(&wqe->wait.entry); in io_wait_on_hash()
459 __must_hold(wqe->lock) in io_get_next_work()
464 struct io_wqe *wqe = worker->wqe; in io_get_next_work() local
479 tail = wqe->hash_tail[hash]; in io_get_next_work()
482 if (!test_and_set_bit(hash, &wqe->wq->hash->map)) { in io_get_next_work()
483 wqe->hash_tail[hash] = NULL; in io_get_next_work()
501 raw_spin_unlock(&wqe->lock); in io_get_next_work()
502 unstalled = io_wait_on_hash(wqe, stall_hash); in io_get_next_work()
503 raw_spin_lock(&wqe->lock); in io_get_next_work()
506 if (wq_has_sleeper(&wqe->wq->hash->wait)) in io_get_next_work()
507 wake_up(&wqe->wq->hash->wait); in io_get_next_work()
537 static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
540 __releases(wqe->lock) in io_worker_handle_work()
543 struct io_wqe *wqe = worker->wqe; in io_worker_handle_work() local
544 struct io_wq *wq = wqe->wq; in io_worker_handle_work()
559 __io_worker_busy(wqe, worker, work); in io_worker_handle_work()
561 raw_spin_unlock(&wqe->lock); in io_worker_handle_work()
587 io_wqe_enqueue(wqe, linked); in io_worker_handle_work()
597 raw_spin_lock(&wqe->lock); in io_worker_handle_work()
601 raw_spin_unlock(&wqe->lock); in io_worker_handle_work()
605 raw_spin_lock(&wqe->lock); in io_worker_handle_work()
613 struct io_wqe *wqe = worker->wqe; in io_wqe_worker() local
614 struct io_wq *wq = wqe->wq; in io_wqe_worker()
630 raw_spin_lock(&wqe->lock); in io_wqe_worker()
638 raw_spin_unlock(&wqe->lock); in io_wqe_worker()
643 __io_worker_idle(wqe, worker); in io_wqe_worker()
644 raw_spin_unlock(&wqe->lock); in io_wqe_worker()
659 raw_spin_lock(&wqe->lock); in io_wqe_worker()
702 raw_spin_lock(&worker->wqe->lock); in io_wq_worker_sleeping()
704 raw_spin_unlock(&worker->wqe->lock); in io_wq_worker_sleeping()
707 static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker, in io_init_new_worker() argument
712 set_cpus_allowed_ptr(tsk, wqe->cpu_mask); in io_init_new_worker()
715 raw_spin_lock(&wqe->lock); in io_init_new_worker()
716 hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list); in io_init_new_worker()
717 list_add_tail_rcu(&worker->all_list, &wqe->all_list); in io_init_new_worker()
719 raw_spin_unlock(&wqe->lock); in io_init_new_worker()
752 struct io_wqe *wqe; in create_worker_cont() local
756 wqe = worker->wqe; in create_worker_cont()
757 tsk = create_io_thread(io_wqe_worker, worker, wqe->node); in create_worker_cont()
759 io_init_new_worker(wqe, worker, tsk); in create_worker_cont()
766 raw_spin_lock(&wqe->lock); in create_worker_cont()
774 while (io_acct_cancel_pending_work(wqe, acct, &match)) in create_worker_cont()
775 raw_spin_lock(&wqe->lock); in create_worker_cont()
777 raw_spin_unlock(&wqe->lock); in create_worker_cont()
778 io_worker_ref_put(wqe->wq); in create_worker_cont()
797 static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index) in create_io_worker() argument
799 struct io_wqe_acct *acct = &wqe->acct[index]; in create_io_worker()
805 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node); in create_io_worker()
809 raw_spin_lock(&wqe->lock); in create_io_worker()
811 raw_spin_unlock(&wqe->lock); in create_io_worker()
817 worker->wqe = wqe; in create_io_worker()
824 tsk = create_io_thread(io_wqe_worker, worker, wqe->node); in create_io_worker()
826 io_init_new_worker(wqe, worker, tsk); in create_io_worker()
842 static bool io_wq_for_each_worker(struct io_wqe *wqe, in io_wq_for_each_worker() argument
849 list_for_each_entry_rcu(worker, &wqe->all_list, all_list) { in io_wq_for_each_worker()
870 static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe) in io_run_cancel() argument
872 struct io_wq *wq = wqe->wq; in io_run_cancel()
881 static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work) in io_wqe_insert_work() argument
883 struct io_wqe_acct *acct = io_work_get_acct(wqe, work); in io_wqe_insert_work()
894 tail = wqe->hash_tail[hash]; in io_wqe_insert_work()
895 wqe->hash_tail[hash] = work; in io_wqe_insert_work()
907 static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work) in io_wqe_enqueue() argument
909 struct io_wqe_acct *acct = io_work_get_acct(wqe, work); in io_wqe_enqueue()
917 if (test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state) || in io_wqe_enqueue()
919 io_run_cancel(work, wqe); in io_wqe_enqueue()
923 raw_spin_lock(&wqe->lock); in io_wqe_enqueue()
924 io_wqe_insert_work(wqe, work); in io_wqe_enqueue()
928 do_create = !io_wqe_activate_free_worker(wqe, acct); in io_wqe_enqueue()
931 raw_spin_unlock(&wqe->lock); in io_wqe_enqueue()
937 did_create = io_wqe_create_worker(wqe, acct); in io_wqe_enqueue()
941 raw_spin_lock(&wqe->lock); in io_wqe_enqueue()
950 if (io_acct_cancel_pending_work(wqe, acct, &match)) in io_wqe_enqueue()
951 raw_spin_lock(&wqe->lock); in io_wqe_enqueue()
953 raw_spin_unlock(&wqe->lock); in io_wqe_enqueue()
959 struct io_wqe *wqe = wq->wqes[numa_node_id()]; in io_wq_enqueue() local
961 io_wqe_enqueue(wqe, work); in io_wq_enqueue()
995 static inline void io_wqe_remove_pending(struct io_wqe *wqe, in io_wqe_remove_pending() argument
999 struct io_wqe_acct *acct = io_work_get_acct(wqe, work); in io_wqe_remove_pending()
1003 if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) { in io_wqe_remove_pending()
1007 wqe->hash_tail[hash] = prev_work; in io_wqe_remove_pending()
1009 wqe->hash_tail[hash] = NULL; in io_wqe_remove_pending()
1014 static bool io_acct_cancel_pending_work(struct io_wqe *wqe, in io_acct_cancel_pending_work() argument
1017 __releases(wqe->lock) in io_acct_cancel_pending_work()
1026 io_wqe_remove_pending(wqe, work, prev); in io_acct_cancel_pending_work()
1027 raw_spin_unlock(&wqe->lock); in io_acct_cancel_pending_work()
1028 io_run_cancel(work, wqe); in io_acct_cancel_pending_work()
1037 static void io_wqe_cancel_pending_work(struct io_wqe *wqe, in io_wqe_cancel_pending_work() argument
1042 raw_spin_lock(&wqe->lock); in io_wqe_cancel_pending_work()
1044 struct io_wqe_acct *acct = io_get_acct(wqe, i == 0); in io_wqe_cancel_pending_work()
1046 if (io_acct_cancel_pending_work(wqe, acct, match)) { in io_wqe_cancel_pending_work()
1052 raw_spin_unlock(&wqe->lock); in io_wqe_cancel_pending_work()
1055 static void io_wqe_cancel_running_work(struct io_wqe *wqe, in io_wqe_cancel_running_work() argument
1059 io_wq_for_each_worker(wqe, io_wq_worker_cancel, match); in io_wqe_cancel_running_work()
1079 struct io_wqe *wqe = wq->wqes[node]; in io_wq_cancel_cb() local
1081 io_wqe_cancel_pending_work(wqe, &match); in io_wq_cancel_cb()
1093 struct io_wqe *wqe = wq->wqes[node]; in io_wq_cancel_cb() local
1095 io_wqe_cancel_running_work(wqe, &match); in io_wq_cancel_cb()
1110 struct io_wqe *wqe = container_of(wait, struct io_wqe, wait); in io_wqe_hash_wake() local
1117 struct io_wqe_acct *acct = &wqe->acct[i]; in io_wqe_hash_wake()
1120 io_wqe_activate_free_worker(wqe, acct); in io_wqe_hash_wake()
1150 struct io_wqe *wqe; in io_wq_create() local
1155 wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node); in io_wq_create()
1156 if (!wqe) in io_wq_create()
1158 if (!alloc_cpumask_var(&wqe->cpu_mask, GFP_KERNEL)) in io_wq_create()
1160 cpumask_copy(wqe->cpu_mask, cpumask_of_node(node)); in io_wq_create()
1161 wq->wqes[node] = wqe; in io_wq_create()
1162 wqe->node = alloc_node; in io_wq_create()
1163 wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded; in io_wq_create()
1164 wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers = in io_wq_create()
1166 INIT_LIST_HEAD(&wqe->wait.entry); in io_wq_create()
1167 wqe->wait.func = io_wqe_hash_wake; in io_wq_create()
1169 struct io_wqe_acct *acct = &wqe->acct[i]; in io_wq_create()
1175 wqe->wq = wq; in io_wq_create()
1176 raw_spin_lock_init(&wqe->lock); in io_wq_create()
1177 INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0); in io_wq_create()
1178 INIT_LIST_HEAD(&wqe->all_list); in io_wq_create()
1206 return worker->wqe->wq == data; in io_task_work_match()
1237 struct io_wqe *wqe = wq->wqes[node]; in io_wq_exit_workers() local
1239 io_wq_for_each_worker(wqe, io_wq_worker_wake, NULL); in io_wq_exit_workers()
1261 struct io_wqe *wqe = wq->wqes[node]; in io_wq_destroy() local
1266 io_wqe_cancel_pending_work(wqe, &match); in io_wq_destroy()
1267 free_cpumask_var(wqe->cpu_mask); in io_wq_destroy()
1268 kfree(wqe); in io_wq_destroy()
1292 cpumask_set_cpu(od->cpu, worker->wqe->cpu_mask); in io_wq_worker_affinity()
1294 cpumask_clear_cpu(od->cpu, worker->wqe->cpu_mask); in io_wq_worker_affinity()
1333 struct io_wqe *wqe = wq->wqes[i]; in io_wq_cpu_affinity() local
1336 cpumask_copy(wqe->cpu_mask, mask); in io_wq_cpu_affinity()
1338 cpumask_copy(wqe->cpu_mask, cpumask_of_node(i)); in io_wq_cpu_affinity()
1368 struct io_wqe *wqe = wq->wqes[node]; in io_wq_max_workers() local
1371 raw_spin_lock(&wqe->lock); in io_wq_max_workers()
1373 acct = &wqe->acct[i]; in io_wq_max_workers()
1379 raw_spin_unlock(&wqe->lock); in io_wq_max_workers()
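Taken together, the matches above outline the per-node struct io_wqe and the locking convention around it: worker lists, per-class accounting, the hashed-work tails, and the CPU mask all hang off the node structure, and every list or accounting update is done under wqe->lock. The sketch below reconstructs that shape purely from the fields and calls visible in the matches; it is not the kernel's full definition, and the IO_WQ_NR_HASH_BUCKETS constant, the extra io_wqe_acct fields, and the example helper are assumptions added for illustration.

	/*
	 * Minimal sketch of the per-node io_wqe, reconstructed only from the
	 * references listed above. Field order, the io_wqe_acct layout and
	 * the bucket count are assumptions, not the kernel's actual layout.
	 */
	#include <linux/spinlock.h>
	#include <linux/list.h>
	#include <linux/list_nulls.h>
	#include <linux/wait.h>
	#include <linux/cpumask.h>

	#define IO_WQ_NR_HASH_BUCKETS	32	/* assumed bucket count */

	struct io_wq;				/* parent workqueue, defined elsewhere */
	struct io_wq_work;			/* work item, defined elsewhere */

	struct io_wqe_acct {
		unsigned int max_workers;	/* set in io_wq_create(), read in io_wq_max_workers() */
		int index;			/* IO_WQ_ACCT_BOUND / IO_WQ_ACCT_UNBOUND, see create_io_worker() */
		/* further counters omitted; not visible in the matches above */
	};

	struct io_wqe {
		raw_spinlock_t lock;		/* guards the lists and acct[] below */
		struct io_wqe_acct acct[2];	/* bound / unbound accounting, see io_get_acct() */
		int node;			/* NUMA node used for allocations and create_io_thread() */
		struct hlist_nulls_head free_list;	/* idle workers, see __io_worker_idle() */
		struct list_head all_list;		/* all workers on this node, see io_wq_for_each_worker() */
		struct wait_queue_entry wait;		/* hash wait entry, see io_wait_on_hash() */
		struct io_wq *wq;			/* back-pointer to the parent io_wq */
		struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];	/* see io_wqe_insert_work() */
		cpumask_var_t cpu_mask;			/* worker affinity, see io_wq_cpu_affinity() */
	};

	/*
	 * Hypothetical helper showing the lock/unlock pattern the matches
	 * repeat around every list or accounting update, e.g. in
	 * io_wqe_enqueue() and io_wq_max_workers().
	 */
	static void io_wqe_example_update(struct io_wqe *wqe)
	{
		raw_spin_lock(&wqe->lock);
		/* touch wqe->free_list, wqe->all_list or wqe->acct[] here */
		raw_spin_unlock(&wqe->lock);
	}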