Lines matching refs:acct (cross-reference hits for the io_wqe_acct accounting buckets in io-wq; each hit shows the source line number, the matching excerpt, and its usage or enclosing function)
93 struct io_wqe_acct acct[2]; member
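The listing never shows the definition of struct io_wqe_acct itself, only its fields. Pieced together from the accesses below (nr_workers, max_workers, index, nr_running, work_list, flags), a plausible reconstruction looks like this; the field order, exact types, and the IO_WQ_ACCT_* enum names are assumptions, not verified source:

    /* reconstructed from the field accesses in this listing */
    struct io_wqe_acct {
        unsigned nr_workers;              /* current workers, lock-protected */
        unsigned max_workers;             /* cap checked before creating more */
        int index;                        /* which acct[] slot this is */
        atomic_t nr_running;              /* workers actively running work */
        struct io_wq_work_list work_list; /* pending work for this bucket */
        unsigned long flags;              /* holds IO_ACCT_STALLED_BIT */
    };

    enum {
        IO_WQ_ACCT_BOUND,                 /* acct[0]: bound work */
        IO_WQ_ACCT_UNBOUND,               /* acct[1]: unbound work */
        IO_WQ_ACCT_NR,                    /* == 2, matching acct[2] above */
    };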
142 struct io_wqe_acct *acct,
160 return &wqe->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND]; in io_get_acct()
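Line 160 is the whole story of bucket selection: each io_wqe embeds one accounting bucket for bound work and one for unbound work, and everything else indexes into that pair. The wrappers seen throughout the rest of the listing plausibly reduce to this helper (the two wrapper bodies are assumptions based on the bound/unbound split):

    static inline struct io_wqe_acct *io_get_acct(struct io_wqe *wqe, bool bound)
    {
        return &wqe->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
    }

    /* assumed: pick the bucket from the work item's flags */
    static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
                                                       struct io_wq_work *work)
    {
        return io_get_acct(wqe, !(work->flags & IO_WQ_WORK_UNBOUND));
    }

    /* assumed: pick the bucket from the worker's bound/unbound state */
    static inline struct io_wqe_acct *io_wqe_get_acct(struct io_worker *worker)
    {
        return io_get_acct(worker->wqe, worker->flags & IO_WORKER_F_BOUND);
    }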
182 struct io_wqe_acct *acct = io_wqe_get_acct(worker); in io_worker_cancel_cb() local
186 atomic_dec(&acct->nr_running); in io_worker_cancel_cb()
188 acct->nr_workers--; in io_worker_cancel_cb()
238 static inline bool io_acct_run_queue(struct io_wqe_acct *acct) in io_acct_run_queue() argument
240 if (!wq_list_empty(&acct->work_list) && in io_acct_run_queue()
241 !test_bit(IO_ACCT_STALLED_BIT, &acct->flags)) in io_acct_run_queue()
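Lines 240-241 already show the complete predicate: a bucket is runnable only while it has queued work and is not parked on a hashed-work collision. Filled out, the helper reads:

    static inline bool io_acct_run_queue(struct io_wqe_acct *acct)
    {
        /* runnable iff work is pending and the bucket is not stalled
         * waiting for a busy hash key to drain */
        if (!wq_list_empty(&acct->work_list) &&
            !test_bit(IO_ACCT_STALLED_BIT, &acct->flags))
            return true;
        return false;
    }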
251 struct io_wqe_acct *acct) in io_wqe_activate_free_worker() argument
265 if (io_wqe_get_acct(worker) != acct) { in io_wqe_activate_free_worker()
283 static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct) in io_wqe_create_worker() argument
289 if (unlikely(!acct->max_workers)) in io_wqe_create_worker()
293 if (acct->nr_workers >= acct->max_workers) { in io_wqe_create_worker()
297 acct->nr_workers++; in io_wqe_create_worker()
299 atomic_inc(&acct->nr_running); in io_wqe_create_worker()
301 return create_io_worker(wqe->wq, wqe, acct->index); in io_wqe_create_worker()
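Lines 289-301 outline the create path: warn once if the bucket was configured with zero workers, refuse quietly once nr_workers reaches max_workers, and otherwise reserve the slot (nr_workers++, nr_running++) before the worker task exists, so concurrent enqueuers cannot oversubscribe the cap. A hedged reconstruction; the locking placement, the refcount line, and the warning text are assumptions:

    static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
    {
        if (unlikely(!acct->max_workers))
            pr_warn_once("io-wq is not configured for unbound workers");

        raw_spin_lock(&wqe->lock);
        if (acct->nr_workers >= acct->max_workers) {
            raw_spin_unlock(&wqe->lock);
            return true;    /* at the cap: not an error, just no new worker */
        }
        acct->nr_workers++; /* reserve the slot before the task exists */
        raw_spin_unlock(&wqe->lock);
        atomic_inc(&acct->nr_running);
        atomic_inc(&wqe->wq->worker_refs);  /* assumed refcount */
        return create_io_worker(wqe->wq, wqe, acct->index);
    }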
306 struct io_wqe_acct *acct = io_wqe_get_acct(worker); in io_wqe_inc_running() local
308 atomic_inc(&acct->nr_running); in io_wqe_inc_running()
316 struct io_wqe_acct *acct; in create_worker_cb() local
322 acct = &wqe->acct[worker->create_index]; in create_worker_cb()
324 if (acct->nr_workers < acct->max_workers) { in create_worker_cb()
325 acct->nr_workers++; in create_worker_cb()
332 atomic_dec(&acct->nr_running); in create_worker_cb()
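Lines 322-332 are the deferred half of worker creation, run later from task context. Note the re-check of nr_workers against max_workers under the lock: the cap may have been reached between queueing the callback and running it, in which case the nr_running reservation is rolled back (line 332). Sketch, with the container_of plumbing assumed and the worker ref/release details omitted:

    static void create_worker_cb(struct callback_head *cb)
    {
        struct io_worker *worker = container_of(cb, struct io_worker, create_work);
        struct io_wqe *wqe = worker->wqe;
        struct io_wqe_acct *acct = &wqe->acct[worker->create_index];
        bool do_create = false;

        raw_spin_lock(&wqe->lock);
        if (acct->nr_workers < acct->max_workers) {
            acct->nr_workers++;         /* still room: take the slot */
            do_create = true;
        }
        raw_spin_unlock(&wqe->lock);
        if (do_create)
            create_io_worker(wqe->wq, wqe, worker->create_index);
        else
            atomic_dec(&acct->nr_running);  /* roll back the reservation */
        /* worker create_state/ref release omitted */
    }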
340 struct io_wqe_acct *acct, in io_queue_worker_create() argument
363 worker->create_index = acct->index; in io_queue_worker_create()
381 atomic_dec(&acct->nr_running); in io_queue_worker_create()
389 struct io_wqe_acct *acct = io_wqe_get_acct(worker); in io_wqe_dec_running() local
395 if (atomic_dec_and_test(&acct->nr_running) && io_acct_run_queue(acct)) { in io_wqe_dec_running()
396 atomic_inc(&acct->nr_running); in io_wqe_dec_running()
399 io_queue_worker_create(worker, acct, create_worker_cb); in io_wqe_dec_running()
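Lines 395-399 are the heart of on-demand scaling: when the last running worker of a bucket is about to sleep and work is still queued, it re-takes a nr_running reference on behalf of a successor and queues create_worker_cb. Reconstructed; the IO_WORKER_F_UP guard and the refcount line are assumptions:

    static void io_wqe_dec_running(struct io_worker *worker)
    {
        struct io_wqe_acct *acct = io_wqe_get_acct(worker);
        struct io_wqe *wqe = worker->wqe;

        if (!(worker->flags & IO_WORKER_F_UP))
            return;

        /* last runner for this bucket with work still pending:
         * arrange for a replacement so the queue keeps draining */
        if (atomic_dec_and_test(&acct->nr_running) && io_acct_run_queue(acct)) {
            atomic_inc(&acct->nr_running);
            atomic_inc(&wqe->wq->worker_refs);
            io_queue_worker_create(worker, acct, create_worker_cb);
        }
    }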
457 static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct, in io_get_next_work() argument
466 wq_list_for_each(node, prev, &acct->work_list) { in io_get_next_work()
473 wq_list_del(&acct->work_list, node, prev); in io_get_next_work()
484 wq_list_cut(&acct->work_list, &tail->list, prev); in io_get_next_work()
500 set_bit(IO_ACCT_STALLED_BIT, &acct->flags); in io_get_next_work()
505 clear_bit(IO_ACCT_STALLED_BIT, &acct->flags); in io_get_next_work()
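Lines 466-505 are the dequeue side of hashed-work serialization. Unhashed work is unlinked directly (line 473); for hashed work the whole contiguous run of same-hash items up to hash_tail is cut out in one splice (line 484); if every remaining item maps to a hash that is already executing, the bucket is flagged stalled (line 500) and the bit is cleared again once the hash drains (line 505). A condensed sketch of the loop; declarations and the wait/retry path are assumptions:

    wq_list_for_each(node, prev, &acct->work_list) {
        work = container_of(node, struct io_wq_work, list);

        /* not hashed: any worker may run it immediately */
        if (!io_wq_is_hashed(work)) {
            wq_list_del(&acct->work_list, node, prev);
            return work;
        }

        /* hashed: items with this hash lie contiguously in [work, tail] */
        hash = io_get_work_hash(work);
        tail = wqe->hash_tail[hash];
        if (!test_and_set_bit(hash, &wqe->wq->hash->map)) {
            /* hash not running elsewhere: claim the whole run */
            wqe->hash_tail[hash] = NULL;
            wq_list_cut(&acct->work_list, &tail->list, prev);
            return work;
        }
        if (stall_hash == -1U)
            stall_hash = hash;
        node = &tail->list;     /* skip the rest of this hash run */
    }

    if (stall_hash != -1U)
        /* everything left is hash-blocked: park the bucket until the
         * busy hash completes and the stalled bit gets cleared */
        set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
    return NULL;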
542 struct io_wqe_acct *acct = io_wqe_get_acct(worker); in io_worker_handle_work() local
557 work = io_get_next_work(acct, worker); in io_worker_handle_work()
593 clear_bit(IO_ACCT_STALLED_BIT, &acct->flags); in io_worker_handle_work()
612 struct io_wqe_acct *acct = io_wqe_get_acct(worker); in io_wqe_worker() local
631 if (io_acct_run_queue(acct)) { in io_wqe_worker()
636 if (last_timeout && acct->nr_workers > 1) { in io_wqe_worker()
637 acct->nr_workers--; in io_wqe_worker()
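Lines 631-637 are the worker's idle handling. A worker that wakes to runnable work goes back to handling it; one that has sat through its idle timeout gives its slot back with a bare nr_workers decrement, but only if it is not the bucket's last worker, so one worker always survives to service future submissions. Fragment, with the enclosing loop and locking assumed:

    if (io_acct_run_queue(acct)) {
        io_worker_handle_work(worker);  /* runnable work: go run it */
        continue;                       /* assumed: back around the loop */
    }
    /* idled through the timeout: shed this worker, but always keep
     * at least one alive for the bucket */
    if (last_timeout && acct->nr_workers > 1) {
        acct->nr_workers--;
        raw_spin_unlock(&wqe->lock);
        __set_current_state(TASK_RUNNING);
        break;                          /* fall out and exit the worker */
    }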
763 struct io_wqe_acct *acct = io_wqe_get_acct(worker); in create_worker_cont() local
765 atomic_dec(&acct->nr_running); in create_worker_cont()
767 acct->nr_workers--; in create_worker_cont()
768 if (!acct->nr_workers) { in create_worker_cont()
774 while (io_acct_cancel_pending_work(wqe, acct, &match)) in create_worker_cont()
791 struct io_wqe_acct *acct = io_wqe_get_acct(worker); in io_workqueue_create() local
793 if (!io_queue_worker_create(worker, acct, create_worker_cont)) in io_workqueue_create()
799 struct io_wqe_acct *acct = &wqe->acct[index]; in create_io_worker() local
808 atomic_dec(&acct->nr_running); in create_io_worker()
810 acct->nr_workers--; in create_io_worker()
883 struct io_wqe_acct *acct = io_work_get_acct(wqe, work); in io_wqe_insert_work() local
889 wq_list_add_tail(&work->list, &acct->work_list); in io_wqe_insert_work()
899 wq_list_add_after(&work->list, &tail->list, &acct->work_list); in io_wqe_insert_work()
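Lines 883-899 are the enqueue-side counterpart of the hash handling in io_get_next_work(): unhashed work is appended to the bucket's tail, while hashed work is linked directly after the current tail of its hash run (tracked in wqe->hash_tail[]) so the run stays contiguous and can be spliced out in one go later. Plausible reconstruction:

    static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
    {
        struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
        unsigned int hash;
        struct io_wq_work *tail;

        if (!io_wq_is_hashed(work)) {
    append:
            wq_list_add_tail(&work->list, &acct->work_list);
            return;
        }

        /* keep same-hash items contiguous: link after the current tail */
        hash = io_get_work_hash(work);
        tail = wqe->hash_tail[hash];
        wqe->hash_tail[hash] = work;
        if (!tail)
            goto append;

        wq_list_add_after(&work->list, &tail->list, &acct->work_list);
    }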
909 struct io_wqe_acct *acct = io_work_get_acct(wqe, work); in io_wqe_enqueue() local
925 clear_bit(IO_ACCT_STALLED_BIT, &acct->flags); in io_wqe_enqueue()
928 do_create = !io_wqe_activate_free_worker(wqe, acct); in io_wqe_enqueue()
934 !atomic_read(&acct->nr_running))) { in io_wqe_enqueue()
937 did_create = io_wqe_create_worker(wqe, acct); in io_wqe_enqueue()
943 if (!acct->nr_workers) { in io_wqe_enqueue()
950 if (io_acct_cancel_pending_work(wqe, acct, &match)) in io_wqe_enqueue()
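Lines 909-950 tie the pieces together. Enqueue unconditionally clears the stalled bit (line 925, since fresh work may be runnable even when older entries are hash-blocked), tries to wake an idle worker (line 928), and only creates one when that fails and either the work wants concurrency or nothing is running for the bucket (line 934). The nr_workers check at line 943 guards the fatal case where not even a first worker could be created, in which case the pending work is cancelled rather than left queued forever (line 950). Condensed sketch; flag and matcher names outside the listing are assumptions:

    raw_spin_lock(&wqe->lock);
    io_wqe_insert_work(wqe, work);
    clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);

    rcu_read_lock();
    do_create = !io_wqe_activate_free_worker(wqe, acct);
    rcu_read_unlock();
    raw_spin_unlock(&wqe->lock);

    if (do_create && ((work_flags & IO_WQ_WORK_CONCURRENT) ||
        !atomic_read(&acct->nr_running))) {
        if (likely(io_wqe_create_worker(wqe, acct)))
            return;

        raw_spin_lock(&wqe->lock);
        if (!acct->nr_workers) {
            /* could not create even one worker: cancel instead of
             * leaving the work queued with nobody to run it */
            struct io_cb_cancel_data match = {
                .fn         = io_wq_work_match_item,    /* assumed matcher */
                .data       = work,
                .cancel_all = false,
            };
            if (io_acct_cancel_pending_work(wqe, acct, &match))
                raw_spin_lock(&wqe->lock);  /* cancel drops the lock */
        }
        raw_spin_unlock(&wqe->lock);
    }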
999 struct io_wqe_acct *acct = io_work_get_acct(wqe, work); in io_wqe_remove_pending() local
1011 wq_list_del(&acct->work_list, &work->list, prev); in io_wqe_remove_pending()
1015 struct io_wqe_acct *acct, in io_acct_cancel_pending_work() argument
1022 wq_list_for_each(node, prev, &acct->work_list) { in io_acct_cancel_pending_work()
1044 struct io_wqe_acct *acct = io_get_acct(wqe, i == 0); in io_wqe_cancel_pending_work() local
1046 if (io_acct_cancel_pending_work(wqe, acct, match)) { in io_wqe_cancel_pending_work()
1117 struct io_wqe_acct *acct = &wqe->acct[i]; in io_wqe_hash_wake() local
1119 if (test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags)) in io_wqe_hash_wake()
1120 io_wqe_activate_free_worker(wqe, acct); in io_wqe_hash_wake()
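Lines 1117-1120 run from the hash wait queue when a contended hash key completes: every bucket that parked itself with IO_ACCT_STALLED_BIT is un-stalled and an idle worker is kicked to resume draining it. Hedged reconstruction of the callback:

    static int io_wqe_hash_wake(struct wait_queue_entry *wait, unsigned mode,
                                int sync, void *key)
    {
        struct io_wqe *wqe = container_of(wait, struct io_wqe, wait);
        int i;

        list_del_init(&wait->entry);

        rcu_read_lock();
        for (i = 0; i < IO_WQ_ACCT_NR; i++) {
            struct io_wqe_acct *acct = &wqe->acct[i];

            /* bucket parked on a busy hash: un-stall and kick a worker */
            if (test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags))
                io_wqe_activate_free_worker(wqe, acct);
        }
        rcu_read_unlock();
        return 1;
    }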
1163 wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded; in io_wq_create()
1164 wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers = in io_wq_create()
1169 struct io_wqe_acct *acct = &wqe->acct[i]; in io_wq_create() local
1171 acct->index = i; in io_wq_create()
1172 atomic_set(&acct->nr_running, 0); in io_wq_create()
1173 INIT_WQ_LIST(&acct->work_list); in io_wq_create()
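Lines 1163-1173 are the only place the two buckets get their limits: the bound cap comes from the caller of io_wq_create(), the unbound cap plausibly from the task's process limit. The per-bucket state is then initialized empty:

    wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
    wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
                task_rlimit(current, RLIMIT_NPROC);     /* assumed source of the cap */

    for (i = 0; i < IO_WQ_ACCT_NR; i++) {
        struct io_wqe_acct *acct = &wqe->acct[i];

        acct->index = i;
        atomic_set(&acct->nr_running, 0);
        INIT_WQ_LIST(&acct->work_list);
    }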
1369 struct io_wqe_acct *acct; in io_wq_max_workers() local
1373 acct = &wqe->acct[i]; in io_wq_max_workers()
1375 prev[i] = max_t(int, acct->max_workers, prev[i]); in io_wq_max_workers()
1377 acct->max_workers = new_count[i]; in io_wq_max_workers()
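Lines 1369-1377 belong to the runtime tuning path (plausibly driven by io_uring's IORING_REGISTER_IOWQ_MAX_WORKERS registration): for each bucket the old cap is folded into prev[] so it can be reported back, and the new cap is installed. Sketch of the inner loop; the guard treating 0 as "leave unchanged" and the surrounding per-node locking are assumptions:

    raw_spin_lock(&wqe->lock);
    for (i = 0; i < IO_WQ_ACCT_NR; i++) {
        acct = &wqe->acct[i];
        /* remember the old cap so it can be handed back to the caller */
        prev[i] = max_t(int, acct->max_workers, prev[i]);
        if (new_count[i])               /* assumed: 0 means "don't change" */
            acct->max_workers = new_count[i];
    }
    raw_spin_unlock(&wqe->lock);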