Lines matching refs: sqd (struct io_sq_data *)
1109 static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
7391 static inline bool io_sqd_events_pending(struct io_sq_data *sqd) in io_sqd_events_pending() argument
7393 return READ_ONCE(sqd->state); in io_sqd_events_pending()
7451 static __cold void io_sqd_update_thread_idle(struct io_sq_data *sqd) in io_sqd_update_thread_idle() argument
7456 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) in io_sqd_update_thread_idle()
7458 sqd->sq_thread_idle = sq_thread_idle; in io_sqd_update_thread_idle()
7461 static bool io_sqd_handle_event(struct io_sq_data *sqd) in io_sqd_handle_event() argument
7466 if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) || in io_sqd_handle_event()
7468 mutex_unlock(&sqd->lock); in io_sqd_handle_event()
7472 mutex_lock(&sqd->lock); in io_sqd_handle_event()
7474 return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state); in io_sqd_handle_event()
7479 struct io_sq_data *sqd = data; in io_sq_thread() local
7485 snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid); in io_sq_thread()
7488 if (sqd->sq_cpu != -1) in io_sq_thread()
7489 set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu)); in io_sq_thread()
7496 mutex_lock(&sqd->lock); in io_sq_thread()
7500 if (io_sqd_events_pending(sqd) || signal_pending(current)) { in io_sq_thread()
7501 if (io_sqd_handle_event(sqd)) in io_sq_thread()
7503 timeout = jiffies + sqd->sq_thread_idle; in io_sq_thread()
7506 cap_entries = !list_is_singular(&sqd->ctx_list); in io_sq_thread()
7507 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) { in io_sq_thread()
7519 timeout = jiffies + sqd->sq_thread_idle; in io_sq_thread()
7523 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE); in io_sq_thread()
7524 if (!io_sqd_events_pending(sqd) && !current->task_works) { in io_sq_thread()
7527 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) { in io_sq_thread()
7542 mutex_unlock(&sqd->lock); in io_sq_thread()
7544 mutex_lock(&sqd->lock); in io_sq_thread()
7546 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) in io_sq_thread()
7550 finish_wait(&sqd->wait, &wait); in io_sq_thread()
7551 timeout = jiffies + sqd->sq_thread_idle; in io_sq_thread()
7554 io_uring_cancel_generic(true, sqd); in io_sq_thread()
7555 sqd->thread = NULL; in io_sq_thread()
7556 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) in io_sq_thread()
7559 mutex_unlock(&sqd->lock); in io_sq_thread()
7563 complete(&sqd->exited); in io_sq_thread()
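Taken together, the io_sq_thread() fragments above outline the SQPOLL worker loop: name the thread "iou-sqp-<pid>", optionally pin it to sqd->sq_cpu, then loop under sqd->lock submitting for every ring on sqd->ctx_list until a stop event arrives. The sketch below stitches those fragments into a readable shape; it is a reconstruction from the listed lines only, with the elided logic summarized in comments, not the verbatim kernel function:

	/* Reconstructed outline of io_sq_thread(); pieced together from the
	 * lines above, elided parts summarized in comments, details may differ. */
	snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);  /* thread name */
	if (sqd->sq_cpu != -1)
		set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));

	mutex_lock(&sqd->lock);
	while (1) {
		if (io_sqd_events_pending(sqd) || signal_pending(current)) {
			if (io_sqd_handle_event(sqd))
				break;                  /* SHOULD_STOP or fatal signal */
			timeout = jiffies + sqd->sq_thread_idle;
		}

		cap_entries = !list_is_singular(&sqd->ctx_list);
		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
			;   /* submit pending SQEs / reap completions for each ring */

		/* if any work was done, push the idle deadline out again
		 * (timeout = jiffies + sqd->sq_thread_idle) and keep spinning;
		 * only once the idle window expires does the thread sleep */
		prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
		if (!io_sqd_events_pending(sqd) && !current->task_works) {
			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
				;   /* hint to userspace that it must wake us */
			mutex_unlock(&sqd->lock);
			schedule();
			mutex_lock(&sqd->lock);
			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
				;   /* clear the need-wakeup hint again */
		}
		finish_wait(&sqd->wait, &wait);
		timeout = jiffies + sqd->sq_thread_idle;
	}

	io_uring_cancel_generic(true, sqd);   /* cancel all pending work on exit */
	sqd->thread = NULL;
	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
		;   /* mark each ring so submitters stop relying on the thread */
	mutex_unlock(&sqd->lock);
	complete(&sqd->exited);               /* io_sq_thread_stop() waits on this */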
7967 static void io_sq_thread_unpark(struct io_sq_data *sqd) in io_sq_thread_unpark() argument
7968 __releases(&sqd->lock) in io_sq_thread_unpark()
7970 WARN_ON_ONCE(sqd->thread == current); in io_sq_thread_unpark()
7976 clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); in io_sq_thread_unpark()
7977 if (atomic_dec_return(&sqd->park_pending)) in io_sq_thread_unpark()
7978 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); in io_sq_thread_unpark()
7979 mutex_unlock(&sqd->lock); in io_sq_thread_unpark()
7982 static void io_sq_thread_park(struct io_sq_data *sqd) in io_sq_thread_park() argument
7983 __acquires(&sqd->lock) in io_sq_thread_park()
7985 WARN_ON_ONCE(sqd->thread == current); in io_sq_thread_park()
7987 atomic_inc(&sqd->park_pending); in io_sq_thread_park()
7988 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); in io_sq_thread_park()
7989 mutex_lock(&sqd->lock); in io_sq_thread_park()
7990 if (sqd->thread) in io_sq_thread_park()
7991 wake_up_process(sqd->thread); in io_sq_thread_park()
7994 static void io_sq_thread_stop(struct io_sq_data *sqd) in io_sq_thread_stop() argument
7996 WARN_ON_ONCE(sqd->thread == current); in io_sq_thread_stop()
7997 WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state)); in io_sq_thread_stop()
7999 set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state); in io_sq_thread_stop()
8000 mutex_lock(&sqd->lock); in io_sq_thread_stop()
8001 if (sqd->thread) in io_sq_thread_stop()
8002 wake_up_process(sqd->thread); in io_sq_thread_stop()
8003 mutex_unlock(&sqd->lock); in io_sq_thread_stop()
8004 wait_for_completion(&sqd->exited); in io_sq_thread_stop()
8007 static void io_put_sq_data(struct io_sq_data *sqd) in io_put_sq_data() argument
8009 if (refcount_dec_and_test(&sqd->refs)) { in io_put_sq_data()
8010 WARN_ON_ONCE(atomic_read(&sqd->park_pending)); in io_put_sq_data()
8012 io_sq_thread_stop(sqd); in io_put_sq_data()
8013 kfree(sqd); in io_put_sq_data()
8019 struct io_sq_data *sqd = ctx->sq_data; in io_sq_thread_finish() local
8021 if (sqd) { in io_sq_thread_finish()
8022 io_sq_thread_park(sqd); in io_sq_thread_finish()
8024 io_sqd_update_thread_idle(sqd); in io_sq_thread_finish()
8025 io_sq_thread_unpark(sqd); in io_sq_thread_finish()
8027 io_put_sq_data(sqd); in io_sq_thread_finish()
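The park/unpark/stop helpers plus io_put_sq_data() and io_sq_thread_finish() above form the caller-side protocol for touching shared SQPOLL state: bump park_pending and set IO_SQ_THREAD_SHOULD_PARK so the thread drops out of its submission loop, take sqd->lock, mutate, then unpark and drop the reference. A hypothetical caller sketch of that pattern follows (io_sq_thread_finish() also unlinks ctx->sqd_list between park and the idle update, but that line does not reference sqd itself, so it is absent from this listing):

	/* Caller-side pattern pieced together from the fragments above; the names
	 * are the real helpers, the sequence is a reconstruction, not verbatim. */
	io_sq_thread_park(sqd);          /* inc park_pending, set SHOULD_PARK,
	                                  * take sqd->lock, wake the thread */
	/* ... safely mutate sqd->ctx_list / per-ring SQPOLL state here ... */
	io_sqd_update_thread_idle(sqd);  /* recompute idle across remaining rings */
	io_sq_thread_unpark(sqd);        /* clear SHOULD_PARK unless other parkers
	                                  * are still pending, drop sqd->lock */
	io_put_sq_data(sqd);             /* drop the ref; the final put calls
	                                  * io_sq_thread_stop() and kfree()s sqd */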
8035 struct io_sq_data *sqd; in io_attach_sq_data() local
8047 sqd = ctx_attach->sq_data; in io_attach_sq_data()
8048 if (!sqd) { in io_attach_sq_data()
8052 if (sqd->task_tgid != current->tgid) { in io_attach_sq_data()
8057 refcount_inc(&sqd->refs); in io_attach_sq_data()
8059 return sqd; in io_attach_sq_data()
8065 struct io_sq_data *sqd; in io_get_sq_data() local
8069 sqd = io_attach_sq_data(p); in io_get_sq_data()
8070 if (!IS_ERR(sqd)) { in io_get_sq_data()
8072 return sqd; in io_get_sq_data()
8075 if (PTR_ERR(sqd) != -EPERM) in io_get_sq_data()
8076 return sqd; in io_get_sq_data()
8079 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL); in io_get_sq_data()
8080 if (!sqd) in io_get_sq_data()
8083 atomic_set(&sqd->park_pending, 0); in io_get_sq_data()
8084 refcount_set(&sqd->refs, 1); in io_get_sq_data()
8085 INIT_LIST_HEAD(&sqd->ctx_list); in io_get_sq_data()
8086 mutex_init(&sqd->lock); in io_get_sq_data()
8087 init_waitqueue_head(&sqd->wait); in io_get_sq_data()
8088 init_completion(&sqd->exited); in io_get_sq_data()
8089 return sqd; in io_get_sq_data()
8717 struct io_sq_data *sqd; in io_sq_offload_create() local
8724 sqd = io_get_sq_data(p, &attached); in io_sq_offload_create()
8725 if (IS_ERR(sqd)) { in io_sq_offload_create()
8726 ret = PTR_ERR(sqd); in io_sq_offload_create()
8731 ctx->sq_data = sqd; in io_sq_offload_create()
8736 io_sq_thread_park(sqd); in io_sq_offload_create()
8737 list_add(&ctx->sqd_list, &sqd->ctx_list); in io_sq_offload_create()
8738 io_sqd_update_thread_idle(sqd); in io_sq_offload_create()
8740 ret = (attached && !sqd->thread) ? -ENXIO : 0; in io_sq_offload_create()
8741 io_sq_thread_unpark(sqd); in io_sq_offload_create()
8754 sqd->sq_cpu = cpu; in io_sq_offload_create()
8756 sqd->sq_cpu = -1; in io_sq_offload_create()
8759 sqd->task_pid = current->pid; in io_sq_offload_create()
8760 sqd->task_tgid = current->tgid; in io_sq_offload_create()
8761 tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE); in io_sq_offload_create()
8767 sqd->thread = tsk; in io_sq_offload_create()
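io_get_sq_data(), io_attach_sq_data() and io_sq_offload_create() above are the setup-time half of SQPOLL: the first ring allocates an io_sq_data and spawns the iou-sqp-<pid> thread via create_io_thread(), while later rings created with IORING_SETUP_ATTACH_WQ by the same thread group just take a reference on the existing sqd (a foreign tgid gets -EPERM). A user-space sketch of that sharing with liburing; error handling is trimmed, the idle/CPU values are arbitrary, and older kernels need elevated privileges for SQPOLL:

	/* Userspace view of the setup path above, using liburing.
	 * Compile with: cc sqpoll.c -luring */
	#include <liburing.h>
	#include <stdio.h>

	int main(void)
	{
		struct io_uring_params p = {}, p2 = {};
		struct io_uring a, b;

		p.flags = IORING_SETUP_SQPOLL;
		p.sq_thread_idle = 2000;        /* ms before the thread idles
		                                 * (sqd->sq_thread_idle above) */
		/* p.flags |= IORING_SETUP_SQ_AFF; p.sq_thread_cpu = 3;
		 * would pin the thread, like sqd->sq_cpu in the listing */
		if (io_uring_queue_init_params(64, &a, &p) < 0)
			return 1;

		/* Second ring shares the same SQPOLL thread via ATTACH_WQ,
		 * i.e. the io_attach_sq_data() path in the listing. */
		p2.flags = IORING_SETUP_SQPOLL | IORING_SETUP_ATTACH_WQ;
		p2.wq_fd = a.ring_fd;
		if (io_uring_queue_init_params(64, &b, &p2) < 0)
			return 1;

		printf("both rings share one iou-sqp thread (check ps/htop)\n");
		io_uring_queue_exit(&b);
		io_uring_queue_exit(&a);
		return 0;
	}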
9489 struct io_sq_data *sqd = ctx->sq_data; in io_ring_exit_work() local
9492 io_sq_thread_park(sqd); in io_ring_exit_work()
9493 tsk = sqd->thread; in io_ring_exit_work()
9497 io_sq_thread_unpark(sqd); in io_ring_exit_work()
9834 struct io_sq_data *sqd) in io_uring_cancel_generic() argument
9841 WARN_ON_ONCE(sqd && sqd->thread != current); in io_uring_cancel_generic()
9856 if (!sqd) { in io_uring_cancel_generic()
9868 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) in io_uring_cancel_generic()
10800 struct io_sq_data *sqd = NULL; in io_register_iowq_max_workers() local
10811 sqd = ctx->sq_data; in io_register_iowq_max_workers()
10812 if (sqd) { in io_register_iowq_max_workers()
10818 refcount_inc(&sqd->refs); in io_register_iowq_max_workers()
10820 mutex_lock(&sqd->lock); in io_register_iowq_max_workers()
10822 if (sqd->thread) in io_register_iowq_max_workers()
10823 tctx = sqd->thread->io_uring; in io_register_iowq_max_workers()
10844 if (sqd) { in io_register_iowq_max_workers()
10845 mutex_unlock(&sqd->lock); in io_register_iowq_max_workers()
10846 io_put_sq_data(sqd); in io_register_iowq_max_workers()
10853 if (sqd) in io_register_iowq_max_workers()
10870 if (sqd) { in io_register_iowq_max_workers()
10871 mutex_unlock(&sqd->lock); in io_register_iowq_max_workers()
10872 io_put_sq_data(sqd); in io_register_iowq_max_workers()
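Finally, the io_register_iowq_max_workers() fragments show why sqd matters for IORING_REGISTER_IOWQ_MAX_WORKERS: with SQPOLL the async io-wq workers hang off the SQ thread's task context, so the kernel takes a reference on sqd, holds sqd->lock, and applies the limits to sqd->thread->io_uring rather than the calling task's. From user space the same opcode is reachable through liburing; the helper below assumes liburing 2.1 or newer and the limits chosen are arbitrary:

	#include <liburing.h>
	#include <stdio.h>

	/* A zero in a slot leaves that limit unchanged; the kernel writes the
	 * previous limits back into the array, so {0, 0} acts as a query. */
	static int cap_iowq_workers(struct io_uring *ring)
	{
		unsigned int vals[2] = { 4, 16 };   /* [0]=bounded, [1]=unbounded */
		int ret = io_uring_register_iowq_max_workers(ring, vals);

		if (ret < 0)
			return ret;
		printf("previous limits: bounded=%u unbounded=%u\n",
		       vals[0], vals[1]);
		return 0;
	}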