Lines matching refs:task (cross-reference hits for the identifier task in the SUNRPC task scheduler, net/sunrpc/sched.c). Each hit gives the source line number, the matched line, and the enclosing function; "argument" or "local" indicates how task is declared at that point.
45 static void rpc_release_task(struct rpc_task *task);
61 rpc_task_timeout(const struct rpc_task *task) in rpc_task_timeout() argument
63 unsigned long timeout = READ_ONCE(task->tk_timeout); in rpc_task_timeout()
80 __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task) in __rpc_disable_timer() argument
82 if (list_empty(&task->u.tk_wait.timer_list)) in __rpc_disable_timer()
84 task->tk_timeout = 0; in __rpc_disable_timer()
85 list_del(&task->u.tk_wait.timer_list); in __rpc_disable_timer()
106 __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task, in __rpc_add_timer() argument
109 task->tk_timeout = timeout; in __rpc_add_timer()
112 list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list); in __rpc_add_timer()
132 __rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task) in __rpc_list_enqueue_task() argument
137 if (t->tk_owner == task->tk_owner) { in __rpc_list_enqueue_task()
138 list_add_tail(&task->u.tk_wait.links, in __rpc_list_enqueue_task()
141 task->u.tk_wait.list.next = q; in __rpc_list_enqueue_task()
142 task->u.tk_wait.list.prev = NULL; in __rpc_list_enqueue_task()
146 INIT_LIST_HEAD(&task->u.tk_wait.links); in __rpc_list_enqueue_task()
147 list_add_tail(&task->u.tk_wait.list, q); in __rpc_list_enqueue_task()
154 __rpc_list_dequeue_task(struct rpc_task *task) in __rpc_list_dequeue_task() argument
159 if (task->u.tk_wait.list.prev == NULL) { in __rpc_list_dequeue_task()
160 list_del(&task->u.tk_wait.links); in __rpc_list_dequeue_task()
163 if (!list_empty(&task->u.tk_wait.links)) { in __rpc_list_dequeue_task()
164 t = list_first_entry(&task->u.tk_wait.links, in __rpc_list_dequeue_task()
170 list_del(&task->u.tk_wait.links); in __rpc_list_dequeue_task()
172 list_del(&task->u.tk_wait.list); in __rpc_list_dequeue_task()
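The two list fields used in these enqueue/dequeue helpers (u.tk_wait.list and u.tk_wait.links) form an owner-batched queue: only the first task per tk_owner sits on the main list, and later tasks from the same owner are chained behind it. A simplified, illustrative sketch of that shape, assuming generic struct list_head nodes; owner_node and owner_enqueue are made-up names, not part of the SUNRPC code:

#include <linux/list.h>
#include <linux/types.h>

/* Illustrative only: one node per waiter, batched by owner id. */
struct owner_node {
        pid_t owner;
        struct list_head list;   /* on the main queue; first node per owner only */
        struct list_head links;  /* later same-owner nodes hang off the first one */
};

/* Keep at most one entry per owner on @q (loosely mirrors __rpc_list_enqueue_task). */
static void owner_enqueue(struct list_head *q, struct owner_node *node)
{
        struct owner_node *t;

        list_for_each_entry(t, q, list) {
                if (t->owner == node->owner) {
                        /* Same owner already queued: chain behind it. */
                        list_add_tail(&node->links, &t->links);
                        node->list.prev = NULL; /* marker: not on the main list */
                        return;
                }
        }
        INIT_LIST_HEAD(&node->links);
        list_add_tail(&node->list, q);
}

Dequeueing mirrors this: a node whose list.prev is NULL is known to live on another node's links chain, which is the check __rpc_list_dequeue_task() makes at line 159 above.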
179 struct rpc_task *task, in __rpc_add_wait_queue_priority() argument
184 __rpc_list_enqueue_task(&queue->tasks[queue_priority], task); in __rpc_add_wait_queue_priority()
196 struct rpc_task *task, in __rpc_add_wait_queue() argument
199 INIT_LIST_HEAD(&task->u.tk_wait.timer_list); in __rpc_add_wait_queue()
201 __rpc_add_wait_queue_priority(queue, task, queue_priority); in __rpc_add_wait_queue()
202 else if (RPC_IS_SWAPPER(task)) in __rpc_add_wait_queue()
203 list_add(&task->u.tk_wait.list, &queue->tasks[0]); in __rpc_add_wait_queue()
205 list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]); in __rpc_add_wait_queue()
206 task->tk_waitqueue = queue; in __rpc_add_wait_queue()
210 rpc_set_queued(task); in __rpc_add_wait_queue()
216 static void __rpc_remove_wait_queue_priority(struct rpc_task *task) in __rpc_remove_wait_queue_priority() argument
218 __rpc_list_dequeue_task(task); in __rpc_remove_wait_queue_priority()
225 static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task) in __rpc_remove_wait_queue() argument
227 __rpc_disable_timer(queue, task); in __rpc_remove_wait_queue()
229 __rpc_remove_wait_queue_priority(task); in __rpc_remove_wait_queue()
231 list_del(&task->u.tk_wait.list); in __rpc_remove_wait_queue()
278 static void rpc_task_set_debuginfo(struct rpc_task *task) in rpc_task_set_debuginfo() argument
280 struct rpc_clnt *clnt = task->tk_client; in rpc_task_set_debuginfo()
286 task->tk_pid = atomic_inc_return(&rpc_pid); in rpc_task_set_debuginfo()
290 task->tk_pid = atomic_inc_return(&clnt->cl_pid); in rpc_task_set_debuginfo()
293 static inline void rpc_task_set_debuginfo(struct rpc_task *task) in rpc_task_set_debuginfo() argument
298 static void rpc_set_active(struct rpc_task *task) in rpc_set_active() argument
300 rpc_task_set_debuginfo(task); in rpc_set_active()
301 set_bit(RPC_TASK_ACTIVE, &task->tk_runstate); in rpc_set_active()
302 trace_rpc_task_begin(task, NULL); in rpc_set_active()
309 static int rpc_complete_task(struct rpc_task *task) in rpc_complete_task() argument
311 void *m = &task->tk_runstate; in rpc_complete_task()
317 trace_rpc_task_complete(task, NULL); in rpc_complete_task()
320 clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate); in rpc_complete_task()
321 ret = atomic_dec_and_test(&task->tk_count); in rpc_complete_task()
335 int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action) in __rpc_wait_for_completion_task() argument
339 return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE, in __rpc_wait_for_completion_task()
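rpc_complete_task() and __rpc_wait_for_completion_task() build task completion on a bit in tk_runstate plus the kernel's wait-on-bit machinery; the real code additionally takes the bit waitqueue lock so the wakeup and the final reference drop happen together. A hedged sketch of the underlying pattern only: my_object and MY_FLAG_ACTIVE are illustrative, while wait_on_bit(), clear_bit() and wake_up_bit() are the standard primitives being leaned on here.

#include <linux/wait_bit.h>
#include <linux/bitops.h>
#include <linux/sched.h>

#define MY_FLAG_ACTIVE 0 /* illustrative bit number */

struct my_object {
        unsigned long flags;
};

/* Waiter side: sleep until MY_FLAG_ACTIVE is cleared. */
static int my_wait_for_completion(struct my_object *obj)
{
        return wait_on_bit(&obj->flags, MY_FLAG_ACTIVE, TASK_KILLABLE);
}

/* Completer side: clear the bit, then wake anyone sleeping on it. */
static void my_complete(struct my_object *obj)
{
        clear_bit(MY_FLAG_ACTIVE, &obj->flags);
        smp_mb__after_atomic(); /* order the clear before the waiter check */
        wake_up_bit(&obj->flags, MY_FLAG_ACTIVE);
}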
356 struct rpc_task *task) in rpc_make_runnable() argument
358 bool need_wakeup = !rpc_test_and_set_running(task); in rpc_make_runnable()
360 rpc_clear_queued(task); in rpc_make_runnable()
363 if (RPC_IS_ASYNC(task)) { in rpc_make_runnable()
364 INIT_WORK(&task->u.tk_work, rpc_async_schedule); in rpc_make_runnable()
365 queue_work(wq, &task->u.tk_work); in rpc_make_runnable()
367 wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED); in rpc_make_runnable()
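For RPC_TASK_ASYNC tasks, rpc_make_runnable() defers execution to a workqueue through a work item, while synchronous waiters are kicked with wake_up_bit() on RPC_TASK_QUEUED. A minimal sketch of the workqueue half, assuming a hypothetical my_ctx object; INIT_WORK() and queue_work() are the real APIs in play:

#include <linux/workqueue.h>
#include <linux/kernel.h>

struct my_ctx {
        struct work_struct work;
        /* ... payload ... */
};

static void my_work_fn(struct work_struct *work)
{
        struct my_ctx *ctx = container_of(work, struct my_ctx, work);

        /* run the deferred state-machine step for ctx */
        (void)ctx;
}

/* Hand @ctx to @wq, the way rpc_make_runnable() does for async tasks. */
static void my_make_runnable(struct workqueue_struct *wq, struct my_ctx *ctx)
{
        INIT_WORK(&ctx->work, my_work_fn);
        queue_work(wq, &ctx->work);
}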
377 struct rpc_task *task, in __rpc_do_sleep_on_priority() argument
380 trace_rpc_task_sleep(task, q); in __rpc_do_sleep_on_priority()
382 __rpc_add_wait_queue(q, task, queue_priority); in __rpc_do_sleep_on_priority()
386 struct rpc_task *task, in __rpc_sleep_on_priority() argument
389 if (WARN_ON_ONCE(RPC_IS_QUEUED(task))) in __rpc_sleep_on_priority()
391 __rpc_do_sleep_on_priority(q, task, queue_priority); in __rpc_sleep_on_priority()
395 struct rpc_task *task, unsigned long timeout, in __rpc_sleep_on_priority_timeout() argument
398 if (WARN_ON_ONCE(RPC_IS_QUEUED(task))) in __rpc_sleep_on_priority_timeout()
401 __rpc_do_sleep_on_priority(q, task, queue_priority); in __rpc_sleep_on_priority_timeout()
402 __rpc_add_timer(q, task, timeout); in __rpc_sleep_on_priority_timeout()
404 task->tk_status = -ETIMEDOUT; in __rpc_sleep_on_priority_timeout()
407 static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action) in rpc_set_tk_callback() argument
409 if (action && !WARN_ON_ONCE(task->tk_callback != NULL)) in rpc_set_tk_callback()
410 task->tk_callback = action; in rpc_set_tk_callback()
413 static bool rpc_sleep_check_activated(struct rpc_task *task) in rpc_sleep_check_activated() argument
416 if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) { in rpc_sleep_check_activated()
417 task->tk_status = -EIO; in rpc_sleep_check_activated()
418 rpc_put_task_async(task); in rpc_sleep_check_activated()
424 void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task, in rpc_sleep_on_timeout() argument
427 if (!rpc_sleep_check_activated(task)) in rpc_sleep_on_timeout()
430 rpc_set_tk_callback(task, action); in rpc_sleep_on_timeout()
436 __rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority); in rpc_sleep_on_timeout()
441 void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, in rpc_sleep_on() argument
444 if (!rpc_sleep_check_activated(task)) in rpc_sleep_on()
447 rpc_set_tk_callback(task, action); in rpc_sleep_on()
449 WARN_ON_ONCE(task->tk_timeout != 0); in rpc_sleep_on()
454 __rpc_sleep_on_priority(q, task, task->tk_priority); in rpc_sleep_on()
460 struct rpc_task *task, unsigned long timeout, int priority) in rpc_sleep_on_priority_timeout() argument
462 if (!rpc_sleep_check_activated(task)) in rpc_sleep_on_priority_timeout()
470 __rpc_sleep_on_priority_timeout(q, task, timeout, priority); in rpc_sleep_on_priority_timeout()
475 void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task, in rpc_sleep_on_priority() argument
478 if (!rpc_sleep_check_activated(task)) in rpc_sleep_on_priority()
481 WARN_ON_ONCE(task->tk_timeout != 0); in rpc_sleep_on_priority()
487 __rpc_sleep_on_priority(q, task, priority); in rpc_sleep_on_priority()
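A typical user of the rpc_sleep_on() family is a tk_action step that finds a resource busy, parks the task on a wait queue, and returns to the state machine. A hedged caller sketch: my_resource_queue, my_resource_busy() and my_next_step() are assumptions for illustration, and the queue itself would have been prepared earlier with rpc_init_wait_queue():

#include <linux/sunrpc/sched.h>

/* Assumed to exist elsewhere in this illustration. */
static struct rpc_wait_queue my_resource_queue;
static bool my_resource_busy(void);
static void my_next_step(struct rpc_task *task);

static void my_action(struct rpc_task *task)
{
        if (my_resource_busy()) {
                /* Stay on this step and sleep until someone wakes the queue. */
                rpc_sleep_on(&my_resource_queue, task, NULL);
                return;
        }
        task->tk_action = my_next_step;
}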
502 struct rpc_task *task) in __rpc_do_wake_up_task_on_wq() argument
505 if (!RPC_IS_ACTIVATED(task)) { in __rpc_do_wake_up_task_on_wq()
506 printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task); in __rpc_do_wake_up_task_on_wq()
510 trace_rpc_task_wakeup(task, queue); in __rpc_do_wake_up_task_on_wq()
512 __rpc_remove_wait_queue(queue, task); in __rpc_do_wake_up_task_on_wq()
514 rpc_make_runnable(wq, task); in __rpc_do_wake_up_task_on_wq()
522 struct rpc_wait_queue *queue, struct rpc_task *task, in rpc_wake_up_task_on_wq_queue_action_locked() argument
525 if (RPC_IS_QUEUED(task)) { in rpc_wake_up_task_on_wq_queue_action_locked()
527 if (task->tk_waitqueue == queue) { in rpc_wake_up_task_on_wq_queue_action_locked()
528 if (action == NULL || action(task, data)) { in rpc_wake_up_task_on_wq_queue_action_locked()
529 __rpc_do_wake_up_task_on_wq(wq, queue, task); in rpc_wake_up_task_on_wq_queue_action_locked()
530 return task; in rpc_wake_up_task_on_wq_queue_action_locked()
541 struct rpc_task *task) in rpc_wake_up_task_queue_locked() argument
544 task, NULL, NULL); in rpc_wake_up_task_queue_locked()
550 void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task) in rpc_wake_up_queued_task() argument
552 if (!RPC_IS_QUEUED(task)) in rpc_wake_up_queued_task()
555 rpc_wake_up_task_queue_locked(queue, task); in rpc_wake_up_queued_task()
560 static bool rpc_task_action_set_status(struct rpc_task *task, void *status) in rpc_task_action_set_status() argument
562 task->tk_status = *(int *)status; in rpc_task_action_set_status()
568 struct rpc_task *task, int status) in rpc_wake_up_task_queue_set_status_locked() argument
571 task, rpc_task_action_set_status, &status); in rpc_wake_up_task_queue_set_status_locked()
585 struct rpc_task *task, int status) in rpc_wake_up_queued_task_set_status() argument
587 if (!RPC_IS_QUEUED(task)) in rpc_wake_up_queued_task_set_status()
590 rpc_wake_up_task_queue_set_status_locked(queue, task, status); in rpc_wake_up_queued_task_set_status()
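The matching wake-up side comes from whoever frees the resource. Continuing the illustration above (my_resource_queue is still hypothetical), a single sleeper can be woken with rpc_wake_up_queued_task(), or every sleeper can be failed with a status via rpc_wake_up_status():

#include <linux/sunrpc/sched.h>
#include <linux/errno.h>

static struct rpc_wait_queue my_resource_queue; /* as above, illustrative */

/* Wake the one task that went to sleep in my_action(). */
static void my_resource_release(struct rpc_task *waiter)
{
        rpc_wake_up_queued_task(&my_resource_queue, waiter);
}

/* Or fail every waiter at once, handing each a status. */
static void my_resource_gone(void)
{
        rpc_wake_up_status(&my_resource_queue, -EIO);
}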
600 struct rpc_task *task; in __rpc_find_next_queued_priority() local
607 task = list_first_entry(q, struct rpc_task, u.tk_wait.list); in __rpc_find_next_queued_priority()
617 task = list_first_entry(q, struct rpc_task, u.tk_wait.list); in __rpc_find_next_queued_priority()
630 task = list_first_entry(q, struct rpc_task, u.tk_wait.list); in __rpc_find_next_queued_priority()
641 return task; in __rpc_find_next_queued_priority()
660 struct rpc_task *task = NULL; in rpc_wake_up_first_on_wq() local
663 task = __rpc_find_next_queued(queue); in rpc_wake_up_first_on_wq()
664 if (task != NULL) in rpc_wake_up_first_on_wq()
665 task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue, in rpc_wake_up_first_on_wq()
666 task, func, data); in rpc_wake_up_first_on_wq()
669 return task; in rpc_wake_up_first_on_wq()
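rpc_wake_up_first_on_wq() is the fairness-aware dequeue: it picks the next task according to priority and owner batching, and only wakes it if the caller's predicate agrees. A hedged sketch of gating wake-ups on available capacity; my_backlog, my_slot_available() and the slot counter are invented for illustration, while rpc_wake_up_first() and the bool (*)(struct rpc_task *, void *) predicate shape match the code listed above:

#include <linux/sunrpc/sched.h>

static struct rpc_wait_queue my_backlog; /* illustrative queue of waiting tasks */

/* Predicate: only let the task run if a slot can actually be handed out. */
static bool my_slot_available(struct rpc_task *task, void *data)
{
        unsigned int *free_slots = data;

        if (*free_slots == 0)
                return false;
        (*free_slots)--;
        return true;
}

static void my_release_slot(unsigned int *free_slots)
{
        (*free_slots)++;
        rpc_wake_up_first(&my_backlog, my_slot_available, free_slots);
}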
682 static bool rpc_wake_up_next_func(struct rpc_task *task, void *data) in rpc_wake_up_next_func() argument
703 struct rpc_task *task; in rpc_wake_up_locked() local
706 task = __rpc_find_next_queued(queue); in rpc_wake_up_locked()
707 if (task == NULL) in rpc_wake_up_locked()
709 rpc_wake_up_task_queue_locked(queue, task); in rpc_wake_up_locked()
734 struct rpc_task *task; in rpc_wake_up_status_locked() local
737 task = __rpc_find_next_queued(queue); in rpc_wake_up_status_locked()
738 if (task == NULL) in rpc_wake_up_status_locked()
740 rpc_wake_up_task_queue_set_status_locked(queue, task, status); in rpc_wake_up_status_locked()
764 struct rpc_task *task, *n; in __rpc_queue_timer_fn() local
769 list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) { in __rpc_queue_timer_fn()
770 timeo = task->tk_timeout; in __rpc_queue_timer_fn()
772 trace_rpc_task_timeout(task, task->tk_action); in __rpc_queue_timer_fn()
773 task->tk_status = -ETIMEDOUT; in __rpc_queue_timer_fn()
774 rpc_wake_up_task_queue_locked(queue, task); in __rpc_queue_timer_fn()
785 static void __rpc_atrun(struct rpc_task *task) in __rpc_atrun() argument
787 if (task->tk_status == -ETIMEDOUT) in __rpc_atrun()
788 task->tk_status = 0; in __rpc_atrun()
794 void rpc_delay(struct rpc_task *task, unsigned long delay) in rpc_delay() argument
796 rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay); in rpc_delay()
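rpc_delay() is the standard way for a step to back off and retry: the task sleeps on the internal delay_queue with a timeout, and __rpc_atrun() clears the resulting -ETIMEDOUT so the retry starts with a clean status. An illustrative retry step; my_retry_step() and the -EAGAIN condition are assumptions about the caller, not part of sched.c:

#include <linux/sunrpc/sched.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static void my_retry_step(struct rpc_task *task);

static void my_handle_result(struct rpc_task *task)
{
        if (task->tk_status == -EAGAIN) {
                /* Server said "try again": sleep roughly 250 ms, then rerun. */
                task->tk_status = 0;
                task->tk_action = my_retry_step;
                rpc_delay(task, HZ >> 2);
                return;
        }
        task->tk_action = NULL; /* done */
}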
803 void rpc_prepare_task(struct rpc_task *task) in rpc_prepare_task() argument
805 task->tk_ops->rpc_call_prepare(task, task->tk_calldata); in rpc_prepare_task()
809 rpc_init_task_statistics(struct rpc_task *task) in rpc_init_task_statistics() argument
812 task->tk_garb_retry = 2; in rpc_init_task_statistics()
813 task->tk_cred_retry = 2; in rpc_init_task_statistics()
814 task->tk_rebind_retry = 2; in rpc_init_task_statistics()
817 task->tk_start = ktime_get(); in rpc_init_task_statistics()
821 rpc_reset_task_statistics(struct rpc_task *task) in rpc_reset_task_statistics() argument
823 task->tk_timeouts = 0; in rpc_reset_task_statistics()
824 task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT); in rpc_reset_task_statistics()
825 rpc_init_task_statistics(task); in rpc_reset_task_statistics()
831 void rpc_exit_task(struct rpc_task *task) in rpc_exit_task() argument
833 trace_rpc_task_end(task, task->tk_action); in rpc_exit_task()
834 task->tk_action = NULL; in rpc_exit_task()
835 if (task->tk_ops->rpc_count_stats) in rpc_exit_task()
836 task->tk_ops->rpc_count_stats(task, task->tk_calldata); in rpc_exit_task()
837 else if (task->tk_client) in rpc_exit_task()
838 rpc_count_iostats(task, task->tk_client->cl_metrics); in rpc_exit_task()
839 if (task->tk_ops->rpc_call_done != NULL) { in rpc_exit_task()
840 trace_rpc_task_call_done(task, task->tk_ops->rpc_call_done); in rpc_exit_task()
841 task->tk_ops->rpc_call_done(task, task->tk_calldata); in rpc_exit_task()
842 if (task->tk_action != NULL) { in rpc_exit_task()
844 xprt_release(task); in rpc_exit_task()
845 rpc_reset_task_statistics(task); in rpc_exit_task()
850 void rpc_signal_task(struct rpc_task *task) in rpc_signal_task() argument
854 if (!RPC_IS_ACTIVATED(task)) in rpc_signal_task()
857 trace_rpc_task_signalled(task, task->tk_action); in rpc_signal_task()
858 set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate); in rpc_signal_task()
860 queue = READ_ONCE(task->tk_waitqueue); in rpc_signal_task()
862 rpc_wake_up_queued_task_set_status(queue, task, -ERESTARTSYS); in rpc_signal_task()
865 void rpc_exit(struct rpc_task *task, int status) in rpc_exit() argument
867 task->tk_status = status; in rpc_exit()
868 task->tk_action = rpc_exit_task; in rpc_exit()
869 rpc_wake_up_queued_task(task->tk_waitqueue, task); in rpc_exit()
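rpc_exit() is how a step aborts the whole call: it records the status, points tk_action at rpc_exit_task, and wakes the task if it is currently queued so the exit path runs promptly. A hedged sketch of a step bailing out; my_validate() and my_decode_step() are hypothetical:

#include <linux/sunrpc/sched.h>
#include <linux/errno.h>

static int my_validate(struct rpc_task *task);

static void my_decode_step(struct rpc_task *task)
{
        if (my_validate(task) < 0) {
                /* Unrecoverable: route the task straight to rpc_exit_task(). */
                rpc_exit(task, -EIO);
                return;
        }
        task->tk_action = NULL;
}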
882 static void __rpc_execute(struct rpc_task *task) in __rpc_execute() argument
885 int task_is_async = RPC_IS_ASYNC(task); in __rpc_execute()
888 WARN_ON_ONCE(RPC_IS_QUEUED(task)); in __rpc_execute()
889 if (RPC_IS_QUEUED(task)) in __rpc_execute()
902 do_action = task->tk_action; in __rpc_execute()
903 if (task->tk_callback) { in __rpc_execute()
904 do_action = task->tk_callback; in __rpc_execute()
905 task->tk_callback = NULL; in __rpc_execute()
909 trace_rpc_task_run_action(task, do_action); in __rpc_execute()
910 do_action(task); in __rpc_execute()
915 if (!RPC_IS_QUEUED(task)) { in __rpc_execute()
923 if (RPC_SIGNALLED(task)) { in __rpc_execute()
924 task->tk_rpc_status = -ERESTARTSYS; in __rpc_execute()
925 rpc_exit(task, -ERESTARTSYS); in __rpc_execute()
937 queue = task->tk_waitqueue; in __rpc_execute()
939 if (!RPC_IS_QUEUED(task)) { in __rpc_execute()
943 rpc_clear_running(task); in __rpc_execute()
949 trace_rpc_task_sync_sleep(task, task->tk_action); in __rpc_execute()
950 status = out_of_line_wait_on_bit(&task->tk_runstate, in __rpc_execute()
960 trace_rpc_task_signalled(task, task->tk_action); in __rpc_execute()
961 set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate); in __rpc_execute()
962 task->tk_rpc_status = -ERESTARTSYS; in __rpc_execute()
963 rpc_exit(task, -ERESTARTSYS); in __rpc_execute()
965 trace_rpc_task_sync_wake(task, task->tk_action); in __rpc_execute()
969 rpc_release_task(task); in __rpc_execute()
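__rpc_execute() is the heart of the scheduler: run an armed tk_callback if there is one, otherwise tk_action; stop when neither is set; and when a step queues the task, either return to rpciod (async) or sleep on the RPC_TASK_QUEUED bit (sync). A stripped-down, non-SUNRPC sketch of just that action/callback loop; every name here is invented for illustration, and the real loop also handles signalling, tracing and the synchronous sleep shown above:

#include <linux/types.h>

struct my_task;
typedef void (*my_action_t)(struct my_task *);

struct my_task {
        my_action_t action;   /* next state-machine step */
        my_action_t callback; /* one-shot step armed at wake-up time */
        bool queued;          /* parked on a wait queue */
};

static void my_execute(struct my_task *t)
{
        for (;;) {
                my_action_t step = t->action;

                /* An armed callback runs once, ahead of the regular action. */
                if (t->callback) {
                        step = t->callback;
                        t->callback = NULL;
                }
                if (!step)
                        break; /* nothing left to do: the task is finished */

                step(t);

                if (t->queued)
                        break; /* the step went to sleep; resume on wake-up */
        }
}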
981 void rpc_execute(struct rpc_task *task) in rpc_execute() argument
983 bool is_async = RPC_IS_ASYNC(task); in rpc_execute()
985 rpc_set_active(task); in rpc_execute()
986 rpc_make_runnable(rpciod_workqueue, task); in rpc_execute()
989 __rpc_execute(task); in rpc_execute()
1019 int rpc_malloc(struct rpc_task *task) in rpc_malloc() argument
1021 struct rpc_rqst *rqst = task->tk_rqstp; in rpc_malloc()
1026 if (RPC_IS_SWAPPER(task)) in rpc_malloc()
1050 void rpc_free(struct rpc_task *task) in rpc_free() argument
1052 void *buffer = task->tk_rqstp->rq_buffer; in rpc_free()
1069 static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data) in rpc_init_task() argument
1071 memset(task, 0, sizeof(*task)); in rpc_init_task()
1072 atomic_set(&task->tk_count, 1); in rpc_init_task()
1073 task->tk_flags = task_setup_data->flags; in rpc_init_task()
1074 task->tk_ops = task_setup_data->callback_ops; in rpc_init_task()
1075 task->tk_calldata = task_setup_data->callback_data; in rpc_init_task()
1076 INIT_LIST_HEAD(&task->tk_task); in rpc_init_task()
1078 task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW; in rpc_init_task()
1079 task->tk_owner = current->tgid; in rpc_init_task()
1082 task->tk_workqueue = task_setup_data->workqueue; in rpc_init_task()
1084 task->tk_xprt = rpc_task_get_xprt(task_setup_data->rpc_client, in rpc_init_task()
1087 task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred); in rpc_init_task()
1089 if (task->tk_ops->rpc_call_prepare != NULL) in rpc_init_task()
1090 task->tk_action = rpc_prepare_task; in rpc_init_task()
1092 rpc_init_task_statistics(task); in rpc_init_task()
1106 struct rpc_task *task = setup_data->task; in rpc_new_task() local
1109 if (task == NULL) { in rpc_new_task()
1110 task = rpc_alloc_task(); in rpc_new_task()
1114 rpc_init_task(task, setup_data); in rpc_new_task()
1115 task->tk_flags |= flags; in rpc_new_task()
1116 return task; in rpc_new_task()
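rpc_new_task() is normally reached through rpc_run_task() with a filled-in struct rpc_task_setup. A hedged caller sketch: the client, message and callback bodies are placeholders, while rpc_run_task(), rpc_put_task() and struct rpc_call_ops are the real interfaces:

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>
#include <linux/err.h>

static void my_call_done(struct rpc_task *task, void *calldata)
{
        /* Inspect task->tk_status here; invoked via rpc_exit_task(). */
}

static const struct rpc_call_ops my_call_ops = {
        .rpc_call_done = my_call_done,
};

/* Fire an async call on an existing client; @clnt and @msg come from the caller. */
static int my_start_call(struct rpc_clnt *clnt, struct rpc_message *msg)
{
        struct rpc_task_setup setup = {
                .rpc_client   = clnt,
                .rpc_message  = msg,
                .callback_ops = &my_call_ops,
                .flags        = RPC_TASK_ASYNC,
        };
        struct rpc_task *task;

        task = rpc_run_task(&setup);
        if (IS_ERR(task))
                return PTR_ERR(task);
        rpc_put_task(task); /* drop the caller's reference; rpciod finishes the call */
        return 0;
}

The early rpc_put_task() is the usual async pattern: the caller drops its reference and leaves the running state machine, and eventually rpc_release_task() (lines 1198 onward above), to finish the teardown.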
1138 static void rpc_free_task(struct rpc_task *task) in rpc_free_task() argument
1140 unsigned short tk_flags = task->tk_flags; in rpc_free_task()
1142 put_rpccred(task->tk_op_cred); in rpc_free_task()
1143 rpc_release_calldata(task->tk_ops, task->tk_calldata); in rpc_free_task()
1146 mempool_free(task, rpc_task_mempool); in rpc_free_task()
1157 static void rpc_release_resources_task(struct rpc_task *task) in rpc_release_resources_task() argument
1159 xprt_release(task); in rpc_release_resources_task()
1160 if (task->tk_msg.rpc_cred) { in rpc_release_resources_task()
1161 if (!(task->tk_flags & RPC_TASK_CRED_NOREF)) in rpc_release_resources_task()
1162 put_cred(task->tk_msg.rpc_cred); in rpc_release_resources_task()
1163 task->tk_msg.rpc_cred = NULL; in rpc_release_resources_task()
1165 rpc_task_release_client(task); in rpc_release_resources_task()
1168 static void rpc_final_put_task(struct rpc_task *task, in rpc_final_put_task() argument
1172 INIT_WORK(&task->u.tk_work, rpc_async_release); in rpc_final_put_task()
1173 queue_work(q, &task->u.tk_work); in rpc_final_put_task()
1175 rpc_free_task(task); in rpc_final_put_task()
1178 static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q) in rpc_do_put_task() argument
1180 if (atomic_dec_and_test(&task->tk_count)) { in rpc_do_put_task()
1181 rpc_release_resources_task(task); in rpc_do_put_task()
1182 rpc_final_put_task(task, q); in rpc_do_put_task()
1186 void rpc_put_task(struct rpc_task *task) in rpc_put_task() argument
1188 rpc_do_put_task(task, NULL); in rpc_put_task()
1192 void rpc_put_task_async(struct rpc_task *task) in rpc_put_task_async() argument
1194 rpc_do_put_task(task, task->tk_workqueue); in rpc_put_task_async()
1198 static void rpc_release_task(struct rpc_task *task) in rpc_release_task() argument
1200 WARN_ON_ONCE(RPC_IS_QUEUED(task)); in rpc_release_task()
1202 rpc_release_resources_task(task); in rpc_release_task()
1209 if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) { in rpc_release_task()
1211 if (!rpc_complete_task(task)) in rpc_release_task()
1214 if (!atomic_dec_and_test(&task->tk_count)) in rpc_release_task()
1217 rpc_final_put_task(task, task->tk_workqueue); in rpc_release_task()