Lines Matching defs:task (net/sunrpc/sched.c)
45 static void rpc_release_task(struct rpc_task *task);
68 bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status)
70 if (cmpxchg(&task->tk_rpc_status, 0, rpc_status) == 0)
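The cmpxchg() makes tk_rpc_status write-once: the first caller to move it off zero wins, so racing signal/cancel paths cannot overwrite each other's error. The surrounding body is elided by the match; a sketch consistent with the lines above:

bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status)
{
	/* only the first non-zero status ever sticks */
	if (cmpxchg(&task->tk_rpc_status, 0, rpc_status) == 0)
		return true;
	return false;
}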
76 rpc_task_timeout(const struct rpc_task *task)
78 unsigned long timeout = READ_ONCE(task->tk_timeout);
90 * Disable the timer for a given RPC task. Should be called with
95 __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
97 if (list_empty(&task->u.tk_wait.timer_list))
99 task->tk_timeout = 0;
100 list_del(&task->u.tk_wait.timer_list);
118 * Set up a timer for the current task.
121 __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
124 task->tk_timeout = timeout;
127 list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
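Between the two matched lines above sits the arming of the per-queue timer; a plausible reconstruction, assuming rpc_set_queue_timer() is the file-static helper that (re)schedules queue->timer_list.dwork:

static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
		unsigned long timeout)
{
	task->tk_timeout = timeout;
	if (list_empty(&queue->timer_list.list) ||
	    time_before(timeout, queue->timer_list.expires))
		rpc_set_queue_timer(queue, timeout);
	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}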
147 __rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
152 if (t->tk_owner == task->tk_owner) {
153 list_add_tail(&task->u.tk_wait.links,
155 /* Cache the queue head in task->u.tk_wait.list */
156 task->u.tk_wait.list.next = q;
157 task->u.tk_wait.list.prev = NULL;
161 INIT_LIST_HEAD(&task->u.tk_wait.links);
162 list_add_tail(&task->u.tk_wait.list, q);
169 __rpc_list_dequeue_task(struct rpc_task *task)
174 if (task->u.tk_wait.list.prev == NULL) {
175 list_del(&task->u.tk_wait.links);
178 if (!list_empty(&task->u.tk_wait.links)) {
179 t = list_first_entry(&task->u.tk_wait.links,
185 list_del(&task->u.tk_wait.links);
187 list_del(&task->u.tk_wait.list);
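These two helpers implement the priority queue's owner grouping: the first task for a given tk_owner sits on the queue proper, and later tasks for the same owner hang off its u.tk_wait.links, marked by list.prev == NULL with the queue head cached in list.next. Dequeueing must therefore promote the next sibling back onto the queue. A reconstruction of the full dequeue path, consistent with the matched lines:

static void
__rpc_list_dequeue_task(struct rpc_task *task)
{
	struct list_head *q;
	struct rpc_task *t;

	if (task->u.tk_wait.list.prev == NULL) {
		/* linked only through a sibling's links list */
		list_del(&task->u.tk_wait.links);
		return;
	}
	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_first_entry(&task->u.tk_wait.links,
				     struct rpc_task,
				     u.tk_wait.links);
		/* assume __rpc_list_enqueue_task() cached the queue head */
		q = t->u.tk_wait.list.next;
		list_add_tail(&t->u.tk_wait.list, q);
		list_del(&task->u.tk_wait.links);
	}
	list_del(&task->u.tk_wait.list);
}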
194 struct rpc_task *task,
199 __rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
206 struct rpc_task *task,
209 INIT_LIST_HEAD(&task->u.tk_wait.timer_list);
211 __rpc_add_wait_queue_priority(queue, task, queue_priority);
213 list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
214 task->tk_waitqueue = queue;
218 rpc_set_queued(task);
224 static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
226 __rpc_list_dequeue_task(task);
233 static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
235 __rpc_disable_timer(queue, task);
237 __rpc_remove_wait_queue_priority(task);
239 list_del(&task->u.tk_wait.list);
286 static void rpc_task_set_debuginfo(struct rpc_task *task)
288 struct rpc_clnt *clnt = task->tk_client;
290 /* Might be a task carrying a reverse-direction operation */
294 task->tk_pid = atomic_inc_return(&rpc_pid);
298 task->tk_pid = atomic_inc_return(&clnt->cl_pid);
301 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
306 static void rpc_set_active(struct rpc_task *task)
308 rpc_task_set_debuginfo(task);
309 set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
310 trace_rpc_task_begin(task, NULL);
317 static int rpc_complete_task(struct rpc_task *task)
319 void *m = &task->tk_runstate;
325 trace_rpc_task_complete(task, NULL);
328 clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
329 ret = atomic_dec_and_test(&task->tk_count);
343 int rpc_wait_for_completion_task(struct rpc_task *task)
345 return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
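rpc_wait_for_completion_task() blocks killably until rpc_complete_task() clears RPC_TASK_ACTIVE. A hedged usage sketch of the synchronous pattern built on it; rpc_run_task() is the clnt.c entry point, and task_setup_data is assumed filled in (see the sketch near rpc_new_task() below):

	struct rpc_task *task;
	int status;

	task = rpc_run_task(&task_setup_data);	/* returns with a ref held for us */
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = rpc_wait_for_completion_task(task);
	if (status == 0)
		status = task->tk_status;	/* result of the RPC itself */
	rpc_put_task(task);			/* drop our reference */
	return status;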
351 * Make an RPC task runnable.
353 * Note: If the task is ASYNC, and is being made runnable after sitting on an
362 struct rpc_task *task)
364 bool need_wakeup = !rpc_test_and_set_running(task);
366 rpc_clear_queued(task);
369 if (RPC_IS_ASYNC(task)) {
370 INIT_WORK(&task->u.tk_work, rpc_async_schedule);
371 queue_work(wq, &task->u.tk_work);
373 wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
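The elided flow above: the task is marked RUNNING before QUEUED is cleared, and only the caller that actually made the RUNNING transition performs the wakeup; async tasks are handed to a workqueue, while sync tasks get their sleeper kicked via the tk_runstate bit. A reconstruction consistent with the matches:

static void rpc_make_runnable(struct workqueue_struct *wq,
		struct rpc_task *task)
{
	bool need_wakeup = !rpc_test_and_set_running(task);

	rpc_clear_queued(task);
	if (!need_wakeup)
		return;
	if (RPC_IS_ASYNC(task)) {
		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		queue_work(wq, &task->u.tk_work);
	} else
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}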
379 * NB: An RPC task will only receive interrupt-driven events as long
383 struct rpc_task *task,
386 trace_rpc_task_sleep(task, q);
388 __rpc_add_wait_queue(q, task, queue_priority);
392 struct rpc_task *task,
395 if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
397 __rpc_do_sleep_on_priority(q, task, queue_priority);
401 struct rpc_task *task, unsigned long timeout,
404 if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
407 __rpc_do_sleep_on_priority(q, task, queue_priority);
408 __rpc_add_timer(q, task, timeout);
410 task->tk_status = -ETIMEDOUT;
413 static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action)
415 if (action && !WARN_ON_ONCE(task->tk_callback != NULL))
416 task->tk_callback = action;
419 static bool rpc_sleep_check_activated(struct rpc_task *task)
421 /* We shouldn't ever put an inactive task to sleep */
422 if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) {
423 task->tk_status = -EIO;
424 rpc_put_task_async(task);
430 void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
433 if (!rpc_sleep_check_activated(task))
436 rpc_set_tk_callback(task, action);
442 __rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority);
447 void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
450 if (!rpc_sleep_check_activated(task))
453 rpc_set_tk_callback(task, action);
455 WARN_ON_ONCE(task->tk_timeout != 0);
460 __rpc_sleep_on_priority(q, task, task->tk_priority);
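A hedged usage sketch for the sleep_on family: a state routine parks its task on a wait queue and names an action to run first thing after wakeup. The queue and function names below are illustrative, not from the matches:

static struct rpc_wait_queue example_waitq;	/* rpc_init_wait_queue()'d elsewhere */

static void example_slot_ready(struct rpc_task *task)
{
	task->tk_status = 0;	/* woken deliberately, not by a timer */
}

static void example_wait_for_slot(struct rpc_task *task)
{
	/* whoever owns example_waitq later calls rpc_wake_up_queued_task()
	 * or rpc_wake_up_next() to release the task */
	rpc_sleep_on(&example_waitq, task, example_slot_ready);
}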
466 struct rpc_task *task, unsigned long timeout, int priority)
468 if (!rpc_sleep_check_activated(task))
476 __rpc_sleep_on_priority_timeout(q, task, timeout, priority);
481 void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
484 if (!rpc_sleep_check_activated(task))
487 WARN_ON_ONCE(task->tk_timeout != 0);
493 __rpc_sleep_on_priority(q, task, priority);
500 * @wq: workqueue on which to run task
502 * @task: task to be woken up
504 * Caller must hold queue->lock, and have cleared the task queued flag.
508 struct rpc_task *task)
510 /* Has the task been executed yet? If not, we cannot wake it up! */
511 if (!RPC_IS_ACTIVATED(task)) {
512 printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
516 trace_rpc_task_wakeup(task, queue);
518 __rpc_remove_wait_queue(queue, task);
520 rpc_make_runnable(wq, task);
524 * Wake up a queued task while the queue lock is being held
528 struct rpc_wait_queue *queue, struct rpc_task *task,
531 if (RPC_IS_QUEUED(task)) {
533 if (task->tk_waitqueue == queue) {
534 if (action == NULL || action(task, data)) {
535 __rpc_do_wake_up_task_on_wq(wq, queue, task);
536 return task;
544 * Wake up a queued task while the queue lock is being held
547 struct rpc_task *task)
550 task, NULL, NULL);
554 * Wake up a task on a specific queue
556 void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
558 if (!RPC_IS_QUEUED(task))
561 rpc_wake_up_task_queue_locked(queue, task);
566 static bool rpc_task_action_set_status(struct rpc_task *task, void *status)
568 task->tk_status = *(int *)status;
574 struct rpc_task *task, int status)
577 task, rpc_task_action_set_status, &status);
581 * rpc_wake_up_queued_task_set_status - wake up a task and set task->tk_status
583 * @task: pointer to rpc_task
586 * If @task is queued on @queue, then it is woken up, and @task->tk_status is
587 * set to the value of @status.
591 struct rpc_task *task, int status)
593 if (!RPC_IS_QUEUED(task))
596 rpc_wake_up_task_queue_set_status_locked(queue, task, status);
601 * Wake up the next task on a priority queue.
606 struct rpc_task *task;
613 task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
623 task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
636 task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
647 return task;
660 * Wake up the first task on the wait queue.
666 struct rpc_task *task = NULL;
669 task = __rpc_find_next_queued(queue);
670 if (task != NULL)
671 task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
672 task, func, data);
675 return task;
679 * Wake up the first task on the wait queue.
688 static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
694 * Wake up the next task on the wait queue.
709 struct rpc_task *task;
712 task = __rpc_find_next_queued(queue);
713 if (task == NULL)
715 rpc_wake_up_task_queue_locked(queue, task);
740 struct rpc_task *task;
743 task = __rpc_find_next_queued(queue);
744 if (task == NULL)
746 rpc_wake_up_task_queue_set_status_locked(queue, task, status);
770 struct rpc_task *task, *n;
775 list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
776 timeo = task->tk_timeout;
778 trace_rpc_task_timeout(task, task->tk_action);
779 task->tk_status = -ETIMEDOUT;
780 rpc_wake_up_task_queue_locked(queue, task);
791 static void __rpc_atrun(struct rpc_task *task)
793 if (task->tk_status == -ETIMEDOUT)
794 task->tk_status = 0;
798 * Run a task at a later time
800 void rpc_delay(struct rpc_task *task, unsigned long delay)
802 rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay);
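rpc_delay() simply parks the task on the file-static delay_queue with __rpc_atrun as the wakeup action, and __rpc_atrun() converts the inevitable -ETIMEDOUT back to 0, so the task resumes with a clean status. A hedged fragment from a state routine that wants to retry later; the next state and the three-second delay are illustrative:

	task->tk_action = example_retry_state;	/* illustrative next FSM step */
	rpc_delay(task, 3 * HZ);		/* resumes ~3s later, tk_status == 0 */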
807 * Helper to call task->tk_ops->rpc_call_prepare
809 void rpc_prepare_task(struct rpc_task *task)
811 task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
815 rpc_init_task_statistics(struct rpc_task *task)
818 task->tk_garb_retry = 2;
819 task->tk_cred_retry = 2;
822 task->tk_start = ktime_get();
826 rpc_reset_task_statistics(struct rpc_task *task)
828 task->tk_timeouts = 0;
829 task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT);
830 rpc_init_task_statistics(task);
834 * Helper that calls task->tk_ops->rpc_call_done if it exists
836 void rpc_exit_task(struct rpc_task *task)
838 trace_rpc_task_end(task, task->tk_action);
839 task->tk_action = NULL;
840 if (task->tk_ops->rpc_count_stats)
841 task->tk_ops->rpc_count_stats(task, task->tk_calldata);
842 else if (task->tk_client)
843 rpc_count_iostats(task, task->tk_client->cl_metrics);
844 if (task->tk_ops->rpc_call_done != NULL) {
845 trace_rpc_task_call_done(task, task->tk_ops->rpc_call_done);
846 task->tk_ops->rpc_call_done(task, task->tk_calldata);
847 if (task->tk_action != NULL) {
849 xprt_release(task);
850 rpc_reset_task_statistics(task);
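Note the restart contract in the lines above: if rpc_call_done() leaves tk_action non-NULL, rpc_exit_task() releases the transport slot and resets statistics so the request can run again. A hedged sketch of a callback using it; rpc_restart_call() lives in clnt.c, and the error value and delay are illustrative:

static void example_jukebox_done(struct rpc_task *task, void *calldata)
{
	if (task->tk_status == -EJUKEBOX) {
		rpc_restart_call(task);		/* re-arms tk_action, see above */
		rpc_delay(task, 5 * HZ);	/* back off before the retry */
	}
}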
855 void rpc_signal_task(struct rpc_task *task)
859 if (!RPC_IS_ACTIVATED(task))
862 if (!rpc_task_set_rpc_status(task, -ERESTARTSYS))
864 trace_rpc_task_signalled(task, task->tk_action);
865 set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
867 queue = READ_ONCE(task->tk_waitqueue);
869 rpc_wake_up_queued_task(queue, task);
872 void rpc_task_try_cancel(struct rpc_task *task, int error)
876 if (!rpc_task_set_rpc_status(task, error))
878 queue = READ_ONCE(task->tk_waitqueue);
880 rpc_wake_up_queued_task(queue, task);
883 void rpc_exit(struct rpc_task *task, int status)
885 task->tk_status = status;
886 task->tk_action = rpc_exit_task;
887 rpc_wake_up_queued_task(task->tk_waitqueue, task);
909 static void __rpc_execute(struct rpc_task *task)
912 int task_is_async = RPC_IS_ASYNC(task);
916 WARN_ON_ONCE(RPC_IS_QUEUED(task));
917 if (RPC_IS_QUEUED(task))
926 * tk_action may be NULL if the task has been killed.
928 do_action = task->tk_action;
931 (status = READ_ONCE(task->tk_rpc_status)) != 0) {
932 task->tk_status = status;
936 if (task->tk_callback) {
937 do_action = task->tk_callback;
938 task->tk_callback = NULL;
942 if (RPC_IS_SWAPPER(task) ||
943 xprt_needs_memalloc(task->tk_xprt, task))
946 trace_rpc_task_run_action(task, do_action);
947 do_action(task);
950 * Lockless check for whether task is sleeping or not.
952 if (!RPC_IS_QUEUED(task)) {
966 queue = task->tk_waitqueue;
968 if (!RPC_IS_QUEUED(task)) {
972 /* Wake up any task that has an exit status */
973 if (READ_ONCE(task->tk_rpc_status) != 0) {
974 rpc_wake_up_task_queue_locked(queue, task);
978 rpc_clear_running(task);
983 /* sync task: sleep here */
984 trace_rpc_task_sync_sleep(task, task->tk_action);
985 status = out_of_line_wait_on_bit(&task->tk_runstate,
990 * When a sync task receives a signal, it exits with
995 rpc_signal_task(task);
997 trace_rpc_task_sync_wake(task, task->tk_action);
1000 /* Release all resources associated with the task */
1001 rpc_release_task(task);
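Condensed, the __rpc_execute() matches above come from a loop of roughly this shape (queue->lock rechecks, tracing, freezer and signal bookkeeping elided):

	for (;;) {
		void (*do_action)(struct rpc_task *) = task->tk_action;

		/* a one-shot wakeup callback overrides the next FSM state */
		if (task->tk_callback) {
			do_action = task->tk_callback;
			task->tk_callback = NULL;
		}
		if (!do_action)
			break;			/* finished, or killed */
		do_action(task);
		if (!RPC_IS_QUEUED(task))
			continue;		/* still runnable: next step */
		rpc_clear_running(task);
		if (task_is_async)
			return;			/* rpc_make_runnable() requeues us */
		/* sync task: sleep until a waker clears the QUEUED bit */
		out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_QUEUED,
					rpc_wait_bit_killable, TASK_KILLABLE);
	}
	rpc_release_task(task);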
1009 * This may be called recursively if e.g. an async NFS task updates
1010 * the attributes and finds that dirty pages must be flushed.
1011 * NOTE: Upon exit of this function the task is guaranteed to be
1012 * released. In particular note that tk_release() will have
1013 * been called, so your task memory may have been freed.
1015 void rpc_execute(struct rpc_task *task)
1017 bool is_async = RPC_IS_ASYNC(task);
1019 rpc_set_active(task);
1020 rpc_make_runnable(rpciod_workqueue, task);
1023 __rpc_execute(task);
1038 * @task: RPC task
1040 * A single memory region is allocated, which is split between the
1041 * RPC call and RPC reply that this task is being used for. When
1042 * this RPC is retired, the memory is released by calling rpc_free.
1053 int rpc_malloc(struct rpc_task *task)
1055 struct rpc_rqst *rqst = task->tk_rqstp;
1064 if (!buf && RPC_IS_ASYNC(task))
1081 * @task: RPC task
1084 void rpc_free(struct rpc_task *task)
1086 void *buffer = task->tk_rqstp->rq_buffer;
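The kerneldoc above describes one allocation split between call and reply; the elided arithmetic, sketched below (simplified: the real path prefers a dedicated slab cache for small buffers, and struct rpc_buffer carries a length header ahead of its payload):

int rpc_malloc(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
	struct rpc_buffer *buf;

	size += sizeof(struct rpc_buffer);	/* room for the length header */
	buf = kmalloc(size, rpc_task_gfp_mask());
	if (!buf && RPC_IS_ASYNC(task))	/* never block rpciod: try the mempool */
		buf = mempool_alloc(rpc_buffer_mempool, GFP_NOWAIT);
	if (!buf)
		return -ENOMEM;

	buf->len = size;
	rqst->rq_buffer = buf->data;	/* marshalled call goes here */
	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize; /* reply half */
	return 0;
}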
1101 * Creation and deletion of RPC task structures
1103 static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
1105 memset(task, 0, sizeof(*task));
1106 atomic_set(&task->tk_count, 1);
1107 task->tk_flags = task_setup_data->flags;
1108 task->tk_ops = task_setup_data->callback_ops;
1109 task->tk_calldata = task_setup_data->callback_data;
1110 INIT_LIST_HEAD(&task->tk_task);
1112 task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
1113 task->tk_owner = current->tgid;
1116 task->tk_workqueue = task_setup_data->workqueue;
1118 task->tk_xprt = rpc_task_get_xprt(task_setup_data->rpc_client,
1121 task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred);
1123 if (task->tk_ops->rpc_call_prepare != NULL)
1124 task->tk_action = rpc_prepare_task;
1126 rpc_init_task_statistics(task);
1131 struct rpc_task *task;
1133 task = kmem_cache_alloc(rpc_task_slabp, rpc_task_gfp_mask());
1134 if (task)
1135 return task;
1140 * Create a new task for the specified client.
1144 struct rpc_task *task = setup_data->task;
1147 if (task == NULL) {
1148 task = rpc_alloc_task();
1149 if (task == NULL) {
1157 rpc_init_task(task, setup_data);
1158 task->tk_flags |= flags;
1159 return task;
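The usual way into rpc_new_task()/rpc_init_task() is clnt.c's rpc_run_task() with a filled-in rpc_task_setup. A hedged end-to-end sketch; every example_* name is illustrative:

static void example_prepare(struct rpc_task *task, void *calldata)
{
	rpc_call_start(task);	/* proceed straight to the call */
}

static void example_done(struct rpc_task *task, void *calldata)
{
	/* inspect task->tk_status, kick off follow-up work, ... */
}

static const struct rpc_call_ops example_call_ops = {
	.rpc_call_prepare = example_prepare,	/* becomes the initial tk_action */
	.rpc_call_done	  = example_done,
};

	/* ... in some submit path: */
	struct rpc_task_setup task_setup_data = {
		.rpc_client	= clnt,
		.rpc_message	= &msg,
		.callback_ops	= &example_call_ops,
		.flags		= RPC_TASK_ASYNC,
	};
	struct rpc_task *task = rpc_run_task(&task_setup_data);

	if (!IS_ERR(task))
		rpc_put_task(task);	/* it runs on under its own reference */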
1163 * rpc_free_task - release rpc task and perform cleanups
1181 static void rpc_free_task(struct rpc_task *task)
1183 unsigned short tk_flags = task->tk_flags;
1185 put_rpccred(task->tk_op_cred);
1186 rpc_release_calldata(task->tk_ops, task->tk_calldata);
1189 mempool_free(task, rpc_task_mempool);
1200 static void rpc_release_resources_task(struct rpc_task *task)
1202 xprt_release(task);
1203 if (task->tk_msg.rpc_cred) {
1204 if (!(task->tk_flags & RPC_TASK_CRED_NOREF))
1205 put_cred(task->tk_msg.rpc_cred);
1206 task->tk_msg.rpc_cred = NULL;
1208 rpc_task_release_client(task);
1211 static void rpc_final_put_task(struct rpc_task *task,
1215 INIT_WORK(&task->u.tk_work, rpc_async_release);
1216 queue_work(q, &task->u.tk_work);
1218 rpc_free_task(task);
1221 static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
1223 if (atomic_dec_and_test(&task->tk_count)) {
1224 rpc_release_resources_task(task);
1225 rpc_final_put_task(task, q);
1229 void rpc_put_task(struct rpc_task *task)
1231 rpc_do_put_task(task, NULL);
1235 void rpc_put_task_async(struct rpc_task *task)
1237 rpc_do_put_task(task, task->tk_workqueue);
1241 static void rpc_release_task(struct rpc_task *task)
1243 WARN_ON_ONCE(RPC_IS_QUEUED(task));
1245 rpc_release_resources_task(task);
1249 * so it should be safe to use task->tk_count as a test for whether
1252 if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
1253 /* Wake up anyone who may be waiting for task completion */
1254 if (!rpc_complete_task(task))
1257 if (!atomic_dec_and_test(&task->tk_count))
1260 rpc_final_put_task(task, task->tk_workqueue);