Lines Matching defs:task (cross-reference listing over net/sunrpc/sched.c, the Linux kernel SUNRPC task scheduler). Each entry below carries its line number in that file; only source lines containing the identifier task were captured, so multi-line comments and function bodies appear clipped mid-sentence.

45 static void			 rpc_release_task(struct rpc_task *task);
61 rpc_task_timeout(const struct rpc_task *task)
63 unsigned long timeout = READ_ONCE(task->tk_timeout);
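
Only the lines containing "task" survive above, so the body of rpc_task_timeout() is clipped. A hedged reconstruction from kernels of this vintage (exact text may differ by version): tk_timeout stores an absolute expiry in jiffies, and the helper returns how long the task still has to run, or 0 when no timer is armed or it has already fired.

	unsigned long
	rpc_task_timeout(const struct rpc_task *task)
	{
		unsigned long timeout = READ_ONCE(task->tk_timeout);

		if (timeout != 0) {
			unsigned long now = jiffies;

			if (time_before(now, timeout))
				return timeout - now;	/* jiffies remaining */
		}
		return 0;	/* disarmed, or already expired */
	}
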
75 * Disable the timer for a given RPC task. Should be called with
76 * queue->lock and bh_disabled in order to avoid races within
77 * rpc_run_timer().
80 __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
82 if (list_empty(&task->u.tk_wait.timer_list))
84 task->tk_timeout = 0;
85 list_del(&task->u.tk_wait.timer_list);
103 * Set up a timer for the current task.
106 __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
109 task->tk_timeout = timeout;
112 list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
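
Taken together, __rpc_disable_timer() and __rpc_add_timer() maintain one invariant: a task sits on queue->timer_list.list exactly when it has a pending timeout, with tk_timeout holding the absolute expiry. A hedged reconstruction of the arming side (the rpc_set_queue_timer() call, which pulls the queue's delayed work forward, is inferred from contemporary sources):

	static void
	__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
			unsigned long timeout)
	{
		task->tk_timeout = timeout;	/* absolute expiry, in jiffies */
		if (list_empty(&queue->timer_list.list) ||
		    time_before(timeout, queue->timer_list.expires))
			rpc_set_queue_timer(queue, timeout);
		list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
	}
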
132 __rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
137 if (t->tk_owner == task->tk_owner) {
138 list_add_tail(&task->u.tk_wait.links,
140 /* Cache the queue head in task->u.tk_wait.list */
141 task->u.tk_wait.list.next = q;
142 task->u.tk_wait.list.prev = NULL;
146 INIT_LIST_HEAD(&task->u.tk_wait.links);
147 list_add_tail(&task->u.tk_wait.list, q);
154 __rpc_list_dequeue_task(struct rpc_task *task)
159 if (task->u.tk_wait.list.prev == NULL) {
160 list_del(&task->u.tk_wait.links);
163 if (!list_empty(&task->u.tk_wait.links)) {
164 t = list_first_entry(&task->u.tk_wait.links,
170 list_del(&task->u.tk_wait.links);
172 list_del(&task->u.tk_wait.list);
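
Lines 132-172 implement the owner-grouping trick behind priority queues: the first task with a given tk_owner sits on the queue proper, and later tasks with the same owner chain off it via u.tk_wait.links. A chained task is marked by u.tk_wait.list.prev == NULL, and its list.next caches the queue head (lines 140-142), so when the group leader leaves, the next member can splice itself onto the queue in the leader's place. A hedged reconstruction of the dequeue side shows all three cases:

	static void
	__rpc_list_dequeue_task(struct rpc_task *task)
	{
		struct list_head *q;
		struct rpc_task *t;

		/* Case 1: a follower, linked off a group leader. */
		if (task->u.tk_wait.list.prev == NULL) {
			list_del(&task->u.tk_wait.links);
			return;
		}
		/* Case 2: a leader with followers: promote the next one. */
		if (!list_empty(&task->u.tk_wait.links)) {
			t = list_first_entry(&task->u.tk_wait.links,
					struct rpc_task,
					u.tk_wait.links);
			q = t->u.tk_wait.list.next;	/* cached queue head */
			list_add_tail(&t->u.tk_wait.list, q);
			list_del(&task->u.tk_wait.links);
		}
		/* Case 3 (also the tail of case 2): leave the queue. */
		list_del(&task->u.tk_wait.list);
	}
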
179 struct rpc_task *task,
184 __rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
191 struct rpc_task *task,
194 INIT_LIST_HEAD(&task->u.tk_wait.timer_list);
196 __rpc_add_wait_queue_priority(queue, task, queue_priority);
198 list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
199 task->tk_waitqueue = queue;
203 rpc_set_queued(task);
209 static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
211 __rpc_list_dequeue_task(task);
218 static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
220 __rpc_disable_timer(queue, task);
222 __rpc_remove_wait_queue_priority(task);
224 list_del(&task->u.tk_wait.list);
271 static void rpc_task_set_debuginfo(struct rpc_task *task)
275 task->tk_pid = atomic_inc_return(&rpc_pid);
278 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
283 static void rpc_set_active(struct rpc_task *task)
285 rpc_task_set_debuginfo(task);
286 set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
287 trace_rpc_task_begin(task, NULL);
294 static int rpc_complete_task(struct rpc_task *task)
296 void *m = &task->tk_runstate;
302 trace_rpc_task_complete(task, NULL);
305 clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
306 ret = atomic_dec_and_test(&task->tk_count);
320 int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action)
324 return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
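
rpc_complete_task() and __rpc_wait_for_completion_task() pair up through the RPC_TASK_ACTIVE bit: completion clears the bit and wakes bit-waiters, while waiters sleep on it. A sketch of a hypothetical caller (example_run_and_wait() is illustrative, not kernel code; rpc_run_task(), rpc_wait_for_completion_task() and rpc_put_task() are the real entry points):

	static int example_run_and_wait(const struct rpc_task_setup *setup)
	{
		struct rpc_task *task;
		int status;

		task = rpc_run_task(setup);	/* returns with a reference held */
		if (IS_ERR(task))
			return PTR_ERR(task);
		/* Blocks until rpc_complete_task() clears RPC_TASK_ACTIVE;
		 * a fatal signal would return -ERESTARTSYS (ignored here). */
		rpc_wait_for_completion_task(task);
		status = task->tk_status;
		rpc_put_task(task);
		return status;
	}
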
330 * Make an RPC task runnable.
332 * Note: If the task is ASYNC, and is being made runnable after sitting on an
333 * rpc_wait_queue, this must be called with the queue spinlock held to protect
334 * the wait queue operation.
341 struct rpc_task *task)
343 bool need_wakeup = !rpc_test_and_set_running(task);
345 rpc_clear_queued(task);
348 if (RPC_IS_ASYNC(task)) {
349 INIT_WORK(&task->u.tk_work, rpc_async_schedule);
350 queue_work(wq, &task->u.tk_work);
352 wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
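
rpc_make_runnable() is where async and sync tasks part ways: an async task is handed to a workqueue to be driven by rpc_async_schedule(), while a sync task's sleeping caller is woken via the RPC_TASK_QUEUED bit. A hedged reconstruction (the early return for a task that was already running is inferred from the need_wakeup test on line 343):

	static void rpc_make_runnable(struct workqueue_struct *wq,
			struct rpc_task *task)
	{
		bool need_wakeup = !rpc_test_and_set_running(task);

		rpc_clear_queued(task);
		if (!need_wakeup)
			return;		/* already running: nothing to kick */
		if (RPC_IS_ASYNC(task)) {
			INIT_WORK(&task->u.tk_work, rpc_async_schedule);
			queue_work(wq, &task->u.tk_work);
		} else
			wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
	}
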
358 * NB: An RPC task will only receive interrupt-driven events as long
359 * as it's on a wait queue.
362 struct rpc_task *task,
365 trace_rpc_task_sleep(task, q);
367 __rpc_add_wait_queue(q, task, queue_priority);
371 struct rpc_task *task,
374 if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
376 __rpc_do_sleep_on_priority(q, task, queue_priority);
380 struct rpc_task *task, unsigned long timeout,
383 if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
386 __rpc_do_sleep_on_priority(q, task, queue_priority);
387 __rpc_add_timer(q, task, timeout);
389 task->tk_status = -ETIMEDOUT;
392 static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action)
394 if (action && !WARN_ON_ONCE(task->tk_callback != NULL))
395 task->tk_callback = action;
398 static bool rpc_sleep_check_activated(struct rpc_task *task)
400 /* We shouldn't ever put an inactive task to sleep */
401 if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) {
402 task->tk_status = -EIO;
403 rpc_put_task_async(task);
409 void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
412 if (!rpc_sleep_check_activated(task))
415 rpc_set_tk_callback(task, action);
421 __rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority);
426 void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
429 if (!rpc_sleep_check_activated(task))
432 rpc_set_tk_callback(task, action);
434 WARN_ON_ONCE(task->tk_timeout != 0);
439 __rpc_sleep_on_priority(q, task, task->tk_priority);
445 struct rpc_task *task, unsigned long timeout, int priority)
447 if (!rpc_sleep_check_activated(task))
455 __rpc_sleep_on_priority_timeout(q, task, timeout, priority);
460 void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
463 if (!rpc_sleep_check_activated(task))
466 WARN_ON_ONCE(task->tk_timeout != 0);
472 __rpc_sleep_on_priority(q, task, priority);
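
A sketch of how the sleep primitives are typically consumed (example_timed_out() and example_wait() are hypothetical; rpc_sleep_on_timeout() is the real API, cf. lines 409-421). The callback runs from __rpc_execute() after the task is woken, so it can inspect and rewrite tk_status before the next action:

	/* Hypothetical wake-up callback: treat expiry as "retry now". */
	static void example_timed_out(struct rpc_task *task)
	{
		if (task->tk_status == -ETIMEDOUT)
			task->tk_status = 0;
	}

	static void example_wait(struct rpc_wait_queue *q, struct rpc_task *task)
	{
		/* Queue the task; it is woken by rpc_wake_up_queued_task() or,
		 * after 10s, by the queue timer with tk_status == -ETIMEDOUT. */
		rpc_sleep_on_timeout(q, task, example_timed_out, jiffies + 10 * HZ);
	}
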
479 * @wq: workqueue on which to run task
481 * @task: task to be woken up
483 * Caller must hold queue->lock, and have cleared the task queued flag.
487 struct rpc_task *task)
489 /* Has the task been executed yet? If not, we cannot wake it up! */
490 if (!RPC_IS_ACTIVATED(task)) {
491 printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
495 trace_rpc_task_wakeup(task, queue);
497 __rpc_remove_wait_queue(queue, task);
499 rpc_make_runnable(wq, task);
503 * Wake up a queued task while the queue lock is being held
507 struct rpc_wait_queue *queue, struct rpc_task *task,
510 if (RPC_IS_QUEUED(task)) {
512 if (task->tk_waitqueue == queue) {
513 if (action == NULL || action(task, data)) {
514 __rpc_do_wake_up_task_on_wq(wq, queue, task);
515 return task;
523 * Wake up a queued task while the queue lock is being held
526 struct rpc_task *task)
529 task, NULL, NULL);
533 * Wake up a task on a specific queue
535 void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
537 if (!RPC_IS_QUEUED(task))
540 rpc_wake_up_task_queue_locked(queue, task);
545 static bool rpc_task_action_set_status(struct rpc_task *task, void *status)
547 task->tk_status = *(int *)status;
553 struct rpc_task *task, int status)
556 task, rpc_task_action_set_status, &status);
560 * rpc_wake_up_queued_task_set_status - wake up a task and set task->tk_status
562 * @task: pointer to rpc_task
565 * If @task is queued on @queue, then it is woken up, and @task->tk_status is
566 * set to the value of @status.
570 struct rpc_task *task, int status)
572 if (!RPC_IS_QUEUED(task))
575 rpc_wake_up_task_queue_set_status_locked(queue, task, status);
580 * Wake up the next task on a priority queue.
585 struct rpc_task *task;
592 task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
602 task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
615 task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
626 return task;
639 * Wake up the first task on the wait queue.
645 struct rpc_task *task = NULL;
648 task = __rpc_find_next_queued(queue);
649 if (task != NULL)
650 task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
651 task, func, data);
654 return task;
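
The rpc_wake_up_first() family takes a bool (*func)(struct rpc_task *, void *) filter: the first queued task for which the filter returns true is woken; everything else stays queued. A hypothetical filter (example_owned_by() and example_wake_one() are illustrative, not kernel code):

	/* Wake the first queued task that belongs to @clnt. */
	static bool example_owned_by(struct rpc_task *task, void *data)
	{
		return task->tk_client == (struct rpc_clnt *)data;
	}

	static void example_wake_one(struct rpc_wait_queue *q, struct rpc_clnt *clnt)
	{
		/* Returns the woken task, or NULL if nothing matched. */
		rpc_wake_up_first(q, example_owned_by, clnt);
	}
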
658 * Wake up the first task on the wait queue.
667 static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
673 * Wake up the next task on the wait queue.
688 struct rpc_task *task;
691 task = __rpc_find_next_queued(queue);
692 if (task == NULL)
694 rpc_wake_up_task_queue_locked(queue, task);
719 struct rpc_task *task;
722 task = __rpc_find_next_queued(queue);
723 if (task == NULL)
725 rpc_wake_up_task_queue_set_status_locked(queue, task, status);
749 struct rpc_task *task, *n;
754 list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
755 timeo = task->tk_timeout;
757 trace_rpc_task_timeout(task, task->tk_action);
758 task->tk_status = -ETIMEDOUT;
759 rpc_wake_up_task_queue_locked(queue, task);
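
The surrounding function (lines 749-759) is the per-queue timer worker: it scans timer_list, fails each expired task with -ETIMEDOUT and wakes it, then re-arms the queue timer for the earliest remaining expiry. A hedged reconstruction of the scan (the expires bookkeeping and the rpc_set_queue_timer() re-arm are inferred from contemporary sources):

	spin_lock(&queue->lock);
	expires = now = jiffies;
	list_for_each_entry_safe(task, n, &queue->timer_list.list,
				 u.tk_wait.timer_list) {
		timeo = task->tk_timeout;
		if (time_after_eq(now, timeo)) {
			/* Expired: fail the task and wake it. */
			trace_rpc_task_timeout(task, task->tk_action);
			task->tk_status = -ETIMEDOUT;
			rpc_wake_up_task_queue_locked(queue, task);
			continue;
		}
		/* Track the earliest expiry that is still pending. */
		if (expires == now || time_after(expires, timeo))
			expires = timeo;
	}
	if (!list_empty(&queue->timer_list.list))
		rpc_set_queue_timer(queue, expires);	/* re-arm */
	spin_unlock(&queue->lock);
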
770 static void __rpc_atrun(struct rpc_task *task)
772 if (task->tk_status == -ETIMEDOUT)
773 task->tk_status = 0;
777 * Run a task at a later time
779 void rpc_delay(struct rpc_task *task, unsigned long delay)
781 rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay);
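
rpc_delay() parks the task on the static delay_queue with __rpc_atrun() as the wake-up callback; __rpc_atrun() (lines 770-773) then converts the expected -ETIMEDOUT back to 0, so the delay reads as a successful pause rather than an error. The classic consumer pattern, modeled on NFSv3 "jukebox" handling (example_handle_jukebox() is illustrative; rpc_restart_call() and EJUKEBOX are real):

	static int example_handle_jukebox(struct rpc_task *task)
	{
		if (task->tk_status != -EJUKEBOX)
			return 0;
		task->tk_status = 0;
		if (rpc_restart_call(task))	/* rewind the state machine */
			rpc_delay(task, 2 * HZ);	/* nap before retrying */
		return 1;
	}
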
786 * Helper to call task->tk_ops->rpc_call_prepare
788 void rpc_prepare_task(struct rpc_task *task)
790 task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
794 rpc_init_task_statistics(struct rpc_task *task)
797 task->tk_garb_retry = 2;
798 task->tk_cred_retry = 2;
801 task->tk_start = ktime_get();
805 rpc_reset_task_statistics(struct rpc_task *task)
807 task->tk_timeouts = 0;
808 task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT);
809 rpc_init_task_statistics(task);
813 * Helper that calls task->tk_ops->rpc_call_done if it exists
815 void rpc_exit_task(struct rpc_task *task)
817 trace_rpc_task_end(task, task->tk_action);
818 task->tk_action = NULL;
819 if (task->tk_ops->rpc_count_stats)
820 task->tk_ops->rpc_count_stats(task, task->tk_calldata);
821 else if (task->tk_client)
822 rpc_count_iostats(task, task->tk_client->cl_metrics);
823 if (task->tk_ops->rpc_call_done != NULL) {
824 task->tk_ops->rpc_call_done(task, task->tk_calldata);
825 if (task->tk_action != NULL) {
827 xprt_release(task);
828 rpc_reset_task_statistics(task);
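
Note the retry hook in rpc_exit_task() (lines 817-828): tk_action is cleared before rpc_call_done runs, so if the callback installs a new action the task is not finished; the transport slot is released and the statistics reset, and the task goes around the state machine again. A hypothetical rpc_call_done exploiting this (example_call_done() is illustrative, not kernel code):

	static void example_call_done(struct rpc_task *task, void *calldata)
	{
		/* rpc_restart_call() re-arms task->tk_action, so
		 * rpc_exit_task() will keep the task alive. */
		if (task->tk_status == -EAGAIN && rpc_restart_call(task))
			rpc_delay(task, HZ >> 4);	/* short back-off */
	}
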
833 void rpc_signal_task(struct rpc_task *task)
837 if (!RPC_IS_ACTIVATED(task))
840 trace_rpc_task_signalled(task, task->tk_action);
841 set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
843 queue = READ_ONCE(task->tk_waitqueue);
845 rpc_wake_up_queued_task_set_status(queue, task, -ERESTARTSYS);
848 void rpc_exit(struct rpc_task *task, int status)
850 task->tk_status = status;
851 task->tk_action = rpc_exit_task;
852 rpc_wake_up_queued_task(task->tk_waitqueue, task);
865 static void __rpc_execute(struct rpc_task *task)
868 int task_is_async = RPC_IS_ASYNC(task);
871 WARN_ON_ONCE(RPC_IS_QUEUED(task));
872 if (RPC_IS_QUEUED(task))
881 * tk_action may be NULL if the task has been killed.
885 do_action = task->tk_action;
886 if (task->tk_callback) {
887 do_action = task->tk_callback;
888 task->tk_callback = NULL;
892 trace_rpc_task_run_action(task, do_action);
893 do_action(task);
896 * Lockless check for whether task is sleeping or not.
898 if (!RPC_IS_QUEUED(task))
904 if (RPC_SIGNALLED(task)) {
905 task->tk_rpc_status = -ERESTARTSYS;
906 rpc_exit(task, -ERESTARTSYS);
918 queue = task->tk_waitqueue;
920 if (!RPC_IS_QUEUED(task)) {
924 rpc_clear_running(task);
929 /* sync task: sleep here */
930 trace_rpc_task_sync_sleep(task, task->tk_action);
931 status = out_of_line_wait_on_bit(&task->tk_runstate,
936 * When a sync task receives a signal, it exits with
937 * -ERESTARTSYS. In order to catch any callbacks that
938 * clean up after sleeping on some queue, we don't
939 * actually stop the thread, but mark it as "signalled".
941 trace_rpc_task_signalled(task, task->tk_action);
942 set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
943 task->tk_rpc_status = -ERESTARTSYS;
944 rpc_exit(task, -ERESTARTSYS);
946 trace_rpc_task_sync_wake(task, task->tk_action);
949 /* Release all resources associated with the task */
950 rpc_release_task(task);
956 * This may be called recursively if e.g. an async NFS task updates
957 * the attributes and finds that dirty pages must be flushed.
958 * NOTE: Upon exit of this function the task is guaranteed to be
959 * released. In particular note that tk_release() will have
960 * been called, so your task memory may have been freed.
962 void rpc_execute(struct rpc_task *task)
964 bool is_async = RPC_IS_ASYNC(task);
966 rpc_set_active(task);
967 rpc_make_runnable(rpciod_workqueue, task);
970 __rpc_execute(task);
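
__rpc_execute() (lines 865-950) drives the task as a finite state machine: each iteration runs exactly one action (tk_callback if set, otherwise tk_action), then either loops, parks the task on a wait queue, or terminates when tk_action is NULL. A minimal self-contained userspace model of that loop, assuming nothing from the kernel (the toy_* names are invented):

	#include <stdio.h>

	struct toy_task;
	typedef void (*toy_action)(struct toy_task *);

	struct toy_task {
		toy_action tk_action;	/* next state; NULL means "done" */
		int tk_status;
	};

	static void toy_done(struct toy_task *t)
	{
		printf("done, status %d\n", t->tk_status);
		t->tk_action = NULL;		/* terminate the loop */
	}

	static void toy_start(struct toy_task *t)
	{
		printf("start\n");
		t->tk_status = 0;
		t->tk_action = toy_done;	/* advance to the next state */
	}

	static void toy_execute(struct toy_task *t)
	{
		while (t->tk_action != NULL)
			t->tk_action(t);	/* one action per iteration */
	}

	int main(void)
	{
		struct toy_task t = { .tk_action = toy_start };

		toy_execute(&t);
		return 0;
	}

The real loop additionally re-checks RPC_IS_QUEUED() locklessly each pass and, for sync tasks, sleeps on the RPC_TASK_QUEUED bit instead of returning.
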
985 * @task: RPC task
988 * RPC call and RPC reply that this task is being used for. When
989 * this RPC is retired, the memory is released by calling rpc_free.
1000 int rpc_malloc(struct rpc_task *task)
1002 struct rpc_rqst *rqst = task->tk_rqstp;
1007 if (RPC_IS_ASYNC(task))
1009 if (RPC_IS_SWAPPER(task))
1030 * @task: RPC task
1033 void rpc_free(struct rpc_task *task)
1035 void *buffer = task->tk_rqstp->rq_buffer;
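
rpc_malloc() runs on rpciod, so it must never sleep or recurse into the filesystem; the RPC_IS_ASYNC()/RPC_IS_SWAPPER() tests visible at lines 1007-1009 select the gfp mask accordingly. A hedged sketch of that selection (buffer sizing and the mempool fallback omitted):

	gfp_t gfp = GFP_NOFS;	/* sync task: may block, but no FS recursion */

	if (RPC_IS_ASYNC(task))
		gfp = GFP_NOWAIT | __GFP_NOWARN;	/* rpciod must not sleep */
	if (RPC_IS_SWAPPER(task))
		gfp |= __GFP_MEMALLOC;	/* swap-out path may tap reserves */
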
1050 * Creation and deletion of RPC task structures
1052 static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
1054 memset(task, 0, sizeof(*task));
1055 atomic_set(&task->tk_count, 1);
1056 task->tk_flags = task_setup_data->flags;
1057 task->tk_ops = task_setup_data->callback_ops;
1058 task->tk_calldata = task_setup_data->callback_data;
1059 INIT_LIST_HEAD(&task->tk_task);
1061 task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
1062 task->tk_owner = current->tgid;
1065 task->tk_workqueue = task_setup_data->workqueue;
1067 task->tk_xprt = rpc_task_get_xprt(task_setup_data->rpc_client,
1070 task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred);
1072 if (task->tk_ops->rpc_call_prepare != NULL)
1073 task->tk_action = rpc_prepare_task;
1075 rpc_init_task_statistics(task);
1085 * Create a new task for the specified client.
1089 struct rpc_task *task = setup_data->task;
1092 if (task == NULL) {
1093 task = rpc_alloc_task();
1097 rpc_init_task(task, setup_data);
1098 task->tk_flags |= flags;
1099 return task;
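
rpc_new_task() only builds the task; rpc_run_task() (in net/sunrpc/clnt.c) pairs it with rpc_execute(). For a task without RPC_TASK_ASYNC, rpc_execute() returns only after the state machine has finished, which yields the canonical synchronous-call shape (example_run_sync() is hypothetical, modeled on rpc_call_sync(); example_call_ops is an assumed ops table):

	static const struct rpc_call_ops example_call_ops = { };	/* assumed */

	static int example_run_sync(struct rpc_clnt *clnt,
				    const struct rpc_message *msg)
	{
		struct rpc_task_setup setup = {
			.rpc_client	= clnt,
			.rpc_message	= msg,
			.callback_ops	= &example_call_ops,
			.flags		= RPC_TASK_SOFT,
		};
		struct rpc_task *task;
		int status;

		task = rpc_run_task(&setup);
		if (IS_ERR(task))
			return PTR_ERR(task);
		status = task->tk_status;	/* task already ran to completion */
		rpc_put_task(task);		/* drop the reference we were given */
		return status;
	}
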
1103 * rpc_free_task - release rpc task and perform cleanups
1121 static void rpc_free_task(struct rpc_task *task)
1123 unsigned short tk_flags = task->tk_flags;
1125 put_rpccred(task->tk_op_cred);
1126 rpc_release_calldata(task->tk_ops, task->tk_calldata);
1129 mempool_free(task, rpc_task_mempool);
1140 static void rpc_release_resources_task(struct rpc_task *task)
1142 xprt_release(task);
1143 if (task->tk_msg.rpc_cred) {
1144 if (!(task->tk_flags & RPC_TASK_CRED_NOREF))
1145 put_cred(task->tk_msg.rpc_cred);
1146 task->tk_msg.rpc_cred = NULL;
1148 rpc_task_release_client(task);
1151 static void rpc_final_put_task(struct rpc_task *task,
1155 INIT_WORK(&task->u.tk_work, rpc_async_release);
1156 queue_work(q, &task->u.tk_work);
1158 rpc_free_task(task);
1161 static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
1163 if (atomic_dec_and_test(&task->tk_count)) {
1164 rpc_release_resources_task(task);
1165 rpc_final_put_task(task, q);
1169 void rpc_put_task(struct rpc_task *task)
1171 rpc_do_put_task(task, NULL);
1175 void rpc_put_task_async(struct rpc_task *task)
1177 rpc_do_put_task(task, task->tk_workqueue);
1181 static void rpc_release_task(struct rpc_task *task)
1183 WARN_ON_ONCE(RPC_IS_QUEUED(task));
1185 rpc_release_resources_task(task);
1189 * so it should be safe to use task->tk_count as a test for whether
1190 * or not any other processes still hold references to our rpc_task.
1192 if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
1193 /* Wake up anyone who may be waiting for task completion */
1194 if (!rpc_complete_task(task))
1197 if (!atomic_dec_and_test(&task->tk_count))
1200 rpc_final_put_task(task, task->tk_workqueue);
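
The test on line 1192 encodes the reference baseline at release time: the scheduler itself holds one reference, and a synchronous caller parked in rpc_execute() holds a second, so "nobody else is waiting" means tk_count == 2 for a sync task and == 1 for an async one. Anything higher means an external holder may be sleeping in __rpc_wait_for_completion_task(), so rpc_complete_task() must run to wake it before the final put. An illustrative restatement (not kernel code):

	/* Baseline reference count when rpc_release_task() runs. */
	static inline int expected_refs(bool is_async)
	{
		return 1 + !is_async;	/* async: 1, sync: 2 */
	}
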