Lines Matching defs:queue

91  * queue->lock and bh_disabled in order to avoid races within
95 __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
101 if (list_empty(&queue->timer_list.list))
102 cancel_delayed_work(&queue->timer_list.dwork);
106 rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
109 queue->timer_list.expires = expires;
114 mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires);
121 __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
125 if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires))
126 rpc_set_queue_timer(queue, timeout);
127 list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
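Lines 91-127 are the timer plumbing in the SUNRPC scheduler (net/sunrpc/sched.c): each wait queue keeps a single delayed_work armed for the earliest timeout of any task on queue->timer_list.list. A minimal sketch of that bookkeeping pattern, using hypothetical names (my_timer_list, my_timed_item), not the kernel's actual structures:

#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* Hypothetical: one delayed_work services every timed item on a queue. */
struct my_timed_item {
        struct list_head node;
        unsigned long timeout;          /* absolute, in jiffies */
};

struct my_timer_list {
        spinlock_t lock;
        struct list_head items;         /* items with a pending timeout */
        unsigned long expires;          /* earliest of those timeouts */
        struct delayed_work dwork;      /* fires at 'expires' */
};

/* Re-arm the shared work item for an absolute jiffies value; caller holds
 * tl->lock.  mod_delayed_work() wants a relative delay, so convert first. */
static void my_set_timer(struct my_timer_list *tl, unsigned long expires)
{
        unsigned long now = jiffies;

        tl->expires = expires;
        if (time_before_eq(expires, now))
                expires = 0;
        else
                expires -= now;
        mod_delayed_work(system_wq, &tl->dwork, expires);
}

/* Add an item under tl->lock: only ever pull the shared timer earlier. */
static void my_add_timed_item(struct my_timer_list *tl, struct my_timed_item *it)
{
        if (list_empty(&tl->items) || time_before(it->timeout, tl->expires))
                my_set_timer(tl, it->timeout);
        list_add(&it->node, &tl->items);
}

The point of the pattern is one delayed_work per queue rather than one timer per task: adding a task can only pull the shared expiry earlier, and (per lines 101-102) __rpc_disable_timer() cancels the work once the timed list goes empty.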
130 static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
132 if (queue->priority != priority) {
133 queue->priority = priority;
134 queue->nr = 1U << priority;
138 static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
140 rpc_set_waitqueue_priority(queue, queue->maxpriority);
144 * Add a request to a queue list
155 /* Cache the queue head in task->u.tk_wait.list */
166 * Remove request from a queue list
182 /* Assume __rpc_list_enqueue_task() cached the queue head */
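Lines 144-182 are __rpc_list_enqueue_task()/__rpc_list_dequeue_task(); the comment at 155 refers to hiding tasks that share an owner behind the first queued task, with the real queue head cached in the hidden task's list pointers so a later dequeue can tell whether the entry sits on the main list. A generic, hedged sketch of the same "group duplicates behind a representative" idea (hypothetical my_item type, not the rpc_task layout):

#include <linux/list.h>

/* Hypothetical item: 'list' links it on the main queue (or caches the queue
 * head when it is hidden), 'links' chains items that share an owner. */
struct my_item {
        struct list_head list;
        struct list_head links;
        int owner;
};

static void my_enqueue(struct list_head *q, struct my_item *item)
{
        struct my_item *t;

        list_for_each_entry(t, q, list) {
                if (t->owner == item->owner) {
                        /* Same owner already queued: hide behind it. */
                        list_add_tail(&item->links, &t->links);
                        item->list.next = q;    /* cache the queue head */
                        item->list.prev = NULL; /* marks "not on q itself" */
                        return;
                }
        }
        INIT_LIST_HEAD(&item->links);
        list_add_tail(&item->list, q);
}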
191 * Add new request to a priority queue.
193 static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
197 if (unlikely(queue_priority > queue->maxpriority))
198 queue_priority = queue->maxpriority;
199 __rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
203 * Add new request to wait queue.
205 static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
210 if (RPC_IS_PRIORITY(queue))
211 __rpc_add_wait_queue_priority(queue, task, queue_priority);
213 list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
214 task->tk_waitqueue = queue;
215 queue->qlen++;
222 * Remove request from a priority queue.
230 * Remove request from queue.
233 static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
235 __rpc_disable_timer(queue, task);
236 if (RPC_IS_PRIORITY(queue))
240 queue->qlen--;
243 static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
247 spin_lock_init(&queue->lock);
248 for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
249 INIT_LIST_HEAD(&queue->tasks[i]);
250 queue->maxpriority = nr_queues - 1;
251 rpc_reset_waitqueue_priority(queue);
252 queue->qlen = 0;
253 queue->timer_list.expires = 0;
254 INIT_DELAYED_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn);
255 INIT_LIST_HEAD(&queue->timer_list.list);
256 rpc_assign_waitqueue_name(queue, qname);
259 void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
261 __rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
265 void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
267 __rpc_init_priority_wait_queue(queue, qname, 1);
271 void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
273 cancel_delayed_work_sync(&queue->timer_list.dwork);
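Lines 243-273 cover queue setup and teardown; rpc_init_wait_queue(), rpc_init_priority_wait_queue() and rpc_destroy_wait_queue() are the exported entry points. A small usage sketch, assuming a hypothetical per-client context (my_ctx):

#include <linux/sunrpc/sched.h>

/* Hypothetical context embedding two SUNRPC wait queues. */
struct my_ctx {
        struct rpc_wait_queue pending;  /* plain, single-level queue */
        struct rpc_wait_queue sending;  /* RPC_NR_PRIORITY-level queue */
};

static void my_ctx_init(struct my_ctx *ctx)
{
        /* The queue name only shows up in debugging/tracing output. */
        rpc_init_wait_queue(&ctx->pending, "my-pending");
        rpc_init_priority_wait_queue(&ctx->sending, "my-sending");
}

static void my_ctx_release(struct my_ctx *ctx)
{
        /* Flushes each queue's delayed timer work before the memory goes. */
        rpc_destroy_wait_queue(&ctx->pending);
        rpc_destroy_wait_queue(&ctx->sending);
}

Note that rpc_destroy_wait_queue() only cancels the timer work (line 273); it does not wake anything still sleeping on the queue, so a caller would normally have drained the queue first.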
354 * rpc_wait_queue, this must be called with the queue spinlock held to protect
355 * the wait queue operation.
377 * Prepare for sleeping on a wait queue.
380 * as it's on a wait queue.
439 * Protect the queue operations.
457 * Protect the queue operations.
473 * Protect the queue operations.
490 * Protect the queue operations.
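The repeated "Protect the queue operations" comments (lines 439-490) mark where rpc_sleep_on() and its variants take queue->lock themselves; callers do not hold it. A hedged example of a task parking itself from its rpc_call_prepare() callback, reusing the hypothetical my_ctx above (my_resource_available() is likewise made up):

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>

/* Hypothetical gate: pretend some shared resource must be free first. */
static bool my_resource_available(struct my_ctx *ctx)
{
        return false;   /* always wait, purely for illustration */
}

/* Hypothetical rpc_call_prepare() callback.  Waking the task later re-runs
 * this callback, so the check is retried until it passes. */
static void my_prepare(struct rpc_task *task, void *calldata)
{
        struct my_ctx *ctx = calldata;

        if (!my_resource_available(ctx)) {
                /* NULL action: no extra callback runs at wake-up time. */
                rpc_sleep_on(&ctx->pending, task, NULL);
                return;
        }
        rpc_call_start(task);
}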
501 * @queue: wait queue
504 * Caller must hold queue->lock, and have cleared the task queued flag.
507 struct rpc_wait_queue *queue,
516 trace_rpc_task_wakeup(task, queue);
518 __rpc_remove_wait_queue(queue, task);
524 * Wake up a queued task while the queue lock is being held
528 struct rpc_wait_queue *queue, struct rpc_task *task,
533 if (task->tk_waitqueue == queue) {
535 __rpc_do_wake_up_task_on_wq(wq, queue, task);
544 * Wake up a queued task while the queue lock is being held
546 static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue,
549 rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
554 * Wake up a task on a specific queue
556 void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
560 spin_lock(&queue->lock);
561 rpc_wake_up_task_queue_locked(queue, task);
562 spin_unlock(&queue->lock);
573 rpc_wake_up_task_queue_set_status_locked(struct rpc_wait_queue *queue,
576 rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
582 * @queue: pointer to rpc_wait_queue
586 * If @task is queued on @queue, then it is woken up, and @task->tk_status is
590 rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
595 spin_lock(&queue->lock);
596 rpc_wake_up_task_queue_set_status_locked(queue, task, status);
597 spin_unlock(&queue->lock);
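rpc_wake_up_queued_task_set_status() (lines 582-597) is the lock-taking wrapper: it acts only if the task is still sleeping on that particular queue, and it writes tk_status before making the task runnable. A short usage sketch with the hypothetical my_ctx:

#include <linux/errno.h>

/* Hypothetical error path: fail one specific sleeping task with -ETIMEDOUT.
 * The helper takes ctx->pending.lock itself and is a no-op if the task has
 * already been woken or has moved to another queue. */
static void my_abort_task(struct my_ctx *ctx, struct rpc_task *task)
{
        rpc_wake_up_queued_task_set_status(&ctx->pending, task, -ETIMEDOUT);
}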
601 * Wake up the next task on a priority queue.
603 static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
609 * Service the privileged queue.
611 q = &queue->tasks[RPC_NR_PRIORITY - 1];
612 if (queue->maxpriority > RPC_PRIORITY_PRIVILEGED && !list_empty(q)) {
620 q = &queue->tasks[queue->priority];
621 if (!list_empty(q) && queue->nr) {
622 queue->nr--;
628 * Service the next queue.
631 if (q == &queue->tasks[0])
632 q = &queue->tasks[queue->maxpriority];
639 } while (q != &queue->tasks[queue->priority]);
641 rpc_reset_waitqueue_priority(queue);
645 rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
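__rpc_find_next_queued_priority() (lines 601-645) serves the privileged bucket first, then the current level until its quota runs out (queue->nr, set to 1U << priority at line 134, so a queue sitting at priority 2 can hand out up to four tasks in a row), and then rotates to the next non-empty level. A stripped-down sketch of that rotation over an array of lists, with hypothetical names and without the privileged bucket:

#include <linux/list.h>

#define MY_NR_LEVELS 4

struct my_prio_queue {
        struct list_head levels[MY_NR_LEVELS];
        unsigned char priority;         /* level currently being served */
        unsigned char nr;               /* remaining quota at that level */
};

static void my_prio_queue_init(struct my_prio_queue *q)
{
        int i;

        for (i = 0; i < MY_NR_LEVELS; i++)
                INIT_LIST_HEAD(&q->levels[i]);
        q->priority = MY_NR_LEVELS - 1;
        q->nr = 1U << q->priority;
}

/* Returns the list node of the first item at the chosen level (the caller
 * would container_of() it back), or NULL if every level is empty. */
static struct list_head *my_pick_next(struct my_prio_queue *q)
{
        struct list_head *lvl = &q->levels[q->priority];

        /* Keep serving the current level while it has both quota and work. */
        if (!list_empty(lvl) && q->nr) {
                q->nr--;
                return lvl->next;
        }

        /* Rotate downwards (wrapping) to the next non-empty level. */
        do {
                lvl = (lvl == &q->levels[0]) ?
                        &q->levels[MY_NR_LEVELS - 1] : lvl - 1;
                if (!list_empty(lvl)) {
                        q->priority = (unsigned char)(lvl - &q->levels[0]);
                        q->nr = 1U << q->priority;      /* fresh quota */
                        return lvl->next;
                }
        } while (lvl != &q->levels[q->priority]);

        return NULL;
}

Unlike the kernel function, this sketch refreshes the quota even when the rotation lands back on the same level; rpc_set_waitqueue_priority() only resets queue->nr when the priority actually changes (lines 132-134).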
650 static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
652 if (RPC_IS_PRIORITY(queue))
653 return __rpc_find_next_queued_priority(queue);
654 if (!list_empty(&queue->tasks[0]))
655 return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
660 * Wake up the first task on the wait queue.
663 struct rpc_wait_queue *queue,
668 spin_lock(&queue->lock);
669 task = __rpc_find_next_queued(queue);
671 task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
673 spin_unlock(&queue->lock);
679 * Wake up the first task on the wait queue.
681 struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
684 return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data);
694 * Wake up the next task on the wait queue.
696 struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
698 return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
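rpc_wake_up_first() (lines 679-684) lets the caller veto the wake-up: the bool callback runs against the candidate task, and returning false leaves it asleep; rpc_wake_up_next() (694-698) is simply the unconditional form. A hedged example predicate, again using the hypothetical my_ctx:

/* Hypothetical predicate: only wake the candidate if it was created with our
 * context as its tk_calldata; returning false leaves it on the queue. */
static bool my_wake_if_ours(struct rpc_task *task, void *data)
{
        return task->tk_calldata == data;
}

static void my_kick_one(struct my_ctx *ctx)
{
        rpc_wake_up_first(&ctx->pending, my_wake_if_ours, ctx);
}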
704 * @queue: rpc_wait_queue on which the tasks are sleeping
707 static void rpc_wake_up_locked(struct rpc_wait_queue *queue)
712 task = __rpc_find_next_queued(queue);
715 rpc_wake_up_task_queue_locked(queue, task);
721 * @queue: rpc_wait_queue on which the tasks are sleeping
723 * Grabs queue->lock
725 void rpc_wake_up(struct rpc_wait_queue *queue)
727 spin_lock(&queue->lock);
728 rpc_wake_up_locked(queue);
729 spin_unlock(&queue->lock);
735 * @queue: rpc_wait_queue on which the tasks are sleeping
738 static void rpc_wake_up_status_locked(struct rpc_wait_queue *queue, int status)
743 task = __rpc_find_next_queued(queue);
746 rpc_wake_up_task_queue_set_status_locked(queue, task, status);
752 * @queue: rpc_wait_queue on which the tasks are sleeping
755 * Grabs queue->lock
757 void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
759 spin_lock(&queue->lock);
760 rpc_wake_up_status_locked(queue, status);
761 spin_unlock(&queue->lock);
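rpc_wake_up() and rpc_wake_up_status() (lines 721-761) drain the whole queue under queue->lock, the latter also setting every task's tk_status. A hypothetical teardown/resume pair:

#include <linux/errno.h>

/* Fail every sleeper with -EIO (e.g. on shutdown)... */
static void my_ctx_shutdown(struct my_ctx *ctx)
{
        rpc_wake_up_status(&ctx->pending, -EIO);
        rpc_wake_up_status(&ctx->sending, -EIO);
}

/* ...or simply make them all runnable again. */
static void my_ctx_resume(struct my_ctx *ctx)
{
        rpc_wake_up(&ctx->pending);
}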
767 struct rpc_wait_queue *queue = container_of(work,
773 spin_lock(&queue->lock);
775 list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
780 rpc_wake_up_task_queue_locked(queue, task);
786 if (!list_empty(&queue->timer_list.list))
787 rpc_set_queue_timer(queue, expires);
788 spin_unlock(&queue->lock);
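__rpc_queue_timer_fn() (lines 767-788) is the delayed-work callback: under queue->lock it wakes every task whose timeout has expired, keeps track of the earliest timeout still outstanding, and re-arms the timer only if something is left. Continuing the hypothetical my_timer_list sketch from above (my_wake() stands in for the real wake-up path):

/* Hypothetical wake-up: here it just unlinks the item. */
static void my_wake(struct my_timed_item *it)
{
        list_del_init(&it->node);
}

static void my_timer_fn(struct work_struct *work)
{
        struct my_timer_list *tl =
                container_of(work, struct my_timer_list, dwork.work);
        struct my_timed_item *it, *n;
        unsigned long expires, now = jiffies, timeo;

        spin_lock(&tl->lock);
        expires = now + MAX_JIFFY_OFFSET;       /* "nothing pending yet" */
        list_for_each_entry_safe(it, n, &tl->items, node) {
                timeo = it->timeout;
                if (time_after_eq(now, timeo)) {
                        my_wake(it);            /* expired: wake and unlink */
                        continue;
                }
                if (time_before(timeo, expires))
                        expires = timeo;        /* earliest survivor so far */
        }
        if (!list_empty(&tl->items))
                my_set_timer(tl, expires);      /* re-arm for that survivor */
        spin_unlock(&tl->lock);
}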
857 struct rpc_wait_queue *queue;
867 queue = READ_ONCE(task->tk_waitqueue);
868 if (queue)
869 rpc_wake_up_queued_task(queue, task);
874 struct rpc_wait_queue *queue;
878 queue = READ_ONCE(task->tk_waitqueue);
879 if (queue)
880 rpc_wake_up_queued_task(queue, task);
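Lines 857-880 show the lockless pattern used by the signalling paths: READ_ONCE(task->tk_waitqueue) is only a hint, because rpc_wake_up_queued_task() re-checks task->tk_waitqueue == queue under queue->lock, so a racing dequeue just turns the call into a no-op. The same idiom as a hypothetical helper:

#include <linux/sunrpc/sched.h>

/* Hypothetical: nudge a task if it happens to be asleep on some queue. */
static void my_poke_task(struct rpc_task *task)
{
        struct rpc_wait_queue *queue = READ_ONCE(task->tk_waitqueue);

        if (queue)
                rpc_wake_up_queued_task(queue, task);
}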
911 struct rpc_wait_queue *queue;
958 * The queue->lock protects against races with
966 queue = task->tk_waitqueue;
967 spin_lock(&queue->lock);
969 spin_unlock(&queue->lock);
974 rpc_wake_up_task_queue_locked(queue, task);
975 spin_unlock(&queue->lock);
979 spin_unlock(&queue->lock);
992 * clean up after sleeping on some queue, we don't