Lines Matching defs:queue

20  * Mark a hardware queue as needing a restart.
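Line 20 is the comment above the helper that flags a hardware queue for a deferred restart; the matches in this listing line up with the blk-mq scheduler glue (block/blk-mq-sched.c in the kernel tree). The idea is that a dispatch attempt which runs out of device resources records the fact, and a later request completion clears the flag and reruns the queue. Below is a minimal userspace model of that mark/clear-and-rerun handshake; the struct and function names are invented for illustration, not the kernel's API, and the model ignores the atomic bit operations the kernel uses for the real state flag.

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for a hardware dispatch queue. */
struct hw_queue {
    bool sched_restart;   /* models a "needs restart" state flag */
    int  queued;          /* requests still waiting to be dispatched */
};

/* Dispatch path: record that the queue should be restarted later. */
static void mark_restart(struct hw_queue *hctx)
{
    if (!hctx->sched_restart)
        hctx->sched_restart = true;
}

/* Completion path: if a restart was requested, clear it and dispatch again. */
static void complete_one(struct hw_queue *hctx)
{
    if (hctx->sched_restart) {
        hctx->sched_restart = false;
        printf("rerunning hw queue, %d request(s) pending\n", hctx->queued);
    }
}

int main(void)
{
    struct hw_queue q = { .queued = 3 };

    mark_restart(&q);   /* dispatch failed: out of tags or device budget */
    complete_one(&q);   /* a completion arrives and restarts dispatch */
    return 0;
}
```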
81 * its queue by itself in its completion handler, so we don't need to
82 * restart queue if .get_budget() fails to get the budget.
89 struct request_queue *q = hctx->queue;
100 max_dispatch = hctx->queue->nr_requests;
124 * same queue and if we didn't dispatch then there's
125 * no guarantee anyone will kick the queue. Kick it
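Lines 81-125 fall inside the elevator dispatch loop: each request taken from the IO scheduler first needs a device budget, the loop is capped at the queue's nr_requests (line 100), and when a budget was obtained but no request was produced, the budget is released and the queue kicked so other queues sharing the device are not left stalled; a failed budget grab needs no kick because the driver reruns the queue from its completion handler (lines 81-82). The sketch below models that loop under those assumptions; get_budget(), dispatch_one(), kick_queue() and the counters are stand-ins, not kernel functions.

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins: a tiny device with a fixed budget and a scheduler queue. */
static unsigned int device_budget = 2;    /* free device slots */
static unsigned int sched_requests = 5;   /* requests held by the IO scheduler */

static bool get_budget(void)   { return device_budget ? (device_budget--, true) : false; }
static void put_budget(void)   { device_budget++; }
static bool dispatch_one(void) { return sched_requests ? (sched_requests--, true) : false; }
static void kick_queue(void)   { puts("kicking hw queue"); }

/* Dispatch scheduler requests, one device budget per request. */
static void do_dispatch_sched(unsigned int nr_requests)
{
    unsigned int max_dispatch = nr_requests;   /* mirrors hctx->queue->nr_requests */

    for (unsigned int dispatched = 0; dispatched < max_dispatch; dispatched++) {
        if (!get_budget()) {
            /* Driver reruns the queue from its completion handler; no kick needed. */
            break;
        }
        if (!dispatch_one()) {
            /*
             * We hold a budget but got nothing to send. Holding it may have
             * blocked other queues sharing the device, and nobody else is
             * guaranteed to rerun this queue, so release and kick it ourselves.
             */
            put_budget();
            kick_queue();
            break;
        }
    }
}

int main(void)
{
    do_dispatch_sched(32);
    return 0;
}
```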
209 * its queue by itself in its completion handler, so we don't need to
210 * restart queue if .get_budget() fails to get the budget.
217 struct request_queue *q = hctx->queue;
244 * same queue and if we didn't dispatch then there's
245 * no guarantee anyone will kick the queue. Kick it
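Lines 209-245 repeat the same budget-or-kick pattern for the other dispatch path, the one that pulls requests straight from the per-CPU software queues instead of from the elevator. That path takes one request at a time and, to stay fair, rotates which software queue it services so no CPU's queue is starved. The sketch below models only that round-robin selection; the queue count, struct, and helper names are illustrative assumptions, not the kernel's data structures.

```c
#include <stddef.h>
#include <stdio.h>

#define NR_SW_QUEUES 4   /* illustrative number of per-CPU software queues */

/* Hypothetical per-CPU software queue with a count of pending requests. */
struct sw_queue {
    int pending;
};

/*
 * Pick the next non-empty software queue after the one used last time, so
 * one-at-a-time dispatch rotates fairly across CPUs (round-robin).
 */
static struct sw_queue *next_busy_ctx(struct sw_queue q[], unsigned int *last)
{
    for (unsigned int i = 0; i < NR_SW_QUEUES; i++) {
        unsigned int idx = (*last + 1 + i) % NR_SW_QUEUES;

        if (q[idx].pending) {
            *last = idx;
            return &q[idx];
        }
    }
    return NULL;   /* every software queue is empty */
}

int main(void)
{
    struct sw_queue queues[NR_SW_QUEUES] = { {0}, {2}, {0}, {1} };
    unsigned int last = 0;
    struct sw_queue *ctx;

    while ((ctx = next_busy_ctx(queues, &last)) != NULL) {
        ctx->pending--;
        printf("dispatched one request from sw queue %u\n", last);
    }
    return 0;
}
```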
290 * of low device queue depth. Once we pull requests out of the IO
292 * leave them there for as long as we can. Mark the hw queue as
308 if (hctx->queue->elevator)
311 /* dequeue request one by one from sw queue if queue is busy */
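Lines 290-311 are the dispatch-source decision: with an IO scheduler attached (line 308), requests are left in the scheduler as long as possible so it can merge and sort them despite a shallow device queue depth; without one, a busy hardware queue is fed one request at a time from the software queues (line 311), and an idle one gets the software queues flushed wholesale. A simplified model of that three-way choice, with made-up helper names:

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical state for one hardware queue (names are stand-ins). */
struct hw_queue_model {
    bool has_elevator;    /* an IO scheduler is attached to the queue */
    bool dispatch_busy;   /* the device has been struggling to keep up */
};

static void dispatch_from_sched(void) { puts("pull from IO scheduler"); }
static void dispatch_one_ctx(void)    { puts("take one request from a sw queue"); }
static void flush_sw_queues(void)     { puts("flush all sw queues at once"); }

/* Choose where the next requests come from. */
static void dispatch_requests(const struct hw_queue_model *hctx)
{
    if (hctx->has_elevator) {
        /*
         * Leave requests in the IO scheduler as long as possible so it
         * can merge and sort them, especially with shallow device queues.
         */
        dispatch_from_sched();
    } else if (hctx->dispatch_busy) {
        /* Device is saturated: dequeue one request at a time. */
        dispatch_one_ctx();
    } else {
        /* Plenty of headroom: hand over the whole software queue. */
        flush_sw_queues();
    }
}

int main(void)
{
    struct hw_queue_model busy = { .has_elevator = false, .dispatch_busy = true };

    dispatch_requests(&busy);
    return 0;
}
```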
321 struct request_queue *q = hctx->queue;
360 /* default per sw-queue merge */
363 * Reverse check our software queue for entries that we could
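Lines 360-363 describe the default software-queue merge: before a new bio becomes a request of its own, the most recently queued entries are walked in reverse to see whether the bio can simply be merged into one of them. The sketch below models that bounded reverse scan with a simplified back-merge test; the node layout, the merge condition, and the bound of eight candidates are illustrative stand-ins rather than the kernel's definitions.

```c
#include <stdbool.h>
#include <stddef.h>

/* Illustrative request node in a doubly linked software queue. */
struct rq_node {
    struct rq_node *prev, *next;
    unsigned long long pos;    /* starting sector */
    unsigned int len;          /* length in sectors */
};

/* Can the new bio be appended to this request? (simplified back-merge test) */
static bool can_back_merge(const struct rq_node *rq, unsigned long long bio_pos)
{
    return rq->pos + rq->len == bio_pos;
}

/*
 * Walk the software queue from the tail (newest entries first) and try only
 * a handful of candidates; recently queued requests are the likeliest match.
 */
static struct rq_node *find_merge(struct rq_node *tail, unsigned long long bio_pos)
{
    int checked = 0;

    for (struct rq_node *rq = tail; rq && checked < 8; rq = rq->prev, checked++) {
        if (can_back_merge(rq, bio_pos))
            return rq;
    }
    return NULL;   /* no candidate: queue the bio as a new request */
}

int main(void)
{
    struct rq_node a = { .pos = 0,  .len = 8 };
    struct rq_node b = { .prev = &a, .pos = 64, .len = 8 };

    a.next = &b;
    /* A bio starting at sector 8 back-merges into the older request 'a'. */
    return find_merge(&b, 8) == &a ? 0 : 1;
}
```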
399 static void blk_mq_exit_sched_shared_tags(struct request_queue *queue)
401 blk_mq_free_rq_map(queue->sched_shared_tags);
402 queue->sched_shared_tags = NULL;
405 /* called in queue's release handler, tagset has gone away */
423 static int blk_mq_init_sched_shared_tags(struct request_queue *queue)
425 struct blk_mq_tag_set *set = queue->tag_set;
431 queue->sched_shared_tags = blk_mq_alloc_map_and_rqs(set,
434 if (!queue->sched_shared_tags)
437 blk_mq_tag_update_sched_shared_tags(queue);
454 * Additionally, this is a per-hw queue depth.
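Lines 399-454 cover the scheduler's shared tag map: it hangs off the request_queue, is allocated when the scheduler is set up (the kernel sizes the underlying map generously up front so later nr_requests changes need no reallocation), and is freed from the queue's release handler after the tag set itself is gone (line 405). Line 454's comment notes that the resulting scheduler depth is applied per hardware queue; the default is twice the smaller of the hardware queue depth and BLKDEV_DEFAULT_RQ (128 in current kernels). The model below mirrors that lifecycle and depth calculation; the struct, helpers, and the DEFAULT_RQ constant are stand-ins, not the kernel's API.

```c
#include <stdlib.h>

#define DEFAULT_RQ 128   /* models BLKDEV_DEFAULT_RQ */

/* Illustrative shared scheduler tag map owned by the request queue. */
struct sched_tags {
    unsigned int depth;
};

struct queue_model {
    unsigned int hw_queue_depth;   /* from the driver's tag set */
    unsigned int nr_requests;      /* per-hw-queue scheduler depth */
    struct sched_tags *sched_shared_tags;
};

/* Default depth: double the smaller of the hw depth and DEFAULT_RQ, per hw queue. */
static unsigned int default_sched_depth(unsigned int hw_depth)
{
    unsigned int base = hw_depth < DEFAULT_RQ ? hw_depth : DEFAULT_RQ;

    return 2 * base;
}

static int init_sched_shared_tags(struct queue_model *q)
{
    q->sched_shared_tags = malloc(sizeof(*q->sched_shared_tags));
    if (!q->sched_shared_tags)
        return -1;

    /*
     * The kernel allocates the map at its maximum size up front; this model
     * just records the computed per-hw-queue default depth.
     */
    q->sched_shared_tags->depth = default_sched_depth(q->hw_queue_depth);
    q->nr_requests = q->sched_shared_tags->depth;
    return 0;
}

/* Called from the queue's release path, after the tag set has gone away. */
static void exit_sched_shared_tags(struct queue_model *q)
{
    free(q->sched_shared_tags);
    q->sched_shared_tags = NULL;
}

int main(void)
{
    struct queue_model q = { .hw_queue_depth = 64 };

    if (init_sched_shared_tags(&q))
        return 1;
    /* 2 * min(64, 128) == 128 requests per hardware queue */
    exit_sched_shared_tags(&q);
    return 0;
}
```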