Lines matching refs:hctx (identifier cross-reference; the number on each line below is the source line of the match). The matched code is from the Linux kernel's block/blk-mq-sched.c.

22 void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
24 if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
27 set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
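
The two matches at source lines 24 and 27 are the whole body of this helper. A sketch of the complete definition, reconstructed from the matched lines; only the early return is filled in, the usual test-before-set pattern that avoids a redundant atomic when the bit is already set:

void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
        /* Avoid a redundant atomic RMW if the restart bit is already set. */
        if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
                return;

        set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}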
31 void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
33 clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
36 * Order clearing SCHED_RESTART and list_empty_careful(&hctx->dispatch)
39 * meantime new request added to hctx->dispatch is missed to check in
44 blk_mq_run_hw_queue(hctx, true);
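
The smp_mb() between the clear_bit() and the queue run does not appear in the matches above because that line contains no "hctx", but it is the barrier the quoted comment describes. A reconstruction under that assumption:

void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
        clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

        /*
         * Order clearing SCHED_RESTART and list_empty_careful(&hctx->dispatch)
         * in blk_mq_run_hw_queue(). Its pair is the barrier in
         * blk_mq_dispatch_rq_list(). So dispatch code won't see SCHED_RESTART,
         * meantime new request added to hctx->dispatch is missed to check in
         * blk_mq_run_hw_queue().
         */
        smp_mb();

        blk_mq_run_hw_queue(hctx, true);
}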
58 struct blk_mq_hw_ctx *hctx =
65 if (rq->mq_hctx != hctx) {
74 return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
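
Source lines 58-74 belong to blk_mq_dispatch_hctx_list(), which peels off the leading run of requests that share one hctx and dispatches them as a batch. A sketch reconstructed around the matched lines; the list_cut_before()/list_splice_tail_init() plumbing is filled in from recent upstream and should be checked against the exact tree:

static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
{
        struct blk_mq_hw_ctx *hctx =
                list_first_entry(rq_list, struct request, queuelist)->mq_hctx;
        struct request *rq;
        LIST_HEAD(hctx_list);
        unsigned int count = 0;

        /* Collect the leading run of requests that share the first hctx. */
        list_for_each_entry(rq, rq_list, queuelist) {
                if (rq->mq_hctx != hctx) {
                        list_cut_before(&hctx_list, rq_list, &rq->queuelist);
                        goto dispatch;
                }
                count++;
        }
        list_splice_tail_init(rq_list, &hctx_list);

dispatch:
        return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
}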
84 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
87 static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
89 struct request_queue *q = hctx->queue;
97 if (hctx->dispatch_busy)
100 max_dispatch = hctx->queue->nr_requests;
106 if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
109 if (!list_empty_careful(&hctx->dispatch)) {
118 rq = e->type->ops.dispatch_request(hctx);
123 * budget could have blocked any "hctx"s with the
141 if (rq->mq_hctx != hctx)
159 * Requests from different hctx may be dequeued from some
162 * Sort the requests in the list according to their hctx,
163 * dispatch batching requests from same hctx at a time.
170 dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);
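
Source lines 84-170 are the core dispatch loop that pulls requests from the elevator while budget, driver tags, and max_dispatch allow. A condensed sketch built around the matched lines; the budget-token handling and the sched_rq_cmp() sort helper are taken from recent upstream blk-mq-sched.c and may differ in older trees:

static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct elevator_queue *e = q->elevator;
        bool multi_hctxs = false, run_queue = false;
        bool dispatched = false, busy = false;
        unsigned int max_dispatch;
        LIST_HEAD(rq_list);
        int count = 0;

        /* Throttle to one request per round while the device is busy. */
        if (hctx->dispatch_busy)
                max_dispatch = 1;
        else
                max_dispatch = hctx->queue->nr_requests;

        do {
                struct request *rq;
                int budget_token;

                if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
                        break;

                if (!list_empty_careful(&hctx->dispatch)) {
                        busy = true;
                        break;
                }

                budget_token = blk_mq_get_dispatch_budget(q);
                if (budget_token < 0)
                        break;

                rq = e->type->ops.dispatch_request(hctx);
                if (!rq) {
                        blk_mq_put_dispatch_budget(q, budget_token);
                        /*
                         * We're releasing without dispatching. Holding the
                         * budget could have blocked any "hctx"s with the
                         * same queue and if we didn't dispatch then there's
                         * no guarantee anyone will kick the queue. Kick it
                         * ourselves.
                         */
                        run_queue = true;
                        break;
                }
                blk_mq_set_rq_budget_token(rq, budget_token);

                list_add_tail(&rq->queuelist, &rq_list);
                count++;
                if (rq->mq_hctx != hctx)
                        multi_hctxs = true;

                /* Stop dequeueing once driver tags run out. */
                if (!blk_mq_get_driver_tag(rq))
                        break;
        } while (count < max_dispatch);

        if (!count) {
                if (run_queue)
                        blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
        } else if (multi_hctxs) {
                /*
                 * Requests from different hctx may be dequeued from some
                 * schedulers, such as bfq and deadline.
                 *
                 * Sort the requests in the list according to their hctx,
                 * dispatch batching requests from same hctx at a time.
                 */
                list_sort(NULL, &rq_list, sched_rq_cmp);
                do {
                        dispatched |= blk_mq_dispatch_hctx_list(&rq_list);
                } while (!list_empty(&rq_list));
        } else {
                dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);
        }

        if (busy)
                return -EAGAIN;
        return !!dispatched;
}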
178 static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
184 ret = __blk_mq_do_dispatch_sched(hctx);
188 blk_mq_delay_run_hw_queue(hctx, 0);
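
blk_mq_do_dispatch_sched() is a thin retry loop around the function above: it keeps going while progress is made (return value 1), and when rescheduling is needed or roughly one second has elapsed it punts to a delayed queue run. A sketch assuming the upstream one-second bound:

static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
        unsigned long end = jiffies + HZ;
        int ret;

        do {
                ret = __blk_mq_do_dispatch_sched(hctx);
                if (ret != 1)
                        break;
                /* Yield after ~1s of dispatching; let kblockd continue. */
                if (need_resched() || time_is_before_jiffies(end)) {
                        blk_mq_delay_run_hw_queue(hctx, 0);
                        break;
                }
        } while (1);

        return ret;
}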
196 static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
199 unsigned short idx = ctx->index_hw[hctx->type];
201 if (++idx == hctx->nr_ctx)
204 return hctx->ctxs[idx];
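
Source lines 196-204 are nearly the whole round-robin helper; only the wrap-around assignment is missing from the matches. Reconstructed:

static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
                                          struct blk_mq_ctx *ctx)
{
        unsigned short idx = ctx->index_hw[hctx->type];

        /* Wrap around to the first software queue mapped to this hctx. */
        if (++idx == hctx->nr_ctx)
                idx = 0;

        return hctx->ctxs[idx];
}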
212 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
215 static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
217 struct request_queue *q = hctx->queue;
219 struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
226 if (!list_empty_careful(&hctx->dispatch)) {
231 if (!sbitmap_any_bit_set(&hctx->ctx_map))
238 rq = blk_mq_dequeue_from_ctx(hctx, ctx);
243 * budget could have blocked any "hctx"s with the
262 ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);
266 WRITE_ONCE(hctx->dispatch_from, ctx);
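
Source lines 212-266 dispatch directly from the per-CPU software queues, one request at a time, resuming from hctx->dispatch_from and rotating it for fairness. A sketch with the budget handling filled in from recent upstream:

static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
        LIST_HEAD(rq_list);
        struct request *rq;
        int ret = 0;

        do {
                int budget_token;

                if (!list_empty_careful(&hctx->dispatch)) {
                        ret = -EAGAIN;
                        break;
                }

                if (!sbitmap_any_bit_set(&hctx->ctx_map))
                        break;

                budget_token = blk_mq_get_dispatch_budget(q);
                if (budget_token < 0)
                        break;

                rq = blk_mq_dequeue_from_ctx(hctx, ctx);
                if (!rq) {
                        blk_mq_put_dispatch_budget(q, budget_token);
                        /* Nobody else may kick the queue; do it ourselves. */
                        blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
                        break;
                }
                blk_mq_set_rq_budget_token(rq, budget_token);

                list_add(&rq->queuelist, &rq_list);

                /* Round robin for fair dispatch across software queues. */
                ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);
        } while (blk_mq_dispatch_rq_list(hctx, &rq_list, 1));

        WRITE_ONCE(hctx->dispatch_from, ctx);
        return ret;
}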
270 static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
279 if (!list_empty_careful(&hctx->dispatch)) {
280 spin_lock(&hctx->lock);
281 if (!list_empty(&hctx->dispatch))
282 list_splice_init(&hctx->dispatch, &rq_list);
283 spin_unlock(&hctx->lock);
300 blk_mq_sched_mark_restart_hctx(hctx);
301 if (!blk_mq_dispatch_rq_list(hctx, &rq_list, 0))
305 need_dispatch = hctx->dispatch_busy;
308 if (hctx->queue->elevator)
309 return blk_mq_do_dispatch_sched(hctx);
313 return blk_mq_do_dispatch_ctx(hctx);
314 blk_mq_flush_busy_ctxs(hctx, &rq_list);
315 blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
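
Source lines 270-315 tie the pieces together: drain hctx->dispatch first, then route to the elevator path, the busy one-by-one ctx path, or a plain flush of the software queues. Reconstructed around the matched lines, with the long upstream comment shortened:

static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
        bool need_dispatch = false;
        LIST_HEAD(rq_list);

        /* Grab leftover requests on the dispatch list first, for fairness. */
        if (!list_empty_careful(&hctx->dispatch)) {
                spin_lock(&hctx->lock);
                if (!list_empty(&hctx->dispatch))
                        list_splice_init(&hctx->dispatch, &rq_list);
                spin_unlock(&hctx->lock);
        }

        /*
         * Only ask the scheduler for requests if there were no residual
         * requests, so they stay mergeable and sortable in the scheduler
         * as long as possible; otherwise mark the hw queue for restart.
         */
        if (!list_empty(&rq_list)) {
                blk_mq_sched_mark_restart_hctx(hctx);
                if (!blk_mq_dispatch_rq_list(hctx, &rq_list, 0))
                        return 0;
                need_dispatch = true;
        } else {
                need_dispatch = hctx->dispatch_busy;
        }

        if (hctx->queue->elevator)
                return blk_mq_do_dispatch_sched(hctx);

        /* Dequeue one by one from the sw queues if the device is busy. */
        if (need_dispatch)
                return blk_mq_do_dispatch_ctx(hctx);
        blk_mq_flush_busy_ctxs(hctx, &rq_list);
        blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
        return 0;
}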
319 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
321 struct request_queue *q = hctx->queue;
324 if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
327 hctx->run++;
330 * A return of -EAGAIN is an indication that hctx->dispatch is not
333 if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
334 if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
335 blk_mq_run_hw_queue(hctx, true);
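
The doubled call at source lines 333-334 is intentional: a second -EAGAIN means hctx->dispatch kept refilling, so the queue is re-run asynchronously instead of looping further. The enclosing function, reconstructed from the matched lines:

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;

        /* RCU or SRCU read lock is needed before checking quiesced flag */
        if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
                return;

        hctx->run++;

        /*
         * A return of -EAGAIN is an indication that hctx->dispatch is not
         * empty and we must run again in order to avoid starving flushes.
         */
        if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
                if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
                        blk_mq_run_hw_queue(hctx, true);
        }
}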
344 struct blk_mq_hw_ctx *hctx;
354 hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
355 type = hctx->type;
356 if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
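
Source lines 344-356 sit in blk_mq_sched_bio_merge(). A condensed sketch: an elevator-provided bio_merge hook wins; otherwise a bounded reverse scan of the mapped software queue's list is attempted. The direct returns here flatten upstream's goto-based exit, so treat this as structural only:

bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
                unsigned int nr_segs)
{
        struct elevator_queue *e = q->elevator;
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
        enum hctx_type type;
        bool ret = false;

        /* An elevator with its own merge hook takes precedence. */
        if (e && e->type->ops.bio_merge)
                return e->type->ops.bio_merge(q, bio, nr_segs);

        ctx = blk_mq_get_ctx(q);
        hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
        type = hctx->type;
        if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
            list_empty_careful(&ctx->rq_lists[type]))
                return false;

        /* Default per-sw-queue merge: bounded reverse scan of the list. */
        spin_lock(&ctx->lock);
        if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs))
                ret = true;
        spin_unlock(&ctx->lock);
        return ret;
}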
383 struct blk_mq_hw_ctx *hctx,
387 hctx->sched_tags = q->sched_shared_tags;
391 hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,
394 if (!hctx->sched_tags)
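
Source lines 383-394 show the per-hctx scheduler tag allocation: with shared tags (confirmed by the match at line 387) every hctx points at q->sched_shared_tags, otherwise a map is allocated per hctx. Sketch:

static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
                                          struct blk_mq_hw_ctx *hctx,
                                          unsigned int hctx_idx)
{
        /* With a shared sched tag map, every hctx uses the same tags. */
        if (blk_mq_is_shared_tags(q->tag_set->flags)) {
                hctx->sched_tags = q->sched_shared_tags;
                return 0;
        }

        hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,
                                                    q->nr_requests);
        if (!hctx->sched_tags)
                return -ENOMEM;

        return 0;
}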
408 struct blk_mq_hw_ctx *hctx;
411 queue_for_each_hw_ctx(q, hctx, i) {
412 if (hctx->sched_tags) {
414 blk_mq_free_rq_map(hctx->sched_tags);
415 hctx->sched_tags = NULL;
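
Source lines 408-415 are the matching teardown loop. A sketch of the loop; note that upstream additionally frees the shared tag map once, outside this loop, in the shared-tags case:

static void blk_mq_sched_tags_teardown(struct request_queue *q,
                                       unsigned int flags)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;

        queue_for_each_hw_ctx(q, hctx, i) {
                if (hctx->sched_tags) {
                        /* Shared sched tags are freed once, not per hctx. */
                        if (!blk_mq_is_shared_tags(flags))
                                blk_mq_free_rq_map(hctx->sched_tags);
                        hctx->sched_tags = NULL;
                }
        }
}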
446 struct blk_mq_hw_ctx *hctx;
465 queue_for_each_hw_ctx(q, hctx, i) {
466 ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i);
479 queue_for_each_hw_ctx(q, hctx, i) {
481 ret = e->ops.init_hctx(hctx, i);
491 blk_mq_debugfs_register_sched_hctx(q, hctx);
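
Source lines 446-491 come from blk_mq_init_sched(): allocate per-hctx scheduler tags, call the elevator's init_sched, then init_hctx per hardware queue with full unwind on failure. A condensed sketch that elides the shared-tags setup and debugfs locking present upstream; the ops names assume a recent tree:

int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
        unsigned int flags = q->tag_set->flags;
        struct blk_mq_hw_ctx *hctx;
        struct elevator_queue *eq;
        unsigned long i;
        int ret;

        /* Scheduler tag depth: double the smaller of hw depth and 128. */
        q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
                                   BLKDEV_DEFAULT_RQ);

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i);
                if (ret)
                        goto err_free_map_and_rqs;
        }

        ret = e->ops.init_sched(q, e);
        if (ret)
                goto err_free_map_and_rqs;

        queue_for_each_hw_ctx(q, hctx, i) {
                if (e->ops.init_hctx) {
                        ret = e->ops.init_hctx(hctx, i);
                        if (ret) {
                                /* Unwind everything set up so far. */
                                eq = q->elevator;
                                blk_mq_sched_free_rqs(q);
                                blk_mq_exit_sched(q, eq);
                                kobject_put(&eq->kobj);
                                return ret;
                        }
                }
                blk_mq_debugfs_register_sched_hctx(q, hctx);
        }

        return 0;

err_free_map_and_rqs:
        blk_mq_sched_free_rqs(q);
        blk_mq_sched_tags_teardown(q, flags);
        q->elevator = NULL;
        return ret;
}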
511 struct blk_mq_hw_ctx *hctx;
518 queue_for_each_hw_ctx(q, hctx, i) {
519 if (hctx->sched_tags)
521 hctx->sched_tags, i);
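
Source lines 511-521 are blk_mq_sched_free_rqs(), which frees the scheduler's requests either once via the shared tag map or per hctx. Reconstructed:

void blk_mq_sched_free_rqs(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;

        if (blk_mq_is_shared_tags(q->tag_set->flags)) {
                /* One shared map covers every hardware queue. */
                blk_mq_free_rqs(q->tag_set, q->sched_shared_tags,
                                BLK_MQ_NO_HCTX_IDX);
        } else {
                queue_for_each_hw_ctx(q, hctx, i) {
                        if (hctx->sched_tags)
                                blk_mq_free_rqs(q->tag_set,
                                                hctx->sched_tags, i);
                }
        }
}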
528 struct blk_mq_hw_ctx *hctx;
532 queue_for_each_hw_ctx(q, hctx, i) {
534 blk_mq_debugfs_unregister_sched_hctx(hctx);
537 if (e->type->ops.exit_hctx && hctx->sched_data) {
538 e->type->ops.exit_hctx(hctx, i);
539 hctx->sched_data = NULL;
541 flags = hctx->flags;
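
Source lines 528-541 are the heart of blk_mq_exit_sched(): per-hctx debugfs teardown and exit_hctx calls, with hctx->flags captured for the final tag teardown. A sketch eliding the debugfs mutex locking present upstream:

void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned int flags = 0;
        unsigned long i;

        queue_for_each_hw_ctx(q, hctx, i) {
                blk_mq_debugfs_unregister_sched_hctx(hctx);
                if (e->type->ops.exit_hctx && hctx->sched_data) {
                        e->type->ops.exit_hctx(hctx, i);
                        hctx->sched_data = NULL;
                }
                /* Remember the flags for the tag teardown below. */
                flags = hctx->flags;
        }

        blk_mq_debugfs_unregister_sched(q);
        if (e->type->ops.exit_sched)
                e->type->ops.exit_sched(e);
        blk_mq_sched_tags_teardown(q, flags);
        q->elevator = NULL;
}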