Lines matching refs: hctx
50 void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
52 if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
55 set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
59 void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
61 if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
63 clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
66 * Order clearing SCHED_RESTART and list_empty_careful(&hctx->dispatch)
69 * meantime new request added to hctx->dispatch is missed to check in
74 blk_mq_run_hw_queue(hctx, true);
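
These fragments appear to come from the blk-mq scheduler glue in the Linux kernel (blk-mq-sched.c). The two helpers around lines 50-74 implement the SCHED_RESTART handshake: the mark side sets the bit only when it is not already set, so the common already-marked case does not dirty the shared cacheline, and the restart side clears it, orders the clear against the later re-check of hctx->dispatch, and re-runs the hardware queue. Below is a minimal user-space model of that pattern using C11 atomics; hw_ctx, run_hw_queue and the other names are illustrative stand-ins, not kernel API.

/* Minimal user-space model of the SCHED_RESTART handshake.
 * All names here are illustrative stand-ins, not kernel API. */
#include <stdatomic.h>
#include <stdio.h>

struct hw_ctx {
	atomic_uint state;                       /* models hctx->state */
};

#define S_SCHED_RESTART (1u << 0)

static void run_hw_queue(struct hw_ctx *h)
{
	printf("re-running hw queue %p\n", (void *)h);
}

/* Like blk_mq_sched_mark_restart_hctx(): test before set so the common
 * already-marked case does not write the shared cacheline at all. */
static void mark_restart(struct hw_ctx *h)
{
	if (atomic_load_explicit(&h->state, memory_order_relaxed) & S_SCHED_RESTART)
		return;
	atomic_fetch_or(&h->state, S_SCHED_RESTART);
}

/* Like blk_mq_sched_restart(): clear the bit, make the clear visible
 * before the re-run re-checks the dispatch list (so a request added to
 * hctx->dispatch in the meantime is not missed), then run the queue. */
static void sched_restart(struct hw_ctx *h)
{
	if (!(atomic_load(&h->state) & S_SCHED_RESTART))
		return;
	atomic_fetch_and(&h->state, ~S_SCHED_RESTART);
	atomic_thread_fence(memory_order_seq_cst);   /* models the smp_mb pairing */
	run_hw_queue(h);
}

int main(void)
{
	struct hw_ctx h = { 0 };

	mark_restart(&h);
	sched_restart(&h);
	return 0;
}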
88 struct blk_mq_hw_ctx *hctx =
95 if (rq->mq_hctx != hctx) {
104 return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
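
Lines 88-104 belong to a helper that takes a mixed request list, cuts off the run of requests sharing the first entry's hardware queue, and hands that homogeneous batch to blk_mq_dispatch_rq_list(). A user-space sketch of the same "cut the leading owner's run and dispatch it as one batch" idea, with hypothetical types and a trivial stand-in for the real dispatch:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct req {
	int owner;                /* models rq->mq_hctx */
	struct req *next;
};

/* Stand-in for blk_mq_dispatch_rq_list(): pretend everything dispatched. */
static bool dispatch_batch(int owner, struct req *batch, size_t count)
{
	(void)batch;
	printf("dispatching %zu request(s) for hw queue %d\n", count, owner);
	return true;
}

/* Model of blk_mq_dispatch_hctx_list(): detach the leading run of
 * requests that share the head's hardware queue and dispatch them as one
 * batch, leaving requests for other queues on the caller's list. */
static bool dispatch_hctx_list(struct req **list)
{
	struct req *batch = *list, **cut = list;
	int owner;
	size_t count = 0;

	if (!batch)
		return false;

	owner = batch->owner;
	while (*cut && (*cut)->owner == owner) {
		cut = &(*cut)->next;
		count++;
	}
	*list = *cut;             /* remainder stays on the caller's list */
	*cut = NULL;              /* terminate the detached batch         */
	return dispatch_batch(owner, batch, count);
}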
114 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
117 static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
119 struct request_queue *q = hctx->queue;
127 if (hctx->dispatch_busy)
130 max_dispatch = hctx->queue->nr_requests;
135 if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
138 if (!list_empty_careful(&hctx->dispatch)) {
146 rq = e->type->ops.dispatch_request(hctx);
151 * budget could have blocked any "hctx"s with the
166 if (rq->mq_hctx != hctx)
175 * Requests from different hctx may be dequeued from some
178 * Sort the requests in the list according to their hctx,
179 * dispatch batching requests from same hctx at a time.
186 dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);
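
Lines 114-186 are the heart of the elevator dispatch path: pull requests from the scheduler one at a time (only one per pass while the queue reports dispatch_busy, otherwise up to q->nr_requests), stop early when the scheduler has no work, when requeued requests show up on hctx->dispatch, or when no request can be obtained (for example, no budget or driver tag), then dispatch whatever was collected and report -EAGAIN if the requeued work still needs a pass. The sketch below is a condensed user-space model with hypothetical types; the real code additionally takes and releases a per-request dispatch budget and sorts batches that span several hardware queues before handing them out per hctx.

#include <stdbool.h>
#include <stddef.h>

#define EAGAIN_RETRY (-1)         /* models the -EAGAIN return */

struct sched_ops {
	bool  (*has_work)(void *hctx);
	void *(*dispatch_request)(void *hctx);
};

struct hw_queue {
	void        *hctx;
	bool         dispatch_busy;      /* models hctx->dispatch_busy          */
	bool         dispatch_nonempty;  /* models !list_empty(&hctx->dispatch) */
	unsigned int nr_requests;        /* models hctx->queue->nr_requests     */
};

/* Stand-in for blk_mq_dispatch_rq_list(): "did we send anything?" */
static int dispatch_list(void *hctx, void **rqs, size_t count)
{
	(void)hctx; (void)rqs;
	return count != 0;
}

/* Model of __blk_mq_do_dispatch_sched(). */
static int do_dispatch_sched(struct hw_queue *q, const struct sched_ops *ops,
			     void **rqs /* at least q->nr_requests slots */)
{
	/* a busy queue is fed a single request per pass, an idle one a batch */
	unsigned int max = q->dispatch_busy ? 1 : q->nr_requests;
	size_t count = 0;
	int busy = 0, dispatched = 0;

	do {
		if (ops->has_work && !ops->has_work(q->hctx))
			break;                    /* scheduler has nothing queued        */
		if (q->dispatch_nonempty) {
			busy = 1;                 /* requeued work must be drained first */
			break;
		}

		void *rq = ops->dispatch_request(q->hctx);
		if (!rq)
			break;                    /* e.g. out of budget or driver tags   */
		rqs[count++] = rq;
	} while (count < max);

	if (count)
		dispatched = dispatch_list(q->hctx, rqs, count);

	return busy ? EAGAIN_RETRY : dispatched;
}

In the kernel this helper sits inside blk_mq_do_dispatch_sched() (lines 194-204), which keeps calling it while it reports progress and falls back to blk_mq_delay_run_hw_queue() when it has run long enough or needs to reschedule.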
194 static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
200 ret = __blk_mq_do_dispatch_sched(hctx);
204 blk_mq_delay_run_hw_queue(hctx, 0);
212 static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
215 unsigned short idx = ctx->index_hw[hctx->type];
217 if (++idx == hctx->nr_ctx)
220 return hctx->ctxs[idx];
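
The helper at lines 212-220 just steps to the next software context mapped to this hardware queue, wrapping around at hctx->nr_ctx; it drives the round-robin shown below. A direct model, with stand-in names:

/* Model of blk_mq_next_ctx(): advance to the next software context mapped
 * to this hardware queue, wrapping at nr_ctx. Names are stand-ins. */
struct sw_ctx { unsigned short index_hw; };

struct hw_queue_map {
	struct sw_ctx **ctxs;            /* models hctx->ctxs   */
	unsigned short  nr_ctx;          /* models hctx->nr_ctx */
};

static struct sw_ctx *next_ctx(const struct hw_queue_map *h,
			       const struct sw_ctx *ctx)
{
	unsigned short idx = ctx->index_hw;

	if (++idx == h->nr_ctx)
		idx = 0;                 /* wrap back to the first mapped context */
	return h->ctxs[idx];
}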
228 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
231 static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
233 struct request_queue *q = hctx->queue;
235 struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
240 if (!list_empty_careful(&hctx->dispatch)) {
245 if (!sbitmap_any_bit_set(&hctx->ctx_map))
251 rq = blk_mq_dequeue_from_ctx(hctx, ctx);
256 * budget could have blocked any "hctx"s with the
273 ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);
277 WRITE_ONCE(hctx->dispatch_from, ctx);
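
Lines 228-277 handle the no-elevator, busy-queue case: requests are dequeued one at a time from the per-CPU software contexts, and hctx->dispatch_from remembers where the scan stopped so the next run resumes there rather than always draining the first context. A user-space sketch of that "remember the resume point" round robin; the real code also bails out with -EAGAIN when hctx->dispatch becomes non-empty and manages dispatch budget, both elided here.

#include <stddef.h>

struct hw_queue_rr {
	unsigned short nr_ctx;           /* software contexts mapped to this hw queue        */
	unsigned short dispatch_from;    /* models READ_ONCE/WRITE_ONCE(hctx->dispatch_from) */
};

/* Stand-in for blk_mq_dequeue_from_ctx(): scan the contexts starting at
 * 'from', return a request and the context it came from, or NULL. */
static void *dequeue_from(struct hw_queue_rr *h, unsigned short from,
			  unsigned short *got)
{
	(void)h;
	*got = from;
	return NULL;                     /* stand-in: nothing queued */
}

/* Stand-in for dispatching one request; nonzero means "keep going". */
static int dispatch_one(void *rq)
{
	(void)rq;
	return 1;
}

/* Model of blk_mq_do_dispatch_ctx(): pull one request at a time starting
 * at the remembered position, advance past the context it came from, and
 * store the position back so the next run continues the round robin. */
static void do_dispatch_ctx(struct hw_queue_rr *h)
{
	unsigned short from = h->dispatch_from;

	for (;;) {
		unsigned short got;
		void *rq = dequeue_from(h, from, &got);

		if (!rq)
			break;                                   /* no queued work (or no budget) */
		from = (unsigned short)((got + 1) % h->nr_ctx);  /* like blk_mq_next_ctx()        */
		if (!dispatch_one(rq))
			break;                                   /* driver refused: stop for now  */
	}
	h->dispatch_from = from;                                 /* resume point for the next run */
}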
281 static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
283 struct request_queue *q = hctx->queue;
293 if (!list_empty_careful(&hctx->dispatch)) {
294 spin_lock(&hctx->lock);
295 if (!list_empty(&hctx->dispatch))
296 list_splice_init(&hctx->dispatch, &rq_list);
297 spin_unlock(&hctx->lock);
314 blk_mq_sched_mark_restart_hctx(hctx);
315 if (blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) {
317 ret = blk_mq_do_dispatch_sched(hctx);
319 ret = blk_mq_do_dispatch_ctx(hctx);
322 ret = blk_mq_do_dispatch_sched(hctx);
323 } else if (hctx->dispatch_busy) {
325 ret = blk_mq_do_dispatch_ctx(hctx);
327 blk_mq_flush_busy_ctxs(hctx, &rq_list);
328 blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
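
Lines 281-328 set the per-run policy: anything already parked on hctx->dispatch is spliced off (under hctx->lock, elided below) and dispatched first, with SCHED_RESTART marked so a later completion re-runs the queue; only if the device accepted those does the code pull more, from the elevator if there is one, from the software contexts one by one if the queue is busy, or by flushing every busy context in one batch if it is idle. A compact model of that decision ladder, with trivially stubbed helpers:

#include <stdbool.h>

struct hwq_state {
	bool has_elevator;               /* models q->elevator         */
	bool dispatch_nonempty;          /* models hctx->dispatch      */
	bool dispatch_busy;              /* models hctx->dispatch_busy */
};

/* Trivial stand-ins for the real dispatch helpers. */
static bool dispatch_requeued_list(struct hwq_state *s) { (void)s; return true; } /* splice + dispatch hctx->dispatch */
static int  dispatch_from_sched(struct hwq_state *s)    { (void)s; return 0; }    /* blk_mq_do_dispatch_sched()       */
static int  dispatch_from_ctxs(struct hwq_state *s)     { (void)s; return 0; }    /* blk_mq_do_dispatch_ctx()         */
static void flush_and_dispatch_all(struct hwq_state *s) { (void)s; }              /* flush busy ctxs + dispatch list  */

/* Model of __blk_mq_sched_dispatch_requests(): requeued requests first,
 * then feed the device from whichever source fits the queue's state. */
static int sched_dispatch_requests(struct hwq_state *s)
{
	int ret = 0;

	if (s->dispatch_nonempty) {
		/* mark SCHED_RESTART, then dispatch the requeued requests */
		if (dispatch_requeued_list(s)) {
			/* the device made progress, so keep feeding it     */
			ret = s->has_elevator ? dispatch_from_sched(s)
					      : dispatch_from_ctxs(s);
		}
	} else if (s->has_elevator) {
		ret = dispatch_from_sched(s);
	} else if (s->dispatch_busy) {
		ret = dispatch_from_ctxs(s);      /* one by one, for fairness     */
	} else {
		flush_and_dispatch_all(s);        /* idle queue: batch everything */
	}
	return ret;                               /* may be the -EAGAIN equivalent */
}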
334 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
336 struct request_queue *q = hctx->queue;
339 if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
342 hctx->run++;
345 * A return of -EAGAIN is an indication that hctx->dispatch is not
348 if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
349 if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
350 blk_mq_run_hw_queue(hctx, true);
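
Lines 334-350 are the public entry point: it does nothing when the hardware queue is stopped or the queue is quiesced, bumps hctx->run, and treats a -EAGAIN from the internal pass ("hctx->dispatch was found non-empty") by retrying once inline and only then deferring to an asynchronous queue run instead of spinning. A small sketch of that bounded-retry shape, with stubbed helpers:

#define DISPATCH_AGAIN (-1)              /* models -EAGAIN */

/* Trivial stand-ins for __blk_mq_sched_dispatch_requests() and
 * blk_mq_run_hw_queue(hctx, true). */
static int  dispatch_pass(void *hctx)    { (void)hctx; return 0; }
static void run_queue_async(void *hctx)  { (void)hctx; }

/* Model of blk_mq_sched_dispatch_requests(): retry a "try again" result
 * once inline, then hand off to an asynchronous re-run. */
static void sched_dispatch(void *hctx)
{
	if (dispatch_pass(hctx) == DISPATCH_AGAIN) {
		if (dispatch_pass(hctx) == DISPATCH_AGAIN)
			run_queue_async(hctx);   /* still contended: defer rather than loop */
	}
}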
359 struct blk_mq_hw_ctx *hctx;
367 hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
368 type = hctx->type;
369 if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
403 static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
410 * passthrough request has to be added to hctx->dispatch directly.
413 * FS request will be added to hctx->dispatch. However passthrough
416 * chance to dispatch it given we prioritize requests in hctx->dispatch.
433 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
437 if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
441 * hctx->dispatch) directly and there is at most one in-flight
448 * rq to the front of hctx->dispatch, it is easier to introduce
454 * drive when adding flush rq to the front of hctx->dispatch.
456 * Simply queue flush rq to the front of hctx->dispatch so that
468 e->type->ops.insert_requests(hctx, &list, at_head);
471 __blk_mq_insert_request(hctx, rq, at_head);
477 blk_mq_run_hw_queue(hctx, async);
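
Lines 403-477 cover single-request insertion. The bypass helper's comment explains why flush-sequence and passthrough requests must not go through the scheduler: they are queued at the front of hctx->dispatch, where they are seen first and cannot be delayed, merged, or starved. Ordinary requests go to the elevator's insert_requests hook if one is attached, or straight into the software context otherwise, and the hardware queue is then kicked. A sketch of that three-way split, with stubbed destinations:

#include <stdbool.h>

struct rq_model {
	bool is_passthrough;             /* models blk_rq_is_passthrough(rq)    */
	bool is_flush_seq;               /* models rq->rq_flags & RQF_FLUSH_SEQ */
};

/* Trivial stand-ins for the insertion targets and the queue kick. */
static void bypass_to_dispatch_front(void *hctx, struct rq_model *rq)       { (void)hctx; (void)rq; }
static void elevator_insert(void *hctx, struct rq_model *rq, bool at_head)  { (void)hctx; (void)rq; (void)at_head; }
static void plain_ctx_insert(void *hctx, struct rq_model *rq, bool at_head) { (void)hctx; (void)rq; (void)at_head; }
static void run_hw_queue(void *hctx, bool async)                            { (void)hctx; (void)async; }

/* Model of blk_mq_sched_insert_request(): flush and passthrough requests
 * bypass the scheduler onto the front of hctx->dispatch; everything else
 * goes through the elevator, or the software context when there is none. */
static void sched_insert_request(void *hctx, struct rq_model *rq,
				 bool has_elevator, bool at_head,
				 bool run_queue, bool async)
{
	if (rq->is_flush_seq || rq->is_passthrough)
		bypass_to_dispatch_front(hctx, rq);
	else if (has_elevator)
		elevator_insert(hctx, rq, at_head);
	else
		plain_ctx_insert(hctx, rq, at_head);

	if (run_queue)
		run_hw_queue(hctx, async);
}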
480 void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
485 struct request_queue *q = hctx->queue;
494 e = hctx->queue->elevator;
496 e->type->ops.insert_requests(hctx, list, false);
503 if (!hctx->dispatch_busy && !e && !run_queue_async) {
504 blk_mq_try_issue_list_directly(hctx, list);
508 blk_mq_insert_requests(hctx, ctx, list);
511 blk_mq_run_hw_queue(hctx, run_queue_async);
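
Lines 480-511 are the batched counterpart: with an elevator the whole list goes to its insert_requests hook; without one, an idle queue gets the batch issued directly to the driver (saving an enqueue and dequeue through the software queue), and whatever could not be issued, or a busy queue's whole batch, is parked in the software context before the queue is run. A sketch of that split, with stubbed helpers:

#include <stdbool.h>
#include <stddef.h>

/* Trivial stand-ins for the destinations and the queue kick. */
static void   elevator_insert_list(void *hctx, void **rqs, size_t n)    { (void)hctx; (void)rqs; (void)n; }
static size_t try_issue_list_directly(void *hctx, void **rqs, size_t n) { (void)hctx; (void)rqs; (void)n; return 0; } /* returns leftovers */
static void   insert_into_sw_ctx(void *hctx, void **rqs, size_t n)      { (void)hctx; (void)rqs; (void)n; }
static void   kick_hw_queue(void *hctx, bool async)                     { (void)hctx; (void)async; }

/* Model of blk_mq_sched_insert_requests(). */
static void sched_insert_requests(void *hctx, void **rqs, size_t n,
				  bool has_elevator, bool dispatch_busy,
				  bool run_queue_async)
{
	if (has_elevator) {
		elevator_insert_list(hctx, rqs, n);
	} else {
		if (!dispatch_busy && !run_queue_async) {
			/* fast path: hand the batch straight to the driver */
			n = try_issue_list_directly(hctx, rqs, n);
			if (n == 0)
				return;          /* everything issued, nothing to queue or run */
		}
		insert_into_sw_ctx(hctx, rqs, n);        /* leftovers or busy-queue batch */
	}
	kick_hw_queue(hctx, run_queue_async);
}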
517 struct blk_mq_hw_ctx *hctx,
525 hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
527 if (!hctx->sched_tags)
530 ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
532 blk_mq_free_rq_map(hctx->sched_tags, flags);
533 hctx->sched_tags = NULL;
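
Lines 517-533 size the per-hctx scheduler tags to q->nr_requests and unwind cleanly: if allocating the requests behind the map fails, the map itself is freed again and hctx->sched_tags is reset to NULL so later teardown can test the pointer safely. A generic alloc-then-unwind sketch of the same shape:

#include <stdlib.h>

struct sched_tags {
	void       **rqs;
	unsigned int nr;
};

/* Model of blk_mq_sched_alloc_tags(): allocate the tag map, then the
 * request pool behind it, and unwind the first allocation if the second
 * fails so *tagsp is either fully set up or left NULL. */
static int alloc_sched_tags(struct sched_tags **tagsp, unsigned int nr_requests)
{
	struct sched_tags *tags = calloc(1, sizeof(*tags));

	*tagsp = NULL;
	if (!tags)
		return -1;                       /* models -ENOMEM */

	tags->nr = nr_requests;
	tags->rqs = calloc(nr_requests, sizeof(*tags->rqs));
	if (!tags->rqs) {
		free(tags);                      /* unwind the map allocation      */
		return -1;                       /* caller sees sched_tags == NULL */
	}

	*tagsp = tags;
	return 0;
}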
542 struct blk_mq_hw_ctx *hctx;
545 queue_for_each_hw_ctx(q, hctx, i) {
547 unsigned int flags = hctx->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;
549 if (hctx->sched_tags) {
550 blk_mq_free_rq_map(hctx->sched_tags, flags);
551 hctx->sched_tags = NULL;
558 struct blk_mq_hw_ctx *hctx;
577 queue_for_each_hw_ctx(q, hctx, i) {
578 ret = blk_mq_sched_alloc_tags(q, hctx, i);
589 queue_for_each_hw_ctx(q, hctx, i) {
591 ret = e->ops.init_hctx(hctx, i);
600 blk_mq_debugfs_register_sched_hctx(q, hctx);
618 struct blk_mq_hw_ctx *hctx;
621 queue_for_each_hw_ctx(q, hctx, i) {
622 if (hctx->sched_tags)
623 blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i);
629 struct blk_mq_hw_ctx *hctx;
632 queue_for_each_hw_ctx(q, hctx, i) {
633 blk_mq_debugfs_unregister_sched_hctx(hctx);
634 if (e->type->ops.exit_hctx && hctx->sched_data) {
635 e->type->ops.exit_hctx(hctx, i);
636 hctx->sched_data = NULL;
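
The remaining fragments (lines 542-636) are the setup and teardown loops: queue_for_each_hw_ctx walks every hardware queue, the elevator's per-hctx init/exit hooks run only when they exist (and, on exit, only when sched_data was actually set), and freed pointers are reset to NULL so repeating the teardown is harmless. A user-space model of the idempotent per-hctx teardown, with stand-in names:

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

struct hctx_model {
	void *sched_data;                /* per-hctx scheduler state, may be NULL   */
	void *sched_tags;                /* per-hctx scheduler tag map, may be NULL */
};

/* Stand-in for the elevator's optional exit_hctx hook. */
static void elevator_exit_hctx(struct hctx_model *h, unsigned int i) { (void)h; (void)i; }

/* Model of blk_mq_exit_sched() plus blk_mq_sched_tags_teardown(): visit
 * every hardware queue, run the exit hook only where state was set up,
 * and NULL everything that is freed so the loop can run again safely. */
static void exit_sched(struct hctx_model *hctxs, size_t nr_hw_queues,
		       bool have_exit_hook)
{
	for (size_t i = 0; i < nr_hw_queues; i++) {
		struct hctx_model *h = &hctxs[i];

		if (have_exit_hook && h->sched_data) {
			elevator_exit_hctx(h, (unsigned int)i);
			h->sched_data = NULL;
		}
		if (h->sched_tags) {
			free(h->sched_tags);
			h->sched_tags = NULL;
		}
	}
}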