Lines matching refs:ctx — cross-reference hits for the software queue context (struct blk_mq_ctx) in the blk-mq core. Each entry below is the source line number followed by the matched line; a short hedged sketch of the surrounding code follows each cluster of hits.

57  * Check if any of the ctx, dispatch list or elevator
68 * Mark this ctx as having pending work in this hardware queue
71 struct blk_mq_ctx *ctx)
73 const int bit = ctx->index_hw[hctx->type];
80 struct blk_mq_ctx *ctx)
82 const int bit = ctx->index_hw[hctx->type];
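
The comments at 57 and 68 describe the pending-work bitmap, and lines 71-82 are the bodies of blk_mq_hctx_mark_pending() and blk_mq_hctx_clear_pending(): each ctx owns one bit in its hctx's ctx_map, indexed by ctx->index_hw[hctx->type]. A sketch of the pair, assuming the sbitmap_test_bit()/sbitmap_set_bit()/sbitmap_clear_bit() helpers from <linux/sbitmap.h>; details may differ by kernel version:

static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	/* test first so an already-set bit doesn't dirty the cacheline */
	if (!sbitmap_test_bit(&hctx->ctx_map, bit))
		sbitmap_set_bit(&hctx->ctx_map, bit);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	sbitmap_clear_bit(&hctx->ctx_map, bit);
}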
351 struct blk_mq_ctx *ctx = data->ctx;
357 rq->mq_ctx = ctx;
474 data->ctx = blk_mq_get_ctx(q);
475 data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
658 data.ctx = __blk_mq_get_ctx(q, cpu);
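
On the allocation side (lines 351-658), the submitting CPU's per-cpu ctx is looked up, the matching hctx is derived from it, and the new request is stamped with its originating ctx. A minimal sketch of that wiring; blk_mq_pick_ctx() and blk_mq_stamp_ctx() are hypothetical names for illustration, and the real code carries this state in struct blk_mq_alloc_data among other fields:

/* hypothetical helper: choose the ctx/hctx pair for this submission */
static void blk_mq_pick_ctx(struct request_queue *q,
			    struct blk_mq_alloc_data *data)
{
	data->ctx = blk_mq_get_ctx(q);	/* this CPU's software queue */
	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
}

/* hypothetical helper: remember the origin so the free path (below)
 * can return tags to the right ctx */
static void blk_mq_stamp_ctx(struct blk_mq_alloc_data *data,
			     struct request *rq)
{
	rq->mq_ctx = data->ctx;
}

Line 658 is the variant that pins a caller-chosen CPU via __blk_mq_get_ctx(q, cpu) instead of the running CPU.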
703 struct blk_mq_ctx *ctx = rq->mq_ctx;
715 blk_mq_put_tag(hctx->tags, ctx, rq->tag);
717 blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
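
The free path (lines 703-717) reads that stamp back and returns both tags to their pools. A trimmed sketch of __blk_mq_free_request(), assuming the three-argument blk_mq_put_tag() shown in the match; queue-exit and scheduler-restart handling are omitted:

static void __blk_mq_free_request(struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	const int sched_tag = rq->internal_tag;

	if (rq->tag != BLK_MQ_NO_TAG)
		blk_mq_put_tag(hctx->tags, ctx, rq->tag);	  /* driver tag */
	if (sched_tag != BLK_MQ_NO_TAG)
		blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag); /* sched tag */
}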
1206 * For request which hctx has only one ctx mapping,
1703 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1706 spin_lock(&ctx->lock);
1707 list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
1709 spin_unlock(&ctx->lock);
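
Lines 1703-1709 are from flush_busy_ctx(), the callback that sbitmap_for_each_set() runs over hctx->ctx_map to drain every pending ctx onto one dispatch list. A sketch built around the matched lines; the struct layout follows the usage above:

struct flush_busy_ctx_data {
	struct blk_mq_hw_ctx	*hctx;
	struct list_head	*list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_busy_ctx_data *flush_data = data;
	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];	/* bit number -> ctx */
	enum hctx_type type = hctx->type;

	spin_lock(&ctx->lock);
	/* move everything queued on this ctx onto the dispatch list */
	list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
	sbitmap_clear_bit(sb, bitnr);		/* ctx no longer pending */
	spin_unlock(&ctx->lock);
	return true;				/* keep iterating */
}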
1738 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1741 spin_lock(&ctx->lock);
1742 if (!list_empty(&ctx->rq_lists[type])) {
1743 dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
1745 if (list_empty(&ctx->rq_lists[type]))
1748 spin_unlock(&ctx->lock);
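
Lines 1738-1748 are the single-request counterpart, dispatch_rq_from_ctx(): it pops one request and clears the pending bit only once the ctx's list drains, so a partially drained ctx stays marked. Sketch under the same caveats:

struct dispatch_rq_data {
	struct blk_mq_hw_ctx	*hctx;
	struct request		*rq;
};

static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
				 void *data)
{
	struct dispatch_rq_data *dispatch_data = data;
	struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
	enum hctx_type type = hctx->type;

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_lists[type])) {
		dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
		list_del_init(&dispatch_data->rq->queuelist);
		if (list_empty(&ctx->rq_lists[type]))
			sbitmap_clear_bit(sb, bitnr);
	}
	spin_unlock(&ctx->lock);

	return !dispatch_data->rq;	/* stop as soon as one is found */
}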
2308 struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
2316 struct blk_mq_hw_ctx *hctx = ctx->hctxs[HCTX_TYPE_DEFAULT];
2492 struct blk_mq_ctx *ctx, struct list_head *list,
2510 * preemption doesn't flush plug list, so it's possible ctx->cpu is
2514 BUG_ON(rq->mq_ctx != ctx);
2520 spin_lock(&ctx->lock);
2521 list_splice_tail_init(list, &ctx->rq_lists[type]);
2522 blk_mq_hctx_mark_pending(hctx, ctx);
2523 spin_unlock(&ctx->lock);
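
Lines 2492-2523 insert a whole plug list into one ctx: every request must belong to that ctx (the BUG_ON at 2514) even though the list may have been built on a different CPU, since preemption can migrate a task without flushing its plug list. A trimmed sketch; the run_queue_async parameter is an assumption from the trailing comma at 2492, and the scheduler/run-queue steps are omitted:

static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
				   struct blk_mq_ctx *ctx,
				   struct list_head *list,
				   bool run_queue_async)
{
	struct request *rq;
	enum hctx_type type = hctx->type;

	/* preemption doesn't flush the plug list, so ctx->cpu may not be
	 * the CPU we run on; only the ctx itself must match */
	list_for_each_entry(rq, list, queuelist)
		BUG_ON(rq->mq_ctx != ctx);

	spin_lock(&ctx->lock);
	list_splice_tail_init(list, &ctx->rq_lists[type]);
	blk_mq_hctx_mark_pending(hctx, ctx);
	spin_unlock(&ctx->lock);
}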
2531 struct blk_mq_ctx *ctx = rq->mq_ctx;
2579 spin_lock(&ctx->lock);
2581 list_add(&rq->queuelist, &ctx->rq_lists[hctx->type]);
2584 &ctx->rq_lists[hctx->type]);
2585 blk_mq_hctx_mark_pending(hctx, ctx);
2586 spin_unlock(&ctx->lock);
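
Lines 2531-2586 are the single-request insert: head insertion uses list_add(), tail insertion list_add_tail(), and either way the ctx is marked pending. A fragment-level sketch; the at-head test is an assumption (recent kernels key off a BLK_MQ_INSERT_AT_HEAD flag, older ones off a bool at_head):

	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	spin_lock(&ctx->lock);
	if (at_head)	/* assumed condition, see above */
		list_add(&rq->queuelist, &ctx->rq_lists[hctx->type]);
	else
		list_add_tail(&rq->queuelist,
			      &ctx->rq_lists[hctx->type]);
	blk_mq_hctx_mark_pending(hctx, ctx);
	spin_unlock(&ctx->lock);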
3578 struct blk_mq_ctx *ctx;
3586 ctx = __blk_mq_get_ctx(hctx->queue, cpu);
3589 spin_lock(&ctx->lock);
3590 if (!list_empty(&ctx->rq_lists[type])) {
3591 list_splice_init(&ctx->rq_lists[type], &tmp);
3592 blk_mq_hctx_clear_pending(hctx, ctx);
3594 spin_unlock(&ctx->lock);
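
Lines 3578-3594 are the CPU-hotplug "dead" handler: the departing CPU's ctx is drained under ctx->lock, its pending bit cleared, and the orphaned requests handed elsewhere. A sketch; the hlist bookkeeping and the dispatch-list handoff at the end follow the usual blk-mq pattern but are partly assumption:

static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
{
	struct blk_mq_hw_ctx *hctx =
		hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
	struct blk_mq_ctx *ctx;
	enum hctx_type type;
	LIST_HEAD(tmp);

	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
	type = hctx->type;

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_lists[type])) {
		list_splice_init(&ctx->rq_lists[type], &tmp);
		blk_mq_hctx_clear_pending(hctx, ctx);
	}
	spin_unlock(&ctx->lock);

	if (list_empty(&tmp))
		return 0;

	/* reparent the drained requests onto the hctx dispatch list */
	spin_lock(&hctx->lock);
	list_splice_tail_init(&tmp, &hctx->dispatch);
	spin_unlock(&hctx->lock);

	blk_mq_run_hw_queue(hctx, true);
	return 0;
}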
3877 struct blk_mq_ctx *ctx;
3893 ctx = per_cpu_ptr(q->queue_ctx, i);
3896 ctx->hctxs[j] = blk_mq_map_queue_type(q,
3907 * case, remap the current ctx to hctx[0] which
3914 ctx->hctxs[j] = hctx;
3925 ctx->index_hw[hctx->type] = hctx->nr_ctx;
3926 hctx->ctxs[hctx->nr_ctx++] = ctx;
3936 ctx->hctxs[j] = blk_mq_map_queue_type(q,
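
Lines 3877-3936 are from blk_mq_map_swqueue(), which builds the mapping in both directions: ctx->hctxs[type] points each software queue at its hardware queue, while hctx->ctxs[] and ctx->index_hw[] give the hardware queue a dense, bit-indexable list of its feeders (this index is exactly the bit used by the mark/clear_pending helpers above). A trimmed sketch; the unmapped-type fallback at 3936 is kept, but the cpumask and type bookkeeping are simplified:

static void blk_mq_map_swqueue(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	unsigned int i, j;

	for_each_possible_cpu(i) {
		ctx = per_cpu_ptr(q->queue_ctx, i);
		for (j = 0; j < set->nr_maps; j++) {
			if (!set->map[j].nr_queues) {
				/* type has no queues: remap to default */
				ctx->hctxs[j] = blk_mq_map_queue_type(q,
						HCTX_TYPE_DEFAULT, i);
				continue;
			}
			hctx = blk_mq_map_queue_type(q, j, i);
			ctx->hctxs[j] = hctx;
			if (cpumask_test_and_set_cpu(i, hctx->cpumask))
				continue;	/* ctx already registered */
			/* dense index of this ctx within the hctx */
			ctx->index_hw[hctx->type] = hctx->nr_ctx;
			hctx->ctxs[hctx->nr_ctx++] = ctx;
		}
	}
}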
4060 struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
4061 ctx->ctxs = ctxs;
4077 * but we can't group ctx/kctx kobj without it.
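
Finally, lines 4060-4077 are from the ctx allocation path: the per-cpu ctxs are carved out of one container, and each gets a back-pointer so its kobject can be grouped with its siblings in sysfs (the "ctx/kctx kobj" comment at 4077). A minimal sketch, with error handling omitted:

	ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);

	for_each_possible_cpu(cpu) {
		struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);

		/* back-pointer: lets each per-cpu ctx reach the shared
		 * container that owns the parent kobject */
		ctx->ctxs = ctxs;
	}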