Lines Matching refs:ctx (in block/blk-mq.c; the leading number on each line below is the source-file line number)

67 * Check if any of the ctx, dispatch list or elevator
78 * Mark this ctx as having pending work in this hardware queue
81 struct blk_mq_ctx *ctx)
83 const int bit = ctx->index_hw[hctx->type];
90 struct blk_mq_ctx *ctx)
92 const int bit = ctx->index_hw[hctx->type];
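The fragments at source lines 67-92 come from the helpers that track which software queues (ctx) currently hold work for a hardware queue (hctx): ctx->index_hw[hctx->type] is the ctx's bit in the hctx's ctx_map bitmap. A minimal reconstruction of the two helpers follows; only the matched lines are quoted from the source, while the sbitmap calls and the test-before-set are assumptions about the surrounding code:

static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	/* avoid dirtying the shared cacheline when the bit is already set */
	if (!sbitmap_test_bit(&hctx->ctx_map, bit))
		sbitmap_set_bit(&hctx->ctx_map, bit);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	sbitmap_clear_bit(&hctx->ctx_map, bit);
}

The bit index set here is consumed by the flush and dispatch callbacks further down (lines 1027 and 1062).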
308 rq->mq_ctx = data->ctx;
343 data->ctx->rq_dispatched[op_is_sync(data->cmd_flags)]++;
390 data->ctx = blk_mq_get_ctx(q);
391 data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
489 data.ctx = __blk_mq_get_ctx(q, cpu);
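Lines 308-489 are the request-allocation path: blk_mq_rq_ctx_init() stamps the new request with the software queue it was allocated from (rq->mq_ctx = data->ctx, line 308) and bumps that ctx's rq_dispatched counter (line 343); __blk_mq_alloc_request() picks the current CPU's ctx and maps it to a hardware queue (lines 390-391), while the line-489 hit forces a specific CPU's ctx instead. A sketch of the common selection step; pick_alloc_ctx is a hypothetical helper name used only to isolate the two matched lines:

/* hypothetical helper: isolates the ctx/hctx selection of lines 390-391 */
static void pick_alloc_ctx(struct blk_mq_alloc_data *data)
{
	struct request_queue *q = data->q;

	/* software queue of the CPU doing the allocation */
	data->ctx = blk_mq_get_ctx(q);
	/* hardware queue this ctx maps to for the command's type */
	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
}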
509 struct blk_mq_ctx *ctx = rq->mq_ctx;
517 blk_mq_put_tag(hctx->tags, ctx, rq->tag);
519 blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
528 struct blk_mq_ctx *ctx = rq->mq_ctx;
540 ctx->rq_completed[rq_is_sync(rq)]++;
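Lines 509-540 are the mirror image on the free path: the driver tag and, if present, the scheduler tag are returned keyed by the request's ctx, and the per-ctx rq_completed counter is bumped (line 540). A reconstruction of the tag release; the rq->mq_hctx and rq->internal_tag fields and the -1 sentinel checks are assumptions beyond the matched lines:

static void __blk_mq_free_request(struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	const int sched_tag = rq->internal_tag;

	/* give the driver tag and the scheduler tag back to their sets */
	if (rq->tag != -1)
		blk_mq_put_tag(hctx->tags, ctx, rq->tag);
	if (sched_tag != -1)
		blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
}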
1027 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1030 spin_lock(&ctx->lock);
1031 list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
1033 spin_unlock(&ctx->lock);
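Lines 1027-1033 are from the callback that blk_mq_flush_busy_ctxs() runs for every bit set in hctx->ctx_map: each busy ctx's request list is spliced onto the caller's dispatch list under ctx->lock. A reconstruction; the flush_busy_ctx_data container and the sbitmap_clear_bit() call are assumptions beyond the matched lines:

struct flush_busy_ctx_data {
	struct blk_mq_hw_ctx *hctx;
	struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_busy_ctx_data *flush_data = data;
	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];	/* bitnr is ctx->index_hw */
	enum hctx_type type = hctx->type;

	spin_lock(&ctx->lock);
	list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
	sbitmap_clear_bit(sb, bitnr);
	spin_unlock(&ctx->lock);

	return true;	/* keep walking: flush every busy ctx */
}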
1062 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1065 spin_lock(&ctx->lock);
1066 if (!list_empty(&ctx->rq_lists[type])) {
1067 dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
1069 if (list_empty(&ctx->rq_lists[type]))
1072 spin_unlock(&ctx->lock);
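Lines 1062-1072 are the dispatch-one-request variant of the same walk: instead of draining the ctx it takes a single request, clears the pending bit only if the ctx became empty, and stops the sbitmap iteration once something was found. A reconstruction; the dispatch_rq_data container and the list_del_init() call are assumptions beyond the matched lines:

struct dispatch_rq_data {
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;
};

static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
				 void *data)
{
	struct dispatch_rq_data *dispatch_data = data;
	struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
	enum hctx_type type = hctx->type;

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_lists[type])) {
		dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
		list_del_init(&dispatch_data->rq->queuelist);
		if (list_empty(&ctx->rq_lists[type]))
			sbitmap_clear_bit(sb, bitnr);
	}
	spin_unlock(&ctx->lock);

	return !dispatch_data->rq;	/* stop once a request has been taken */
}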
1839 struct blk_mq_ctx *ctx = rq->mq_ctx;
1842 lockdep_assert_held(&ctx->lock);
1847 list_add(&rq->queuelist, &ctx->rq_lists[type]);
1849 list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
1855 struct blk_mq_ctx *ctx = rq->mq_ctx;
1857 lockdep_assert_held(&ctx->lock);
1860 blk_mq_hctx_mark_pending(hctx, ctx);
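Lines 1839-1860 insert a single request into its ctx: the request goes to the head or tail of ctx->rq_lists[type] with ctx->lock held (lockdep-asserted), and the ctx is then marked pending in the hctx. A reconstruction; the at_head parameter and the exact signatures are assumptions beyond the matched lines:

static void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
				     struct request *rq, bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	enum hctx_type type = hctx->type;

	lockdep_assert_held(&ctx->lock);

	if (at_head)
		list_add(&rq->queuelist, &ctx->rq_lists[type]);
	else
		list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
}

void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			     bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	lockdep_assert_held(&ctx->lock);

	__blk_mq_insert_req_list(hctx, rq, at_head);
	blk_mq_hctx_mark_pending(hctx, ctx);
}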
1888 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
1896 * preemption doesn't flush plug list, so it's possible ctx->cpu is
1900 BUG_ON(rq->mq_ctx != ctx);
1904 spin_lock(&ctx->lock);
1905 list_splice_tail_init(list, &ctx->rq_lists[type]);
1906 blk_mq_hctx_mark_pending(hctx, ctx);
1907 spin_unlock(&ctx->lock);
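Lines 1888-1907 are the batch variant used when flushing a plug list: every request on the list must already carry the ctx passed in (hence the BUG_ON), because, per the comment at 1896, preemption does not flush the plug list, so the task may have migrated and the current CPU's ctx cannot be trusted. A reconstruction; the list iteration macro is an assumption, and any tracing the real function carries in that loop is omitted:

void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			    struct list_head *list)
{
	struct request *rq;
	enum hctx_type type = hctx->type;

	/*
	 * preemption doesn't flush the plug list, so ctx->cpu may differ
	 * from the current CPU: trust rq->mq_ctx, not smp_processor_id()
	 */
	list_for_each_entry(rq, list, queuelist)
		BUG_ON(rq->mq_ctx != ctx);

	spin_lock(&ctx->lock);
	list_splice_tail_init(list, &ctx->rq_lists[type]);
	blk_mq_hctx_mark_pending(hctx, ctx);
	spin_unlock(&ctx->lock);
}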
2608 struct blk_mq_ctx *ctx;
2616 ctx = __blk_mq_get_ctx(hctx->queue, cpu);
2619 spin_lock(&ctx->lock);
2620 if (!list_empty(&ctx->rq_lists[type])) {
2621 list_splice_init(&ctx->rq_lists[type], &tmp);
2622 blk_mq_hctx_clear_pending(hctx, ctx);
2624 spin_unlock(&ctx->lock);
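Lines 2608-2624 are the CPU-hotplug "dead" handler: when a CPU goes offline, any requests still queued on that CPU's ctx are moved to a temporary list (clearing the ctx's pending bit) and then handed to the hardware queue so they are not lost. A reconstruction; the cpuhp glue, the requeue onto hctx->dispatch and the final queue run are assumptions beyond the matched lines:

static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
{
	struct blk_mq_hw_ctx *hctx =
		hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
	struct blk_mq_ctx *ctx = __blk_mq_get_ctx(hctx->queue, cpu);
	enum hctx_type type = hctx->type;
	LIST_HEAD(tmp);

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_lists[type])) {
		list_splice_init(&ctx->rq_lists[type], &tmp);
		blk_mq_hctx_clear_pending(hctx, ctx);
	}
	spin_unlock(&ctx->lock);

	if (list_empty(&tmp))
		return 0;

	/* hand the orphaned requests to the hardware queue's dispatch list */
	spin_lock(&hctx->lock);
	list_splice_tail_init(&tmp, &hctx->dispatch);
	spin_unlock(&hctx->lock);

	blk_mq_run_hw_queue(hctx, true);
	return 0;
}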
2893 struct blk_mq_ctx *ctx;
2909 ctx = per_cpu_ptr(q->queue_ctx, i);
2912 ctx->hctxs[j] = blk_mq_map_queue_type(q,
2923 * case, remap the current ctx to hctx[0] which
2930 ctx->hctxs[j] = hctx;
2941 ctx->index_hw[hctx->type] = hctx->nr_ctx;
2942 hctx->ctxs[hctx->nr_ctx++] = ctx;
2952 ctx->hctxs[j] = blk_mq_map_queue_type(q,
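Lines 2893-2952 are from blk_mq_map_swqueue(), which (re)builds the ctx-to-hctx mapping: for every possible CPU, ctx->hctxs[j] is filled per queue type from the tag-set map (with the fallback noted in the comment at 2923 that remaps to hctx[0] when a hardware queue could not be set up), and each ctx is given its slot in the owning hctx via ctx->index_hw and hctx->ctxs[]. A condensed sketch of that loop; the cpumask handling is an assumption and the unmapped-queue-type fallbacks are deliberately dropped:

static void blk_mq_map_swqueue(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	unsigned int i, j;

	queue_for_each_hw_ctx(q, hctx, i)
		hctx->nr_ctx = 0;

	for_each_possible_cpu(i) {
		ctx = per_cpu_ptr(q->queue_ctx, i);
		for (j = 0; j < q->tag_set->nr_maps; j++) {
			/* hardware queue serving this CPU for queue type j */
			hctx = blk_mq_map_queue_type(q, j, i);
			ctx->hctxs[j] = hctx;

			/* a shared hctx may already have claimed this CPU */
			if (cpumask_test_cpu(i, hctx->cpumask))
				continue;
			cpumask_set_cpu(i, hctx->cpumask);

			/*
			 * the slot recorded here is the ctx_map bit that
			 * blk_mq_hctx_mark_pending()/_clear_pending() use
			 */
			ctx->index_hw[hctx->type] = hctx->nr_ctx;
			hctx->ctxs[hctx->nr_ctx++] = ctx;
		}
	}
}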
3076 struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
3077 ctx->ctxs = ctxs;
3093 * but we can't group ctx/kctx kobj without it.
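Lines 3076-3093 belong to the per-CPU ctx allocation: each ctx gets a back-pointer to the shared blk_mq_ctxs container, which, per the comment fragment at 3093, exists so the ctx kobjects can be grouped under one parent in sysfs. A sketch of that allocation; the function name, signature, and error handling are assumptions, and only the per-CPU loop body reflects the matched lines:

static int blk_mq_alloc_ctxs(struct request_queue *q)
{
	struct blk_mq_ctxs *ctxs;
	int cpu;

	ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL);
	if (!ctxs)
		return -ENOMEM;

	ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
	if (!ctxs->queue_ctx) {
		kfree(ctxs);
		return -ENOMEM;
	}

	for_each_possible_cpu(cpu) {
		struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);

		/* back-pointer so the per-ctx kobjects share one parent */
		ctx->ctxs = ctxs;
	}

	q->queue_ctx = ctxs->queue_ctx;
	return 0;
}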