Lines matching refs:hctx in block/blk-mq.h (v5.10-era)

43 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
44 			     unsigned int);
47 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
48 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
49 					struct blk_mq_ctx *start);
69 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
70 				bool at_head);
73 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
74 				struct list_head *list);
78 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
79 				    struct list_head *list);
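
Together these declarations form the dispatch path's internal API: blk_mq_flush_busy_ctxs() drains the per-CPU software queues feeding an hctx into a caller-supplied list, and blk_mq_dispatch_rq_list() hands such a list to the driver. A minimal kernel-context sketch of how the pair is combined on the no-elevator path, loosely modeled on blk_mq_sched_dispatch_requests() in blk-mq-sched.c (sketch_dispatch_all is a hypothetical name, not a kernel symbol):

	/* Sketch: drain every busy software queue into a local list,
	 * then dispatch the whole list to the driver in one go. */
	static void sketch_dispatch_all(struct blk_mq_hw_ctx *hctx)
	{
		LIST_HEAD(rq_list);

		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		/* Final argument is the count of pre-obtained budgets; 0 here. */
		blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
	}
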
89 * @type: the hctx type index
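
The lone @type line is a fragment of the kernel-doc for blk_mq_map_queue_type(), which maps a (hctx type, CPU) pair to its hardware queue; in v5.10-era trees the helper reads approximately:

	static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
								  enum hctx_type type,
								  unsigned int cpu)
	{
		/* Look up the hw queue assigned to this CPU for the given map type. */
		return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
	}
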
130 extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
161 	struct blk_mq_hw_ctx *hctx;
172 		return data->hctx->sched_tags;
174 	return data->hctx->tags;
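
Line 161 is the hctx field of struct blk_mq_alloc_data, and the two return statements belong to blk_mq_tags_from_data(), which picks the scheduler-owned tag set when an I/O scheduler is attached; approximately:

	static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
	{
		if (data->q->elevator)
			return data->hctx->sched_tags;	/* scheduler tags */

		return data->hctx->tags;		/* driver tags */
	}
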
177 static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
178 {
179 	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
180 }
182 static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
183 {
184 	return hctx->nr_ctx && hctx->tags;
185 }
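
These two predicates guard the run-queue paths: a stopped or unmapped hardware queue must not be dispatched to. A hedged sketch of the usual guard, mirroring checks scattered through blk-mq.c (sketch_run_hw_queue is a hypothetical name):

	static void sketch_run_hw_queue(struct blk_mq_hw_ctx *hctx)
	{
		/* Never dispatch on a stopped or unmapped hardware queue. */
		if (blk_mq_hctx_stopped(hctx) || !blk_mq_hw_queue_mapped(hctx))
			return;

		/* ... proceed with dispatch ... */
	}
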
204 static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
205 {
206 	if (blk_mq_is_sbitmap_shared(hctx->flags))
207 		atomic_inc(&hctx->queue->nr_active_requests_shared_sbitmap);
208 	else
209 		atomic_inc(&hctx->nr_active);
210 }
212 static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
213 {
214 	if (blk_mq_is_sbitmap_shared(hctx->flags))
215 		atomic_dec(&hctx->queue->nr_active_requests_shared_sbitmap);
216 	else
217 		atomic_dec(&hctx->nr_active);
218 }
220 static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
221 {
222 	if (blk_mq_is_sbitmap_shared(hctx->flags))
223 		return atomic_read(&hctx->queue->nr_active_requests_shared_sbitmap);
224 	return atomic_read(&hctx->nr_active);
225 }
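
The inc/dec/read trio maintains one logical in-flight counter behind two storage strategies: a queue-wide atomic when the sbitmap is shared across hardware queues (BLK_MQ_F_TAG_HCTX_SHARED), and a per-hctx atomic otherwise. The increment side is driven from driver-tag allocation, condensed here from the v5.10-era blk_mq_get_driver_tag() in blk-mq.c (sketch_account_driver_tag is a hypothetical wrapper; RQF_MQ_INFLIGHT ensures each request is counted exactly once):

	static void sketch_account_driver_tag(struct blk_mq_hw_ctx *hctx,
					      struct request *rq)
	{
		if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
		    !(rq->rq_flags & RQF_MQ_INFLIGHT)) {
			rq->rq_flags |= RQF_MQ_INFLIGHT;
			__blk_mq_inc_active_requests(hctx);
		}
	}
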
226 static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
227 					   struct request *rq)
229 	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
234 		__blk_mq_dec_active_requests(hctx);
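
Only the hctx-bearing lines appear in the listing above; the complete helper (v5.10-era, approximately) releases the driver tag and, when the request had been counted as in-flight, reverses the accounting done at tag-allocation time:

	static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
						   struct request *rq)
	{
		blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
		rq->tag = BLK_MQ_NO_TAG;

		if (rq->rq_flags & RQF_MQ_INFLIGHT) {
			rq->rq_flags &= ~RQF_MQ_INFLIGHT;
			__blk_mq_dec_active_requests(hctx);
		}
	}
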
301 static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
302 				  struct sbitmap_queue *bt)
306 	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
315 	if (blk_mq_is_sbitmap_shared(hctx->flags)) {
316 		struct request_queue *q = hctx->queue;
323 		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
325 		users = atomic_read(&hctx->tags->active_queues);
335 	return __blk_mq_active_requests(hctx) < depth;
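
hctx_may_queue() implements fair tag sharing: when several queues compete for one tag set, each active queue gets roughly its proportional share of the depth, with a floor of 4 tags. The complete v5.10-era function, approximately:

	static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
					  struct sbitmap_queue *bt)
	{
		unsigned int depth, users;

		if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
			return true;

		/* A single tag cannot be subdivided. */
		if (bt->sb.depth == 1)
			return true;

		if (blk_mq_is_sbitmap_shared(hctx->flags)) {
			struct request_queue *q = hctx->queue;
			struct blk_mq_tag_set *set = q->tag_set;

			if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
				return true;
			users = atomic_read(&set->active_queues_shared_sbitmap);
		} else {
			if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
				return true;
			users = atomic_read(&hctx->tags->active_queues);
		}

		if (!users)
			return true;

		/* Fair share, rounded up, never fewer than 4 tags per queue. */
		depth = max((bt->sb.depth + users - 1) / users, 4U);
		return __blk_mq_active_requests(hctx) < depth;
	}

Worked example: with a 256-tag set and 3 active queues, depth = max((256 + 2) / 3, 4U) = 86, so each queue may have up to 86 requests in flight; the small over-commit from rounding up (3 x 86 = 258) is tolerated because the sbitmap itself still caps total allocations at 256.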