Lines matching refs:hctx — identifier cross-reference hits, one per matching source line; the leading number on each line is the line number in the searched file (by the function names, apparently the Linux block layer's block/blk-mq.h).
48 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
50 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
51 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
75 * @type: the hctx type index
121 extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
160 struct blk_mq_hw_ctx *hctx;
176 int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
189 struct blk_mq_hw_ctx *hctx)
191 if (!hctx)
193 return sbq_wait_ptr(bt, &hctx->wait_index);
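The bt_wait_ptr() fragment above (source lines 189-193) picks which wait queue a tag-starved allocator should sleep on: a NULL hctx gets a fixed wait state, otherwise sbq_wait_ptr() round-robins through the sbitmap's wait queues via hctx->wait_index so sleepers spread out instead of piling onto one queue. A minimal userspace model of that round-robin, with invented names (pick_wait_queue and NUM_WAIT_QUEUES are mine):

    #include <stdatomic.h>

    #define NUM_WAIT_QUEUES 8          /* the kernel's count is SBQ_WAIT_QUEUES */

    struct waitq { int unused; };      /* stand-in for struct sbq_wait_state */

    static struct waitq wait_queues[NUM_WAIT_QUEUES];

    /* Each caller bumps the shared index and takes the next queue in turn;
     * unsigned wrap-around keeps the modulo valid indefinitely. */
    static struct waitq *pick_wait_queue(atomic_uint *wait_index)
    {
        unsigned int i = atomic_fetch_add(wait_index, 1);
        return &wait_queues[i % NUM_WAIT_QUEUES];
    }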
199 static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
201 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
202 __blk_mq_tag_busy(hctx);
205 static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
207 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
208 __blk_mq_tag_idle(hctx);
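blk_mq_tag_busy()/blk_mq_tag_idle() above are deliberately no-ops unless the tag set is shared across queues (BLK_MQ_F_TAG_QUEUE_SHARED): only then is it worth counting how many queues are competing, which hctx_may_queue() further down uses to partition tags fairly. A rough userspace model of the idea behind __blk_mq_tag_busy()/__blk_mq_tag_idle() — register or deregister a queue as an active tag-set user exactly once; the real code uses atomic bit operations on hctx->state rather than this compare-exchange:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct tagset_model { atomic_uint active_queues; };

    struct hctx_model {
        atomic_bool tag_active;        /* models the BLK_MQ_S_TAG_ACTIVE bit */
        struct tagset_model *tags;
    };

    /* First request on an idle queue registers it as an active user once. */
    static void model_tag_busy(struct hctx_model *h)
    {
        bool expected = false;
        if (atomic_compare_exchange_strong(&h->tag_active, &expected, true))
            atomic_fetch_add(&h->tags->active_queues, 1);
    }

    /* Going idle gives the queue's share of tags back to everyone else. */
    static void model_tag_idle(struct hctx_model *h)
    {
        bool expected = true;
        if (atomic_compare_exchange_strong(&h->tag_active, &expected, false))
            atomic_fetch_sub(&h->tags->active_queues, 1);
    }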
225 return data->hctx->sched_tags;
226 return data->hctx->tags;
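Source lines 225-226 are the two return statements of blk_mq_tags_from_data(): allocation draws from the scheduler-owned tag space (sched_tags) when an I/O scheduler is in the picture, and from the driver tag space (tags) otherwise. A sketch with the branch spelled out; the tested condition differs across kernel versions (rq_flags & RQF_SCHED_TAGS in newer trees, an elevator check in older ones), so treat it as an assumption:

    /* Sketch only; the condition is an assumption, not verbatim source. */
    static inline struct blk_mq_tags *tags_from_data_sketch(struct blk_mq_alloc_data *data)
    {
        if (data->rq_flags & RQF_SCHED_TAGS)   /* scheduler-managed request? */
            return data->hctx->sched_tags;     /* source line 225 */
        return data->hctx->tags;               /* source line 226 */
    }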
229 static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
231 return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
234 static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
236 return hctx->nr_ctx && hctx->tags;
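blk_mq_hctx_stopped() tests the BLK_MQ_S_STOPPED state bit, and blk_mq_hw_queue_mapped() treats a hardware queue as usable only when it has software contexts (nr_ctx) and a tag set attached. A hypothetical caller-side guard combining the two, just to show the intended call pattern (can_dispatch is my name, and this assumes a kernel build context):

    /* Only touch hardware queues that are mapped and not stopped. */
    static bool can_dispatch(struct blk_mq_hw_ctx *hctx)
    {
        return blk_mq_hw_queue_mapped(hctx) && !blk_mq_hctx_stopped(hctx);
    }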
274 static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
276 if (blk_mq_is_shared_tags(hctx->flags))
277 atomic_inc(&hctx->queue->nr_active_requests_shared_tags);
279 atomic_inc(&hctx->nr_active);
282 static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
285 if (blk_mq_is_shared_tags(hctx->flags))
286 atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
288 atomic_sub(val, &hctx->nr_active);
291 static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
293 __blk_mq_sub_active_requests(hctx, 1);
296 static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
298 if (blk_mq_is_shared_tags(hctx->flags))
299 return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
300 return atomic_read(&hctx->nr_active);
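The __blk_mq_{inc,sub,dec}_active_requests() and __blk_mq_active_requests() group shows the accounting split for in-flight requests: when the whole device shares one tag set (blk_mq_is_shared_tags()), the count lives once on the request_queue in nr_active_requests_shared_tags; otherwise every hardware context keeps its own nr_active. A compilable userspace model of the two modes (all names mine):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct queue_model { atomic_int nr_active_shared; };   /* per request_queue */

    struct hwctx_model {
        bool shared_tags;          /* models blk_mq_is_shared_tags(hctx->flags) */
        atomic_int nr_active;      /* per hardware context */
        struct queue_model *q;
    };

    static void inc_active(struct hwctx_model *h)
    {
        if (h->shared_tags)
            atomic_fetch_add(&h->q->nr_active_shared, 1); /* one counter for all hctxs */
        else
            atomic_fetch_add(&h->nr_active, 1);           /* per-hctx counter */
    }

    static void sub_active(struct hwctx_model *h, int val)
    {
        if (h->shared_tags)
            atomic_fetch_sub(&h->q->nr_active_shared, val);
        else
            atomic_fetch_sub(&h->nr_active, val);
    }

    static int read_active(struct hwctx_model *h)
    {
        return h->shared_tags ? atomic_load(&h->q->nr_active_shared)
                              : atomic_load(&h->nr_active);
    }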
302 static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
305 blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
310 __blk_mq_dec_active_requests(hctx);
322 bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq);
326 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
329 !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
330 hctx->tags->rqs[rq->tag] = rq;
334 return __blk_mq_get_driver_tag(hctx, rq);
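blk_mq_get_driver_tag() (source lines 326-334) is the counterpart to __blk_mq_put_driver_tag() shown just before it, which frees the tag via blk_mq_put_tag() and decrements the active count. The get side has a fast path: if the request already holds a tag and the tag set is not queue-shared, there is no fairness bookkeeping to do, so it only publishes the request in tags->rqs[] and succeeds; everything else falls through to __blk_mq_get_driver_tag(). A userspace model of that fast/slow split, with a trivial stub standing in for the slow path:

    #include <stdbool.h>

    #define NO_TAG (-1)                    /* models BLK_MQ_NO_TAG */

    struct rq_model { int tag; };

    /* Stand-in for __blk_mq_get_driver_tag(): in the kernel this allocates
     * a tag and does shared-tag accounting; the toy version always succeeds. */
    static bool slow_path_alloc(struct rq_model *rq)
    {
        rq->tag = 0;
        return true;
    }

    static bool get_driver_tag_model(struct rq_model *rq, bool queue_shared)
    {
        if (rq->tag != NO_TAG && !queue_shared)
            return true;                   /* fast path: tag already held */
        return slow_path_alloc(rq);        /* slow path */
    }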
391 static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
396 if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
405 if (blk_mq_is_shared_tags(hctx->flags)) {
406 struct request_queue *q = hctx->queue;
411 if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
415 users = READ_ONCE(hctx->tags->active_queues);
423 return __blk_mq_active_requests(hctx) < depth;
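hctx_may_queue() is the fairness throttle that the busy/idle tracking above feeds: it passes trivially when the tag set is not queue-shared or the relevant active flag is clear, then reads active_queues (source line 415) and admits the request only while this queue sits under its share of the total tag depth (source line 423). A userspace model of that final check; the exact rounding and the minimum share are my reading of the kernel's arithmetic, so treat them as assumptions:

    /* Admit a request only if this queue is under its fair share of tags. */
    static bool may_queue_model(unsigned int total_depth, unsigned int users,
                                unsigned int active_on_this_queue)
    {
        unsigned int share;

        if (users == 0)
            return true;                            /* no competition tracked */
        share = (total_depth + users - 1) / users;  /* ceil(depth / users) */
        if (share < 4)
            share = 4;                              /* assumed floor so small shares still progress */
        return active_on_this_queue < share;
    }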