Lines matching refs:hctx in block/blk-mq.c (Linux kernel multiqueue block layer)
51 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
53 static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
60 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
62 return !list_empty_careful(&hctx->dispatch) ||
63 sbitmap_any_bit_set(&hctx->ctx_map) ||
64 blk_mq_sched_has_work(hctx);
70 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
73 const int bit = ctx->index_hw[hctx->type];
75 if (!sbitmap_test_bit(&hctx->ctx_map, bit))
76 sbitmap_set_bit(&hctx->ctx_map, bit);
79 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
82 const int bit = ctx->index_hw[hctx->type];
84 sbitmap_clear_bit(&hctx->ctx_map, bit);
307 struct blk_mq_hw_ctx *hctx;
310 queue_for_each_hw_ctx(q, hctx, i)
311 if (blk_mq_hw_queue_mapped(hctx))
312 blk_mq_tag_wakeup_all(hctx->tags, true);
352 struct blk_mq_hw_ctx *hctx = data->hctx;
358 rq->mq_hctx = hctx;
475 data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
477 blk_mq_tag_busy(data->hctx);
495 * Waiting allocations only fail because of an inactive hctx. In that
496 * case just retry the hctx assignment and tag allocation as CPU hotplug
506 * are migrated off the CPU, and thus off the hctx that
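
The comment fragments at lines 495-506 come from the request-allocation slow path: a blocking tag allocation can only fail because its hctx was marked inactive by CPU hotplug, so the caller simply redoes the ctx/hctx mapping and allocates again. A condensed sketch of that retry loop, reconstructed from the lines listed here rather than quoted verbatim:

retry:
	data->ctx = blk_mq_get_ctx(q);
	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);

	tag = blk_mq_get_tag(data);		/* may sleep unless BLK_MQ_REQ_NOWAIT */
	if (tag == BLK_MQ_NO_TAG) {
		if (data->flags & BLK_MQ_REQ_NOWAIT)
			return NULL;
		/* Sleep briefly so the task is migrated off the dying CPU
		 * (and hence off the inactive hctx), then retry the mapping. */
		msleep(3);
		goto retry;
	}
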
652 data.hctx = xa_load(&q->hctx_table, hctx_idx);
653 if (!blk_mq_hw_queue_mapped(data.hctx))
655 cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
663 blk_mq_tag_busy(data.hctx);
704 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
712 __blk_mq_dec_active_requests(hctx);
715 blk_mq_put_tag(hctx->tags, ctx, rq->tag);
717 blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
718 blk_mq_sched_restart(hctx);
1064 static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx,
1067 struct request_queue *q = hctx->queue;
1071 * update hctx->nr_active in batch
1073 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
1074 __blk_mq_sub_active_requests(hctx, nr_tags);
1076 blk_mq_put_tags(hctx->tags, tag_array, nr_tags);
1206	 * For a request whose hctx has only one ctx mapping,
1330 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1348 blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);
1396 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1409 blk_mq_run_hw_queue(hctx, false);
1484 * already. Insert it into the hctx dispatch list to avoid
1642 struct blk_mq_hw_ctx *hctx;
1683 * each hctx as idle.
1685 queue_for_each_hw_ctx(q, hctx, i) {
1686 /* the hctx may be unmapped, so check it here */
1687 if (blk_mq_hw_queue_mapped(hctx))
1688 blk_mq_tag_idle(hctx);
1695 struct blk_mq_hw_ctx *hctx;
1702 struct blk_mq_hw_ctx *hctx = flush_data->hctx;
1703 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1704 enum hctx_type type = hctx->type;
1717 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
1720 .hctx = hctx,
1724 sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
1729 struct blk_mq_hw_ctx *hctx;
1737 struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
1738 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1739 enum hctx_type type = hctx->type;
1753 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
1756 unsigned off = start ? start->index_hw[hctx->type] : 0;
1758 .hctx = hctx,
1762 __sbitmap_for_each_set(&hctx->ctx_map, off,
1792 bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq)
1797 if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
1800 __blk_mq_inc_active_requests(hctx);
1802 hctx->tags->rqs[rq->tag] = rq;
1809 struct blk_mq_hw_ctx *hctx;
1811 hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
1813 spin_lock(&hctx->dispatch_wait_lock);
1818 sbq = &hctx->tags->bitmap_tags;
1821 spin_unlock(&hctx->dispatch_wait_lock);
1823 blk_mq_run_hw_queue(hctx, true);
1833 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
1841 if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
1842 !(blk_mq_is_shared_tags(hctx->flags))) {
1843 blk_mq_sched_mark_restart_hctx(hctx);
1856 wait = &hctx->dispatch_wait;
1861 sbq = &hctx->tags->breserved_tags;
1863 sbq = &hctx->tags->bitmap_tags;
1864 wq = &bt_wait_ptr(sbq, hctx)->wait;
1867 spin_lock(&hctx->dispatch_wait_lock);
1869 spin_unlock(&hctx->dispatch_wait_lock);
1901 spin_unlock(&hctx->dispatch_wait_lock);
1912 spin_unlock(&hctx->dispatch_wait_lock);
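
Lines 1833-1912 belong to blk_mq_mark_tag_wait(): when no driver tag is available on a shared-tag queue, the hctx's dispatch_wait entry is hooked onto the tag bitmap's waitqueue, and the tag allocation is then retried once so that a tag freed in the meantime is not missed. A simplified sketch of that add-then-retry pattern (the reserved-tag branch and the double-check under the lock are trimmed):

	wait_queue_entry_t *wait = &hctx->dispatch_wait;
	struct wait_queue_head *wq = &bt_wait_ptr(&hctx->tags->bitmap_tags, hctx)->wait;

	spin_lock_irq(&wq->lock);
	spin_lock(&hctx->dispatch_wait_lock);
	__add_wait_queue(wq, wait);		/* woken by blk_mq_dispatch_wake() when a tag frees */

	/*
	 * Retry once after queueing the waiter: a tag freed just before the
	 * add above would not have produced a wakeup for this entry.
	 */
	if (blk_mq_get_driver_tag(rq)) {
		list_del_init(&wait->entry);	/* got a tag after all, stop waiting */
		spin_unlock(&hctx->dispatch_wait_lock);
		spin_unlock_irq(&wq->lock);
		return true;
	}

	spin_unlock(&hctx->dispatch_wait_lock);
	spin_unlock_irq(&wq->lock);
	return false;
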
1927 static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
1931 ewma = hctx->dispatch_busy;
1941 hctx->dispatch_busy = ewma;
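
Only the load and store of hctx->dispatch_busy at lines 1931 and 1941 show up in this search; the elided middle of blk_mq_update_dispatch_busy() is an exponentially weighted moving average that rises when a dispatch hits a busy condition and decays back toward zero otherwise. A generic sketch of that style of update, with illustrative weights rather than the kernel's exact constants:

	unsigned int ewma = hctx->dispatch_busy;

	if (ewma || busy) {
		/* Keep 7/8 of the old value, add a fixed bump on a busy dispatch. */
		ewma *= 8 - 1;
		if (busy)
			ewma += 16;
		ewma /= 8;
		hctx->dispatch_busy = ewma;
	}
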
1975 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1995 if (!blk_mq_mark_tag_wait(hctx, rq)) {
2031 static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int queued,
2034 if (hctx->queue->mq_ops->commit_rqs && queued) {
2035 trace_block_unplug(hctx->queue, queued, !from_schedule);
2036 hctx->queue->mq_ops->commit_rqs(hctx);
2043 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
2047 struct request_queue *q = hctx->queue;
2066 WARN_ON_ONCE(hctx != rq->mq_hctx);
2082 ret = q->mq_ops->queue_rq(hctx, &bd);
2114 blk_mq_commit_rqs(hctx, queued, false);
2117 * Any items that need requeuing? Stuff them into hctx->dispatch,
2124 ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) ||
2125 blk_mq_is_shared_tags(hctx->flags));
2130 spin_lock(&hctx->lock);
2131 list_splice_tail_init(list, &hctx->dispatch);
2132 spin_unlock(&hctx->lock);
2135 * Order adding requests to hctx->dispatch and checking
2138 * miss the new added requests to hctx->dispatch, meantime
2169 needs_restart = blk_mq_sched_needs_restart(hctx);
2173 (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
2174 blk_mq_run_hw_queue(hctx, true);
2176 blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
2178 blk_mq_update_dispatch_busy(hctx, true);
2182 blk_mq_update_dispatch_busy(hctx, false);
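
Lines 2117-2182 are the tail of blk_mq_dispatch_rq_list(): requests the driver could not take are spliced back onto hctx->dispatch under hctx->lock, and only then is the scheduler-restart state examined, so that either this path re-runs the hardware queue or a concurrent blk_mq_sched_restart() sees the newly parked requests. A simplified sketch of that order-sensitive sequence, not the kernel's exact code:

	spin_lock(&hctx->lock);
	list_splice_tail_init(list, &hctx->dispatch);	/* park the leftovers */
	spin_unlock(&hctx->lock);

	/*
	 * Make the splice visible before testing SCHED_RESTART; this pairs
	 * with the check in blk_mq_sched_restart() so that at least one of
	 * the two sides re-runs the queue.
	 */
	smp_mb();
	if (!blk_mq_sched_needs_restart(hctx))
		blk_mq_run_hw_queue(hctx, true);
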
2186 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
2188 int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
2191 cpu = cpumask_first(hctx->cpumask);
2201 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
2204 int next_cpu = hctx->next_cpu;
2206 if (hctx->queue->nr_hw_queues == 1)
2209 if (--hctx->next_cpu_batch <= 0) {
2211 next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
2214 next_cpu = blk_mq_first_mapped_cpu(hctx);
2215 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2219	 * Do unbound schedule if we can't find an online CPU for this hctx,
2230 * in hctx->cpumask become online again.
2232 hctx->next_cpu = next_cpu;
2233 hctx->next_cpu_batch = 1;
2237 hctx->next_cpu = next_cpu;
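
blk_mq_hctx_next_cpu() (lines 2201-2237) picks the CPU that will execute hctx->run_work: it sticks to one CPU for BLK_MQ_CPU_WORK_BATCH runs, then steps to the next online CPU in hctx->cpumask, wrapping around to the first mapped CPU. A trimmed sketch of that rotation; the single-queue and offline-CPU/WORK_CPU_UNBOUND fallbacks described in the comments are omitted, and pick_next_cpu() is an illustrative name:

static int pick_next_cpu(struct blk_mq_hw_ctx *hctx)
{
	int cpu = hctx->next_cpu;

	if (--hctx->next_cpu_batch <= 0) {
		cpu = cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask);
		if (cpu >= nr_cpu_ids)			/* wrapped past the last mapped CPU */
			cpu = blk_mq_first_mapped_cpu(hctx);
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}

	hctx->next_cpu = cpu;
	return cpu;
}
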
2243 * @hctx: Pointer to the hardware queue to run.
2248 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
2250 if (unlikely(blk_mq_hctx_stopped(hctx)))
2252 kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
2259 * @hctx: Pointer to the hardware queue to run.
2266 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
2275 might_sleep_if(!async && hctx->flags & BLK_MQ_F_BLOCKING);
2285 __blk_mq_run_dispatch_ops(hctx->queue, false,
2286 need_run = !blk_queue_quiesced(hctx->queue) &&
2287 blk_mq_hctx_has_pending(hctx));
2292 if (async || !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
2293 blk_mq_delay_run_hw_queue(hctx, 0);
2297 blk_mq_run_dispatch_ops(hctx->queue,
2298 blk_mq_sched_dispatch_requests(hctx));
2312 * dispatch from hctx for the current CPU since running multiple queues
2316 struct blk_mq_hw_ctx *hctx = ctx->hctxs[HCTX_TYPE_DEFAULT];
2318 if (!blk_mq_hctx_stopped(hctx))
2319 return hctx;
2330 struct blk_mq_hw_ctx *hctx, *sq_hctx;
2336 queue_for_each_hw_ctx(q, hctx, i) {
2337 if (blk_mq_hctx_stopped(hctx))
2340 * Dispatch from this hctx either if there's no hctx preferred
2344 if (!sq_hctx || sq_hctx == hctx ||
2345 !list_empty_careful(&hctx->dispatch))
2346 blk_mq_run_hw_queue(hctx, async);
2358 struct blk_mq_hw_ctx *hctx, *sq_hctx;
2364 queue_for_each_hw_ctx(q, hctx, i) {
2365 if (blk_mq_hctx_stopped(hctx))
2369 * pending delay untouched. Otherwise, a hctx can stall
2370 * if another hctx is re-delaying the other's work
2373 if (delayed_work_pending(&hctx->run_work))
2376 * Dispatch from this hctx either if there's no hctx preferred
2380 if (!sq_hctx || sq_hctx == hctx ||
2381 !list_empty_careful(&hctx->dispatch))
2382 blk_mq_delay_run_hw_queue(hctx, msecs);
2396 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
2398 cancel_delayed_work(&hctx->run_work);
2400 set_bit(BLK_MQ_S_STOPPED, &hctx->state);
2415 struct blk_mq_hw_ctx *hctx;
2418 queue_for_each_hw_ctx(q, hctx, i)
2419 blk_mq_stop_hw_queue(hctx);
2423 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
2425 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
2427 blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);
2433 struct blk_mq_hw_ctx *hctx;
2436 queue_for_each_hw_ctx(q, hctx, i)
2437 blk_mq_start_hw_queue(hctx);
2441 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
2443 if (!blk_mq_hctx_stopped(hctx))
2446 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
2447 blk_mq_run_hw_queue(hctx, async);
2453 struct blk_mq_hw_ctx *hctx;
2456 queue_for_each_hw_ctx(q, hctx, i)
2457 blk_mq_start_stopped_hw_queue(hctx, async ||
2458 (hctx->flags & BLK_MQ_F_BLOCKING));
2464 struct blk_mq_hw_ctx *hctx =
2467 blk_mq_run_dispatch_ops(hctx->queue,
2468 blk_mq_sched_dispatch_requests(hctx));
2481 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2483 spin_lock(&hctx->lock);
2485 list_add(&rq->queuelist, &hctx->dispatch);
2487 list_add_tail(&rq->queuelist, &hctx->dispatch);
2488 spin_unlock(&hctx->lock);
2491 static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
2496 enum hctx_type type = hctx->type;
2502 if (!hctx->dispatch_busy && !run_queue_async) {
2503 blk_mq_run_dispatch_ops(hctx->queue,
2504 blk_mq_try_issue_list_directly(hctx, list));
2522 blk_mq_hctx_mark_pending(hctx, ctx);
2525 blk_mq_run_hw_queue(hctx, run_queue_async);
2532 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2536	 * Passthrough requests have to be added to hctx->dispatch
2539 * them, which gets them added to hctx->dispatch.
2543 * dispatch it given we prioritize requests in hctx->dispatch.
2550 * hctx->dispatch) directly and there is at most one in-flight
2557 * rq to the front of hctx->dispatch, it is easier to introduce
2563 * drive when adding flush rq to the front of hctx->dispatch.
2565 * Simply queue flush rq to the front of hctx->dispatch so that
2575 q->elevator->type->ops.insert_requests(hctx, &list, flags);
2581 list_add(&rq->queuelist, &ctx->rq_lists[hctx->type]);
2584 &ctx->rq_lists[hctx->type]);
2585 blk_mq_hctx_mark_pending(hctx, ctx);
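
The comment block at lines 2536-2565 explains why flush and passthrough requests bypass the I/O scheduler: they are head-inserted on hctx->dispatch so they are issued before scheduler-owned requests, while ordinary requests go to the elevator or onto the per-ctx software list. A reduced sketch of that decision, reusing the bypass-insert pattern listed at lines 2481-2488; the predicate here is a simplification of the source's flush/passthrough checks, and rq, hctx and flags are assumed to come from the enclosing insert path:

	if (blk_rq_is_passthrough(rq) || (rq->rq_flags & RQF_FLUSH_SEQ)) {
		/* Bypass the scheduler; head-insert so it is dispatched first. */
		spin_lock(&hctx->lock);
		list_add(&rq->queuelist, &hctx->dispatch);
		spin_unlock(&hctx->lock);
	} else if (rq->q->elevator) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		rq->q->elevator->type->ops.insert_requests(hctx, &list, flags);
	} else {
		struct blk_mq_ctx *ctx = rq->mq_ctx;

		/* No scheduler: queue on the software ctx and mark it pending. */
		spin_lock(&ctx->lock);
		list_add_tail(&rq->queuelist, &ctx->rq_lists[hctx->type]);
		blk_mq_hctx_mark_pending(hctx, ctx);
		spin_unlock(&ctx->lock);
	}
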
2608 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
2623 ret = q->mq_ops->queue_rq(hctx, &bd);
2626 blk_mq_update_dispatch_busy(hctx, false);
2630 blk_mq_update_dispatch_busy(hctx, true);
2634 blk_mq_update_dispatch_busy(hctx, false);
2658 * @hctx: Pointer of the associated hardware queue.
2662 * request directly to device driver. Else, insert at hctx->dispatch queue, so
2666 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
2671 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
2678 blk_mq_run_hw_queue(hctx, rq->cmd_flags & REQ_NOWAIT);
2682 ret = __blk_mq_issue_directly(hctx, rq, true);
2689 blk_mq_run_hw_queue(hctx, false);
2699 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2701 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
2708 return __blk_mq_issue_directly(hctx, rq, last);
2713 struct blk_mq_hw_ctx *hctx = NULL;
2721 if (hctx != rq->mq_hctx) {
2722 if (hctx) {
2723 blk_mq_commit_rqs(hctx, queued, false);
2726 hctx = rq->mq_hctx;
2737 blk_mq_run_hw_queue(hctx, false);
2747 blk_mq_commit_rqs(hctx, queued, false);
2854 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
2874 blk_mq_run_hw_queue(hctx, false);
2884 blk_mq_commit_rqs(hctx, queued, false);
2986 struct blk_mq_hw_ctx *hctx;
3054 hctx = rq->mq_hctx;
3056 (hctx->dispatch_busy && (q->nr_hw_queues == 1 || !is_sync))) {
3058 blk_mq_run_hw_queue(hctx, true);
3060 blk_mq_run_dispatch_ops(q, blk_mq_try_issue_directly(hctx, rq));
3491 struct blk_mq_hw_ctx *hctx;
3499 if (rq->mq_hctx != iter_data->hctx)
3505 static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
3507 struct blk_mq_tags *tags = hctx->sched_tags ?
3508 hctx->sched_tags : hctx->tags;
3510 .hctx = hctx,
3518 struct blk_mq_hw_ctx *hctx)
3520 if (cpumask_first_and(hctx->cpumask, cpu_online_mask) != cpu)
3522 if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
3529 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
3532 if (!cpumask_test_cpu(cpu, hctx->cpumask) ||
3533 !blk_mq_last_cpu_in_hctx(cpu, hctx))
3537 * Prevent new request from being allocated on the current hctx.
3543 set_bit(BLK_MQ_S_INACTIVE, &hctx->state);
3551 if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) {
3552 while (blk_mq_hctx_has_requests(hctx))
3554 percpu_ref_put(&hctx->queue->q_usage_counter);
3562 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
3565 if (cpumask_test_cpu(cpu, hctx->cpumask))
3566 clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
3577 struct blk_mq_hw_ctx *hctx;
3582 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
3583 if (!cpumask_test_cpu(cpu, hctx->cpumask))
3586 ctx = __blk_mq_get_ctx(hctx->queue, cpu);
3587 type = hctx->type;
3592 blk_mq_hctx_clear_pending(hctx, ctx);
3599 spin_lock(&hctx->lock);
3600 list_splice_tail_init(&tmp, &hctx->dispatch);
3601 spin_unlock(&hctx->lock);
3603 blk_mq_run_hw_queue(hctx, true);
3607 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
3609 if (!(hctx->flags & BLK_MQ_F_STACKING))
3611 &hctx->cpuhp_online);
3613 &hctx->cpuhp_dead);
3645 /* hctx->ctxs will be freed in queue's release handler */
3648 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
3650 struct request *flush_rq = hctx->fq->flush_rq;
3652 if (blk_mq_hw_queue_mapped(hctx))
3653 blk_mq_tag_idle(hctx);
3662 set->ops->exit_hctx(hctx, hctx_idx);
3664 blk_mq_remove_cpuhp(hctx);
3669 list_add(&hctx->hctx_list, &q->unused_hctx_list);
3676 struct blk_mq_hw_ctx *hctx;
3679 queue_for_each_hw_ctx(q, hctx, i) {
3682 blk_mq_exit_hctx(q, set, hctx, i);
3688 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
3690 hctx->queue_num = hctx_idx;
3692 if (!(hctx->flags & BLK_MQ_F_STACKING))
3694 &hctx->cpuhp_online);
3695 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
3697 hctx->tags = set->tags[hctx_idx];
3700 set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
3703 if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
3704 hctx->numa_node))
3707 if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL))
3714 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
3717 set->ops->exit_hctx(hctx, hctx_idx);
3719 blk_mq_remove_cpuhp(hctx);
3727 struct blk_mq_hw_ctx *hctx;
3730 hctx = kzalloc_node(sizeof(struct blk_mq_hw_ctx), gfp, node);
3731 if (!hctx)
3734 if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
3737 atomic_set(&hctx->nr_active, 0);
3740 hctx->numa_node = node;
3742 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
3743 spin_lock_init(&hctx->lock);
3744 INIT_LIST_HEAD(&hctx->dispatch);
3745 hctx->queue = q;
3746 hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
3748 INIT_LIST_HEAD(&hctx->hctx_list);
3754 hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
3756 if (!hctx->ctxs)
3759 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
3762 hctx->nr_ctx = 0;
3764 spin_lock_init(&hctx->dispatch_wait_lock);
3765 init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
3766 INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
3768 hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp);
3769 if (!hctx->fq)
3772 blk_mq_hctx_kobj_init(hctx);
3774 return hctx;
3777 sbitmap_free(&hctx->ctx_map);
3779 kfree(hctx->ctxs);
3781 free_cpumask_var(hctx->cpumask);
3783 kfree(hctx);
3796 struct blk_mq_hw_ctx *hctx;
3811 hctx = blk_mq_map_queue_type(q, j, i);
3812 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
3813 hctx->numa_node = cpu_to_node(i);
3876 struct blk_mq_hw_ctx *hctx;
3880 queue_for_each_hw_ctx(q, hctx, i) {
3881 cpumask_clear(hctx->cpumask);
3882 hctx->nr_ctx = 0;
3883 hctx->dispatch_from = NULL;
3889	 * If the cpu isn't present, the cpu is mapped to the first hctx.
3905	 * If tags initialization fails for some hctx,
3906 * that hctx won't be brought online. In this
3907 * case, remap the current ctx to hctx[0] which
3913 hctx = blk_mq_map_queue_type(q, j, i);
3914 ctx->hctxs[j] = hctx;
3920 if (cpumask_test_cpu(i, hctx->cpumask))
3923 cpumask_set_cpu(i, hctx->cpumask);
3924 hctx->type = j;
3925 ctx->index_hw[hctx->type] = hctx->nr_ctx;
3926 hctx->ctxs[hctx->nr_ctx++] = ctx;
3932 BUG_ON(!hctx->nr_ctx);
3940 queue_for_each_hw_ctx(q, hctx, i) {
3945 if (!hctx->nr_ctx) {
3953 hctx->tags = NULL;
3957 hctx->tags = set->tags[i];
3958 WARN_ON(!hctx->tags);
3965 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
3970 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
3971 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
3981 struct blk_mq_hw_ctx *hctx;
3984 queue_for_each_hw_ctx(q, hctx, i) {
3986 hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
3988 blk_mq_tag_idle(hctx);
3989 hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
4081 struct blk_mq_hw_ctx *hctx, *next;
4084 queue_for_each_hw_ctx(q, hctx, i)
4085 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
4087 /* all hctx are in .unused_hctx_list now */
4088 list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
4089 list_del_init(&hctx->hctx_list);
4090 kobject_put(&hctx->kobj);
4192 struct blk_mq_hw_ctx *hctx = NULL, *tmp;
4194 /* reuse dead hctx first */
4198 hctx = tmp;
4202 if (hctx)
4203 list_del_init(&hctx->hctx_list);
4206 if (!hctx)
4207 hctx = blk_mq_alloc_hctx(q, set, node);
4208 if (!hctx)
4211 if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
4214 return hctx;
4217 kobject_put(&hctx->kobj);
4225 struct blk_mq_hw_ctx *hctx;
4243 pr_warn("Allocate new hctx on node %d fails, fallback to previous one on node %d\n",
4245 hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node);
4246 WARN_ON_ONCE(!hctx);
4260 xa_for_each_start(&q->hctx_table, j, hctx, j)
4261 blk_mq_exit_hctx(q, set, hctx, j);
4330 /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
4332 /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
4625 struct blk_mq_hw_ctx *hctx;
4639 queue_for_each_hw_ctx(q, hctx, i) {
4640 if (!hctx->tags)
4646 if (hctx->sched_tags) {
4647 ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
4650 ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
4656 q->elevator->type->ops.depth_updated(hctx);
4833 static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
4840 ret = q->mq_ops->poll(hctx, iob);
4863 struct blk_mq_hw_ctx *hctx = xa_load(&q->hctx_table, cookie);
4865 return blk_hctx_poll(q, hctx, iob, flags);
4894 struct blk_mq_hw_ctx *hctx;
4899 queue_for_each_hw_ctx(q, hctx, i)
4900 cancel_delayed_work_sync(&hctx->run_work);