Lines matching refs: hctx
70 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
72 return !list_empty_careful(&hctx->dispatch) ||
73 sbitmap_any_bit_set(&hctx->ctx_map) ||
74 blk_mq_sched_has_work(hctx);
80 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
83 const int bit = ctx->index_hw[hctx->type];
85 if (!sbitmap_test_bit(&hctx->ctx_map, bit))
86 sbitmap_set_bit(&hctx->ctx_map, bit);
89 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
92 const int bit = ctx->index_hw[hctx->type];
94 sbitmap_clear_bit(&hctx->ctx_map, bit);
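The mark/clear helpers above (source lines 80-94) keep one bit per software context in hctx->ctx_map, so the dispatch side can see at a glance which per-CPU contexts have queued work. Below is a minimal userspace sketch of that pattern, using a plain 64-bit word instead of the kernel's sbitmap; all names in it are illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for hctx->ctx_map: one bit per software context. */
struct pending_map {
	uint64_t bits;
};

static void mark_pending(struct pending_map *map, unsigned int ctx_bit)
{
	/* Test before set, as blk_mq_hctx_mark_pending() does, to avoid
	 * dirtying a shared cacheline when the bit is already set. */
	if (!(map->bits & (1ULL << ctx_bit)))
		map->bits |= 1ULL << ctx_bit;
}

static void clear_pending(struct pending_map *map, unsigned int ctx_bit)
{
	map->bits &= ~(1ULL << ctx_bit);
}

static bool has_pending(const struct pending_map *map)
{
	return map->bits != 0;
}

int main(void)
{
	struct pending_map map = { 0 };

	mark_pending(&map, 3);
	printf("pending: %d\n", has_pending(&map));   /* prints 1 */
	clear_pending(&map, 3);
	printf("pending: %d\n", has_pending(&map));   /* prints 0 */
	return 0;
}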
102 static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
229 struct blk_mq_hw_ctx *hctx;
235 queue_for_each_hw_ctx(q, hctx, i) {
236 if (hctx->flags & BLK_MQ_F_BLOCKING)
237 synchronize_srcu(hctx->srcu);
275 struct blk_mq_hw_ctx *hctx;
278 queue_for_each_hw_ctx(q, hctx, i)
279 if (blk_mq_hw_queue_mapped(hctx))
280 blk_mq_tag_wakeup_all(hctx->tags, true);
309 rq->mq_hctx = data->hctx;
359 data->hctx->queued++;
391 data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
393 blk_mq_tag_busy(data->hctx);
396 * Waiting allocations only fail because of an inactive hctx. In that
397 * case just retry the hctx assignment and tag allocation as CPU hotplug
408 * off the CPU, and thus off the hctx that is going away.
483 data.hctx = q->queue_hw_ctx[hctx_idx];
484 if (!blk_mq_hw_queue_mapped(data.hctx))
486 cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
492 blk_mq_tag_busy(data.hctx);
510 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
517 blk_mq_put_tag(hctx->tags, ctx, rq->tag);
519 blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
520 blk_mq_sched_restart(hctx);
529 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
542 __blk_mq_dec_active_requests(hctx);
722 static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
723 __releases(hctx->srcu)
725 if (!(hctx->flags & BLK_MQ_F_BLOCKING))
728 srcu_read_unlock(hctx->srcu, srcu_idx);
731 static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
732 __acquires(hctx->srcu)
734 if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
739 *srcu_idx = srcu_read_lock(hctx->srcu);
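hctx_lock()/hctx_unlock() (source lines 722-739) pick the read-side protection based on BLK_MQ_F_BLOCKING: non-blocking queues use plain RCU, while drivers that may sleep in their queue_rq handler get an SRCU read section whose index is threaded back to the caller. The sketch below only mirrors the shape of that conditional in userspace, with a pthread rwlock standing in for the sleepable side and an atomic reader count for the non-sleepable side; it is not an SRCU implementation and it drops the index plumbing.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define HW_QUEUE_BLOCKING (1u << 0)   /* stand-in for BLK_MQ_F_BLOCKING */

/* Illustrative hardware-queue context: a blocking queue gets a sleepable
 * read lock, a non-blocking one a cheap reader count, loosely mirroring
 * the SRCU vs. RCU split in hctx_lock()/hctx_unlock(). */
struct hw_queue {
	unsigned int flags;
	pthread_rwlock_t sleepable_lock;  /* stands in for the SRCU domain */
	atomic_int fast_readers;          /* stands in for rcu_read_lock() */
};

static void hw_queue_lock(struct hw_queue *hq)
{
	if (hq->flags & HW_QUEUE_BLOCKING)
		pthread_rwlock_rdlock(&hq->sleepable_lock);
	else
		atomic_fetch_add_explicit(&hq->fast_readers, 1,
					  memory_order_acquire);
}

static void hw_queue_unlock(struct hw_queue *hq)
{
	if (hq->flags & HW_QUEUE_BLOCKING)
		pthread_rwlock_unlock(&hq->sleepable_lock);
	else
		atomic_fetch_sub_explicit(&hq->fast_readers, 1,
					  memory_order_release);
}

int main(void)
{
	struct hw_queue hq = { .flags = HW_QUEUE_BLOCKING };

	pthread_rwlock_init(&hq.sleepable_lock, NULL);
	hw_queue_lock(&hq);
	printf("dispatching inside the read-side section\n");
	hw_queue_unlock(&hq);
	pthread_rwlock_destroy(&hq.sleepable_lock);
	return 0;
}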
820 * data, so insert it to hctx dispatch list to avoid any
888 static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq,
895 if (blk_mq_request_started(rq) && rq->q == hctx->queue) {
957 static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
979 struct blk_mq_hw_ctx *hctx;
1007 * each hctx as idle.
1009 queue_for_each_hw_ctx(q, hctx, i) {
1010 /* the hctx may be unmapped, so check it here */
1011 if (blk_mq_hw_queue_mapped(hctx))
1012 blk_mq_tag_idle(hctx);
1019 struct blk_mq_hw_ctx *hctx;
1026 struct blk_mq_hw_ctx *hctx = flush_data->hctx;
1027 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1028 enum hctx_type type = hctx->type;
1041 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
1044 .hctx = hctx,
1048 sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
1053 struct blk_mq_hw_ctx *hctx;
1061 struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
1062 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1063 enum hctx_type type = hctx->type;
1077 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
1080 unsigned off = start ? start->index_hw[hctx->type] : 0;
1082 .hctx = hctx,
1086 __sbitmap_for_each_set(&hctx->ctx_map, off,
1126 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1131 if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
1134 __blk_mq_inc_active_requests(hctx);
1136 hctx->tags->rqs[rq->tag] = rq;
1143 struct blk_mq_hw_ctx *hctx;
1145 hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
1147 spin_lock(&hctx->dispatch_wait_lock);
1152 sbq = hctx->tags->bitmap_tags;
1155 spin_unlock(&hctx->dispatch_wait_lock);
1157 blk_mq_run_hw_queue(hctx, true);
1167 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
1170 struct sbitmap_queue *sbq = hctx->tags->bitmap_tags;
1175 if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
1176 blk_mq_sched_mark_restart_hctx(hctx);
1189 wait = &hctx->dispatch_wait;
1193 wq = &bt_wait_ptr(sbq, hctx)->wait;
1196 spin_lock(&hctx->dispatch_wait_lock);
1198 spin_unlock(&hctx->dispatch_wait_lock);
1230 spin_unlock(&hctx->dispatch_wait_lock);
1241 spin_unlock(&hctx->dispatch_wait_lock);
1256 static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
1260 ewma = hctx->dispatch_busy;
1270 hctx->dispatch_busy = ewma;
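blk_mq_update_dispatch_busy() (source lines 1256-1270) folds each dispatch outcome into hctx->dispatch_busy as an integer exponentially weighted moving average, so later submitters can tell whether the device has recently been pushing back. Here is a self-contained sketch of that update rule; the weight and fixed-point scale are illustrative constants, not necessarily the kernel's.

#include <stdbool.h>
#include <stdio.h>

#define EWMA_WEIGHT 8   /* illustrative: how slowly the average moves */
#define EWMA_FACTOR 4   /* illustrative: fixed point, 1 << 4 means "busy" */

/* Update an integer EWMA the way the busy tracking does: decay the old
 * value and mix in either zero or a full-scale "busy" sample. */
static unsigned int ewma_update(unsigned int ewma, bool busy)
{
	if (!ewma && !busy)
		return 0;                 /* queue stays idle, skip the math */

	ewma *= EWMA_WEIGHT - 1;
	if (busy)
		ewma += 1u << EWMA_FACTOR;
	ewma /= EWMA_WEIGHT;
	return ewma;
}

int main(void)
{
	unsigned int busy_level = 0;

	for (int i = 0; i < 5; i++) {
		busy_level = ewma_update(busy_level, true);
		printf("after busy dispatch %d: %u\n", i + 1, busy_level);
	}
	busy_level = ewma_update(busy_level, false);
	printf("after one idle dispatch: %u\n", busy_level);
	return 0;
}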
1314 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1329 if (!blk_mq_mark_tag_wait(hctx, rq)) {
1356 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
1360 struct request_queue *q = hctx->queue;
1379 WARN_ON_ONCE(hctx != rq->mq_hctx);
1405 ret = q->mq_ops->queue_rq(hctx, &bd);
1434 hctx->dispatched[queued_to_index(queued)]++;
1441 q->mq_ops->commit_rqs(hctx);
1443 * Any items that need requeuing? Stuff them into hctx->dispatch,
1450 (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);
1454 spin_lock(&hctx->lock);
1455 list_splice_tail_init(list, &hctx->dispatch);
1456 spin_unlock(&hctx->lock);
1459 * Order adding requests to hctx->dispatch and checking
1462 * miss the new added requests to hctx->dispatch, meantime
1493 needs_restart = blk_mq_sched_needs_restart(hctx);
1497 (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
1498 blk_mq_run_hw_queue(hctx, true);
1500 blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
1502 blk_mq_update_dispatch_busy(hctx, true);
1505 blk_mq_update_dispatch_busy(hctx, false);
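blk_mq_dispatch_rq_list() (source lines 1356-1505) is the core hand-off loop: take requests off the passed-in list, feed each one to the driver's queue_rq hook, and when the driver reports a resource shortage, splice whatever remains onto hctx->dispatch and arrange a re-run. Below is a condensed userspace sketch of that control flow only; the status codes, request type, and driver callback are stand-ins, and the commit/restart details are omitted.

#include <stdbool.h>
#include <stdio.h>

enum drv_status { DRV_OK, DRV_RESOURCE };   /* stand-ins for BLK_STS_* */

struct req { int id; struct req *next; };

/* Fake driver: accept the first `budget` requests, then run out of room,
 * standing in for a queue_rq hook returning a resource error. */
static enum drv_status fake_queue_rq(struct req *rq, int *budget)
{
	if (*budget <= 0)
		return DRV_RESOURCE;
	(*budget)--;
	printf("dispatched request %d\n", rq->id);
	return DRV_OK;
}

/* Dispatch `list`; on a resource error, hand the remainder (including the
 * request that failed) back via `redo` for a later run, the way leftovers
 * get spliced onto hctx->dispatch. Returns true if everything was issued. */
static bool dispatch_list(struct req *list, struct req **redo, int *budget)
{
	while (list) {
		struct req *rq = list;

		if (fake_queue_rq(rq, budget) == DRV_RESOURCE) {
			*redo = list;            /* requeue rq and the rest */
			return false;
		}
		list = rq->next;
	}
	*redo = NULL;
	return true;
}

int main(void)
{
	struct req r3 = { 3, NULL }, r2 = { 2, &r3 }, r1 = { 1, &r2 };
	struct req *leftovers = NULL;
	int budget = 2;

	if (!dispatch_list(&r1, &leftovers, &budget))
		printf("driver busy, first leftover is request %d\n",
		       leftovers->id);
	return 0;
}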
1512 * @hctx: Pointer to the hardware queue to run.
1516 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
1525 * hctx->next_cpu from blk_mq_hctx_next_cpu() and running
1528 * - hctx->next_cpu is found offline in blk_mq_hctx_next_cpu(),
1532 * - hctx->next_cpu is found online in blk_mq_hctx_next_cpu(),
1535 * handle dispatched requests to this hctx
1537 if (!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
1538 cpu_online(hctx->next_cpu)) {
1539 printk(KERN_WARNING "run queue from wrong CPU %d, hctx %s\n",
1541 cpumask_empty(hctx->cpumask) ? "inactive": "active");
1551 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
1553 hctx_lock(hctx, &srcu_idx);
1554 blk_mq_sched_dispatch_requests(hctx);
1555 hctx_unlock(hctx, srcu_idx);
1558 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
1560 int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
1563 cpu = cpumask_first(hctx->cpumask);
1573 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
1576 int next_cpu = hctx->next_cpu;
1578 if (hctx->queue->nr_hw_queues == 1)
1581 if (--hctx->next_cpu_batch <= 0) {
1583 next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
1586 next_cpu = blk_mq_first_mapped_cpu(hctx);
1587 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1591 * Do unbound schedule if we can't find a online CPU for this hctx,
1602 * in hctx->cpumask become online again.
1604 hctx->next_cpu = next_cpu;
1605 hctx->next_cpu_batch = 1;
1609 hctx->next_cpu = next_cpu;
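blk_mq_hctx_next_cpu() (source lines 1573-1609) spreads queue runs across the CPUs mapped to the hardware queue, stepping to the next online mapped CPU only once every BLK_MQ_CPU_WORK_BATCH runs to keep some locality. The following is a simplified userspace sketch of that batched round-robin; the mask handling, batch size, and the offline/unbound fallback are reduced to illustrative code.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS     8
#define WORK_BATCH  4   /* illustrative stand-in for BLK_MQ_CPU_WORK_BATCH */

struct hw_queue_cpu_state {
	bool mapped[NR_CPUS];    /* which CPUs this hw queue may run on */
	bool online[NR_CPUS];
	int next_cpu;
	int next_cpu_batch;
};

/* Return the CPU to run the queue on, advancing round-robin to the next
 * mapped online CPU once the current batch is used up. */
static int next_cpu(struct hw_queue_cpu_state *s)
{
	if (--s->next_cpu_batch <= 0) {
		int cpu = s->next_cpu;

		for (int i = 1; i <= NR_CPUS; i++) {
			int cand = (cpu + i) % NR_CPUS;

			if (s->mapped[cand] && s->online[cand]) {
				s->next_cpu = cand;
				break;
			}
		}
		s->next_cpu_batch = WORK_BATCH;
	}
	return s->next_cpu;
}

int main(void)
{
	struct hw_queue_cpu_state s = {
		.mapped = { [0] = true, [2] = true, [5] = true },
		.online = { [0] = true, [2] = true, [5] = true },
		.next_cpu = 0,
		.next_cpu_batch = 1,
	};

	for (int run = 0; run < 10; run++)
		printf("run %d on cpu %d\n", run, next_cpu(&s));
	return 0;
}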
1615 * @hctx: Pointer to the hardware queue to run.
1622 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
1625 if (unlikely(blk_mq_hctx_stopped(hctx)))
1628 if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
1630 if (cpumask_test_cpu(cpu, hctx->cpumask)) {
1631 __blk_mq_run_hw_queue(hctx);
1639 kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
1645 * @hctx: Pointer to the hardware queue to run.
1650 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1652 __blk_mq_delay_run_hw_queue(hctx, true, msecs);
1658 * @hctx: Pointer to the hardware queue to run.
1665 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1678 hctx_lock(hctx, &srcu_idx);
1679 need_run = !blk_queue_quiesced(hctx->queue) &&
1680 blk_mq_hctx_has_pending(hctx);
1681 hctx_unlock(hctx, srcu_idx);
1684 __blk_mq_delay_run_hw_queue(hctx, async, 0);
1695 struct blk_mq_hw_ctx *hctx;
1698 queue_for_each_hw_ctx(q, hctx, i) {
1699 if (blk_mq_hctx_stopped(hctx))
1702 blk_mq_run_hw_queue(hctx, async);
1714 struct blk_mq_hw_ctx *hctx;
1717 queue_for_each_hw_ctx(q, hctx, i) {
1718 if (blk_mq_hctx_stopped(hctx))
1721 blk_mq_delay_run_hw_queue(hctx, msecs);
1735 struct blk_mq_hw_ctx *hctx;
1738 queue_for_each_hw_ctx(q, hctx, i)
1739 if (blk_mq_hctx_stopped(hctx))
1755 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
1757 cancel_delayed_work(&hctx->run_work);
1759 set_bit(BLK_MQ_S_STOPPED, &hctx->state);
1774 struct blk_mq_hw_ctx *hctx;
1777 queue_for_each_hw_ctx(q, hctx, i)
1778 blk_mq_stop_hw_queue(hctx);
1782 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
1784 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1786 blk_mq_run_hw_queue(hctx, false);
1792 struct blk_mq_hw_ctx *hctx;
1795 queue_for_each_hw_ctx(q, hctx, i)
1796 blk_mq_start_hw_queue(hctx);
1800 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1802 if (!blk_mq_hctx_stopped(hctx))
1805 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1806 blk_mq_run_hw_queue(hctx, async);
1812 struct blk_mq_hw_ctx *hctx;
1815 queue_for_each_hw_ctx(q, hctx, i)
1816 blk_mq_start_stopped_hw_queue(hctx, async);
1822 struct blk_mq_hw_ctx *hctx;
1824 hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
1829 if (blk_mq_hctx_stopped(hctx))
1832 __blk_mq_run_hw_queue(hctx);
1835 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
1840 enum hctx_type type = hctx->type;
1852 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
1859 __blk_mq_insert_req_list(hctx, rq, at_head);
1860 blk_mq_hctx_mark_pending(hctx, ctx);
1875 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1877 spin_lock(&hctx->lock);
1879 list_add(&rq->queuelist, &hctx->dispatch);
1881 list_add_tail(&rq->queuelist, &hctx->dispatch);
1882 spin_unlock(&hctx->lock);
1885 blk_mq_run_hw_queue(hctx, false);
1888 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
1893 enum hctx_type type = hctx->type;
1906 blk_mq_hctx_mark_pending(hctx, ctx);
1979 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
1991 new_cookie = request_to_qc_t(hctx, rq);
1998 ret = q->mq_ops->queue_rq(hctx, &bd);
2001 blk_mq_update_dispatch_busy(hctx, false);
2006 blk_mq_update_dispatch_busy(hctx, true);
2010 blk_mq_update_dispatch_busy(hctx, false);
2018 static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
2033 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
2050 return __blk_mq_issue_directly(hctx, rq, cookie, last);
2062 * @hctx: Pointer of the associated hardware queue.
2067 * request directly to device driver. Else, insert at hctx->dispatch queue, so
2071 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
2077 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
2079 hctx_lock(hctx, &srcu_idx);
2081 ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
2087 hctx_unlock(hctx, srcu_idx);
2095 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2097 hctx_lock(hctx, &srcu_idx);
2098 ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
2099 hctx_unlock(hctx, srcu_idx);
2104 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
2136 hctx->queue->mq_ops->commit_rqs && queued)
2137 hctx->queue->mq_ops->commit_rqs(hctx);
2224 cookie = request_to_qc_t(data.hctx, rq);
2240 blk_mq_run_hw_queue(data.hctx, true);
2287 data.hctx = same_queue_rq->mq_hctx;
2289 blk_mq_try_issue_directly(data.hctx, same_queue_rq,
2293 !data.hctx->dispatch_busy) {
2298 blk_mq_try_issue_directly(data.hctx, rq, &cookie);
2521 struct blk_mq_hw_ctx *hctx;
2529 if (rq->mq_hctx != iter_data->hctx)
2535 static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
2537 struct blk_mq_tags *tags = hctx->sched_tags ?
2538 hctx->sched_tags : hctx->tags;
2540 .hctx = hctx,
2548 struct blk_mq_hw_ctx *hctx)
2550 if (cpumask_next_and(-1, hctx->cpumask, cpu_online_mask) != cpu)
2552 if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
2559 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
2562 if (!cpumask_test_cpu(cpu, hctx->cpumask) ||
2563 !blk_mq_last_cpu_in_hctx(cpu, hctx))
2567 * Prevent new request from being allocated on the current hctx.
2573 set_bit(BLK_MQ_S_INACTIVE, &hctx->state);
2581 if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) {
2582 while (blk_mq_hctx_has_requests(hctx))
2584 percpu_ref_put(&hctx->queue->q_usage_counter);
2592 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
2595 if (cpumask_test_cpu(cpu, hctx->cpumask))
2596 clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
2607 struct blk_mq_hw_ctx *hctx;
2612 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
2613 if (!cpumask_test_cpu(cpu, hctx->cpumask))
2616 ctx = __blk_mq_get_ctx(hctx->queue, cpu);
2617 type = hctx->type;
2622 blk_mq_hctx_clear_pending(hctx, ctx);
2629 spin_lock(&hctx->lock);
2630 list_splice_tail_init(&tmp, &hctx->dispatch);
2631 spin_unlock(&hctx->lock);
2633 blk_mq_run_hw_queue(hctx, true);
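The hotplug "dead" handler around source lines 2607-2633 drains the departing CPU's software context: its pending requests are pulled out under the ctx lock, spliced onto hctx->dispatch under the hctx lock, and the hardware queue is kicked. Here is a userspace sketch of that two-lock hand-off, with plain mutexes and a singly linked list standing in for the kernel list primitives.

#include <pthread.h>
#include <stdio.h>

struct req { int id; struct req *next; };

struct req_list {
	pthread_mutex_t lock;
	struct req *head;
};

/* Move everything from the dead CPU's list onto the shared dispatch list,
 * taking each lock in turn; the caller would then re-run the hw queue. */
static void splice_dead_cpu_requests(struct req_list *percpu,
				     struct req_list *dispatch)
{
	struct req *batch;

	pthread_mutex_lock(&percpu->lock);
	batch = percpu->head;
	percpu->head = NULL;              /* list_splice_init() analogue */
	pthread_mutex_unlock(&percpu->lock);

	if (!batch)
		return;

	pthread_mutex_lock(&dispatch->lock);
	if (!dispatch->head) {
		dispatch->head = batch;
	} else {
		struct req *tail = dispatch->head;

		while (tail->next)
			tail = tail->next;
		tail->next = batch;       /* append at the tail */
	}
	pthread_mutex_unlock(&dispatch->lock);
}

int main(void)
{
	struct req r2 = { 2, NULL }, r1 = { 1, &r2 };
	struct req_list percpu = { PTHREAD_MUTEX_INITIALIZER, &r1 };
	struct req_list dispatch = { PTHREAD_MUTEX_INITIALIZER, NULL };

	splice_dead_cpu_requests(&percpu, &dispatch);
	for (struct req *r = dispatch.head; r; r = r->next)
		printf("requeued request %d\n", r->id);
	return 0;
}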
2637 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
2639 if (!(hctx->flags & BLK_MQ_F_STACKING))
2641 &hctx->cpuhp_online);
2643 &hctx->cpuhp_dead);
2675 /* hctx->ctxs will be freed in queue's release handler */
2678 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
2680 struct request *flush_rq = hctx->fq->flush_rq;
2682 if (blk_mq_hw_queue_mapped(hctx))
2683 blk_mq_tag_idle(hctx);
2691 set->ops->exit_hctx(hctx, hctx_idx);
2693 blk_mq_remove_cpuhp(hctx);
2696 list_add(&hctx->hctx_list, &q->unused_hctx_list);
2703 struct blk_mq_hw_ctx *hctx;
2706 queue_for_each_hw_ctx(q, hctx, i) {
2709 blk_mq_debugfs_unregister_hctx(hctx);
2710 blk_mq_exit_hctx(q, set, hctx, i);
2730 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
2732 hctx->queue_num = hctx_idx;
2734 if (!(hctx->flags & BLK_MQ_F_STACKING))
2736 &hctx->cpuhp_online);
2737 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
2739 hctx->tags = set->tags[hctx_idx];
2742 set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
2745 if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
2746 hctx->numa_node))
2752 set->ops->exit_hctx(hctx, hctx_idx);
2754 blk_mq_remove_cpuhp(hctx);
2762 struct blk_mq_hw_ctx *hctx;
2765 hctx = kzalloc_node(blk_mq_hw_ctx_size(set), gfp, node);
2766 if (!hctx)
2769 if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
2772 atomic_set(&hctx->nr_active, 0);
2773 atomic_set(&hctx->elevator_queued, 0);
2776 hctx->numa_node = node;
2778 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
2779 spin_lock_init(&hctx->lock);
2780 INIT_LIST_HEAD(&hctx->dispatch);
2781 hctx->queue = q;
2782 hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
2784 INIT_LIST_HEAD(&hctx->hctx_list);
2790 hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
2792 if (!hctx->ctxs)
2795 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
2798 hctx->nr_ctx = 0;
2800 spin_lock_init(&hctx->dispatch_wait_lock);
2801 init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
2802 INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
2804 hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp);
2805 if (!hctx->fq)
2808 if (hctx->flags & BLK_MQ_F_BLOCKING)
2809 init_srcu_struct(hctx->srcu);
2810 blk_mq_hctx_kobj_init(hctx);
2812 return hctx;
2815 sbitmap_free(&hctx->ctx_map);
2817 kfree(hctx->ctxs);
2819 free_cpumask_var(hctx->cpumask);
2821 kfree(hctx);
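blk_mq_alloc_hctx() (source lines 2762-2821) builds a hardware queue context in several allocation steps and unwinds them in reverse order through goto labels when a step fails. The small userspace illustration below shows that staged-unwind pattern; the fields and sizes are invented for the example.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative multi-step constructor with staged unwinding, mirroring the
 * goto-based error handling used when a hardware queue context is built. */
struct fake_hctx {
	unsigned long *cpumask;   /* stands in for hctx->cpumask */
	void **ctxs;              /* stands in for hctx->ctxs */
};

static struct fake_hctx *fake_hctx_alloc(unsigned int nr_cpus)
{
	struct fake_hctx *hctx = calloc(1, sizeof(*hctx));

	if (!hctx)
		goto fail;

	hctx->cpumask = calloc((nr_cpus + 63) / 64, sizeof(unsigned long));
	if (!hctx->cpumask)
		goto free_hctx;

	hctx->ctxs = calloc(nr_cpus, sizeof(void *));
	if (!hctx->ctxs)
		goto free_cpumask;

	return hctx;

free_cpumask:
	free(hctx->cpumask);
free_hctx:
	free(hctx);
fail:
	return NULL;
}

int main(void)
{
	struct fake_hctx *hctx = fake_hctx_alloc(8);

	printf("allocation %s\n", hctx ? "succeeded" : "failed");
	if (hctx) {
		free(hctx->ctxs);
		free(hctx->cpumask);
		free(hctx);
	}
	return 0;
}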
2834 struct blk_mq_hw_ctx *hctx;
2849 hctx = blk_mq_map_queue_type(q, j, i);
2850 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
2851 hctx->numa_node = cpu_to_node(i);
2892 struct blk_mq_hw_ctx *hctx;
2896 queue_for_each_hw_ctx(q, hctx, i) {
2897 cpumask_clear(hctx->cpumask);
2898 hctx->nr_ctx = 0;
2899 hctx->dispatch_from = NULL;
2905 * If the cpu isn't present, the cpu is mapped to first hctx.
2921 * If tags initialization fail for some hctx,
2922 * that hctx won't be brought online. In this
2923 * case, remap the current ctx to hctx[0] which
2929 hctx = blk_mq_map_queue_type(q, j, i);
2930 ctx->hctxs[j] = hctx;
2936 if (cpumask_test_cpu(i, hctx->cpumask))
2939 cpumask_set_cpu(i, hctx->cpumask);
2940 hctx->type = j;
2941 ctx->index_hw[hctx->type] = hctx->nr_ctx;
2942 hctx->ctxs[hctx->nr_ctx++] = ctx;
2948 BUG_ON(!hctx->nr_ctx);
2956 queue_for_each_hw_ctx(q, hctx, i) {
2961 if (!hctx->nr_ctx) {
2969 hctx->tags = NULL;
2973 hctx->tags = set->tags[i];
2974 WARN_ON(!hctx->tags);
2981 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
2986 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
2987 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
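The mapping code around source lines 2892-2987 rebuilds the software-to-hardware queue association: each present CPU's ctx is appended to its hctx's ctxs[] array, its position is recorded in ctx->index_hw so the pending bitmap can later be indexed by it, and the CPU is added to hctx->cpumask. The toy userspace sketch below shows only that bookkeeping, with an invented even-split mapping policy instead of the real queue maps.

#include <stdio.h>

#define NR_CPUS      8
#define NR_HW_QUEUES 2

/* Illustrative software/hardware queue mapping: each CPU's software context
 * is appended to its hardware queue's ctxs[] array, and the position is
 * remembered so a per-hctx pending bitmap can be indexed by it later. */
struct sw_ctx { int cpu; int index_hw; };
struct hw_ctx { int nr_ctx; struct sw_ctx *ctxs[NR_CPUS]; };

int main(void)
{
	struct sw_ctx ctxs[NR_CPUS];
	struct hw_ctx hctxs[NR_HW_QUEUES] = { 0 };

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		/* Trivial mapping policy for the sketch: split CPUs evenly. */
		struct hw_ctx *hctx = &hctxs[cpu * NR_HW_QUEUES / NR_CPUS];
		struct sw_ctx *ctx = &ctxs[cpu];

		ctx->cpu = cpu;
		ctx->index_hw = hctx->nr_ctx;        /* like ctx->index_hw[type] */
		hctx->ctxs[hctx->nr_ctx++] = ctx;    /* like hctx->ctxs[nr_ctx++] */
	}

	for (int i = 0; i < NR_HW_QUEUES; i++)
		printf("hw queue %d serves %d software contexts\n",
		       i, hctxs[i].nr_ctx);
	return 0;
}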
2997 struct blk_mq_hw_ctx *hctx;
3000 queue_for_each_hw_ctx(q, hctx, i) {
3002 hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
3004 blk_mq_tag_idle(hctx);
3005 hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
3097 struct blk_mq_hw_ctx *hctx, *next;
3100 queue_for_each_hw_ctx(q, hctx, i)
3101 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
3103 /* all hctx are in .unused_hctx_list now */
3104 list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
3105 list_del_init(&hctx->hctx_list);
3106 kobject_put(&hctx->kobj);
3184 struct blk_mq_hw_ctx *hctx = NULL, *tmp;
3186 /* reuse dead hctx first */
3190 hctx = tmp;
3194 if (hctx)
3195 list_del_init(&hctx->hctx_list);
3198 if (!hctx)
3199 hctx = blk_mq_alloc_hctx(q, set, node);
3200 if (!hctx)
3203 if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
3206 return hctx;
3209 kobject_put(&hctx->kobj);
3240 struct blk_mq_hw_ctx *hctx;
3245 * we need to realloc the hctx. If allocation fails, fallback
3251 hctx = blk_mq_alloc_and_init_hctx(set, q, i, node);
3252 if (hctx) {
3255 hctxs[i] = hctx;
3258 pr_warn("Allocate new hctx on node %d fails,\
3279 struct blk_mq_hw_ctx *hctx = hctxs[j];
3281 if (hctx) {
3282 blk_mq_exit_hctx(q, set, hctx, j);
3365 /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
3367 /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
3610 struct blk_mq_hw_ctx *hctx;
3623 queue_for_each_hw_ctx(q, hctx, i) {
3624 if (!hctx->tags)
3630 if (!hctx->sched_tags) {
3631 ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
3636 ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
3642 q->elevator->type->ops.depth_updated(hctx);
3923 struct blk_mq_hw_ctx *hctx, blk_qc_t cookie)
3931 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
3933 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
3961 struct blk_mq_hw_ctx *hctx;
3971 hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
3980 if (blk_mq_poll_hybrid(q, hctx, cookie))
3983 hctx->poll_considered++;
3989 hctx->poll_invoked++;
3991 ret = q->mq_ops->poll(hctx);
3993 hctx->poll_success++;
4022 struct blk_mq_hw_ctx *hctx;
4027 queue_for_each_hw_ctx(q, hctx, i)
4028 cancel_delayed_work_sync(&hctx->run_work);