Searched refs:hctx (Results 1 - 25 of 114) sorted by relevance

/kernel/linux/linux-5.10/block/
blk-mq-sched.c
50 void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_mark_restart_hctx() argument
52 if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) in blk_mq_sched_mark_restart_hctx()
55 set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); in blk_mq_sched_mark_restart_hctx()
59 void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_restart() argument
61 if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) in blk_mq_sched_restart()
63 clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); in blk_mq_sched_restart()
66 * Order clearing SCHED_RESTART and list_empty_careful(&hctx->dispatch) in blk_mq_sched_restart()
69 * meantime new request added to hctx->dispatch is missed to check in in blk_mq_sched_restart()
74 blk_mq_run_hw_queue(hctx, true); in blk_mq_sched_restart()
88 struct blk_mq_hw_ctx *hctx in blk_mq_dispatch_hctx_list() local
117 __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) __blk_mq_do_dispatch_sched() argument
194 blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) blk_mq_do_dispatch_sched() argument
212 blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx) blk_mq_next_ctx() argument
231 blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx) blk_mq_do_dispatch_ctx() argument
281 __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) __blk_mq_sched_dispatch_requests() argument
334 blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) blk_mq_sched_dispatch_requests() argument
359 struct blk_mq_hw_ctx *hctx; __blk_mq_sched_bio_merge() local
403 blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, bool has_sched, struct request *rq) blk_mq_sched_bypass_insert() argument
433 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; blk_mq_sched_insert_request() local
480 blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, struct list_head *list, bool run_queue_async) blk_mq_sched_insert_requests() argument
516 blk_mq_sched_alloc_tags(struct request_queue *q, struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) blk_mq_sched_alloc_tags() argument
542 struct blk_mq_hw_ctx *hctx; blk_mq_sched_tags_teardown() local
558 struct blk_mq_hw_ctx *hctx; blk_mq_init_sched() local
618 struct blk_mq_hw_ctx *hctx; blk_mq_sched_free_requests() local
629 struct blk_mq_hw_ctx *hctx; blk_mq_exit_sched() local
[all...]
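
The blk_mq_sched_restart() excerpt above is the consumer half of a small handshake: a stalled dispatch path sets BLK_MQ_S_SCHED_RESTART, and a later completion clears it and reruns the hardware queue, with the clear ordered before the re-check of hctx->dispatch so a request queued in between is not missed. The sketch below restates that pattern with C11 atomics in userspace; it is illustrative only, and mark_restart(), restart_and_rerun() and run_hw_queue() are made-up names, not kernel APIs.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool sched_restart;      /* stands in for BLK_MQ_S_SCHED_RESTART */
static atomic_int  dispatch_pending;   /* stands in for "hctx->dispatch not empty" */

static void run_hw_queue(void)
{
    /* blk_mq_run_hw_queue() re-checks for pending work before dispatching. */
    if (atomic_load(&dispatch_pending))
        printf("dispatching queued requests\n");
}

/* Dispatch side: out of resources, remember that a restart is needed. */
static void mark_restart(void)
{
    if (!atomic_load(&sched_restart))
        atomic_store(&sched_restart, true);
}

/* Completion side: consume the flag, then rerun the queue.  The
 * sequentially consistent exchange orders the clear before the re-check
 * in run_hw_queue(), which is what the barrier comment in
 * blk_mq_sched_restart() is guarding against. */
static void restart_and_rerun(void)
{
    if (!atomic_exchange(&sched_restart, false))
        return;
    run_hw_queue();
}

int main(void)
{
    mark_restart();
    atomic_store(&dispatch_pending, 1);   /* a request lands on the dispatch list */
    restart_and_rerun();
    return 0;
}
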
blk-mq.h
43 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
47 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
48 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
69 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
73 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
78 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
89 * @type: the hctx type index
130 extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
161 struct blk_mq_hw_ctx *hctx; member
172 return data->hctx in blk_mq_tags_from_data()
177 blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx) blk_mq_hctx_stopped() argument
182 blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx) blk_mq_hw_queue_mapped() argument
204 __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx) __blk_mq_inc_active_requests() argument
212 __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx) __blk_mq_dec_active_requests() argument
220 __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx) __blk_mq_active_requests() argument
226 __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq) __blk_mq_put_driver_tag() argument
301 hctx_may_queue(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt) hctx_may_queue() argument
[all...]
blk-mq.c
70 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) in blk_mq_hctx_has_pending() argument
72 return !list_empty_careful(&hctx->dispatch) || in blk_mq_hctx_has_pending()
73 sbitmap_any_bit_set(&hctx->ctx_map) || in blk_mq_hctx_has_pending()
74 blk_mq_sched_has_work(hctx); in blk_mq_hctx_has_pending()
80 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_mark_pending() argument
83 const int bit = ctx->index_hw[hctx->type]; in blk_mq_hctx_mark_pending()
85 if (!sbitmap_test_bit(&hctx->ctx_map, bit)) in blk_mq_hctx_mark_pending()
86 sbitmap_set_bit(&hctx->ctx_map, bit); in blk_mq_hctx_mark_pending()
89 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_clear_pending() argument
92 const int bit = ctx->index_hw[hctx in blk_mq_hctx_clear_pending()
102 blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq, void *priv, bool reserved) blk_mq_check_inflight() argument
229 struct blk_mq_hw_ctx *hctx; blk_mq_quiesce_queue() local
275 struct blk_mq_hw_ctx *hctx; blk_mq_wake_waiters() local
510 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; __blk_mq_free_request() local
529 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; blk_mq_free_request() local
888 blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq, void *priv, bool reserved) blk_mq_rq_inflight() argument
957 blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, struct request *rq, void *priv, bool reserved) blk_mq_check_expired() argument
979 struct blk_mq_hw_ctx *hctx; blk_mq_timeout_work() local
1019 struct blk_mq_hw_ctx *hctx; global() member
1026 struct blk_mq_hw_ctx *hctx = flush_data->hctx; flush_busy_ctx() local
1041 blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list) blk_mq_flush_busy_ctxs() argument
1053 struct blk_mq_hw_ctx *hctx; global() member
1061 struct blk_mq_hw_ctx *hctx = dispatch_data->hctx; dispatch_rq_from_ctx() local
1077 blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *start) blk_mq_dequeue_from_ctx() argument
1126 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; blk_mq_get_driver_tag() local
1143 struct blk_mq_hw_ctx *hctx; blk_mq_dispatch_wake() local
1167 blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx, struct request *rq) blk_mq_mark_tag_wait() argument
1256 blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy) blk_mq_update_dispatch_busy() argument
1314 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; blk_mq_prep_dispatch_rq() local
1356 blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list, unsigned int nr_budgets) blk_mq_dispatch_rq_list() argument
1516 __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) __blk_mq_run_hw_queue() argument
1558 blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx) blk_mq_first_mapped_cpu() argument
1573 blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx) blk_mq_hctx_next_cpu() argument
1622 __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async, unsigned long msecs) __blk_mq_delay_run_hw_queue() argument
1650 blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) blk_mq_delay_run_hw_queue() argument
1665 blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) blk_mq_run_hw_queue() argument
1695 struct blk_mq_hw_ctx *hctx; blk_mq_run_hw_queues() local
1714 struct blk_mq_hw_ctx *hctx; blk_mq_delay_run_hw_queues() local
1735 struct blk_mq_hw_ctx *hctx; blk_mq_queue_stopped() local
1755 blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) blk_mq_stop_hw_queue() argument
1774 struct blk_mq_hw_ctx *hctx; blk_mq_stop_hw_queues() local
1782 blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) blk_mq_start_hw_queue() argument
1792 struct blk_mq_hw_ctx *hctx; blk_mq_start_hw_queues() local
1800 blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) blk_mq_start_stopped_hw_queue() argument
1812 struct blk_mq_hw_ctx *hctx; blk_mq_start_stopped_hw_queues() local
1822 struct blk_mq_hw_ctx *hctx; blk_mq_run_work_fn() local
1835 __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx, struct request *rq, bool at_head) __blk_mq_insert_req_list() argument
1852 __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, bool at_head) __blk_mq_insert_request() argument
1875 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; blk_mq_request_bypass_insert() local
1888 blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, struct list_head *list) blk_mq_insert_requests() argument
1979 __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, struct request *rq, blk_qc_t *cookie, bool last) __blk_mq_issue_directly() argument
2018 __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, struct request *rq, blk_qc_t *cookie, bool bypass_insert, bool last) __blk_mq_try_issue_directly() argument
2071 blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, struct request *rq, blk_qc_t *cookie) blk_mq_try_issue_directly() argument
2095 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; blk_mq_request_issue_directly() local
2104 blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, struct list_head *list) blk_mq_try_issue_list_directly() argument
2521 struct blk_mq_hw_ctx *hctx; global() member
2535 blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx) blk_mq_hctx_has_requests() argument
2547 blk_mq_last_cpu_in_hctx(unsigned int cpu, struct blk_mq_hw_ctx *hctx) blk_mq_last_cpu_in_hctx() argument
2559 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, blk_mq_hctx_notify_offline() local
2592 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, blk_mq_hctx_notify_online() local
2607 struct blk_mq_hw_ctx *hctx; blk_mq_hctx_notify_dead() local
2637 blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx) blk_mq_remove_cpuhp() argument
2676 blk_mq_exit_hctx(struct request_queue *q, struct blk_mq_tag_set *set, struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) blk_mq_exit_hctx() argument
2703 struct blk_mq_hw_ctx *hctx; blk_mq_exit_hw_queues() local
2728 blk_mq_init_hctx(struct request_queue *q, struct blk_mq_tag_set *set, struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) blk_mq_init_hctx() argument
2762 struct blk_mq_hw_ctx *hctx; blk_mq_alloc_hctx() local
2834 struct blk_mq_hw_ctx *hctx; blk_mq_init_cpu_queues() local
2892 struct blk_mq_hw_ctx *hctx; blk_mq_map_swqueue() local
2997 struct blk_mq_hw_ctx *hctx; queue_set_hctx_shared() local
3097 struct blk_mq_hw_ctx *hctx, *next; blk_mq_release() local
3184 struct blk_mq_hw_ctx *hctx = NULL, *tmp; blk_mq_alloc_and_init_hctx() local
3240 struct blk_mq_hw_ctx *hctx; blk_mq_realloc_hw_ctxs() local
3279 struct blk_mq_hw_ctx *hctx = hctxs[j]; blk_mq_realloc_hw_ctxs() local
3610 struct blk_mq_hw_ctx *hctx; blk_mq_update_nr_requests() local
3922 blk_mq_poll_hybrid(struct request_queue *q, struct blk_mq_hw_ctx *hctx, blk_qc_t cookie) blk_mq_poll_hybrid() argument
3961 struct blk_mq_hw_ctx *hctx; blk_poll() local
4022 struct blk_mq_hw_ctx *hctx; blk_mq_cancel_work_sync() local
[all...]
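
blk_mq_hctx_has_pending() near the top of this file checks the three places work can be waiting on a hardware context: the hctx->dispatch list, the per-ctx bitmap hctx->ctx_map, and the attached I/O scheduler. blk_mq_hctx_mark_pending() sets the bit for one software context, using ctx->index_hw[hctx->type] as the bit index. Below is a rough userspace analogue of that bookkeeping (a plain bitmap instead of an sbitmap; struct hw_ctx and the helper names are invented for the example, not kernel code).

#include <stdbool.h>
#include <stddef.h>
#include <limits.h>

#define MAX_SW_CTX 64                      /* illustrative bound, not a kernel limit */
#define BITS_PER_WORD (sizeof(unsigned long) * CHAR_BIT)

struct hw_ctx {
    unsigned long ctx_map[(MAX_SW_CTX + BITS_PER_WORD - 1) / BITS_PER_WORD];
    int  dispatch_len;                     /* length of the hctx->dispatch analogue */
    bool sched_has_work;                   /* result of the elevator's has_work() */
};

/* Mirrors blk_mq_hctx_mark_pending(): one bit per mapped software context. */
static void mark_ctx_pending(struct hw_ctx *h, unsigned int index_hw)
{
    h->ctx_map[index_hw / BITS_PER_WORD] |= 1UL << (index_hw % BITS_PER_WORD);
}

static bool any_ctx_bit_set(const struct hw_ctx *h)
{
    for (size_t i = 0; i < sizeof(h->ctx_map) / sizeof(h->ctx_map[0]); i++)
        if (h->ctx_map[i])
            return true;
    return false;
}

/* Mirrors the three-way check in blk_mq_hctx_has_pending(). */
static bool hw_ctx_has_pending(const struct hw_ctx *h)
{
    return h->dispatch_len > 0 || any_ctx_bit_set(h) || h->sched_has_work;
}

int main(void)
{
    struct hw_ctx h = { 0 };

    mark_ctx_pending(&h, 3);
    return hw_ctx_has_pending(&h) ? 0 : 1;
}
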
blk-mq-debugfs.c
228 struct blk_mq_hw_ctx *hctx = data; in hctx_state_show() local
230 blk_flags_show(m, hctx->state, hctx_state_name, in hctx_state_show()
256 struct blk_mq_hw_ctx *hctx = data; in hctx_flags_show() local
257 const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags); in hctx_flags_show()
267 hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy), in hctx_flags_show()
365 __acquires(&hctx->lock)
367 struct blk_mq_hw_ctx *hctx = m->private; variable
369 spin_lock(&hctx->lock);
370 return seq_list_start(&hctx->dispatch, *pos);
375 struct blk_mq_hw_ctx *hctx in hctx_dispatch_next() local
383 struct blk_mq_hw_ctx *hctx = m->private; global() variable
397 struct blk_mq_hw_ctx *hctx; global() member
417 struct blk_mq_hw_ctx *hctx = data; hctx_busy_show() local
434 struct blk_mq_hw_ctx *hctx = data; hctx_type_show() local
443 struct blk_mq_hw_ctx *hctx = data; hctx_ctx_map_show() local
468 struct blk_mq_hw_ctx *hctx = data; hctx_tags_show() local
485 struct blk_mq_hw_ctx *hctx = data; hctx_tags_bitmap_show() local
502 struct blk_mq_hw_ctx *hctx = data; hctx_sched_tags_show() local
519 struct blk_mq_hw_ctx *hctx = data; hctx_sched_tags_bitmap_show() local
536 struct blk_mq_hw_ctx *hctx = data; hctx_io_poll_show() local
547 struct blk_mq_hw_ctx *hctx = data; hctx_io_poll_write() local
555 struct blk_mq_hw_ctx *hctx = data; hctx_dispatched_show() local
573 struct blk_mq_hw_ctx *hctx = data; hctx_dispatched_write() local
583 struct blk_mq_hw_ctx *hctx = data; hctx_queued_show() local
592 struct blk_mq_hw_ctx *hctx = data; hctx_queued_write() local
600 struct blk_mq_hw_ctx *hctx = data; hctx_run_show() local
609 struct blk_mq_hw_ctx *hctx = data; hctx_run_write() local
617 struct blk_mq_hw_ctx *hctx = data; hctx_active_show() local
625 struct blk_mq_hw_ctx *hctx = data; hctx_dispatch_busy_show() local
829 struct blk_mq_hw_ctx *hctx; blk_mq_debugfs_register() local
865 blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx) blk_mq_debugfs_register_ctx() argument
877 blk_mq_debugfs_register_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx) blk_mq_debugfs_register_hctx() argument
896 blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx) blk_mq_debugfs_unregister_hctx() argument
905 struct blk_mq_hw_ctx *hctx; blk_mq_debugfs_register_hctxs() local
914 struct blk_mq_hw_ctx *hctx; blk_mq_debugfs_unregister_hctxs() local
976 blk_mq_debugfs_register_sched_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx) blk_mq_debugfs_register_sched_hctx() argument
990 blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx) blk_mq_debugfs_unregister_sched_hctx() argument
[all...]
blk-mq-sysfs.c
36 struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx, in blk_mq_hw_sysfs_release() local
39 if (hctx->flags & BLK_MQ_F_BLOCKING) in blk_mq_hw_sysfs_release()
40 cleanup_srcu_struct(hctx->srcu); in blk_mq_hw_sysfs_release()
41 blk_free_flush_queue(hctx->fq); in blk_mq_hw_sysfs_release()
42 sbitmap_free(&hctx->ctx_map); in blk_mq_hw_sysfs_release()
43 free_cpumask_var(hctx->cpumask); in blk_mq_hw_sysfs_release()
44 kfree(hctx->ctxs); in blk_mq_hw_sysfs_release()
45 kfree(hctx); in blk_mq_hw_sysfs_release()
106 struct blk_mq_hw_ctx *hctx; in blk_mq_hw_sysfs_show() local
111 hctx in blk_mq_hw_sysfs_show()
128 struct blk_mq_hw_ctx *hctx; blk_mq_hw_sysfs_store() local
145 blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx, char *page) blk_mq_hw_sysfs_nr_tags_show() argument
151 blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx, char *page) blk_mq_hw_sysfs_nr_reserved_tags_show() argument
157 blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) blk_mq_hw_sysfs_cpus_show() argument
227 blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx) blk_mq_unregister_hctx() argument
241 blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) blk_mq_register_hctx() argument
272 struct blk_mq_hw_ctx *hctx; blk_mq_unregister_dev() local
287 blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx) blk_mq_hctx_kobj_init() argument
321 struct blk_mq_hw_ctx *hctx; __blk_mq_register_dev() local
356 struct blk_mq_hw_ctx *hctx; blk_mq_sysfs_unregister() local
372 struct blk_mq_hw_ctx *hctx; blk_mq_sysfs_register() local
[all...]
blk-mq-tag.c
24 bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) in __blk_mq_tag_busy() argument
26 if (blk_mq_is_sbitmap_shared(hctx->flags)) { in __blk_mq_tag_busy()
27 struct request_queue *q = hctx->queue; in __blk_mq_tag_busy()
34 if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) && in __blk_mq_tag_busy()
35 !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) in __blk_mq_tag_busy()
36 atomic_inc(&hctx->tags->active_queues); in __blk_mq_tag_busy()
56 void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) in __blk_mq_tag_idle() argument
58 struct blk_mq_tags *tags = hctx->tags; in __blk_mq_tag_idle()
59 struct request_queue *q = hctx->queue; in __blk_mq_tag_idle()
62 if (blk_mq_is_sbitmap_shared(hctx in __blk_mq_tag_idle()
196 struct blk_mq_hw_ctx *hctx; global() member
219 struct blk_mq_hw_ctx *hctx = iter_data->hctx; bt_iter() local
255 bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt, busy_iter_fn *fn, void *data, bool reserved) bt_for_each() argument
439 struct blk_mq_hw_ctx *hctx; blk_mq_queue_tag_busy_iter() local
566 blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags **tagsptr, unsigned int tdepth, bool can_grow) blk_mq_tag_update_depth() argument
[all...]
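
__blk_mq_tag_busy() and __blk_mq_tag_idle() above keep tags->active_queues equal to the number of hardware queues that currently have I/O in flight on a shared tag set; each hctx contributes at most once, via its BLK_MQ_S_TAG_ACTIVE bit. hctx_may_queue() (see the blk-mq.h hits) then divides the tag depth by that count so one busy queue cannot starve the others. The following is a simplified, single-threaded model of that accounting; tagset, hw_queue, tag_busy(), tag_idle() and may_queue() are illustrative names, not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

struct tagset {
    unsigned int depth;          /* total driver tags in the shared sbitmap */
    unsigned int active_queues;  /* like the atomic active_queues counter */
};

struct hw_queue {
    bool tag_active;             /* like BLK_MQ_S_TAG_ACTIVE */
    unsigned int active_rqs;     /* like the per-hctx active request count */
};

/* First request on this queue marks it active exactly once. */
static void tag_busy(struct tagset *ts, struct hw_queue *hq)
{
    if (!hq->tag_active) {
        hq->tag_active = true;
        ts->active_queues++;
    }
}

/* Queue went idle: give its share of tags back. */
static void tag_idle(struct tagset *ts, struct hw_queue *hq)
{
    if (hq->tag_active) {
        hq->tag_active = false;
        ts->active_queues--;
    }
}

/* Fair-share check in the spirit of hctx_may_queue(): each active queue
 * gets roughly depth / active_queues tags, with a small floor. */
static bool may_queue(const struct tagset *ts, const struct hw_queue *hq)
{
    unsigned int users = ts->active_queues ? ts->active_queues : 1;
    unsigned int share = ts->depth / users;

    if (share < 4)
        share = 4;
    return hq->active_rqs < share;
}

int main(void)
{
    struct tagset ts = { .depth = 64 };
    struct hw_queue a = { 0 }, b = { 0 };

    tag_busy(&ts, &a);
    tag_busy(&ts, &b);
    a.active_rqs = 40;
    printf("queue a may allocate another tag: %d\n", may_queue(&ts, &a));
    tag_idle(&ts, &b);
    return 0;
}
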
blk-mq-tag.h
43 extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
56 struct blk_mq_hw_ctx *hctx) in bt_wait_ptr()
58 if (!hctx) in bt_wait_ptr()
60 return sbq_wait_ptr(bt, &hctx->wait_index); in bt_wait_ptr()
72 static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) in blk_mq_tag_busy() argument
74 if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) in blk_mq_tag_busy()
77 return __blk_mq_tag_busy(hctx); in blk_mq_tag_busy()
80 static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) in blk_mq_tag_idle() argument
82 if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) in blk_mq_tag_idle()
85 __blk_mq_tag_idle(hctx); in blk_mq_tag_idle()
55 bt_wait_ptr(struct sbitmap_queue *bt, struct blk_mq_hw_ctx *hctx) bt_wait_ptr() argument
[all...]
kyber-iosched.c
138 * There is a same mapping between ctx & hctx and kcq & khd,
448 static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx) in kyber_depth_updated() argument
450 struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data; in kyber_depth_updated()
451 struct blk_mq_tags *tags = hctx->sched_tags; in kyber_depth_updated()
459 static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) in kyber_init_hctx() argument
464 khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node); in kyber_init_hctx()
468 khd->kcqs = kmalloc_array_node(hctx->nr_ctx, in kyber_init_hctx()
470 GFP_KERNEL, hctx->numa_node); in kyber_init_hctx()
474 for (i = 0; i < hctx->nr_ctx; i++) in kyber_init_hctx()
478 if (sbitmap_init_node(&khd->kcq_map[i], hctx in kyber_init_hctx()
513 kyber_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) kyber_exit_hctx() argument
565 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx); kyber_bio_merge() local
584 kyber_insert_requests(struct blk_mq_hw_ctx *hctx, struct list_head *rq_list, bool at_head) kyber_insert_requests() argument
691 struct blk_mq_hw_ctx *hctx = READ_ONCE(wqe->private); kyber_domain_wake() local
699 kyber_get_domain_token(struct kyber_queue_data *kqd, struct kyber_hctx_data *khd, struct blk_mq_hw_ctx *hctx) kyber_get_domain_token() argument
747 kyber_dispatch_cur_domain(struct kyber_queue_data *kqd, struct kyber_hctx_data *khd, struct blk_mq_hw_ctx *hctx) kyber_dispatch_cur_domain() argument
796 kyber_dispatch_request(struct blk_mq_hw_ctx *hctx) kyber_dispatch_request() argument
842 kyber_has_work(struct blk_mq_hw_ctx *hctx) kyber_has_work() argument
964 struct blk_mq_hw_ctx *hctx = data; kyber_cur_domain_show() local
973 struct blk_mq_hw_ctx *hctx = data; kyber_batching_show() local
[all...]
blk-mq-debugfs.h
24 struct blk_mq_hw_ctx *hctx);
25 void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
32 struct blk_mq_hw_ctx *hctx);
33 void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
48 struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_register_hctx()
52 static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_unregister_hctx() argument
73 struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_register_sched_hctx()
77 static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_unregister_sched_hctx() argument
47 blk_mq_debugfs_register_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx) blk_mq_debugfs_register_hctx() argument
72 blk_mq_debugfs_register_sched_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx) blk_mq_debugfs_register_sched_hctx() argument
blk-mq-sched.h
17 void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
18 void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
22 void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
26 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
71 static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_has_work() argument
73 struct elevator_queue *e = hctx->queue->elevator; in blk_mq_sched_has_work()
76 return e->type->ops.has_work(hctx); in blk_mq_sched_has_work()
81 static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_needs_restart() argument
83 return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); in blk_mq_sched_needs_restart()
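
blk_mq_sched_has_work() above shows the usual blk-mq pattern of calling into the attached elevator through its ops table only when a scheduler is present and implements the hook. A stripped-down illustration of that optional-callback pattern follows; struct elevator_ops_sketch, struct hw_queue and the dummy scheduler are invented for the example.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct hw_queue;                                 /* forward declaration */

struct elevator_ops_sketch {
    bool (*has_work)(struct hw_queue *hq);       /* optional hook */
};

struct hw_queue {
    const struct elevator_ops_sketch *elevator;  /* NULL when no scheduler attached */
    int queued;
};

/* Mirrors blk_mq_sched_has_work(): only call through when the hook exists. */
static bool sched_has_work(struct hw_queue *hq)
{
    if (hq->elevator && hq->elevator->has_work)
        return hq->elevator->has_work(hq);
    return false;
}

static bool dummy_has_work(struct hw_queue *hq)
{
    return hq->queued > 0;
}

static const struct elevator_ops_sketch dummy_sched = { .has_work = dummy_has_work };

int main(void)
{
    struct hw_queue none = { .elevator = NULL, .queued = 5 };
    struct hw_queue with = { .elevator = &dummy_sched, .queued = 5 };

    printf("no scheduler: %d, dummy scheduler: %d\n",
           sched_has_work(&none), sched_has_work(&with));
    return 0;
}
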
mq-deadline.c
381 static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx) in dd_dispatch_request() argument
383 struct deadline_data *dd = hctx->queue->elevator->elevator_data; in dd_dispatch_request()
486 static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, in dd_insert_request() argument
489 struct request_queue *q = hctx->queue; in dd_insert_request()
529 static void dd_insert_requests(struct blk_mq_hw_ctx *hctx, in dd_insert_requests() argument
532 struct request_queue *q = hctx->queue; in dd_insert_requests()
541 dd_insert_request(hctx, rq, at_head); in dd_insert_requests()
542 atomic_inc(&hctx->elevator_queued); in dd_insert_requests()
585 static bool dd_has_work(struct blk_mq_hw_ctx *hctx) in dd_has_work() argument
587 struct deadline_data *dd = hctx in dd_has_work()
[all...]
/kernel/linux/linux-6.6/block/
blk-mq-sched.c
22 void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_mark_restart_hctx() argument
24 if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) in blk_mq_sched_mark_restart_hctx()
27 set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); in blk_mq_sched_mark_restart_hctx()
31 void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) in __blk_mq_sched_restart() argument
33 clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); in __blk_mq_sched_restart()
36 * Order clearing SCHED_RESTART and list_empty_careful(&hctx->dispatch) in __blk_mq_sched_restart()
39 * meantime new request added to hctx->dispatch is missed to check in in __blk_mq_sched_restart()
44 blk_mq_run_hw_queue(hctx, true); in __blk_mq_sched_restart()
58 struct blk_mq_hw_ctx *hctx = in blk_mq_dispatch_hctx_list() local
65 if (rq->mq_hctx != hctx) { in blk_mq_dispatch_hctx_list()
87 __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) __blk_mq_do_dispatch_sched() argument
178 blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) blk_mq_do_dispatch_sched() argument
196 blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx) blk_mq_next_ctx() argument
215 blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx) blk_mq_do_dispatch_ctx() argument
270 __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) __blk_mq_sched_dispatch_requests() argument
319 blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) blk_mq_sched_dispatch_requests() argument
344 struct blk_mq_hw_ctx *hctx; blk_mq_sched_bio_merge() local
382 blk_mq_sched_alloc_map_and_rqs(struct request_queue *q, struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) blk_mq_sched_alloc_map_and_rqs() argument
408 struct blk_mq_hw_ctx *hctx; blk_mq_sched_tags_teardown() local
446 struct blk_mq_hw_ctx *hctx; blk_mq_init_sched() local
511 struct blk_mq_hw_ctx *hctx; blk_mq_sched_free_rqs() local
528 struct blk_mq_hw_ctx *hctx; blk_mq_exit_sched() local
[all...]
blk-mq.h
48 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
50 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
51 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
75 * @type: the hctx type index
121 extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
160 struct blk_mq_hw_ctx *hctx; member
176 int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
189 struct blk_mq_hw_ctx *hctx) in bt_wait_ptr()
191 if (!hctx) in bt_wait_ptr()
193 return sbq_wait_ptr(bt, &hctx in bt_wait_ptr()
188 bt_wait_ptr(struct sbitmap_queue *bt, struct blk_mq_hw_ctx *hctx) bt_wait_ptr() argument
199 blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) blk_mq_tag_busy() argument
205 blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) blk_mq_tag_idle() argument
229 blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx) blk_mq_hctx_stopped() argument
234 blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx) blk_mq_hw_queue_mapped() argument
274 __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx) __blk_mq_inc_active_requests() argument
282 __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx, int val) __blk_mq_sub_active_requests() argument
291 __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx) __blk_mq_dec_active_requests() argument
296 __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx) __blk_mq_active_requests() argument
302 __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq) __blk_mq_put_driver_tag() argument
326 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; blk_mq_get_driver_tag() local
391 hctx_may_queue(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt) hctx_may_queue() argument
[all...]
blk-mq.c
51 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
53 static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
60 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) in blk_mq_hctx_has_pending() argument
62 return !list_empty_careful(&hctx->dispatch) || in blk_mq_hctx_has_pending()
63 sbitmap_any_bit_set(&hctx->ctx_map) || in blk_mq_hctx_has_pending()
64 blk_mq_sched_has_work(hctx); in blk_mq_hctx_has_pending()
70 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_mark_pending() argument
73 const int bit = ctx->index_hw[hctx->type]; in blk_mq_hctx_mark_pending()
75 if (!sbitmap_test_bit(&hctx->ctx_map, bit)) in blk_mq_hctx_mark_pending()
76 sbitmap_set_bit(&hctx in blk_mq_hctx_mark_pending()
79 blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx) blk_mq_hctx_clear_pending() argument
307 struct blk_mq_hw_ctx *hctx; blk_mq_wake_waiters() local
352 struct blk_mq_hw_ctx *hctx = data->hctx; blk_mq_rq_ctx_init() local
704 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; __blk_mq_free_request() local
1064 blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx, int *tag_array, int nr_tags) blk_mq_flush_tag_batch() argument
1330 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; blk_execute_rq_nowait() local
1396 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; blk_execute_rq() local
1642 struct blk_mq_hw_ctx *hctx; blk_mq_timeout_work() local
1695 struct blk_mq_hw_ctx *hctx; global() member
1702 struct blk_mq_hw_ctx *hctx = flush_data->hctx; flush_busy_ctx() local
1717 blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list) blk_mq_flush_busy_ctxs() argument
1729 struct blk_mq_hw_ctx *hctx; global() member
1737 struct blk_mq_hw_ctx *hctx = dispatch_data->hctx; dispatch_rq_from_ctx() local
1753 blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *start) blk_mq_dequeue_from_ctx() argument
1792 __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq) __blk_mq_get_driver_tag() argument
1809 struct blk_mq_hw_ctx *hctx; blk_mq_dispatch_wake() local
1833 blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx, struct request *rq) blk_mq_mark_tag_wait() argument
1927 blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy) blk_mq_update_dispatch_busy() argument
1975 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; blk_mq_prep_dispatch_rq() local
2031 blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int queued, bool from_schedule) blk_mq_commit_rqs() argument
2043 blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list, unsigned int nr_budgets) blk_mq_dispatch_rq_list() argument
2186 blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx) blk_mq_first_mapped_cpu() argument
2201 blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx) blk_mq_hctx_next_cpu() argument
2248 blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) blk_mq_delay_run_hw_queue() argument
2266 blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) blk_mq_run_hw_queue() argument
2316 struct blk_mq_hw_ctx *hctx = ctx->hctxs[HCTX_TYPE_DEFAULT]; blk_mq_get_sq_hctx() local
2330 struct blk_mq_hw_ctx *hctx, *sq_hctx; blk_mq_run_hw_queues() local
2358 struct blk_mq_hw_ctx *hctx, *sq_hctx; blk_mq_delay_run_hw_queues() local
2396 blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) blk_mq_stop_hw_queue() argument
2415 struct blk_mq_hw_ctx *hctx; blk_mq_stop_hw_queues() local
2423 blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) blk_mq_start_hw_queue() argument
2433 struct blk_mq_hw_ctx *hctx; blk_mq_start_hw_queues() local
2441 blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) blk_mq_start_stopped_hw_queue() argument
2453 struct blk_mq_hw_ctx *hctx; blk_mq_start_stopped_hw_queues() local
2464 struct blk_mq_hw_ctx *hctx = blk_mq_run_work_fn() local
2481 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; blk_mq_request_bypass_insert() local
2491 blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, struct list_head *list, bool run_queue_async) blk_mq_insert_requests() argument
2532 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; blk_mq_insert_request() local
2608 __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, struct request *rq, bool last) __blk_mq_issue_directly() argument
2666 blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, struct request *rq) blk_mq_try_issue_directly() argument
2699 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; blk_mq_request_issue_directly() local
2713 struct blk_mq_hw_ctx *hctx = NULL; blk_mq_plug_issue_direct() local
2854 blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, struct list_head *list) blk_mq_try_issue_list_directly() argument
2986 struct blk_mq_hw_ctx *hctx; blk_mq_submit_bio() local
3491 struct blk_mq_hw_ctx *hctx; global() member
3505 blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx) blk_mq_hctx_has_requests() argument
3517 blk_mq_last_cpu_in_hctx(unsigned int cpu, struct blk_mq_hw_ctx *hctx) blk_mq_last_cpu_in_hctx() argument
3529 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, blk_mq_hctx_notify_offline() local
3562 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, blk_mq_hctx_notify_online() local
3577 struct blk_mq_hw_ctx *hctx; blk_mq_hctx_notify_dead() local
3607 blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx) blk_mq_remove_cpuhp() argument
3646 blk_mq_exit_hctx(struct request_queue *q, struct blk_mq_tag_set *set, struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) blk_mq_exit_hctx() argument
3676 struct blk_mq_hw_ctx *hctx; blk_mq_exit_hw_queues() local
3686 blk_mq_init_hctx(struct request_queue *q, struct blk_mq_tag_set *set, struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) blk_mq_init_hctx() argument
3727 struct blk_mq_hw_ctx *hctx; blk_mq_alloc_hctx() local
3796 struct blk_mq_hw_ctx *hctx; blk_mq_init_cpu_queues() local
3876 struct blk_mq_hw_ctx *hctx; blk_mq_map_swqueue() local
3981 struct blk_mq_hw_ctx *hctx; queue_set_hctx_shared() local
4081 struct blk_mq_hw_ctx *hctx, *next; blk_mq_release() local
4192 struct blk_mq_hw_ctx *hctx = NULL, *tmp; blk_mq_alloc_and_init_hctx() local
4225 struct blk_mq_hw_ctx *hctx; blk_mq_realloc_hw_ctxs() local
4625 struct blk_mq_hw_ctx *hctx; blk_mq_update_nr_requests() local
4833 blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob, unsigned int flags) blk_hctx_poll() argument
4863 struct blk_mq_hw_ctx *hctx = xa_load(&q->hctx_table, cookie); blk_mq_poll() local
4894 struct blk_mq_hw_ctx *hctx; blk_mq_cancel_work_sync() local
[all...]
blk-mq-sysfs.c
34 struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx, in blk_mq_hw_sysfs_release() local
37 blk_free_flush_queue(hctx->fq); in blk_mq_hw_sysfs_release()
38 sbitmap_free(&hctx->ctx_map); in blk_mq_hw_sysfs_release()
39 free_cpumask_var(hctx->cpumask); in blk_mq_hw_sysfs_release()
40 kfree(hctx->ctxs); in blk_mq_hw_sysfs_release()
41 kfree(hctx); in blk_mq_hw_sysfs_release()
53 struct blk_mq_hw_ctx *hctx; in blk_mq_hw_sysfs_show() local
58 hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); in blk_mq_hw_sysfs_show()
59 q = hctx->queue; in blk_mq_hw_sysfs_show()
65 res = entry->show(hctx, pag in blk_mq_hw_sysfs_show()
70 blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx, char *page) blk_mq_hw_sysfs_nr_tags_show() argument
76 blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx, char *page) blk_mq_hw_sysfs_nr_reserved_tags_show() argument
82 blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) blk_mq_hw_sysfs_cpus_show() argument
144 blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx) blk_mq_unregister_hctx() argument
158 blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) blk_mq_register_hctx() argument
187 blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx) blk_mq_hctx_kobj_init() argument
222 struct blk_mq_hw_ctx *hctx; blk_mq_sysfs_register() local
259 struct blk_mq_hw_ctx *hctx; blk_mq_sysfs_unregister() local
275 struct blk_mq_hw_ctx *hctx; blk_mq_sysfs_unregister_hctxs() local
291 struct blk_mq_hw_ctx *hctx; blk_mq_sysfs_register_hctxs() local
[all...]
blk-mq-debugfs.c
178 struct blk_mq_hw_ctx *hctx = data; in hctx_state_show() local
180 blk_flags_show(m, hctx->state, hctx_state_name, in hctx_state_show()
206 struct blk_mq_hw_ctx *hctx = data; in hctx_flags_show() local
207 const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags); in hctx_flags_show()
217 hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy), in hctx_flags_show()
314 __acquires(&hctx->lock)
316 struct blk_mq_hw_ctx *hctx = m->private; variable
318 spin_lock(&hctx->lock);
319 return seq_list_start(&hctx->dispatch, *pos);
324 struct blk_mq_hw_ctx *hctx in hctx_dispatch_next() local
332 struct blk_mq_hw_ctx *hctx = m->private; global() variable
346 struct blk_mq_hw_ctx *hctx; global() member
366 struct blk_mq_hw_ctx *hctx = data; hctx_busy_show() local
383 struct blk_mq_hw_ctx *hctx = data; hctx_type_show() local
392 struct blk_mq_hw_ctx *hctx = data; hctx_ctx_map_show() local
417 struct blk_mq_hw_ctx *hctx = data; hctx_tags_show() local
434 struct blk_mq_hw_ctx *hctx = data; hctx_tags_bitmap_show() local
451 struct blk_mq_hw_ctx *hctx = data; hctx_sched_tags_show() local
468 struct blk_mq_hw_ctx *hctx = data; hctx_sched_tags_bitmap_show() local
485 struct blk_mq_hw_ctx *hctx = data; hctx_run_show() local
494 struct blk_mq_hw_ctx *hctx = data; hctx_run_write() local
502 struct blk_mq_hw_ctx *hctx = data; hctx_active_show() local
510 struct blk_mq_hw_ctx *hctx = data; hctx_dispatch_busy_show() local
657 struct blk_mq_hw_ctx *hctx; blk_mq_debugfs_register() local
688 blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx) blk_mq_debugfs_register_ctx() argument
700 blk_mq_debugfs_register_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx) blk_mq_debugfs_register_hctx() argument
719 blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx) blk_mq_debugfs_unregister_hctx() argument
730 struct blk_mq_hw_ctx *hctx; blk_mq_debugfs_register_hctxs() local
739 struct blk_mq_hw_ctx *hctx; blk_mq_debugfs_unregister_hctxs() local
816 blk_mq_debugfs_register_sched_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx) blk_mq_debugfs_register_sched_hctx() argument
840 blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx) blk_mq_debugfs_unregister_sched_hctx() argument
[all...]
blk-mq-tag.c
18 * Recalculate wakeup batch when tag is shared by hctx.
38 void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) in __blk_mq_tag_busy() argument
41 struct blk_mq_tags *tags = hctx->tags; in __blk_mq_tag_busy()
47 if (blk_mq_is_shared_tags(hctx->flags)) { in __blk_mq_tag_busy()
48 struct request_queue *q = hctx->queue; in __blk_mq_tag_busy()
54 if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) || in __blk_mq_tag_busy()
55 test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) in __blk_mq_tag_busy()
80 void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) in __blk_mq_tag_idle() argument
82 struct blk_mq_tags *tags = hctx->tags; in __blk_mq_tag_idle()
85 if (blk_mq_is_shared_tags(hctx in __blk_mq_tag_idle()
245 struct blk_mq_hw_ctx *hctx; global() member
269 struct blk_mq_hw_ctx *hctx = iter_data->hctx; bt_iter() local
312 bt_for_each(struct blk_mq_hw_ctx *hctx, struct request_queue *q, struct sbitmap_queue *bt, busy_tag_iter_fn *fn, void *data, bool reserved) bt_for_each() argument
516 struct blk_mq_hw_ctx *hctx; blk_mq_queue_tag_busy_iter() local
601 blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags **tagsptr, unsigned int tdepth, bool can_grow) blk_mq_tag_update_depth() argument
[all...]
blk-mq-sched.h
16 void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
17 void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
19 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
25 static inline void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_restart() argument
27 if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) in blk_mq_sched_restart()
28 __blk_mq_sched_restart(hctx); in blk_mq_sched_restart()
70 static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_has_work() argument
72 struct elevator_queue *e = hctx->queue->elevator; in blk_mq_sched_has_work()
75 return e->type->ops.has_work(hctx); in blk_mq_sched_has_work()
80 static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_needs_restart() argument
[all...]
kyber-iosched.c
138 * There is a same mapping between ctx & hctx and kcq & khd,
453 static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx) in kyber_depth_updated() argument
455 struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data; in kyber_depth_updated()
456 struct blk_mq_tags *tags = hctx->sched_tags; in kyber_depth_updated()
464 static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) in kyber_init_hctx() argument
469 khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node); in kyber_init_hctx()
473 khd->kcqs = kmalloc_array_node(hctx->nr_ctx, in kyber_init_hctx()
475 GFP_KERNEL, hctx->numa_node); in kyber_init_hctx()
479 for (i = 0; i < hctx->nr_ctx; i++) in kyber_init_hctx()
483 if (sbitmap_init_node(&khd->kcq_map[i], hctx in kyber_init_hctx()
519 kyber_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) kyber_exit_hctx() argument
571 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx); kyber_bio_merge() local
590 kyber_insert_requests(struct blk_mq_hw_ctx *hctx, struct list_head *rq_list, blk_insert_t flags) kyber_insert_requests() argument
698 struct blk_mq_hw_ctx *hctx = READ_ONCE(wqe->private); kyber_domain_wake() local
706 kyber_get_domain_token(struct kyber_queue_data *kqd, struct kyber_hctx_data *khd, struct blk_mq_hw_ctx *hctx) kyber_get_domain_token() argument
754 kyber_dispatch_cur_domain(struct kyber_queue_data *kqd, struct kyber_hctx_data *khd, struct blk_mq_hw_ctx *hctx) kyber_dispatch_cur_domain() argument
803 kyber_dispatch_request(struct blk_mq_hw_ctx *hctx) kyber_dispatch_request() argument
849 kyber_has_work(struct blk_mq_hw_ctx *hctx) kyber_has_work() argument
971 struct blk_mq_hw_ctx *hctx = data; kyber_cur_domain_show() local
980 struct blk_mq_hw_ctx *hctx = data; kyber_batching_show() local
[all...]
blk-mq-debugfs.h
25 struct blk_mq_hw_ctx *hctx);
26 void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
33 struct blk_mq_hw_ctx *hctx);
34 void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
44 struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_register_hctx()
48 static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_unregister_hctx() argument
69 struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_register_sched_hctx()
73 static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_unregister_sched_hctx() argument
43 blk_mq_debugfs_register_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx) blk_mq_debugfs_register_hctx() argument
68 blk_mq_debugfs_register_sched_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx) blk_mq_debugfs_register_sched_hctx() argument
/kernel/linux/linux-6.6/samples/hid/
hid_mouse.bpf.c
9 int BPF_PROG(hid_y_event, struct hid_bpf_ctx *hctx) in BPF_PROG() argument
12 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 9 /* size */); in BPF_PROG()
17 bpf_printk("event: size: %d", hctx->size); in BPF_PROG()
55 int BPF_PROG(hid_x_event, struct hid_bpf_ctx *hctx) in BPF_PROG() argument
58 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 9 /* size */); in BPF_PROG()
73 int BPF_PROG(hid_rdesc_fixup, struct hid_bpf_ctx *hctx) in BPF_PROG() argument
75 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */); in BPF_PROG()
hid_surface_dial.bpf.c
14 int BPF_PROG(hid_event, struct hid_bpf_ctx *hctx) in BPF_PROG() argument
16 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 9 /* size */); in BPF_PROG()
105 int BPF_PROG(hid_rdesc_fixup, struct hid_bpf_ctx *hctx) in BPF_PROG() argument
107 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */); in BPF_PROG()
/kernel/linux/linux-5.10/include/linux/
blk-mq.h
40 /** @cpumask: Map of available CPUs where this hctx can run. */
70 * this hctx
144 * @elevator_queued: Number of queued requests on hctx.
173 * @hctx_list: if this hctx is not in use, this is an entry in
207 * @HCTX_MAX_TYPES: Number of types of hctx.
219 * @map: One or more ctx -> hctx mappings. One map exists for each
241 * @__bitmap_tags: A shared tags sbitmap, used over all hctx's
243 * A shared reserved tags sbitmap, used over all hctx's
513 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
514 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
587 request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq) request_to_qc_t() argument
[all...]
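
The struct documentation excerpted here describes, among other fields, the per-type ctx -> hctx map: for each hardware queue type there is a table translating a CPU's software context to a hardware queue index. Below is a minimal sketch of building such a map by spreading CPUs round-robin over the hardware queues; nothing in it is the kernel's own mapping code, and struct queue_map_sketch, build_map() and NR_CPUS_SKETCH are hypothetical names.

#include <stdio.h>

#define NR_CPUS_SKETCH 8        /* illustrative CPU count */

struct queue_map_sketch {
    unsigned int nr_queues;               /* hardware queues of this type */
    unsigned int mq_map[NR_CPUS_SKETCH];  /* cpu -> hctx index */
};

/* Spread CPUs round-robin across the available hardware queues. */
static void build_map(struct queue_map_sketch *qm)
{
    for (unsigned int cpu = 0; cpu < NR_CPUS_SKETCH; cpu++)
        qm->mq_map[cpu] = cpu % qm->nr_queues;
}

int main(void)
{
    struct queue_map_sketch qm = { .nr_queues = 3 };

    build_map(&qm);
    for (unsigned int cpu = 0; cpu < NR_CPUS_SKETCH; cpu++)
        printf("cpu %u -> hctx %u\n", cpu, qm.mq_map[cpu]);
    return 0;
}
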
/kernel/linux/linux-5.10/net/dccp/ccids/
ccid3.h
104 struct ccid3_hc_tx_sock *hctx = ccid_priv(dccp_sk(sk)->dccps_hc_tx_ccid); in ccid3_hc_tx_sk() local
105 BUG_ON(hctx == NULL); in ccid3_hc_tx_sk()
106 return hctx; in ccid3_hc_tx_sk()
/kernel/linux/linux-6.6/net/dccp/ccids/
ccid3.h
104 struct ccid3_hc_tx_sock *hctx = ccid_priv(dccp_sk(sk)->dccps_hc_tx_ccid); in ccid3_hc_tx_sk() local
105 BUG_ON(hctx == NULL); in ccid3_hc_tx_sk()
106 return hctx; in ccid3_hc_tx_sk()
