
Searched refs:blkg (Results 1 - 20 of 20) sorted by relevance

/kernel/linux/linux-6.6/block/
blk-cgroup.c
68 * New IO stats are stored in the percpu iostat_cpu within blkcg_gq (blkg).
69 * There are multiple blkg's (one for each block device) attached to each
71 * but it doesn't know which blkg has the updated stats. If there are many
72 * block devices in a system, the cost of iterating all the blkg's to flush
78 * References to blkg are gotten and then put back in the process to
79 * protect against blkg removal.
121 struct blkcg_gq *blkg = container_of(work, struct blkcg_gq, in blkg_free_workfn() local
123 struct request_queue *q = blkg->q; in blkg_free_workfn()
129 * of the list blkg->q_node is delayed to here from blkg_destroy(), and in blkg_free_workfn()
135 if (blkg in blkg_free_workfn()
156 blkg_free(struct blkcg_gq *blkg) blkg_free() argument
171 struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head); __blkg_release() local
202 struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt); blkg_release() local
212 struct blkcg_gq *blkg = container_of(work, struct blkcg_gq, blkg_async_bio_workfn() local
244 struct blkcg_gq *blkg = bio->bi_blkg; blkcg_punt_bio_submit() local
308 struct blkcg_gq *blkg; blkg_alloc() local
378 struct blkcg_gq *blkg; blkg_create() local
475 struct blkcg_gq *blkg; blkg_lookup_create() local
528 blkg_destroy(struct blkcg_gq *blkg) blkg_destroy() argument
578 struct blkcg_gq *blkg, *n; blkg_destroy_all() local
626 struct blkcg_gq *blkg; blkcg_reset_stats() local
663 blkg_dev_name(struct blkcg_gq *blkg) blkg_dev_name() argument
694 struct blkcg_gq *blkg; blkcg_print_blkgs() local
818 struct blkcg_gq *blkg; global() variable
982 blkcg_iostat_update(struct blkcg_gq *blkg, struct blkg_iostat *cur, struct blkg_iostat *last) blkcg_iostat_update() argument
1022 struct blkcg_gq *blkg = bisc->blkg; __blkcg_rstat_flush() local
1074 struct blkcg_gq *blkg = bdev->bd_disk->queue->root_blkg; blkcg_fill_root_iostats() local
1105 blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s) blkcg_print_one_stat() argument
1160 struct blkcg_gq *blkg; blkcg_print_stat() local
1239 struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first, blkcg_destroy_blkgs() local
1415 struct blkcg_gq *new_blkg, *blkg; blkcg_init_disk() local
1516 struct blkcg_gq *blkg, *pinned_blkg = NULL; blkcg_activate_policy() local
1630 struct blkcg_gq *blkg; blkcg_deactivate_policy() local
1788 blkcg_scale_delay(struct blkcg_gq *blkg, u64 now) blkcg_scale_delay() argument
1845 blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay) blkcg_maybe_throttle_blkg() argument
1915 struct blkcg_gq *blkg; blkcg_maybe_throttle_current() local
1989 blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta) blkcg_add_delay() argument
2009 struct blkcg_gq *blkg, *ret_blkg = NULL; blkg_tryget_closest() local
[all...]
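
The blk-cgroup.c header comment excerpted above (source lines 68-79) explains why stat flushing should walk only the blkgs that actually have pending per-CPU updates, rather than every blkg attached to the blkcg, and why references are taken and put around the walk to guard against blkg removal. The following is a minimal user-space sketch of that idea, not the kernel implementation: all names here (fake_blkg, update_list, account_io, flush_stats) are invented for illustration, and the real code uses per-CPU lockless lists plus reference counting instead of a single list and a flag.

/*
 * User-space sketch (not kernel code): remember only the blkgs whose
 * counters were touched since the last flush, and flush just that list.
 */
#include <stdio.h>
#include <stdbool.h>

struct fake_blkg {
    const char *dev_name;
    unsigned long long pending_bytes;   /* stands in for the percpu iostat */
    bool on_update_list;
    struct fake_blkg *next_updated;     /* singly linked "updated" list */
};

static struct fake_blkg *update_list;   /* blkgs with unflushed stats */

/* Hot path: account an IO and remember that this blkg needs flushing. */
static void account_io(struct fake_blkg *blkg, unsigned long long bytes)
{
    blkg->pending_bytes += bytes;
    if (!blkg->on_update_list) {        /* enqueue at most once */
        blkg->on_update_list = true;
        blkg->next_updated = update_list;
        update_list = blkg;
    }
}

/* Flush path: only the blkgs that were actually updated are visited. */
static void flush_stats(void)
{
    while (update_list) {
        struct fake_blkg *blkg = update_list;

        update_list = blkg->next_updated;
        blkg->on_update_list = false;
        printf("flush %s: %llu bytes\n", blkg->dev_name, blkg->pending_bytes);
        blkg->pending_bytes = 0;
    }
}

int main(void)
{
    struct fake_blkg sda = { .dev_name = "sda" };
    struct fake_blkg sdb = { .dev_name = "sdb" };

    account_io(&sda, 4096);
    account_io(&sda, 8192);
    account_io(&sdb, 512);
    flush_stats();                      /* visits only sda and sdb */
    return 0;
}
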
blk-cgroup.h
47 struct blkcg_gq *blkg; member
68 /* is this blkg online? protected by both blkcg and q locks */
125 * A blkcg_gq (blkg) is association between a block cgroup (blkcg) and a
129 * There can be multiple active blkcg policies and each blkg:policy pair is
136 /* the blkg and policy id this per-policy data belongs to */
137 struct blkcg_gq *blkg; member
201 const char *blkg_dev_name(struct blkcg_gq *blkg);
213 struct blkcg_gq *blkg; member
223 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
224 * @return: true if this bio needs to be submitted with the root blkg context.
250 struct blkcg_gq *blkg; blkg_lookup() local
273 blkg_to_pd(struct blkcg_gq *blkg, struct blkcg_policy *pol) blkg_to_pd() argument
309 blkg_path(struct blkcg_gq *blkg, char *buf, int buflen) blkg_path() argument
320 blkg_get(struct blkcg_gq *blkg) blkg_get() argument
332 blkg_tryget(struct blkcg_gq *blkg) blkg_tryget() argument
341 blkg_put(struct blkcg_gq *blkg) blkg_put() argument
383 blkcg_use_delay(struct blkcg_gq *blkg) blkcg_use_delay() argument
391 blkcg_unuse_delay(struct blkcg_gq *blkg) blkcg_unuse_delay() argument
426 blkcg_set_delay(struct blkcg_gq *blkg, u64 delay) blkcg_set_delay() argument
443 blkcg_clear_delay(struct blkcg_gq *blkg) blkcg_clear_delay() argument
493 blkg_to_pd(struct blkcg_gq *blkg, struct blkcg_policy *pol) blkg_to_pd() argument
496 blkg_path(struct blkcg_gq *blkg) blkg_path() argument
497 blkg_get(struct blkcg_gq *blkg) blkg_get() argument
498 blkg_put(struct blkcg_gq *blkg) blkg_put() argument
[all...]
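
The blk-cgroup.h excerpt above describes a blkcg_gq (blkg) as the association between a block cgroup and a request_queue, with each active blkcg policy contributing its own per-blkg blkg_policy_data and helpers such as blkg_to_pd() returning the slot for a given policy. Below is a minimal user-space sketch of that layout, assuming a fixed array indexed by policy id; the struct and function names are illustrative only, not the kernel's.

/*
 * User-space sketch (not kernel code) of the blkg : per-policy-data
 * association: one data slot per registered policy, looked up by policy id.
 */
#include <stdio.h>

#define MAX_POLICIES 4

struct fake_blkg;                       /* forward declaration */

struct fake_policy_data {
    struct fake_blkg *blkg;             /* back-pointer, as in blkg_policy_data */
    int plid;                           /* policy id this data belongs to */
    long weight;                        /* example policy-private field */
};

struct fake_policy {
    const char *name;
    int plid;                           /* index into blkg->pd[] */
};

struct fake_blkg {
    const char *dev_name;
    struct fake_policy_data *pd[MAX_POLICIES];  /* one slot per policy */
};

/* Analogue of blkg_to_pd(): fetch the per-policy data of @pol for @blkg. */
static struct fake_policy_data *fake_blkg_to_pd(struct fake_blkg *blkg,
                                                const struct fake_policy *pol)
{
    return blkg ? blkg->pd[pol->plid] : NULL;
}

int main(void)
{
    struct fake_policy iolat = { .name = "iolatency", .plid = 0 };
    struct fake_blkg blkg = { .dev_name = "sda" };
    struct fake_policy_data pd = { .blkg = &blkg, .plid = iolat.plid, .weight = 100 };

    blkg.pd[iolat.plid] = &pd;
    printf("%s/%s weight=%ld\n", blkg.dev_name, iolat.name,
           fake_blkg_to_pd(&blkg, &iolat)->weight);
    return 0;
}
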
blk-iolatency.c
25 * root blkg
188 static inline struct iolatency_grp *blkg_to_lat(struct blkcg_gq *blkg) in blkg_to_lat() argument
190 return pd_to_lat(blkg_to_pd(blkg, &blkcg_policy_iolatency)); in blkg_to_lat()
466 struct blkcg_gq *blkg = bio->bi_blkg; in blkcg_iolatency_throttle() local
472 while (blkg && blkg->parent) { in blkcg_iolatency_throttle()
473 struct iolatency_grp *iolat = blkg_to_lat(blkg); in blkcg_iolatency_throttle()
475 blkg = blkg->parent; in blkcg_iolatency_throttle()
482 blkg in blkcg_iolatency_throttle()
525 struct blkcg_gq *blkg = lat_to_blkg(iolat); iolatency_check_latencies() local
593 struct blkcg_gq *blkg; blkcg_iolatency_done_bio() local
662 struct blkcg_gq *blkg; blkiolatency_timer_fn() local
787 iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val) iolatency_set_min_lat_nsec() argument
809 iolatency_clear_scaling(struct blkcg_gq *blkg) iolatency_clear_scaling() argument
831 struct blkcg_gq *blkg; iolatency_set_limit() local
985 struct blkcg_gq *blkg = lat_to_blkg(iolat); iolatency_pd_init() local
1028 struct blkcg_gq *blkg = lat_to_blkg(iolat); iolatency_pd_offline() local
[all...]
bfq-cgroup.c
284 static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg) in blkg_to_bfqg() argument
286 return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq)); in blkg_to_bfqg()
427 * Make sure that bfqg and its associated blkg do not in bfq_init_entity()
531 struct blkcg_gq *blkg = pd_to_blkg(pd); in bfq_pd_init() local
532 struct bfq_group *bfqg = blkg_to_bfqg(blkg); in bfq_pd_init()
533 struct bfq_data *bfqd = blkg->q->elevator->elevator_data; in bfq_pd_init()
535 struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg); in bfq_pd_init()
601 struct blkcg_gq *blkg = bio->bi_blkg; in bfq_bio_bfqg() local
604 while (blkg) { in bfq_bio_bfqg()
605 if (!blkg in bfq_bio_bfqg()
994 struct blkcg_gq *blkg; bfq_end_wr_async() local
1078 struct blkcg_gq *blkg; bfq_io_set_weight_legacy() local
1193 struct blkcg_gq *blkg = pd_to_blkg(pd); bfqg_prfill_stat_recursive() local
[all...]
blk-cgroup-rwstat.c
3 * Legacy blkg rwstat helpers enabled by CONFIG_BLK_CGROUP_RWSTAT.
52 const char *dname = blkg_dev_name(pd->blkg); in __blkg_prfill_rwstat()
91 * @blkg: blkg of interest
93 * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
96 * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
100 * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
101 * is at @off bytes into @blkg's blkg_policy_data of the policy.
103 void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol, in blkg_rwstat_recursive_sum() argument
110 lockdep_assert_held(&blkg in blkg_rwstat_recursive_sum()
[all...]
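
The blk-cgroup-rwstat.c excerpt above documents that blkg_rwstat_recursive_sum() takes a byte offset interpreted relative either to the blkg itself (when @pol is NULL) or to the blkg's per-policy data for that policy. The user-space sketch below shows only that offset convention; it omits the recursive summation over descendants and the locking the real helper requires, and every name in it is invented for illustration.

/*
 * User-space sketch (not kernel code) of the "@off bytes into @blkg or into
 * the policy data" convention, using offsetof() to form the offsets.
 */
#include <stdio.h>
#include <stddef.h>

struct fake_rwstat { unsigned long long read, write; };

struct fake_policy_data { struct fake_rwstat serviced; };

struct fake_blkg {
    struct fake_rwstat iostat;
    struct fake_policy_data *pd;        /* per-policy data, may be NULL */
};

struct fake_policy { const char *name; };

/* Read the rwstat found @off bytes into @blkg or into its policy data. */
static struct fake_rwstat *rwstat_at(struct fake_blkg *blkg,
                                     const struct fake_policy *pol, size_t off)
{
    void *base = pol ? (void *)blkg->pd : (void *)blkg;

    return (struct fake_rwstat *)((char *)base + off);
}

int main(void)
{
    struct fake_policy_data pd = { .serviced = { .read = 7, .write = 3 } };
    struct fake_blkg blkg = { .iostat = { .read = 10, .write = 20 }, .pd = &pd };
    struct fake_policy throtl = { .name = "throttle" };

    struct fake_rwstat *a = rwstat_at(&blkg, NULL,
                                      offsetof(struct fake_blkg, iostat));
    struct fake_rwstat *b = rwstat_at(&blkg, &throtl,
                                      offsetof(struct fake_policy_data, serviced));

    printf("blkg iostat: r=%llu w=%llu\n", a->read, a->write);
    printf("%s serviced: r=%llu w=%llu\n", throtl.name, b->read, b->write);
    return 0;
}
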
blk-throttle.c
149 struct blkcg_gq *blkg = tg_to_blkg(tg); in tg_bps_limit() local
153 if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent) in tg_bps_limit()
160 if (!list_empty(&blkg->blkcg->css.children) || in tg_bps_limit()
179 struct blkcg_gq *blkg = tg_to_blkg(tg); in tg_iops_limit() local
183 if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent) in tg_iops_limit()
190 if (!list_empty(&blkg->blkcg->css.children) || in tg_iops_limit()
389 struct blkcg_gq *blkg = tg_to_blkg(tg); in throtl_pd_init() local
390 struct throtl_data *td = blkg->q->td; in throtl_pd_init()
408 if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent) in throtl_pd_init()
409 sq->parent_sq = &blkg_to_tg(blkg in throtl_pd_init()
450 struct blkcg_gq *blkg; blk_throtl_update_limit_valid() local
1316 struct blkcg_gq *blkg; tg_conf_updated() local
1723 struct blkcg_gq *blkg; blk_throtl_cancel_bios() local
1877 struct blkcg_gq *blkg; throtl_can_upgrade() local
1925 struct blkcg_gq *blkg; throtl_upgrade_state() local
2179 struct blkcg_gq *blkg = bio->bi_blkg; __blk_throtl_bio() local
2315 struct blkcg_gq *blkg; blk_throtl_bio_endio() local
[all...]
blk-throttle.h
23 * It's also used to track the reference counts on blkg's. A qnode always
26 * queued and decrementing when dequeued is enough to keep the whole blkg
162 static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg) in blkg_to_tg() argument
164 return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl)); in blkg_to_tg()
blk-iocost.c
684 static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg) in blkg_to_iocg() argument
686 return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost)); in blkg_to_iocg()
1249 struct blkcg_gq *blkg = iocg_to_blkg(iocg); in weight_updated() local
1250 struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg); in weight_updated()
1349 struct blkcg_gq *blkg = iocg_to_blkg(iocg); in iocg_kick_delay() local
1397 blkcg_set_delay(blkg, delay * NSEC_PER_USEC); in iocg_kick_delay()
1405 blkcg_clear_delay(blkg); in iocg_kick_delay()
2597 struct blkcg_gq *blkg = bio->bi_blkg; in ioc_rqos_throttle() local
2599 struct ioc_gq *iocg = blkg_to_iocg(blkg); in ioc_rqos_throttle()
2975 struct blkcg_gq *blkg in ioc_pd_init() local
3096 struct blkcg_gq *blkg; ioc_weight_write() local
[all...]
blk-cgroup-rwstat.h
3 * Legacy blkg rwstat helpers enabled by CONFIG_BLK_CGROUP_RWSTAT.
48 void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
blk-ioprio.c
94 return blkcg_to_ioprio_blkcg(pd->blkg->blkcg); in ioprio_blkcg_from_bio()
/kernel/linux/linux-5.10/block/
blk-cgroup.c
67 * blkg_free - free a blkg
68 * @blkg: blkg to free
70 * Free @blkg which may be partially allocated.
72 static void blkg_free(struct blkcg_gq *blkg) in blkg_free() argument
76 if (!blkg) in blkg_free()
80 if (blkg->pd[i]) in blkg_free()
81 blkcg_policy[i]->pd_free_fn(blkg->pd[i]); in blkg_free()
83 free_percpu(blkg->iostat_cpu); in blkg_free()
84 percpu_ref_exit(&blkg in blkg_free()
90 struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head); __blkg_release() local
111 struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt); blkg_release() local
118 struct blkcg_gq *blkg = container_of(work, struct blkcg_gq, blkg_async_bio_workfn() local
153 struct blkcg_gq *blkg; blkg_alloc() local
206 struct blkcg_gq *blkg; blkg_lookup_slowpath() local
235 struct blkcg_gq *blkg; blkg_create() local
328 struct blkcg_gq *blkg; blkg_lookup_create() local
377 blkg_destroy(struct blkcg_gq *blkg) blkg_destroy() argument
425 struct blkcg_gq *blkg, *n; blkg_destroy_all() local
444 struct blkcg_gq *blkg; blkcg_reset_stats() local
476 blkg_dev_name(struct blkcg_gq *blkg) blkg_dev_name() argument
508 struct blkcg_gq *blkg; blkcg_print_blkgs() local
614 struct blkcg_gq *blkg; __acquires() local
778 struct blkcg_gq *blkg; blkcg_rstat_flush() local
836 struct blkcg_gq *blkg = blk_queue_root_blkg(disk->queue); blkcg_fill_root_iostats() local
870 struct blkcg_gq *blkg; blkcg_print_stat() local
1036 struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first, blkcg_destroy_blkgs() local
1172 struct blkcg_gq *new_blkg, *blkg; blkcg_init_queue() local
1325 struct blkcg_gq *blkg, *pinned_blkg = NULL; blkcg_activate_policy() local
1431 struct blkcg_gq *blkg; blkcg_deactivate_policy() local
1585 struct blkcg_gq *blkg = bio->bi_blkg; __blkcg_punt_bio_submit() local
1608 blkcg_scale_delay(struct blkcg_gq *blkg, u64 now) blkcg_scale_delay() argument
1665 blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay) blkcg_maybe_throttle_blkg() argument
1736 struct blkcg_gq *blkg; blkcg_maybe_throttle_current() local
1812 blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta) blkcg_add_delay() argument
1832 struct blkcg_gq *blkg, *ret_blkg = NULL; blkg_tryget_closest() local
[all...]
blk-iolatency.c
25 * root blkg
187 static inline struct iolatency_grp *blkg_to_lat(struct blkcg_gq *blkg) in blkg_to_lat() argument
189 return pd_to_lat(blkg_to_pd(blkg, &blkcg_policy_iolatency)); in blkg_to_lat()
468 struct blkcg_gq *blkg = bio->bi_blkg; in blkcg_iolatency_throttle() local
474 while (blkg && blkg->parent) { in blkcg_iolatency_throttle()
475 struct iolatency_grp *iolat = blkg_to_lat(blkg); in blkcg_iolatency_throttle()
477 blkg = blkg->parent; in blkcg_iolatency_throttle()
484 blkg in blkcg_iolatency_throttle()
527 struct blkcg_gq *blkg = lat_to_blkg(iolat); iolatency_check_latencies() local
595 struct blkcg_gq *blkg; blkcg_iolatency_done_bio() local
664 struct blkcg_gq *blkg; blkiolatency_timer_fn() local
790 iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val) iolatency_set_min_lat_nsec() argument
812 iolatency_clear_scaling(struct blkcg_gq *blkg) iolatency_clear_scaling() argument
834 struct blkcg_gq *blkg; iolatency_set_limit() local
975 struct blkcg_gq *blkg = lat_to_blkg(iolat); iolatency_pd_init() local
1020 struct blkcg_gq *blkg = lat_to_blkg(iolat); iolatency_pd_offline() local
[all...]
bfq-cgroup.c
289 static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg) in blkg_to_bfqg() argument
291 return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq)); in blkg_to_bfqg()
434 * Make sure that bfqg and its associated blkg do not in bfq_init_entity()
544 struct blkcg_gq *blkg = pd_to_blkg(pd); in bfq_pd_init() local
545 struct bfq_group *bfqg = blkg_to_bfqg(blkg); in bfq_pd_init()
546 struct bfq_data *bfqd = blkg->q->elevator->elevator_data; in bfq_pd_init()
548 struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg); in bfq_pd_init()
612 struct blkcg_gq *blkg = bio->bi_blkg; in bfq_bio_bfqg() local
615 while (blkg) { in bfq_bio_bfqg()
616 if (!blkg in bfq_bio_bfqg()
976 struct blkcg_gq *blkg; bfq_end_wr_async() local
1060 struct blkcg_gq *blkg; bfq_io_set_weight_legacy() local
1173 struct blkcg_gq *blkg = pd_to_blkg(pd); bfqg_prfill_stat_recursive() local
[all...]
blk-cgroup-rwstat.c
3 * Legacy blkg rwstat helpers enabled by CONFIG_BLK_CGROUP_RWSTAT.
52 const char *dname = blkg_dev_name(pd->blkg); in __blkg_prfill_rwstat()
91 * @blkg: blkg of interest
93 * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
96 * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
100 * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
101 * is at @off bytes into @blkg's blkg_policy_data of the policy.
103 void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol, in blkg_rwstat_recursive_sum() argument
110 lockdep_assert_held(&blkg in blkg_rwstat_recursive_sum()
[all...]
blk-throttle.c
63 * It's also used to track the reference counts on blkg's. A qnode always
66 * queued and decrementing when dequeued is enough to keep the whole blkg
240 static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg) in blkg_to_tg() argument
242 return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl)); in blkg_to_tg()
302 struct blkcg_gq *blkg = tg_to_blkg(tg); in tg_bps_limit() local
306 if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent) in tg_bps_limit()
313 if (!list_empty(&blkg->blkcg->css.children) || in tg_bps_limit()
332 struct blkcg_gq *blkg = tg_to_blkg(tg); in tg_iops_limit() local
336 if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent) in tg_iops_limit()
343 if (!list_empty(&blkg in tg_iops_limit()
543 struct blkcg_gq *blkg = tg_to_blkg(tg); throtl_pd_init() local
597 struct blkcg_gq *blkg; blk_throtl_update_limit_valid() local
1406 struct blkcg_gq *blkg; tg_conf_updated() local
1945 struct blkcg_gq *blkg; throtl_can_upgrade() local
1993 struct blkcg_gq *blkg; throtl_upgrade_state() local
2223 struct blkcg_gq *blkg = bio->bi_blkg; blk_throtl_charge_bio_split() local
2243 struct blkcg_gq *blkg = bio->bi_blkg; blk_throtl_bio() local
2391 struct blkcg_gq *blkg; blk_throtl_bio_endio() local
[all...]
blk-iocost.c
692 static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg) in blkg_to_iocg() argument
694 return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost)); in blkg_to_iocg()
1192 struct blkcg_gq *blkg = iocg_to_blkg(iocg); in weight_updated() local
1193 struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg); in weight_updated()
1292 struct blkcg_gq *blkg = iocg_to_blkg(iocg); in iocg_kick_delay() local
1340 blkcg_set_delay(blkg, delay * NSEC_PER_USEC); in iocg_kick_delay()
1348 blkcg_clear_delay(blkg); in iocg_kick_delay()
2564 struct blkcg_gq *blkg = bio->bi_blkg; in ioc_rqos_throttle() local
2566 struct ioc_gq *iocg = blkg_to_iocg(blkg); in ioc_rqos_throttle()
2941 struct blkcg_gq *blkg in ioc_pd_init() local
3068 struct blkcg_gq *blkg; ioc_weight_write() local
[all...]
blk-cgroup-rwstat.h
3 * Legacy blkg rwstat helpers enabled by CONFIG_BLK_CGROUP_RWSTAT.
48 void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
/kernel/linux/linux-5.10/include/linux/
blk-cgroup.h
76 * A blkcg_gq (blkg) is association between a block cgroup (blkcg) and a
80 * There can be multiple active blkcg policies and each blkg:policy pair is
87 /* the blkg and policy id this per-policy data belongs to */
88 struct blkcg_gq *blkg; member
119 /* is this blkg online? protected by both blkcg and q locks */
192 const char *blkg_dev_name(struct blkcg_gq *blkg);
202 struct blkcg_gq *blkg; member
290 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
291 * @return: true if this bio needs to be submitted with the root blkg context.
294 * it were attached to the root blkg, an
331 struct blkcg_gq *blkg; __blkg_lookup() local
376 blkg_to_pd(struct blkcg_gq *blkg, struct blkcg_policy *pol) blkg_to_pd() argument
446 blkg_path(struct blkcg_gq *blkg, char *buf, int buflen) blkg_path() argument
457 blkg_get(struct blkcg_gq *blkg) blkg_get() argument
469 blkg_tryget(struct blkcg_gq *blkg) blkg_tryget() argument
478 blkg_put(struct blkcg_gq *blkg) blkg_put() argument
530 blkcg_use_delay(struct blkcg_gq *blkg) blkcg_use_delay() argument
538 blkcg_unuse_delay(struct blkcg_gq *blkg) blkcg_unuse_delay() argument
577 blkcg_set_delay(struct blkcg_gq *blkg, u64 delay) blkcg_set_delay() argument
594 blkcg_clear_delay(struct blkcg_gq *blkg) blkcg_clear_delay() argument
663 blkg_to_pd(struct blkcg_gq *blkg, struct blkcg_policy *pol) blkg_to_pd() argument
666 blkg_path(struct blkcg_gq *blkg) blkg_path() argument
667 blkg_get(struct blkcg_gq *blkg) blkg_get() argument
668 blkg_put(struct blkcg_gq *blkg) blkg_put() argument
[all...]
/kernel/linux/linux-5.10/tools/cgroup/
iocost_monitor.py
74 blkg = drgn.Object(prog, 'struct blkcg_gq',
76 if not blkg.address_:
79 self.blkgs.append((path if path else '/', blkg))
143 blkg = iocg.pd.blkg
165 if blkg.use_delay.counter.value_() != 0:
166 self.delay_ms = blkg.delay_nsec.counter.value_() / 1_000_000
222 blkg = drgn.Object(prog, 'struct blkcg_gq', address=ptr) variable
224 if devname == blkg.q.kobj.parent.name.string_().decode('utf-8'):
225 q_id = blkg
[all...]
/kernel/linux/linux-6.6/tools/cgroup/
iocost_monitor.py
79 blkg = drgn.Object(prog, 'struct blkcg_gq',
81 if not blkg.address_:
84 self.blkgs.append((path if path else '/', blkg))
146 blkg = iocg.pd.blkg
170 if blkg.use_delay.counter.value_() != 0:
171 self.delay_ms = blkg.delay_nsec.counter.value_() / 1_000_000
229 blkg = drgn.Object(prog, 'struct blkcg_gq', address=ptr) variable
231 if devname == blkg.q.mq_kobj.parent.name.string_().decode('utf-8'):
232 q_id = blkg
[all...]
