/kernel/linux/linux-5.10/include/linux/
blk-cgroup.h
    47: struct blkcg {  [struct]
    76: * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
    77: * request_queue (q). This is used by blkcg policies which need to track
    78: * information per blkcg - q pair.
    80: * There can be multiple active blkcg policies and each blkg:policy pair is
    93: * Policies that need to keep per-blkcg data which is independent from any
    97: * cpd_init() is invoked to let each policy handle per-blkcg data.
    100: /* the blkcg and policy id this per-policy data belongs to */
    101: struct blkcg *blkcg;  [member]
    111: struct blkcg *blkcg;  [member]
    311: blkcg_parent(struct blkcg *blkcg)  [in blkcg_parent(), argument]
    327: __blkg_lookup(struct blkcg *blkcg, struct request_queue *q, bool update_hint)  [in __blkg_lookup(), argument]
    351: blkg_lookup(struct blkcg *blkcg, struct request_queue *q)  [in blkg_lookup(), argument]
    382: blkcg_to_cpd(struct blkcg *blkcg, struct blkcg_policy *pol)  [in blkcg_to_cpd(), argument]
    414: blkcg_pin_online(struct blkcg *blkcg)  [in blkcg_pin_online(), argument]
    428: blkcg_unpin_online(struct blkcg *blkcg)  [in blkcg_unpin_online(), argument]
    624: struct blkcg {  [struct]
    648: blkg_lookup(struct blkcg *blkcg, void *key)  [in blkg_lookup(), argument]
    (additional matches not shown)
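The header comments quoted above describe the core pairing: a blkcg_gq (blkg) ties one blkcg to one request_queue, and blkg_lookup() resolves that pair under RCU. A minimal sketch of such a lookup, assuming the 5.10 API shown in this header; the wrapper function and its printout are hypothetical:

    #include <linux/blk-cgroup.h>
    #include <linux/rcupdate.h>

    /* Hypothetical helper: report whether a blkg exists for a (blkcg, q) pair. */
    static void example_report_pair(struct blkcg *blkcg, struct request_queue *q)
    {
            struct blkcg_gq *blkg;

            rcu_read_lock();
            blkg = blkg_lookup(blkcg, q);   /* the per (blkcg, q) association */
            pr_info("blkg %s for this blkcg/queue pair\n",
                    blkg ? "exists" : "does not exist");
            rcu_read_unlock();
    }

Per-blkcg policy data (the cpd mentioned at line 97) is handled separately; see the blkcg_to_cpd() sketch under blk-ioprio.c below.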
blktrace_api.h
    32: struct blkcg;
    37: void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *fmt, ...);
backing-dev.h
    178: void wb_blkcg_offline(struct blkcg *blkcg);
    206: * Find the wb of @bdi which matches both the memcg and blkcg of %current.
    222: * %current's blkcg equals the effective blkcg of its memcg. No …  [in wb_find_current()]
    384: static inline void wb_blkcg_offline(struct blkcg *blkcg)  [in wb_blkcg_offline(), argument]
/kernel/linux/linux-6.6/block/
blk-ioprio.c
    75: static struct ioprio_blkcg *blkcg_to_ioprio_blkcg(struct blkcg *blkcg)  [in blkcg_to_ioprio_blkcg(), argument]
    77: return container_of(blkcg_to_cpd(blkcg, &ioprio_policy),  [in blkcg_to_ioprio_blkcg()]
    94: return blkcg_to_ioprio_blkcg(pd->blkg->blkcg);  [in ioprio_blkcg_from_bio()]
    99: struct ioprio_blkcg *blkcg = ioprio_blkcg_from_css(seq_css(sf));  [in ioprio_show_prio_policy(), local]
    101: seq_printf(sf, "%s\n", policy_name[blkcg->prio_policy]);  [in ioprio_show_prio_policy()]
    108: struct ioprio_blkcg *blkcg = ioprio_blkcg_from_css(of_css(of));  [in ioprio_set_prio_policy(), local]
    117: blkcg->prio_policy = ret;  [in ioprio_set_prio_policy()]
    122: ioprio_alloc_pd(struct gendisk *disk, struct blkcg *blkcg, gfp_…  [in ioprio_alloc_pd(), argument]
    142: struct ioprio_blkcg *blkcg;  [in ioprio_alloc_cpd(), local]
    153: struct ioprio_blkcg *blkcg = container_of(cpd, typeof(*blkcg), cpd);  [in ioprio_free_cpd(), local]
    189: struct ioprio_blkcg *blkcg = ioprio_blkcg_from_bio(bio);  [in blkcg_set_ioprio(), local]
    (additional matches not shown)
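The blkcg_to_ioprio_blkcg() hits show the standard pattern for recovering a policy's per-blkcg data: blkcg_to_cpd() returns the generic struct blkcg_policy_data and container_of() converts it back to the policy's wrapper. A hedged sketch with a made-up policy; example_policy and struct example_blkcg_data are not kernel symbols, only blkcg_to_cpd() and struct blkcg_policy_data are:

    #include "blk-cgroup.h"  /* block/blk-cgroup.h in 6.6; <linux/blk-cgroup.h> in 5.10 */

    static struct blkcg_policy example_policy;      /* hypothetical policy descriptor */

    struct example_blkcg_data {
            struct blkcg_policy_data cpd;   /* generic part registered with the blkcg core */
            int setting;                    /* policy-private per-blkcg state */
    };

    static struct example_blkcg_data *blkcg_to_example(struct blkcg *blkcg)
    {
            struct blkcg_policy_data *cpd = blkcg_to_cpd(blkcg, &example_policy);

            /* NULL until the policy's cpd has been allocated for this blkcg */
            return cpd ? container_of(cpd, struct example_blkcg_data, cpd) : NULL;
    }

The real blk-ioprio code skips the NULL check, presumably because it only converts once the policy's cpd is known to exist; the check here is just defensive.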
blk-cgroup.h
    60: struct blkcg *blkcg;  [member]
    68: /* is this blkg online? protected by both blkcg and q locks */
    93: struct blkcg {  [struct]
    119: static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)  [in css_to_blkcg()]
    121: return css ? container_of(css, struct blkcg, css) : NULL;  [in css_to_blkcg()]
    125: * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
    126: * request_queue (q). This is used by blkcg policies which need to track
    127: * information per blkcg - q pair.
    129: * There can be multiple active blkcg policies …
    151: struct blkcg *blkcg;  [member]
    247: blkg_lookup(struct blkcg *blkcg, struct request_queue *q)  [in blkg_lookup(), argument]
    279: blkcg_to_cpd(struct blkcg *blkcg, struct blkcg_policy *pol)  [in blkcg_to_cpd(), argument]
    480: struct blkcg {  [struct]
    483: blkg_lookup(struct blkcg *blkcg, void *key)  [in blkg_lookup(), argument]
    (additional matches not shown)
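css_to_blkcg() at lines 119-121 is the usual container_of() bridge from a cgroup_subsys_state back to its blkcg; most of the .c hits in this listing reach it from cgroup-file callbacks via seq_css() or of_css(). A hypothetical show handler built from those helpers plus cgroup_path(), which blk-iocost.c uses the same way; the handler itself is made up:

    #include <linux/cgroup.h>
    #include <linux/seq_file.h>
    #include "blk-cgroup.h"          /* css_to_blkcg(); block/ private header in 6.6 */

    static int example_show(struct seq_file *sf, void *v)
    {
            struct blkcg *blkcg = css_to_blkcg(seq_css(sf));  /* css -> containing blkcg */
            char path[128];

            cgroup_path(blkcg->css.cgroup, path, sizeof(path));
            seq_printf(sf, "blkcg cgroup path: %s\n", path);
            return 0;
    }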
blk-cgroup.c
    14: * For policy-specific per-blkcg data:
    37: static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu);
    49: struct blkcg blkcg_root;
    70: * blkcg. The rstat code keeps track of which cpu has IO stats updated,
    74: * lockless lists (lhead) per blkcg are used to track the set of recently
    83: static int init_blkcg_llists(struct blkcg *blkcg)  [in init_blkcg_llists(), argument]
    87: blkcg->lhead = alloc_percpu_gfp(struct llist_head, GFP_KERNEL);  [in init_blkcg_llists()]
    88: if (!blkcg…  [in init_blkcg_llists()]
    172: struct blkcg *blkcg = blkg->blkcg;  [in __blkg_release(), local]
    292: blkcg_parent(struct blkcg *blkcg)  [in blkcg_parent(), argument]
    305: blkg_alloc(struct blkcg *blkcg, struct gendisk *disk, gfp_t gfp_mask)  [in blkg_alloc(), argument]
    375: blkg_create(struct blkcg *blkcg, struct gendisk *disk, struct blkcg_gq *new_blkg)  [in blkg_create(), argument]
    471: blkg_lookup_create(struct blkcg *blkcg, struct gendisk *disk)  [in blkg_lookup_create(), argument]
    530: struct blkcg *blkcg = blkg->blkcg;  [in blkg_destroy(), local]
    585: struct blkcg *blkcg = blkg->blkcg;  [in blkg_destroy_all(), local]
    625: struct blkcg *blkcg = css_to_blkcg(css);  [in blkcg_reset_stats(), local]
    688: blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg, u64 (*prfill)(struct seq_file *, struct blkg_policy_data *, int), const struct blkcg_policy *pol, int data, bool show_total)  [in blkcg_print_blkgs(), argument]
    997: __blkcg_rstat_flush(struct blkcg *blkcg, int cpu)  [in __blkcg_rstat_flush(), argument]
    1159: struct blkcg *blkcg = css_to_blkcg(seq_css(sf));  [in blkcg_print_stat(), local]
    1232: blkcg_destroy_blkgs(struct blkcg *blkcg)  [in blkcg_destroy_blkgs(), argument]
    1286: struct blkcg *blkcg = css_to_blkcg(blkcg_css);  [in blkcg_unpin_online(), local]
    1315: struct blkcg *blkcg = css_to_blkcg(css);  [in blkcg_css_free(), local]
    1335: struct blkcg *blkcg;  [in blkcg_css_alloc(), local]
    1598: struct blkcg *blkcg = blkg->blkcg;  [in blkcg_activate_policy(), local]
    1644: struct blkcg *blkcg = blkg->blkcg;  [in blkcg_deactivate_policy(), local]
    1666: struct blkcg *blkcg;  [in blkcg_free_all_cpd(), local]
    1685: struct blkcg *blkcg;  [in blkcg_policy_register(), local]
    1914: struct blkcg *blkcg;  [in blkcg_maybe_throttle_current(), local]
    2103: struct blkcg *blkcg = bio->bi_blkg->blkcg;  [in blk_cgroup_bio_start(), local]
    (additional matches not shown)
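Lines 70-88 refer to the 6.6 stat path: each blkcg keeps a per-CPU lockless list of recently updated blkgs so that __blkcg_rstat_flush() only walks what actually changed. A reconstruction of the setup visible in the init_blkcg_llists() fragments; this is assembled from the listing, not verbatim kernel code:

    #include <linux/llist.h>
    #include <linux/percpu.h>
    #include "blk-cgroup.h"          /* struct blkcg and its ->lhead member */

    static int example_init_llists(struct blkcg *blkcg)
    {
            int cpu;

            /* one lockless list head per CPU for queueing recently updated blkgs */
            blkcg->lhead = alloc_percpu_gfp(struct llist_head, GFP_KERNEL);
            if (!blkcg->lhead)
                    return -ENOMEM;

            for_each_possible_cpu(cpu)
                    init_llist_head(per_cpu_ptr(blkcg->lhead, cpu));
            return 0;
    }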
blk-cgroup-fc-appid.c
    6: * blkcg_set_fc_appid - set the fc_app_id field associated with the blkcg
    15: struct blkcg *blkcg;  [in blkcg_set_fc_appid(), local]
    29: blkcg = css_to_blkcg(css);  [in blkcg_set_fc_appid()]
    37: strscpy(blkcg->fc_app_id, app_id, app_id_len);  [in blkcg_set_fc_appid()]
    53: if (!bio->bi_blkg || bio->bi_blkg->blkcg->fc_app_id[0] == '\0')  [in blkcg_get_fc_appid()]
    55: return bio->bi_blkg->blkcg->fc_app_id;  [in blkcg_get_fc_appid()]
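blkcg_get_fc_appid() (line 53 onward) returns NULL when the bio has no blkg or no application id has been set, otherwise the stored string. A hypothetical caller; only blkcg_get_fc_appid() comes from the file above, and it depends on CONFIG_BLK_CGROUP_FC_APPID:

    #include <linux/bio.h>
    #include <linux/blk-cgroup.h>    /* blkcg_get_fc_appid() */

    static void example_note_appid(struct bio *bio)
    {
            char *app_id = blkcg_get_fc_appid(bio);

            if (app_id)
                    pr_debug("bio tagged with FC application id %s\n", app_id);
    }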
bfq-cgroup.c
    488: static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)  [in blkcg_to_bfqgd(), argument]
    490: return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));  [in blkcg_to_bfqgd()]
    510: bfq_pd_alloc(struct gendisk *disk, struct blkcg *blkcg, gfp_t gfp)  [in bfq_pd_alloc(), argument]
    511: struct blkcg *blkcg, gfp_t gfp)  [in bfq_pd_alloc()]
    535: struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);  [in bfq_pd_init()]
    611: bio_associate_blkg_from_css(bio, &blkg->blkcg->css);  [in bfq_bio_bfqg()]
    617: &bfqg_to_blkg(bfqd->root_group)->blkcg->css);  [in bfq_bio_bfqg()]
    754: * Move bic to blkcg, assuming that bfqd->lock is held; which makes
    785: serial_nr = bfqg_to_blkg(bfqg)->blkcg…  [in bfq_bic_update_cgroup()]
    1006: struct blkcg *blkcg = css_to_blkcg(seq_css(sf));  [in bfq_io_show_weight_legacy(), local]
    1030: struct blkcg *blkcg = css_to_blkcg(seq_css(sf));  [in bfq_io_show_weight(), local]
    1076: struct blkcg *blkcg = css_to_blkcg(css);  [in bfq_io_set_weight_legacy(), local]
    1104: struct blkcg *blkcg = css_to_blkcg(of_css(of));  [in bfq_io_set_device_weight(), local]
    (additional matches not shown)
blk-iocost.c
    200: cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup, \
    694: static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)  [in blkcg_to_iocc(), argument]
    696: return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),  [in blkcg_to_iocc()]
    1250: struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);  [in weight_updated()]
    1953: pr_cont_cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup);  [in transfer_surpluses()]
    2952: ioc_pd_alloc(struct gendisk *disk, struct blkcg *blkcg, gfp_t gfp)  [in ioc_pd_alloc(), argument]
    2953: struct blkcg *blkcg, gfp_t gfp)  [in ioc_pd_alloc()]
    2955: int levels = blkcg->css.cgroup->level + 1;  [in ioc_pd_alloc()]
    2997: iocg->level = blkg->blkcg…  [in ioc_pd_init()]
    3075: struct blkcg *blkcg = css_to_blkcg(seq_css(sf));  [in ioc_weight_show(), local]
    3087: struct blkcg *blkcg = css_to_blkcg(of_css(of));  [in ioc_weight_write(), local]
    3182: struct blkcg *blkcg = css_to_blkcg(seq_css(sf));  [in ioc_qos_show(), local]
    3376: struct blkcg *blkcg = css_to_blkcg(seq_css(sf));  [in ioc_cost_model_show(), local]
    (additional matches not shown)
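ioc_pd_alloc() sizes its per-group state by the cgroup's depth: level + 1 slots, one for every level from the root down to this cgroup (line 2955). A hedged sketch of that sizing with a made-up structure; struct example_iocg is not the real ioc_gq, only css.cgroup->level, kzalloc() and struct_size() are kernel API:

    #include <linux/overflow.h>      /* struct_size() */
    #include <linux/slab.h>
    #include "blk-cgroup.h"

    struct example_iocg {
            int level;               /* this cgroup's depth, root == 0 */
            u64 ancestors[];         /* one slot per level, root included */
    };

    static struct example_iocg *example_iocg_alloc(struct blkcg *blkcg, gfp_t gfp)
    {
            int levels = blkcg->css.cgroup->level + 1;
            struct example_iocg *iocg;

            iocg = kzalloc(struct_size(iocg, ancestors, levels), gfp);
            if (iocg)
                    iocg->level = blkcg->css.cgroup->level;
            return iocg;
    }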
blk-throttle.c
    160: if (!list_empty(&blkg->blkcg->css.children) ||  [in tg_bps_limit()]
    190: if (!list_empty(&blkg->blkcg->css.children) ||  [in tg_iops_limit()]
    230: &tg_to_blkg(__tg)->blkcg->css, "throtl " fmt, ##args);\
    338: throtl_pd_alloc(struct gendisk *disk, struct blkcg *blkcg, gfp_t gfp)  [in throtl_pd_alloc(), argument]
    339: struct blkcg *blkcg, gfp_t gfp)  [in throtl_pd_alloc()]
    1373: struct blkcg *blkcg = css_to_blkcg(of_css(of));  [in tg_set_conf(), local]
    1381: ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, &ctx);  [in tg_set_conf()]
    1565: struct blkcg *blkcg…  [in tg_set_limit(), local]
    (additional matches not shown)
blk-iolatency.c
    830: struct blkcg *blkcg = css_to_blkcg(of_css(of));  [in iolatency_set_limit(), local]
    855: ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, &ctx);  [in iolatency_set_limit()]
    965: iolatency_pd_alloc(struct gendisk *disk, struct blkcg *blkcg, gfp_t gfp)  [in iolatency_pd_alloc(), argument]
    966: struct blkcg *blkcg, gfp_t gfp)  [in iolatency_pd_alloc()]
bfq-iosched.h
    491: /* per (request_queue, blkcg) ioprio */
    494: uint64_t blkcg_serial_nr; /* the current blkcg serial */
    958: * struct bfq_group_data - per-blkcg storage for the blkio subsystem.
    1185: &bfqg_to_blkg(bfqq_group(bfqq))->blkcg->css, \
    1191: &bfqg_to_blkg(bfqg)->blkcg->css, fmt, ##args); \
bfq-iosched.c
    603: depth = bfqg_to_blkg(bfqq_group(bfqq))->blkcg->css.cgroup->level + 1;  [in bfqq_request_over_limit()]
/kernel/linux/linux-5.10/block/
blk-cgroup.c
    14: * For policy-specific per-blkcg data:
    47: struct blkcg blkcg_root;
    94: /* release the blkcg and parent blkg refs this blkg has been holding */  [in __blkg_release()]
    95: css_put(&blkg->blkcg->css);  [in __blkg_release()]
    144: * @blkcg: block cgroup the new blkg is associated with
    148: * Allocate a new blkg associating @blkcg and @q.
    150: static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,  [in blkg_alloc(), argument]
    173: blkg->blkcg = blkcg;  [in blkg_alloc()]
    203: blkg_lookup_slowpath(struct blkcg *blkcg, struct request_queue *q, bool update_hint)  [in blkg_lookup_slowpath(), argument]
    231: blkg_create(struct blkcg *blkcg, struct request_queue *q, struct blkcg_gq *new_blkg)  [in blkg_create(), argument]
    325: blkg_lookup_create(struct blkcg *blkcg, struct request_queue *q)  [in blkg_lookup_create(), argument]
    379: struct blkcg *blkcg = blkg->blkcg;  [in blkg_destroy(), local]
    429: struct blkcg *blkcg = blkg->blkcg;  [in blkg_destroy_all(), local]
    443: struct blkcg *blkcg = css_to_blkcg(css);  [in blkcg_reset_stats(), local]
    502: blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg, u64 (*prfill)(struct seq_file *, struct blkg_policy_data *, int), const struct blkcg_policy *pol, int data, bool show_total)  [in blkcg_print_blkgs(), argument]
    546: blkg_lookup_check(struct blkcg *blkcg, const struct blkcg_policy *pol, struct request_queue *q)  [in blkg_lookup_check(), argument]
    777: struct blkcg *blkcg = css_to_blkcg(css);  [in blkcg_rstat_flush(), local]
    869: struct blkcg *blkcg = css_to_blkcg(seq_css(sf));  [in blkcg_print_stat(), local]
    1009: struct blkcg *blkcg = css_to_blkcg(css);  [in blkcg_css_offline(), local]
    1029: blkcg_destroy_blkgs(struct blkcg *blkcg)  [in blkcg_destroy_blkgs(), argument]
    1061: struct blkcg *blkcg = css_to_blkcg(css);  [in blkcg_css_free(), local]
    1080: struct blkcg *blkcg;  [in blkcg_css_alloc(), local]
    1147: struct blkcg *blkcg = css_to_blkcg(css);  [in blkcg_css_online(), local]
    1263: struct blkcg *blkcg;  [in blkcg_bind(), local]
    1405: struct blkcg *blkcg = blkg->blkcg;  [in blkcg_activate_policy(), local]
    1444: struct blkcg *blkcg = blkg->blkcg;  [in blkcg_deactivate_policy(), local]
    1472: struct blkcg *blkcg;  [in blkcg_policy_register(), local]
    1551: struct blkcg *blkcg;  [in blkcg_policy_unregister(), local]
    1735: struct blkcg *blkcg;  [in blkcg_maybe_throttle_current(), local]
    (additional matches not shown)
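The __blkg_release() hits (lines 94-95) document the reference pairing a blkg holds for its lifetime: it pins its blkcg's css and, if present, its parent blkg, and drops both on release. A minimal illustration, assuming the 5.10 helpers; the wrapper function is hypothetical:

    #include <linux/blk-cgroup.h>

    static void example_drop_blkg_refs(struct blkcg_gq *blkg)
    {
            /* drop the blkcg css reference taken when the blkg was created */
            css_put(&blkg->blkcg->css);

            /* and the reference on the parent blkg, if any */
            if (blkg->parent)
                    blkg_put(blkg->parent);
    }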
bfq-cgroup.c
    495: static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)  [in blkcg_to_bfqgd(), argument]
    497: return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));  [in blkcg_to_bfqgd()]
    523: bfq_pd_alloc(gfp_t gfp, struct request_queue *q, struct blkcg *blkcg)  [in bfq_pd_alloc(), argument]
    524: struct blkcg *blkcg)  [in bfq_pd_alloc()]
    548: struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);  [in bfq_pd_init()]
    622: bio_associate_blkg_from_css(bio, &blkg->blkcg->css);  [in bfq_bio_bfqg()]
    628: &bfqg_to_blkg(bfqd->root_group)->blkcg->css);  [in bfq_bio_bfqg()]
    700: * @blkcg: the blk-cgroup to move to.
    702: * Move bic to blkcg, assuming …
    988: struct blkcg *blkcg = css_to_blkcg(seq_css(sf));  [in bfq_io_show_weight_legacy(), local]
    1012: struct blkcg *blkcg = css_to_blkcg(seq_css(sf));  [in bfq_io_show_weight(), local]
    1058: struct blkcg *blkcg = css_to_blkcg(css);  [in bfq_io_set_weight_legacy(), local]
    1086: struct blkcg *blkcg = css_to_blkcg(of_css(of));  [in bfq_io_set_device_weight(), local]
    (additional matches not shown)
blk-iocost.c
    200: cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup, \
    702: static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)  [in blkcg_to_iocc(), argument]
    704: return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),  [in blkcg_to_iocc()]
    1193: struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);  [in weight_updated()]
    1902: pr_cont_cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup);  [in transfer_surpluses()]
    2919: ioc_pd_alloc(gfp_t gfp, struct request_queue *q, struct blkcg *blkcg)  [in ioc_pd_alloc(), argument]
    2920: struct blkcg *blkcg)  [in ioc_pd_alloc()]
    2922: int levels = blkcg->css.cgroup->level + 1;  [in ioc_pd_alloc()]
    2963: iocg->level = blkg->blkcg…  [in ioc_pd_init()]
    3047: struct blkcg *blkcg = css_to_blkcg(seq_css(sf));  [in ioc_weight_show(), local]
    3059: struct blkcg *blkcg = css_to_blkcg(of_css(of));  [in ioc_weight_write(), local]
    3148: struct blkcg *blkcg = css_to_blkcg(seq_css(sf));  [in ioc_qos_show(), local]
    3315: struct blkcg *blkcg = css_to_blkcg(seq_css(sf));  [in ioc_cost_model_show(), local]
    (additional matches not shown)
blk-throttle.c
    313: if (!list_empty(&blkg->blkcg->css.children) ||  [in tg_bps_limit()]
    343: if (!list_empty(&blkg->blkcg->css.children) ||  [in tg_iops_limit()]
    383: tg_to_blkg(__tg)->blkcg, "throtl " fmt, ##args);\
    491: throtl_pd_alloc(gfp_t gfp, struct request_queue *q, struct blkcg *blkcg)  [in throtl_pd_alloc(), argument]
    493: struct blkcg *blkcg)  [in throtl_pd_alloc()]
    1488: struct blkcg *blkcg = css_to_blkcg(of_css(of));  [in tg_set_conf(), local]
    1494: ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);  [in tg_set_conf()]
    1682: struct blkcg *blkcg…  [in tg_set_limit(), local]
    (additional matches not shown)
blk-iolatency.c
    833: struct blkcg *blkcg = css_to_blkcg(of_css(of));  [in iolatency_set_limit(), local]
    842: ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);  [in iolatency_set_limit()]
    954: iolatency_pd_alloc(gfp_t gfp, struct request_queue *q, struct blkcg *blkcg)  [in iolatency_pd_alloc(), argument]
    956: struct blkcg *blkcg)  [in iolatency_pd_alloc()]
bfq-iosched.h
    393: /* per (request_queue, blkcg) ioprio */
    396: uint64_t blkcg_serial_nr; /* the current blkcg serial */
    851: * struct bfq_group_data - per-blkcg storage for the blkio subsystem.
    1079: bfqg_to_blkg(bfqq_group(bfqq))->blkcg, \
    1086: bfqg_to_blkg(bfqg)->blkcg, fmt, ##args); \
/kernel/linux/linux-5.10/tools/cgroup/
iocost_monitor.py
    64: def blkcg_name(blkcg):
    65: return blkcg.css.cgroup.kn.name.string_().decode('utf-8')
    67: def walk(self, blkcg, q_id, parent_path):
    69: not (blkcg.css.flags.value_() & prog['CSS_ONLINE'].value_()):
    72: name = BlkgIterator.blkcg_name(blkcg)
    75: address=radix_tree_lookup(blkcg.blkg_tree.address_of_(), q_id))
    81: for c in list_for_each_entry('struct blkcg',
    82: blkcg.css.children.address_of_(), 'css.sibling'):
/kernel/linux/linux-6.6/tools/cgroup/
iocost_monitor.py
    69: def blkcg_name(blkcg):
    70: return blkcg.css.cgroup.kn.name.string_().decode('utf-8')
    72: def walk(self, blkcg, q_id, parent_path):
    74: not (blkcg.css.flags.value_() & prog['CSS_ONLINE'].value_()):
    77: name = BlkgIterator.blkcg_name(blkcg)
    80: address=radix_tree_lookup(blkcg.blkg_tree.address_of_(), q_id))
    86: for c in list_for_each_entry('struct blkcg',
    87: blkcg.css.children.address_of_(), 'css.sibling'):
/kernel/linux/linux-5.10/mm/
backing-dev.c
    375: * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, and memcg->cgwb_list.
    394: struct blkcg *blkcg = css_to_blkcg(wb->blkcg_css);  [in cgwb_release_workfn(), local]
    404: blkcg_unpin_online(blkcg);  [in cgwb_release_workfn()]
    440: struct blkcg *blkcg;  [in cgwb_create(), local]
    448: blkcg = css_to_blkcg(blkcg_css);  [in cgwb_create()]
    450: blkcg_cgwb_list = &blkcg->cgwb_list;  [in cgwb_create()]
    452: /* look up again under lock and discard on blkcg mismatch */  [in cgwb_create()]
    503: blkcg_pin_online(blkcg);  [in cgwb_create()]
    669: wb_blkcg_offline(struct blkcg *blkcg)  [in wb_blkcg_offline(), argument]
    (additional matches not shown)
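The cgwb hits pair blkcg_pin_online() at writeback-structure creation (line 503) with blkcg_unpin_online() in the release work (line 404), so a blkcg with live cgroup-writeback state stays pinned online. A bare-bones illustration of that pairing, assuming the 5.10 helpers listed in blk-cgroup.h above; both wrappers are hypothetical:

    #include <linux/blk-cgroup.h>    /* blkcg_pin_online(), blkcg_unpin_online() */

    static void example_wb_attach(struct blkcg *blkcg)
    {
            /* keep the blkcg pinned online while the writeback structure exists */
            blkcg_pin_online(blkcg);
    }

    static void example_wb_release(struct blkcg *blkcg)
    {
            /* the last unpin lets deferred blkg destruction proceed */
            blkcg_unpin_online(blkcg);
    }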
/kernel/linux/linux-5.10/kernel/trace/
blktrace.c
    148: void __trace_note_message(struct blk_trace *bt, struct blkcg *blkcg,  [in __trace_note_message(), argument]
    174: blkcg = NULL;  [in __trace_note_message()]
    177: blkcg ? cgroup_id(blkcg->css.cgroup) : 1);  [in __trace_note_message()]