Lines matching refs: blkcg

14  * For policy-specific per-blkcg data:
47 struct blkcg blkcg_root;
94 /* release the blkcg and parent blkg refs this blkg has been holding */
95 css_put(&blkg->blkcg->css);
144 * @blkcg: block cgroup the new blkg is associated with
148 * Allocate a new blkg associating @blkcg and @q.
150 static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
173 blkg->blkcg = blkcg;
187 pd = pol->pd_alloc_fn(gfp_mask, q, blkcg);
203 struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
214 blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
218 rcu_assign_pointer(blkcg->blkg_hint, blkg);
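The blkg_lookup_slowpath() hits above (radix_tree_lookup() followed by rcu_assign_pointer() into blkg_hint) are the slow path caching its result so the next fast-path lookup for the same queue can skip the tree walk. Below is a minimal user-space sketch of that hint-caching pattern; blkcg_model, blkg_model, the fixed-size blkg_index array and the function name are simplified stand-ins, not the kernel definitions, and RCU is omitted.

#include <stddef.h>

/* Simplified stand-ins for the kernel structures; not the real definitions. */
struct blkg_model { int queue_id; };

struct blkcg_model {
    struct blkg_model *blkg_index[64]; /* stand-in for blkcg->blkg_tree */
    struct blkg_model *blkg_hint;      /* last blkg found by the slow path */
};

/* Slow path: consult the per-blkcg index and cache the hit in blkg_hint. */
static struct blkg_model *lookup_slowpath_model(struct blkcg_model *blkcg, int qid)
{
    struct blkg_model *blkg = NULL;

    if (qid >= 0 && qid < 64)
        blkg = blkcg->blkg_index[qid];
    if (blkg)
        blkcg->blkg_hint = blkg;   /* kernel uses rcu_assign_pointer() here */
    return blkg;
}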
231 static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
247 /* blkg holds a reference to blkcg */
248 if (!css_tryget_online(&blkcg->css)) {
255 new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
264 if (blkcg_parent(blkcg)) {
265 blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
282 spin_lock(&blkcg->lock);
283 ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
285 hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
296 spin_unlock(&blkcg->lock);
306 css_put(&blkcg->css);
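Read together, the blkg_create() hits outline the creation path: pin the blkcg's css with css_tryget_online(), allocate the blkg, wire up the parent, insert it into the radix tree and the blkcg_node hash list under blkcg->lock, and drop the css reference again on failure. A condensed user-space model of that pin/alloc/unwind shape follows; css_model, blkg_model2 and the *_model helpers are hypothetical stand-ins, and the locking and list insertion are reduced to a comment.

#include <stdbool.h>
#include <stdlib.h>

struct css_model { int refcnt; bool online; };
struct blkg_model2 { struct css_model *css; struct blkg_model2 *parent; };

static bool css_tryget_online_model(struct css_model *css)
{
    if (!css->online)
        return false;
    css->refcnt++;
    return true;
}

static void css_put_model(struct css_model *css) { css->refcnt--; }

/* Create a blkg for @css: pin the css first, allocate, then link; on any
 * failure undo the pin so the css can go away. */
static struct blkg_model2 *blkg_create_model(struct css_model *css,
                                             struct blkg_model2 *parent)
{
    struct blkg_model2 *blkg;

    if (!css_tryget_online_model(css))      /* blkg holds a reference to blkcg */
        return NULL;

    blkg = calloc(1, sizeof(*blkg));
    if (!blkg) {
        css_put_model(css);                 /* unwind the pin on failure */
        return NULL;
    }
    blkg->css = css;
    blkg->parent = parent;
    /* the kernel additionally inserts into blkcg->blkg_tree and
     * blkcg->blkg_list here, under blkcg->lock */
    return blkg;
}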
314 * @blkcg: blkcg of interest
317 * Lookup blkg for the @blkcg - @q pair. If it doesn't exist, try to
325 static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
333 blkg = blkg_lookup(blkcg, q);
338 blkg = __blkg_lookup(blkcg, q, true);
343 * Create blkgs walking down from blkcg_root to @blkcg, so that all
348 struct blkcg *pos = blkcg;
349 struct blkcg *parent = blkcg_parent(blkcg);
368 if (pos == blkcg)
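The loop in blkg_lookup_create() implements the "Create blkgs walking down from blkcg_root to @blkcg" comment: each pass climbs to the highest ancestor that still lacks a blkg and creates one there, so every non-root blkg can find its parent. A sketch of that walk under simplified assumptions; cg_model, has_blkg and create_blkg_for() are invented stand-ins for the cgroup tree, __blkg_lookup() and blkg_create().

#include <stdbool.h>

struct cg_model {
    struct cg_model *parent;   /* NULL for the root */
    bool has_blkg;             /* stand-in for __blkg_lookup() succeeding */
};

/* Hypothetical per-level creation hook. */
static void create_blkg_for(struct cg_model *cg) { cg->has_blkg = true; }

/* Ensure every ancestor of @target has a blkg before @target itself does:
 * each iteration climbs to the highest ancestor that still lacks a blkg
 * and creates it there, i.e. creation proceeds top-down from the root. */
static void lookup_create_model(struct cg_model *target)
{
    while (!target->has_blkg) {
        struct cg_model *pos = target;

        while (pos->parent && !pos->parent->has_blkg)
            pos = pos->parent;

        create_blkg_for(pos);
    }
}

The blkg_conf_prep() hits further down (source lines 644-696) show the same walk reused on the configuration path.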
379 struct blkcg *blkcg = blkg->blkcg;
383 lockdep_assert_held(&blkcg->lock);
398 radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
407 if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
408 rcu_assign_pointer(blkcg->blkg_hint, NULL);
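The blkg_destroy() hits undo what the lookup path cached: the blkg is deleted from the radix tree and, if blkg_hint still points at it, the hint is cleared so it cannot dangle. Continuing the simplified model from the lookup sketch above (same caveats apply; freeing of the blkg itself is left out).

/* Continuation of the blkcg_model/blkg_model sketch: tear down a blkg and
 * make sure the cached hint cannot point at freed memory. */
static void blkg_destroy_model(struct blkcg_model *blkcg, int qid)
{
    struct blkg_model *blkg = (qid >= 0 && qid < 64) ? blkcg->blkg_index[qid] : NULL;

    if (!blkg)
        return;
    blkcg->blkg_index[qid] = NULL;     /* radix_tree_delete() equivalent */
    if (blkcg->blkg_hint == blkg)      /* kernel: rcu_access_pointer() check */
        blkcg->blkg_hint = NULL;       /* then rcu_assign_pointer(..., NULL) */
}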
429 struct blkcg *blkcg = blkg->blkcg;
431 spin_lock(&blkcg->lock);
433 spin_unlock(&blkcg->lock);
443 struct blkcg *blkcg = css_to_blkcg(css);
448 spin_lock_irq(&blkcg->lock);
455 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
471 spin_unlock_irq(&blkcg->lock);
487 * @blkcg: blkcg of interest
493 * This function invokes @prfill on each blkg of @blkcg if pd for the
502 void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
512 hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
546 static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
555 return __blkg_lookup(blkcg, q, true /* update_hint */);
598 * @blkcg: target block cgroup
608 int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
634 blkg = blkg_lookup_check(blkcg, pol, q);
644 * Create blkgs walking down from blkcg_root to @blkcg, so that all
648 struct blkcg *pos = blkcg;
649 struct blkcg *parent;
652 parent = blkcg_parent(blkcg);
696 if (pos == blkcg)
777 struct blkcg *blkcg = css_to_blkcg(css);
782 hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
869 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
875 cgroup_rstat_flush(blkcg->css.cgroup);
879 hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
979 * blkcg destruction is a three-stage process.
983 * to the completion of writeback associated with the blkcg. This lets us
995 * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
996 * This finally frees the blkcg.
1004 * offlined first and only once writeback associated with the blkcg has
1009 struct blkcg *blkcg = css_to_blkcg(css);
1011 /* this prevents anyone from attaching or migrating to this blkcg */
1012 wb_blkcg_offline(blkcg);
1015 blkcg_unpin_online(blkcg);
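The comment block above (source lines 979-996) describes the staged teardown: css_offline() only drops the initial "online" pin, and the blkgs are destroyed once writeback has dropped its pins as well. A small runnable model of that gate, using a plain counter in place of refcount_t online_pin and a printf in place of blkcg_destroy_blkgs(); everything here is a simplified stand-in.

#include <stdio.h>

struct blkcg_model3 {
    int online_pin;            /* stand-in for refcount_t online_pin, starts at 1 */
};

static void destroy_blkgs_model(struct blkcg_model3 *blkcg)
{
    (void)blkcg;
    printf("destroying blkgs now\n");   /* stage 2 of the teardown */
}

/* Extra users (e.g. writeback) pin while they still need the blkgs. */
static void pin_online(struct blkcg_model3 *blkcg)   { blkcg->online_pin++; }

/* css_offline drops the initial pin; the last unpin triggers destruction. */
static void unpin_online(struct blkcg_model3 *blkcg)
{
    if (--blkcg->online_pin == 0)
        destroy_blkgs_model(blkcg);
}

int main(void)
{
    struct blkcg_model3 cg = { .online_pin = 1 };

    pin_online(&cg);     /* writeback still in flight */
    unpin_online(&cg);   /* css_offline: nothing destroyed yet */
    unpin_online(&cg);   /* writeback done: blkgs destroyed here */
    return 0;
}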
1020 * @blkcg: blkcg of interest
1022 * blkgs should be removed while holding both q and blkcg locks. As blkcg lock
1024 * Destroying the blkgs releases the reference held on the blkcg's css allowing
1027 * This is the blkcg counterpart of ioc_release_fn().
1029 void blkcg_destroy_blkgs(struct blkcg *blkcg)
1033 spin_lock_irq(&blkcg->lock);
1035 while (!hlist_empty(&blkcg->blkg_list)) {
1036 struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
1046 spin_unlock_irq(&blkcg->lock);
1048 spin_lock_irq(&blkcg->lock);
1056 spin_unlock_irq(&blkcg->lock);
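The lock/unlock pairs inside blkcg_destroy_blkgs() reflect its lock-ordering constraint: each blkg must be torn down under both the queue lock and blkcg->lock, but since blkcg->lock is taken first the queue lock can only be trylocked; on contention the blkcg lock is dropped, the CPU yields, and the loop retries. A user-space sketch of that trylock-and-back-off loop with pthread mutexes; the item list and lock names are generic stand-ins, not the kernel types.

#include <pthread.h>
#include <sched.h>

struct item { struct item *next; pthread_mutex_t *second_lock; };

/* Drain @head, where each removal must hold both @first_lock and the item's
 * own second_lock, and lock order forbids blocking on the second lock while
 * holding the first: trylock it, and back off by dropping @first_lock. */
static void drain_with_trylock(pthread_mutex_t *first_lock, struct item **head)
{
    pthread_mutex_lock(first_lock);
    while (*head) {
        struct item *it = *head;

        if (pthread_mutex_trylock(it->second_lock) == 0) {
            *head = it->next;                 /* stand-in for blkg_destroy() */
            pthread_mutex_unlock(it->second_lock);
        } else {
            /* contended: release, let the other side run, retry */
            pthread_mutex_unlock(first_lock);
            sched_yield();
            pthread_mutex_lock(first_lock);
        }
    }
    pthread_mutex_unlock(first_lock);
}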
1061 struct blkcg *blkcg = css_to_blkcg(css);
1066 list_del(&blkcg->all_blkcgs_node);
1069 if (blkcg->cpd[i])
1070 blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1074 kfree(blkcg);
1080 struct blkcg *blkcg;
1087 blkcg = &blkcg_root;
1089 blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
1090 if (!blkcg) {
1114 blkcg->cpd[i] = cpd;
1115 cpd->blkcg = blkcg;
1121 spin_lock_init(&blkcg->lock);
1122 refcount_set(&blkcg->online_pin, 1);
1123 INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
1124 INIT_HLIST_HEAD(&blkcg->blkg_list);
1126 INIT_LIST_HEAD(&blkcg->cgwb_list);
1128 list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
1131 return &blkcg->css;
1135 if (blkcg->cpd[i])
1136 blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1138 if (blkcg != &blkcg_root)
1139 kfree(blkcg);
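blkcg_css_alloc() allocates the blkcg (or reuses blkcg_root for the first css), then allocates per-blkcg policy data (cpd) for every registered policy, and on any failure frees whatever was already allocated before bailing out. A simplified user-space model of that allocate-then-unwind shape; the blkcg_root special case, locking and list insertion are omitted, and cpd_alloc_model[] is a hypothetical stand-in for the blkcg_policy[] table's cpd_alloc_fn hooks.

#include <stdlib.h>

#define NPOL_MODEL 4

struct cpd_model { int dummy; };
struct blkcg_model4 { struct cpd_model *cpd[NPOL_MODEL]; };

/* One allocator slot per policy; NULL means the slot is unregistered. */
static struct cpd_model *(*cpd_alloc_model[NPOL_MODEL])(void);

/* Allocate the blkcg and one cpd per registered policy; on failure, free
 * what was already allocated, mirroring the error path in blkcg_css_alloc(). */
static struct blkcg_model4 *css_alloc_model(void)
{
    struct blkcg_model4 *blkcg = calloc(1, sizeof(*blkcg));
    int i;

    if (!blkcg)
        return NULL;

    for (i = 0; i < NPOL_MODEL; i++) {
        if (!cpd_alloc_model[i])
            continue;                  /* policy slot not registered */
        blkcg->cpd[i] = cpd_alloc_model[i]();
        if (!blkcg->cpd[i])
            goto free_pd_blkcg;
    }
    return blkcg;

free_pd_blkcg:
    for (i--; i >= 0; i--)
        free(blkcg->cpd[i]);           /* kernel calls cpd_free_fn() */
    free(blkcg);
    return NULL;
}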
1147 struct blkcg *blkcg = css_to_blkcg(css);
1148 struct blkcg *parent = blkcg_parent(blkcg);
1151 * blkcg_pin_online() is used to delay blkcg offline so that blkgs
1161 * blkcg_init_queue - initialize blkcg part of request queue
1164 * Called from blk_alloc_queue(). Responsible for initializing blkcg
1218 * blkcg_exit_queue - exit and release blkcg part of request_queue
1221 * Called from blk_exit_queue(). Responsible for exiting blkcg part.
1263 struct blkcg *blkcg;
1268 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
1269 if (blkcg->cpd[pol->plid])
1270 pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
1306 * blkcg_activate_policy - activate a blkcg policy on a request_queue
1308 * @pol: blkcg policy to activate
1314 * from IO path. Update of each blkg is protected by both queue and blkcg
1349 blkg->blkcg);
1367 blkg->blkcg);
1405 struct blkcg *blkcg = blkg->blkcg;
1407 spin_lock(&blkcg->lock);
1412 spin_unlock(&blkcg->lock);
1421 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
1423 * @pol: blkcg policy to deactivate
1444 struct blkcg *blkcg = blkg->blkcg;
1446 spin_lock(&blkcg->lock);
1453 spin_unlock(&blkcg->lock);
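blkcg_activate_policy() gives every existing blkg a per-blkg policy data (pd) via pd_alloc_fn(), and its error path, like blkcg_deactivate_policy(), strips the pds again under blkcg->lock. A reduced model of that attach/rollback/detach pattern over a plain linked list; the types, names and the omitted locking are simplifications, not the kernel code.

#include <stdlib.h>

struct pd_model { int plid; };
struct blkg_model6 { struct pd_model *pd[4]; struct blkg_model6 *next; };

/* Activate policy @plid: attach a pd to every blkg; on allocation failure,
 * detach the ones attached so far and report the error. */
static int activate_policy_model(struct blkg_model6 *blkgs, int plid)
{
    struct blkg_model6 *blkg, *p;

    for (blkg = blkgs; blkg; blkg = blkg->next) {
        blkg->pd[plid] = calloc(1, sizeof(*blkg->pd[plid]));
        if (!blkg->pd[plid])
            goto rollback;
        blkg->pd[plid]->plid = plid;
    }
    return 0;

rollback:
    /* free only the pds attached before the failure point */
    for (p = blkgs; p != blkg; p = p->next) {
        free(p->pd[plid]);             /* kernel calls pd_free_fn() */
        p->pd[plid] = NULL;
    }
    return -1;
}

/* Deactivate: strip the pd from every blkg again. */
static void deactivate_policy_model(struct blkg_model6 *blkgs, int plid)
{
    struct blkg_model6 *blkg;

    for (blkg = blkgs; blkg; blkg = blkg->next) {
        free(blkg->pd[plid]);
        blkg->pd[plid] = NULL;
    }
}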
1464 * blkcg_policy_register - register a blkcg policy
1465 * @pol: blkcg policy to register
1467 * Register @pol with blkcg core. Might sleep and @pol may be modified on
1472 struct blkcg *blkcg;
1499 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1506 blkcg->cpd[pol->plid] = cpd;
1507 cpd->blkcg = blkcg;
1528 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1529 if (blkcg->cpd[pol->plid]) {
1530 pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1531 blkcg->cpd[pol->plid] = NULL;
1544 * blkcg_policy_unregister - unregister a blkcg policy
1545 * @pol: blkcg policy to unregister
1551 struct blkcg *blkcg;
1568 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1569 if (blkcg->cpd[pol->plid]) {
1570 pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1571 blkcg->cpd[pol->plid] = NULL;
1725 * we can be set_notify_resume() for reasons other than blkcg throttling, so we
1735 struct blkcg *blkcg;
1748 blkcg = css_to_blkcg(css);
1750 blkcg = css_to_blkcg(task_css(current, io_cgrp_id));
1752 if (!blkcg)
1754 blkg = blkg_lookup(blkcg, q);
1945 cgroup_rstat_updated(bio->bi_blkg->blkcg->css.cgroup, cpu);