Lines Matching refs:blkcg

14  * For policy-specific per-blkcg data:
37 static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu);
49 struct blkcg blkcg_root;
70 * blkcg. The rstat code keeps track of which cpu has IO stats updated,
74 * lockless lists (lhead) per blkcg are used to track the set of recently
83 static int init_blkcg_llists(struct blkcg *blkcg)
87 blkcg->lhead = alloc_percpu_gfp(struct llist_head, GFP_KERNEL);
88 if (!blkcg->lhead)
92 init_llist_head(per_cpu_ptr(blkcg->lhead, cpu));
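
The four fragments above (lines 83-92) outline the allocator for the per-blkcg, per-CPU lockless list heads used by the rstat flushing scheme. A minimal sketch of that helper, with the -ENOMEM return and the for_each_possible_cpu() loop filled in as assumptions, is:

	/* one llist_head per possible CPU, each starting out empty */
	static int init_blkcg_llists(struct blkcg *blkcg)
	{
		int cpu;

		blkcg->lhead = alloc_percpu_gfp(struct llist_head, GFP_KERNEL);
		if (!blkcg->lhead)
			return -ENOMEM;

		for_each_possible_cpu(cpu)
			init_llist_head(per_cpu_ptr(blkcg->lhead, cpu));
		return 0;
	}
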
172 struct blkcg *blkcg = blkg->blkcg;
185 __blkcg_rstat_flush(blkcg, cpu);
187 /* release the blkcg and parent blkg refs this blkg has been holding */
188 css_put(&blkg->blkcg->css);
240 * a dedicated per-blkcg work item to avoid such priority inversions.
271 * bio_blkcg_css - return the blkcg CSS associated with a bio
274 * This returns the CSS for the blkcg associated with a bio, or %NULL if not
282 return &bio->bi_blkg->blkcg->css;
287 * blkcg_parent - get the parent of a blkcg
288 * @blkcg: blkcg of interest
290 * Return the parent blkcg of @blkcg. Can be called anytime.
292 static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
294 return css_to_blkcg(blkcg->css.parent);
299 * @blkcg: block cgroup the new blkg is associated with
303  * Allocate a new blkg associating @blkcg and @q.
305 static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
325 blkg->blkcg = blkcg;
346 pd = pol->pd_alloc_fn(disk, blkcg, gfp_mask);
375 static struct blkcg_gq *blkg_create(struct blkcg *blkcg, struct gendisk *disk,
389 /* blkg holds a reference to blkcg */
390 if (!css_tryget_online(&blkcg->css)) {
397 new_blkg = blkg_alloc(blkcg, disk, GFP_NOWAIT | __GFP_NOWARN);
406 if (blkcg_parent(blkcg)) {
407 blkg->parent = blkg_lookup(blkcg_parent(blkcg), disk->queue);
424 spin_lock(&blkcg->lock);
425 ret = radix_tree_insert(&blkcg->blkg_tree, disk->queue->id, blkg);
427 hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
441 spin_unlock(&blkcg->lock);
451 css_put(&blkcg->css);
460 * @blkcg: blkcg of interest
463 * Lookup blkg for the @blkcg - @disk pair. If it doesn't exist, try to
471 static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
480 blkg = blkg_lookup(blkcg, q);
485 blkg = blkg_lookup(blkcg, q);
487 if (blkcg != &blkcg_root &&
488 blkg != rcu_dereference(blkcg->blkg_hint))
489 rcu_assign_pointer(blkcg->blkg_hint, blkg);
494 * Create blkgs walking down from blkcg_root to @blkcg, so that all
499 struct blkcg *pos = blkcg;
500 struct blkcg *parent = blkcg_parent(blkcg);
519 if (pos == blkcg)
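
The blkg_lookup_create() fragments above (lines 480-519) hint at the creation order: blkgs are created walking down from blkcg_root toward @blkcg so that every non-root blkg can reach its parent. A sketch of the core loop, with the ret_blkg fallback and the error handling filled in as assumptions, is:

	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);
		struct blkcg_gq *ret_blkg = q->root_blkg;

		/* climb until an ancestor that already has a blkg is found */
		while (parent) {
			blkg = blkg_lookup(parent, q);
			if (blkg) {
				ret_blkg = blkg;	/* closest existing blkg */
				break;
			}
			pos = parent;
			parent = blkcg_parent(parent);
		}

		/* create one level and retry until @blkcg itself is reached */
		blkg = blkg_create(pos, disk, NULL);
		if (IS_ERR(blkg))
			return ret_blkg;
		if (pos == blkcg)
			return blkg;
	}
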
530 struct blkcg *blkcg = blkg->blkcg;
534 lockdep_assert_held(&blkcg->lock);
557 radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
565 if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
566 rcu_assign_pointer(blkcg->blkg_hint, NULL);
585 struct blkcg *blkcg = blkg->blkcg;
590 spin_lock(&blkcg->lock);
592 spin_unlock(&blkcg->lock);
625 struct blkcg *blkcg = css_to_blkcg(css);
630 spin_lock_irq(&blkcg->lock);
637 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
658 spin_unlock_irq(&blkcg->lock);
673 * @blkcg: blkcg of interest
679 * This function invokes @prfill on each blkg of @blkcg if pd for the
688 void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
698 hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
756 * explicitly if bdev access is needed without resolving the blkcg / policy part
799 * @blkcg: target block cgroup
812 int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
843 blkg = blkg_lookup(blkcg, q);
848 * Create blkgs walking down from blkcg_root to @blkcg, so that all
852 struct blkcg *pos = blkcg;
853 struct blkcg *parent;
856 parent = blkcg_parent(blkcg);
898 if (pos == blkcg)
997 static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu)
999 struct llist_head *lhead = per_cpu_ptr(blkcg->lhead, cpu);
1159 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
1165 cgroup_rstat_flush(blkcg->css.cgroup);
1168 hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
1201 * blkcg destruction is a three-stage process.
1205 * to the completion of writeback associated with the blkcg. This lets us
1217 * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
1218 * This finally frees the blkcg.
1223 * @blkcg: blkcg of interest
1225 * blkgs should be removed while holding both q and blkcg locks. As blkcg lock
1227 * Destroying the blkgs releases the reference held on the blkcg's css allowing
1230 * This is the blkcg counterpart of ioc_release_fn().
1232 static void blkcg_destroy_blkgs(struct blkcg *blkcg)
1236 spin_lock_irq(&blkcg->lock);
1238 while (!hlist_empty(&blkcg->blkg_list)) {
1239 struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
1249 spin_unlock_irq(&blkcg->lock);
1251 spin_lock_irq(&blkcg->lock);
1259 spin_unlock_irq(&blkcg->lock);
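
The blkcg_destroy_blkgs() fragments above (lines 1236-1259) drop and retake blkcg->lock inside the loop because that lock nests inside the queue lock. A sketch of the loop, with the trylock and cond_resched() details filled in as assumptions beyond what the listing shows, is:

	spin_lock_irq(&blkcg->lock);
	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						    struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		/*
		 * blkcg->lock nests inside q->queue_lock, so only a trylock
		 * is safe here; on contention, or when a reschedule is due,
		 * back off and retry.
		 */
		if (need_resched() || !spin_trylock(&q->queue_lock)) {
			spin_unlock_irq(&blkcg->lock);
			cond_resched();
			spin_lock_irq(&blkcg->lock);
			continue;
		}

		blkg_destroy(blkg);
		spin_unlock(&q->queue_lock);
	}
	spin_unlock_irq(&blkcg->lock);
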
1264 * @blkcg_css: blkcg of interest
1266 * While pinned, a blkcg is kept online. This is primarily used to
1277 * @blkcg_css: blkcg of interest
1282 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
1286 struct blkcg *blkcg = css_to_blkcg(blkcg_css);
1289 if (!refcount_dec_and_test(&blkcg->online_pin))
1291 blkcg_destroy_blkgs(blkcg);
1292 blkcg = blkcg_parent(blkcg);
1293 } while (blkcg);
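
Lines 1286-1293 are the unpin path: when the last online pin on a blkcg is dropped, its blkgs are destroyed and the walk continues up the parent chain, since each online child holds a pin on its parent. A sketch, with the function name blkcg_unpin_online() assumed from the kerneldoc context above, is:

	void blkcg_unpin_online(struct cgroup_subsys_state *blkcg_css)
	{
		struct blkcg *blkcg = css_to_blkcg(blkcg_css);

		do {
			/* stop at the first ancestor that is still pinned */
			if (!refcount_dec_and_test(&blkcg->online_pin))
				break;
			blkcg_destroy_blkgs(blkcg);
			blkcg = blkcg_parent(blkcg);
		} while (blkcg);
	}
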
1301 * offlined first and only once writeback associated with the blkcg has
1306 /* this prevents anyone from attaching or migrating to this blkcg */
1315 struct blkcg *blkcg = css_to_blkcg(css);
1320 list_del(&blkcg->all_blkcgs_node);
1323 if (blkcg->cpd[i])
1324 blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1328 free_percpu(blkcg->lhead);
1329 kfree(blkcg);
1335 struct blkcg *blkcg;
1341 blkcg = &blkcg_root;
1343 blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
1344 if (!blkcg)
1348 if (init_blkcg_llists(blkcg))
1368 blkcg->cpd[i] = cpd;
1369 cpd->blkcg = blkcg;
1373 spin_lock_init(&blkcg->lock);
1374 refcount_set(&blkcg->online_pin, 1);
1375 INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
1376 INIT_HLIST_HEAD(&blkcg->blkg_list);
1378 INIT_LIST_HEAD(&blkcg->cgwb_list);
1380 list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
1383 return &blkcg->css;
1387 if (blkcg->cpd[i])
1388 blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1389 free_percpu(blkcg->lhead);
1391 if (blkcg != &blkcg_root)
1392 kfree(blkcg);
1400 struct blkcg *parent = blkcg_parent(css_to_blkcg(css));
1403 * blkcg_pin_online() is used to delay blkcg offline so that blkgs
1497 * blkcg_activate_policy - activate a blkcg policy on a gendisk
1499 * @pol: blkcg policy to activate
1505 * from IO path. Update of each blkg is protected by both queue and blkcg
1539 pd = pol->pd_alloc_fn(disk, blkg->blkcg,
1557 pd_prealloc = pol->pd_alloc_fn(disk, blkg->blkcg,
1565 spin_lock(&blkg->blkcg->lock);
1578 spin_unlock(&blkg->blkcg->lock);
1598 struct blkcg *blkcg = blkg->blkcg;
1601 spin_lock(&blkcg->lock);
1610 spin_unlock(&blkcg->lock);
1619 * blkcg_deactivate_policy - deactivate a blkcg policy on a gendisk
1621 * @pol: blkcg policy to deactivate
1644 struct blkcg *blkcg = blkg->blkcg;
1646 spin_lock(&blkcg->lock);
1653 spin_unlock(&blkcg->lock);
1666 struct blkcg *blkcg;
1668 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1669 if (blkcg->cpd[pol->plid]) {
1670 pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1671 blkcg->cpd[pol->plid] = NULL;
1677 * blkcg_policy_register - register a blkcg policy
1678 * @pol: blkcg policy to register
1680 * Register @pol with blkcg core. Might sleep and @pol may be modified on
1685 struct blkcg *blkcg;
1712 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1719 blkcg->cpd[pol->plid] = cpd;
1720 cpd->blkcg = blkcg;
1750 * blkcg_policy_unregister - unregister a blkcg policy
1751 * @pol: blkcg policy to unregister
1905 * we can be set_notify_resume() for reasons other than blkcg throttling, so we
1914 struct blkcg *blkcg;
1925 blkcg = css_to_blkcg(blkcg_css());
1926 if (!blkcg)
1928 blkg = blkg_lookup(blkcg, disk->queue);
2103 struct blkcg *blkcg = bio->bi_blkg->blkcg;
2112 if (!cgroup_parent(blkcg->css.cgroup))
2134 struct llist_head *lhead = this_cpu_ptr(blkcg->lhead);
2141 cgroup_rstat_updated(blkcg->css.cgroup, cpu);
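
These last fragments (lines 2103-2141) are the hot-path half of the rstat scheme sketched around line 70: a per-CPU stat update queues its iostat node on this CPU's lockless list at most once, then tells rstat which CPU has pending data so the flush side only visits CPUs that actually did IO. A sketch of that step, with the bis iostat node and its lnode/lqueued fields assumed, is:

	/* queue this CPU's iostat node once; flushing clears lqueued again */
	if (!READ_ONCE(bis->lqueued)) {
		struct llist_head *lhead = this_cpu_ptr(blkcg->lhead);

		llist_add(&bis->lnode, lhead);
		WRITE_ONCE(bis->lqueued, true);
	}
	cgroup_rstat_updated(blkcg->css.cgroup, cpu);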