Lines Matching refs:blkg

67  * blkg_free - free a blkg
68 * @blkg: blkg to free
70 * Free @blkg which may be partially allocated.
72 static void blkg_free(struct blkcg_gq *blkg)
76 if (!blkg)
80 if (blkg->pd[i])
81 blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
83 free_percpu(blkg->iostat_cpu);
84 percpu_ref_exit(&blkg->refcnt);
85 kfree(blkg);
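
For context, the matches at file lines 72-85 assemble into essentially the whole of blkg_free(). Below is a sketch of the function as it reads in block/blk-cgroup.c of this vintage; only the loop bound (BLKCG_MAX_POLS) and the early return are filled in from the surrounding file and could differ slightly by revision.

    static void blkg_free(struct blkcg_gq *blkg)
    {
        int i;

        if (!blkg)
            return;

        /* release per-policy data before the blkg itself */
        for (i = 0; i < BLKCG_MAX_POLS; i++)
            if (blkg->pd[i])
                blkcg_policy[i]->pd_free_fn(blkg->pd[i]);

        free_percpu(blkg->iostat_cpu);
        percpu_ref_exit(&blkg->refcnt);
        kfree(blkg);
    }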
90 struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
92 WARN_ON(!bio_list_empty(&blkg->async_bios));
94 /* release the blkcg and parent blkg refs this blkg has been holding */
95 css_put(&blkg->blkcg->css);
96 if (blkg->parent)
97 blkg_put(blkg->parent);
98 blkg_free(blkg);
103 * can access all the fields of blkg and assume these are valid. For
106 * Having a reference to blkg under an rcu allows accesses to only values
111 struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);
113 call_rcu(&blkg->rcu_head, __blkg_release);
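
The release path at file lines 90-113 pairs the percpu_ref release hook with an RCU callback. A sketch of both helpers, reconstructed from these matches; the unchanged lines between them are taken from the same file.

    /* runs after an RCU grace period, so lockless readers are done with @blkg */
    static void __blkg_release(struct rcu_head *rcu)
    {
        struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);

        WARN_ON(!bio_list_empty(&blkg->async_bios));

        /* release the blkcg and parent blkg refs this blkg has been holding */
        css_put(&blkg->blkcg->css);
        if (blkg->parent)
            blkg_put(blkg->parent);
        blkg_free(blkg);
    }

    /* percpu_ref release callback: defer the real teardown to RCU */
    static void blkg_release(struct percpu_ref *ref)
    {
        struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);

        call_rcu(&blkg->rcu_head, __blkg_release);
    }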
118 struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
125 /* as long as there are pending bios, @blkg can't go away */
126 spin_lock_bh(&blkg->async_bio_lock);
127 bio_list_merge(&bios, &blkg->async_bios);
128 bio_list_init(&blkg->async_bios);
129 spin_unlock_bh(&blkg->async_bio_lock);
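
File lines 118-129 are the heart of the punt worker: it drains blkg->async_bios into a private list under the lock and submits outside it. A sketch with the local list and the submit loop filled in from the surrounding function; this is the pre-plug version of the worker, later kernels batch the submissions under a blk_plug.

    static void blkg_async_bio_workfn(struct work_struct *work)
    {
        struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
                                             async_bio_work);
        struct bio_list bios = BIO_EMPTY_LIST;
        struct bio *bio;

        /* as long as there are pending bios, @blkg can't go away */
        spin_lock_bh(&blkg->async_bio_lock);
        bio_list_merge(&bios, &blkg->async_bios);
        bio_list_init(&blkg->async_bios);
        spin_unlock_bh(&blkg->async_bio_lock);

        /* submit with the lock dropped; submit_bio() may sleep */
        while ((bio = bio_list_pop(&bios)))
            submit_bio(bio);
    }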
143 * blkg_alloc - allocate a blkg
144 * @blkcg: block cgroup the new blkg is associated with
145 * @q: request_queue the new blkg is associated with
148 * Allocate a new blkg associating @blkcg and @q.
153 struct blkcg_gq *blkg;
157 blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
158 if (!blkg)
161 if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
164 blkg->iostat_cpu = alloc_percpu_gfp(struct blkg_iostat_set, gfp_mask);
165 if (!blkg->iostat_cpu)
168 blkg->q = q;
169 INIT_LIST_HEAD(&blkg->q_node);
170 spin_lock_init(&blkg->async_bio_lock);
171 bio_list_init(&blkg->async_bios);
172 INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
173 blkg->blkcg = blkcg;
175 u64_stats_init(&blkg->iostat.sync);
177 u64_stats_init(&per_cpu_ptr(blkg->iostat_cpu, cpu)->sync);
186 /* alloc per-policy data and attach it to blkg */
191 blkg->pd[i] = pd;
192 pd->blkg = blkg;
196 return blkg;
199 blkg_free(blkg);
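
File lines 143-199 cover most of blkg_alloc(). Below is a sketch of the full allocator; the error label, the per-policy loop bounds, and the blkcg_policy_enabled()/pd_alloc_fn() calls are filled in from the surrounding file and may differ slightly by revision.

    static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
                                       gfp_t gfp_mask)
    {
        struct blkcg_gq *blkg;
        int i, cpu;

        /* alloc and init base part */
        blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
        if (!blkg)
            return NULL;

        if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
            goto err_free;

        blkg->iostat_cpu = alloc_percpu_gfp(struct blkg_iostat_set, gfp_mask);
        if (!blkg->iostat_cpu)
            goto err_free;

        blkg->q = q;
        INIT_LIST_HEAD(&blkg->q_node);
        spin_lock_init(&blkg->async_bio_lock);
        bio_list_init(&blkg->async_bios);
        INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
        blkg->blkcg = blkcg;

        u64_stats_init(&blkg->iostat.sync);
        for_each_possible_cpu(cpu)
            u64_stats_init(&per_cpu_ptr(blkg->iostat_cpu, cpu)->sync);

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
            struct blkcg_policy *pol = blkcg_policy[i];
            struct blkg_policy_data *pd;

            if (!blkcg_policy_enabled(q, pol))
                continue;

            /* alloc per-policy data and attach it to blkg */
            pd = pol->pd_alloc_fn(gfp_mask, q, blkcg);
            if (!pd)
                goto err_free;

            blkg->pd[i] = pd;
            pd->blkg = blkg;
            pd->plid = i;
        }

        return blkg;

    err_free:
        blkg_free(blkg);
        return NULL;
    }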
206 struct blkcg_gq *blkg;
210 * hint can only be updated under queue_lock as otherwise @blkg
214 blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
215 if (blkg && blkg->q == q) {
218 rcu_assign_pointer(blkcg->blkg_hint, blkg);
220 return blkg;
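
File lines 206-220 belong to the radix-tree slow path of blkg lookup; the fast path checks the RCU-protected blkcg->blkg_hint first. A sketch of the slow-path helper (blkg_lookup_slowpath() in this vintage), with the update_hint branch and the lockdep assertion filled in from the surrounding function.

    struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
                                          struct request_queue *q, bool update_hint)
    {
        struct blkcg_gq *blkg;

        /*
         * Hint didn't match.  Look up from the radix tree.  Note that the
         * hint can only be updated under queue_lock as otherwise @blkg
         * could have already been removed from blkg_tree.  The caller is
         * responsible for grabbing queue_lock if @update_hint.
         */
        blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
        if (blkg && blkg->q == q) {
            if (update_hint) {
                lockdep_assert_held(&q->queue_lock);
                rcu_assign_pointer(blkcg->blkg_hint, blkg);
            }
            return blkg;
        }

        return NULL;
    }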
235 struct blkcg_gq *blkg;
241 /* request_queue is dying, do not create/recreate a blkg */
247 /* blkg holds a reference to blkcg */
261 blkg = new_blkg;
265 blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
266 if (WARN_ON_ONCE(!blkg->parent)) {
270 blkg_get(blkg->parent);
277 if (blkg->pd[i] && pol->pd_init_fn)
278 pol->pd_init_fn(blkg->pd[i]);
283 ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
285 hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
286 list_add(&blkg->q_node, &q->blkg_list);
291 if (blkg->pd[i] && pol->pd_online_fn)
292 pol->pd_online_fn(blkg->pd[i]);
295 blkg->online = true;
299 return blkg;
301 /* @blkg failed to be fully initialized, use the usual release path */
302 blkg_put(blkg);
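
File lines 235-302 trace blkg_create() from the liveness checks through parent linking, the per-policy init/online hooks, and the radix-tree insert. A sketch of the whole function; the error labels, the blkcg->lock section, and the css_tryget_online()/blkg_alloc() fallback are filled in from the surrounding file and may differ slightly by revision.

    static struct blkcg_gq *blkg_create(struct blkcg *blkcg, struct request_queue *q,
                                        struct blkcg_gq *new_blkg)
    {
        struct blkcg_gq *blkg;
        int i, ret;

        WARN_ON_ONCE(!rcu_read_lock_held());
        lockdep_assert_held(&q->queue_lock);

        /* request_queue is dying, do not create/recreate a blkg */
        if (blk_queue_dying(q)) {
            ret = -ENODEV;
            goto err_free_blkg;
        }

        /* blkg holds a reference to blkcg */
        if (!css_tryget_online(&blkcg->css)) {
            ret = -ENODEV;
            goto err_free_blkg;
        }

        /* allocate unless the caller passed in a preallocated blkg */
        if (!new_blkg) {
            new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
            if (unlikely(!new_blkg)) {
                ret = -ENOMEM;
                goto err_put_css;
            }
        }
        blkg = new_blkg;

        /* link parent; it must already exist because creation is top-down */
        if (blkcg_parent(blkcg)) {
            blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
            if (WARN_ON_ONCE(!blkg->parent)) {
                ret = -ENODEV;
                goto err_put_css;
            }
            blkg_get(blkg->parent);
        }

        /* invoke per-policy init */
        for (i = 0; i < BLKCG_MAX_POLS; i++) {
            struct blkcg_policy *pol = blkcg_policy[i];

            if (blkg->pd[i] && pol->pd_init_fn)
                pol->pd_init_fn(blkg->pd[i]);
        }

        /* insert into the blkcg's radix tree and both lists */
        spin_lock(&blkcg->lock);
        ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
        if (likely(!ret)) {
            hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
            list_add(&blkg->q_node, &q->blkg_list);

            for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];

                if (blkg->pd[i] && pol->pd_online_fn)
                    pol->pd_online_fn(blkg->pd[i]);
            }
        }
        blkg->online = true;
        spin_unlock(&blkcg->lock);

        if (!ret)
            return blkg;

        /* @blkg failed to be fully initialized, use the usual release path */
        blkg_put(blkg);
        return ERR_PTR(ret);

    err_put_css:
        css_put(&blkcg->css);
    err_free_blkg:
        blkg_free(new_blkg);
        return ERR_PTR(ret);
    }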
313 * blkg_lookup_create - lookup blkg, try to create one if not there
317 * Lookup blkg for the @blkcg - @q pair. If it doesn't exist, try to
318 * create one. blkg creation is performed recursively from blkcg_root such
319 * that all non-root blkg's have access to the parent blkg. This function
322 * Returns the blkg or the closest blkg if blkg_create() fails as it walks
328 struct blkcg_gq *blkg;
333 blkg = blkg_lookup(blkcg, q);
334 if (blkg)
335 return blkg;
338 blkg = __blkg_lookup(blkcg, q, true);
339 if (blkg)
345 * blkg to the intended blkg should blkg_create() fail.
353 blkg = __blkg_lookup(parent, q, false);
354 if (blkg) {
355 /* remember closest blkg */
356 ret_blkg = blkg;
363 blkg = blkg_create(pos, q, NULL);
364 if (IS_ERR(blkg)) {
365 blkg = ret_blkg;
374 return blkg;
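
File lines 313-374 describe the lookup-or-create walk. A sketch of blkg_lookup_create(): it first tries the lockless lookup, then, under queue_lock, creates the missing blkgs top-down from blkcg_root so every new blkg has a live parent. The loop bookkeeping (pos, parent, ret_blkg) and the locking are filled in from the surrounding file.

    static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                                               struct request_queue *q)
    {
        struct blkcg_gq *blkg;
        unsigned long flags;

        WARN_ON_ONCE(!rcu_read_lock_held());

        blkg = blkg_lookup(blkcg, q);
        if (blkg)
            return blkg;

        spin_lock_irqsave(&q->queue_lock, flags);
        blkg = __blkg_lookup(blkcg, q, true);
        if (blkg)
            goto found;

        /*
         * Create blkgs walking down from blkcg_root to @blkcg, so that all
         * non-root blkgs have access to their parents.  Returns the closest
         * blkg to the intended blkg should blkg_create() fail.
         */
        while (true) {
            struct blkcg *pos = blkcg;
            struct blkcg *parent = blkcg_parent(blkcg);
            struct blkcg_gq *ret_blkg = q->root_blkg;

            while (parent) {
                blkg = __blkg_lookup(parent, q, false);
                if (blkg) {
                    /* remember closest blkg */
                    ret_blkg = blkg;
                    break;
                }
                pos = parent;
                parent = blkcg_parent(parent);
            }

            blkg = blkg_create(pos, q, NULL);
            if (IS_ERR(blkg)) {
                blkg = ret_blkg;
                break;
            }
            if (pos == blkcg)
                break;
        }

    found:
        spin_unlock_irqrestore(&q->queue_lock, flags);
        return blkg;
    }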
377 static void blkg_destroy(struct blkcg_gq *blkg)
379 struct blkcg *blkcg = blkg->blkcg;
382 lockdep_assert_held(&blkg->q->queue_lock);
386 WARN_ON_ONCE(list_empty(&blkg->q_node));
387 WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
392 if (blkg->pd[i] && pol->pd_offline_fn)
393 pol->pd_offline_fn(blkg->pd[i]);
396 blkg->online = false;
398 radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
399 list_del_init(&blkg->q_node);
400 hlist_del_init_rcu(&blkg->blkcg_node);
403 * Both setting lookup hint to and clearing it from @blkg are done
404 * under queue_lock. If it's not pointing to @blkg now, it never
407 if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
414 percpu_ref_kill(&blkg->refcnt);
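
File lines 377-414 are most of blkg_destroy(). A sketch of the teardown: offline the per-policy data, unlink from the radix tree and both lists, clear the lookup hint if it points here, then kill the percpu ref so the RCU release path can run. The blkcg->lock lockdep assertion and the loop bounds are filled in from the surrounding file.

    static void blkg_destroy(struct blkcg_gq *blkg)
    {
        struct blkcg *blkcg = blkg->blkcg;
        int i;

        lockdep_assert_held(&blkg->q->queue_lock);
        lockdep_assert_held(&blkcg->lock);

        /* Something wrong if we are trying to remove same group twice */
        WARN_ON_ONCE(list_empty(&blkg->q_node));
        WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
            struct blkcg_policy *pol = blkcg_policy[i];

            if (blkg->pd[i] && pol->pd_offline_fn)
                pol->pd_offline_fn(blkg->pd[i]);
        }

        blkg->online = false;

        radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
        list_del_init(&blkg->q_node);
        hlist_del_init_rcu(&blkg->blkcg_node);

        /*
         * Both setting lookup hint to and clearing it from @blkg are done
         * under queue_lock.  If it's not pointing to @blkg now, it never
         * will.  Don't bother RCU-ifying this.
         */
        if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
            rcu_assign_pointer(blkcg->blkg_hint, NULL);

        /*
         * Put the reference taken at creation time so that, once all users
         * are gone, the blkg can be released.
         */
        percpu_ref_kill(&blkg->refcnt);
    }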
425 struct blkcg_gq *blkg, *n;
428 list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
429 struct blkcg *blkcg = blkg->blkcg;
432 blkg_destroy(blkg);
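
File lines 425-432 are the per-queue sweep. A sketch of blkg_destroy_all(), with the queue_lock/blkcg->lock nesting and the q->root_blkg clearing filled in from the surrounding function.

    static void blkg_destroy_all(struct request_queue *q)
    {
        struct blkcg_gq *blkg, *n;

        spin_lock_irq(&q->queue_lock);
        list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
            struct blkcg *blkcg = blkg->blkcg;

            /* blkg_destroy() requires both the queue and blkcg locks */
            spin_lock(&blkcg->lock);
            blkg_destroy(blkg);
            spin_unlock(&blkcg->lock);
        }

        q->root_blkg = NULL;
        spin_unlock_irq(&q->queue_lock);
    }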
444 struct blkcg_gq *blkg;
455 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
458 per_cpu_ptr(blkg->iostat_cpu, cpu);
461 memset(&blkg->iostat, 0, sizeof(blkg->iostat));
466 if (blkg->pd[i] && pol->pd_reset_stats_fn)
467 pol->pd_reset_stats_fn(blkg->pd[i]);
476 const char *blkg_dev_name(struct blkcg_gq *blkg)
479 if (blkg->q->backing_dev_info->dev)
480 return bdi_dev_name(blkg->q->backing_dev_info);
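
File lines 476-480 are nearly all of blkg_dev_name(); a sketch with only the NULL fallback added (this vintage still resolves the name through the queue's backing_dev_info).

    const char *blkg_dev_name(struct blkcg_gq *blkg)
    {
        /* some drivers (e.g. floppy) instantiate a queue w/o a registered disk */
        if (blkg->q->backing_dev_info->dev)
            return bdi_dev_name(blkg->q->backing_dev_info);
        return NULL;
    }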
485 * blkcg_print_blkgs - helper for printing per-blkg data
488 * @prfill: fill function to print out a blkg
493 * This function invokes @prfill on each blkg of @blkcg if pd for the
508 struct blkcg_gq *blkg;
512 hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
513 spin_lock_irq(&blkg->q->queue_lock);
514 if (blkcg_policy_enabled(blkg->q, pol))
515 total += prfill(sf, blkg->pd[pol->plid], data);
516 spin_unlock_irq(&blkg->q->queue_lock);
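
File lines 485-516 show the body of the blkcg_print_blkgs() helper. A sketch of the whole thing, with the prototype, the RCU bracketing, and the optional "Total" line filled in from the surrounding file.

    void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
                           u64 (*prfill)(struct seq_file *,
                                         struct blkg_policy_data *, int),
                           const struct blkcg_policy *pol, int data,
                           bool show_total)
    {
        struct blkcg_gq *blkg;
        u64 total = 0;

        rcu_read_lock();
        hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
            spin_lock_irq(&blkg->q->queue_lock);
            if (blkcg_policy_enabled(blkg->q, pol))
                total += prfill(sf, blkg->pd[pol->plid], data);
            spin_unlock_irq(&blkg->q->queue_lock);
        }
        rcu_read_unlock();

        if (show_total)
            seq_printf(sf, "Total %llu\n", (unsigned long long)total);
    }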
535 const char *dname = blkg_dev_name(pd->blkg);
545 /* Performs queue bypass and policy enabled checks then looks up blkg. */
559 * blkg_conf_prep - parse and prepare for per-blkg config update
562 * Parse the device node prefix part, MAJ:MIN, of per-blkg config update
597 * blkg_conf_prep - parse and prepare for per-blkg config update
603 * Parse per-blkg config update from @input and initialize @ctx with the
604 * result. @ctx->blkg points to the blkg to be updated and @ctx->body the
614 struct blkcg_gq *blkg;
634 blkg = blkg_lookup_check(blkcg, pol, q);
635 if (IS_ERR(blkg)) {
636 ret = PTR_ERR(blkg);
640 if (blkg)
658 /* Drop locks to do new blkg allocation with GFP_KERNEL. */
677 blkg = blkg_lookup_check(pos, pol, q);
678 if (IS_ERR(blkg)) {
679 ret = PTR_ERR(blkg);
684 if (blkg) {
687 blkg = blkg_create(pos, q, new_blkg);
688 if (IS_ERR(blkg)) {
689 ret = PTR_ERR(blkg);
702 ctx->blkg = blkg;
730 * blkg_conf_finish - finish up per-blkg config update
733 * Finish up after per-blkg config update. This function must be paired
778 struct blkcg_gq *blkg;
782 hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
783 struct blkcg_gq *parent = blkg->parent;
784 struct blkg_iostat_set *bisc = per_cpu_ptr(blkg->iostat_cpu, cpu);
795 u64_stats_update_begin(&blkg->iostat.sync);
798 blkg_iostat_add(&blkg->iostat.cur, &delta);
800 u64_stats_update_end(&blkg->iostat.sync);
805 blkg_iostat_set(&delta, &blkg->iostat.cur);
806 blkg_iostat_sub(&delta, &blkg->iostat.last);
808 blkg_iostat_add(&blkg->iostat.last, &delta);
836 struct blkcg_gq *blkg = blk_queue_root_blkg(disk->queue);
859 u64_stats_update_begin(&blkg->iostat.sync);
860 blkg_iostat_set(&blkg->iostat.cur, &tmp);
861 u64_stats_update_end(&blkg->iostat.sync);
870 struct blkcg_gq *blkg;
879 hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
880 struct blkg_iostat_set *bis = &blkg->iostat;
889 spin_lock_irq(&blkg->q->queue_lock);
891 if (!blkg->online)
894 dname = blkg_dev_name(blkg);
925 if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
929 atomic_read(&blkg->use_delay),
930 (unsigned long long)atomic64_read(&blkg->delay_nsec));
937 if (!blkg->pd[i] || !pol->pd_stat_fn)
940 written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
955 spin_unlock_irq(&blkg->q->queue_lock);
982 * which offlines writeback. Here we tie the next stage of blkg destruction
990 * the blkg is put back eventually allowing blkcg_css_free() to be called.
992 * workqueue. Any submitted ios that fail to get the blkg ref will be
1036 struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
1038 struct request_queue *q = blkg->q;
1052 blkg_destroy(blkg);
1172 struct blkcg_gq *new_blkg, *blkg;
1182 /* Make sure the root blkg exists. */
1185 blkg = blkg_create(&blkcg_root, q, new_blkg);
1186 if (IS_ERR(blkg))
1188 q->root_blkg = blkg;
1214 return PTR_ERR(blkg);
1314 * from IO path. Update of each blkg is protected by both queue and blkcg
1325 struct blkcg_gq *blkg, *pinned_blkg = NULL;
1337 list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
1340 if (blkg->pd[pol->plid])
1344 if (blkg == pinned_blkg) {
1349 blkg->blkcg);
1355 * prealloc for @blkg w/ GFP_KERNEL.
1359 blkg_get(blkg);
1360 pinned_blkg = blkg;
1367 blkg->blkcg);
1374 blkg->pd[pol->plid] = pd;
1375 pd->blkg = blkg;
1381 list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
1382 pol->pd_init_fn(blkg->pd[pol->plid]);
1385 list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
1386 pol->pd_online_fn(blkg->pd[pol->plid]);
1404 list_for_each_entry(blkg, &q->blkg_list, q_node) {
1405 struct blkcg *blkcg = blkg->blkcg;
1408 if (blkg->pd[pol->plid]) {
1409 pol->pd_free_fn(blkg->pd[pol->plid]);
1410 blkg->pd[pol->plid] = NULL;
1431 struct blkcg_gq *blkg;
1443 list_for_each_entry(blkg, &q->blkg_list, q_node) {
1444 struct blkcg *blkcg = blkg->blkcg;
1447 if (blkg->pd[pol->plid]) {
1449 pol->pd_offline_fn(blkg->pd[pol->plid]);
1450 pol->pd_free_fn(blkg->pd[pol->plid]);
1451 blkg->pd[pol->plid] = NULL;
1585 struct blkcg_gq *blkg = bio->bi_blkg;
1591 if (!blkg->parent)
1594 spin_lock_bh(&blkg->async_bio_lock);
1595 bio_list_add(&blkg->async_bios, bio);
1596 spin_unlock_bh(&blkg->async_bio_lock);
1598 queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
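
File lines 1585-1598 are the producer side of the async-bio punt. A sketch of __blkcg_punt_bio_submit(), with the REQ_CGROUP_PUNT flag handling and the return values filled in from the surrounding function.

    bool __blkcg_punt_bio_submit(struct bio *bio)
    {
        struct blkcg_gq *blkg = bio->bi_blkg;

        /* consume the flag first */
        bio->bi_opf &= ~REQ_CGROUP_PUNT;

        /* never bounce for the root cgroup */
        if (!blkg->parent)
            return false;

        spin_lock_bh(&blkg->async_bio_lock);
        bio_list_add(&blkg->async_bios, bio);
        spin_unlock_bh(&blkg->async_bio_lock);

        /* blkg_async_bio_workfn() will resubmit these from process context */
        queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
        return true;
    }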
1608 static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
1610 u64 old = atomic64_read(&blkg->delay_start);
1613 if (atomic_read(&blkg->use_delay) < 0)
1622 * blkg->last_delay so we know what amount is still left to be charged
1623 * to the blkg from this point onward. blkg->last_use keeps track of
1624 * the use_delay counter. The idea is if we're unthrottling the blkg we
1630 atomic64_cmpxchg(&blkg->delay_start, old, now) == old) {
1631 u64 cur = atomic64_read(&blkg->delay_nsec);
1632 u64 sub = min_t(u64, blkg->last_delay, now - old);
1633 int cur_use = atomic_read(&blkg->use_delay);
1639 if (cur_use < blkg->last_use)
1640 sub = max_t(u64, sub, blkg->last_delay >> 1);
1649 atomic64_set(&blkg->delay_nsec, 0);
1650 blkg->last_delay = 0;
1652 atomic64_sub(sub, &blkg->delay_nsec);
1653 blkg->last_delay = cur - sub;
1655 blkg->last_use = cur_use;
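
File lines 1608-1655 contain most of the once-per-second delay decay. A sketch of blkcg_scale_delay(); the time_before64()/cmpxchg guard and the underflow branch are filled in from the surrounding function and may differ slightly by revision.

    static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
    {
        u64 old = atomic64_read(&blkg->delay_start);

        /* negative use_delay means the scaling is handled by the caller */
        if (atomic_read(&blkg->use_delay) < 0)
            return;

        /*
         * Decay at most once per second.  blkg->last_delay remembers how much
         * of the accumulated delay is still left to be charged to the blkg
         * from this point onward; blkg->last_use tracks the use_delay counter
         * so that an unthrottled blkg sheds its backlog faster.
         */
        if (time_before64(old + NSEC_PER_SEC, now) &&
            atomic64_cmpxchg(&blkg->delay_start, old, now) == old) {
            u64 cur = atomic64_read(&blkg->delay_nsec);
            u64 sub = min_t(u64, blkg->last_delay, now - old);
            int cur_use = atomic_read(&blkg->use_delay);

            /* unthrottled: subtract a larger chunk of the accumulated delay */
            if (cur_use < blkg->last_use)
                sub = max_t(u64, sub, blkg->last_delay >> 1);

            /* guard against underflow so the throttle cannot wedge itself */
            if (unlikely(cur < sub)) {
                atomic64_set(&blkg->delay_nsec, 0);
                blkg->last_delay = 0;
            } else {
                atomic64_sub(sub, &blkg->delay_nsec);
                blkg->last_delay = cur - sub;
            }
            blkg->last_use = cur_use;
        }
    }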
1665 static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
1674 while (blkg->parent) {
1675 int use_delay = atomic_read(&blkg->use_delay);
1680 blkcg_scale_delay(blkg, now);
1681 this_delay = atomic64_read(&blkg->delay_nsec);
1687 blkg = blkg->parent;
1736 struct blkcg_gq *blkg;
1754 blkg = blkg_lookup(blkcg, q);
1755 if (!blkg)
1757 if (!blkg_tryget(blkg))
1761 blkcg_maybe_throttle_blkg(blkg, use_memdelay);
1762 blkg_put(blkg);
1776 * for the blkg for this task. We do not pass the blkg because there are places
1804 * blkcg_add_delay - add delay to this blkg
1805 * @blkg: blkg of interest
1809 * Charge @delta to the blkg's current delay accumulation. This is used to
1812 void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
1814 if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
1816 blkcg_scale_delay(blkg, now);
1817 atomic64_add(delta, &blkg->delay_nsec);
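
File lines 1812-1817 are the whole body of blkcg_add_delay(): decay first, then charge. A sketch with only the early return added.

    void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
    {
        /* negative use_delay means the delay is managed by the policy itself */
        if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
            return;
        blkcg_scale_delay(blkg, now);
        atomic64_add(delta, &blkg->delay_nsec);
    }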
1821 * blkg_tryget_closest - try to get a blkg ref on the closest blkg
1825 * As the failure mode here is to walk up the blkg tree, this ensures that the
1826 * blkg->parent pointers are always valid. This returns the blkg that it ended
1832 struct blkcg_gq *blkg, *ret_blkg = NULL;
1835 blkg = blkg_lookup_create(css_to_blkcg(css), bio->bi_disk->queue);
1836 while (blkg) {
1837 if (blkg_tryget(blkg)) {
1838 ret_blkg = blkg;
1841 blkg = blkg->parent;
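
File lines 1832-1841 are the core of blkg_tryget_closest(): look up (or create) the blkg for the bio's css and queue, then walk toward the root until a reference can be taken. A sketch with the RCU bracketing and the return value filled in from the surrounding function.

    static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio,
                                                       struct cgroup_subsys_state *css)
    {
        struct blkcg_gq *blkg, *ret_blkg = NULL;

        rcu_read_lock();
        blkg = blkg_lookup_create(css_to_blkcg(css), bio->bi_disk->queue);
        while (blkg) {
            if (blkg_tryget(blkg)) {
                /* parents stay valid because a live child pins them */
                ret_blkg = blkg;
                break;
            }
            blkg = blkg->parent;
        }
        rcu_read_unlock();

        return ret_blkg;
    }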
1853 * Associate @bio with the blkg found by combining the css's blkg and the
1855 * the blkg tree. Therefore, the blkg associated can be anything between @blkg
1857 * then the remaining bios will spill to the closest alive blkg.
1859 * A reference will be taken on the blkg and will be released when @bio is
1878 * bio_associate_blkg - associate a bio with a blkg
1881 * Associate @bio with the blkg found from the bio's css and request_queue.
1882 * If one is not found, bio_lookup_blkg() creates the blkg. If a blkg is
1904 * bio_clone_blkg_association - clone blkg association from src to dst bio