Lines Matching refs:blkg (all matches appear to come from the kernel's BFQ cgroup glue, block/bfq-cgroup.c)
284 static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
286 return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
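The helper matched at 284-286 is the standard policy-data cast: blkg_to_pd() fetches the BFQ policy data hanging off the blkg, and pd_to_bfqg() recovers the enclosing bfq_group. A minimal sketch of the pair; pd_to_bfqg's body is an assumption, consistent with pd being embedded in struct bfq_group:

	static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
	{
		/* assumed: pd is embedded in struct bfq_group */
		return pd ? container_of(pd, struct bfq_group, pd) : NULL;
	}

	static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
	{
		/* look up this blkg's BFQ policy data, then cast up */
		return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
	}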
427 * Make sure that bfqg and its associated blkg do not
531 struct blkcg_gq *blkg = pd_to_blkg(pd);
532 struct bfq_group *bfqg = blkg_to_bfqg(blkg);
533 struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
535 struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);
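Lines 531-535 read like the local-variable preamble of the pd init hook: each pointer is derived from the previous one. A sketch of the chain, with the enclosing function name and everything past the matches being assumptions:

	static void bfq_pd_init(struct blkg_policy_data *pd)	/* assumed name */
	{
		struct blkcg_gq *blkg = pd_to_blkg(pd);		/* pd -> owning blkg */
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);	/* blkg -> BFQ group */
		struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
						/* request queue -> this scheduler instance */
		struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);
						/* per-blkcg BFQ data, e.g. the configured weight */

		/* ... initialize bfqg from d (entity weight, stats); elided ... */
	}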
601 struct blkcg_gq *blkg = bio->bi_blkg;
604 while (blkg) {
605 if (!blkg->online) {
606 blkg = blkg->parent;
609 bfqg = blkg_to_bfqg(blkg);
611 bio_associate_blkg_from_css(bio, &blkg->blkcg->css);
614 blkg = blkg->parent;
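Lines 601-614 assemble into a climb up the blkg tree: start at the bio's blkg, skip offline levels, stop at the first level whose bfq_group is usable, and re-associate the bio with that css. A sketch assuming the function returns the chosen group; the bfqg->online test between lines 609 and 611, the function name, and the root_group fallback are assumptions:

	static struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
	{
		struct blkcg_gq *blkg = bio->bi_blkg;
		struct bfq_group *bfqg;

		while (blkg) {
			if (!blkg->online) {
				blkg = blkg->parent;	/* skip dying levels */
				continue;
			}
			bfqg = blkg_to_bfqg(blkg);
			if (bfqg->online) {		/* assumed condition */
				/* re-point the bio at the css that will serve it */
				bio_associate_blkg_from_css(bio, &blkg->blkcg->css);
				return bfqg;
			}
			blkg = blkg->parent;
		}
		return bfqd->root_group;	/* assumed fallback */
	}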
630 * Must be called under the scheduler lock, to make sure that the blkg
632 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
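The fragment at 630-632 documents a locking precondition rather than code; one conventional way to make such a precondition checkable (hypothetical here, not claimed to be in the file):

	/* hypothetical: assert the documented precondition at function entry */
	lockdep_assert_held(&bfqd->lock);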
690 /* pin down bfqg and its associated blkg */
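The "pin down" comment at 690 suggests the caller takes a reference on both the bfq_group and its blkg before re-parenting a queue. A sketch of such a helper, assuming it matches the kernel's bfqg_and_blkg_get(), with bfqg_get() and bfqg_to_blkg() assumed to exist in the file:

	static void bfqg_and_blkg_get(struct bfq_group *bfqg)
	{
		bfqg_get(bfqg);			/* private bfqg refcount */
		blkg_get(bfqg_to_blkg(bfqg));	/* plus the blkg embedding bfqg->pd */
	}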
803 * reasons. Operations on blkg objects in blk-cgroup are
811 * the original blkg. If this is the case, then the
813 * the blkg, is useless: it does not prevent blk-cgroup code
814 * from destroying both the original blkg and all objects
816 * blkg.
818 * On the bright side, destroy operations on a blkg invoke, as
820 * blkg. And these hooks are executed with bfqd->lock held for
821 * BFQ. As a consequence, for any blkg associated with the
823 * to, we are guaranteed that such a blkg is not destroyed, and
826 * bfqd->lock held then returns a fully consistent blkg, which
832 * safely use the policy data for the involved blkg (i.e., the
833 * field bfqg->pd) to get to the blkg associated with bfqg,
834 * and then we can safely use any field of blkg. After we
835 * release bfqd->lock, even just getting blkg through this
840 * blkg data we may need for this bic, and for its associated
842 * blkg, which is used in the bfq_log_* functions.
845 * destruction on the blkg_free of the original blkg (which
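Taken together, the fragments at 803-845 are one long argument: an RCU-protected lookup may hand back a stale copy of a blkg, so merely pinning it does not help; what actually protects BFQ is that blkg destroy operations run the scheduler's hooks under bfqd->lock, so any blkg reached while holding that lock is consistent, and anything needed afterwards must be cached before the lock is dropped. An illustrative sketch of that access pattern (the wrapper name and the caching step are inventions for illustration, not the file's code):

	static struct bfq_group *bfq_lookup_bfqg_locked(struct bfq_data *bfqd,
							struct bio *bio)
	{
		struct bfq_group *bfqg;
		unsigned long flags;

		spin_lock_irqsave(&bfqd->lock, flags);
		/*
		 * Destroy hooks cannot run here, so the blkg chain is
		 * consistent; cache any blkg-derived data (e.g. a log
		 * path) now, because after unlock bfqg->pd -> blkg may
		 * dangle.
		 */
		bfqg = bfq_bio_bfqg(bfqd, bio);
		spin_unlock_irqrestore(&bfqd->lock, flags);
		return bfqg;
	}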
984 * @blkg is going offline and will be ignored by
994 struct blkcg_gq *blkg;
996 list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
997 struct bfq_group *bfqg = blkg_to_bfqg(blkg);
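Lines 994-997 walk every blkg attached to the scheduler's request queue via q->blkg_list. A sketch assuming the enclosing function is bfq_end_wr_async(), which ends weight-raising in each group's async queues and finishes with the root group; the function and callee names are assumptions:

	void bfq_end_wr_async(struct bfq_data *bfqd)
	{
		struct blkcg_gq *blkg;

		/* every blkg of this request queue is linked on q->blkg_list */
		list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
			struct bfq_group *bfqg = blkg_to_bfqg(blkg);

			bfq_end_wr_async_queues(bfqd, bfqg);
		}
		bfq_end_wr_async_queues(bfqd, bfqd->root_group);
	}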
1078 struct blkcg_gq *blkg;
1087 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
1088 struct bfq_group *bfqg = blkg_to_bfqg(blkg);
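Lines 1078-1088 are the other direction: starting from a blkcg rather than a queue, visit every blkg on blkcg->blkg_list and update its bfq_group, the pattern used when a new weight is written to the cgroup file. Sketch only; the lock and the update helper are assumptions:

	struct blkcg_gq *blkg;

	spin_lock_irq(&blkcg->lock);	/* assumed: blkg_list walked under blkcg->lock */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		if (bfqg)
			bfq_group_set_weight(bfqg, val);	/* hypothetical update step */
	}
	spin_unlock_irq(&blkcg->lock);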
1126 bfqg = blkg_to_bfqg(ctx.blkg);
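Line 1126 is the tail of the blkg_conf_prep()/blkg_conf_finish() idiom used for per-device settings written as "MAJ:MIN value". A sketch of the bracket around it; error handling is trimmed and the prep signature varies across kernel versions:

	struct blkg_conf_ctx ctx;
	struct bfq_group *bfqg;
	int ret;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_bfq, buf, &ctx);
	if (ret)
		return ret;

	bfqg = blkg_to_bfqg(ctx.blkg);	/* blkg resolved from the MAJ:MIN prefix */
	/* ... apply the parsed per-device value to bfqg (elided) ... */

	blkg_conf_finish(&ctx);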
1193 struct blkcg_gq *blkg = pd_to_blkg(pd);
1198 lockdep_assert_held(&blkg->q->queue_lock);
1201 blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
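Lines 1193-1201 show a pre-order walk over a blkg subtree, asserted to run under the queue_lock. Sketch; the loop body is an assumption:

	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *pos_blkg;

	lockdep_assert_held(&blkg->q->queue_lock);

	/* pre-order: every blkg is visited before any of its children */
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct bfq_group *pos_bfqg = blkg_to_bfqg(pos_blkg);

		/* ... per-descendant work, e.g. folding or resetting stats (assumed) ... */
	}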
1226 struct bfq_group *bfqg = blkg_to_bfqg(pd->blkg);
1244 blkg_rwstat_recursive_sum(pd->blkg, &blkcg_policy_bfq,
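Lines 1226 and 1244 belong to the prfill callbacks behind the cgroup stat files; the recursive variant folds a blkg's counters with those of all its descendants. A sketch matching the blkg_rwstat_sample API of recent kernels (older kernels return the struct by value), with the function name assumed:

	static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
						struct blkg_policy_data *pd, int off)
	{
		struct blkg_rwstat_sample sum;

		/* fold this blkg's counters with all of its descendants' */
		blkg_rwstat_recursive_sum(pd->blkg, &blkcg_policy_bfq, off, &sum);
		return __blkg_prfill_rwstat(sf, pd, &sum);
	}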