Lines matching refs:blkg in block/bfq-cgroup.c
289 static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
291 return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
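
These two lines are the whole policy-data conversion helper: blkg_to_pd() returns the blkg_policy_data that blk-cgroup allocated on this blkg for blkcg_policy_bfq, and pd_to_bfqg() is a container_of() step back to the bfq_group that embeds it. A sketch of the pair as bfq-cgroup.c defines them:

    static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
    {
            return pd ? container_of(pd, struct bfq_group, pd) : NULL;
    }

    static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
    {
            /* NULL if the bfq policy is not enabled on blkg's queue */
            return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
    }
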
434 * Make sure that bfqg and its associated blkg do not
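
In the source this comment sits in bfq_init_entity() and guards a reference grab: the entity is about to point into bfqg, so bfqg and its blkg are pinned first. The surrounding lines, reconstructed from mainline:

    if (bfqq) {
            bfqq->ioprio = bfqq->new_ioprio;
            bfqq->ioprio_class = bfqq->new_ioprio_class;
            /*
             * Make sure that bfqg and its associated blkg do not
             * disappear before entity.
             */
            bfqg_and_blkg_get(bfqg);
    }
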
544 struct blkcg_gq *blkg = pd_to_blkg(pd);
545 struct bfq_group *bfqg = blkg_to_bfqg(blkg);
546 struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
548 struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);
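
Lines 544-548 open BFQ's pd_init hook. Every conversion starts from the blkg_policy_data that blk-cgroup hands in: pd_to_blkg() recovers the owning blkg, blkg->q->elevator reaches the scheduler instance, and blkg->blkcg reaches the per-blkcg defaults. A condensed sketch, with the rest of the initialization elided:

    static void bfq_pd_init(struct blkg_policy_data *pd)
    {
            struct blkcg_gq *blkg = pd_to_blkg(pd);
            struct bfq_group *bfqg = blkg_to_bfqg(blkg);
            /* private data of the elevator serving this blkg's queue */
            struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
            struct bfq_entity *entity = &bfqg->entity;
            /* per-blkcg BFQ data: carries the cgroup's default weight */
            struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);

            entity->orig_weight = entity->weight = entity->new_weight = d->weight;
            entity->my_sched_data = &bfqg->sched_data;
            bfqg->bfqd = bfqd;
            /* ... remaining entity/bfqg fields elided ... */
    }
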
612 struct blkcg_gq *blkg = bio->bi_blkg;
615 while (blkg) {
616 if (!blkg->online) {
617 blkg = blkg->parent;
620 bfqg = blkg_to_bfqg(blkg);
622 bio_associate_blkg_from_css(bio, &blkg->blkcg->css);
625 blkg = blkg->parent;
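
Lines 612-625 are the parent walk that maps a bio to its serving group: starting from bio->bi_blkg, climb toward the root past blkgs (or bfqgs) that have gone offline, re-associate the bio with the blkcg actually chosen, and fall back to the root group if nothing in the chain is usable. The full function, matching the mainline shape:

    static struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
    {
            struct blkcg_gq *blkg = bio->bi_blkg;
            struct bfq_group *bfqg;

            while (blkg) {
                    if (!blkg->online) {
                            /* blkg is being destroyed, try its parent */
                            blkg = blkg->parent;
                            continue;
                    }
                    bfqg = blkg_to_bfqg(blkg);
                    if (bfqg->online) {
                            /* charge the bio to the group actually used */
                            bio_associate_blkg_from_css(bio, &blkg->blkcg->css);
                            return bfqg;
                    }
                    blkg = blkg->parent;
            }
            return bfqd->root_group;
    }
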
641 * Must be called under the scheduler lock, to make sure that the blkg
643 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
681 /* pin down bfqg and its associated blkg */
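
Lines 641-681 belong to bfq_bfqq_move(), whose header comment requires the scheduler lock (bfqd->lock) for exactly the blkg-consistency reasons spelled out in the long comment at lines 786-828 below. The "pin down" at line 681 is bfqg_and_blkg_get(), which takes one reference on each object; its pair is bfqg_and_blkg_put(). Both helpers as bfq-cgroup.c defines them:

    static void bfqg_and_blkg_get(struct bfq_group *bfqg)
    {
            /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
            bfqg_get(bfqg);

            blkg_get(bfqg_to_blkg(bfqg));
    }

    void bfqg_and_blkg_put(struct bfq_group *bfqg)
    {
            blkg_put(bfqg_to_blkg(bfqg));

            bfqg_put(bfqg);
    }
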
786 * reasons. Operations on blkg objects in blk-cgroup are
794 * the original blkg. If this is the case, then the
796 * the blkg, is useless: it does not prevent blk-cgroup code
797 * from destroying both the original blkg and all objects
799 * blkg.
801 * On the bright side, destroy operations on a blkg invoke, as
803 * blkg. And these hooks are executed with bfqd->lock held for
804 * BFQ. As a consequence, for any blkg associated with the
806 * to, we are guaranteed that such a blkg is not destroyed, and
809 * bfqd->lock held then returns a fully consistent blkg, which
815 * safely use the policy data for the involved blkg (i.e., the
816 * field bfqg->pd) to get to the blkg associated with bfqg,
817 * and then we can safely use any field of blkg. After we
818 * release bfqd->lock, even just getting blkg through this
823 * blkg data we may need for this bic, and for its associated
825 * blkg, which is used in the bfq_log_* functions.
828 * destruction on the blkg_free of the original blkg (which
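
Lines 786-828 are the blkg-matching fragments of one long comment in bfq_bic_update_cgroup(). Its argument, in short: blkg objects are protected by blk-cgroup's own locking, not by bfqd->lock, and an RCU blkg_lookup may even return a doomed copy of a blkg, so pinning that copy does not help; but blkg destruction first runs the scheduler's hooks under bfqd->lock, so any blkg reached while holding bfqd->lock stays consistent for as long as the lock is held; therefore BFQ copies whatever blkg data it needs (today only the path, for bfq_log_*) into the bfqg under the lock, and gives bfqg its own refcount so it outlives blkg_free(). The two statements the comment justifies, at the tail of bfq_bic_update_cgroup() in kernels of this vintage:

    /*
     * Cache what bfq_log_* needs while bfqd->lock still guarantees
     * that bfqg->pd and its blkg are consistent.
     */
    blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
    bic->blkcg_serial_nr = serial_nr;
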
966 * @blkg is going offline and will be ignored by
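
Line 966 opens the comment above bfqg_stats_xfer_dead(): once a blkg goes offline, the recursive-sum helpers skip it, so its counters are folded into the parent before they become invisible. The function as in bfq-cgroup.c:

    static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
    {
            struct bfq_group *parent;

            if (!bfqg) /* root_group */
                    return;

            parent = bfqg_parent(bfqg);

            lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);

            if (unlikely(!parent))
                    return;

            /* fold this group's counters into the parent, then zero them */
            bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
            bfqg_stats_reset(&bfqg->stats);
    }
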
976 struct blkcg_gq *blkg;
978 list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
979 struct bfq_group *bfqg = blkg_to_bfqg(blkg);
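
Lines 976-979 show the per-queue traversal: every blkg instantiated on this request_queue sits on bfqd->queue->blkg_list, linked through its q_node. This is how bfq_end_wr_async() ends weight-raising for the async queues of every group on the device:

    static void bfq_end_wr_async(struct bfq_data *bfqd)
    {
            struct blkcg_gq *blkg;

            list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
                    struct bfq_group *bfqg = blkg_to_bfqg(blkg);

                    bfq_end_wr_async_queues(bfqd, bfqg);
            }
            /* make sure the root group's async queues are covered too */
            bfq_end_wr_async_queues(bfqd, bfqd->root_group);
    }
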
1060 struct blkcg_gq *blkg;
1069 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
1070 struct bfq_group *bfqg = blkg_to_bfqg(blkg);
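
Lines 1060-1070 walk the other list a blkg is chained on: blkcg->blkg_list links, via blkcg_node, every blkg of one blkcg across all devices. bfq_io_set_weight_legacy() uses it to push a new default weight to each group under blkcg->lock; the NULL check matters because a blkg can exist on queues where the bfq policy is not enabled. Sketch following mainline (bfq_group_set_weight()'s third argument is the per-device weight, 0 here):

    static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
                                        struct cftype *cftype, u64 val)
    {
            struct blkcg *blkcg = css_to_blkcg(css);
            struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
            struct blkcg_gq *blkg;
            int ret = -ERANGE;

            if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
                    return ret;

            ret = 0;
            spin_lock_irq(&blkcg->lock);
            bfqgd->weight = (unsigned short)val;
            hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
                    struct bfq_group *bfqg = blkg_to_bfqg(blkg);

                    if (bfqg)
                            bfq_group_set_weight(bfqg, val, 0);
            }
            spin_unlock_irq(&blkcg->lock);

            return ret;
    }
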
1106 bfqg = blkg_to_bfqg(ctx.blkg);
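
Line 1106 is from the per-device weight handler. blkg_conf_prep() parses the leading MAJ:MIN of the written string, looks up (creating if necessary) the blkg for that device with blkcg_policy_bfq enabled, and returns it in ctx.blkg with the queue locked; blkg_conf_finish() undoes that. A condensed sketch of the handler around the matched line, with range/"default" validation elided (newer trees split prep into blkg_conf_init()/blkg_conf_exit(), but ctx.blkg is the same):

    static ssize_t bfq_io_set_device_weight(struct kernfs_open_file *of,
                                            char *buf, size_t nbytes, loff_t off)
    {
            struct blkcg *blkcg = css_to_blkcg(of_css(of));
            struct blkg_conf_ctx ctx;
            struct bfq_group *bfqg;
            u64 v;
            int ret;

            /* resolve MAJ:MIN to a blkg; returns with the queue locked */
            ret = blkg_conf_prep(blkcg, &blkcg_policy_bfq, buf, &ctx);
            if (ret)
                    return ret;

            ret = -EINVAL;
            if (sscanf(ctx.body, "%llu", &v) == 1) {
                    /* range and "default" keyword handling elided */
                    bfqg = blkg_to_bfqg(ctx.blkg);
                    bfq_group_set_weight(bfqg, bfqg->entity.weight, v);
                    ret = 0;
            }

            blkg_conf_finish(&ctx);
            return ret ?: nbytes;
    }
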
1173 struct blkcg_gq *blkg = pd_to_blkg(pd);
1178 lockdep_assert_held(&blkg->q->queue_lock);
1181 blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
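
Lines 1173-1181 show the subtree walk: blkg_for_each_descendant_pre() wraps css_for_each_descendant_pre(), looking up each descendant css's blkg on the same queue, so one pass visits the starting blkg and everything below it. The walk must run under the RCU read lock, and holding the queue_lock (asserted at line 1178) pins the iteration to exactly the online blkgs; blk-cgroup's own recursive-sum helpers use the same idiom. A generic sketch of the idiom inside a pd-based helper; the per-descendant body is a hypothetical placeholder:

    struct blkcg_gq *blkg = pd_to_blkg(pd);
    struct cgroup_subsys_state *pos_css;
    struct blkcg_gq *pos_blkg;

    lockdep_assert_held(&blkg->q->queue_lock);

    rcu_read_lock();
    blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
            struct bfq_group *pos_bfqg = blkg_to_bfqg(pos_blkg);

            if (!pos_blkg->online || !pos_bfqg)
                    continue;
            /* hypothetical: apply some update to each descendant group */
            bfqg_stats_reset(&pos_bfqg->stats);
    }
    rcu_read_unlock();
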
1206 struct bfq_group *bfqg = blkg_to_bfqg(pd->blkg);
1224 blkg_rwstat_recursive_sum(pd->blkg, &blkcg_policy_bfq,
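
Lines 1206 and 1224 are the two flavors of seq_file "prfill" callbacks behind BFQ's cgroup stat files: the plain one reads the group's own counters through pd->blkg, while the recursive one lets blkg_rwstat_recursive_sum() add up the whole subtree (skipping offline blkgs, which is what the transfer at line 966 compensates for). Both as in bfq-cgroup.c of this vintage:

    static u64 bfqg_prfill_sectors(struct seq_file *sf,
                                   struct blkg_policy_data *pd, int off)
    {
            struct bfq_group *bfqg = blkg_to_bfqg(pd->blkg);
            u64 sum = blkg_rwstat_total(&bfqg->stats.bytes);

            return __blkg_prfill_u64(sf, pd, sum >> 9); /* bytes -> sectors */
    }

    static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
                                            struct blkg_policy_data *pd, int off)
    {
            struct blkg_rwstat_sample sum;

            blkg_rwstat_recursive_sum(pd->blkg, &blkcg_policy_bfq, off, &sum);
            return __blkg_prfill_rwstat(sf, pd, &sum);
    }
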