Lines Matching defs:throtl_grp
64 * belongs to a throtl_grp and gets queued on itself or the parent, so
65 * incrementing the reference of the associated throtl_grp when a qnode is
72 struct throtl_grp *tg; /* tg this qnode belongs to */
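
The qnode comment above states the lifetime rule: a queued qnode pins its owning throtl_grp with a reference, so the group cannot vanish while bios from it are still parked on a parent's queue. Below is a minimal userspace sketch of that idea; the names (grp, qnode, grp_get/grp_put) are invented for illustration and are not the kernel's API.

/* sketch: a queued node keeps its owning group alive via refcounting */
#include <stdio.h>
#include <stdlib.h>

struct grp {
        int refcnt;
};

struct qnode {
        struct grp *g;          /* the group this qnode belongs to */
};

static void grp_get(struct grp *g) { g->refcnt++; }

static void grp_put(struct grp *g)
{
        if (--g->refcnt == 0) {
                printf("group freed\n");
                free(g);
        }
}

/* queuing a qnode pins its group with a reference... */
static void qnode_enqueue(struct qnode *qn) { grp_get(qn->g); }

/* ...and dequeuing drops it again */
static void qnode_dequeue(struct qnode *qn) { grp_put(qn->g); }

int main(void)
{
        struct grp *g = calloc(1, sizeof(*g));
        struct qnode qn = { .g = g };

        grp_get(g);             /* creation reference */
        qnode_enqueue(&qn);     /* the queue now pins the group */
        grp_put(g);             /* creator lets go; group survives */
        qnode_dequeue(&qn);     /* last reference dropped: freed here */
        return 0;
}
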
80 * children throtl_grp's.
86 * RB tree of active children throtl_grp's, which are sorted by
100 #define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node)
108 struct throtl_grp {
123 * throtl_grp so that local bios compete fairly with bios
125 * dispatched from this throtl_grp into its parent and will compete
235 static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
237 return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
240 static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
245 static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
254 * Return the throtl_grp @sq belongs to. If @sq is the top-level one
257 static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
260 return container_of(sq, struct throtl_grp, service_queue);
269 * A service_queue can be embedded in either a throtl_grp or throtl_data.
274 struct throtl_grp *tg = sq_to_tg(sq);
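
Several of the helpers above (pd_to_tg, blkg_to_tg, tg_to_blkg, sq_to_tg) are container_of conversions between an embedded member and its enclosing throtl_grp, and the comment quoted above notes that a service_queue may instead be embedded in throtl_data, in which case sq_to_tg yields NULL. Here is a self-contained rendition of that pattern; the struct names are invented, and only the shape matches the kernel helpers.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct service_queue {
        struct service_queue *parent_sq;        /* NULL at the top level */
};

struct group {
        int id;
        struct service_queue sq;        /* embedded member */
};

/* recover the enclosing group, or NULL if @sq is the top-level queue */
static struct group *sq_to_group(struct service_queue *sq)
{
        return sq->parent_sq ? container_of(sq, struct group, sq) : NULL;
}

int main(void)
{
        struct service_queue top = { .parent_sq = NULL };
        struct group g = { .id = 7, .sq = { .parent_sq = &top } };

        printf("%d\n", sq_to_group(&g.sq)->id);         /* 7 */
        printf("%p\n", (void *)sq_to_group(&top));      /* NULL: top level */
        return 0;
}
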
300 static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
330 static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
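
tg_bps_limit() and tg_iops_limit() read per-direction limits out of the group. The sketch below assumes a two-dimensional table indexed by direction (READ/WRITE) and limit level, with an unset low limit falling back to the max limit; the LIMIT_* layout and the fallback rule are assumptions for illustration, not a quote of the kernel logic.

#include <stdint.h>
#include <stdio.h>

enum { READ, WRITE };
enum { LIMIT_LOW, LIMIT_MAX, LIMIT_CNT };
#define BPS_UNLIMITED ((uint64_t)-1)

struct grp {
        uint64_t bps[2][LIMIT_CNT];     /* [direction][limit level] */
};

/* assumed rule: an unset (zero) low limit falls back to the max limit */
static uint64_t grp_bps_limit(struct grp *g, int rw, int level)
{
        uint64_t ret = g->bps[rw][level];

        if (level == LIMIT_LOW && ret == 0)
                ret = g->bps[rw][LIMIT_MAX];
        return ret;
}

int main(void)
{
        struct grp g = {
                .bps = { [READ]  = { 0, 1048576 },      /* no low limit set */
                         [WRITE] = { 0, BPS_UNLIMITED } },
        };

        printf("effective read low limit: %llu\n",
               (unsigned long long)grp_bps_limit(&g, READ, LIMIT_LOW));
        return 0;
}
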
372 * throtl_grp; otherwise, just "throtl".
375 struct throtl_grp *__tg = sq_to_tg((sq)); \
397 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
445 * @tg_to_put: optional out argument for throtl_grp to put
451 * When the first qnode is removed, its associated throtl_grp should be put
453 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
457 struct throtl_grp **tg_to_put)
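
The @tg_to_put convention documented above lets throtl_pop_queued() hand the reference to drop back to the caller rather than putting it on the spot, so the actual put can happen after whatever lock the pop ran under has been released. A toy version of that out-argument pattern follows; all names are illustrative.

#include <stdio.h>
#include <stdlib.h>

struct grp { int refcnt; };

static void grp_put(struct grp *g)
{
        if (--g->refcnt == 0) {
                printf("group freed\n");
                free(g);
        }
}

/* drop one reference, either inline or deferred through @g_to_put */
static void pop_one(struct grp *g, struct grp **g_to_put)
{
        if (g_to_put)
                *g_to_put = g;  /* caller puts it later, outside the lock */
        else
                grp_put(g);
}

int main(void)
{
        struct grp *g = malloc(sizeof(*g));
        struct grp *to_put = NULL;

        g->refcnt = 1;
        pop_one(g, &to_put);    /* deferred: nothing freed yet */
        if (to_put)
                grp_put(to_put);        /* now the reference is dropped */
        return 0;
}
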
495 struct throtl_grp *tg;
542 struct throtl_grp *tg = pd_to_tg(pd);
549 * behavior where limits on a given throtl_grp are applied to the
571 static void tg_update_has_rules(struct throtl_grp *tg)
573 struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
586 struct throtl_grp *tg = pd_to_tg(pd);
602 struct throtl_grp *tg = blkg_to_tg(blkg);
618 struct throtl_grp *tg = pd_to_tg(pd);
633 struct throtl_grp *tg = pd_to_tg(pd);
641 static struct throtl_grp *
663 struct throtl_grp *tg;
672 static void tg_service_queue_add(struct throtl_grp *tg)
677 struct throtl_grp *__tg;
698 static void throtl_enqueue_tg(struct throtl_grp *tg)
707 static void throtl_dequeue_tg(struct throtl_grp *tg)
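
tg_service_queue_add() inserts a group into the parent's RB tree keyed by its dispatch time, so the earliest-eligible child is always the leftmost node; throtl_enqueue_tg() and throtl_dequeue_tg() add and remove around that. The sketch below shows the same ordering invariant with a sorted singly linked list instead of an rbtree, purely to stay self-contained.

#include <stdio.h>

struct node {
        unsigned long disptime;         /* when this group may dispatch */
        struct node *next;
};

/* insert keeping the list ordered by disptime, earliest first */
static void sorted_add(struct node **head, struct node *n)
{
        struct node **p = head;

        while (*p && (*p)->disptime <= n->disptime)
                p = &(*p)->next;
        n->next = *p;
        *p = n;
}

int main(void)
{
        struct node a = { 30 }, b = { 10 }, c = { 20 }, *head = NULL;

        sorted_add(&head, &a);
        sorted_add(&head, &b);
        sorted_add(&head, &c);
        for (struct node *n = head; n; n = n->next)
                printf("%lu ", n->disptime);    /* 10 20 30 */
        printf("\n");
        return 0;
}
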
772 static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
796 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
811 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
817 static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
828 static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
837 static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
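
The slice helpers above manage a per-direction time window with a dispatch counter; trimming slides the window start forward and forgives the bytes the elapsed time already covers, so an idle group cannot bank unbounded credit. The field names and trim arithmetic below are simplified guesses, not the kernel's jiffies math.

#include <stdint.h>
#include <stdio.h>

struct slice {
        uint64_t start, end;    /* window, in arbitrary ticks */
        uint64_t bytes_disp;    /* bytes dispatched in the window */
};

/* window fully elapsed: time to start a fresh slice */
static int slice_used(const struct slice *s, uint64_t now)
{
        return now >= s->end;
}

/* slide the start forward, forgiving bytes the elapsed time covers */
static void slice_trim(struct slice *s, uint64_t now, uint64_t bytes_per_tick)
{
        uint64_t paid = (now - s->start) * bytes_per_tick;

        s->bytes_disp = s->bytes_disp > paid ? s->bytes_disp - paid : 0;
        s->start = now;
}

int main(void)
{
        struct slice s = { .start = 0, .end = 100, .bytes_disp = 5000 };

        slice_trim(&s, 10, 100);        /* 10 ticks * 100 B/tick forgiven */
        printf("bytes_disp now %llu\n", (unsigned long long)s.bytes_disp);
        printf("slice used at t=120? %d\n", slice_used(&s, 120));
        return 0;
}
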
896 static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
944 static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
995 static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
1056 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
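
tg_may_dispatch() admits a bio only if both the byte budget (tg_with_in_bps_limit) and the I/O-count budget (tg_with_in_iops_limit) of the current slice allow it, after which throtl_charge_bio() accounts the dispatched bytes and I/O against the slice. Below is a simplified model of that check-then-charge pair, with each budget computed as limit times elapsed time, a coarse stand-in for the kernel's arithmetic.

#include <stdint.h>
#include <stdio.h>

/* both budgets must allow the bio: bytes and I/O count */
static int may_dispatch(uint64_t bytes_disp, uint64_t bio_bytes,
                        uint32_t io_disp, uint64_t bps, uint32_t iops,
                        uint64_t elapsed_sec)
{
        uint64_t byte_budget = bps * elapsed_sec;
        uint64_t io_budget = (uint64_t)iops * elapsed_sec;

        return bytes_disp + bio_bytes <= byte_budget &&
               io_disp + 1 <= io_budget;
}

/* on dispatch, charge the bio against the slice counters */
static void charge_bio(uint64_t *bytes_disp, uint32_t *io_disp,
                       uint64_t bio_bytes)
{
        *bytes_disp += bio_bytes;
        *io_disp += 1;
}

int main(void)
{
        uint64_t bytes_disp = 1536 * 1024;      /* 1.5 MiB already sent */
        uint32_t io_disp = 50;

        /* 1 MiB/s and 100 iops, two seconds into the slice */
        if (may_dispatch(bytes_disp, 4096, io_disp, 1024 * 1024, 100, 2)) {
                charge_bio(&bytes_disp, &io_disp, 4096);
                printf("dispatched; %llu bytes, %u ios charged\n",
                       (unsigned long long)bytes_disp, io_disp);
        }
        return 0;
}
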
1078 * throtl_add_bio_tg - add a bio to the specified throtl_grp
1081 * @tg: the target throtl_grp
1087 struct throtl_grp *tg)
1110 static void tg_update_disptime(struct throtl_grp *tg)
1136 static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
1137 struct throtl_grp *parent_tg, bool rw)
1146 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
1150 struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
1151 struct throtl_grp *tg_to_put = NULL;
1188 static int throtl_dispatch_tg(struct throtl_grp *tg)
1226 struct throtl_grp *tg;
1255 struct throtl_grp *this_tg);
1260 * This timer is armed when a child throtl_grp with active bios becomes
1262 * the first child throtl_grp should be dispatched. This function
1266 * If the parent's parent is another throtl_grp, dispatching is propagated
1274 struct throtl_grp *tg = sq_to_tg(sq);
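
The pending-timer comment above describes upward propagation: bios dispatched at one level land on the parent's service queue, and if that parent is itself a throtl_grp the cycle repeats until the top level is reached and the bios can actually be issued. A loose sketch of that walk up the parent chain, with invented structure and fields:

#include <stdio.h>

struct sq {
        struct sq *parent;      /* NULL at the top of the hierarchy */
        int queued;             /* bios waiting at this level */
};

/* move queued bios one level up until the top is reached */
static void propagate_up(struct sq *sq)
{
        while (sq->parent) {
                sq->parent->queued += sq->queued;
                sq->queued = 0;
                sq = sq->parent;
        }
        printf("top level now holds %d bios ready to issue\n", sq->queued);
}

int main(void)
{
        struct sq top  = { NULL, 0 };
        struct sq mid  = { &top, 0 };
        struct sq leaf = { &mid, 3 };

        propagate_up(&leaf);    /* leaf -> mid -> top */
        return 0;
}
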
1369 struct throtl_grp *tg = pd_to_tg(pd);
1380 struct throtl_grp *tg = pd_to_tg(pd);
1402 static void tg_conf_updated(struct throtl_grp *tg, bool global)
1423 struct throtl_grp *this_tg = blkg_to_tg(blkg);
1424 struct throtl_grp *parent_tg;
1490 struct throtl_grp *tg;
1564 .private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
1570 .private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
1576 .private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
1582 .private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
1588 .private = offsetof(struct throtl_grp, stat_bytes),
1593 .private = offsetof(struct throtl_grp, stat_bytes),
1598 .private = offsetof(struct throtl_grp, stat_ios),
1603 .private = offsetof(struct throtl_grp, stat_ios),
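
The cftype entries above stash a field offset in .private (via offsetof) so that one generic show routine can serve several files by reading whatever field sits at that offset inside the throtl_grp. A minimal userspace rendition of the trick, with invented struct and field names:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct grp {
        uint64_t bps_rd;
        uint64_t bps_wr;
};

/* one generic accessor: read the u64 field @off bytes into the object */
static uint64_t read_u64_at(void *obj, size_t off)
{
        return *(uint64_t *)((char *)obj + off);
}

int main(void)
{
        struct grp g = { .bps_rd = 1048576, .bps_wr = 2097152 };
        size_t files[] = {
                offsetof(struct grp, bps_rd),   /* "read bps" file */
                offsetof(struct grp, bps_wr),   /* "write bps" file */
        };

        for (size_t i = 0; i < sizeof(files) / sizeof(files[0]); i++)
                printf("%llu\n",
                       (unsigned long long)read_u64_at(&g, files[i]));
        return 0;
}
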
1612 struct throtl_grp *tg = pd_to_tg(pd);
1684 struct throtl_grp *tg;
1837 static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
1849 static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
1852 struct throtl_grp *parent = tg;
1876 static bool throtl_tg_is_idle(struct throtl_grp *tg)
1902 static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
1929 static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
1942 struct throtl_grp *this_tg)
1955 struct throtl_grp *tg = blkg_to_tg(blkg);
1970 static void throtl_upgrade_check(struct throtl_grp *tg)
2001 struct throtl_grp *tg = blkg_to_tg(blkg);
2028 static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
2046 static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
2058 static void throtl_downgrade_check(struct throtl_grp *tg)
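
Judging by their names and call sites, throtl_tg_can_upgrade() and throtl_tg_can_downgrade() implement a hysteresis between the LOW and MAX limit levels: upgrade only once every group has met its low limit or gone idle, and downgrade when one falls back below it. The sketch below is a loose guess at that state machine; the thresholds and fields are invented.

#include <stdio.h>

enum level { LEVEL_LOW, LEVEL_MAX };

struct grp { int met_low_limit; int idle; };

/* assumed rule: every group must be satisfied or idle before upgrading */
static int can_upgrade(struct grp *grps, int n)
{
        for (int i = 0; i < n; i++)
                if (!grps[i].met_low_limit && !grps[i].idle)
                        return 0;       /* someone still starved: stay LOW */
        return 1;
}

int main(void)
{
        struct grp grps[] = { { 1, 0 }, { 0, 1 }, { 1, 0 } };
        enum level lvl = LEVEL_LOW;

        if (lvl == LEVEL_LOW && can_upgrade(grps, 3))
                lvl = LEVEL_MAX;
        printf("%s\n", lvl == LEVEL_MAX ? "MAX" : "LOW");
        return 0;
}
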
2121 static void blk_throtl_update_idletime(struct throtl_grp *tg)
2224 struct throtl_grp *parent = blkg_to_tg(blkg);
2245 struct throtl_grp *tg = blkg_to_tg(blkg);
2392 struct throtl_grp *tg;