Lines Matching refs:tg in block/blk-throttle.c (Linux block-layer bio throttling; cgroup io.max / io.low limits)

72 	struct throtl_grp	*tg;		/* tg this qnode belongs to */
91 unsigned long first_pending_disptime; /* disptime of the first tg */
245 static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
247 return pd_to_blkg(&tg->pd);
274 struct throtl_grp *tg = sq_to_tg(sq);
276 if (tg)
277 return tg->td;
300 static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
302 struct blkcg_gq *blkg = tg_to_blkg(tg);
309 td = tg->td;
310 ret = tg->bps[rw][td->limit_index];
314 tg->iops[rw][td->limit_index])
320 if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
321 tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
324 adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
325 ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
330 static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
332 struct blkcg_gq *blkg = tg_to_blkg(tg);
339 td = tg->td;
340 ret = tg->iops[rw][td->limit_index];
341 if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
344 tg->bps[rw][td->limit_index])
350 if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
351 tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
354 adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
357 ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
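
The tg_bps_limit()/tg_iops_limit() hits above outline how the effective ceiling is picked from the two-level limit table: read the limit for the current limit_index, and while running at LIMIT_MAX with a distinct low limit configured, offer a scaled-up low limit capped by the max. Below is a minimal userspace sketch of the bps half of that selection (the iops path is analogous). struct td_model, struct tg_model and adjusted_limit_model() are illustrative stand-ins, the kernel's exact scale growth may differ, and the zero-low-limit fallback seen in the listing is omitted.

#include <stdint.h>

enum { LIMIT_LOW, LIMIT_MAX, LIMIT_CNT };

/* hypothetical stand-ins for struct throtl_data / struct throtl_grp */
struct td_model { int limit_index; unsigned int scale; };
struct tg_model { uint64_t bps[2][LIMIT_CNT]; struct td_model *td; };

/* assumed model of throtl_adjusted_limit(): grow the low limit as
 * td->scale rises; the kernel's exact growth factor may differ */
static uint64_t adjusted_limit_model(uint64_t low, const struct td_model *td)
{
	return low + (low / 2) * td->scale;
}

static uint64_t tg_bps_limit_model(const struct tg_model *tg, int rw)
{
	const struct td_model *td = tg->td;
	uint64_t ret = tg->bps[rw][td->limit_index];

	/* running in the LIMIT_MAX state while a distinct low limit is
	 * configured: offer the scaled-up low limit, capped by the max */
	if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
	    tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
		uint64_t adj = adjusted_limit_model(tg->bps[rw][LIMIT_LOW], td);

		ret = adj < tg->bps[rw][LIMIT_MAX] ? adj : tg->bps[rw][LIMIT_MAX];
	}
	return ret;
}
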
397 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
401 qn->tg = tg;
411 * @qn->tg's reference count is bumped when @qn is activated. See the
420 blkg_get(tg_to_blkg(qn->tg));
472 *tg_to_put = qn->tg;
474 blkg_put(tg_to_blkg(qn->tg));
495 struct throtl_grp *tg;
498 tg = kzalloc_node(sizeof(*tg), gfp, q->node);
499 if (!tg)
502 if (blkg_rwstat_init(&tg->stat_bytes, gfp))
505 if (blkg_rwstat_init(&tg->stat_ios, gfp))
508 throtl_service_queue_init(&tg->service_queue);
511 throtl_qnode_init(&tg->qnode_on_self[rw], tg);
512 throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
515 RB_CLEAR_NODE(&tg->rb_node);
516 tg->bps[READ][LIMIT_MAX] = U64_MAX;
517 tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
518 tg->iops[READ][LIMIT_MAX] = UINT_MAX;
519 tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
520 tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
521 tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
522 tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
523 tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
526 tg->latency_target = DFL_LATENCY_TARGET;
527 tg->latency_target_conf = DFL_LATENCY_TARGET;
528 tg->idletime_threshold = DFL_IDLE_THRESHOLD;
529 tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;
531 return &tg->pd;
534 blkg_rwstat_exit(&tg->stat_bytes);
536 kfree(tg);
542 struct throtl_grp *tg = pd_to_tg(pd);
543 struct blkcg_gq *blkg = tg_to_blkg(tg);
545 struct throtl_service_queue *sq = &tg->service_queue;
563 tg->td = td;
567 * Set has_rules[] if @tg or any of its parents have limits configured.
571 static void tg_update_has_rules(struct throtl_grp *tg)
573 struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
574 struct throtl_data *td = tg->td;
578 tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
580 (tg_bps_limit(tg, rw) != U64_MAX ||
581 tg_iops_limit(tg, rw) != UINT_MAX));
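
The tg_update_has_rules() hits above show that a group counts as throttled when it or any ancestor has a finite limit. A stand-alone restatement of that predicate; struct rules_model and its fields are illustrative, and the limit_valid[] condition on the unlisted line in between is omitted.

#include <limits.h>
#include <stdbool.h>
#include <stdint.h>

struct rules_model {
	struct rules_model *parent;
	uint64_t bps_limit;       /* UINT64_MAX means unlimited */
	unsigned int iops_limit;  /* UINT_MAX means unlimited */
	bool has_rules;
};

static void update_has_rules_model(struct rules_model *tg)
{
	tg->has_rules = (tg->parent && tg->parent->has_rules) ||
			tg->bps_limit != UINT64_MAX ||
			tg->iops_limit != UINT_MAX;
}
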
586 struct throtl_grp *tg = pd_to_tg(pd);
591 tg_update_has_rules(tg);
602 struct throtl_grp *tg = blkg_to_tg(blkg);
604 if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
605 tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) {
618 struct throtl_grp *tg = pd_to_tg(pd);
620 tg->bps[READ][LIMIT_LOW] = 0;
621 tg->bps[WRITE][LIMIT_LOW] = 0;
622 tg->iops[READ][LIMIT_LOW] = 0;
623 tg->iops[WRITE][LIMIT_LOW] = 0;
625 blk_throtl_update_limit_valid(tg->td);
627 if (!tg->td->limit_valid[tg->td->limit_index])
628 throtl_upgrade_state(tg->td);
633 struct throtl_grp *tg = pd_to_tg(pd);
635 del_timer_sync(&tg->service_queue.pending_timer);
636 blkg_rwstat_exit(&tg->stat_bytes);
637 blkg_rwstat_exit(&tg->stat_ios);
638 kfree(tg);
663 struct throtl_grp *tg;
665 tg = throtl_rb_first(parent_sq);
666 if (!tg)
669 parent_sq->first_pending_disptime = tg->disptime;
672 static void tg_service_queue_add(struct throtl_grp *tg)
674 struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
678 unsigned long key = tg->disptime;
693 rb_link_node(&tg->rb_node, parent, node);
694 rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree,
698 static void throtl_enqueue_tg(struct throtl_grp *tg)
700 if (!(tg->flags & THROTL_TG_PENDING)) {
701 tg_service_queue_add(tg);
702 tg->flags |= THROTL_TG_PENDING;
703 tg->service_queue.parent_sq->nr_pending++;
707 static void throtl_dequeue_tg(struct throtl_grp *tg)
709 if (tg->flags & THROTL_TG_PENDING) {
710 throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
711 tg->flags &= ~THROTL_TG_PENDING;
772 static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
775 tg->bytes_disp[rw] = 0;
776 tg->io_disp[rw] = 0;
778 atomic_set(&tg->io_split_cnt[rw], 0);
786 if (time_after_eq(start, tg->slice_start[rw]))
787 tg->slice_start[rw] = start;
789 tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
790 throtl_log(&tg->service_queue,
792 rw == READ ? 'R' : 'W', tg->slice_start[rw],
793 tg->slice_end[rw], jiffies);
796 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
798 tg->bytes_disp[rw] = 0;
799 tg->io_disp[rw] = 0;
800 tg->slice_start[rw] = jiffies;
801 tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
803 atomic_set(&tg->io_split_cnt[rw], 0);
805 throtl_log(&tg->service_queue,
807 rw == READ ? 'R' : 'W', tg->slice_start[rw],
808 tg->slice_end[rw], jiffies);
811 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
814 tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
817 static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
820 throtl_set_slice_end(tg, rw, jiffy_end);
821 throtl_log(&tg->service_queue,
823 rw == READ ? 'R' : 'W', tg->slice_start[rw],
824 tg->slice_end[rw], jiffies);
828 static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
830 if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
837 static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
842 BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
849 if (throtl_slice_used(tg, rw))
860 throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
862 time_elapsed = jiffies - tg->slice_start[rw];
864 nr_slices = time_elapsed / tg->td->throtl_slice;
868 tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
872 io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
878 if (tg->bytes_disp[rw] >= bytes_trim)
879 tg->bytes_disp[rw] -= bytes_trim;
881 tg->bytes_disp[rw] = 0;
883 if (tg->io_disp[rw] >= io_trim)
884 tg->io_disp[rw] -= io_trim;
886 tg->io_disp[rw] = 0;
888 tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;
890 throtl_log(&tg->service_queue,
893 tg->slice_start[rw], tg->slice_end[rw], jiffies);
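
throtl_trim_slice() above subtracts the budget that corresponds to already-elapsed whole slices from the dispatched counters and moves slice_start forward by the same amount, so the accounting window never drifts arbitrarily far behind jiffies. A compact model of that arithmetic; struct slice_model and HZ_MODEL are assumptions, and the used-slice check plus the slice_end rounding are left out.

#include <stdint.h>

#define HZ_MODEL 1000UL   /* assumed tick rate for the example */

struct slice_model {
	uint64_t bytes_disp, bps_limit;
	unsigned int io_disp, iops_limit;
	unsigned long slice_start, throtl_slice;
};

static void trim_slice_model(struct slice_model *s, unsigned long now)
{
	unsigned long nr_slices = (now - s->slice_start) / s->throtl_slice;
	uint64_t bytes_trim;
	unsigned int io_trim;

	if (!nr_slices)
		return;

	/* budget the group was entitled to over the elapsed whole slices */
	bytes_trim = s->bps_limit * s->throtl_slice * nr_slices / HZ_MODEL;
	io_trim = (unsigned int)(s->iops_limit * s->throtl_slice * nr_slices / HZ_MODEL);

	s->bytes_disp = s->bytes_disp >= bytes_trim ? s->bytes_disp - bytes_trim : 0;
	s->io_disp = s->io_disp >= io_trim ? s->io_disp - io_trim : 0;
	s->slice_start += nr_slices * s->throtl_slice;
}
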
896 static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
910 jiffy_elapsed = jiffies - tg->slice_start[rw];
913 jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
930 if (tg->io_disp[rw] + 1 <= io_allowed) {
944 static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
958 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
962 jiffy_elapsed_rnd = tg->td->throtl_slice;
964 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
968 if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
975 extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
995 static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
1000 u64 bps_limit = tg_bps_limit(tg, rw);
1001 u32 iops_limit = tg_iops_limit(tg, rw);
1009 BUG_ON(tg->service_queue.nr_queued[rw] &&
1010 bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
1012 /* If tg->bps = -1, then BW is unlimited */
1026 if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
1027 throtl_start_new_slice(tg, rw);
1029 if (time_before(tg->slice_end[rw],
1030 jiffies + tg->td->throtl_slice))
1031 throtl_extend_slice(tg, rw,
1032 jiffies + tg->td->throtl_slice);
1036 tg->io_disp[rw] += atomic_xchg(&tg->io_split_cnt[rw], 0);
1038 if (tg_with_in_bps_limit(tg, bio, bps_limit, &bps_wait) &&
1039 tg_with_in_iops_limit(tg, bio, iops_limit, &iops_wait)) {
1050 if (time_before(tg->slice_end[rw], jiffies + max_wait))
1051 throtl_extend_slice(tg, rw, jiffies + max_wait);
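
tg_with_in_bps_limit()/tg_with_in_iops_limit() and tg_may_dispatch() above answer one question: may this bio go now, and if not, how long until the slice has earned enough budget. A simplified model of that decision follows. struct budget_model, wait_model() and HZ_MODEL are assumptions, limits are assumed non-zero (unlimited groups return early in the kernel), jiffies wrap handling is omitted, and wait_model() treats the io budget the same way as the byte budget, which simplifies the kernel's per-slice rounding.

#include <stdbool.h>
#include <stdint.h>

#define HZ_MODEL 1000UL

struct budget_model {
	uint64_t bytes_disp, bps_limit;
	unsigned int io_disp, iops_limit;
	unsigned long slice_elapsed;   /* rounded-up jiffies in this slice */
	unsigned long slice_end;
};

static unsigned long wait_model(uint64_t used, uint64_t cost, uint64_t limit,
				unsigned long elapsed)
{
	uint64_t allowed = limit * elapsed / HZ_MODEL;

	if (used + cost <= allowed)
		return 0;
	/* wait until the surplus has been earned back, at least one tick */
	return (unsigned long)((used + cost - allowed) * HZ_MODEL / limit) + 1;
}

static bool may_dispatch_model(struct budget_model *b, uint64_t bio_size,
			       unsigned long now, unsigned long *wait)
{
	unsigned long bps_wait = wait_model(b->bytes_disp, bio_size,
					    b->bps_limit, b->slice_elapsed);
	unsigned long iops_wait = wait_model(b->io_disp, 1,
					     b->iops_limit, b->slice_elapsed);
	unsigned long max_wait = bps_wait > iops_wait ? bps_wait : iops_wait;

	if (wait)
		*wait = max_wait;
	if (!max_wait)
		return true;
	/* mirror throtl_extend_slice(): the slice must outlive the wait */
	if (b->slice_end < now + max_wait)
		b->slice_end = now + max_wait;
	return false;
}
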
1056 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
1062 tg->bytes_disp[rw] += bio_size;
1063 tg->io_disp[rw]++;
1064 tg->last_bytes_disp[rw] += bio_size;
1065 tg->last_io_disp[rw]++;
1071 * is being charged to a tg.
1081 * @tg: the target throtl_grp
1083 * Add @bio to @tg's service_queue using @qn. If @qn is not specified,
1084 * tg->qnode_on_self[] is used.
1087 struct throtl_grp *tg)
1089 struct throtl_service_queue *sq = &tg->service_queue;
1093 qn = &tg->qnode_on_self[rw];
1096 * If @tg doesn't currently have any bios queued in the same
1097 * direction, queueing @bio can change when @tg should be
1098 * dispatched. Mark that @tg was empty. This is automatically
1102 tg->flags |= THROTL_TG_WAS_EMPTY;
1107 throtl_enqueue_tg(tg);
1110 static void tg_update_disptime(struct throtl_grp *tg)
1112 struct throtl_service_queue *sq = &tg->service_queue;
1118 tg_may_dispatch(tg, bio, &read_wait);
1122 tg_may_dispatch(tg, bio, &write_wait);
1128 throtl_dequeue_tg(tg);
1129 tg->disptime = disptime;
1130 throtl_enqueue_tg(tg);
1133 tg->flags &= ~THROTL_TG_WAS_EMPTY;
1146 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
1148 struct throtl_service_queue *sq = &tg->service_queue;
1155 * @bio is being transferred from @tg to @parent_sq. Popping a bio
1156 * from @tg may put its reference and @parent_sq might end up
1157 * getting released prematurely. Remember the tg to put and put it
1163 throtl_charge_bio(tg, bio);
1166 * If our parent is another tg, we just need to transfer @bio to
1173 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
1174 start_parent_slice_with_credit(tg, parent_tg, rw);
1176 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
1178 BUG_ON(tg->td->nr_queued[rw] <= 0);
1179 tg->td->nr_queued[rw]--;
1182 throtl_trim_slice(tg, rw);
1188 static int throtl_dispatch_tg(struct throtl_grp *tg)
1190 struct throtl_service_queue *sq = &tg->service_queue;
1199 tg_may_dispatch(tg, bio, NULL)) {
1201 tg_dispatch_one_bio(tg, bio_data_dir(bio));
1209 tg_may_dispatch(tg, bio, NULL)) {
1211 tg_dispatch_one_bio(tg, bio_data_dir(bio));
1226 struct throtl_grp *tg;
1232 tg = throtl_rb_first(parent_sq);
1233 if (!tg)
1236 if (time_before(jiffies, tg->disptime))
1239 throtl_dequeue_tg(tg);
1241 nr_disp += throtl_dispatch_tg(tg);
1243 sq = &tg->service_queue;
1245 tg_update_disptime(tg);
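
tg_service_queue_add()/throtl_enqueue_tg() earlier keep pending groups ordered by disptime, and throtl_select_dispatch() just above walks that order from the earliest entry, stopping at the first group whose disptime is still in the future. A hypothetical stand-in that keeps the same invariant with a sorted singly linked list instead of the kernel's cached rb-tree; jiffies wrap handling is omitted.

#include <stddef.h>

struct pending_model {
	unsigned long disptime;            /* when this group may dispatch */
	struct pending_model *next;
};

/* insert so the head always holds the smallest disptime; ties queue
 * behind existing entries, matching the rb-tree's right-on-equal rule */
static void enqueue_model(struct pending_model **head, struct pending_model *tg)
{
	while (*head && (*head)->disptime <= tg->disptime)
		head = &(*head)->next;
	tg->next = *head;
	*head = tg;
}

/* pop the earliest group, but only once its disptime has arrived */
static struct pending_model *select_dispatch_model(struct pending_model **head,
						   unsigned long now)
{
	struct pending_model *tg = *head;

	if (!tg || now < tg->disptime)
		return NULL;
	*head = tg->next;
	return tg;
}
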
1274 struct throtl_grp *tg = sq_to_tg(sq);
1314 if (tg->flags & THROTL_TG_WAS_EMPTY) {
1315 tg_update_disptime(tg);
1319 tg = sq_to_tg(sq);
1369 struct throtl_grp *tg = pd_to_tg(pd);
1370 u64 v = *(u64 *)((void *)tg + off);
1380 struct throtl_grp *tg = pd_to_tg(pd);
1381 unsigned int v = *(unsigned int *)((void *)tg + off);
1402 static void tg_conf_updated(struct throtl_grp *tg, bool global)
1404 struct throtl_service_queue *sq = &tg->service_queue;
1408 throtl_log(&tg->service_queue,
1410 tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
1411 tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
1415 * Update has_rules[] flags for the updated tg's subtree. A tg is
1416 * considered to have rules if either the tg itself or any of its
1422 global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
1444 * We're already holding queue_lock and know @tg is valid. Let's
1451 throtl_start_new_slice(tg, READ);
1452 throtl_start_new_slice(tg, WRITE);
1454 if (tg->flags & THROTL_TG_PENDING) {
1455 tg_update_disptime(tg);
1490 struct throtl_grp *tg;
1508 tg = blkg_to_tg(ctx.blkg);
1511 *(u64 *)((void *)tg + of_cft(of)->private) = v;
1513 *(unsigned int *)((void *)tg + of_cft(of)->private) = v;
1515 tg_conf_updated(tg, false);
1612 struct throtl_grp *tg = pd_to_tg(pd);
1631 if (tg->bps_conf[READ][off] == bps_dft &&
1632 tg->bps_conf[WRITE][off] == bps_dft &&
1633 tg->iops_conf[READ][off] == iops_dft &&
1634 tg->iops_conf[WRITE][off] == iops_dft &&
1636 (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
1637 tg->latency_target_conf == DFL_LATENCY_TARGET)))
1640 if (tg->bps_conf[READ][off] != U64_MAX)
1642 tg->bps_conf[READ][off]);
1643 if (tg->bps_conf[WRITE][off] != U64_MAX)
1645 tg->bps_conf[WRITE][off]);
1646 if (tg->iops_conf[READ][off] != UINT_MAX)
1648 tg->iops_conf[READ][off]);
1649 if (tg->iops_conf[WRITE][off] != UINT_MAX)
1651 tg->iops_conf[WRITE][off]);
1653 if (tg->idletime_threshold_conf == ULONG_MAX)
1657 tg->idletime_threshold_conf);
1659 if (tg->latency_target_conf == ULONG_MAX)
1663 " latency=%lu", tg->latency_target_conf);
1684 struct throtl_grp *tg;
1699 tg = blkg_to_tg(ctx.blkg);
1700 v[0] = tg->bps_conf[READ][index];
1701 v[1] = tg->bps_conf[WRITE][index];
1702 v[2] = tg->iops_conf[READ][index];
1703 v[3] = tg->iops_conf[WRITE][index];
1705 idle_time = tg->idletime_threshold_conf;
1706 latency_time = tg->latency_target_conf;
1746 tg->bps_conf[READ][index] = v[0];
1747 tg->bps_conf[WRITE][index] = v[1];
1748 tg->iops_conf[READ][index] = v[2];
1749 tg->iops_conf[WRITE][index] = v[3];
1752 tg->bps[READ][index] = v[0];
1753 tg->bps[WRITE][index] = v[1];
1754 tg->iops[READ][index] = v[2];
1755 tg->iops[WRITE][index] = v[3];
1757 tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
1758 tg->bps_conf[READ][LIMIT_MAX]);
1759 tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
1760 tg->bps_conf[WRITE][LIMIT_MAX]);
1761 tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
1762 tg->iops_conf[READ][LIMIT_MAX]);
1763 tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
1764 tg->iops_conf[WRITE][LIMIT_MAX]);
1765 tg->idletime_threshold_conf = idle_time;
1766 tg->latency_target_conf = latency_time;
1769 if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
1770 tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
1771 tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
1772 tg->latency_target_conf == DFL_LATENCY_TARGET) {
1773 tg->bps[READ][LIMIT_LOW] = 0;
1774 tg->bps[WRITE][LIMIT_LOW] = 0;
1775 tg->iops[READ][LIMIT_LOW] = 0;
1776 tg->iops[WRITE][LIMIT_LOW] = 0;
1777 tg->idletime_threshold = DFL_IDLE_THRESHOLD;
1778 tg->latency_target = DFL_LATENCY_TARGET;
1780 tg->idletime_threshold = tg->idletime_threshold_conf;
1781 tg->latency_target = tg->latency_target_conf;
1784 blk_throtl_update_limit_valid(tg->td);
1785 if (tg->td->limit_valid[LIMIT_LOW]) {
1787 tg->td->limit_index = LIMIT_LOW;
1789 tg->td->limit_index = LIMIT_MAX;
1790 tg_conf_updated(tg, index == LIMIT_LOW &&
1791 tg->td->limit_valid[LIMIT_LOW]);
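
The tg_set_limit() hits above clamp every effective low limit by the corresponding configured max before it is used, so io.low can never promise more than io.max allows. The bps half of that clamping, restated on its own; struct conf_model is illustrative and the iops arrays are handled identically.

#include <stdint.h>

enum { LIMIT_LOW, LIMIT_MAX, LIMIT_CNT };

struct conf_model {
	uint64_t bps_conf[2][LIMIT_CNT];   /* what the user wrote */
	uint64_t bps[2][LIMIT_CNT];        /* what throttling actually uses */
};

static void apply_low_limits_model(struct conf_model *c)
{
	for (int rw = 0; rw < 2; rw++)
		c->bps[rw][LIMIT_LOW] =
			c->bps_conf[rw][LIMIT_LOW] < c->bps_conf[rw][LIMIT_MAX] ?
			c->bps_conf[rw][LIMIT_LOW] : c->bps_conf[rw][LIMIT_MAX];
}
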
1837 static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
1841 if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
1842 rtime = tg->last_low_overflow_time[READ];
1843 if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
1844 wtime = tg->last_low_overflow_time[WRITE];
1848 /* tg should not be an intermediate node */
1849 static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
1852 struct throtl_grp *parent = tg;
1853 unsigned long ret = __tg_last_low_overflow_time(tg);
1876 static bool throtl_tg_is_idle(struct throtl_grp *tg)
1888 time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
1889 ret = tg->latency_target == DFL_LATENCY_TARGET ||
1890 tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
1891 (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
1892 tg->avg_idletime > tg->idletime_threshold ||
1893 (tg->latency_target && tg->bio_cnt &&
1894 tg->bad_bio_cnt * 5 < tg->bio_cnt);
1895 throtl_log(&tg->service_queue,
1897 tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
1898 tg->bio_cnt, ret, tg->td->scale);
1902 static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
1904 struct throtl_service_queue *sq = &tg->service_queue;
1911 read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW];
1912 write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW];
1923 tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
1924 throtl_tg_is_idle(tg))
1929 static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
1932 if (throtl_tg_can_upgrade(tg))
1934 tg = sq_to_tg(tg->service_queue.parent_sq);
1935 if (!tg || !tg_to_blkg(tg)->parent)
1955 struct throtl_grp *tg = blkg_to_tg(blkg);
1957 if (tg == this_tg)
1959 if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1961 if (!throtl_hierarchy_can_upgrade(tg)) {
1970 static void throtl_upgrade_check(struct throtl_grp *tg)
1974 if (tg->td->limit_index != LIMIT_LOW)
1977 if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1980 tg->last_check_time = now;
1983 __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
1986 if (throtl_can_upgrade(tg->td, NULL))
1987 throtl_upgrade_state(tg->td);
2001 struct throtl_grp *tg = blkg_to_tg(blkg);
2002 struct throtl_service_queue *sq = &tg->service_queue;
2004 tg->disptime = jiffies - 1;
2028 static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
2030 struct throtl_data *td = tg->td;
2038 time_after_eq(now, tg_last_low_overflow_time(tg) +
2040 (!throtl_tg_is_idle(tg) ||
2041 !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
2046 static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
2049 if (!throtl_tg_can_downgrade(tg))
2051 tg = sq_to_tg(tg->service_queue.parent_sq);
2052 if (!tg || !tg_to_blkg(tg)->parent)
2058 static void throtl_downgrade_check(struct throtl_grp *tg)
2065 if (tg->td->limit_index != LIMIT_MAX ||
2066 !tg->td->limit_valid[LIMIT_LOW])
2068 if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
2070 if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
2073 elapsed_time = now - tg->last_check_time;
2074 tg->last_check_time = now;
2076 if (time_before(now, tg_last_low_overflow_time(tg) +
2077 tg->td->throtl_slice))
2080 if (tg->bps[READ][LIMIT_LOW]) {
2081 bps = tg->last_bytes_disp[READ] * HZ;
2083 if (bps >= tg->bps[READ][LIMIT_LOW])
2084 tg->last_low_overflow_time[READ] = now;
2087 if (tg->bps[WRITE][LIMIT_LOW]) {
2088 bps = tg->last_bytes_disp[WRITE] * HZ;
2090 if (bps >= tg->bps[WRITE][LIMIT_LOW])
2091 tg->last_low_overflow_time[WRITE] = now;
2094 if (tg->iops[READ][LIMIT_LOW]) {
2095 tg->last_io_disp[READ] += atomic_xchg(&tg->last_io_split_cnt[READ], 0);
2096 iops = tg->last_io_disp[READ] * HZ / elapsed_time;
2097 if (iops >= tg->iops[READ][LIMIT_LOW])
2098 tg->last_low_overflow_time[READ] = now;
2101 if (tg->iops[WRITE][LIMIT_LOW]) {
2102 tg->last_io_disp[WRITE] += atomic_xchg(&tg->last_io_split_cnt[WRITE], 0);
2103 iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
2104 if (iops >= tg->iops[WRITE][LIMIT_LOW])
2105 tg->last_low_overflow_time[WRITE] = now;
2112 if (throtl_hierarchy_can_downgrade(tg))
2113 throtl_downgrade_state(tg->td);
2115 tg->last_bytes_disp[READ] = 0;
2116 tg->last_bytes_disp[WRITE] = 0;
2117 tg->last_io_disp[READ] = 0;
2118 tg->last_io_disp[WRITE] = 0;
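
throtl_downgrade_check() above recomputes the group's recent byte and io rates over the window since the last check and refreshes last_low_overflow_time whenever a low limit is still being exceeded, then zeroes the window counters. A compact model of that rate check; struct window_model and HZ_MODEL are assumptions, elapsed is assumed non-zero (the kernel only gets here after at least one throttle slice), and the hierarchy walk plus the io_split accounting are omitted.

#include <stdint.h>

#define HZ_MODEL 1000UL

struct window_model {
	uint64_t last_bytes_disp, bps_low;
	unsigned int last_io_disp, iops_low;
	unsigned long last_low_overflow_time;
};

static void downgrade_check_model(struct window_model *w, unsigned long now,
				  unsigned long elapsed)
{
	if (w->bps_low) {
		uint64_t bps = w->last_bytes_disp * HZ_MODEL / elapsed;

		if (bps >= w->bps_low)
			w->last_low_overflow_time = now;
	}
	if (w->iops_low) {
		unsigned int iops = (unsigned int)
			((uint64_t)w->last_io_disp * HZ_MODEL / elapsed);

		if (iops >= w->iops_low)
			w->last_low_overflow_time = now;
	}
	/* counters cover only one window; start the next one from zero */
	w->last_bytes_disp = 0;
	w->last_io_disp = 0;
}
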
2121 static void blk_throtl_update_idletime(struct throtl_grp *tg)
2124 unsigned long last_finish_time = tg->last_finish_time;
2131 last_finish_time == tg->checked_last_finish_time)
2134 tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
2135 tg->checked_last_finish_time = last_finish_time;
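
blk_throtl_update_idletime() above maintains an exponentially weighted moving average of the gap between request completions, in units of roughly microseconds (the listing elsewhere shifts ktime by 10): seven parts old average to one part new sample. Restated on its own:

static unsigned long ewma_idletime(unsigned long avg, unsigned long now,
				   unsigned long last_finish_time)
{
	/* avg = 7/8 * avg + 1/8 * (now - last_finish_time) */
	return (avg * 7 + (now - last_finish_time)) >> 3;
}

For example, an average of 800 with a new gap of 80 gives (800 * 7 + 80) / 8 = 710.
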
2245 struct throtl_grp *tg = blkg_to_tg(blkg);
2249 struct throtl_data *td = tg->td;
2258 blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
2260 blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
2263 if (!tg->has_rules[rw])
2270 blk_throtl_update_idletime(tg);
2272 sq = &tg->service_queue;
2276 if (tg->last_low_overflow_time[rw] == 0)
2277 tg->last_low_overflow_time[rw] = jiffies;
2278 throtl_downgrade_check(tg);
2279 throtl_upgrade_check(tg);
2285 if (!tg_may_dispatch(tg, bio, NULL)) {
2286 tg->last_low_overflow_time[rw] = jiffies;
2287 if (throtl_can_upgrade(td, tg)) {
2295 throtl_charge_bio(tg, bio);
2308 throtl_trim_slice(tg, rw);
2315 qn = &tg->qnode_on_parent[rw];
2317 tg = sq_to_tg(sq);
2318 if (!tg)
2322 /* out-of-limit, queue to @tg */
2325 tg->bytes_disp[rw], bio->bi_iter.bi_size,
2326 tg_bps_limit(tg, rw),
2327 tg->io_disp[rw], tg_iops_limit(tg, rw),
2330 tg->last_low_overflow_time[rw] = jiffies;
2333 throtl_add_bio_tg(bio, qn, tg);
2337 * Update @tg's dispatch time and force schedule dispatch if @tg
2340 * its @tg's disptime is not in the future.
2342 if (tg->flags & THROTL_TG_WAS_EMPTY) {
2343 tg_update_disptime(tg);
2344 throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
2392 struct throtl_grp *tg;
2402 tg = blkg_to_tg(blkg);
2403 if (!tg->td->limit_valid[LIMIT_LOW])
2407 tg->last_finish_time = finish_time_ns >> 10;
2417 throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue),
2420 if (tg->latency_target && lat >= tg->td->filtered_latency) {
2425 threshold = tg->td->avg_buckets[rw][bucket].latency +
2426 tg->latency_target;
2428 tg->bad_bio_cnt++;
2433 tg->bio_cnt++;
2436 if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
2437 tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
2438 tg->bio_cnt /= 2;
2439 tg->bad_bio_cnt /= 2;
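
The tail of the listing counts bios that missed their latency target (the bucket's average latency plus latency_target) and periodically halves both counters so the bad/total ratio tracks recent behaviour; throtl_tg_is_idle() earlier treats the target as met while bad_bio_cnt * 5 < bio_cnt, i.e. under 20% bad. A self-contained model of that accounting; struct latency_model and its fields are illustrative.

#include <stdbool.h>

struct latency_model {
	unsigned int bio_cnt, bad_bio_cnt;
	unsigned long bio_cnt_reset_time, reset_interval;
};

static void account_latency_model(struct latency_model *m, bool missed_target,
				  unsigned long now)
{
	if (missed_target)
		m->bad_bio_cnt++;
	m->bio_cnt++;

	/* age the counters so old behaviour fades out */
	if (now >= m->bio_cnt_reset_time || m->bio_cnt > 1024) {
		m->bio_cnt_reset_time = now + m->reset_interval;
		m->bio_cnt /= 2;
		m->bad_bio_cnt /= 2;
	}
}

static bool meets_latency_target_model(const struct latency_model *m)
{
	return m->bio_cnt && m->bad_bio_cnt * 5 < m->bio_cnt;
}
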