Lines matching refs: tg

92 static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
94 return pd_to_blkg(&tg->pd);
121 struct throtl_grp *tg = sq_to_tg(sq);
123 if (tg)
124 return tg->td;
147 static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
149 struct blkcg_gq *blkg = tg_to_blkg(tg);
156 td = tg->td;
157 ret = tg->bps[rw][td->limit_index];
161 tg->iops[rw][td->limit_index])
167 if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
168 tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
171 adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
172 ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
177 static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
179 struct blkcg_gq *blkg = tg_to_blkg(tg);
186 td = tg->td;
187 ret = tg->iops[rw][td->limit_index];
188 if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
191 tg->bps[rw][td->limit_index])
197 if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
198 tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
201 adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
204 ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
244 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
248 qn->tg = tg;
258 * @qn->tg's reference count is bumped when @qn is activated. See the
267 blkg_get(tg_to_blkg(qn->tg));
319 *tg_to_put = qn->tg;
321 blkg_put(tg_to_blkg(qn->tg));
341 struct throtl_grp *tg;
344 tg = kzalloc_node(sizeof(*tg), gfp, disk->node_id);
345 if (!tg)
348 if (blkg_rwstat_init(&tg->stat_bytes, gfp))
351 if (blkg_rwstat_init(&tg->stat_ios, gfp))
354 throtl_service_queue_init(&tg->service_queue);
357 throtl_qnode_init(&tg->qnode_on_self[rw], tg);
358 throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
361 RB_CLEAR_NODE(&tg->rb_node);
362 tg->bps[READ][LIMIT_MAX] = U64_MAX;
363 tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
364 tg->iops[READ][LIMIT_MAX] = UINT_MAX;
365 tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
366 tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
367 tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
368 tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
369 tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
372 tg->latency_target = DFL_LATENCY_TARGET;
373 tg->latency_target_conf = DFL_LATENCY_TARGET;
374 tg->idletime_threshold = DFL_IDLE_THRESHOLD;
375 tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;
377 return &tg->pd;
380 blkg_rwstat_exit(&tg->stat_bytes);
382 kfree(tg);
388 struct throtl_grp *tg = pd_to_tg(pd);
389 struct blkcg_gq *blkg = tg_to_blkg(tg);
391 struct throtl_service_queue *sq = &tg->service_queue;
410 tg->td = td;
414 * Set has_rules[] if @tg or any of its parents have limits configured.
418 static void tg_update_has_rules(struct throtl_grp *tg)
420 struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
421 struct throtl_data *td = tg->td;
425 tg->has_rules_iops[rw] =
428 tg_iops_limit(tg, rw) != UINT_MAX);
429 tg->has_rules_bps[rw] =
432 (tg_bps_limit(tg, rw) != U64_MAX));
438 struct throtl_grp *tg = pd_to_tg(pd);
443 tg_update_has_rules(tg);
455 struct throtl_grp *tg = blkg_to_tg(blkg);
457 if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
458 tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) {
476 struct throtl_grp *tg = pd_to_tg(pd);
478 tg->bps[READ][LIMIT_LOW] = 0;
479 tg->bps[WRITE][LIMIT_LOW] = 0;
480 tg->iops[READ][LIMIT_LOW] = 0;
481 tg->iops[WRITE][LIMIT_LOW] = 0;
483 blk_throtl_update_limit_valid(tg->td);
485 if (!tg->td->limit_valid[tg->td->limit_index])
486 throtl_upgrade_state(tg->td);
491 struct throtl_grp *tg = pd_to_tg(pd);
493 del_timer_sync(&tg->service_queue.pending_timer);
494 blkg_rwstat_exit(&tg->stat_bytes);
495 blkg_rwstat_exit(&tg->stat_ios);
496 kfree(tg);
520 struct throtl_grp *tg;
522 tg = throtl_rb_first(parent_sq);
523 if (!tg)
526 parent_sq->first_pending_disptime = tg->disptime;
529 static void tg_service_queue_add(struct throtl_grp *tg)
531 struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
535 unsigned long key = tg->disptime;
550 rb_link_node(&tg->rb_node, parent, node);
551 rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree,
555 static void throtl_enqueue_tg(struct throtl_grp *tg)
557 if (!(tg->flags & THROTL_TG_PENDING)) {
558 tg_service_queue_add(tg);
559 tg->flags |= THROTL_TG_PENDING;
560 tg->service_queue.parent_sq->nr_pending++;
564 static void throtl_dequeue_tg(struct throtl_grp *tg)
566 if (tg->flags & THROTL_TG_PENDING) {
568 tg->service_queue.parent_sq;
570 throtl_rb_erase(&tg->rb_node, parent_sq);
572 tg->flags &= ~THROTL_TG_PENDING;
633 static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
636 tg->bytes_disp[rw] = 0;
637 tg->io_disp[rw] = 0;
638 tg->carryover_bytes[rw] = 0;
639 tg->carryover_ios[rw] = 0;
647 if (time_after(start, tg->slice_start[rw]))
648 tg->slice_start[rw] = start;
650 tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
651 throtl_log(&tg->service_queue,
653 rw == READ ? 'R' : 'W', tg->slice_start[rw],
654 tg->slice_end[rw], jiffies);
657 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw,
660 tg->bytes_disp[rw] = 0;
661 tg->io_disp[rw] = 0;
662 tg->slice_start[rw] = jiffies;
663 tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
665 tg->carryover_bytes[rw] = 0;
666 tg->carryover_ios[rw] = 0;
669 throtl_log(&tg->service_queue,
671 rw == READ ? 'R' : 'W', tg->slice_start[rw],
672 tg->slice_end[rw], jiffies);
675 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
678 tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
681 static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
684 throtl_set_slice_end(tg, rw, jiffy_end);
685 throtl_log(&tg->service_queue,
687 rw == READ ? 'R' : 'W', tg->slice_start[rw],
688 tg->slice_end[rw], jiffies);
692 static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
694 if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
736 static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
742 BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
749 if (throtl_slice_used(tg, rw))
760 throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
762 time_elapsed = rounddown(jiffies - tg->slice_start[rw],
763 tg->td->throtl_slice);
767 bytes_trim = calculate_bytes_allowed(tg_bps_limit(tg, rw),
769 tg->carryover_bytes[rw];
770 io_trim = calculate_io_allowed(tg_iops_limit(tg, rw), time_elapsed) +
771 tg->carryover_ios[rw];
775 tg->carryover_bytes[rw] = 0;
776 if ((long long)tg->bytes_disp[rw] >= bytes_trim)
777 tg->bytes_disp[rw] -= bytes_trim;
779 tg->bytes_disp[rw] = 0;
781 tg->carryover_ios[rw] = 0;
782 if ((int)tg->io_disp[rw] >= io_trim)
783 tg->io_disp[rw] -= io_trim;
785 tg->io_disp[rw] = 0;
787 tg->slice_start[rw] += time_elapsed;
789 throtl_log(&tg->service_queue,
791 rw == READ ? 'R' : 'W', time_elapsed / tg->td->throtl_slice,
792 bytes_trim, io_trim, tg->slice_start[rw], tg->slice_end[rw],
796 static void __tg_update_carryover(struct throtl_grp *tg, bool rw)
798 unsigned long jiffy_elapsed = jiffies - tg->slice_start[rw];
799 u64 bps_limit = tg_bps_limit(tg, rw);
800 u32 iops_limit = tg_iops_limit(tg, rw);
809 tg->carryover_bytes[rw] +=
811 tg->bytes_disp[rw];
813 tg->carryover_ios[rw] +=
815 tg->io_disp[rw];
818 static void tg_update_carryover(struct throtl_grp *tg)
820 if (tg->service_queue.nr_queued[READ])
821 __tg_update_carryover(tg, READ);
822 if (tg->service_queue.nr_queued[WRITE])
823 __tg_update_carryover(tg, WRITE);
826 throtl_log(&tg->service_queue, "%s: %lld %lld %d %d\n", __func__,
827 tg->carryover_bytes[READ], tg->carryover_bytes[WRITE],
828 tg->carryover_ios[READ], tg->carryover_ios[WRITE]);
831 static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio,
842 jiffy_elapsed = jiffies - tg->slice_start[rw];
845 jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
847 tg->carryover_ios[rw];
848 if (io_allowed > 0 && tg->io_disp[rw] + 1 <= io_allowed)
856 static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
870 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
874 jiffy_elapsed_rnd = tg->td->throtl_slice;
876 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
878 tg->carryover_bytes[rw];
879 if (bytes_allowed > 0 && tg->bytes_disp[rw] + bio_size <= bytes_allowed)
883 extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
901 static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
906 u64 bps_limit = tg_bps_limit(tg, rw);
907 u32 iops_limit = tg_iops_limit(tg, rw);
915 BUG_ON(tg->service_queue.nr_queued[rw] &&
916 bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
918 /* If tg->bps = -1, then BW is unlimited */
920 tg->flags & THROTL_TG_CANCELING) {
933 if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
934 throtl_start_new_slice(tg, rw, true);
936 if (time_before(tg->slice_end[rw],
937 jiffies + tg->td->throtl_slice))
938 throtl_extend_slice(tg, rw,
939 jiffies + tg->td->throtl_slice);
942 bps_wait = tg_within_bps_limit(tg, bio, bps_limit);
943 iops_wait = tg_within_iops_limit(tg, bio, iops_limit);
955 if (time_before(tg->slice_end[rw], jiffies + max_wait))
956 throtl_extend_slice(tg, rw, jiffies + max_wait);
961 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
968 tg->bytes_disp[rw] += bio_size;
969 tg->last_bytes_disp[rw] += bio_size;
972 tg->io_disp[rw]++;
973 tg->last_io_disp[rw]++;
980 * @tg: the target throtl_grp
982 * Add @bio to @tg's service_queue using @qn. If @qn is not specified,
983 * tg->qnode_on_self[] is used.
986 struct throtl_grp *tg)
988 struct throtl_service_queue *sq = &tg->service_queue;
992 qn = &tg->qnode_on_self[rw];
995 * If @tg doesn't currently have any bios queued in the same
996 * direction, queueing @bio can change when @tg should be
997 * dispatched. Mark that @tg was empty. This is automatically
1001 tg->flags |= THROTL_TG_WAS_EMPTY;
1006 throtl_enqueue_tg(tg);
1009 static void tg_update_disptime(struct throtl_grp *tg)
1011 struct throtl_service_queue *sq = &tg->service_queue;
1017 tg_may_dispatch(tg, bio, &read_wait);
1021 tg_may_dispatch(tg, bio, &write_wait);
1027 throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
1028 tg->disptime = disptime;
1029 tg_service_queue_add(tg);
1032 tg->flags &= ~THROTL_TG_WAS_EMPTY;
1045 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
1047 struct throtl_service_queue *sq = &tg->service_queue;
1054 * @bio is being transferred from @tg to @parent_sq. Popping a bio
1055 * from @tg may put its reference and @parent_sq might end up
1056 * getting released prematurely. Remember the tg to put and put it
1062 throtl_charge_bio(tg, bio);
1065 * If our parent is another tg, we just need to transfer @bio to
1072 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
1073 start_parent_slice_with_credit(tg, parent_tg, rw);
1076 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
1078 BUG_ON(tg->td->nr_queued[rw] <= 0);
1079 tg->td->nr_queued[rw]--;
1082 throtl_trim_slice(tg, rw);
1088 static int throtl_dispatch_tg(struct throtl_grp *tg)
1090 struct throtl_service_queue *sq = &tg->service_queue;
1099 tg_may_dispatch(tg, bio, NULL)) {
1101 tg_dispatch_one_bio(tg, bio_data_dir(bio));
1109 tg_may_dispatch(tg, bio, NULL)) {
1111 tg_dispatch_one_bio(tg, bio_data_dir(bio));
1126 struct throtl_grp *tg;
1132 tg = throtl_rb_first(parent_sq);
1133 if (!tg)
1136 if (time_before(jiffies, tg->disptime))
1139 nr_disp += throtl_dispatch_tg(tg);
1141 sq = &tg->service_queue;
1143 tg_update_disptime(tg);
1145 throtl_dequeue_tg(tg);
1174 struct throtl_grp *tg = sq_to_tg(sq);
1182 if (tg)
1183 q = tg->pd.blkg->q;
1224 if (tg->flags & THROTL_TG_WAS_EMPTY) {
1225 tg_update_disptime(tg);
1229 tg = sq_to_tg(sq);
1279 struct throtl_grp *tg = pd_to_tg(pd);
1280 u64 v = *(u64 *)((void *)tg + off);
1290 struct throtl_grp *tg = pd_to_tg(pd);
1291 unsigned int v = *(unsigned int *)((void *)tg + off);
1312 static void tg_conf_updated(struct throtl_grp *tg, bool global)
1314 struct throtl_service_queue *sq = &tg->service_queue;
1318 throtl_log(&tg->service_queue,
1320 tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
1321 tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
1325 * Update has_rules[] flags for the updated tg's subtree. A tg is
1326 * considered to have rules if either the tg itself or any of its
1332 global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
1354 * We're already holding queue_lock and know @tg is valid. Let's
1361 throtl_start_new_slice(tg, READ, false);
1362 throtl_start_new_slice(tg, WRITE, false);
1364 if (tg->flags & THROTL_TG_PENDING) {
1365 tg_update_disptime(tg);
1375 struct throtl_grp *tg;
1391 tg = blkg_to_tg(ctx.blkg);
1392 tg_update_carryover(tg);
1395 *(u64 *)((void *)tg + of_cft(of)->private) = v;
1397 *(unsigned int *)((void *)tg + of_cft(of)->private) = v;
1399 tg_conf_updated(tg, false);
1495 struct throtl_grp *tg = pd_to_tg(pd);
1514 if (tg->bps_conf[READ][off] == bps_dft &&
1515 tg->bps_conf[WRITE][off] == bps_dft &&
1516 tg->iops_conf[READ][off] == iops_dft &&
1517 tg->iops_conf[WRITE][off] == iops_dft &&
1519 (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
1520 tg->latency_target_conf == DFL_LATENCY_TARGET)))
1523 if (tg->bps_conf[READ][off] != U64_MAX)
1525 tg->bps_conf[READ][off]);
1526 if (tg->bps_conf[WRITE][off] != U64_MAX)
1528 tg->bps_conf[WRITE][off]);
1529 if (tg->iops_conf[READ][off] != UINT_MAX)
1531 tg->iops_conf[READ][off]);
1532 if (tg->iops_conf[WRITE][off] != UINT_MAX)
1534 tg->iops_conf[WRITE][off]);
1536 if (tg->idletime_threshold_conf == ULONG_MAX)
1540 tg->idletime_threshold_conf);
1542 if (tg->latency_target_conf == ULONG_MAX)
1546 " latency=%lu", tg->latency_target_conf);
1567 struct throtl_grp *tg;
1580 tg = blkg_to_tg(ctx.blkg);
1581 tg_update_carryover(tg);
1583 v[0] = tg->bps_conf[READ][index];
1584 v[1] = tg->bps_conf[WRITE][index];
1585 v[2] = tg->iops_conf[READ][index];
1586 v[3] = tg->iops_conf[WRITE][index];
1588 idle_time = tg->idletime_threshold_conf;
1589 latency_time = tg->latency_target_conf;
1629 tg->bps_conf[READ][index] = v[0];
1630 tg->bps_conf[WRITE][index] = v[1];
1631 tg->iops_conf[READ][index] = v[2];
1632 tg->iops_conf[WRITE][index] = v[3];
1635 tg->bps[READ][index] = v[0];
1636 tg->bps[WRITE][index] = v[1];
1637 tg->iops[READ][index] = v[2];
1638 tg->iops[WRITE][index] = v[3];
1640 tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
1641 tg->bps_conf[READ][LIMIT_MAX]);
1642 tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
1643 tg->bps_conf[WRITE][LIMIT_MAX]);
1644 tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
1645 tg->iops_conf[READ][LIMIT_MAX]);
1646 tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
1647 tg->iops_conf[WRITE][LIMIT_MAX]);
1648 tg->idletime_threshold_conf = idle_time;
1649 tg->latency_target_conf = latency_time;
1652 if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
1653 tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
1654 tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
1655 tg->latency_target_conf == DFL_LATENCY_TARGET) {
1656 tg->bps[READ][LIMIT_LOW] = 0;
1657 tg->bps[WRITE][LIMIT_LOW] = 0;
1658 tg->iops[READ][LIMIT_LOW] = 0;
1659 tg->iops[WRITE][LIMIT_LOW] = 0;
1660 tg->idletime_threshold = DFL_IDLE_THRESHOLD;
1661 tg->latency_target = DFL_LATENCY_TARGET;
1663 tg->idletime_threshold = tg->idletime_threshold_conf;
1664 tg->latency_target = tg->latency_target_conf;
1667 blk_throtl_update_limit_valid(tg->td);
1668 if (tg->td->limit_valid[LIMIT_LOW]) {
1670 tg->td->limit_index = LIMIT_LOW;
1672 tg->td->limit_index = LIMIT_MAX;
1673 tg_conf_updated(tg, index == LIMIT_LOW &&
1674 tg->td->limit_valid[LIMIT_LOW]);
1733 struct throtl_grp *tg = blkg_to_tg(blkg);
1734 struct throtl_service_queue *sq = &tg->service_queue;
1740 tg->flags |= THROTL_TG_CANCELING;
1749 if (!(tg->flags & THROTL_TG_PENDING))
1756 tg_update_disptime(tg);
1765 static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
1769 if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
1770 rtime = tg->last_low_overflow_time[READ];
1771 if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
1772 wtime = tg->last_low_overflow_time[WRITE];
1776 static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
1779 struct throtl_grp *parent = tg;
1780 unsigned long ret = __tg_last_low_overflow_time(tg);
1803 static bool throtl_tg_is_idle(struct throtl_grp *tg)
1815 time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
1816 ret = tg->latency_target == DFL_LATENCY_TARGET ||
1817 tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
1818 (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
1819 tg->avg_idletime > tg->idletime_threshold ||
1820 (tg->latency_target && tg->bio_cnt &&
1821 tg->bad_bio_cnt * 5 < tg->bio_cnt);
1822 throtl_log(&tg->service_queue,
1824 tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
1825 tg->bio_cnt, ret, tg->td->scale);
1829 static bool throtl_low_limit_reached(struct throtl_grp *tg, int rw)
1831 struct throtl_service_queue *sq = &tg->service_queue;
1832 bool limit = tg->bps[rw][LIMIT_LOW] || tg->iops[rw][LIMIT_LOW];
1843 static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
1850 if (throtl_low_limit_reached(tg, READ) &&
1851 throtl_low_limit_reached(tg, WRITE))
1855 tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
1856 throtl_tg_is_idle(tg))
1861 static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
1864 if (throtl_tg_can_upgrade(tg))
1866 tg = sq_to_tg(tg->service_queue.parent_sq);
1867 if (!tg || !tg_to_blkg(tg)->parent)
1887 struct throtl_grp *tg = blkg_to_tg(blkg);
1889 if (tg == this_tg)
1891 if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1893 if (!throtl_hierarchy_can_upgrade(tg)) {
1902 static void throtl_upgrade_check(struct throtl_grp *tg)
1906 if (tg->td->limit_index != LIMIT_LOW)
1909 if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1912 tg->last_check_time = now;
1915 __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
1918 if (throtl_can_upgrade(tg->td, NULL))
1919 throtl_upgrade_state(tg->td);
1933 struct throtl_grp *tg = blkg_to_tg(blkg);
1934 struct throtl_service_queue *sq = &tg->service_queue;
1936 tg->disptime = jiffies - 1;
1960 static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
1962 struct throtl_data *td = tg->td;
1969 if (time_after_eq(now, tg_last_low_overflow_time(tg) +
1971 (!throtl_tg_is_idle(tg) ||
1972 !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
1977 static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
1979 struct throtl_data *td = tg->td;
1985 if (!throtl_tg_can_downgrade(tg))
1987 tg = sq_to_tg(tg->service_queue.parent_sq);
1988 if (!tg || !tg_to_blkg(tg)->parent)
1994 static void throtl_downgrade_check(struct throtl_grp *tg)
2001 if (tg->td->limit_index != LIMIT_MAX ||
2002 !tg->td->limit_valid[LIMIT_LOW])
2004 if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
2006 if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
2009 elapsed_time = now - tg->last_check_time;
2010 tg->last_check_time = now;
2012 if (time_before(now, tg_last_low_overflow_time(tg) +
2013 tg->td->throtl_slice))
2016 if (tg->bps[READ][LIMIT_LOW]) {
2017 bps = tg->last_bytes_disp[READ] * HZ;
2019 if (bps >= tg->bps[READ][LIMIT_LOW])
2020 tg->last_low_overflow_time[READ] = now;
2023 if (tg->bps[WRITE][LIMIT_LOW]) {
2024 bps = tg->last_bytes_disp[WRITE] * HZ;
2026 if (bps >= tg->bps[WRITE][LIMIT_LOW])
2027 tg->last_low_overflow_time[WRITE] = now;
2030 if (tg->iops[READ][LIMIT_LOW]) {
2031 iops = tg->last_io_disp[READ] * HZ / elapsed_time;
2032 if (iops >= tg->iops[READ][LIMIT_LOW])
2033 tg->last_low_overflow_time[READ] = now;
2036 if (tg->iops[WRITE][LIMIT_LOW]) {
2037 iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
2038 if (iops >= tg->iops[WRITE][LIMIT_LOW])
2039 tg->last_low_overflow_time[WRITE] = now;
2046 if (throtl_hierarchy_can_downgrade(tg))
2047 throtl_downgrade_state(tg->td);
2049 tg->last_bytes_disp[READ] = 0;
2050 tg->last_bytes_disp[WRITE] = 0;
2051 tg->last_io_disp[READ] = 0;
2052 tg->last_io_disp[WRITE] = 0;
2055 static void blk_throtl_update_idletime(struct throtl_grp *tg)
2058 unsigned long last_finish_time = tg->last_finish_time;
2065 last_finish_time == tg->checked_last_finish_time)
2068 tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
2069 tg->checked_last_finish_time = last_finish_time;
2153 static void blk_throtl_update_idletime(struct throtl_grp *tg)
2157 static void throtl_downgrade_check(struct throtl_grp *tg)
2161 static void throtl_upgrade_check(struct throtl_grp *tg)
2181 struct throtl_grp *tg = blkg_to_tg(blkg);
2185 struct throtl_data *td = tg->td;
2193 blk_throtl_update_idletime(tg);
2195 sq = &tg->service_queue;
2199 if (tg->last_low_overflow_time[rw] == 0)
2200 tg->last_low_overflow_time[rw] = jiffies;
2201 throtl_downgrade_check(tg);
2202 throtl_upgrade_check(tg);
2208 if (!tg_may_dispatch(tg, bio, NULL)) {
2209 tg->last_low_overflow_time[rw] = jiffies;
2210 if (throtl_can_upgrade(td, tg)) {
2218 throtl_charge_bio(tg, bio);
2231 throtl_trim_slice(tg, rw);
2238 qn = &tg->qnode_on_parent[rw];
2240 tg = sq_to_tg(sq);
2241 if (!tg) {
2247 /* out-of-limit, queue to @tg */
2250 tg->bytes_disp[rw], bio->bi_iter.bi_size,
2251 tg_bps_limit(tg, rw),
2252 tg->io_disp[rw], tg_iops_limit(tg, rw),
2255 tg->last_low_overflow_time[rw] = jiffies;
2258 throtl_add_bio_tg(bio, qn, tg);
2262 * Update @tg's dispatch time and force schedule dispatch if @tg
2265 * its @tg's disptime is not in the future.
2267 if (tg->flags & THROTL_TG_WAS_EMPTY) {
2268 tg_update_disptime(tg);
2269 throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
2316 struct throtl_grp *tg;
2326 tg = blkg_to_tg(blkg);
2327 if (!tg->td->limit_valid[LIMIT_LOW])
2331 tg->last_finish_time = finish_time_ns >> 10;
2341 throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue),
2344 if (tg->latency_target && lat >= tg->td->filtered_latency) {
2349 threshold = tg->td->avg_buckets[rw][bucket].latency +
2350 tg->latency_target;
2352 tg->bad_bio_cnt++;
2357 tg->bio_cnt++;
2360 if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
2361 tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
2362 tg->bio_cnt /= 2;
2363 tg->bad_bio_cnt /= 2;
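
A note for orientation: the matched lines above come from the block-layer throttling code, where tg is a struct throtl_grp. As a rough illustration of the limit-selection pattern visible in the tg_bps_limit() lines (refs 147-172), here is a minimal, self-contained userspace sketch, not the kernel function itself. Only LIMIT_LOW, LIMIT_MAX, the bps[rw][index] layout and limit_index are taken from the listing; tg_sketch, sketch_bps_limit and adjust_low_limit are hypothetical names, and adjust_low_limit() merely stands in for throtl_adjusted_limit(), whose body is not among the matched lines.

/* Sketch only: mirrors the fallback between LIMIT_MAX and a scaled LIMIT_LOW
 * seen in the tg_bps_limit() excerpt; names and scaling are illustrative. */
#include <stdint.h>
#include <stdio.h>

enum { LIMIT_LOW, LIMIT_MAX, LIMIT_CNT };
enum { READ_IDX, WRITE_IDX };

struct tg_sketch {
        uint64_t bps[2][LIMIT_CNT];     /* [rw][limit index], bytes per second */
        int limit_index;                /* which limit the group currently runs at */
        unsigned int scale;             /* stand-in for the throttle data's scale factor */
};

/* Stand-in for throtl_adjusted_limit(): grow the low limit as scale rises. */
static uint64_t adjust_low_limit(uint64_t low, unsigned int scale)
{
        return low + (low >> 1) * scale;
}

static uint64_t sketch_bps_limit(const struct tg_sketch *tg, int rw)
{
        uint64_t ret = tg->bps[rw][tg->limit_index];

        /* Running at LIMIT_MAX with a distinct LIMIT_LOW configured:
         * use the scaled low limit, capped by the configured max. */
        if (tg->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
            tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
                uint64_t adjusted = adjust_low_limit(tg->bps[rw][LIMIT_LOW],
                                                     tg->scale);
                if (adjusted < ret)
                        ret = adjusted;
        }
        return ret;
}

int main(void)
{
        struct tg_sketch tg = {
                .bps = {
                        [READ_IDX]  = { [LIMIT_LOW] = 1024 * 1024,
                                        [LIMIT_MAX] = 8 * 1024 * 1024 },
                        [WRITE_IDX] = { [LIMIT_LOW] = 1024 * 1024,
                                        [LIMIT_MAX] = 8 * 1024 * 1024 },
                },
                .limit_index = LIMIT_MAX,
                .scale = 4,
        };

        printf("effective read bps limit: %llu\n",
               (unsigned long long)sketch_bps_limit(&tg, READ_IDX));
        return 0;
}

With the values in main(), the sketch prints 3145728: the group operates at LIMIT_MAX, but because a distinct LIMIT_LOW is set, the scaled low limit (smaller here than the configured max) is the one that applies.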