Lines matching refs: iolat (block/blk-iolatency.c)
92 * inflight tracking. The number of cgroups which have iolat enabled is
192 static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
194 return pd_to_blkg(&iolat->pd);
197 static inline void latency_stat_init(struct iolatency_grp *iolat,
200 if (iolat->ssd) {
207 static inline void latency_stat_sum(struct iolatency_grp *iolat,
211 if (iolat->ssd) {
218 static inline void latency_stat_record_time(struct iolatency_grp *iolat,
221 struct latency_stat *stat = get_cpu_ptr(iolat->stats);
222 if (iolat->ssd) {
223 if (req_time >= iolat->min_lat_nsec)
231 static inline bool latency_sum_ok(struct iolatency_grp *iolat,
234 if (iolat->ssd) {
239 return stat->rqs.mean <= iolat->min_lat_nsec;
242 static inline u64 latency_stat_samples(struct iolatency_grp *iolat,
245 if (iolat->ssd)
250 static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
255 if (iolat->ssd)
266 div64_u64(iolat->cur_win_nsec,
268 iolat->lat_avg = calc_load(iolat->lat_avg,
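The latency_stat helpers above come in two flavours keyed off iolat->ssd: on solid-state devices the group counts how many requests missed the latency target out of the total, while on rotational devices it compares a mean completion time against the target (the rqs.mean check on line 239 of the listing). A minimal user-space sketch of that split; the names (lat_stat, lat_record, lat_sum_ok) and the roughly-10% miss budget are assumptions for illustration, as the listing only shows the rotational comparison.

#include <stdbool.h>
#include <stdint.h>

struct lat_stat {
	/* SSD flavour: count requests that missed the target. */
	uint64_t total;
	uint64_t missed;
	/* Rotational flavour: track a running mean instead. */
	uint64_t sum_ns;
	uint64_t nr;
};

static void lat_record(struct lat_stat *s, bool ssd, uint64_t min_lat_nsec,
		       uint64_t req_time_ns)
{
	if (ssd) {
		if (req_time_ns >= min_lat_nsec)
			s->missed++;
		s->total++;
	} else {
		s->sum_ns += req_time_ns;
		s->nr++;
	}
}

static bool lat_sum_ok(const struct lat_stat *s, bool ssd, uint64_t min_lat_nsec)
{
	if (ssd) {
		/* OK as long as only a small fraction of requests missed
		 * the target (budget assumed here, ~10%). */
		uint64_t thresh = s->total / 10;
		if (thresh < 1)
			thresh = 1;
		return s->missed < thresh;
	}
	/* Rotational: compare the mean completion time against the target. */
	return s->nr == 0 || s->sum_ns / s->nr <= min_lat_nsec;
}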
281 struct iolatency_grp *iolat = private_data;
282 return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);
286 struct iolatency_grp *iolat,
290 struct rq_wait *rqw = &iolat->rq_wait;
291 unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
308 rq_qos_wait(rqw, iolat, iolat_acquire_inflight, iolat_cleanup_cb);
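iolat_acquire_inflight() and __blkcg_iolatency_throttle() gate submissions on rq_wait_inc_below(): take an inflight slot only if the group's count is still below rq_depth.max_depth, and otherwise let rq_qos_wait() put the submitter to sleep until completions wake the queue (the wake_up_all() calls that appear later in the listing). A user-space sketch of just the lock-free admission step, assuming C11 atomics; inflight_inc_below is a hypothetical name.

#include <stdatomic.h>
#include <stdbool.h>

static bool inflight_inc_below(atomic_uint *inflight, unsigned int max_depth)
{
	unsigned int cur = atomic_load(inflight);

	do {
		if (cur >= max_depth)
			return false;	/* group is at its depth limit, caller must wait */
	} while (!atomic_compare_exchange_weak(inflight, &cur, cur + 1));

	return true;
}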
370 static void scale_change(struct iolatency_grp *iolat, bool up)
372 unsigned long qd = iolat->blkiolat->rqos.q->nr_requests;
374 unsigned long old = iolat->rq_depth.max_depth;
380 if (old == 1 && blkcg_unuse_delay(lat_to_blkg(iolat)))
386 iolat->rq_depth.max_depth = old;
387 wake_up_all(&iolat->rq_wait.wait);
391 iolat->rq_depth.max_depth = max(old, 1UL);
396 static void check_scale_change(struct iolatency_grp *iolat)
401 unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
406 if (lat_to_blkg(iolat)->parent == NULL)
409 parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
424 old = atomic_cmpxchg(&iolat->scale_cookie, our_cookie, cur_cookie);
430 if (direction < 0 && iolat->min_lat_nsec) {
433 if (!scale_lat || iolat->min_lat_nsec <= scale_lat)
444 if (iolat->nr_samples <= samples_thresh)
449 if (iolat->rq_depth.max_depth == 1 && direction < 0) {
450 blkcg_use_delay(lat_to_blkg(iolat));
456 blkcg_clear_delay(lat_to_blkg(iolat));
457 iolat->rq_depth.max_depth = UINT_MAX;
458 wake_up_all(&iolat->rq_wait.wait);
462 scale_change(iolat, direction > 0);
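check_scale_change() compares the group's scale_cookie against its parent's to decide the direction, and scale_change() then widens or narrows the group's queue depth. The step sizes in the sketch below are placeholders (they are not visible in the listing); what the matched lines do show is that max_depth is capped at the device's nr_requests, floored at 1, and that crossing depth 1 toggles blkcg use_delay throttling.

#include <stdbool.h>

static unsigned long scale_depth(unsigned long max_depth, unsigned long qd,
				 bool up)
{
	if (max_depth > qd)
		max_depth = qd;

	if (up) {
		unsigned long step = qd / 4 ? qd / 4 : 1;	/* placeholder step */

		max_depth += step;
		if (max_depth > qd)
			max_depth = qd;	/* never exceed the device queue depth */
	} else {
		max_depth >>= 1;	/* placeholder step */
		if (max_depth < 1)
			max_depth = 1;	/* always allow at least one request */
	}
	return max_depth;
}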
475 struct iolatency_grp *iolat = blkg_to_lat(blkg);
476 if (!iolat) {
481 check_scale_change(iolat);
482 __blkcg_iolatency_throttle(rqos, iolat, issue_as_root,
490 static void iolatency_record_time(struct iolatency_grp *iolat,
512 if (unlikely(issue_as_root && iolat->rq_depth.max_depth != UINT_MAX)) {
513 u64 sub = iolat->min_lat_nsec;
515 blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
519 latency_stat_record_time(iolat, req_time);
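The issue_as_root branch above keeps root-issued bios out of the group's statistics; instead, if the group is currently throttled (max_depth != UINT_MAX) and the bio finished faster than the group's target, the unused portion of the target is charged to the cgroup as delay via blkcg_add_delay(). The charge is just the difference, as in this hypothetical helper.

#include <stdint.h>

/* Delay to charge for a root-issued bio that beat the group's target. */
static uint64_t root_issue_delay_charge(uint64_t min_lat_nsec, uint64_t req_time_ns)
{
	return req_time_ns < min_lat_nsec ? min_lat_nsec - req_time_ns : 0;
}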
525 static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
527 struct blkcg_gq *blkg = lat_to_blkg(iolat);
534 latency_stat_init(iolat, &stat);
538 s = per_cpu_ptr(iolat->stats, cpu);
539 latency_stat_sum(iolat, &stat, s);
540 latency_stat_init(iolat, s);
550 iolat_update_total_lat_avg(iolat, &stat);
553 if (latency_sum_ok(iolat, &stat) &&
560 latency_stat_sum(iolat, &iolat->cur_stat, &stat);
561 lat_info->nr_samples -= iolat->nr_samples;
562 lat_info->nr_samples += latency_stat_samples(iolat, &iolat->cur_stat);
563 iolat->nr_samples = latency_stat_samples(iolat, &iolat->cur_stat);
569 if (latency_sum_ok(iolat, &iolat->cur_stat) &&
570 latency_sum_ok(iolat, &stat)) {
571 if (latency_stat_samples(iolat, &iolat->cur_stat) <
574 if (lat_info->scale_grp == iolat) {
576 scale_cookie_change(iolat->blkiolat, lat_info, true);
579 lat_info->scale_lat >= iolat->min_lat_nsec) {
582 lat_info->scale_lat > iolat->min_lat_nsec) {
583 WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec);
584 lat_info->scale_grp = iolat;
586 scale_cookie_change(iolat->blkiolat, lat_info, false);
588 latency_stat_init(iolat, &iolat->cur_stat);
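iolatency_check_latencies() drains the per-CPU stats once per window: it zeroes a local accumulator, sums each CPU's copy into it, resets the per-CPU copies, and then feeds the window total into the running average and the scale-cookie decision. A user-space analogue of the drain step, with a fixed-size array and NR_CPUS_EXAMPLE standing in for alloc_percpu() storage.

#include <stdint.h>
#include <string.h>

#define NR_CPUS_EXAMPLE 8

struct win_stat {
	uint64_t total;
	uint64_t missed;
};

static struct win_stat drain_window(struct win_stat percpu[NR_CPUS_EXAMPLE])
{
	struct win_stat sum = { 0, 0 };

	for (int cpu = 0; cpu < NR_CPUS_EXAMPLE; cpu++) {
		sum.total  += percpu[cpu].total;
		sum.missed += percpu[cpu].missed;
		/* Reset the per-CPU copy so the next window starts clean. */
		memset(&percpu[cpu], 0, sizeof(percpu[cpu]));
	}
	return sum;
}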
597 struct iolatency_grp *iolat;
607 iolat = blkg_to_lat(bio->bi_blkg);
608 if (!iolat)
611 if (!iolat->blkiolat->enabled)
616 iolat = blkg_to_lat(blkg);
617 if (!iolat) {
621 rqw = &iolat->rq_wait;
629 if (iolat->min_lat_nsec && bio->bi_status != BLK_STS_AGAIN) {
630 iolatency_record_time(iolat, &bio->bi_issue, now,
632 window_start = atomic64_read(&iolat->window_start);
634 (now - window_start) >= iolat->cur_win_nsec) {
635 if (atomic64_cmpxchg(&iolat->window_start,
637 iolatency_check_latencies(iolat, now);
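At completion time the matched lines only flush statistics once the current window has elapsed, and they use atomic64_cmpxchg() on window_start so that exactly one of the concurrently completing bios wins the race and calls iolatency_check_latencies(). A user-space analogue using C11 atomics; maybe_roll_window is a hypothetical name, and a caller would flush the window's stats only when it returns true.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static bool maybe_roll_window(_Atomic uint64_t *window_start, uint64_t now,
			      uint64_t win_nsec)
{
	uint64_t start = atomic_load(window_start);

	if (now <= start || now - start < win_nsec)
		return false;	/* window not over yet */

	/* Only the thread whose compare-and-swap succeeds processes this window. */
	return atomic_compare_exchange_strong(window_start, &start, now);
}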
671 struct iolatency_grp *iolat;
683 iolat = blkg_to_lat(blkg);
684 if (!iolat)
687 lat_info = &iolat->child_lat;
702 scale_cookie_change(iolat->blkiolat, lat_info, true);
792 struct iolatency_grp *iolat = blkg_to_lat(blkg);
793 struct blk_iolatency *blkiolat = iolat->blkiolat;
794 u64 oldval = iolat->min_lat_nsec;
796 iolat->min_lat_nsec = val;
797 iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
798 iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
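The two lines above size the sampling window as 16x the configured latency target, clamped between BLKIOLATENCY_MIN_WIN_SIZE and its MAX counterpart. A small sketch of that sizing; the bound values below are illustrative stand-ins, since the listing does not show the constants' definitions.

#include <stdint.h>

#define EXAMPLE_MIN_WIN_NSEC (100ULL * 1000 * 1000)	/* 100 ms, assumed */
#define EXAMPLE_MAX_WIN_NSEC (1000ULL * 1000 * 1000)	/* 1 s, assumed */

static uint64_t window_size_for_target(uint64_t min_lat_nsec)
{
	uint64_t win = min_lat_nsec << 4;	/* 16x the latency target */

	if (win < EXAMPLE_MIN_WIN_NSEC)
		win = EXAMPLE_MIN_WIN_NSEC;
	if (win > EXAMPLE_MAX_WIN_NSEC)
		win = EXAMPLE_MAX_WIN_NSEC;
	return win;
}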
815 struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
817 if (!iolat)
820 lat_info = &iolat->child_lat;
836 struct iolatency_grp *iolat;
846 iolat = blkg_to_lat(ctx.blkg);
873 oldval = iolat->min_lat_nsec;
876 if (oldval != iolat->min_lat_nsec)
887 struct iolatency_grp *iolat = pd_to_lat(pd);
890 if (!dname || !iolat->min_lat_nsec)
893 dname, div_u64(iolat->min_lat_nsec, NSEC_PER_USEC));
905 static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
911 latency_stat_init(iolat, &stat);
915 s = per_cpu_ptr(iolat->stats, cpu);
916 latency_stat_sum(iolat, &stat, s);
920 if (iolat->rq_depth.max_depth == UINT_MAX)
927 iolat->rq_depth.max_depth);
933 struct iolatency_grp *iolat = pd_to_lat(pd);
940 if (iolat->ssd)
941 return iolatency_ssd_stat(iolat, buf, size);
943 avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
944 cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
945 if (iolat->rq_depth.max_depth == UINT_MAX)
950 iolat->rq_depth.max_depth, avg_lat, cur_win);
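The stat output lines above convert the running average to microseconds and the current window to milliseconds, and print an effectively unlimited depth (UINT_MAX) as "max". A sketch of that unit handling; the format string itself is an assumption, only the conversions and the UINT_MAX special case come from the listing.

#include <limits.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int format_stat(char *buf, size_t size, unsigned int max_depth,
		       uint64_t lat_avg_ns, uint64_t cur_win_ns)
{
	uint64_t avg_lat_us = lat_avg_ns / 1000;	/* NSEC_PER_USEC */
	uint64_t cur_win_ms = cur_win_ns / 1000000;	/* NSEC_PER_MSEC */

	if (max_depth == UINT_MAX)
		return snprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
				(unsigned long long)avg_lat_us,
				(unsigned long long)cur_win_ms);
	return snprintf(buf, size, " depth=%u avg_lat=%llu win=%llu",
			max_depth,
			(unsigned long long)avg_lat_us,
			(unsigned long long)cur_win_ms);
}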
958 struct iolatency_grp *iolat;
960 iolat = kzalloc_node(sizeof(*iolat), gfp, q->node);
961 if (!iolat)
963 iolat->stats = __alloc_percpu_gfp(sizeof(struct latency_stat),
965 if (!iolat->stats) {
966 kfree(iolat);
969 return &iolat->pd;
974 struct iolatency_grp *iolat = pd_to_lat(pd);
975 struct blkcg_gq *blkg = lat_to_blkg(iolat);
982 iolat->ssd = true;
984 iolat->ssd = false;
988 stat = per_cpu_ptr(iolat->stats, cpu);
989 latency_stat_init(iolat, stat);
992 latency_stat_init(iolat, &iolat->cur_stat);
993 rq_wait_init(&iolat->rq_wait);
994 spin_lock_init(&iolat->child_lat.lock);
995 iolat->rq_depth.queue_depth = blkg->q->nr_requests;
996 iolat->rq_depth.max_depth = UINT_MAX;
997 iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
998 iolat->blkiolat = blkiolat;
999 iolat->cur_win_nsec = 100 * NSEC_PER_MSEC;
1000 atomic64_set(&iolat->window_start, now);
1008 atomic_set(&iolat->scale_cookie,
1011 atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
1014 atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);
1019 struct iolatency_grp *iolat = pd_to_lat(pd);
1020 struct blkcg_gq *blkg = lat_to_blkg(iolat);
1028 struct iolatency_grp *iolat = pd_to_lat(pd);
1029 free_percpu(iolat->stats);
1030 kfree(iolat);
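The allocation and teardown lines above follow the usual two-step pattern: allocate the group, then its per-CPU stats; if the second allocation fails, free the first before returning, and release both (per-CPU storage first) on teardown. A user-space analogue with calloc standing in for kzalloc_node and __alloc_percpu_gfp; grp_example is a hypothetical stand-in for struct iolatency_grp.

#include <stdlib.h>

struct grp_example {
	struct { unsigned long total, missed; } *stats;	/* one slot per CPU */
};

static struct grp_example *grp_alloc(int nr_cpus)
{
	struct grp_example *grp = calloc(1, sizeof(*grp));

	if (!grp)
		return NULL;
	grp->stats = calloc(nr_cpus, sizeof(*grp->stats));
	if (!grp->stats) {
		free(grp);	/* undo the first allocation on failure */
		return NULL;
	}
	return grp;
}

static void grp_free(struct grp_example *grp)
{
	free(grp->stats);
	free(grp);
}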