Lines matching refs: iolat (block/blk-iolatency.c)

93 	 * inflight tracking. The number of cgroups which have iolat enabled is
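The match at source line 93 sits in the comment over struct blk_iolatency's enable bookkeeping. For orientation, a sketch of that structure reconstructed from recent upstream trees; field layout and the comment's tail may differ by kernel version:

    struct blk_iolatency {
            struct rq_qos rqos;
            struct timer_list timer;

            /*
             * ->enabled is the master enable switch gating the throttling
             * logic and inflight tracking. The number of cgroups which have
             * iolat enabled is tracked in ->enable_cnt, and ->enabled is
             * flipped from a work item to stay out of the IO path.
             */
            bool enabled;
            atomic_t enable_cnt;
            struct work_struct enable_work;
    };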
193 static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
195 return pd_to_blkg(&iolat->pd);
198 static inline void latency_stat_init(struct iolatency_grp *iolat,
201 if (iolat->ssd) {
208 static inline void latency_stat_sum(struct iolatency_grp *iolat,
212 if (iolat->ssd) {
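Lines 198-212 are the per-cpu stat helpers. One union carries two representations: on SSDs only total vs. missed request counts are kept, on rotational disks a full mean via blk_rq_stat. A sketch reconstructed from these matches (struct and helper names follow upstream):

    struct latency_stat {
            union {
                    struct percentile_stats ps;     /* ssd: total/missed */
                    struct blk_rq_stat rqs;         /* rotational: mean */
            };
    };

    static inline void latency_stat_init(struct iolatency_grp *iolat,
                                         struct latency_stat *stat)
    {
            if (iolat->ssd) {
                    stat->ps.total = 0;
                    stat->ps.missed = 0;
            } else
                    blk_rq_stat_init(&stat->rqs);
    }

    static inline void latency_stat_sum(struct iolatency_grp *iolat,
                                        struct latency_stat *sum,
                                        struct latency_stat *stat)
    {
            if (iolat->ssd) {
                    sum->ps.total += stat->ps.total;
                    sum->ps.missed += stat->ps.missed;
            } else
                    blk_rq_stat_sum(&sum->rqs, &stat->rqs);
    }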
219 static inline void latency_stat_record_time(struct iolatency_grp *iolat,
222 struct latency_stat *stat = get_cpu_ptr(iolat->stats);
223 if (iolat->ssd) {
224 if (req_time >= iolat->min_lat_nsec)
232 static inline bool latency_sum_ok(struct iolatency_grp *iolat,
235 if (iolat->ssd) {
240 return stat->rqs.mean <= iolat->min_lat_nsec;
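Lines 219-240 record a completion and decide whether a window was "good". On SSDs a window passes if fewer than roughly 10% of requests missed the target; on rotational disks the window mean is compared against min_lat_nsec directly. Sketch (the 10% threshold is from upstream memory, verify against your tree):

    static inline void latency_stat_record_time(struct iolatency_grp *iolat,
                                                u64 req_time)
    {
            struct latency_stat *stat = get_cpu_ptr(iolat->stats);

            if (iolat->ssd) {
                    if (req_time >= iolat->min_lat_nsec)
                            stat->ps.missed++;
                    stat->ps.total++;
            } else
                    blk_rq_stat_add(&stat->rqs, req_time);
            put_cpu_ptr(iolat->stats);
    }

    static inline bool latency_sum_ok(struct iolatency_grp *iolat,
                                      struct latency_stat *stat)
    {
            if (iolat->ssd) {
                    u64 thresh = div64_u64(stat->ps.total, 10);

                    thresh = max(thresh, 1ULL);
                    return stat->ps.missed < thresh;
            }
            return stat->rqs.mean <= iolat->min_lat_nsec;
    }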
243 static inline u64 latency_stat_samples(struct iolatency_grp *iolat,
246 if (iolat->ssd)
251 static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
256 if (iolat->ssd)
267 div64_u64(iolat->cur_win_nsec,
269 iolat->lat_avg = calc_load(iolat->lat_avg,
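Lines 251-269 fold each rotational window's mean into a long-running average using the scheduler's calc_load() fixed-point helper; the decay factor is picked by how many exp-bucket lengths fit in the current window. Sketch, with the upstream constant names (values omitted here):

    static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
                                                  struct latency_stat *stat)
    {
            int exp_idx;

            if (iolat->ssd)
                    return;                 /* no mean tracked on ssd */

            /* decay factor proportional to the window length */
            exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
                            div64_u64(iolat->cur_win_nsec,
                                      BLKIOLATENCY_EXP_BUCKET_SIZE) - 1);
            iolat->lat_avg = calc_load(iolat->lat_avg,
                                       iolatency_exp_factors[exp_idx],
                                       stat->rqs.mean);
    }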
282 struct iolatency_grp *iolat = private_data;
283 return rq_wait_inc_below(rqw, iolat->max_depth);
287 struct iolatency_grp *iolat,
291 struct rq_wait *rqw = &iolat->rq_wait;
292 unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
309 rq_qos_wait(rqw, iolat, iolat_acquire_inflight, iolat_cleanup_cb);
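Lines 282-309 are the queue-depth gate. iolat_acquire_inflight() is the callback rq_qos_wait() retries until the group's inflight count drops below max_depth; root-issued and fatally-signalled bios just take a slot to avoid priority inversion. Sketched from the matches (blkcg_schedule_throttle() takes the gendisk in recent trees, the request_queue in older ones):

    static bool iolat_acquire_inflight(struct rq_wait *rqw, void *private_data)
    {
            struct iolatency_grp *iolat = private_data;

            return rq_wait_inc_below(rqw, iolat->max_depth);
    }

    static void iolat_cleanup_cb(struct rq_wait *rqw, void *private_data)
    {
            atomic_dec(&rqw->inflight);
            wake_up(&rqw->wait);
    }

    static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
                                           struct iolatency_grp *iolat,
                                           bool issue_as_root,
                                           bool use_memdelay)
    {
            struct rq_wait *rqw = &iolat->rq_wait;
            unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);

            if (use_delay)
                    blkcg_schedule_throttle(rqos->disk, use_memdelay);

            /* root or dying tasks take a slot and go straight through */
            if (issue_as_root || fatal_signal_pending(current)) {
                    atomic_inc(&rqw->inflight);
                    return;
            }

            rq_qos_wait(rqw, iolat, iolat_acquire_inflight, iolat_cleanup_cb);
    }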
373 static void scale_change(struct iolatency_grp *iolat, bool up)
375 unsigned long qd = iolat->blkiolat->rqos.disk->queue->nr_requests;
377 unsigned long old = iolat->max_depth;
383 if (old == 1 && blkcg_unuse_delay(lat_to_blkg(iolat)))
389 iolat->max_depth = old;
390 wake_up_all(&iolat->rq_wait.wait);
394 iolat->max_depth = max(old, 1UL);
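Lines 373-394 adjust the depth: scaling up adds a step (bounded by the device queue depth) and wakes waiters, scaling down halves the depth with a floor of 1. The step size comes from a scale_amount() helper not captured by these matches. A sketch:

    static void scale_change(struct iolatency_grp *iolat, bool up)
    {
            unsigned long qd = iolat->blkiolat->rqos.disk->queue->nr_requests;
            unsigned long scale = scale_amount(qd, up);
            unsigned long old = iolat->max_depth;

            if (old > qd)
                    old = qd;

            if (up) {
                    /* a depth-1 group sheds its use_delay marker first */
                    if (old == 1 && blkcg_unuse_delay(lat_to_blkg(iolat)))
                            return;

                    if (old < qd) {
                            old += scale;
                            old = min(old, qd);
                            iolat->max_depth = old;
                            wake_up_all(&iolat->rq_wait.wait);
                    }
            } else {
                    old >>= 1;
                    iolat->max_depth = max(old, 1UL);
            }
    }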
399 static void check_scale_change(struct iolatency_grp *iolat)
404 unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
408 parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
423 if (!atomic_try_cmpxchg(&iolat->scale_cookie, &our_cookie, cur_cookie)) {
428 if (direction < 0 && iolat->min_lat_nsec) {
431 if (!scale_lat || iolat->min_lat_nsec <= scale_lat)
442 if (iolat->nr_samples <= samples_thresh)
447 if (iolat->max_depth == 1 && direction < 0) {
448 blkcg_use_delay(lat_to_blkg(iolat));
454 blkcg_clear_delay(lat_to_blkg(iolat));
455 iolat->max_depth = UINT_MAX;
456 wake_up_all(&iolat->rq_wait.wait);
460 scale_change(iolat, direction > 0);
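Lines 399-460 compare the group's cached scale_cookie against its parent's: a lower parent cookie means scale down, a higher one means scale up. The atomic_try_cmpxchg() claims the event so only one issuer acts on it; small contributors (at or below ~5% of samples) skip scale-downs, depth-1 groups fall back to blkcg_use_delay(), and DEFAULT_SCALE_COOKIE unthrottles everything. Sketch from the matches:

    static void check_scale_change(struct iolatency_grp *iolat)
    {
            struct iolatency_grp *parent;
            struct child_latency_info *lat_info;
            unsigned int cur_cookie;
            unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
            u64 scale_lat;
            int direction = 0;

            parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
            if (!parent)
                    return;

            lat_info = &parent->child_lat;
            cur_cookie = atomic_read(&lat_info->scale_cookie);
            scale_lat = READ_ONCE(lat_info->scale_lat);

            if (cur_cookie < our_cookie)
                    direction = -1;
            else if (cur_cookie > our_cookie)
                    direction = 1;
            else
                    return;

            /* claim the event; bail if another issuer got here first */
            if (!atomic_try_cmpxchg(&iolat->scale_cookie, &our_cookie,
                                    cur_cookie))
                    return;

            if (direction < 0 && iolat->min_lat_nsec) {
                    u64 samples_thresh;

                    if (!scale_lat || iolat->min_lat_nsec <= scale_lat)
                            return;

                    /* groups doing <= ~5% of the IO skip the scale-down */
                    samples_thresh = lat_info->nr_samples * 5;
                    samples_thresh = max(1ULL,
                                         div64_u64(samples_thresh, 100));
                    if (iolat->nr_samples <= samples_thresh)
                            return;
            }

            /* already at depth 1: switch to explicit delays instead */
            if (iolat->max_depth == 1 && direction < 0) {
                    blkcg_use_delay(lat_to_blkg(iolat));
                    return;
            }

            /* back to the default cookie: unthrottle everything */
            if (cur_cookie == DEFAULT_SCALE_COOKIE) {
                    blkcg_clear_delay(lat_to_blkg(iolat));
                    iolat->max_depth = UINT_MAX;
                    wake_up_all(&iolat->rq_wait.wait);
                    return;
            }

            scale_change(iolat, direction > 0);
    }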
473 struct iolatency_grp *iolat = blkg_to_lat(blkg);
474 if (!iolat) {
479 check_scale_change(iolat);
480 __blkcg_iolatency_throttle(rqos, iolat, issue_as_root,
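Lines 473-480 sit inside the rq_qos throttle hook, which walks from the bio's blkg up to (but not including) the root, applying the scale check and depth gate at each level; the REQ_SWAP test selects memdelay-style throttling for swap IO. The surrounding loop, as a fragment (rqos, bio, blkg, issue_as_root come from the enclosing hook):

    while (blkg && blkg->parent) {
            struct iolatency_grp *iolat = blkg_to_lat(blkg);

            if (!iolat) {
                    blkg = blkg->parent;
                    continue;
            }

            check_scale_change(iolat);
            __blkcg_iolatency_throttle(rqos, iolat, issue_as_root,
                                       (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
            blkg = blkg->parent;
    }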
488 static void iolatency_record_time(struct iolatency_grp *iolat,
510 if (unlikely(issue_as_root && iolat->max_depth != UINT_MAX)) {
511 u64 sub = iolat->min_lat_nsec;
513 blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
517 latency_stat_record_time(iolat, req_time);
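Lines 488-517 compute the request's wall time. Root-issued bios are excluded from the group's stats, since counting them would skew the mean downward; instead, while the group is throttled, any time such a bio "won" under the target is charged back to the group as delay (line 513). Sketch of that branch:

    req_time = now - start;

    /*
     * Don't count issue_as_root bios in the cgroup's stats; while the
     * group is throttled (max_depth != UINT_MAX), charge the time the
     * root bio came in under the target back to the group as delay.
     */
    if (unlikely(issue_as_root && iolat->max_depth != UINT_MAX)) {
            u64 sub = iolat->min_lat_nsec;

            if (req_time < sub)
                    blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
            return;
    }

    latency_stat_record_time(iolat, req_time);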
523 static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
525 struct blkcg_gq *blkg = lat_to_blkg(iolat);
532 latency_stat_init(iolat, &stat);
536 s = per_cpu_ptr(iolat->stats, cpu);
537 latency_stat_sum(iolat, &stat, s);
538 latency_stat_init(iolat, s);
548 iolat_update_total_lat_avg(iolat, &stat);
551 if (latency_sum_ok(iolat, &stat) &&
558 latency_stat_sum(iolat, &iolat->cur_stat, &stat);
559 lat_info->nr_samples -= iolat->nr_samples;
560 lat_info->nr_samples += latency_stat_samples(iolat, &iolat->cur_stat);
561 iolat->nr_samples = latency_stat_samples(iolat, &iolat->cur_stat);
567 if (latency_sum_ok(iolat, &iolat->cur_stat) &&
568 latency_sum_ok(iolat, &stat)) {
569 if (latency_stat_samples(iolat, &iolat->cur_stat) <
572 if (lat_info->scale_grp == iolat) {
574 scale_cookie_change(iolat->blkiolat, lat_info, true);
577 lat_info->scale_lat >= iolat->min_lat_nsec) {
580 lat_info->scale_lat > iolat->min_lat_nsec) {
581 WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec);
582 lat_info->scale_grp = iolat;
584 scale_cookie_change(iolat->blkiolat, lat_info, false);
586 latency_stat_init(iolat, &iolat->cur_stat);
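Lines 523-586 close out a window: per-cpu stats are summed and reset, the running average is updated, and, unless the window was good and the cookie is already at DEFAULT_SCALE_COOKIE (line 551), a scale event is proposed under child_lat.lock. Only the current scale_grp may trigger a scale-up, while a violator with a tighter min_lat_nsec takes over scale_grp before a scale-down. The decision tail, sketched as a fragment held under lat_info->lock (the out: label unlocks without resetting cur_stat):

    latency_stat_sum(iolat, &iolat->cur_stat, &stat);
    lat_info->nr_samples -= iolat->nr_samples;
    lat_info->nr_samples += latency_stat_samples(iolat, &iolat->cur_stat);
    iolat->nr_samples = latency_stat_samples(iolat, &iolat->cur_stat);

    if (latency_sum_ok(iolat, &iolat->cur_stat) &&
        latency_sum_ok(iolat, &stat)) {
            if (latency_stat_samples(iolat, &iolat->cur_stat) <
                BLKIOLATENCY_MIN_GOOD_SAMPLES)
                    goto out;       /* keep accumulating cur_stat */
            if (lat_info->scale_grp == iolat) {
                    lat_info->last_scale_event = now;
                    scale_cookie_change(iolat->blkiolat, lat_info, true);
            }
    } else if (lat_info->scale_lat == 0 ||
               lat_info->scale_lat >= iolat->min_lat_nsec) {
            lat_info->last_scale_event = now;
            /* the tightest violating target takes over as scale_grp */
            if (!lat_info->scale_grp ||
                lat_info->scale_lat > iolat->min_lat_nsec) {
                    WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec);
                    lat_info->scale_grp = iolat;
            }
            scale_cookie_change(iolat->blkiolat, lat_info, false);
    }
    latency_stat_init(iolat, &iolat->cur_stat);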
595 struct iolatency_grp *iolat;
605 iolat = blkg_to_lat(bio->bi_blkg);
606 if (!iolat)
609 if (!iolat->blkiolat->enabled)
614 iolat = blkg_to_lat(blkg);
615 if (!iolat) {
619 rqw = &iolat->rq_wait;
627 if (iolat->min_lat_nsec && bio->bi_status != BLK_STS_AGAIN) {
628 iolatency_record_time(iolat, &bio->bi_issue, now,
630 window_start = atomic64_read(&iolat->window_start);
632 (now - window_start) >= iolat->cur_win_nsec) {
633 if (atomic64_try_cmpxchg(&iolat->window_start,
635 iolatency_check_latencies(iolat, now);
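Lines 595-635 are the completion hook. BLK_STS_AGAIN completions are skipped because the bio was never actually issued, and the window rotation is lock-free: whoever wins the atomic64_try_cmpxchg() on window_start evaluates the window, so iolatency_check_latencies() runs at most once per window. Sketch of that fragment:

    if (iolat->min_lat_nsec && bio->bi_status != BLK_STS_AGAIN) {
            iolatency_record_time(iolat, &bio->bi_issue, now, issue_as_root);
            window_start = atomic64_read(&iolat->window_start);
            if (now > window_start &&
                (now - window_start) >= iolat->cur_win_nsec) {
                    /* only the cmpxchg winner evaluates this window */
                    if (atomic64_try_cmpxchg(&iolat->window_start,
                                             &window_start, now))
                            iolatency_check_latencies(iolat, now);
            }
    }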
669 struct iolatency_grp *iolat;
681 iolat = blkg_to_lat(blkg);
682 if (!iolat)
685 lat_info = &iolat->child_lat;
700 scale_cookie_change(iolat->blkiolat, lat_info, true);
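Lines 669-700 are the periodic timer. It walks every descendant blkg; any parent stuck below DEFAULT_SCALE_COOKIE with no owning scale_grp, or no scale event for about five seconds in upstream, gets a scale-up, so a vanished violator cannot pin its siblings down forever. The per-blkg body, condensed from memory (blkg_tryget()/blkg_put() refcounting omitted):

    lat_info = &iolat->child_lat;
    cookie = atomic_read(&lat_info->scale_cookie);
    if (cookie >= DEFAULT_SCALE_COOKIE)
            continue;               /* nothing scaled down here */

    spin_lock_irqsave(&lat_info->lock, flags);
    if (lat_info->last_scale_event < now &&
        (lat_info->scale_grp == NULL ||
         now - lat_info->last_scale_event >= 5 * NSEC_PER_SEC))
            scale_cookie_change(iolat->blkiolat, lat_info, true);
    spin_unlock_irqrestore(&lat_info->lock, flags);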
789 struct iolatency_grp *iolat = blkg_to_lat(blkg);
790 struct blk_iolatency *blkiolat = iolat->blkiolat;
791 u64 oldval = iolat->min_lat_nsec;
793 iolat->min_lat_nsec = val;
794 iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
795 iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
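Lines 789-795 derive the sampling window from the target: sixteen times min_lat_nsec (val << 4), clamped to [BLKIOLATENCY_MIN_WIN_SIZE, BLKIOLATENCY_MAX_WIN_SIZE]. With upstream's 100ms minimum and 1s maximum, a 5ms target gives 80ms, clamped up to 100ms, while a 100ms target gives 1.6s, clamped down to 1s. Sketch, including the enable_cnt bookkeeping implied by the comment matched at line 93 (reconstructed from recent trees):

    iolat->min_lat_nsec = val;
    iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
    iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
                                BLKIOLATENCY_MAX_WIN_SIZE);

    /* first enable / last disable flips the global switch via a work item */
    if (!oldval && val) {
            if (atomic_inc_return(&blkiolat->enable_cnt) == 1)
                    schedule_work(&blkiolat->enable_work);
    }
    if (oldval && !val) {
            blkcg_clear_delay(blkg);
            if (atomic_dec_return(&blkiolat->enable_cnt) == 0)
                    schedule_work(&blkiolat->enable_work);
    }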
812 struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
814 if (!iolat)
817 lat_info = &iolat->child_lat;
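Lines 812-817 reset the parent's scaling state when a group's target changes (the set-limit path at lines 833-889 calls this): the cookie returns to DEFAULT_SCALE_COOKIE and scale_grp/scale_lat are cleared under the lock. A sketch from memory of upstream; the exact field resets may differ by version:

    static void iolatency_clear_scaling(struct blkcg_gq *blkg)
    {
            if (blkg->parent) {
                    struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
                    struct child_latency_info *lat_info;

                    if (!iolat)
                            return;

                    lat_info = &iolat->child_lat;
                    spin_lock(&lat_info->lock);
                    atomic_set(&lat_info->scale_cookie, DEFAULT_SCALE_COOKIE);
                    lat_info->last_scale_event = 0;
                    lat_info->scale_grp = NULL;
                    lat_info->scale_lat = 0;
                    spin_unlock(&lat_info->lock);
            }
    }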
833 struct iolatency_grp *iolat;
859 iolat = blkg_to_lat(ctx.blkg);
886 oldval = iolat->min_lat_nsec;
889 if (oldval != iolat->min_lat_nsec)
900 struct iolatency_grp *iolat = pd_to_lat(pd);
903 if (!dname || !iolat->min_lat_nsec)
906 dname, div_u64(iolat->min_lat_nsec, NSEC_PER_USEC));
918 static void iolatency_ssd_stat(struct iolatency_grp *iolat, struct seq_file *s)
923 latency_stat_init(iolat, &stat);
927 s = per_cpu_ptr(iolat->stats, cpu);
928 latency_stat_sum(iolat, &stat, s);
932 if (iolat->max_depth == UINT_MAX)
940 iolat->max_depth);
945 struct iolatency_grp *iolat = pd_to_lat(pd);
952 if (iolat->ssd)
953 return iolatency_ssd_stat(iolat, s);
955 avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
956 cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
957 if (iolat->max_depth == UINT_MAX)
962 iolat->max_depth, avg_lat, cur_win);
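Lines 900-962 format the user-visible output: the limit file prints target=<usec> per device, and the per-group stat line appends depth ("max" when UINT_MAX), the running average in microseconds, and the current window in milliseconds (the divisors at lines 955-956 fix the units). The rotational tail, sketched from the matches:

    avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
    cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
    if (iolat->max_depth == UINT_MAX)
            seq_printf(s, " depth=max avg_lat=%llu win=%llu",
                       avg_lat, cur_win);
    else
            seq_printf(s, " depth=%u avg_lat=%llu win=%llu",
                       iolat->max_depth, avg_lat, cur_win);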
968 struct iolatency_grp *iolat;
970 iolat = kzalloc_node(sizeof(*iolat), gfp, disk->node_id);
971 if (!iolat)
973 iolat->stats = __alloc_percpu_gfp(sizeof(struct latency_stat),
975 if (!iolat->stats) {
976 kfree(iolat);
979 return &iolat->pd;
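Lines 968-979 allocate the pd on the disk's NUMA node plus a per-cpu stats array; the match at line 973 elides __alloc_percpu_gfp()'s alignment and gfp arguments. Completed sketch (the pd_alloc signature shown follows recent kernels and may differ by version):

    static struct blkg_policy_data *iolatency_pd_alloc(struct gendisk *disk,
                                                       struct blkcg *blkcg,
                                                       gfp_t gfp)
    {
            struct iolatency_grp *iolat;

            iolat = kzalloc_node(sizeof(*iolat), gfp, disk->node_id);
            if (!iolat)
                    return NULL;
            iolat->stats = __alloc_percpu_gfp(sizeof(struct latency_stat),
                                              __alignof__(struct latency_stat),
                                              gfp);
            if (!iolat->stats) {
                    kfree(iolat);
                    return NULL;
            }
            return &iolat->pd;
    }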
984 struct iolatency_grp *iolat = pd_to_lat(pd);
985 struct blkcg_gq *blkg = lat_to_blkg(iolat);
992 iolat->ssd = true;
994 iolat->ssd = false;
998 stat = per_cpu_ptr(iolat->stats, cpu);
999 latency_stat_init(iolat, stat);
1002 latency_stat_init(iolat, &iolat->cur_stat);
1003 rq_wait_init(&iolat->rq_wait);
1004 spin_lock_init(&iolat->child_lat.lock);
1005 iolat->max_depth = UINT_MAX;
1006 iolat->blkiolat = blkiolat;
1007 iolat->cur_win_nsec = 100 * NSEC_PER_MSEC;
1008 atomic64_set(&iolat->window_start, now);
1016 atomic_set(&iolat->scale_cookie,
1019 atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
1022 atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);
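Lines 984-1022 initialize a new group: ssd detection, per-cpu stats zeroed, depth unlimited, a 100ms starting window, and, because pds are initialized in list order, the scale cookie is copied from the parent only when the parent's pd already exists, else set to DEFAULT_SCALE_COOKIE. The inheritance tail, sketched from the matches:

    /* pds init in list order; the parent may not be initialized yet */
    if (blkg->parent && blkg_to_pd(blkg->parent, &blkcg_policy_iolatency)) {
            struct iolatency_grp *parent = blkg_to_lat(blkg->parent);

            atomic_set(&iolat->scale_cookie,
                       atomic_read(&parent->child_lat.scale_cookie));
    } else {
            atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
    }

    atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);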
1027 struct iolatency_grp *iolat = pd_to_lat(pd);
1028 struct blkcg_gq *blkg = lat_to_blkg(iolat);
1036 struct iolatency_grp *iolat = pd_to_lat(pd);
1037 free_percpu(iolat->stats);
1038 kfree(iolat);