Lines matching defs:rwb (apparently from block/blk-wbt.c, the Linux kernel's writeback-throttling code); each line is prefixed by its line number in that file.
146 static inline bool rwb_enabled(struct rq_wb *rwb)
148 return rwb && rwb->enable_state != WBT_STATE_OFF_DEFAULT &&
149 rwb->enable_state != WBT_STATE_OFF_MANUAL;
152 static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
154 if (rwb_enabled(rwb)) {
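Only the lines mentioning rwb are listed, so the two helpers read better reconstructed in full. A sketch based on blk-wbt.c; the jiffies-caching body of wb_timestamp() is filled in from memory and may differ slightly between kernel versions:

static inline bool rwb_enabled(struct rq_wb *rwb)
{
        return rwb && rwb->enable_state != WBT_STATE_OFF_DEFAULT &&
                      rwb->enable_state != WBT_STATE_OFF_MANUAL;
}

static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
{
        if (rwb_enabled(rwb)) {
                const unsigned long cur = jiffies;

                /* only dirty the cacheline when the stamp actually changes */
                if (cur != *var)
                        *var = cur;
        }
}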
166 static bool wb_recent_wait(struct rq_wb *rwb)
168 struct backing_dev_info *bdi = rwb->rqos.disk->bdi;
173 static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
177 return &rwb->rq_wait[WBT_RWQ_KSWAPD];
179 return &rwb->rq_wait[WBT_RWQ_DISCARD];
181 return &rwb->rq_wait[WBT_RWQ_BG];
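The matches omit the conditions that select each waitqueue. A plausible reconstruction, assuming the usual WBT_KSWAPD/WBT_DISCARD flag tests from blk-wbt.c:

static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
                                          enum wbt_flags wb_acct)
{
        if (wb_acct & WBT_KSWAPD)
                return &rwb->rq_wait[WBT_RWQ_KSWAPD];
        else if (wb_acct & WBT_DISCARD)
                return &rwb->rq_wait[WBT_RWQ_DISCARD];

        return &rwb->rq_wait[WBT_RWQ_BG];
}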
184 static void rwb_wake_all(struct rq_wb *rwb)
189 struct rq_wait *rqw = &rwb->rq_wait[i];
196 static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
209 limit = rwb->wb_background;
210 else if (rwb->wc && !wb_recent_wait(rwb))
213 limit = rwb->wb_normal;
224 if (!inflight || diff >= rwb->wb_background / 2)
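wbt_rqw_done() decides how far the inflight count must drop before sleepers are woken. A hedged reconstruction of the whole function; the atomic_dec_return(), wq_has_sleeper() and wake_up_all() calls are filled in rather than taken from the matched lines:

static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
                         enum wbt_flags wb_acct)
{
        int inflight, limit;

        inflight = atomic_dec_return(&rqw->inflight);

        /*
         * Discards wake at the background limit. Writes on a device with a
         * write-back cache, and with no recent dirty-page waiter, drain all
         * the way to zero before waking anyone.
         */
        if (wb_acct & WBT_DISCARD)
                limit = rwb->wb_background;
        else if (rwb->wc && !wb_recent_wait(rwb))
                limit = 0;
        else
                limit = rwb->wb_normal;

        /* still above the wake-up threshold: nothing to do */
        if (inflight && inflight >= limit)
                return;

        if (wq_has_sleeper(&rqw->wait)) {
                int diff = limit - inflight;

                if (!inflight || diff >= rwb->wb_background / 2)
                        wake_up_all(&rqw->wait);
        }
}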
231 struct rq_wb *rwb = RQWB(rqos);
237 rqw = get_rq_wait(rwb, wb_acct);
238 wbt_rqw_done(rwb, rqw, wb_acct);
247 struct rq_wb *rwb = RQWB(rqos);
250 if (rwb->sync_cookie == rq) {
251 rwb->sync_issue = 0;
252 rwb->sync_cookie = NULL;
256 wb_timestamp(rwb, &rwb->last_comp);
258 WARN_ON_ONCE(rq == rwb->sync_cookie);
276 static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
278 u64 now, issue = READ_ONCE(rwb->sync_issue);
280 if (!issue || !rwb->sync_cookie)
287 static inline unsigned int wbt_inflight(struct rq_wb *rwb)
292 ret += atomic_read(&rwb->rq_wait[i].inflight);
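Reconstructed in full, these two small helpers look roughly as follows (the READ_ONCE/ktime details are assumed from the upstream file):

static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
{
        u64 now, issue = READ_ONCE(rwb->sync_issue);

        /* no tracked sync read outstanding */
        if (!issue || !rwb->sync_cookie)
                return 0;

        now = ktime_to_ns(ktime_get());
        return now - issue;
}

static inline unsigned int wbt_inflight(struct rq_wb *rwb)
{
        unsigned int i, ret = 0;

        for (i = 0; i < WBT_NUM_RWQ; i++)
                ret += atomic_read(&rwb->rq_wait[i].inflight);

        return ret;
}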
304 static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
306 struct backing_dev_info *bdi = rwb->rqos.disk->bdi;
307 struct rq_depth *rqd = &rwb->rq_depth;
319 thislat = rwb_sync_issue_lat(rwb);
320 if (thislat > rwb->cur_win_nsec ||
321 (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
336 if (stat[WRITE].nr_samples || wb_recent_wait(rwb) ||
337 wbt_inflight(rwb))
345 if (stat[READ].min > rwb->min_lat_nsec) {
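The latency check is the heart of the algorithm, so a fuller sketch helps. The LAT_* return values, stat_sample_valid() and the trace_wbt_* calls are recalled from blk-wbt.c rather than present in the matched lines, so treat this as approximate:

static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
{
        struct backing_dev_info *bdi = rwb->rqos.disk->bdi;
        struct rq_depth *rqd = &rwb->rq_depth;
        u64 thislat;

        /*
         * A sync read issued but not yet completed for longer than the
         * window (or longer than the target when we have no read samples
         * at all) counts as an exceeded latency on its own.
         */
        thislat = rwb_sync_issue_lat(rwb);
        if (thislat > rwb->cur_win_nsec ||
            (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
                trace_wbt_lat(bdi, thislat);
                return LAT_EXCEEDED;
        }

        /*
         * Without a valid read/write sample mix we cannot judge latency;
         * report whether the window looks write-only or simply unknown.
         */
        if (!stat_sample_valid(stat)) {
                if (stat[WRITE].nr_samples || wb_recent_wait(rwb) ||
                    wbt_inflight(rwb))
                        return LAT_UNKNOWN_WRITES;
                return LAT_UNKNOWN;
        }

        /* minimum read latency above the target: the device is too busy */
        if (stat[READ].min > rwb->min_lat_nsec) {
                trace_wbt_lat(bdi, stat[READ].min);
                trace_wbt_stat(bdi, stat);
                return LAT_EXCEEDED;
        }

        if (rqd->scale_step)
                trace_wbt_stat(bdi, stat);

        return LAT_OK;
}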
357 static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
359 struct backing_dev_info *bdi = rwb->rqos.disk->bdi;
360 struct rq_depth *rqd = &rwb->rq_depth;
362 trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
363 rwb->wb_background, rwb->wb_normal, rqd->max_depth);
366 static void calc_wb_limits(struct rq_wb *rwb)
368 if (rwb->min_lat_nsec == 0) {
369 rwb->wb_normal = rwb->wb_background = 0;
370 } else if (rwb->rq_depth.max_depth <= 2) {
371 rwb->wb_normal = rwb->rq_depth.max_depth;
372 rwb->wb_background = 1;
374 rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
375 rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
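Every assignment of calc_wb_limits() appears in the matches; only the if/else skeleton is missing. Putting it back together:

static void calc_wb_limits(struct rq_wb *rwb)
{
        if (rwb->min_lat_nsec == 0) {
                /* throttling effectively off: no limits */
                rwb->wb_normal = rwb->wb_background = 0;
        } else if (rwb->rq_depth.max_depth <= 2) {
                /* tiny depth: normal gets everything, background gets 1 */
                rwb->wb_normal = rwb->rq_depth.max_depth;
                rwb->wb_background = 1;
        } else {
                /* otherwise roughly half and a quarter of max_depth */
                rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
                rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
        }
}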
379 static void scale_up(struct rq_wb *rwb)
381 if (!rq_depth_scale_up(&rwb->rq_depth))
383 calc_wb_limits(rwb);
384 rwb->unknown_cnt = 0;
385 rwb_wake_all(rwb);
386 rwb_trace_step(rwb, tracepoint_string("scale up"));
389 static void scale_down(struct rq_wb *rwb, bool hard_throttle)
391 if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))
393 calc_wb_limits(rwb);
394 rwb->unknown_cnt = 0;
395 rwb_trace_step(rwb, tracepoint_string("scale down"));
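The two scaling helpers differ only in which rq_depth helper they call and in whether they wake waiters; the early return is implied by the !rq_depth_scale_*() tests in the matched lines:

static void scale_up(struct rq_wb *rwb)
{
        if (!rq_depth_scale_up(&rwb->rq_depth))
                return;
        calc_wb_limits(rwb);
        rwb->unknown_cnt = 0;
        rwb_wake_all(rwb);
        rwb_trace_step(rwb, tracepoint_string("scale up"));
}

static void scale_down(struct rq_wb *rwb, bool hard_throttle)
{
        if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))
                return;
        calc_wb_limits(rwb);
        rwb->unknown_cnt = 0;
        rwb_trace_step(rwb, tracepoint_string("scale down"));
}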
398 static void rwb_arm_timer(struct rq_wb *rwb)
400 struct rq_depth *rqd = &rwb->rq_depth;
409 rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
416 rwb->cur_win_nsec = rwb->win_nsec;
419 blk_stat_activate_nsecs(rwb->cb, rwb->cur_win_nsec);
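rwb_arm_timer() shrinks the sampling window while the depth is scaled down. The int_sqrt() denominator below is an assumption filled in from memory; together with the matched numerator it works out to cur_win_nsec ~= win_nsec / sqrt(scale_step + 1):

static void rwb_arm_timer(struct rq_wb *rwb)
{
        struct rq_depth *rqd = &rwb->rq_depth;

        if (rqd->scale_step > 0) {
                /* scaled down: sample more often so we can recover quickly */
                rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
                                int_sqrt((rqd->scale_step + 1) << 8));
        } else {
                /* at or above the default depth: keep the default window */
                rwb->cur_win_nsec = rwb->win_nsec;
        }

        blk_stat_activate_nsecs(rwb->cb, rwb->cur_win_nsec);
}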
424 struct rq_wb *rwb = cb->data;
425 struct rq_depth *rqd = &rwb->rq_depth;
426 unsigned int inflight = wbt_inflight(rwb);
429 if (!rwb->rqos.disk)
432 status = latency_exceeded(rwb, cb->stat);
434 trace_wbt_timer(rwb->rqos.disk->bdi, status, rqd->scale_step, inflight);
443 scale_down(rwb, true);
446 scale_up(rwb);
454 scale_up(rwb);
457 if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
465 scale_up(rwb);
467 scale_down(rwb, false);
477 rwb_arm_timer(rwb);
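wb_timer_fn() turns the latency verdict into a scaling decision. A sketch of the switch that ties the scale_up()/scale_down() calls above together; the case labels and the RWB_UNKNOWN_BUMP handling are reconstructed, not matched:

static void wb_timer_fn(struct blk_stat_callback *cb)
{
        struct rq_wb *rwb = cb->data;
        struct rq_depth *rqd = &rwb->rq_depth;
        unsigned int inflight = wbt_inflight(rwb);
        int status;

        if (!rwb->rqos.disk)
                return;

        status = latency_exceeded(rwb, cb->stat);

        trace_wbt_timer(rwb->rqos.disk->bdi, status, rqd->scale_step, inflight);

        switch (status) {
        case LAT_EXCEEDED:
                /* reads took too long: throttle writes harder */
                scale_down(rwb, true);
                break;
        case LAT_OK:
                scale_up(rwb);
                break;
        case LAT_UNKNOWN_WRITES:
                /* write-only window: allow more write bandwidth */
                scale_up(rwb);
                break;
        case LAT_UNKNOWN:
                if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
                        break;
                /* no valid samples for a while: drift back toward step 0 */
                if (rqd->scale_step > 0)
                        scale_up(rwb);
                else if (rqd->scale_step < 0)
                        scale_down(rwb, false);
                break;
        default:
                break;
        }

        /* re-arm the window if we are scaled or still have IO in flight */
        if (rqd->scale_step || inflight)
                rwb_arm_timer(rwb);
}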
480 static void wbt_update_limits(struct rq_wb *rwb)
482 struct rq_depth *rqd = &rwb->rq_depth;
488 calc_wb_limits(rwb);
490 rwb_wake_all(rwb);
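wbt_update_limits() resets scaling to the neutral step and recomputes the depths; roughly:

static void wbt_update_limits(struct rq_wb *rwb)
{
        struct rq_depth *rqd = &rwb->rq_depth;

        /* back to the neutral step before recomputing limits */
        rqd->scale_step = 0;
        rqd->scaled_max = false;

        rq_depth_calc_max_depth(rqd);
        calc_wb_limits(rwb);

        rwb_wake_all(rwb);
}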
524 static bool close_io(struct rq_wb *rwb)
528 return time_before(now, rwb->last_issue + HZ / 10) ||
529 time_before(now, rwb->last_comp + HZ / 10);
534 static inline unsigned int get_limit(struct rq_wb *rwb, blk_opf_t opf)
539 return rwb->wb_background;
549 if ((opf & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
550 limit = rwb->rq_depth.max_depth;
551 else if ((opf & REQ_BACKGROUND) || close_io(rwb)) {
556 limit = rwb->wb_background;
558 limit = rwb->wb_normal;
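get_limit() picks which depth applies to a given IO. The REQ_HIPRIO macro and the REQ_OP_DISCARD early return are assumptions taken from memory of the upstream file; the matched lines supply the rest:

static bool close_io(struct rq_wb *rwb)
{
        const unsigned long now = jiffies;

        /* unrelated IO issued or completed within the last ~100ms? */
        return time_before(now, rwb->last_issue + HZ / 10) ||
                time_before(now, rwb->last_comp + HZ / 10);
}

#define REQ_HIPRIO      (REQ_SYNC | REQ_META | REQ_PRIO)

static inline unsigned int get_limit(struct rq_wb *rwb, blk_opf_t opf)
{
        unsigned int limit;

        /* discards are always held to the background depth */
        if ((opf & REQ_OP_MASK) == REQ_OP_DISCARD)
                return rwb->wb_background;

        /*
         * Sync/high-priority writeback and kswapd get the full depth;
         * background writeback, or writes competing with recent other IO,
         * get the background depth; everything else gets "normal".
         */
        if ((opf & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
                limit = rwb->rq_depth.max_depth;
        else if ((opf & REQ_BACKGROUND) || close_io(rwb))
                limit = rwb->wb_background;
        else
                limit = rwb->wb_normal;

        return limit;
}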
564 struct rq_wb *rwb;
572 return rq_wait_inc_below(rqw, get_limit(data->rwb, data->opf));
578 wbt_rqw_done(data->rwb, rqw, data->wb_acct);
585 static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
588 struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
590 .rwb = rwb,
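The wait path hands a small context struct plus two callbacks to rq_qos_wait(). A reconstruction, assuming the wbt_wait_data, wbt_inflight_cb and wbt_cleanup_cb names used upstream and the four-argument rq_qos_wait() signature:

struct wbt_wait_data {
        struct rq_wb *rwb;
        enum wbt_flags wb_acct;
        blk_opf_t opf;
};

static bool wbt_inflight_cb(struct rq_wait *rqw, void *private_data)
{
        struct wbt_wait_data *data = private_data;

        /* admit the IO only if it stays below the current limit */
        return rq_wait_inc_below(rqw, get_limit(data->rwb, data->opf));
}

static void wbt_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
        struct wbt_wait_data *data = private_data;

        wbt_rqw_done(data->rwb, rqw, data->wb_acct);
}

static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
                       blk_opf_t opf)
{
        struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
        struct wbt_wait_data data = {
                .rwb = rwb,
                .wb_acct = wb_acct,
                .opf = opf,
        };

        /* sleeps until wbt_inflight_cb() lets the IO through */
        rq_qos_wait(rqw, &data, wbt_inflight_cb, wbt_cleanup_cb);
}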
616 static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
620 if (!rwb_enabled(rwb))
637 struct rq_wb *rwb = RQWB(rqos);
638 enum wbt_flags flags = bio_to_wbt_flags(rwb, bio);
649 struct rq_wb *rwb = RQWB(rqos);
652 flags = bio_to_wbt_flags(rwb, bio);
655 wb_timestamp(rwb, &rwb->last_issue);
659 __wbt_wait(rwb, flags, bio->bi_opf);
661 if (!blk_stat_is_active(rwb->cb))
662 rwb_arm_timer(rwb);
667 struct rq_wb *rwb = RQWB(rqos);
668 rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
673 struct rq_wb *rwb = RQWB(rqos);
675 if (!rwb_enabled(rwb))
685 if (wbt_is_read(rq) && !rwb->sync_issue) {
686 rwb->sync_cookie = rq;
687 rwb->sync_issue = rq->io_start_time_ns;
693 struct rq_wb *rwb = RQWB(rqos);
694 if (!rwb_enabled(rwb))
696 if (rq == rwb->sync_cookie) {
697 rwb->sync_issue = 0;
698 rwb->sync_cookie = NULL;
772 struct rq_wb *rwb = RQWB(rqos);
774 blk_stat_remove_callback(rqos->disk->queue, rwb->cb);
775 blk_stat_free_callback(rwb->cb);
776 kfree(rwb);
785 struct rq_wb *rwb;
788 rwb = RQWB(rqos);
789 if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
790 blk_stat_deactivate(rwb->cb);
791 rwb->enable_state = WBT_STATE_OFF_DEFAULT;
800 struct rq_wb *rwb = RQWB(rqos);
802 seq_printf(m, "%llu\n", rwb->cur_win_nsec);
809 struct rq_wb *rwb = RQWB(rqos);
811 seq_printf(m, "%d\n", rwb->enable_state);
826 struct rq_wb *rwb = RQWB(rqos);
831 atomic_read(&rwb->rq_wait[i].inflight));
838 struct rq_wb *rwb = RQWB(rqos);
840 seq_printf(m, "%lu\n", rwb->min_lat_nsec);
847 struct rq_wb *rwb = RQWB(rqos);
849 seq_printf(m, "%u\n", rwb->unknown_cnt);
856 struct rq_wb *rwb = RQWB(rqos);
858 seq_printf(m, "%u\n", rwb->wb_normal);
865 struct rq_wb *rwb = RQWB(rqos);
867 seq_printf(m, "%u\n", rwb->wb_background);
901 struct rq_wb *rwb;
905 rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
906 if (!rwb)
909 rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
910 if (!rwb->cb) {
911 kfree(rwb);
916 rq_wait_init(&rwb->rq_wait[i]);
918 rwb->last_comp = rwb->last_issue = jiffies;
919 rwb->win_nsec = RWB_WINDOW_NSEC;
920 rwb->enable_state = WBT_STATE_ON_DEFAULT;
921 rwb->wc = test_bit(QUEUE_FLAG_WC, &q->queue_flags);
922 rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
923 rwb->min_lat_nsec = wbt_default_latency_nsec(q);
924 rwb->rq_depth.queue_depth = blk_queue_depth(q);
925 wbt_update_limits(rwb);
928 * Assign rwb and add the stats callback.
931 ret = rq_qos_add(&rwb->rqos, disk, RQ_QOS_WBT, &wbt_rqos_ops);
936 blk_stat_add_callback(q, rwb->cb);
941 blk_stat_free_callback(rwb->cb);
942 kfree(rwb);
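Finally, wbt_init() as a whole. The error-handling labels and the rq_qos_mutex locking around rq_qos_add() are filled in from memory and vary between kernel versions, so take this as a sketch of how the matched initialization lines fit together:

int wbt_init(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;
        struct rq_wb *rwb;
        int i;
        int ret;

        rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
        if (!rwb)
                return -ENOMEM;

        rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
        if (!rwb->cb) {
                kfree(rwb);
                return -ENOMEM;
        }

        for (i = 0; i < WBT_NUM_RWQ; i++)
                rq_wait_init(&rwb->rq_wait[i]);

        rwb->last_comp = rwb->last_issue = jiffies;
        rwb->win_nsec = RWB_WINDOW_NSEC;
        rwb->enable_state = WBT_STATE_ON_DEFAULT;
        rwb->wc = test_bit(QUEUE_FLAG_WC, &q->queue_flags);
        rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
        rwb->min_lat_nsec = wbt_default_latency_nsec(q);
        rwb->rq_depth.queue_depth = blk_queue_depth(q);
        wbt_update_limits(rwb);

        /*
         * Assign rwb and add the stats callback.
         */
        mutex_lock(&q->rq_qos_mutex);
        ret = rq_qos_add(&rwb->rqos, disk, RQ_QOS_WBT, &wbt_rqos_ops);
        mutex_unlock(&q->rq_qos_mutex);
        if (ret)
                goto err_free;

        blk_stat_add_callback(q, rwb->cb);

        return 0;

err_free:
        blk_stat_free_callback(rwb->cb);
        kfree(rwb);
        return ret;
}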