// SPDX-License-Identifier: GPL-2.0
/*
 * buffered writeback throttling. loosely based on CoDel. We can't drop
 * packets for IO scheduling, so the logic is something like this:
 *
 * - Monitor latencies in a defined window of time.
 * - If the minimum latency in the above window exceeds some target, increment
 *   scaling step and scale down queue depth by a factor of 2x. The monitoring
 *   window is then shrunk to 100 / sqrt(scaling step + 1).
 * - For any window where we don't have solid data on what the latencies
 *   look like, retain status quo.
 * - If latencies look good, decrement scaling step.
 * - If we're only doing writes, allow the scaling step to go negative. This
 *   will temporarily boost write performance, snapping back to a stable
 *   scaling step of 0 if reads show up or the heavy writers finish. Unlike
 *   positive scaling steps where we shrink the monitoring window, a negative
 *   scaling step retains the default step==0 window size.
 *
 * Copyright (C) 2016 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>

#include "blk-wbt.h"
#include "blk-rq-qos.h"

#define CREATE_TRACE_POINTS
#include <trace/events/wbt.h>

static inline void wbt_clear_state(struct request *rq)
{
	rq->wbt_flags = 0;
}

static inline enum wbt_flags wbt_flags(struct request *rq)
{
	return rq->wbt_flags;
}

static inline bool wbt_is_tracked(struct request *rq)
{
	return rq->wbt_flags & WBT_TRACKED;
}

static inline bool wbt_is_read(struct request *rq)
{
	return rq->wbt_flags & WBT_READ;
}

enum {
	/*
	 * Default setting, we'll scale up (to 75% of QD max) or down (min 1)
	 * from here depending on device stats
	 */
	RWB_DEF_DEPTH	= 16,

	/*
	 * 100msec window
	 */
	RWB_WINDOW_NSEC		= 100 * 1000 * 1000ULL,

	/*
	 * Disregard stats, if we don't meet this minimum
	 */
	RWB_MIN_WRITE_SAMPLES	= 3,

	/*
	 * If we have this number of consecutive windows with not enough
	 * information to scale up or down, scale up.
	 */
	RWB_UNKNOWN_BUMP	= 5,
};

static inline bool rwb_enabled(struct rq_wb *rwb)
{
	return rwb && rwb->enable_state != WBT_STATE_OFF_DEFAULT &&
		      rwb->wb_normal != 0;
}

static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
{
	if (rwb_enabled(rwb)) {
		const unsigned long cur = jiffies;

		if (cur != *var)
			*var = cur;
	}
}

/*
 * If a task was rate throttled in balance_dirty_pages() within the last
 * second or so, use that to indicate a higher cleaning rate.
 */
static bool wb_recent_wait(struct rq_wb *rwb)
{
	struct bdi_writeback *wb = &rwb->rqos.q->backing_dev_info->wb;

	return time_before(jiffies, wb->dirty_sleep + HZ);
}

static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
					  enum wbt_flags wb_acct)
{
	if (wb_acct & WBT_KSWAPD)
		return &rwb->rq_wait[WBT_RWQ_KSWAPD];
	else if (wb_acct & WBT_DISCARD)
		return &rwb->rq_wait[WBT_RWQ_DISCARD];

	return &rwb->rq_wait[WBT_RWQ_BG];
}

static void rwb_wake_all(struct rq_wb *rwb)
{
	int i;

	for (i = 0; i < WBT_NUM_RWQ; i++) {
		struct rq_wait *rqw = &rwb->rq_wait[i];

		if (wq_has_sleeper(&rqw->wait))
			wake_up_all(&rqw->wait);
	}
}

static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
			 enum wbt_flags wb_acct)
{
	int inflight, limit;

	inflight = atomic_dec_return(&rqw->inflight);

	/*
	 * wbt got disabled with IO in flight. Wake up any potential
	 * waiters; we don't have to do more than that.
	 */
	if (unlikely(!rwb_enabled(rwb))) {
		rwb_wake_all(rwb);
		return;
	}

	/*
	 * For discards, our limit is always the background limit. For
	 * writes, if the device does write-back caching, drop further
	 * down before we wake people up.
	 */
	if (wb_acct & WBT_DISCARD)
		limit = rwb->wb_background;
	else if (rwb->wc && !wb_recent_wait(rwb))
		limit = 0;
	else
		limit = rwb->wb_normal;

	/*
	 * Don't wake anyone up if we are above the normal limit.
	 */
	if (inflight && inflight >= limit)
		return;

	if (wq_has_sleeper(&rqw->wait)) {
		int diff = limit - inflight;

		if (!inflight || diff >= rwb->wb_background / 2)
			wake_up_all(&rqw->wait);
	}
}
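
/*
 * Illustrative numbers only (assuming, purely for the sake of example, that
 * rq_depth.max_depth ends up at RWB_DEF_DEPTH == 16, so wb_normal == 8 and
 * wb_background == 4 per calc_wb_limits() below): for a cached write with no
 * recent dirty-page throttling the limit above is 0, so waiters are only
 * woken once all tracked IO has drained. Otherwise (ignoring discards) the
 * limit is wb_normal == 8, and waiters are woken once inflight drops below 8
 * and either the queue is empty or at least wb_background / 2 == 2 slots
 * have freed up.
 */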

static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
{
	struct rq_wb *rwb = RQWB(rqos);
	struct rq_wait *rqw;

	if (!(wb_acct & WBT_TRACKED))
		return;

	rqw = get_rq_wait(rwb, wb_acct);
	wbt_rqw_done(rwb, rqw, wb_acct);
}

/*
 * Called on completion of a request. Note that it's also called when a
 * request is merged into another and then freed.
 */
static void wbt_done(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);

	if (!wbt_is_tracked(rq)) {
		if (rwb->sync_cookie == rq) {
			rwb->sync_issue = 0;
			rwb->sync_cookie = NULL;
		}

		if (wbt_is_read(rq))
			wb_timestamp(rwb, &rwb->last_comp);
	} else {
		WARN_ON_ONCE(rq == rwb->sync_cookie);
		__wbt_done(rqos, wbt_flags(rq));
	}
	wbt_clear_state(rq);
}

static inline bool stat_sample_valid(struct blk_rq_stat *stat)
{
	/*
	 * We need at least one read sample, and a minimum of
	 * RWB_MIN_WRITE_SAMPLES. We require some write samples to know
	 * that it's writes impacting us, and not just some sole read on
	 * a device that is in a lower power state.
	 */
	return (stat[READ].nr_samples >= 1 &&
		stat[WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES);
}

static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
{
	u64 now, issue = READ_ONCE(rwb->sync_issue);

	if (!issue || !rwb->sync_cookie)
		return 0;

	now = ktime_to_ns(ktime_get());
	return now - issue;
}

enum {
	LAT_OK = 1,
	LAT_UNKNOWN,
	LAT_UNKNOWN_WRITES,
	LAT_EXCEEDED,
};

static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
{
	struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
	struct rq_depth *rqd = &rwb->rq_depth;
	u64 thislat;

	/*
	 * If our stored sync issue exceeds the window size, or it
	 * exceeds our min target AND we haven't logged any entries,
	 * flag the latency as exceeded. wbt works off completion latencies,
	 * but for a flooded device, a single sync IO can take a long time
	 * to complete after being issued. If this time exceeds our
	 * monitoring window AND we didn't see any other completions in that
	 * window, then count that sync IO as a latency violation.
	 */
	thislat = rwb_sync_issue_lat(rwb);
	if (thislat > rwb->cur_win_nsec ||
	    (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
		trace_wbt_lat(bdi, thislat);
		return LAT_EXCEEDED;
	}

	/*
	 * No read/write mix, if stat isn't valid
	 */
	if (!stat_sample_valid(stat)) {
		/*
		 * If we had writes in this stat window and the window is
		 * current, we're only doing writes. If a task recently
		 * waited or still has writes in flight, consider us doing
		 * just writes as well.
		 */
		if (stat[WRITE].nr_samples || wb_recent_wait(rwb) ||
		    wbt_inflight(rwb))
			return LAT_UNKNOWN_WRITES;
		return LAT_UNKNOWN;
	}

	/*
	 * If the 'min' latency exceeds our target, step down.
	 */
	if (stat[READ].min > rwb->min_lat_nsec) {
		trace_wbt_lat(bdi, stat[READ].min);
		trace_wbt_stat(bdi, stat);
		return LAT_EXCEEDED;
	}

	if (rqd->scale_step)
		trace_wbt_stat(bdi, stat);

	return LAT_OK;
}

static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
{
	struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
	struct rq_depth *rqd = &rwb->rq_depth;

	trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
			rwb->wb_background, rwb->wb_normal, rqd->max_depth);
}

static void calc_wb_limits(struct rq_wb *rwb)
{
	if (rwb->min_lat_nsec == 0) {
		rwb->wb_normal = rwb->wb_background = 0;
	} else if (rwb->rq_depth.max_depth <= 2) {
		rwb->wb_normal = rwb->rq_depth.max_depth;
		rwb->wb_background = 1;
	} else {
		rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
		rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
	}
}
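
/*
 * Worked example (illustrative only): with rq_depth.max_depth == 16 the
 * above gives wb_normal == (16 + 1) / 2 == 8 and
 * wb_background == (16 + 3) / 4 == 4, i.e. background writeback is allowed
 * about a quarter of the scaled depth and normal writeback about half of it.
 */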

static void scale_up(struct rq_wb *rwb)
{
	if (!rq_depth_scale_up(&rwb->rq_depth))
		return;
	calc_wb_limits(rwb);
	rwb->unknown_cnt = 0;
	rwb_wake_all(rwb);
	rwb_trace_step(rwb, tracepoint_string("scale up"));
}

static void scale_down(struct rq_wb *rwb, bool hard_throttle)
{
	if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))
		return;
	calc_wb_limits(rwb);
	rwb->unknown_cnt = 0;
	rwb_trace_step(rwb, tracepoint_string("scale down"));
}

static void rwb_arm_timer(struct rq_wb *rwb)
{
	struct rq_depth *rqd = &rwb->rq_depth;

	if (rqd->scale_step > 0) {
		/*
		 * We should speed this up, using some variant of a fast
		 * integer inverse square root calculation. Since we only do
		 * this for every window expiration, it's not a huge deal,
		 * though.
		 */
		rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
					int_sqrt((rqd->scale_step + 1) << 8));
	} else {
		/*
		 * For step < 0, we don't want to increase/decrease the
		 * window size.
		 */
		rwb->cur_win_nsec = rwb->win_nsec;
	}

	blk_stat_activate_nsecs(rwb->cb, rwb->cur_win_nsec);
}
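
/*
 * Window math, spelled out (this is the "100 / sqrt(scaling step + 1)" rule
 * from the comment at the top of the file): the expression above is
 * (win_nsec * 16) / int_sqrt((scale_step + 1) * 256), i.e. roughly
 * win_nsec / sqrt(scale_step + 1) in fixed point. For example, with the
 * default 100msec window and scale_step == 3, int_sqrt(1024) == 32 and the
 * window becomes 100msec * 16 / 32 == 50msec.
 */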

static void wb_timer_fn(struct blk_stat_callback *cb)
{
	struct rq_wb *rwb = cb->data;
	struct rq_depth *rqd = &rwb->rq_depth;
	unsigned int inflight = wbt_inflight(rwb);
	int status;

	status = latency_exceeded(rwb, cb->stat);

	trace_wbt_timer(rwb->rqos.q->backing_dev_info, status, rqd->scale_step,
			inflight);

	/*
	 * If we exceeded the latency target, step down. If we did not,
	 * step one level up. If we don't know enough to say either exceeded
	 * or ok, then don't do anything.
	 */
	switch (status) {
	case LAT_EXCEEDED:
		scale_down(rwb, true);
		break;
	case LAT_OK:
		scale_up(rwb);
		break;
	case LAT_UNKNOWN_WRITES:
		/*
		 * We started at the center step and don't have a valid
		 * read/write sample, but we do have writes going on.
		 * Allow step to go negative, to increase write perf.
		 */
		scale_up(rwb);
		break;
	case LAT_UNKNOWN:
		if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
			break;
		/*
		 * We get here when we previously scaled the depth up or
		 * down, and we currently don't have a valid read/write
		 * sample. For that case, slowly return to center state
		 * (step == 0).
		 */
		if (rqd->scale_step > 0)
			scale_up(rwb);
		else if (rqd->scale_step < 0)
			scale_down(rwb, false);
		break;
	default:
		break;
	}

	/*
	 * Re-arm timer, if we have IO in flight
	 */
	if (rqd->scale_step || inflight)
		rwb_arm_timer(rwb);
}

static void wbt_update_limits(struct rq_wb *rwb)
{
	struct rq_depth *rqd = &rwb->rq_depth;

	rqd->scale_step = 0;
	rqd->scaled_max = false;

	rq_depth_calc_max_depth(rqd);
	calc_wb_limits(rwb);

	rwb_wake_all(rwb);
}

u64 wbt_get_min_lat(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	if (!rqos)
		return 0;
	return RQWB(rqos)->min_lat_nsec;
}

void wbt_set_min_lat(struct request_queue *q, u64 val)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	if (!rqos)
		return;
	RQWB(rqos)->min_lat_nsec = val;
	RQWB(rqos)->enable_state = WBT_STATE_ON_MANUAL;
	wbt_update_limits(RQWB(rqos));
}


static bool close_io(struct rq_wb *rwb)
{
	const unsigned long now = jiffies;

	return time_before(now, rwb->last_issue + HZ / 10) ||
		time_before(now, rwb->last_comp + HZ / 10);
}

#define REQ_HIPRIO	(REQ_SYNC | REQ_META | REQ_PRIO)

static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
{
	unsigned int limit;

	/*
	 * If we got disabled, just return UINT_MAX. This ensures that
	 * we'll properly inc a new IO, and dec+wakeup at the end.
	 */
	if (!rwb_enabled(rwb))
		return UINT_MAX;

	if ((rw & REQ_OP_MASK) == REQ_OP_DISCARD)
		return rwb->wb_background;

	/*
	 * At this point we know it's a buffered write. If this is
	 * kswapd trying to free memory, or REQ_SYNC is set, then
	 * it's WB_SYNC_ALL writeback, and we'll use the max limit for
	 * that. If the write is marked as a background write, then use
	 * the idle limit, or go to normal if we haven't had competing
	 * IO for a bit.
	 */
	if ((rw & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
		limit = rwb->rq_depth.max_depth;
	else if ((rw & REQ_BACKGROUND) || close_io(rwb)) {
		/*
		 * If less than 100ms since we completed unrelated IO,
		 * limit us to the background depth (about half the
		 * normal limit) for background writeback.
		 */
		limit = rwb->wb_background;
	} else
		limit = rwb->wb_normal;

	return limit;
}
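
/*
 * Example limits (illustrative, again assuming max_depth == 16 and thus
 * wb_normal == 8, wb_background == 4): a kswapd, REQ_SYNC or
 * recently-dirty-throttled writer may use the full depth of 16; a
 * REQ_BACKGROUND write, or one issued within ~100ms of other unrelated IO,
 * is capped at 4; everything else is capped at 8.
 */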

struct wbt_wait_data {
	struct rq_wb *rwb;
	enum wbt_flags wb_acct;
	unsigned long rw;
};

static bool wbt_inflight_cb(struct rq_wait *rqw, void *private_data)
{
	struct wbt_wait_data *data = private_data;
	return rq_wait_inc_below(rqw, get_limit(data->rwb, data->rw));
}

static void wbt_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
	struct wbt_wait_data *data = private_data;
	wbt_rqw_done(data->rwb, rqw, data->wb_acct);
}

/*
 * Block if we will exceed our limit, or if we are currently waiting for
 * the timer to kick off queuing again.
 */
static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
		       unsigned long rw)
{
	struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
	struct wbt_wait_data data = {
		.rwb = rwb,
		.wb_acct = wb_acct,
		.rw = rw,
	};

	rq_qos_wait(rqw, &data, wbt_inflight_cb, wbt_cleanup_cb);
}

static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_WRITE:
		/*
		 * Don't throttle WRITE_ODIRECT
		 */
		if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
		    (REQ_SYNC | REQ_IDLE))
			return false;
		fallthrough;
	case REQ_OP_DISCARD:
		return true;
	default:
		return false;
	}
}

static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
{
	enum wbt_flags flags = 0;

	if (!rwb_enabled(rwb))
		return 0;

	if (bio_op(bio) == REQ_OP_READ) {
		flags = WBT_READ;
	} else if (wbt_should_throttle(rwb, bio)) {
		if (current_is_kswapd())
			flags |= WBT_KSWAPD;
		if (bio_op(bio) == REQ_OP_DISCARD)
			flags |= WBT_DISCARD;
		flags |= WBT_TRACKED;
	}
	return flags;
}

static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
{
	struct rq_wb *rwb = RQWB(rqos);
	enum wbt_flags flags = bio_to_wbt_flags(rwb, bio);
	__wbt_done(rqos, flags);
}

/*
 * May sleep, if we have exceeded the writeback limits. Only tracked writes
 * are throttled here; for untracked IO we at most note the issue time of
 * reads, which close_io() uses to detect competing IO.
 */
static void wbt_wait(struct rq_qos *rqos, struct bio *bio)
{
	struct rq_wb *rwb = RQWB(rqos);
	enum wbt_flags flags;

	flags = bio_to_wbt_flags(rwb, bio);
	if (!(flags & WBT_TRACKED)) {
		if (flags & WBT_READ)
			wb_timestamp(rwb, &rwb->last_issue);
		return;
	}

	__wbt_wait(rwb, flags, bio->bi_opf);

	if (!blk_stat_is_active(rwb->cb))
		rwb_arm_timer(rwb);
}

static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
	struct rq_wb *rwb = RQWB(rqos);
	rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
}

static void wbt_issue(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);

	if (!rwb_enabled(rwb))
		return;

	/*
	 * Track sync issue, in case it takes a long time to complete. Allows
	 * us to react more quickly if a sync IO is slow to finish. Note that
	 * this is just a hint. The request can go away when it completes, so
	 * it's important we never dereference it. We only use the address to
	 * compare with, which is why we store the sync_issue time locally.
	 */
	if (wbt_is_read(rq) && !rwb->sync_issue) {
		rwb->sync_cookie = rq;
		rwb->sync_issue = rq->io_start_time_ns;
	}
}

static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);
	if (!rwb_enabled(rwb))
		return;
	if (rq == rwb->sync_cookie) {
		rwb->sync_issue = 0;
		rwb->sync_cookie = NULL;
	}
}

void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	if (rqos)
		RQWB(rqos)->wc = write_cache_on;
}

/*
 * Enable wbt if defaults are configured that way
 */
void wbt_enable_default(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);

	/* Throttling already enabled? */
	if (rqos) {
		if (RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
			RQWB(rqos)->enable_state = WBT_STATE_ON_DEFAULT;
		return;
	}

	/* Queue not registered? Maybe shutting down... */
	if (!blk_queue_registered(q))
		return;

	if (queue_is_mq(q) && IS_ENABLED(CONFIG_BLK_WBT_MQ))
		wbt_init(q);
}
EXPORT_SYMBOL_GPL(wbt_enable_default);

u64 wbt_default_latency_nsec(struct request_queue *q)
{
	/*
	 * We default to 2msec for non-rotational storage, and 75msec
	 * for rotational storage.
	 */
	if (blk_queue_nonrot(q))
		return 2000000ULL;
	else
		return 75000000ULL;
}

static int wbt_data_dir(const struct request *rq)
{
	const int op = req_op(rq);

	if (op == REQ_OP_READ)
		return READ;
	else if (op_is_write(op))
		return WRITE;

	/* don't account */
	return -1;
}

static void wbt_queue_depth_changed(struct rq_qos *rqos)
{
	RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->q);
	wbt_update_limits(RQWB(rqos));
}

static void wbt_exit(struct rq_qos *rqos)
{
	struct rq_wb *rwb = RQWB(rqos);
	struct request_queue *q = rqos->q;

	blk_stat_remove_callback(q, rwb->cb);
	blk_stat_free_callback(rwb->cb);
	kfree(rwb);
}

/*
 * Disable wbt, if enabled by default.
 */
void wbt_disable_default(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	struct rq_wb *rwb;
	if (!rqos)
		return;
	rwb = RQWB(rqos);
	if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
		blk_stat_deactivate(rwb->cb);
		rwb->enable_state = WBT_STATE_OFF_DEFAULT;
	}
}
EXPORT_SYMBOL_GPL(wbt_disable_default);

#ifdef CONFIG_BLK_DEBUG_FS
static int wbt_curr_win_nsec_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);

	seq_printf(m, "%llu\n", rwb->cur_win_nsec);
	return 0;
}

static int wbt_enabled_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);

	seq_printf(m, "%d\n", rwb->enable_state);
	return 0;
}

static int wbt_id_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;

	seq_printf(m, "%u\n", rqos->id);
	return 0;
}

static int wbt_inflight_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);
	int i;

	for (i = 0; i < WBT_NUM_RWQ; i++)
		seq_printf(m, "%d: inflight %d\n", i,
			   atomic_read(&rwb->rq_wait[i].inflight));
	return 0;
}

static int wbt_min_lat_nsec_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);

	seq_printf(m, "%lu\n", rwb->min_lat_nsec);
	return 0;
}

static int wbt_unknown_cnt_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);

	seq_printf(m, "%u\n", rwb->unknown_cnt);
	return 0;
}

static int wbt_normal_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);

	seq_printf(m, "%u\n", rwb->wb_normal);
	return 0;
}

static int wbt_background_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);

	seq_printf(m, "%u\n", rwb->wb_background);
	return 0;
}

static const struct blk_mq_debugfs_attr wbt_debugfs_attrs[] = {
	{"curr_win_nsec", 0400, wbt_curr_win_nsec_show},
	{"enabled", 0400, wbt_enabled_show},
	{"id", 0400, wbt_id_show},
	{"inflight", 0400, wbt_inflight_show},
	{"min_lat_nsec", 0400, wbt_min_lat_nsec_show},
	{"unknown_cnt", 0400, wbt_unknown_cnt_show},
	{"wb_normal", 0400, wbt_normal_show},
	{"wb_background", 0400, wbt_background_show},
	{},
};
#endif

static struct rq_qos_ops wbt_rqos_ops = {
	.throttle = wbt_wait,
	.issue = wbt_issue,
	.track = wbt_track,
	.requeue = wbt_requeue,
	.done = wbt_done,
	.cleanup = wbt_cleanup,
	.queue_depth_changed = wbt_queue_depth_changed,
	.exit = wbt_exit,
#ifdef CONFIG_BLK_DEBUG_FS
	.debugfs_attrs = wbt_debugfs_attrs,
#endif
};

int wbt_init(struct request_queue *q)
{
	struct rq_wb *rwb;
	int i;

	rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
	if (!rwb)
		return -ENOMEM;

	rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
	if (!rwb->cb) {
		kfree(rwb);
		return -ENOMEM;
	}

	for (i = 0; i < WBT_NUM_RWQ; i++)
		rq_wait_init(&rwb->rq_wait[i]);

	rwb->rqos.id = RQ_QOS_WBT;
	rwb->rqos.ops = &wbt_rqos_ops;
	rwb->rqos.q = q;
	rwb->last_comp = rwb->last_issue = jiffies;
	rwb->win_nsec = RWB_WINDOW_NSEC;
	rwb->enable_state = WBT_STATE_ON_DEFAULT;
	rwb->wc = test_bit(QUEUE_FLAG_WC, &q->queue_flags);
	rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
	rwb->min_lat_nsec = wbt_default_latency_nsec(q);

	wbt_queue_depth_changed(&rwb->rqos);

	/*
	 * Assign rwb and add the stats callback.
	 */
	rq_qos_add(q, &rwb->rqos);
	blk_stat_add_callback(q, rwb->cb);

	return 0;
}
