// SPDX-License-Identifier: GPL-2.0
/*
 *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 *  for the blk-mq scheduling framework
 *
 *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/*
 * See Documentation/block/deadline-iosched.rst
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
				     by the above parameters. For throughput. */

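/*
 * Per-queue scheduler instance, allocated in dd_init_queue(). dd->lock
 * protects the sort, fifo and dispatch lists; dd->zone_lock serializes
 * zone write-lock state changes against the dispatch-time lookups on
 * zoned block devices.
 */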
struct deadline_data {
	/*
	 * run time data
	 */

	/*
	 * requests (deadline_rq s) are present on both sort_list and fifo_list
	 */
	struct rb_root sort_list[2];
	struct list_head fifo_list[2];

	/*
	 * next in sort order; read, write or both may be NULL
	 */
	struct request *next_rq[2];
	unsigned int batching;		/* number of sequential requests made */
	unsigned int starved;		/* times reads have starved writes */

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[2];
	int fifo_batch;
	int writes_starved;
	int front_merges;

	spinlock_t lock;
	spinlock_t zone_lock;
	struct list_head dispatch;
};

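/*
 * Return the sector-sorted rb-tree (read or write) that @rq belongs on,
 * selected by the request's data direction.
 */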
static inline struct rb_root *
deadline_rb_root(struct deadline_data *dd, struct request *rq)
{
	return &dd->sort_list[rq_data_dir(rq)];
}

/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
	struct rb_node *node = rb_next(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}

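/*
 * Insert @rq into the sector-sorted rb-tree for its data direction.
 */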
static void
deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
{
	struct rb_root *root = deadline_rb_root(dd, rq);

	elv_rb_add(root, rq);
}

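/*
 * Remove @rq from the sector-sorted rb-tree, first advancing the cached
 * next-in-sort-order request if it points at @rq.
 */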
static inline void
deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
{
	const int data_dir = rq_data_dir(rq);

	if (dd->next_rq[data_dir] == rq)
		dd->next_rq[data_dir] = deadline_latter_request(rq);

	elv_rb_del(deadline_rb_root(dd, rq), rq);
}

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q, struct request *rq)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	list_del_init(&rq->queuelist);

	/*
	 * We might not be on the rbtree, if we are doing an insert merge
	 */
	if (!RB_EMPTY_NODE(&rq->rb_node))
		deadline_del_rq_rb(dd, rq);

	elv_rqhash_del(q, rq);
	if (q->last_merge == rq)
		q->last_merge = NULL;
}

static void dd_request_merged(struct request_queue *q, struct request *req,
			      enum elv_merge type)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		elv_rb_del(deadline_rb_root(dd, req), req);
		deadline_add_rq_rb(dd, req);
	}
}

static void dd_merged_requests(struct request_queue *q, struct request *req,
			       struct request *next)
{
	/*
	 * if next expires before rq, assign its expire time to rq
	 * and move into next position (next will be deleted) in fifo
	 */
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before((unsigned long)next->fifo_time,
				(unsigned long)req->fifo_time)) {
			list_move(&req->queuelist, &next->queuelist);
			req->fifo_time = next->fifo_time;
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, next);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct request *rq)
{
	const int data_dir = rq_data_dir(rq);

	dd->next_rq[READ] = NULL;
	dd->next_rq[WRITE] = NULL;
	dd->next_rq[data_dir] = deadline_latter_request(rq);

	/*
	 * take it off the sort and fifo list
	 */
	deadline_remove_request(rq->q, rq);
}

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
	struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);

	/*
	 * rq is expired!
	 */
	if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
		return 1;

	return 0;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, int data_dir)
{
	struct request *rq;
	unsigned long flags;

	if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
		return NULL;

	if (list_empty(&dd->fifo_list[data_dir]))
		return NULL;

	rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
	if (data_dir == READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	list_for_each_entry(rq, &dd->fifo_list[WRITE], queuelist) {
		if (blk_req_can_dispatch_to_zone(rq))
			goto out;
	}
	rq = NULL;
out:
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, int data_dir)
{
	struct request *rq;
	unsigned long flags;

	if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
		return NULL;

	rq = dd->next_rq[data_dir];
	if (!rq)
		return NULL;

	if (data_dir == READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	while (rq) {
		if (blk_req_can_dispatch_to_zone(rq))
			break;
		rq = deadline_latter_request(rq);
	}
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static struct request *__dd_dispatch_request(struct deadline_data *dd)
{
	struct request *rq, *next_rq;
	bool reads, writes;
	int data_dir;

	if (!list_empty(&dd->dispatch)) {
		rq = list_first_entry(&dd->dispatch, struct request, queuelist);
		list_del_init(&rq->queuelist);
		goto done;
	}

	reads = !list_empty(&dd->fifo_list[READ]);
	writes = !list_empty(&dd->fifo_list[WRITE]);

	/*
	 * batches are currently reads XOR writes
	 */
	rq = deadline_next_request(dd, WRITE);
	if (!rq)
		rq = deadline_next_request(dd, READ);

	if (rq && dd->batching < dd->fifo_batch)
		/* we have a next request and are still entitled to batch */
		goto dispatch_request;

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (reads) {
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

		if (deadline_fifo_request(dd, WRITE) &&
		    (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = READ;

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads or writes have been starved
	 */

	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

		dd->starved = 0;

		data_dir = WRITE;

		goto dispatch_find_request;
	}

	return NULL;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	next_rq = deadline_next_request(dd, data_dir);
	if (deadline_check_fifo(dd, data_dir) || !next_rq) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = deadline_fifo_request(dd, data_dir);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = next_rq;
	}

	/*
	 * For a zoned block device, if we only have writes queued and none of
	 * them can be dispatched, rq will be NULL.
	 */
	if (!rq)
		return NULL;

	dd->batching = 0;

dispatch_request:
	/*
	 * rq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, rq);
done:
	/*
	 * If the request needs its target zone locked, do it.
	 */
	blk_req_zone_write_lock(rq);
	rq->rq_flags |= RQF_STARTED;
	return rq;
}

/*
 * One confusing aspect here is that we get called for a specific
 * hardware queue, but we may return a request that is for a
 * different hardware queue. This is because mq-deadline has shared
 * state for all hardware queues, in terms of sorting, FIFOs, etc.
 */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	struct request *rq;

	spin_lock(&dd->lock);
	rq = __dd_dispatch_request(dd);
	spin_unlock(&dd->lock);
	if (rq)
		atomic_dec(&rq->mq_hctx->elevator_queued);

	return rq;
}

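/*
 * Free the per-queue scheduler data. Both fifo lists must be empty by
 * now; the BUG_ON()s catch requests leaked by the scheduler.
 */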
static void dd_exit_queue(struct elevator_queue *e)
{
	struct deadline_data *dd = e->elevator_data;

	BUG_ON(!list_empty(&dd->fifo_list[READ]));
	BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

	kfree(dd);
}

/*
 * initialize elevator private data (deadline_data).
 */
static int dd_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct deadline_data *dd;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
	if (!dd) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = dd;

	INIT_LIST_HEAD(&dd->fifo_list[READ]);
	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
	dd->sort_list[READ] = RB_ROOT;
	dd->sort_list[WRITE] = RB_ROOT;
	dd->fifo_expire[READ] = read_expire;
	dd->fifo_expire[WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->fifo_batch = fifo_batch;
	spin_lock_init(&dd->lock);
	spin_lock_init(&dd->zone_lock);
	INIT_LIST_HEAD(&dd->dispatch);

	q->elevator = eq;
	return 0;
}

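/*
 * Check whether @bio can be front-merged with a queued request. The sort
 * rb-trees are keyed on each request's start sector, so a request that
 * begins exactly at bio_end_sector(bio) is a front-merge candidate.
 */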
static int dd_request_merge(struct request_queue *q, struct request **rq,
			    struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	sector_t sector = bio_end_sector(bio);
	struct request *__rq;

	if (!dd->front_merges)
		return ELEVATOR_NO_MERGE;

	__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
	if (__rq) {
		BUG_ON(sector != blk_rq_pos(__rq));

		if (elv_bio_merge_ok(__rq, bio)) {
			*rq = __rq;
			if (blk_discard_mergable(__rq))
				return ELEVATOR_DISCARD_MERGE;
			return ELEVATOR_FRONT_MERGE;
		}
	}

	return ELEVATOR_NO_MERGE;
}

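/*
 * Attempt to merge @bio into an already queued request before a new
 * request is allocated for it, under dd->lock.
 */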
static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *free = NULL;
	bool ret;

	spin_lock(&dd->lock);
	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
	spin_unlock(&dd->lock);

	if (free)
		blk_mq_free_request(free);

	return ret;
}

/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			      bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	const int data_dir = rq_data_dir(rq);
	LIST_HEAD(free);

	/*
	 * This may be a requeue of a write request that has locked its
	 * target zone. If it is the case, this releases the zone lock.
	 */
	blk_req_zone_write_unlock(rq);

	if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
		blk_mq_free_requests(&free);
		return;
	}

	blk_mq_sched_request_inserted(rq);

	if (at_head || blk_rq_is_passthrough(rq)) {
		if (at_head)
			list_add(&rq->queuelist, &dd->dispatch);
		else
			list_add_tail(&rq->queuelist, &dd->dispatch);
	} else {
		deadline_add_rq_rb(dd, rq);

		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * set expire time and add to fifo list
		 */
		rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
		list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
	}
}

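/*
 * Insert all requests on @list into the scheduler, taking dd->lock once
 * for the whole batch.
 */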
static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
			       struct list_head *list, bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_lock(&dd->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		dd_insert_request(hctx, rq, at_head);
		atomic_inc(&hctx->elevator_queued);
	}
	spin_unlock(&dd->lock);
}

/*
 * Nothing to do here. This is defined only to ensure that .finish_request
 * method is called upon request completion.
 */
static void dd_prepare_request(struct request *rq)
{
}

/*
 * For zoned block devices, write unlock the target zone of
 * completed write requests. Do this while holding the zone lock
 * spinlock so that the zone is never unlocked while deadline_fifo_request()
 * or deadline_next_request() are executing. This function is called for
 * all requests, whether or not these requests complete successfully.
 *
 * For a zoned block device, __dd_dispatch_request() may have stopped
 * dispatching requests if all the queued requests are write requests directed
 * at zones that are already locked due to on-going write requests. To ensure
 * write request dispatch progress in this case, mark the queue as needing a
 * restart to ensure that the queue is run again after completion of the
 * request and zones being unlocked.
 */
static void dd_finish_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (blk_queue_is_zoned(q)) {
		struct deadline_data *dd = q->elevator->elevator_data;
		unsigned long flags;

		spin_lock_irqsave(&dd->zone_lock, flags);
		blk_req_zone_write_unlock(rq);
		if (!list_empty(&dd->fifo_list[WRITE]))
			blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
		spin_unlock_irqrestore(&dd->zone_lock, flags);
	}
}

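/*
 * Report whether the scheduler holds any requests. The checks are
 * intentionally lockless: the elevator_queued counter gives a cheap
 * early-out before probing the dispatch and fifo lists.
 */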
static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;

	if (!atomic_read(&hctx->elevator_queued))
		return false;

	return !list_empty_careful(&dd->dispatch) ||
		!list_empty_careful(&dd->fifo_list[0]) ||
		!list_empty_careful(&dd->fifo_list[1]);
}

/*
 * sysfs parts below
 */
static ssize_t
deadline_var_show(int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static void
deadline_var_store(int *var, const char *page)
{
	char *p = (char *) page;

	*var = simple_strtol(p, &p, 10);
}

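/*
 * Generate one sysfs show method per tunable; __CONV converts the
 * jiffies-based expiry values to milliseconds for userspace.
 */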
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data = __VAR;						\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return deadline_var_show(__data, (page));			\
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION

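/*
 * Generate the matching store methods: parse the value, clamp it to
 * [MIN, MAX] and, when __CONV is set, convert milliseconds back to jiffies.
 */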
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data;							\
	deadline_var_store(&__data, (page));				\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return count;							\
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION

#define DD_ATTR(name) \
	__ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
	DD_ATTR(read_expire),
	DD_ATTR(write_expire),
	DD_ATTR(writes_starved),
	DD_ATTR(front_merges),
	DD_ATTR(fifo_batch),
	__ATTR_NULL
};
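
/*
 * With mq-deadline active, these tunables appear under
 * /sys/block/<dev>/queue/iosched/ (expire values in milliseconds), e.g.:
 *
 *	echo 250 > /sys/block/sda/queue/iosched/read_expire
 */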

#ifdef CONFIG_BLK_DEBUG_FS
#define DEADLINE_DEBUGFS_DDIR_ATTRS(ddir, name)				\
static void *deadline_##name##_fifo_start(struct seq_file *m,		\
					  loff_t *pos)			\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&dd->fifo_list[ddir], *pos);		\
}									\
									\
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,	\
					 loff_t *pos)			\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	return seq_list_next(v, &dd->fifo_list[ddir], pos);		\
}									\
									\
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_##name##_fifo_seq_ops = {	\
	.start	= deadline_##name##_fifo_start,				\
	.next	= deadline_##name##_fifo_next,				\
	.stop	= deadline_##name##_fifo_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
};									\
									\
static int deadline_##name##_next_rq_show(void *data,			\
					  struct seq_file *m)		\
{									\
	struct request_queue *q = data;					\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct request *rq = dd->next_rq[ddir];				\
									\
	if (rq)								\
		__blk_mq_debugfs_rq_show(m, rq);			\
	return 0;							\
}
DEADLINE_DEBUGFS_DDIR_ATTRS(READ, read)
DEADLINE_DEBUGFS_DDIR_ATTRS(WRITE, write)
#undef DEADLINE_DEBUGFS_DDIR_ATTRS

static int deadline_batching_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->batching);
	return 0;
}

static int deadline_starved_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->starved);
	return 0;
}

static void *deadline_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&dd->lock)
{
	struct request_queue *q = m->private;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_lock(&dd->lock);
	return seq_list_start(&dd->dispatch, *pos);
}

static void *deadline_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;
	struct deadline_data *dd = q->elevator->elevator_data;

	return seq_list_next(v, &dd->dispatch, pos);
}

static void deadline_dispatch_stop(struct seq_file *m, void *v)
	__releases(&dd->lock)
{
	struct request_queue *q = m->private;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_unlock(&dd->lock);
}

static const struct seq_operations deadline_dispatch_seq_ops = {
	.start	= deadline_dispatch_start,
	.next	= deadline_dispatch_next,
	.stop	= deadline_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};

#define DEADLINE_QUEUE_DDIR_ATTRS(name)						\
	{#name "_fifo_list", 0400, .seq_ops = &deadline_##name##_fifo_seq_ops},	\
	{#name "_next_rq", 0400, deadline_##name##_next_rq_show}
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
	DEADLINE_QUEUE_DDIR_ATTRS(read),
	DEADLINE_QUEUE_DDIR_ATTRS(write),
	{"batching", 0400, deadline_batching_show},
	{"starved", 0400, deadline_starved_show},
	{"dispatch", 0400, .seq_ops = &deadline_dispatch_seq_ops},
	{},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif

static struct elevator_type mq_deadline = {
	.ops = {
		.insert_requests	= dd_insert_requests,
		.dispatch_request	= dd_dispatch_request,
		.prepare_request	= dd_prepare_request,
		.finish_request		= dd_finish_request,
		.next_request		= elv_rb_latter_request,
		.former_request		= elv_rb_former_request,
		.bio_merge		= dd_bio_merge,
		.request_merge		= dd_request_merge,
		.requests_merged	= dd_merged_requests,
		.request_merged		= dd_request_merged,
		.has_work		= dd_has_work,
		.init_sched		= dd_init_queue,
		.exit_sched		= dd_exit_queue,
	},

#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
	.elevator_attrs = deadline_attrs,
	.elevator_name = "mq-deadline",
	.elevator_alias = "deadline",
	.elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
	.elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("mq-deadline-iosched");

static int __init deadline_init(void)
{
	return elv_register(&mq_deadline);
}

static void __exit deadline_exit(void)
{
	elv_unregister(&mq_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");