// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to sequence PREFLUSH and FUA writes.
 *
 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
 *
 * REQ_{PREFLUSH|FUA} requests are decomposed into sequences consisting of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_PREFLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, PREFLUSH and FUA don't make
 * any difference.  The requests are either completed immediately if there's
 * no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_PREFLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of PREFLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without PREFLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing RQF_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each PREFLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
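
/*
 * A concrete example of the mapping above (illustrative only): consider a
 * REQ_OP_WRITE with REQ_PREFLUSH | REQ_FUA carrying data.
 *
 *	device capability		resulting sequence
 *	------------------------	------------------------------
 *	no writeback cache		DATA
 *	writeback cache + FUA		PREFLUSH -> DATA (REQ_FUA retained)
 *	writeback cache, no FUA		PREFLUSH -> DATA -> POSTFLUSH
 *
 * The same request without data collapses to a single PREFLUSH, or to
 * nothing at all without a writeback cache.  See blk_flush_policy() below.
 */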

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/* PREFLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static void blk_kick_flush(struct request_queue *q,
			   struct blk_flush_queue *fq, unsigned int flags);

static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & (1UL << QUEUE_FLAG_WC)) {
		if (rq->cmd_flags & REQ_PREFLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
		    (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

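/*
 * rq->flush.seq accumulates the REQ_FSEQ_* bits of the steps already
 * completed (steps that aren't needed are pre-marked complete in
 * blk_insert_flush()).  ffz() finds the first zero bit, i.e. the next step
 * to execute: for example, a request that has only finished PREFLUSH
 * (seq == REQ_FSEQ_PREFLUSH) is currently at REQ_FSEQ_DATA.
 */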
static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}

static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->rq_flags &= ~RQF_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}

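/*
 * (Re)queue @rq for dispatch via the requeue list; @add_front selects head
 * vs. tail insertion, and the final 'true' kicks the requeue list so the
 * request is dispatched without waiting.
 */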
static void blk_flush_queue_rq(struct request *rq, bool add_front)
{
	blk_mq_add_to_requeue_list(rq, add_front, true);
}

static void blk_account_io_flush(struct request *rq)
{
	struct hd_struct *part = &rq->rq_disk->part0;

	part_stat_lock();
	part_stat_inc(part, ios[STAT_FLUSH]);
	part_stat_add(part, nsecs[STAT_FLUSH],
		      ktime_get_ns() - rq->start_time_ns);
	part_stat_unlock();
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: PREFLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence; record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_flush_complete_seq(struct request *rq,
				   struct blk_flush_queue *fq,
				   unsigned int seq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	unsigned int cmd_flags;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;
	cmd_flags = rq->cmd_flags;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			fq->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
		blk_flush_queue_rq(rq, true);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_insert_flush() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		blk_mq_end_request(rq, error);
		break;

	default:
		BUG();
	}

	blk_kick_flush(q, fq, cmd_flags);
}
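
/*
 * For illustration: a request whose policy is PREFLUSH|DATA|POSTFLUSH passes
 * through blk_flush_complete_seq() four times - onto the pending flush queue
 * (PREFLUSH), onto flush_data_in_flight and the requeue list (DATA), onto
 * the pending queue again (POSTFLUSH), and finally REQ_FSEQ_DONE restores
 * and ends it.
 */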

static void flush_end_io(struct request *flush_rq, blk_status_t error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	struct request *rq, *n;
	unsigned long flags = 0;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);

	/* release the tag's ownership back to the request it was cloned from */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);

	if (!refcount_dec_and_test(&flush_rq->ref)) {
		fq->rq_status = error;
		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
		return;
	}

	blk_account_io_flush(flush_rq);
	/*
	 * The flush request has to be marked IDLE when it is really ended
	 * because its ->end_io() is also called from the timeout code path,
	 * to avoid a use-after-free.
	 */
	WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE);
	if (fq->rq_status != BLK_STS_OK) {
		error = fq->rq_status;
		fq->rq_status = BLK_STS_OK;
	}

	if (!q->elevator) {
		flush_rq->tag = BLK_MQ_NO_TAG;
	} else {
		blk_mq_put_driver_tag(flush_rq);
		flush_rq->internal_tag = BLK_MQ_NO_TAG;
	}

	running = &fq->flush_queue[fq->flush_running_idx];
	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

	/* account completion of the flush request */
	fq->flush_running_idx ^= 1;

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		blk_flush_complete_seq(rq, fq, seq, error);
	}

	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

bool is_flush_rq(struct request *rq)
{
	return rq->end_io == flush_end_io;
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 * @flags: cmd_flags of the original request
 *
 * Flush-related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
			   unsigned int flags)
{
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);
	struct request *flush_rq = fq->flush_rq;

	/* C1 described at the top of this file */
	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
		return;

	/* C2 and C3 */
	if (!list_empty(&fq->flush_data_in_flight) &&
	    time_before(jiffies,
			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	fq->flush_pending_idx ^= 1;

	blk_rq_init(q, flush_rq);

	/*
	 * With the "none" scheduler, borrow the tag from the first request
	 * and acquire its ownership for the flush req, since the two can't
	 * be in flight at the same time.
	 *
	 * With an I/O scheduler, the flush rq needs to borrow the scheduler
	 * tag just to keep the driver tag put/get accounting consistent.
	 */
	flush_rq->mq_ctx = first_rq->mq_ctx;
	flush_rq->mq_hctx = first_rq->mq_hctx;

	if (!q->elevator) {
		flush_rq->tag = first_rq->tag;

		/*
		 * We borrow the data request's driver tag, so we have to
		 * mark this flush request as INFLIGHT to avoid
		 * double-accounting this driver tag.
		 */
		flush_rq->rq_flags |= RQF_MQ_INFLIGHT;
	} else
		flush_rq->internal_tag = first_rq->internal_tag;

	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
	flush_rq->rq_disk = first_rq->rq_disk;
	flush_rq->end_io = flush_end_io;
	/*
	 * Order the WRITE to ->end_io before the WRITE to rq->ref.  Its pair
	 * is the barrier implied by refcount_inc_not_zero() called from
	 * blk_mq_find_and_get_req(), which orders the WRITE/READ of
	 * flush_rq->ref against the READ of flush_rq->end_io.
	 */
	smp_wmb();
	refcount_set(&flush_rq->ref, 1);

	blk_flush_queue_rq(flush_rq, false);
}

static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	unsigned long flags;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

	if (q->elevator) {
		WARN_ON(rq->tag < 0);
		blk_mq_put_driver_tag(rq);
	}

	/*
	 * After populating an empty queue, kick it to avoid a stall.  Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

	blk_mq_sched_restart(hctx);
}

/**
 * blk_insert_flush - insert a new PREFLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions,
 * or __blk_mq_run_hw_queue() to dispatch the request.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned long fflags = q->queue_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);
	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_PREFLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_PREFLUSH;
	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
	 * of those flags, we have to set REQ_SYNC to avoid skewing
	 * the request accounting.
	 */
	rq->cmd_flags |= REQ_SYNC;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache.  In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		blk_mq_end_request(rq, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery.  Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		blk_mq_request_bypass_insert(rq, false, false);
		return;
	}

	/*
	 * @rq should go through flush machinery.  Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->rq_flags |= RQF_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */

	rq->end_io = mq_flush_data_end_io;

	spin_lock_irq(&fq->mq_flush_lock);
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
	spin_unlock_irq(&fq->mq_flush_lock);
}
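
/*
 * Worked example for the ACTIONS & ~policy trick above (illustrative): for a
 * data-carrying REQ_PREFLUSH write on a FUA-capable device, policy is
 * PREFLUSH|DATA, so REQ_FSEQ_ACTIONS & ~policy == POSTFLUSH is pre-marked
 * complete in rq->flush.seq and the sequence starts at PREFLUSH, never
 * issuing a POSTFLUSH.
 */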

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Issue a flush for the block device in question.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask)
{
	struct bio *bio;
	int ret = 0;

	bio = bio_alloc(gfp_mask, 0);
	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	ret = submit_bio_wait(bio);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
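
/*
 * Typical use (an illustrative caller-side sketch, not from this file): a
 * filesystem forcing cached writes to stable storage after a sync might do:
 *
 *	ret = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *
 * sb->s_bdev is an assumed caller-side name; any struct block_device works.
 * This must be called from process context since it waits for the bio.
 */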

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags)
{
	struct blk_flush_queue *fq;
	int rq_sz = sizeof(struct request);

	fq = kzalloc_node(sizeof(*fq), flags, node);
	if (!fq)
		goto fail;

	spin_lock_init(&fq->mq_flush_lock);

	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
	fq->flush_rq = kzalloc_node(rq_sz, flags, node);
	if (!fq->flush_rq)
		goto fail_rq;

	INIT_LIST_HEAD(&fq->flush_queue[0]);
	INIT_LIST_HEAD(&fq->flush_queue[1]);
	INIT_LIST_HEAD(&fq->flush_data_in_flight);

	return fq;

 fail_rq:
	kfree(fq);
 fail:
	return NULL;
}

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
	/* a bio-based request queue has no flush queue */
	if (!fq)
		return;

	kfree(fq->flush_rq);
	kfree(fq);
}

/*
 * Allow a driver to set its own lock class for fq->mq_flush_lock to
 * avoid a lockdep complaint.
 *
 * flush_end_io() may be called recursively from some drivers, such as
 * nvme-loop, so lockdep may complain about 'possible recursive locking'
 * because all 'struct blk_flush_queue' instances share the same
 * mq_flush_lock lock class key.  We need to assign a different lock class
 * to these drivers' fq->mq_flush_lock to avoid the lockdep warning.
 *
 * Using a dynamically allocated lock class key for each 'blk_flush_queue'
 * instance would be overkill, and worse, it introduces a horrible boot
 * delay because synchronize_rcu() is implied in lockdep_unregister_key(),
 * which is called for each hctx release.  SCSI probing may synchronously
 * create and destroy lots of MQ request_queues for non-existent devices,
 * and some robot test kernels always enable the lockdep option.  It has
 * been observed that more than half an hour is taken during SCSI MQ probe
 * with a per-fq lock class.
 */
void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
		struct lock_class_key *key)
{
	lockdep_set_class(&hctx->fq->mq_flush_lock, key);
}
EXPORT_SYMBOL_GPL(blk_mq_hctx_set_fq_lock_class);
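
/*
 * Illustrative use (an assumed driver-side sketch, not from this file),
 * typically done from the driver's ->init_hctx() callback:
 *
 *	static struct lock_class_key my_fq_lock_key;
 *
 *	static int my_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 *				unsigned int hctx_idx)
 *	{
 *		blk_mq_hctx_set_fq_lock_class(hctx, &my_fq_lock_key);
 *		return 0;
 *	}
 *
 * One static key per driver is enough; it only needs to differ from the
 * default class shared by all other flush queues.
 */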