// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Budget Fair Queueing (BFQ) I/O scheduler.
 *
 * Based on ideas and code from CFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
 *                    Arianna Avanzini <avanzini@google.com>
 *
 * Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org>
 *
 * BFQ is a proportional-share I/O scheduler, with some extra
 * low-latency capabilities. BFQ also supports full hierarchical
 * scheduling through cgroups. The next paragraphs provide an
 * introduction to BFQ's inner workings. Details on BFQ's benefits,
 * usage and limitations can be found in
 * Documentation/block/bfq-iosched.rst.
 *
 * BFQ is a proportional-share storage-I/O scheduling algorithm based
 * on the slice-by-slice service scheme of CFQ. But BFQ assigns
 * budgets, measured in number of sectors, to processes instead of
 * time slices. The device is not granted to the in-service process
 * for a given time slice, but until it has exhausted its assigned
 * budget. This change from the time to the service domain enables BFQ
 * to distribute the device throughput among processes as desired,
 * without any distortion due to throughput fluctuations, or to device
 * internal queueing. BFQ uses an ad hoc internal scheduler, called
 * B-WF2Q+, to schedule processes according to their budgets. More
 * precisely, BFQ schedules queues associated with processes. Each
 * process/queue is assigned a user-configurable weight, and B-WF2Q+
 * guarantees that each queue receives a fraction of the throughput
 * proportional to its weight. Thanks to the accurate policy of
 * B-WF2Q+, BFQ can afford to assign high budgets to I/O-bound
 * processes issuing sequential requests (to boost the throughput),
 * and yet guarantee a low latency to interactive and soft real-time
 * applications.
 *
 * In particular, to provide these low-latency guarantees, BFQ
 * explicitly privileges the I/O of two classes of time-sensitive
 * applications: interactive and soft real-time. In more detail, BFQ
 * behaves this way if the low_latency parameter is set (default
 * configuration). This feature enables BFQ to provide applications in
 * these classes with a very low latency.
 *
 * To implement this feature, BFQ constantly tries to detect whether
 * the I/O requests in a bfq_queue come from an interactive or a soft
 * real-time application. For brevity, in these cases, the queue is
 * said to be interactive or soft real-time. In both cases, BFQ
 * privileges the service of the queue over that of non-interactive
 * and non-soft-real-time queues. This privileging is performed mainly
 * by raising the weight of the queue. So, for brevity, we simply call
 * weight-raising periods the time periods during which a queue is
 * privileged because it has been deemed interactive or soft real-time.
 *
 * The detection of soft real-time queues/applications is described in
 * detail in the comments on the function
 * bfq_bfqq_softrt_next_start. On the other hand, the detection of an
 * interactive queue works as follows: a queue is deemed interactive
 * if it is constantly non-empty only for a limited time interval,
 * after which it does become empty. The queue may be deemed
 * interactive again (for a limited time), if it restarts being
 * constantly non-empty, provided that this happens only after the
 * queue has remained empty for a given minimum idle time.
 *
 * By default, BFQ computes automatically the above maximum time
 * interval, i.e., the time interval after which a constantly
 * non-empty queue stops being deemed interactive. Since a queue is
 * weight-raised while it is deemed interactive, this maximum time
 * interval happens to coincide with the (maximum) duration of the
 * weight-raising for interactive queues.
 *
 * Finally, BFQ also features additional heuristics for preserving
 * both a low latency and a high throughput on NCQ-capable, rotational
 * or flash-based devices, and for getting the job done quickly for
 * applications consisting of many I/O-bound processes.
 *
 * NOTE: if the main or only goal, with a given device, is to achieve
 * the maximum-possible throughput at all times, then do switch off
 * all low-latency heuristics for that device, by setting low_latency
 * to 0.
 *
 * BFQ is described in [1], which also contains a reference to the
 * initial, more theoretical paper on BFQ. The interested reader
 * can find in the latter paper full details on the main algorithm, as
 * well as formulas of the guarantees and formal proofs of all the
 * properties.  With respect to the version of BFQ presented in these
 * papers, this implementation adds a few more heuristics, such as the
 * ones that guarantee a low latency to interactive and soft real-time
 * applications, and a hierarchical extension based on H-WF2Q+.
 *
 * B-WF2Q+ is based on WF2Q+, which is described in [2], together with
 * H-WF2Q+, while the augmented tree used here to implement B-WF2Q+
 * with O(log N) complexity derives from the one introduced with EEVDF
 * in [3].
 *
 * [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O
 *     Scheduler", Proceedings of the First Workshop on Mobile System
 *     Technologies (MST-2015), May 2015.
 *     http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf
 *
 * [2] Jon C.R. Bennett and H. Zhang, "Hierarchical Packet Fair Queueing
 *     Algorithms", IEEE/ACM Transactions on Networking, 5(5):675-689,
 *     Oct 1997.
 *     http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
 *
 * [3] I. Stoica and H. Abdel-Wahab, "Earliest Eligible Virtual Deadline
 *     First: A Flexible and Accurate Mechanism for Proportional Share
 *     Resource Allocation", technical report.
 *     http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>
#include <linux/backing-dev.h>

#include <trace/events/block.h>

#include "elevator.h"
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "bfq-iosched.h"
#include "blk-wbt.h"

#define BFQ_BFQQ_FNS(name)						\
void bfq_mark_bfqq_##name(struct bfq_queue *bfqq)			\
{									\
	__set_bit(BFQQF_##name, &(bfqq)->flags);			\
}									\
void bfq_clear_bfqq_##name(struct bfq_queue *bfqq)			\
{									\
	__clear_bit(BFQQF_##name, &(bfqq)->flags);		\
}									\
int bfq_bfqq_##name(const struct bfq_queue *bfqq)			\
{									\
	return test_bit(BFQQF_##name, &(bfqq)->flags);		\
}

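/*
 * For example, BFQ_BFQQ_FNS(busy) below expands to the three helpers
 * bfq_mark_bfqq_busy(), bfq_clear_bfqq_busy() and bfq_bfqq_busy(),
 * which respectively set, clear and test the BFQQF_busy flag of a
 * bfq_queue.
 */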
BFQ_BFQQ_FNS(just_created);
BFQ_BFQQ_FNS(busy);
BFQ_BFQQ_FNS(wait_request);
BFQ_BFQQ_FNS(non_blocking_wait_rq);
BFQ_BFQQ_FNS(fifo_expire);
BFQ_BFQQ_FNS(has_short_ttime);
BFQ_BFQQ_FNS(sync);
BFQ_BFQQ_FNS(IO_bound);
BFQ_BFQQ_FNS(in_large_burst);
BFQ_BFQQ_FNS(coop);
BFQ_BFQQ_FNS(split_coop);
BFQ_BFQQ_FNS(softrt_update);
#undef BFQ_BFQQ_FNS						\

/* Expiration time of async (0) and sync (1) requests, in ns. */
static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };

/* Maximum backwards seek (magic number lifted from CFQ), in KiB. */
static const int bfq_back_max = 16 * 1024;

/* Penalty of a backwards seek, in number of sectors. */
static const int bfq_back_penalty = 2;

/* Idling period duration, in ns. */
static u64 bfq_slice_idle = NSEC_PER_SEC / 125;

/* Minimum number of assigned budgets for which stats are safe to compute. */
static const int bfq_stats_min_budgets = 194;

/* Default maximum budget values, in sectors and number of requests. */
static const int bfq_default_max_budget = 16 * 1024;

/*
 * When a sync request is dispatched, the queue that contains that
 * request, and all the ancestor entities of that queue, are charged
 * with the number of sectors of the request. In contrast, if the
 * request is async, then the queue and its ancestor entities are
 * charged with the number of sectors of the request, multiplied by
 * the factor below. This throttles the bandwidth for async I/O
 * w.r.t. sync I/O, and it is done to counter the tendency of async
 * writes to steal I/O throughput from reads.
 *
 * The current value of this parameter is the result of a tuning with
 * several hardware and software configurations. We tried to find the
 * lowest value for which writes do not cause noticeable problems to
 * reads. In fact, the lower this parameter is, the more stable the
 * I/O control, in the following respect. The lower this parameter is,
 * the less the bandwidth enjoyed by a group decreases
 * - when the group does writes, w.r.t. when it does reads;
 * - when other groups do reads, w.r.t. when they do writes.
 */
static const int bfq_async_charge_factor = 3;
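/*
 * For example, according to the rule described above, dispatching a
 * 64-sector async request charges 64 * 3 = 192 sectors to the owning
 * queue and to its ancestor entities, whereas a 64-sector sync request
 * is charged its actual size of 64 sectors.
 */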

/* Default timeout values, in jiffies, approximating CFQ defaults. */
const int bfq_timeout = HZ / 8;

/*
 * Time limit for merging (see comments in bfq_setup_cooperator). Set
 * to the slowest value that, in our tests, proved to be effective in
 * removing false positives, while not causing true positives to miss
 * queue merging.
 *
 * As can be deduced from the low time limit below, queue merging, if
 * successful, happens at the very beginning of the I/O of the involved
 * cooperating processes, as a consequence of the arrival of the very
 * first requests from each cooperator.  After that, there is very
 * little chance to find cooperators.
 */
static const unsigned long bfq_merge_time_limit = HZ/10;
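/*
 * HZ/10 jiffies corresponds to 100 ms: a queue thus stops being a
 * merge candidate roughly 100 ms after its first I/O request (see
 * bfq_too_late_for_merging() below).
 */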

static struct kmem_cache *bfq_pool;

/* Below this threshold (in ns), we consider thinktime immediate. */
#define BFQ_MIN_TT		(2 * NSEC_PER_MSEC)

/* hw_tag detection: parallel requests threshold and min samples needed. */
#define BFQ_HW_QUEUE_THRESHOLD	3
#define BFQ_HW_QUEUE_SAMPLES	32

#define BFQQ_SEEK_THR		(sector_t)(8 * 100)
#define BFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
#define BFQ_RQ_SEEKY(bfqd, last_pos, rq) \
	(get_sdist(last_pos, rq) >			\
	 BFQQ_SEEK_THR &&				\
	 (!blk_queue_nonrot(bfqd->queue) ||		\
	  blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT))
#define BFQQ_CLOSE_THR		(sector_t)(8 * 1024)
#define BFQQ_SEEKY(bfqq)	(hweight32(bfqq->seek_history) > 19)
/*
 * Sync random I/O is likely to be confused with soft real-time I/O,
 * because it is characterized by limited throughput and apparently
 * isochronous arrival pattern. To avoid false positives, queues
 * containing only random (seeky) I/O are prevented from being tagged
 * as soft real-time.
 */
#define BFQQ_TOTALLY_SEEKY(bfqq)	(bfqq->seek_history == -1)
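/*
 * Here seek_history is, per the update logic elsewhere in BFQ, a
 * 32-bit sliding window with one bit per recent request, set if that
 * request was seeky (BFQ_RQ_SEEKY): BFQQ_SEEKY() then means that more
 * than 19 of the last 32 requests were seeky, while
 * BFQQ_TOTALLY_SEEKY() means that all 32 were.
 */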

/* Min number of samples required to perform peak-rate update */
#define BFQ_RATE_MIN_SAMPLES	32
/* Min observation time interval required to perform a peak-rate update (ns) */
#define BFQ_RATE_MIN_INTERVAL	(300*NSEC_PER_MSEC)
/* Target observation time interval for a peak-rate update (ns) */
#define BFQ_RATE_REF_INTERVAL	NSEC_PER_SEC

/*
 * Shift used for peak-rate fixed precision calculations.
 * With
 * - the current shift: 16 positions
 * - the current type used to store rate: u32
 * - the current unit of measure for rate: [sectors/usec], or, more precisely,
 *   [(sectors/usec) / 2^BFQ_RATE_SHIFT] to take into account the shift,
 * the range of rates that can be stored is
 * [1 / 2^BFQ_RATE_SHIFT, 2^(32 - BFQ_RATE_SHIFT)] sectors/usec =
 * [1 / 2^16, 2^16] sectors/usec = [15e-6, 65536] sectors/usec =
 * [15, 65G] sectors/sec
 * Which, assuming a sector size of 512B, corresponds to a range of
 * [7.5K, 33T] B/sec
 */
#define BFQ_RATE_SHIFT		16

/*
 * When configured for computing the duration of the weight-raising
 * for interactive queues automatically (see the comments at the
 * beginning of this file), BFQ does it using the following formula:
 * duration = (ref_rate / r) * ref_wr_duration,
 * where r is the peak rate of the device, and ref_rate and
 * ref_wr_duration are two reference parameters.  In particular,
 * ref_rate is the peak rate of the reference storage device (see
 * below), and ref_wr_duration is about the maximum time needed, with
 * BFQ and while reading two files in parallel, to load typical large
 * applications on the reference device (see the comments on
 * max_service_from_wr below, for more details on how ref_wr_duration
 * is obtained).  In practice, the slower/faster the device at hand
 * is, the more/less time it takes to load applications with respect
 * to the reference device.  Accordingly, BFQ grants weight-raising to
 * interactive applications for a longer/shorter time.
 *
 * BFQ uses two different reference pairs (ref_rate, ref_wr_duration),
 * depending on whether the device is rotational or non-rotational.
 *
 * In the following definitions, ref_rate[0] and ref_wr_duration[0]
 * are the reference values for a rotational device, whereas
 * ref_rate[1] and ref_wr_duration[1] are the reference values for a
 * non-rotational device. The reference rates are not the actual peak
 * rates of the devices used as a reference, but slightly lower
 * values. The reason for using slightly lower values is that the
 * peak-rate estimator tends to yield slightly lower values than the
 * actual peak rate (it can yield the actual peak rate only if there
 * is only one process doing I/O, and the process does sequential
 * I/O).
 *
 * The reference peak rates are measured in sectors/usec, left-shifted
 * by BFQ_RATE_SHIFT.
 */
static int ref_rate[2] = {14000, 33000};
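/*
 * A rough back-of-the-envelope interpretation of these values: with
 * BFQ_RATE_SHIFT == 16, 14000 corresponds to 14000 / 2^16 ~= 0.21
 * sectors/usec, i.e., about 110 MB/s assuming 512 B sectors, and
 * 33000 to ~= 0.50 sectors/usec, i.e., about 260 MB/s.
 */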
/*
 * To improve readability, a conversion function is used to initialize
 * the following array, which entails that the array can be
 * initialized only in a function.
 */
static int ref_wr_duration[2];
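/*
 * For instance, per the duration formula above, a device whose
 * estimated peak rate r is half of ref_rate gets interactive
 * weight-raising that lasts twice ref_wr_duration, while a device
 * twice as fast as the reference one gets half of it.
 */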

/*
 * BFQ uses the above-detailed, time-based weight-raising mechanism to
 * privilege interactive tasks. This mechanism is vulnerable to the
 * following false positives: I/O-bound applications that will go on
 * doing I/O for much longer than the duration of weight
 * raising. These applications have basically no benefit from being
 * weight-raised at the beginning of their I/O. On the opposite end,
 * while being weight-raised, these applications
 * a) unjustly steal throughput from applications that may actually need
 * low latency;
 * b) make BFQ uselessly perform device idling; device idling results
 * in loss of device throughput with most flash-based storage, and may
 * increase latencies when used purposelessly.
 *
 * BFQ tries to reduce these problems, by adopting the following
 * countermeasure. To introduce this countermeasure, we need first to
 * finish explaining how the duration of weight-raising for
 * interactive tasks is computed.
 *
 * For a bfq_queue deemed as interactive, the duration of weight
 * raising is dynamically adjusted, as a function of the estimated
 * peak rate of the device, so as to be equal to the time needed to
 * execute the 'largest' interactive task we benchmarked so far. By
 * largest task, we mean the task for which each involved process has
 * to do more I/O than for any of the other tasks we benchmarked. This
 * reference interactive task is the start-up of LibreOffice Writer,
 * and in this task each process/bfq_queue needs to have at most ~110K
 * sectors transferred.
 *
 * This last piece of information enables BFQ to reduce the actual
 * duration of weight-raising for at least one class of I/O-bound
 * applications: those doing sequential or quasi-sequential I/O. An
 * example is file copy. In fact, once started, the main I/O-bound
 * processes of these applications usually consume the above 110K
 * sectors in much less time than the processes of an application that
 * is starting, because these I/O-bound processes will greedily devote
 * almost all their CPU cycles only to their target,
 * throughput-friendly I/O operations. This is even more true if BFQ
 * happens to be underestimating the device peak rate, and thus
 * overestimating the duration of weight raising. But, according to
 * our measurements, once they have transferred 110K sectors, these
 * processes have no right to be weight-raised any longer.
 *
 * Based on this last consideration, BFQ ends weight-raising for a
 * bfq_queue if the latter happens to have received an amount of
 * service at least equal to the following constant. The constant is
 * set to slightly more than 110K, to have a minimum safety margin.
 *
 * This early ending of weight-raising reduces the amount of time
 * during which interactive false positives cause the two problems
 * described at the beginning of these comments.
 */
static const unsigned long max_service_from_wr = 120000;
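/*
 * 120000 sectors are, assuming the usual 512 B sector size, about
 * 58 MiB of service: an interactive false positive doing sequential
 * I/O thus loses its privilege after transferring roughly that much
 * data.
 */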

/*
 * Maximum time between the creation of two queues, for stable merge
 * to be activated (in ms)
 */
static const unsigned long bfq_activation_stable_merging = 600;
/*
 * Minimum time to be waited before evaluating delayed stable merge (in ms)
 */
static const unsigned long bfq_late_stable_merging = 600;

#define RQ_BIC(rq)		((struct bfq_io_cq *)((rq)->elv.priv[0]))
#define RQ_BFQQ(rq)		((rq)->elv.priv[1])

struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync,
			      unsigned int actuator_idx)
{
	if (is_sync)
		return bic->bfqq[1][actuator_idx];

	return bic->bfqq[0][actuator_idx];
}

static void bfq_put_stable_ref(struct bfq_queue *bfqq);

void bic_set_bfqq(struct bfq_io_cq *bic,
		  struct bfq_queue *bfqq,
		  bool is_sync,
		  unsigned int actuator_idx)
{
	struct bfq_queue *old_bfqq = bic->bfqq[is_sync][actuator_idx];

	/*
	 * If bfqq != NULL, then a non-stable queue merge between
	 * bic->bfqq and bfqq is happening here. This causes troubles
	 * in the following case: bic->bfqq has also been scheduled
	 * for a possible stable merge with bic->stable_merge_bfqq,
	 * and bic->stable_merge_bfqq == bfqq happens to
	 * hold. Troubles occur because bfqq may then undergo a split,
	 * thereby becoming eligible for a stable merge. Yet, if
	 * bic->stable_merge_bfqq points exactly to bfqq, then bfqq
	 * would be stably merged with itself. To avoid this anomaly,
	 * we cancel the stable merge if
	 * bic->stable_merge_bfqq == bfqq.
	 */
	struct bfq_iocq_bfqq_data *bfqq_data = &bic->bfqq_data[actuator_idx];

	/* Clear bic pointer if bfqq is detached from this bic */
	if (old_bfqq && old_bfqq->bic == bic)
		old_bfqq->bic = NULL;

	if (is_sync)
		bic->bfqq[1][actuator_idx] = bfqq;
	else
		bic->bfqq[0][actuator_idx] = bfqq;

	if (bfqq && bfqq_data->stable_merge_bfqq == bfqq) {
		/*
		 * Actually, these same instructions are executed also
		 * in bfq_setup_cooperator, in case of abort or actual
		 * execution of a stable merge. We could avoid
		 * repeating these instructions there too, but if we
		 * did so, we would nest even more complexity in this
		 * function.
		 */
		bfq_put_stable_ref(bfqq_data->stable_merge_bfqq);

		bfqq_data->stable_merge_bfqq = NULL;
	}
}

struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
{
	return bic->icq.q->elevator->elevator_data;
}

/**
 * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
 * @icq: the iocontext queue.
 */
static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
{
	/* bic->icq is the first member, %NULL will convert to %NULL */
	return container_of(icq, struct bfq_io_cq, icq);
}

/**
 * bfq_bic_lookup - search @q for the bic associated with the current task.
 * @q: the request queue.
 */
static struct bfq_io_cq *bfq_bic_lookup(struct request_queue *q)
{
	struct bfq_io_cq *icq;
	unsigned long flags;

	if (!current->io_context)
		return NULL;

	spin_lock_irqsave(&q->queue_lock, flags);
	icq = icq_to_bic(ioc_lookup_icq(q));
	spin_unlock_irqrestore(&q->queue_lock, flags);

	return icq;
}

/*
 * Scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing.
 */
void bfq_schedule_dispatch(struct bfq_data *bfqd)
{
	lockdep_assert_held(&bfqd->lock);

	if (bfqd->queued != 0) {
		bfq_log(bfqd, "schedule dispatch");
		blk_mq_run_hw_queues(bfqd->queue, true);
	}
}

#define bfq_class_idle(bfqq)	((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)

#define bfq_sample_valid(samples)	((samples) > 80)

/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closer to the head right now.  Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *bfq_choose_req(struct bfq_data *bfqd,
				      struct request *rq1,
				      struct request *rq2,
				      sector_t last)
{
	sector_t s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define BFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define BFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned int wrap = 0; /* bit mask: requests behind the disk head? */

	if (!rq1 || rq1 == rq2)
		return rq2;
	if (!rq2)
		return rq1;

	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
		return rq1;
	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
		return rq2;
	if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
		return rq1;
	else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
		return rq2;

	s1 = blk_rq_pos(rq1);
	s2 = blk_rq_pos(rq2);

	/*
	 * By definition, 1KiB is 2 sectors.
	 */
	back_max = bfqd->bfq_back_max * 2;
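	/*
	 * With the default bfq_back_max of 16*1024 KiB, this allows
	 * backward seeks of up to 32768 sectors (16 MiB); per the code
	 * below, such seeks are weighted bfq_back_penalty times their
	 * actual distance when choosing between the two requests.
	 */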

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * bfqd->bfq_back_penalty;
	else
		wrap |= BFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * bfqd->bfq_back_penalty;
	else
		wrap |= BFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;

		if (s1 >= s2)
			return rq1;
		else
			return rq2;

	case BFQ_RQ2_WRAP:
		return rq1;
	case BFQ_RQ1_WRAP:
		return rq2;
	case BFQ_RQ1_WRAP|BFQ_RQ2_WRAP: /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}

#define BFQ_LIMIT_INLINE_DEPTH 16

#ifdef CONFIG_BFQ_GROUP_IOSCHED
static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
{
	struct bfq_data *bfqd = bfqq->bfqd;
	struct bfq_entity *entity = &bfqq->entity;
	struct bfq_entity *inline_entities[BFQ_LIMIT_INLINE_DEPTH];
	struct bfq_entity **entities = inline_entities;
	int depth, level, alloc_depth = BFQ_LIMIT_INLINE_DEPTH;
	int class_idx = bfqq->ioprio_class - 1;
	struct bfq_sched_data *sched_data;
	unsigned long wsum;
	bool ret = false;

	if (!entity->on_st_or_in_serv)
		return false;

retry:
	spin_lock_irq(&bfqd->lock);
	/* +1 for bfqq entity, root cgroup not included */
	depth = bfqg_to_blkg(bfqq_group(bfqq))->blkcg->css.cgroup->level + 1;
	if (depth > alloc_depth) {
		spin_unlock_irq(&bfqd->lock);
		if (entities != inline_entities)
			kfree(entities);
		entities = kmalloc_array(depth, sizeof(*entities), GFP_NOIO);
		if (!entities)
			return false;
		alloc_depth = depth;
		goto retry;
	}

	sched_data = entity->sched_data;
	/* Gather our ancestors as we need to traverse them in reverse order */
	level = 0;
	for_each_entity(entity) {
		/*
		 * If at some level the entity is not even active, allow
		 * request queueing so that BFQ knows there's work to do
		 * and can activate the entities.
		 */
		if (!entity->on_st_or_in_serv)
			goto out;
		/* Uh, more parents than cgroup subsystem thinks? */
		if (WARN_ON_ONCE(level >= depth))
			break;
		entities[level++] = entity;
	}
	WARN_ON_ONCE(level != depth);
	for (level--; level >= 0; level--) {
		entity = entities[level];
		if (level > 0) {
			wsum = bfq_entity_service_tree(entity)->wsum;
		} else {
			int i;
			/*
			 * For bfqq itself we take into account the service
			 * trees of all higher priority classes and multiply
			 * their weights so that a low-prio queue from a
			 * higher class gets more requests than a high-prio
			 * queue from a lower class.
			 */
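			/*
			 * For instance, for a best-effort queue
			 * (class_idx == 1) the loop below computes,
			 * informally, wsum = wsum_RT * IOPRIO_BE_NR +
			 * wsum_BE, so pending RT activity weighs
			 * IOPRIO_BE_NR times more than BE activity in
			 * sizing this queue's share of requests.
			 */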
			wsum = 0;
			for (i = 0; i <= class_idx; i++) {
				wsum = wsum * IOPRIO_BE_NR +
					sched_data->service_tree[i].wsum;
			}
		}
		if (!wsum)
			continue;
		limit = DIV_ROUND_CLOSEST(limit * entity->weight, wsum);
		if (entity->allocated >= limit) {
			bfq_log_bfqq(bfqq->bfqd, bfqq,
				"too many requests: allocated %d limit %d level %d",
				entity->allocated, limit, level);
			ret = true;
			break;
		}
	}
out:
	spin_unlock_irq(&bfqd->lock);
	if (entities != inline_entities)
		kfree(entities);
	return ret;
}
#else
static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
{
	return false;
}
#endif

/*
 * Async I/O can easily starve sync I/O (both sync reads and sync
 * writes), by consuming all tags. Similarly, storms of sync writes,
 * such as those that sync(2) may trigger, can starve sync reads.
 * Limit depths of async I/O and sync writes so as to counter both
 * problems.
 *
 * Also if a bfq queue or its parent cgroup consumes more tags than would be
 * appropriate for its weight, we trim the available tag depth to 1. This
 * avoids a situation where one cgroup can starve another cgroup from tags and
 * thus block service differentiation among cgroups. Note that because the
 * queue / cgroup already has many requests allocated and queued, this does not
 * significantly affect service guarantees coming from the BFQ scheduling
 * algorithm.
 */
static void bfq_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
{
	struct bfq_data *bfqd = data->q->elevator->elevator_data;
	struct bfq_io_cq *bic = bfq_bic_lookup(data->q);
	int depth;
	unsigned limit = data->q->nr_requests;
	unsigned int act_idx;

	/* Sync reads have full depth available */
	if (op_is_sync(opf) && !op_is_write(opf)) {
		depth = 0;
	} else {
		depth = bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(opf)];
		limit = (limit * depth) >> bfqd->full_depth_shift;
	}

	for (act_idx = 0; bic && act_idx < bfqd->num_actuators; act_idx++) {
		struct bfq_queue *bfqq =
			bic_to_bfqq(bic, op_is_sync(opf), act_idx);

		/*
		 * Does queue (or any parent entity) exceed number of
		 * requests that should be available to it? Heavily
		 * limit depth so that it cannot consume more
		 * available requests and thus starve other entities.
		 */
		if (bfqq && bfqq_request_over_limit(bfqq, limit)) {
			depth = 1;
			break;
		}
	}
	bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u",
		__func__, bfqd->wr_busy_queues, op_is_sync(opf), depth);
	if (depth)
		data->shallow_depth = depth;
}

static struct bfq_queue *
bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
		     sector_t sector, struct rb_node **ret_parent,
		     struct rb_node ***rb_link)
{
	struct rb_node **p, *parent;
	struct bfq_queue *bfqq = NULL;

	parent = NULL;
	p = &root->rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		bfqq = rb_entry(parent, struct bfq_queue, pos_node);

		/*
		 * Sort strictly based on sector. Smallest to the left,
		 * largest to the right.
		 */
		if (sector > blk_rq_pos(bfqq->next_rq))
			n = &(*p)->rb_right;
		else if (sector < blk_rq_pos(bfqq->next_rq))
			n = &(*p)->rb_left;
		else
			break;
		p = n;
		bfqq = NULL;
	}

	*ret_parent = parent;
	if (rb_link)
		*rb_link = p;

	bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
		(unsigned long long)sector,
		bfqq ? bfqq->pid : 0);

	return bfqq;
}

static bool bfq_too_late_for_merging(struct bfq_queue *bfqq)
{
	return bfqq->service_from_backlogged > 0 &&
		time_is_before_jiffies(bfqq->first_IO_time +
				       bfq_merge_time_limit);
}

/*
 * The following function is marked as __cold not because it is
 * actually cold, but for the same performance goal described in the
 * comments on the likely() at the beginning of
 * bfq_setup_cooperator(). Unexpectedly, to reach an even lower
 * execution time for the case where this function is not invoked, we
 * had to add an unlikely() in each involved if().
 */
void __cold
bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	struct rb_node **p, *parent;
	struct bfq_queue *__bfqq;

	if (bfqq->pos_root) {
		rb_erase(&bfqq->pos_node, bfqq->pos_root);
		bfqq->pos_root = NULL;
	}

	/* oom_bfqq does not participate in queue merging */
	if (bfqq == &bfqd->oom_bfqq)
		return;

	/*
	 * bfqq cannot be merged any longer (see comments in
	 * bfq_setup_cooperator): no point in adding bfqq into the
	 * position tree.
	 */
	if (bfq_too_late_for_merging(bfqq))
		return;

	if (bfq_class_idle(bfqq))
		return;
	if (!bfqq->next_rq)
		return;

	bfqq->pos_root = &bfqq_group(bfqq)->rq_pos_tree;
	__bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
			blk_rq_pos(bfqq->next_rq), &parent, &p);
	if (!__bfqq) {
		rb_link_node(&bfqq->pos_node, parent, p);
		rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
	} else
		bfqq->pos_root = NULL;
}

/*
 * The following function returns false either if every active queue
 * must receive the same share of the throughput (symmetric scenario),
 * or, as a special case, if bfqq must receive a share of the
 * throughput lower than or equal to the share that every other active
 * queue must receive.  If bfqq does sync I/O, then these are the only
 * two cases where bfqq happens to be guaranteed its share of the
 * throughput even if I/O dispatching is not plugged when bfqq remains
 * temporarily empty (for more details, see the comments in the
 * function bfq_better_to_idle()). For this reason, the return value
 * of this function is used to check whether I/O-dispatch plugging can
 * be avoided.
 *
 * The above first case (symmetric scenario) occurs when:
 * 1) all active queues have the same weight,
 * 2) all active queues belong to the same I/O-priority class,
 * 3) all active groups at the same level in the groups tree have the same
 *    weight,
 * 4) all active groups at the same level in the groups tree have the same
 *    number of children.
 *
 * Unfortunately, keeping the necessary state for evaluating exactly
 * the last two symmetry sub-conditions above would be quite complex
 * and time consuming. Therefore this function evaluates, instead,
 * only the following stronger three sub-conditions, for which it is
 * much easier to maintain the needed state:
 * 1) all active queues have the same weight,
 * 2) all active queues belong to the same I/O-priority class,
 * 3) there is at most one active group.
 * In particular, the last condition is always true if hierarchical
 * support or the cgroups interface are not enabled, thus no state
 * needs to be maintained in this case.
 */
static bool bfq_asymmetric_scenario(struct bfq_data *bfqd,
				   struct bfq_queue *bfqq)
{
	bool smallest_weight = bfqq &&
		bfqq->weight_counter &&
		bfqq->weight_counter ==
		container_of(
			rb_first_cached(&bfqd->queue_weights_tree),
			struct bfq_weight_counter,
			weights_node);

	/*
	 * For queue weights to differ, queue_weights_tree must contain
	 * at least two nodes.
	 */
	bool varied_queue_weights = !smallest_weight &&
		!RB_EMPTY_ROOT(&bfqd->queue_weights_tree.rb_root) &&
		(bfqd->queue_weights_tree.rb_root.rb_node->rb_left ||
		 bfqd->queue_weights_tree.rb_root.rb_node->rb_right);

	bool multiple_classes_busy =
		(bfqd->busy_queues[0] && bfqd->busy_queues[1]) ||
		(bfqd->busy_queues[0] && bfqd->busy_queues[2]) ||
		(bfqd->busy_queues[1] && bfqd->busy_queues[2]);

	return varied_queue_weights || multiple_classes_busy
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	       || bfqd->num_groups_with_pending_reqs > 1
#endif
		;
}

/*
 * If the weight-counter tree passed as input contains no counter for
 * the weight of the input queue, then add that counter; otherwise just
 * increment the existing counter.
 *
 * Note that weight-counter trees contain few nodes in mostly symmetric
 * scenarios. For example, if all queues have the same weight, then the
 * weight-counter tree for the queues may contain at most one node.
 * This holds even if low_latency is on, because weight-raised queues
 * are not inserted in the tree.
 * In most scenarios, the rate at which nodes are created/destroyed
 * should be low too.
 */
void bfq_weights_tree_add(struct bfq_queue *bfqq)
{
	struct rb_root_cached *root = &bfqq->bfqd->queue_weights_tree;
	struct bfq_entity *entity = &bfqq->entity;
	struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
	bool leftmost = true;

	/*
	 * Do not insert if the queue is already associated with a
	 * counter, which happens if:
	 *   1) a request arrival has caused the queue to become both
	 *      non-weight-raised, and hence change its weight, and
	 *      backlogged; in this respect, each of the two events
	 *      causes an invocation of this function,
	 *   2) this is the invocation of this function caused by the
	 *      second event. This second invocation is actually useless,
	 *      and we handle this fact by exiting immediately. More
	 *      efficient or clearer solutions might possibly be adopted.
	 */
	if (bfqq->weight_counter)
		return;

	while (*new) {
		struct bfq_weight_counter *__counter = container_of(*new,
						struct bfq_weight_counter,
						weights_node);
		parent = *new;

		if (entity->weight == __counter->weight) {
			bfqq->weight_counter = __counter;
			goto inc_counter;
		}
		if (entity->weight < __counter->weight)
			new = &((*new)->rb_left);
		else {
			new = &((*new)->rb_right);
			leftmost = false;
		}
	}

93962306a36Sopenharmony_ci	bfqq->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
94062306a36Sopenharmony_ci				       GFP_ATOMIC);
94162306a36Sopenharmony_ci
94262306a36Sopenharmony_ci	/*
94362306a36Sopenharmony_ci	 * In the unlucky event of an allocation failure, we just
94462306a36Sopenharmony_ci	 * exit. This causes the weight of the queue not to be
94562306a36Sopenharmony_ci	 * considered in bfq_asymmetric_scenario, which, in turn,
94662306a36Sopenharmony_ci	 * may cause the scenario to be wrongly deemed symmetric, in
94762306a36Sopenharmony_ci	 * case bfqq's weight would have been the only weight making
94862306a36Sopenharmony_ci	 * the scenario asymmetric. On the bright side, no imbalance
94962306a36Sopenharmony_ci	 * will occur when bfqq becomes inactive again (the
95062306a36Sopenharmony_ci	 * invocation of this function is triggered by an activation
95162306a36Sopenharmony_ci	 * of the queue). In fact, bfq_weights_tree_remove does
95262306a36Sopenharmony_ci	 * nothing if !bfqq->weight_counter.
95362306a36Sopenharmony_ci	 */
95462306a36Sopenharmony_ci	if (unlikely(!bfqq->weight_counter))
95562306a36Sopenharmony_ci		return;
95662306a36Sopenharmony_ci
95762306a36Sopenharmony_ci	bfqq->weight_counter->weight = entity->weight;
95862306a36Sopenharmony_ci	rb_link_node(&bfqq->weight_counter->weights_node, parent, new);
95962306a36Sopenharmony_ci	rb_insert_color_cached(&bfqq->weight_counter->weights_node, root,
96062306a36Sopenharmony_ci				leftmost);
96162306a36Sopenharmony_ci
96262306a36Sopenharmony_ciinc_counter:
96362306a36Sopenharmony_ci	bfqq->weight_counter->num_active++;
96462306a36Sopenharmony_ci	bfqq->ref++;
96562306a36Sopenharmony_ci}
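
/*
 * Accounting sketch for bfq_weights_tree_add() (weights hypothetical):
 * adding three queues with weight 100 and one queue with weight 300
 * leaves the tree with just two nodes,
 *
 *	{ .weight = 100, .num_active = 3 }
 *	{ .weight = 300, .num_active = 1 }
 *
 * because each call either bumps num_active of the matching node or
 * allocates a new node. bfqq->ref is incremented as well, so the queue
 * cannot be freed while it is still accounted for in the tree.
 */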
96662306a36Sopenharmony_ci
96762306a36Sopenharmony_ci/*
96862306a36Sopenharmony_ci * Decrement the weight counter associated with the queue, and, if the
96962306a36Sopenharmony_ci * counter reaches 0, remove the counter from the tree.
97062306a36Sopenharmony_ci * See the comments to the function bfq_weights_tree_add() for considerations
97162306a36Sopenharmony_ci * about overhead.
97262306a36Sopenharmony_ci */
97362306a36Sopenharmony_civoid bfq_weights_tree_remove(struct bfq_queue *bfqq)
97462306a36Sopenharmony_ci{
97562306a36Sopenharmony_ci	struct rb_root_cached *root;
97662306a36Sopenharmony_ci
97762306a36Sopenharmony_ci	if (!bfqq->weight_counter)
97862306a36Sopenharmony_ci		return;
97962306a36Sopenharmony_ci
98062306a36Sopenharmony_ci	root = &bfqq->bfqd->queue_weights_tree;
98162306a36Sopenharmony_ci	bfqq->weight_counter->num_active--;
98262306a36Sopenharmony_ci	if (bfqq->weight_counter->num_active > 0)
98362306a36Sopenharmony_ci		goto reset_entity_pointer;
98462306a36Sopenharmony_ci
98562306a36Sopenharmony_ci	rb_erase_cached(&bfqq->weight_counter->weights_node, root);
98662306a36Sopenharmony_ci	kfree(bfqq->weight_counter);
98762306a36Sopenharmony_ci
98862306a36Sopenharmony_cireset_entity_pointer:
98962306a36Sopenharmony_ci	bfqq->weight_counter = NULL;
99062306a36Sopenharmony_ci	bfq_put_queue(bfqq);
99162306a36Sopenharmony_ci}
99262306a36Sopenharmony_ci
99362306a36Sopenharmony_ci/*
99462306a36Sopenharmony_ci * Return expired entry, or NULL to just start from scratch in rbtree.
99562306a36Sopenharmony_ci */
99662306a36Sopenharmony_cistatic struct request *bfq_check_fifo(struct bfq_queue *bfqq,
99762306a36Sopenharmony_ci				      struct request *last)
99862306a36Sopenharmony_ci{
99962306a36Sopenharmony_ci	struct request *rq;
100062306a36Sopenharmony_ci
100162306a36Sopenharmony_ci	if (bfq_bfqq_fifo_expire(bfqq))
100262306a36Sopenharmony_ci		return NULL;
100362306a36Sopenharmony_ci
100462306a36Sopenharmony_ci	bfq_mark_bfqq_fifo_expire(bfqq);
100562306a36Sopenharmony_ci
100662306a36Sopenharmony_ci	rq = rq_entry_fifo(bfqq->fifo.next);
100762306a36Sopenharmony_ci
100862306a36Sopenharmony_ci	if (rq == last || ktime_get_ns() < rq->fifo_time)
100962306a36Sopenharmony_ci		return NULL;
101062306a36Sopenharmony_ci
101162306a36Sopenharmony_ci	bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
101262306a36Sopenharmony_ci	return rq;
101362306a36Sopenharmony_ci}
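
/*
 * Usage sketch for the FIFO check above (timings hypothetical): if the
 * oldest queued request has rq->fifo_time = T and ktime_get_ns() < T,
 * the request has not expired yet and NULL is returned, so dispatch
 * keeps following sector order; once the clock passes T, the expired
 * request is returned and served first. The fifo_expire flag limits
 * how often this check is repeated; it is cleared again elsewhere when
 * the queue is re-selected for service.
 */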
101462306a36Sopenharmony_ci
101562306a36Sopenharmony_cistatic struct request *bfq_find_next_rq(struct bfq_data *bfqd,
101662306a36Sopenharmony_ci					struct bfq_queue *bfqq,
101762306a36Sopenharmony_ci					struct request *last)
101862306a36Sopenharmony_ci{
101962306a36Sopenharmony_ci	struct rb_node *rbnext = rb_next(&last->rb_node);
102062306a36Sopenharmony_ci	struct rb_node *rbprev = rb_prev(&last->rb_node);
102162306a36Sopenharmony_ci	struct request *next, *prev = NULL;
102262306a36Sopenharmony_ci
102362306a36Sopenharmony_ci	/* Follow expired path, else get first next available. */
102462306a36Sopenharmony_ci	next = bfq_check_fifo(bfqq, last);
102562306a36Sopenharmony_ci	if (next)
102662306a36Sopenharmony_ci		return next;
102762306a36Sopenharmony_ci
102862306a36Sopenharmony_ci	if (rbprev)
102962306a36Sopenharmony_ci		prev = rb_entry_rq(rbprev);
103062306a36Sopenharmony_ci
103162306a36Sopenharmony_ci	if (rbnext)
103262306a36Sopenharmony_ci		next = rb_entry_rq(rbnext);
103362306a36Sopenharmony_ci	else {
103462306a36Sopenharmony_ci		rbnext = rb_first(&bfqq->sort_list);
103562306a36Sopenharmony_ci		if (rbnext && rbnext != &last->rb_node)
103662306a36Sopenharmony_ci			next = rb_entry_rq(rbnext);
103762306a36Sopenharmony_ci	}
103862306a36Sopenharmony_ci
103962306a36Sopenharmony_ci	return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
104062306a36Sopenharmony_ci}
104162306a36Sopenharmony_ci
104262306a36Sopenharmony_ci/* see the definition of bfq_async_charge_factor for details */
104362306a36Sopenharmony_cistatic unsigned long bfq_serv_to_charge(struct request *rq,
104462306a36Sopenharmony_ci					struct bfq_queue *bfqq)
104562306a36Sopenharmony_ci{
104662306a36Sopenharmony_ci	if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1 ||
104762306a36Sopenharmony_ci	    bfq_asymmetric_scenario(bfqq->bfqd, bfqq))
104862306a36Sopenharmony_ci		return blk_rq_sectors(rq);
104962306a36Sopenharmony_ci
105062306a36Sopenharmony_ci	return blk_rq_sectors(rq) * bfq_async_charge_factor;
105162306a36Sopenharmony_ci}
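
/*
 * Worked example for the charge above (request size hypothetical): a
 * 512 KiB request spans 1024 sectors. A sync queue, a weight-raised
 * queue, or any queue in an asymmetric scenario is charged exactly
 * 1024 sectors for it; otherwise an async queue is charged
 * 1024 * bfq_async_charge_factor sectors, so async I/O consumes its
 * budget proportionally faster.
 */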
105262306a36Sopenharmony_ci
105362306a36Sopenharmony_ci/**
105462306a36Sopenharmony_ci * bfq_updated_next_req - update the queue after a new next_rq selection.
105562306a36Sopenharmony_ci * @bfqd: the device data the queue belongs to.
105662306a36Sopenharmony_ci * @bfqq: the queue to update.
105762306a36Sopenharmony_ci *
105862306a36Sopenharmony_ci * If the first request of a queue changes, we make sure that the queue
105962306a36Sopenharmony_ci * has enough budget to serve at least that first request (if the
106062306a36Sopenharmony_ci * request has grown). We do this because, if the queue does not have
106162306a36Sopenharmony_ci * enough budget for its first request, it has to go through two
106262306a36Sopenharmony_ci * dispatch rounds to actually get that request dispatched.
106362306a36Sopenharmony_ci */
106462306a36Sopenharmony_cistatic void bfq_updated_next_req(struct bfq_data *bfqd,
106562306a36Sopenharmony_ci				 struct bfq_queue *bfqq)
106662306a36Sopenharmony_ci{
106762306a36Sopenharmony_ci	struct bfq_entity *entity = &bfqq->entity;
106862306a36Sopenharmony_ci	struct request *next_rq = bfqq->next_rq;
106962306a36Sopenharmony_ci	unsigned long new_budget;
107062306a36Sopenharmony_ci
107162306a36Sopenharmony_ci	if (!next_rq)
107262306a36Sopenharmony_ci		return;
107362306a36Sopenharmony_ci
107462306a36Sopenharmony_ci	if (bfqq == bfqd->in_service_queue)
107562306a36Sopenharmony_ci		/*
107662306a36Sopenharmony_ci		 * In order not to break guarantees, budgets cannot be
107762306a36Sopenharmony_ci		 * changed after an entity has been selected.
107862306a36Sopenharmony_ci		 */
107962306a36Sopenharmony_ci		return;
108062306a36Sopenharmony_ci
108162306a36Sopenharmony_ci	new_budget = max_t(unsigned long,
108262306a36Sopenharmony_ci			   max_t(unsigned long, bfqq->max_budget,
108362306a36Sopenharmony_ci				 bfq_serv_to_charge(next_rq, bfqq)),
108462306a36Sopenharmony_ci			   entity->service);
108562306a36Sopenharmony_ci	if (entity->budget != new_budget) {
108662306a36Sopenharmony_ci		entity->budget = new_budget;
108762306a36Sopenharmony_ci		bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
108862306a36Sopenharmony_ci					 new_budget);
108962306a36Sopenharmony_ci		bfq_requeue_bfqq(bfqd, bfqq, false);
109062306a36Sopenharmony_ci	}
109162306a36Sopenharmony_ci}
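
/*
 * Numeric sketch of the update above (values hypothetical): with
 * bfqq->max_budget = 8192 sectors, entity->service = 0 and a new
 * next_rq whose charge is 12288 sectors, new_budget becomes
 * max(8192, 12288) = 12288, so the queue is requeued with a budget
 * large enough to dispatch that request in a single round.
 */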
109262306a36Sopenharmony_ci
109362306a36Sopenharmony_cistatic unsigned int bfq_wr_duration(struct bfq_data *bfqd)
109462306a36Sopenharmony_ci{
109562306a36Sopenharmony_ci	u64 dur;
109662306a36Sopenharmony_ci
109762306a36Sopenharmony_ci	dur = bfqd->rate_dur_prod;
109862306a36Sopenharmony_ci	do_div(dur, bfqd->peak_rate);
109962306a36Sopenharmony_ci
110062306a36Sopenharmony_ci	/*
110162306a36Sopenharmony_ci	 * Limit duration between 3 and 25 seconds. The upper limit
110262306a36Sopenharmony_ci	 * has been conservatively set after the following worst case:
110362306a36Sopenharmony_ci	 * on a QEMU/KVM virtual machine
110462306a36Sopenharmony_ci	 * - running in a slow PC
110562306a36Sopenharmony_ci	 * - with a virtual disk stacked on a slow low-end 5400rpm HDD
110662306a36Sopenharmony_ci	 * - serving a heavy I/O workload, such as the sequential reading
110762306a36Sopenharmony_ci	 *   of several files
110862306a36Sopenharmony_ci	 * mplayer took 23 seconds to start, if constantly weight-raised.
110962306a36Sopenharmony_ci	 *
111062306a36Sopenharmony_ci	 * As for values higher than the one accommodating the above
111162306a36Sopenharmony_ci	 * bad scenario, tests show that they would often yield the
111262306a36Sopenharmony_ci	 * opposite of the desired result, i.e., would worsen
111362306a36Sopenharmony_ci	 * responsiveness by allowing non-interactive applications to
111462306a36Sopenharmony_ci	 * preserve weight raising for too long.
111562306a36Sopenharmony_ci	 *
111662306a36Sopenharmony_ci	 * At the other end, values lower than 3 seconds make it
111762306a36Sopenharmony_ci	 * difficult for most interactive tasks to complete their jobs
111862306a36Sopenharmony_ci	 * before weight-raising finishes.
111962306a36Sopenharmony_ci	 */
112062306a36Sopenharmony_ci	return clamp_val(dur, msecs_to_jiffies(3000), msecs_to_jiffies(25000));
112162306a36Sopenharmony_ci}
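
/*
 * Numeric sketch of the clamping above (rates hypothetical): if
 * rate_dur_prod / peak_rate evaluates to the equivalent of 1.5
 * seconds, the returned duration is raised to 3 seconds; if it
 * evaluates to 40 seconds (a very slow device), it is capped at 25
 * seconds. Devices in between get the computed, rate-dependent
 * duration unchanged.
 */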
112262306a36Sopenharmony_ci
112362306a36Sopenharmony_ci/* switch back from soft real-time to interactive weight raising */
112462306a36Sopenharmony_cistatic void switch_back_to_interactive_wr(struct bfq_queue *bfqq,
112562306a36Sopenharmony_ci					  struct bfq_data *bfqd)
112662306a36Sopenharmony_ci{
112762306a36Sopenharmony_ci	bfqq->wr_coeff = bfqd->bfq_wr_coeff;
112862306a36Sopenharmony_ci	bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
112962306a36Sopenharmony_ci	bfqq->last_wr_start_finish = bfqq->wr_start_at_switch_to_srt;
113062306a36Sopenharmony_ci}
113162306a36Sopenharmony_ci
113262306a36Sopenharmony_cistatic void
113362306a36Sopenharmony_cibfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
113462306a36Sopenharmony_ci		      struct bfq_io_cq *bic, bool bfq_already_existing)
113562306a36Sopenharmony_ci{
113662306a36Sopenharmony_ci	unsigned int old_wr_coeff = 1;
113762306a36Sopenharmony_ci	bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);
113862306a36Sopenharmony_ci	unsigned int a_idx = bfqq->actuator_idx;
113962306a36Sopenharmony_ci	struct bfq_iocq_bfqq_data *bfqq_data = &bic->bfqq_data[a_idx];
114062306a36Sopenharmony_ci
114162306a36Sopenharmony_ci	if (bfqq_data->saved_has_short_ttime)
114262306a36Sopenharmony_ci		bfq_mark_bfqq_has_short_ttime(bfqq);
114362306a36Sopenharmony_ci	else
114462306a36Sopenharmony_ci		bfq_clear_bfqq_has_short_ttime(bfqq);
114562306a36Sopenharmony_ci
114662306a36Sopenharmony_ci	if (bfqq_data->saved_IO_bound)
114762306a36Sopenharmony_ci		bfq_mark_bfqq_IO_bound(bfqq);
114862306a36Sopenharmony_ci	else
114962306a36Sopenharmony_ci		bfq_clear_bfqq_IO_bound(bfqq);
115062306a36Sopenharmony_ci
115162306a36Sopenharmony_ci	bfqq->last_serv_time_ns = bfqq_data->saved_last_serv_time_ns;
115262306a36Sopenharmony_ci	bfqq->inject_limit = bfqq_data->saved_inject_limit;
115362306a36Sopenharmony_ci	bfqq->decrease_time_jif = bfqq_data->saved_decrease_time_jif;
115462306a36Sopenharmony_ci
115562306a36Sopenharmony_ci	bfqq->entity.new_weight = bfqq_data->saved_weight;
115662306a36Sopenharmony_ci	bfqq->ttime = bfqq_data->saved_ttime;
115762306a36Sopenharmony_ci	bfqq->io_start_time = bfqq_data->saved_io_start_time;
115862306a36Sopenharmony_ci	bfqq->tot_idle_time = bfqq_data->saved_tot_idle_time;
115962306a36Sopenharmony_ci	/*
116062306a36Sopenharmony_ci	 * Restore weight coefficient only if low_latency is on
116162306a36Sopenharmony_ci	 */
116262306a36Sopenharmony_ci	if (bfqd->low_latency) {
116362306a36Sopenharmony_ci		old_wr_coeff = bfqq->wr_coeff;
116462306a36Sopenharmony_ci		bfqq->wr_coeff = bfqq_data->saved_wr_coeff;
116562306a36Sopenharmony_ci	}
116662306a36Sopenharmony_ci	bfqq->service_from_wr = bfqq_data->saved_service_from_wr;
116762306a36Sopenharmony_ci	bfqq->wr_start_at_switch_to_srt =
116862306a36Sopenharmony_ci		bfqq_data->saved_wr_start_at_switch_to_srt;
116962306a36Sopenharmony_ci	bfqq->last_wr_start_finish = bfqq_data->saved_last_wr_start_finish;
117062306a36Sopenharmony_ci	bfqq->wr_cur_max_time = bfqq_data->saved_wr_cur_max_time;
117162306a36Sopenharmony_ci
117262306a36Sopenharmony_ci	if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
117362306a36Sopenharmony_ci	    time_is_before_jiffies(bfqq->last_wr_start_finish +
117462306a36Sopenharmony_ci				   bfqq->wr_cur_max_time))) {
117562306a36Sopenharmony_ci		if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
117662306a36Sopenharmony_ci		    !bfq_bfqq_in_large_burst(bfqq) &&
117762306a36Sopenharmony_ci		    time_is_after_eq_jiffies(bfqq->wr_start_at_switch_to_srt +
117862306a36Sopenharmony_ci					     bfq_wr_duration(bfqd))) {
117962306a36Sopenharmony_ci			switch_back_to_interactive_wr(bfqq, bfqd);
118062306a36Sopenharmony_ci		} else {
118162306a36Sopenharmony_ci			bfqq->wr_coeff = 1;
118262306a36Sopenharmony_ci			bfq_log_bfqq(bfqq->bfqd, bfqq,
118362306a36Sopenharmony_ci				     "resume state: switching off wr");
118462306a36Sopenharmony_ci		}
118562306a36Sopenharmony_ci	}
118662306a36Sopenharmony_ci
118762306a36Sopenharmony_ci	/* make sure weight will be updated, however we got here */
118862306a36Sopenharmony_ci	bfqq->entity.prio_changed = 1;
118962306a36Sopenharmony_ci
119062306a36Sopenharmony_ci	if (likely(!busy))
119162306a36Sopenharmony_ci		return;
119262306a36Sopenharmony_ci
119362306a36Sopenharmony_ci	if (old_wr_coeff == 1 && bfqq->wr_coeff > 1)
119462306a36Sopenharmony_ci		bfqd->wr_busy_queues++;
119562306a36Sopenharmony_ci	else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1)
119662306a36Sopenharmony_ci		bfqd->wr_busy_queues--;
119762306a36Sopenharmony_ci}
119862306a36Sopenharmony_ci
119962306a36Sopenharmony_cistatic int bfqq_process_refs(struct bfq_queue *bfqq)
120062306a36Sopenharmony_ci{
120162306a36Sopenharmony_ci	return bfqq->ref - bfqq->entity.allocated -
120262306a36Sopenharmony_ci		bfqq->entity.on_st_or_in_serv -
120362306a36Sopenharmony_ci		(bfqq->weight_counter != NULL) - bfqq->stable_ref;
120462306a36Sopenharmony_ci}
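
/*
 * Accounting sketch for bfqq_process_refs() (counts hypothetical): a
 * queue with ref = 5, entity.allocated = 2 (two requests hold a
 * reference), entity.on_st_or_in_serv = 1, a non-NULL weight_counter
 * and stable_ref = 0 has 5 - 2 - 1 - 1 - 0 = 1 process reference,
 * i.e., exactly one process is actually using the queue.
 */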
120562306a36Sopenharmony_ci
120662306a36Sopenharmony_ci/* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */
120762306a36Sopenharmony_cistatic void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
120862306a36Sopenharmony_ci{
120962306a36Sopenharmony_ci	struct bfq_queue *item;
121062306a36Sopenharmony_ci	struct hlist_node *n;
121162306a36Sopenharmony_ci
121262306a36Sopenharmony_ci	hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
121362306a36Sopenharmony_ci		hlist_del_init(&item->burst_list_node);
121462306a36Sopenharmony_ci
121562306a36Sopenharmony_ci	/*
121662306a36Sopenharmony_ci	 * Start the creation of a new burst list only if there is no
121762306a36Sopenharmony_ci	 * active queue. See comments on the conditional invocation of
121862306a36Sopenharmony_ci	 * bfq_handle_burst().
121962306a36Sopenharmony_ci	 */
122062306a36Sopenharmony_ci	if (bfq_tot_busy_queues(bfqd) == 0) {
122162306a36Sopenharmony_ci		hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
122262306a36Sopenharmony_ci		bfqd->burst_size = 1;
122362306a36Sopenharmony_ci	} else
122462306a36Sopenharmony_ci		bfqd->burst_size = 0;
122562306a36Sopenharmony_ci
122662306a36Sopenharmony_ci	bfqd->burst_parent_entity = bfqq->entity.parent;
122762306a36Sopenharmony_ci}
122862306a36Sopenharmony_ci
122962306a36Sopenharmony_ci/* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
123062306a36Sopenharmony_cistatic void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
123162306a36Sopenharmony_ci{
123262306a36Sopenharmony_ci	/* Increment burst size to take into account also bfqq */
123362306a36Sopenharmony_ci	bfqd->burst_size++;
123462306a36Sopenharmony_ci
123562306a36Sopenharmony_ci	if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) {
123662306a36Sopenharmony_ci		struct bfq_queue *pos, *bfqq_item;
123762306a36Sopenharmony_ci		struct hlist_node *n;
123862306a36Sopenharmony_ci
123962306a36Sopenharmony_ci		/*
124062306a36Sopenharmony_ci		 * Enough queues have been activated shortly after each
124162306a36Sopenharmony_ci		 * other to consider this burst as large.
124262306a36Sopenharmony_ci		 */
124362306a36Sopenharmony_ci		bfqd->large_burst = true;
124462306a36Sopenharmony_ci
124562306a36Sopenharmony_ci		/*
124662306a36Sopenharmony_ci		 * We can now mark all queues in the burst list as
124762306a36Sopenharmony_ci		 * belonging to a large burst.
124862306a36Sopenharmony_ci		 */
124962306a36Sopenharmony_ci		hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
125062306a36Sopenharmony_ci				     burst_list_node)
125162306a36Sopenharmony_ci			bfq_mark_bfqq_in_large_burst(bfqq_item);
125262306a36Sopenharmony_ci		bfq_mark_bfqq_in_large_burst(bfqq);
125362306a36Sopenharmony_ci
125462306a36Sopenharmony_ci		/*
125562306a36Sopenharmony_ci		 * From now on, and until the current burst finishes, any
125662306a36Sopenharmony_ci		 * new queue being activated shortly after the last queue
125762306a36Sopenharmony_ci		 * was inserted in the burst can be immediately marked as
125862306a36Sopenharmony_ci		 * belonging to a large burst. So the burst list is not
125962306a36Sopenharmony_ci		 * needed any more. Remove it.
126062306a36Sopenharmony_ci		 */
126162306a36Sopenharmony_ci		hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,
126262306a36Sopenharmony_ci					  burst_list_node)
126362306a36Sopenharmony_ci			hlist_del_init(&pos->burst_list_node);
126462306a36Sopenharmony_ci	} else /*
126562306a36Sopenharmony_ci		* Burst not yet large: add bfqq to the burst list. Do
126662306a36Sopenharmony_ci		* not increment the ref counter for bfqq, because bfqq
126762306a36Sopenharmony_ci		* is removed from the burst list before freeing bfqq
126862306a36Sopenharmony_ci		* in put_queue.
126962306a36Sopenharmony_ci		*/
127062306a36Sopenharmony_ci		hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
127162306a36Sopenharmony_ci}
127262306a36Sopenharmony_ci
127362306a36Sopenharmony_ci/*
127462306a36Sopenharmony_ci * If many queues belonging to the same group happen to be created
127562306a36Sopenharmony_ci * shortly after each other, then the processes associated with these
127662306a36Sopenharmony_ci * queues have typically a common goal. In particular, bursts of queue
127762306a36Sopenharmony_ci * creations are usually caused by services or applications that spawn
127862306a36Sopenharmony_ci * many parallel threads/processes. Examples are systemd during boot,
127962306a36Sopenharmony_ci * or git grep. To help these processes get their job done as soon as
128062306a36Sopenharmony_ci * possible, it is usually better to not grant either weight-raising
128162306a36Sopenharmony_ci * or device idling to their queues, unless these queues must be
128262306a36Sopenharmony_ci * protected from the I/O flowing through other active queues.
128362306a36Sopenharmony_ci *
128462306a36Sopenharmony_ci * In this comment we describe, firstly, the reasons why this fact
128562306a36Sopenharmony_ci * holds, and, secondly, the next function, which implements the main
128662306a36Sopenharmony_ci * steps needed to properly mark these queues so that they can then be
128762306a36Sopenharmony_ci * treated in a different way.
128862306a36Sopenharmony_ci *
128962306a36Sopenharmony_ci * The above services or applications benefit mostly from a high
129062306a36Sopenharmony_ci * throughput: the quicker the requests of the activated queues are
129162306a36Sopenharmony_ci * cumulatively served, the sooner the target job of these queues gets
129262306a36Sopenharmony_ci * completed. As a consequence, weight-raising any of these queues,
129362306a36Sopenharmony_ci * which also implies idling the device for it, is almost always
129462306a36Sopenharmony_ci * counterproductive, unless there are other active queues to isolate
129562306a36Sopenharmony_ci * these new queues from. If there are no other active queues, then
129662306a36Sopenharmony_ci * weight-raising these new queues just lowers throughput in most
129762306a36Sopenharmony_ci * cases.
129862306a36Sopenharmony_ci *
129962306a36Sopenharmony_ci * On the other hand, a burst of queue creations may be caused also by
130062306a36Sopenharmony_ci * the start of an application that does not consist of a lot of
130162306a36Sopenharmony_ci * parallel I/O-bound threads. In fact, with a complex application,
130262306a36Sopenharmony_ci * several short processes may need to be executed to start-up the
130362306a36Sopenharmony_ci * several short processes may need to be executed to start up the
130462306a36Sopenharmony_ci * possible, the best thing to do is in any case to privilege the I/O
130562306a36Sopenharmony_ci * related to the application with respect to all other
130662306a36Sopenharmony_ci * I/O. Therefore, the best strategy to start as quickly as possible
130762306a36Sopenharmony_ci * an application that causes a burst of queue creations is to
130862306a36Sopenharmony_ci * weight-raise all the queues created during the burst. This is the
130962306a36Sopenharmony_ci * exact opposite of the best strategy for the other type of bursts.
131062306a36Sopenharmony_ci *
131162306a36Sopenharmony_ci * In the end, to take the best action for each of the two cases, the
131262306a36Sopenharmony_ci * two types of bursts need to be distinguished. Fortunately, this
131362306a36Sopenharmony_ci * seems relatively easy, by looking at the sizes of the bursts. In
131462306a36Sopenharmony_ci * particular, we found a threshold such that only bursts with a
131562306a36Sopenharmony_ci * larger size than that threshold are apparently caused by
131662306a36Sopenharmony_ci * services or commands such as systemd or git grep. For brevity,
131762306a36Sopenharmony_ci * hereafter we call just 'large' these bursts. BFQ *does not*
131862306a36Sopenharmony_ci * weight-raise queues whose creation occurs in a large burst. In
131962306a36Sopenharmony_ci * addition, for each of these queues BFQ performs or does not perform
132062306a36Sopenharmony_ci * idling depending on which choice boosts the throughput more. The
132162306a36Sopenharmony_ci * exact choice depends on the device and request pattern at
132262306a36Sopenharmony_ci * hand.
132362306a36Sopenharmony_ci *
132462306a36Sopenharmony_ci * Unfortunately, false positives may occur while an interactive task
132562306a36Sopenharmony_ci * is starting (e.g., an application is being started). The
132662306a36Sopenharmony_ci * consequence is that the queues associated with the task do not
132762306a36Sopenharmony_ci * enjoy weight raising as expected. Fortunately these false positives
132862306a36Sopenharmony_ci * are very rare. They typically occur if some service happens to
132962306a36Sopenharmony_ci * start doing I/O exactly when the interactive task starts.
133062306a36Sopenharmony_ci *
133162306a36Sopenharmony_ci * Turning back to the next function, it is invoked only if there are
133262306a36Sopenharmony_ci * no active queues (apart from active queues that would belong to the
133362306a36Sopenharmony_ci * same possible burst that bfqq would belong to), and it implements all
133462306a36Sopenharmony_ci * the steps needed to detect the occurrence of a large burst and to
133562306a36Sopenharmony_ci * properly mark all the queues belonging to it (so that they can then
133662306a36Sopenharmony_ci * be treated in a different way). This goal is achieved by
133762306a36Sopenharmony_ci * maintaining a "burst list" that holds, temporarily, the queues that
133862306a36Sopenharmony_ci * belong to the burst in progress. The list is then used to mark
133962306a36Sopenharmony_ci * these queues as belonging to a large burst if the burst does become
134062306a36Sopenharmony_ci * large. The main steps are the following.
134162306a36Sopenharmony_ci *
134262306a36Sopenharmony_ci * . when the very first queue is created, the queue is inserted into the
134362306a36Sopenharmony_ci *   list (as it could be the first queue in a possible burst)
134462306a36Sopenharmony_ci *
134562306a36Sopenharmony_ci * . if the current burst has not yet become large, and a queue Q that does
134662306a36Sopenharmony_ci *   not yet belong to the burst is activated shortly after the last time
134762306a36Sopenharmony_ci *   at which a new queue entered the burst list, then the function appends
134862306a36Sopenharmony_ci *   Q to the burst list
134962306a36Sopenharmony_ci *
135062306a36Sopenharmony_ci * . if, as a consequence of the previous step, the burst size reaches
135162306a36Sopenharmony_ci *   the large-burst threshold, then
135262306a36Sopenharmony_ci *
135362306a36Sopenharmony_ci *     . all the queues in the burst list are marked as belonging to a
135462306a36Sopenharmony_ci *       large burst
135562306a36Sopenharmony_ci *
135662306a36Sopenharmony_ci *     . the burst list is deleted; in fact, the burst list already served
135762306a36Sopenharmony_ci *       its purpose (keeping temporarily track of the queues in a burst,
135862306a36Sopenharmony_ci *       so as to be able to mark them as belonging to a large burst in the
135962306a36Sopenharmony_ci *       previous sub-step), and now is not needed any more
136062306a36Sopenharmony_ci *
136162306a36Sopenharmony_ci *     . the device enters a large-burst mode
136262306a36Sopenharmony_ci *
136362306a36Sopenharmony_ci * . if a queue Q that does not belong to the burst is created while
136462306a36Sopenharmony_ci *   the device is in large-burst mode and shortly after the last time
136562306a36Sopenharmony_ci *   at which a queue either entered the burst list or was marked as
136662306a36Sopenharmony_ci *   belonging to the current large burst, then Q is immediately marked
136762306a36Sopenharmony_ci *   as belonging to a large burst.
136862306a36Sopenharmony_ci *
136962306a36Sopenharmony_ci * . if a queue Q that does not belong to the burst is created a while
137062306a36Sopenharmony_ci *   after, i.e., not shortly after, the last time at which a queue
137162306a36Sopenharmony_ci *   either entered the burst list or was marked as belonging to the
137262306a36Sopenharmony_ci *   current large burst, then the current burst is deemed finished and:
137362306a36Sopenharmony_ci *
137462306a36Sopenharmony_ci *        . the large-burst mode is reset if set
137562306a36Sopenharmony_ci *
137662306a36Sopenharmony_ci *        . the burst list is emptied
137762306a36Sopenharmony_ci *
137862306a36Sopenharmony_ci *        . Q is inserted in the burst list, as Q may be the first queue
137962306a36Sopenharmony_ci *          in a possible new burst (then the burst list contains just Q
138062306a36Sopenharmony_ci *          after this step).
138162306a36Sopenharmony_ci */
138262306a36Sopenharmony_cistatic void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
138362306a36Sopenharmony_ci{
138462306a36Sopenharmony_ci	/*
138562306a36Sopenharmony_ci	 * If bfqq is already in the burst list or is part of a large
138662306a36Sopenharmony_ci	 * burst, or finally has just been split, then there is
138762306a36Sopenharmony_ci	 * nothing else to do.
138862306a36Sopenharmony_ci	 */
138962306a36Sopenharmony_ci	if (!hlist_unhashed(&bfqq->burst_list_node) ||
139062306a36Sopenharmony_ci	    bfq_bfqq_in_large_burst(bfqq) ||
139162306a36Sopenharmony_ci	    time_is_after_eq_jiffies(bfqq->split_time +
139262306a36Sopenharmony_ci				     msecs_to_jiffies(10)))
139362306a36Sopenharmony_ci		return;
139462306a36Sopenharmony_ci
139562306a36Sopenharmony_ci	/*
139662306a36Sopenharmony_ci	 * If bfqq's creation happens late enough, or bfqq belongs to
139762306a36Sopenharmony_ci	 * a different group than the burst group, then the current
139862306a36Sopenharmony_ci	 * burst is finished, and related data structures must be
139962306a36Sopenharmony_ci	 * reset.
140062306a36Sopenharmony_ci	 *
140162306a36Sopenharmony_ci	 * In this respect, consider the special case where bfqq is
140262306a36Sopenharmony_ci	 * the very first queue created after BFQ is selected for this
140362306a36Sopenharmony_ci	 * device. In this case, last_ins_in_burst and
140462306a36Sopenharmony_ci	 * burst_parent_entity are not yet significant when we get
140562306a36Sopenharmony_ci	 * here. But it is easy to verify that, whether or not the
140662306a36Sopenharmony_ci	 * following condition is true, bfqq will end up being
140762306a36Sopenharmony_ci	 * inserted into the burst list. In particular the list will
140862306a36Sopenharmony_ci	 * happen to contain only bfqq. And this is exactly what has
140962306a36Sopenharmony_ci	 * to happen, as bfqq may be the first queue of the first
141062306a36Sopenharmony_ci	 * burst.
141162306a36Sopenharmony_ci	 */
141262306a36Sopenharmony_ci	if (time_is_before_jiffies(bfqd->last_ins_in_burst +
141362306a36Sopenharmony_ci	    bfqd->bfq_burst_interval) ||
141462306a36Sopenharmony_ci	    bfqq->entity.parent != bfqd->burst_parent_entity) {
141562306a36Sopenharmony_ci		bfqd->large_burst = false;
141662306a36Sopenharmony_ci		bfq_reset_burst_list(bfqd, bfqq);
141762306a36Sopenharmony_ci		goto end;
141862306a36Sopenharmony_ci	}
141962306a36Sopenharmony_ci
142062306a36Sopenharmony_ci	/*
142162306a36Sopenharmony_ci	 * If we get here, then bfqq is being activated shortly after the
142262306a36Sopenharmony_ci	 * last queue. So, if the current burst is also large, we can mark
142362306a36Sopenharmony_ci	 * bfqq as belonging to this large burst immediately.
142462306a36Sopenharmony_ci	 */
142562306a36Sopenharmony_ci	if (bfqd->large_burst) {
142662306a36Sopenharmony_ci		bfq_mark_bfqq_in_large_burst(bfqq);
142762306a36Sopenharmony_ci		goto end;
142862306a36Sopenharmony_ci	}
142962306a36Sopenharmony_ci
143062306a36Sopenharmony_ci	/*
143162306a36Sopenharmony_ci	 * If we get here, then a large-burst state has not yet been
143262306a36Sopenharmony_ci	 * reached, but bfqq is being activated shortly after the last
143362306a36Sopenharmony_ci	 * queue. Then we add bfqq to the burst.
143462306a36Sopenharmony_ci	 */
143562306a36Sopenharmony_ci	bfq_add_to_burst(bfqd, bfqq);
143662306a36Sopenharmony_ciend:
143762306a36Sopenharmony_ci	/*
143862306a36Sopenharmony_ci	 * At this point, bfqq either has been added to the current
143962306a36Sopenharmony_ci	 * burst or has caused the current burst to terminate and a
144062306a36Sopenharmony_ci	 * possible new burst to start. In particular, in the second
144162306a36Sopenharmony_ci	 * case, bfqq has become the first queue in the possible new
144262306a36Sopenharmony_ci	 * burst.  In both cases last_ins_in_burst needs to be moved
144362306a36Sopenharmony_ci	 * forward.
144462306a36Sopenharmony_ci	 */
144562306a36Sopenharmony_ci	bfqd->last_ins_in_burst = jiffies;
144662306a36Sopenharmony_ci}
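
/*
 * Timeline sketch of the burst handling above (timings hypothetical):
 * queues Q1..Qn of the same group are created in rapid succession,
 * each within bfq_burst_interval of the previous one, so every
 * activation takes the bfq_add_to_burst() path and burst_size grows by
 * one. When burst_size reaches bfq_large_burst_thresh, all queues in
 * the burst list are marked as in_large_burst, the list is emptied and
 * large-burst mode is entered; any further queue created shortly
 * afterwards is marked immediately. If instead a queue is created
 * after a long pause, the bfq_reset_burst_list() path starts a fresh,
 * possibly new burst.
 */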
144762306a36Sopenharmony_ci
144862306a36Sopenharmony_cistatic int bfq_bfqq_budget_left(struct bfq_queue *bfqq)
144962306a36Sopenharmony_ci{
145062306a36Sopenharmony_ci	struct bfq_entity *entity = &bfqq->entity;
145162306a36Sopenharmony_ci
145262306a36Sopenharmony_ci	return entity->budget - entity->service;
145362306a36Sopenharmony_ci}
145462306a36Sopenharmony_ci
145562306a36Sopenharmony_ci/*
145662306a36Sopenharmony_ci * If enough samples have been computed, return the current max budget
145762306a36Sopenharmony_ci * stored in bfqd, which is dynamically updated according to the
145862306a36Sopenharmony_ci * estimated disk peak rate; otherwise return the default max budget.
145962306a36Sopenharmony_ci */
146062306a36Sopenharmony_cistatic int bfq_max_budget(struct bfq_data *bfqd)
146162306a36Sopenharmony_ci{
146262306a36Sopenharmony_ci	if (bfqd->budgets_assigned < bfq_stats_min_budgets)
146362306a36Sopenharmony_ci		return bfq_default_max_budget;
146462306a36Sopenharmony_ci	else
146562306a36Sopenharmony_ci		return bfqd->bfq_max_budget;
146662306a36Sopenharmony_ci}
146762306a36Sopenharmony_ci
146862306a36Sopenharmony_ci/*
146962306a36Sopenharmony_ci * Return min budget, which is a fraction of the current or default
147062306a36Sopenharmony_ci * max budget (trying with 1/32)
147162306a36Sopenharmony_ci */
147262306a36Sopenharmony_cistatic int bfq_min_budget(struct bfq_data *bfqd)
147362306a36Sopenharmony_ci{
147462306a36Sopenharmony_ci	if (bfqd->budgets_assigned < bfq_stats_min_budgets)
147562306a36Sopenharmony_ci		return bfq_default_max_budget / 32;
147662306a36Sopenharmony_ci	else
147762306a36Sopenharmony_ci		return bfqd->bfq_max_budget / 32;
147862306a36Sopenharmony_ci}
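
/*
 * Numeric sketch (budget value hypothetical): with bfq_max_budget
 * currently estimated at 16384 sectors, bfq_min_budget() returns
 * 16384 / 32 = 512 sectors; before enough budget samples have been
 * collected, the same division is applied to the default max budget.
 */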
147962306a36Sopenharmony_ci
148062306a36Sopenharmony_ci/*
148162306a36Sopenharmony_ci * The next function, invoked after the input queue bfqq switches from
148262306a36Sopenharmony_ci * idle to busy, updates the budget of bfqq. The function also tells
148362306a36Sopenharmony_ci * whether the in-service queue should be expired, by returning
148462306a36Sopenharmony_ci * true. The purpose of expiring the in-service queue is to give bfqq
148562306a36Sopenharmony_ci * the chance to possibly preempt the in-service queue, and the reason
148662306a36Sopenharmony_ci * for preempting the in-service queue is to achieve one of the two
148762306a36Sopenharmony_ci * goals below.
148862306a36Sopenharmony_ci *
148962306a36Sopenharmony_ci * 1. Guarantee to bfqq its reserved bandwidth even if bfqq has
149062306a36Sopenharmony_ci * expired because it has remained idle. In particular, bfqq may have
149162306a36Sopenharmony_ci * expired for one of the following two reasons:
149262306a36Sopenharmony_ci *
149362306a36Sopenharmony_ci * - BFQQE_NO_MORE_REQUESTS bfqq did not enjoy any device idling
149462306a36Sopenharmony_ci *   and did not make it to issue a new request before its last
149562306a36Sopenharmony_ci *   request was served;
149662306a36Sopenharmony_ci *
149762306a36Sopenharmony_ci * - BFQQE_TOO_IDLE bfqq did enjoy device idling, but did not issue
149862306a36Sopenharmony_ci *   a new request before the expiration of the idling-time.
149962306a36Sopenharmony_ci *
150062306a36Sopenharmony_ci * Even if bfqq has expired for one of the above reasons, the process
150162306a36Sopenharmony_ci * associated with the queue may nevertheless be issuing requests greedily,
150262306a36Sopenharmony_ci * and thus be sensitive to the bandwidth it receives (bfqq may have
150362306a36Sopenharmony_ci * remained idle for other reasons: CPU high load, bfqq not enjoying
150462306a36Sopenharmony_ci * idling, I/O throttling somewhere in the path from the process to
150562306a36Sopenharmony_ci * the I/O scheduler, ...). But if, after every expiration for one of
150662306a36Sopenharmony_ci * the above two reasons, bfqq has to wait for the service of at least
150762306a36Sopenharmony_ci * one full budget of another queue before being served again, then
150862306a36Sopenharmony_ci * bfqq is likely to get a much lower bandwidth or resource time than
150962306a36Sopenharmony_ci * its reserved ones. To address this issue, two countermeasures need
151062306a36Sopenharmony_ci * to be taken.
151162306a36Sopenharmony_ci *
151262306a36Sopenharmony_ci * First, the budget and the timestamps of bfqq need to be updated in
151362306a36Sopenharmony_ci * a special way on bfqq reactivation: they need to be updated as if
151462306a36Sopenharmony_ci * bfqq did not remain idle and did not expire. In fact, if they are
151562306a36Sopenharmony_ci * computed as if bfqq expired and remained idle until reactivation,
151662306a36Sopenharmony_ci * then the process associated with bfqq is treated as if, instead of
151762306a36Sopenharmony_ci * being greedy, it stopped issuing requests when bfqq remained idle,
151862306a36Sopenharmony_ci * and restarts issuing requests only on this reactivation. In other
151962306a36Sopenharmony_ci * words, the scheduler does not help the process recover the "service
152062306a36Sopenharmony_ci * hole" between bfqq expiration and reactivation. As a consequence,
152162306a36Sopenharmony_ci * the process receives a lower bandwidth than its reserved one. In
152262306a36Sopenharmony_ci * contrast, to recover this hole, the budget must be updated as if
152362306a36Sopenharmony_ci * bfqq was not expired at all before this reactivation, i.e., it must
152462306a36Sopenharmony_ci * be set to the value of the remaining budget when bfqq was
152562306a36Sopenharmony_ci * expired. Along the same line, timestamps need to be assigned the
152662306a36Sopenharmony_ci * value they had the last time bfqq was selected for service, i.e.,
152762306a36Sopenharmony_ci * before last expiration. Thus timestamps need to be back-shifted
152862306a36Sopenharmony_ci * with respect to their normal computation (see [1] for more details
152962306a36Sopenharmony_ci * on this tricky aspect).
153062306a36Sopenharmony_ci *
153162306a36Sopenharmony_ci * Secondly, to allow the process to recover the hole, the in-service
153262306a36Sopenharmony_ci * queue must be expired too, to give bfqq the chance to preempt it
153362306a36Sopenharmony_ci * immediately. In fact, if bfqq has to wait for a full budget of the
153462306a36Sopenharmony_ci * in-service queue to be completed, then it may become impossible to
153562306a36Sopenharmony_ci * let the process recover the hole, even if the back-shifted
153662306a36Sopenharmony_ci * timestamps of bfqq are lower than those of the in-service queue. If
153762306a36Sopenharmony_ci * this happens for most or all of the holes, then the process may not
153862306a36Sopenharmony_ci * receive its reserved bandwidth. In this respect, it is worth noting
153962306a36Sopenharmony_ci * that, since the service of outstanding requests is not preemptible,
154062306a36Sopenharmony_ci * a small fraction of the holes may nevertheless be unrecoverable,
154162306a36Sopenharmony_ci * thereby causing a small loss of bandwidth.
154262306a36Sopenharmony_ci *
154362306a36Sopenharmony_ci * The last important point is detecting whether bfqq does need this
154462306a36Sopenharmony_ci * bandwidth recovery. In this respect, the next function deems the
154562306a36Sopenharmony_ci * process associated with bfqq greedy, and thus allows it to recover
154662306a36Sopenharmony_ci * the hole, if: 1) the process is waiting for the arrival of a new
154762306a36Sopenharmony_ci * request (which implies that bfqq expired for one of the above two
154862306a36Sopenharmony_ci * reasons), and 2) such a request has arrived soon. The first
154962306a36Sopenharmony_ci * condition is controlled through the flag non_blocking_wait_rq,
155062306a36Sopenharmony_ci * while the second through the flag arrived_in_time. If both
155162306a36Sopenharmony_ci * conditions hold, then the function computes the budget in the
155262306a36Sopenharmony_ci * above-described special way, and signals that the in-service queue
155362306a36Sopenharmony_ci * should be expired. Timestamp back-shifting is done later in
155462306a36Sopenharmony_ci * __bfq_activate_entity.
155562306a36Sopenharmony_ci *
155662306a36Sopenharmony_ci * 2. Reduce latency. Even if timestamps are not backshifted to let
155762306a36Sopenharmony_ci * the process associated with bfqq recover a service hole, bfqq may
155862306a36Sopenharmony_ci * however happen to have, after being (re)activated, a lower finish
155962306a36Sopenharmony_ci * timestamp than the in-service queue. That is, the next budget of
156062306a36Sopenharmony_ci * bfqq may have to be completed before the one of the in-service
156162306a36Sopenharmony_ci * queue. If this is the case, then preempting the in-service queue
156262306a36Sopenharmony_ci * allows this goal to be achieved, apart from the unpreemptible,
156362306a36Sopenharmony_ci * outstanding requests mentioned above.
156462306a36Sopenharmony_ci *
156562306a36Sopenharmony_ci * Unfortunately, regardless of which of the above two goals one wants
156662306a36Sopenharmony_ci * to achieve, service trees need first to be updated to know whether
156762306a36Sopenharmony_ci * the in-service queue must be preempted. To have service trees
156862306a36Sopenharmony_ci * correctly updated, the in-service queue must be expired and
156962306a36Sopenharmony_ci * rescheduled, and bfqq must be scheduled too. This is one of the
157062306a36Sopenharmony_ci * most costly operations (in future versions, the scheduling
157162306a36Sopenharmony_ci * mechanism may be re-designed in such a way to make it possible to
157262306a36Sopenharmony_ci * know whether preemption is needed without needing to update service
157362306a36Sopenharmony_ci * trees). In addition, queue preemptions almost always cause random
157462306a36Sopenharmony_ci * I/O, which may in turn cause loss of throughput. Finally, there may
157562306a36Sopenharmony_ci * even be no in-service queue when the next function is invoked (so,
157662306a36Sopenharmony_ci * no queue to compare timestamps with). Because of these facts, the
157762306a36Sopenharmony_ci * next function adopts the following simple scheme to avoid costly
157862306a36Sopenharmony_ci * operations, too frequent preemptions and too many dependencies on
157962306a36Sopenharmony_ci * the state of the scheduler: it requests the expiration of the
158062306a36Sopenharmony_ci * in-service queue (unconditionally) only for queues that need to
158162306a36Sopenharmony_ci * recover a hole. Then it delegates to other parts of the code the
158262306a36Sopenharmony_ci * responsibility of handling the above case 2.
158362306a36Sopenharmony_ci */
158462306a36Sopenharmony_cistatic bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
158562306a36Sopenharmony_ci						struct bfq_queue *bfqq,
158662306a36Sopenharmony_ci						bool arrived_in_time)
158762306a36Sopenharmony_ci{
158862306a36Sopenharmony_ci	struct bfq_entity *entity = &bfqq->entity;
158962306a36Sopenharmony_ci
159062306a36Sopenharmony_ci	/*
159162306a36Sopenharmony_ci	 * In the next compound condition, we check also whether there
159262306a36Sopenharmony_ci	 * is some budget left, because otherwise there is no point in
159362306a36Sopenharmony_ci	 * trying to go on serving bfqq with this same budget: bfqq
159462306a36Sopenharmony_ci	 * would be expired immediately after being selected for
159562306a36Sopenharmony_ci	 * service. This would only cause useless overhead.
159662306a36Sopenharmony_ci	 */
159762306a36Sopenharmony_ci	if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time &&
159862306a36Sopenharmony_ci	    bfq_bfqq_budget_left(bfqq) > 0) {
159962306a36Sopenharmony_ci		/*
160062306a36Sopenharmony_ci		 * We do not clear the flag non_blocking_wait_rq here, as
160162306a36Sopenharmony_ci		 * the latter is used in bfq_activate_bfqq to signal
160262306a36Sopenharmony_ci		 * that timestamps need to be back-shifted (and is
160362306a36Sopenharmony_ci		 * cleared right after).
160462306a36Sopenharmony_ci		 */
160562306a36Sopenharmony_ci
160662306a36Sopenharmony_ci		/*
160762306a36Sopenharmony_ci		 * In next assignment we rely on that either
160862306a36Sopenharmony_ci		 * entity->service or entity->budget are not updated
160962306a36Sopenharmony_ci		 * on expiration if bfqq is empty (see
161062306a36Sopenharmony_ci		 * __bfq_bfqq_recalc_budget). Thus both quantities
161162306a36Sopenharmony_ci		 * remain unchanged after such an expiration, and the
161262306a36Sopenharmony_ci		 * following statement therefore assigns to
161362306a36Sopenharmony_ci		 * entity->budget the remaining budget on such an
161462306a36Sopenharmony_ci		 * expiration.
161562306a36Sopenharmony_ci		 */
161662306a36Sopenharmony_ci		entity->budget = min_t(unsigned long,
161762306a36Sopenharmony_ci				       bfq_bfqq_budget_left(bfqq),
161862306a36Sopenharmony_ci				       bfqq->max_budget);
161962306a36Sopenharmony_ci
162062306a36Sopenharmony_ci		/*
162162306a36Sopenharmony_ci		 * At this point, we have used entity->service to get
162262306a36Sopenharmony_ci		 * the budget left (needed for updating
162362306a36Sopenharmony_ci		 * entity->budget). Thus we finally can, and have to,
162462306a36Sopenharmony_ci		 * reset entity->service. The latter must be reset
162562306a36Sopenharmony_ci		 * because bfqq would otherwise be charged again for
162662306a36Sopenharmony_ci		 * the service it has received during its previous
162762306a36Sopenharmony_ci		 * service slot(s).
162862306a36Sopenharmony_ci		 */
162962306a36Sopenharmony_ci		entity->service = 0;
163062306a36Sopenharmony_ci
163162306a36Sopenharmony_ci		return true;
163262306a36Sopenharmony_ci	}
163362306a36Sopenharmony_ci
163462306a36Sopenharmony_ci	/*
163562306a36Sopenharmony_ci	 * We can finally complete expiration, by setting service to 0.
163662306a36Sopenharmony_ci	 */
163762306a36Sopenharmony_ci	entity->service = 0;
163862306a36Sopenharmony_ci	entity->budget = max_t(unsigned long, bfqq->max_budget,
163962306a36Sopenharmony_ci			       bfq_serv_to_charge(bfqq->next_rq, bfqq));
164062306a36Sopenharmony_ci	bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
164162306a36Sopenharmony_ci	return false;
164262306a36Sopenharmony_ci}
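
/*
 * Sketch of the two outcomes above (values hypothetical): suppose bfqq
 * expired while waiting for a new request, with 3000 sectors of budget
 * left, and the request arrives within 3 * bfq_slice_idle. Then
 * entity->budget is set to min(3000, bfqq->max_budget), service is
 * reset, and true is returned, asking for the in-service queue to be
 * expired so that bfqq can try to preempt it and recover its "service
 * hole". If instead the request arrives too late, the budget is simply
 * recomputed from scratch (the max of max_budget and the charge of
 * next_rq) and false is returned.
 */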
164362306a36Sopenharmony_ci
164462306a36Sopenharmony_ci/*
164562306a36Sopenharmony_ci * Return the farthest past time instant according to jiffies
164662306a36Sopenharmony_ci * macros.
164762306a36Sopenharmony_ci */
164862306a36Sopenharmony_cistatic unsigned long bfq_smallest_from_now(void)
164962306a36Sopenharmony_ci{
165062306a36Sopenharmony_ci	return jiffies - MAX_JIFFY_OFFSET;
165162306a36Sopenharmony_ci}
165262306a36Sopenharmony_ci
165362306a36Sopenharmony_cistatic void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
165462306a36Sopenharmony_ci					     struct bfq_queue *bfqq,
165562306a36Sopenharmony_ci					     unsigned int old_wr_coeff,
165662306a36Sopenharmony_ci					     bool wr_or_deserves_wr,
165762306a36Sopenharmony_ci					     bool interactive,
165862306a36Sopenharmony_ci					     bool in_burst,
165962306a36Sopenharmony_ci					     bool soft_rt)
166062306a36Sopenharmony_ci{
166162306a36Sopenharmony_ci	if (old_wr_coeff == 1 && wr_or_deserves_wr) {
166262306a36Sopenharmony_ci		/* start a weight-raising period */
166362306a36Sopenharmony_ci		if (interactive) {
166462306a36Sopenharmony_ci			bfqq->service_from_wr = 0;
166562306a36Sopenharmony_ci			bfqq->wr_coeff = bfqd->bfq_wr_coeff;
166662306a36Sopenharmony_ci			bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
166762306a36Sopenharmony_ci		} else {
166862306a36Sopenharmony_ci			/*
166962306a36Sopenharmony_ci			 * No interactive weight raising in progress
167062306a36Sopenharmony_ci			 * here: assign minus infinity to
167162306a36Sopenharmony_ci			 * wr_start_at_switch_to_srt, to make sure
167262306a36Sopenharmony_ci			 * that, at the end of the soft-real-time
167362306a36Sopenharmony_ci			 * weight-raising period that is starting
167462306a36Sopenharmony_ci			 * now, no interactive weight-raising period
167562306a36Sopenharmony_ci			 * may be wrongly considered as still in
167662306a36Sopenharmony_ci			 * progress (and thus actually started by
167762306a36Sopenharmony_ci			 * mistake).
167862306a36Sopenharmony_ci			 */
167962306a36Sopenharmony_ci			bfqq->wr_start_at_switch_to_srt =
168062306a36Sopenharmony_ci				bfq_smallest_from_now();
168162306a36Sopenharmony_ci			bfqq->wr_coeff = bfqd->bfq_wr_coeff *
168262306a36Sopenharmony_ci				BFQ_SOFTRT_WEIGHT_FACTOR;
168362306a36Sopenharmony_ci			bfqq->wr_cur_max_time =
168462306a36Sopenharmony_ci				bfqd->bfq_wr_rt_max_time;
168562306a36Sopenharmony_ci		}
168662306a36Sopenharmony_ci
168762306a36Sopenharmony_ci		/*
168862306a36Sopenharmony_ci		 * If needed, further reduce budget to make sure it is
168962306a36Sopenharmony_ci		 * close to bfqq's backlog, so as to reduce the
169062306a36Sopenharmony_ci		 * scheduling-error component due to a too large
169162306a36Sopenharmony_ci		 * budget. Do not care about throughput consequences,
169262306a36Sopenharmony_ci		 * but only about latency. Finally, do not assign a
169362306a36Sopenharmony_ci		 * too small budget either, to avoid increasing
169462306a36Sopenharmony_ci		 * latency by causing too frequent expirations.
169562306a36Sopenharmony_ci		 */
169662306a36Sopenharmony_ci		bfqq->entity.budget = min_t(unsigned long,
169762306a36Sopenharmony_ci					    bfqq->entity.budget,
169862306a36Sopenharmony_ci					    2 * bfq_min_budget(bfqd));
169962306a36Sopenharmony_ci	} else if (old_wr_coeff > 1) {
170062306a36Sopenharmony_ci		if (interactive) { /* update wr coeff and duration */
170162306a36Sopenharmony_ci			bfqq->wr_coeff = bfqd->bfq_wr_coeff;
170262306a36Sopenharmony_ci			bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
170362306a36Sopenharmony_ci		} else if (in_burst)
170462306a36Sopenharmony_ci			bfqq->wr_coeff = 1;
170562306a36Sopenharmony_ci		else if (soft_rt) {
170662306a36Sopenharmony_ci			/*
170762306a36Sopenharmony_ci			 * The application is now or still meeting the
170862306a36Sopenharmony_ci			 * requirements for being deemed soft rt.  We
170962306a36Sopenharmony_ci			 * can then correctly and safely (re)charge
171062306a36Sopenharmony_ci			 * the weight-raising duration for the
171162306a36Sopenharmony_ci			 * application with the weight-raising
171262306a36Sopenharmony_ci			 * duration for soft rt applications.
171362306a36Sopenharmony_ci			 *
171462306a36Sopenharmony_ci			 * In particular, doing this recharge now, i.e.,
171562306a36Sopenharmony_ci			 * before the weight-raising period for the
171662306a36Sopenharmony_ci			 * application finishes, reduces the probability
171762306a36Sopenharmony_ci			 * of the following negative scenario:
171862306a36Sopenharmony_ci			 * 1) the weight of a soft rt application is
171962306a36Sopenharmony_ci			 *    raised at startup (as for any newly
172062306a36Sopenharmony_ci			 *    created application),
172162306a36Sopenharmony_ci			 * 2) since the application is not interactive,
172262306a36Sopenharmony_ci			 *    at a certain time weight-raising is
172362306a36Sopenharmony_ci			 *    stopped for the application,
172462306a36Sopenharmony_ci			 * 3) at that time the application happens to
172562306a36Sopenharmony_ci			 *    still have pending requests, and hence
172662306a36Sopenharmony_ci			 *    is destined to not have a chance to be
172762306a36Sopenharmony_ci			 *    deemed soft rt before these requests are
172862306a36Sopenharmony_ci			 *    completed (see the comments to the
172962306a36Sopenharmony_ci			 *    function bfq_bfqq_softrt_next_start()
173062306a36Sopenharmony_ci			 *    for details on soft rt detection),
173162306a36Sopenharmony_ci			 * 4) these pending requests experience a high
173262306a36Sopenharmony_ci			 *    latency because the application is not
173362306a36Sopenharmony_ci			 *    weight-raised while they are pending.
173462306a36Sopenharmony_ci			 */
173562306a36Sopenharmony_ci			if (bfqq->wr_cur_max_time !=
173662306a36Sopenharmony_ci				bfqd->bfq_wr_rt_max_time) {
173762306a36Sopenharmony_ci				bfqq->wr_start_at_switch_to_srt =
173862306a36Sopenharmony_ci					bfqq->last_wr_start_finish;
173962306a36Sopenharmony_ci
174062306a36Sopenharmony_ci				bfqq->wr_cur_max_time =
174162306a36Sopenharmony_ci					bfqd->bfq_wr_rt_max_time;
174262306a36Sopenharmony_ci				bfqq->wr_coeff = bfqd->bfq_wr_coeff *
174362306a36Sopenharmony_ci					BFQ_SOFTRT_WEIGHT_FACTOR;
174462306a36Sopenharmony_ci			}
174562306a36Sopenharmony_ci			bfqq->last_wr_start_finish = jiffies;
174662306a36Sopenharmony_ci		}
174762306a36Sopenharmony_ci	}
174862306a36Sopenharmony_ci}
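
/*
 * Transition sketch for the function above (the actual coefficients
 * and durations come from bfqd): a queue with old_wr_coeff == 1 that
 * deserves raising either starts an interactive period (wr_coeff set
 * to bfq_wr_coeff, duration from bfq_wr_duration()) or a soft
 * real-time period (coefficient further multiplied by
 * BFQ_SOFTRT_WEIGHT_FACTOR, duration bfq_wr_rt_max_time). A queue that
 * is already weight-raised is refreshed if interactive, loses raising
 * if it belongs to a large burst, or is (re)charged with the soft
 * real-time duration if it (still) qualifies as soft real-time.
 */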
174962306a36Sopenharmony_ci
175062306a36Sopenharmony_cistatic bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd,
175162306a36Sopenharmony_ci					struct bfq_queue *bfqq)
175262306a36Sopenharmony_ci{
175362306a36Sopenharmony_ci	return bfqq->dispatched == 0 &&
175462306a36Sopenharmony_ci		time_is_before_jiffies(
175562306a36Sopenharmony_ci			bfqq->budget_timeout +
175662306a36Sopenharmony_ci			bfqd->bfq_wr_min_idle_time);
175762306a36Sopenharmony_ci}
175862306a36Sopenharmony_ci
175962306a36Sopenharmony_ci
176062306a36Sopenharmony_ci/*
176162306a36Sopenharmony_ci * Return true if bfqq is in a higher priority class, or has a higher
176262306a36Sopenharmony_ci * weight than the in-service queue.
176362306a36Sopenharmony_ci */
176462306a36Sopenharmony_cistatic bool bfq_bfqq_higher_class_or_weight(struct bfq_queue *bfqq,
176562306a36Sopenharmony_ci					    struct bfq_queue *in_serv_bfqq)
176662306a36Sopenharmony_ci{
176762306a36Sopenharmony_ci	int bfqq_weight, in_serv_weight;
176862306a36Sopenharmony_ci
176962306a36Sopenharmony_ci	if (bfqq->ioprio_class < in_serv_bfqq->ioprio_class)
177062306a36Sopenharmony_ci		return true;
177162306a36Sopenharmony_ci
177262306a36Sopenharmony_ci	if (in_serv_bfqq->entity.parent == bfqq->entity.parent) {
177362306a36Sopenharmony_ci		bfqq_weight = bfqq->entity.weight;
177462306a36Sopenharmony_ci		in_serv_weight = in_serv_bfqq->entity.weight;
177562306a36Sopenharmony_ci	} else {
177662306a36Sopenharmony_ci		if (bfqq->entity.parent)
177762306a36Sopenharmony_ci			bfqq_weight = bfqq->entity.parent->weight;
177862306a36Sopenharmony_ci		else
177962306a36Sopenharmony_ci			bfqq_weight = bfqq->entity.weight;
178062306a36Sopenharmony_ci		if (in_serv_bfqq->entity.parent)
178162306a36Sopenharmony_ci			in_serv_weight = in_serv_bfqq->entity.parent->weight;
178262306a36Sopenharmony_ci		else
178362306a36Sopenharmony_ci			in_serv_weight = in_serv_bfqq->entity.weight;
178462306a36Sopenharmony_ci	}
178562306a36Sopenharmony_ci
178662306a36Sopenharmony_ci	return bfqq_weight > in_serv_weight;
178762306a36Sopenharmony_ci}
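
/*
 * Comparison sketch (weights hypothetical): if bfqq is in a higher
 * priority class (e.g., RT) than the in-service queue (e.g., BE), bfqq
 * wins immediately. If the two queues are in different groups, the
 * weights of the respective parent entities are compared, e.g., a
 * group weight of 500 beats a group weight of 100; only for queues
 * under the same parent are the queue weights themselves compared.
 */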
178862306a36Sopenharmony_ci
178962306a36Sopenharmony_ci/*
179062306a36Sopenharmony_ci * Get the index of the actuator that will serve bio.
179162306a36Sopenharmony_ci */
179262306a36Sopenharmony_cistatic unsigned int bfq_actuator_index(struct bfq_data *bfqd, struct bio *bio)
179362306a36Sopenharmony_ci{
179462306a36Sopenharmony_ci	unsigned int i;
179562306a36Sopenharmony_ci	sector_t end;
179662306a36Sopenharmony_ci
179762306a36Sopenharmony_ci	/* no search needed if one or zero ranges present */
179862306a36Sopenharmony_ci	if (bfqd->num_actuators == 1)
179962306a36Sopenharmony_ci		return 0;
180062306a36Sopenharmony_ci
180162306a36Sopenharmony_ci	/* bio_end_sector(bio) gives the sector after the last one */
180262306a36Sopenharmony_ci	end = bio_end_sector(bio) - 1;
180362306a36Sopenharmony_ci
180462306a36Sopenharmony_ci	for (i = 0; i < bfqd->num_actuators; i++) {
180562306a36Sopenharmony_ci		if (end >= bfqd->sector[i] &&
180662306a36Sopenharmony_ci		    end < bfqd->sector[i] + bfqd->nr_sectors[i])
180762306a36Sopenharmony_ci			return i;
180862306a36Sopenharmony_ci	}
180962306a36Sopenharmony_ci
181062306a36Sopenharmony_ci	WARN_ONCE(true,
181162306a36Sopenharmony_ci		  "bfq_actuator_index: bio sector out of ranges: end=%llu\n",
181262306a36Sopenharmony_ci		  end);
181362306a36Sopenharmony_ci	return 0;
181462306a36Sopenharmony_ci}
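
/*
 * Lookup sketch for bfq_actuator_index() (sector numbers hypothetical):
 * on a two-actuator drive exposing the ranges [0, 1000000) and
 * [1000000, 2000000), a bio whose last sector is 1500000 falls in the
 * second range, so index 1 is returned and the I/O is accounted to
 * that actuator. With a single actuator the function short-circuits to
 * 0 without scanning the ranges.
 */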
181562306a36Sopenharmony_ci
181662306a36Sopenharmony_cistatic bool bfq_better_to_idle(struct bfq_queue *bfqq);
181762306a36Sopenharmony_ci
181862306a36Sopenharmony_cistatic void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
181962306a36Sopenharmony_ci					     struct bfq_queue *bfqq,
182062306a36Sopenharmony_ci					     int old_wr_coeff,
182162306a36Sopenharmony_ci					     struct request *rq,
182262306a36Sopenharmony_ci					     bool *interactive)
182362306a36Sopenharmony_ci{
182462306a36Sopenharmony_ci	bool soft_rt, in_burst,	wr_or_deserves_wr,
182562306a36Sopenharmony_ci		bfqq_wants_to_preempt,
182662306a36Sopenharmony_ci		idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq),
182762306a36Sopenharmony_ci		/*
182862306a36Sopenharmony_ci		 * See the comments on
182962306a36Sopenharmony_ci		 * bfq_bfqq_update_budg_for_activation for
183062306a36Sopenharmony_ci		 * details on the usage of the next variable.
183162306a36Sopenharmony_ci		 */
183262306a36Sopenharmony_ci		arrived_in_time =  ktime_get_ns() <=
183362306a36Sopenharmony_ci			bfqq->ttime.last_end_request +
183462306a36Sopenharmony_ci			bfqd->bfq_slice_idle * 3;
183562306a36Sopenharmony_ci	unsigned int act_idx = bfq_actuator_index(bfqd, rq->bio);
183662306a36Sopenharmony_ci	bool bfqq_non_merged_or_stably_merged =
183762306a36Sopenharmony_ci		bfqq->bic || RQ_BIC(rq)->bfqq_data[act_idx].stably_merged;
183862306a36Sopenharmony_ci
183962306a36Sopenharmony_ci	/*
184062306a36Sopenharmony_ci	 * bfqq deserves to be weight-raised if:
184162306a36Sopenharmony_ci	 * - it is sync,
184262306a36Sopenharmony_ci	 * - it does not belong to a large burst,
184362306a36Sopenharmony_ci	 * - it has been idle for enough time or is soft real-time,
184462306a36Sopenharmony_ci	 * - it is linked to a bfq_io_cq (it is not shared in any sense),
184562306a36Sopenharmony_ci	 * - it has a default weight (otherwise we assume the user wanted
184662306a36Sopenharmony_ci	 *   to control its weight explicitly)
184762306a36Sopenharmony_ci	 */
184862306a36Sopenharmony_ci	in_burst = bfq_bfqq_in_large_burst(bfqq);
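	/*
	 * In the checks below, a new_weight of 40 is taken as the weight
	 * corresponding to the default ioprio, i.e., as the sign that the
	 * user has not assigned a weight to the queue explicitly.
	 */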
184962306a36Sopenharmony_ci	soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
185062306a36Sopenharmony_ci		!BFQQ_TOTALLY_SEEKY(bfqq) &&
185162306a36Sopenharmony_ci		!in_burst &&
185262306a36Sopenharmony_ci		time_is_before_jiffies(bfqq->soft_rt_next_start) &&
185362306a36Sopenharmony_ci		bfqq->dispatched == 0 &&
185462306a36Sopenharmony_ci		bfqq->entity.new_weight == 40;
185562306a36Sopenharmony_ci	*interactive = !in_burst && idle_for_long_time &&
185662306a36Sopenharmony_ci		bfqq->entity.new_weight == 40;
185762306a36Sopenharmony_ci	/*
185862306a36Sopenharmony_ci	 * Merged bfq_queues are kept out of weight-raising
185962306a36Sopenharmony_ci	 * (low-latency) mechanisms. The reason is that these queues
186062306a36Sopenharmony_ci	 * are usually created for non-interactive and
186162306a36Sopenharmony_ci	 * non-soft-real-time tasks. Yet this is not the case for
186262306a36Sopenharmony_ci	 * stably-merged queues. These queues are merged just because
186362306a36Sopenharmony_ci	 * they are created shortly after each other. So they may
186462306a36Sopenharmony_ci	 * easily serve the I/O of an interactive or soft-real time
186562306a36Sopenharmony_ci	 * application, if the application happens to spawn multiple
186662306a36Sopenharmony_ci	 * processes. So let stably-merged queues enjoy weight
186762306a36Sopenharmony_ci	 * raising too.
186862306a36Sopenharmony_ci	 */
186962306a36Sopenharmony_ci	wr_or_deserves_wr = bfqd->low_latency &&
187062306a36Sopenharmony_ci		(bfqq->wr_coeff > 1 ||
187162306a36Sopenharmony_ci		 (bfq_bfqq_sync(bfqq) && bfqq_non_merged_or_stably_merged &&
187262306a36Sopenharmony_ci		  (*interactive || soft_rt)));
187362306a36Sopenharmony_ci
187462306a36Sopenharmony_ci	/*
187562306a36Sopenharmony_ci	 * Using the last flag, update budget and check whether bfqq
187662306a36Sopenharmony_ci	 * may want to preempt the in-service queue.
187762306a36Sopenharmony_ci	 */
187862306a36Sopenharmony_ci	bfqq_wants_to_preempt =
187962306a36Sopenharmony_ci		bfq_bfqq_update_budg_for_activation(bfqd, bfqq,
188062306a36Sopenharmony_ci						    arrived_in_time);
188162306a36Sopenharmony_ci
188262306a36Sopenharmony_ci	/*
188362306a36Sopenharmony_ci	 * If bfqq happened to be activated in a burst, but has been
188462306a36Sopenharmony_ci	 * idle for much longer than needed to be deemed interactive, then we
188562306a36Sopenharmony_ci	 * assume that, in the overall I/O initiated in the burst, the
188662306a36Sopenharmony_ci	 * I/O associated with bfqq is finished. So bfqq does not need
188762306a36Sopenharmony_ci	 * to be treated as a queue belonging to a burst
188862306a36Sopenharmony_ci	 * anymore. Accordingly, we reset bfqq's in_large_burst flag
188962306a36Sopenharmony_ci	 * if set, and remove bfqq from the burst list if it's
189062306a36Sopenharmony_ci	 * there. We do not decrement burst_size, because the fact
189162306a36Sopenharmony_ci	 * that bfqq does not need to belong to the burst list any
189262306a36Sopenharmony_ci	 * more does not invalidate the fact that bfqq was created in
189362306a36Sopenharmony_ci	 * a burst.
189462306a36Sopenharmony_ci	 */
189562306a36Sopenharmony_ci	if (likely(!bfq_bfqq_just_created(bfqq)) &&
189662306a36Sopenharmony_ci	    idle_for_long_time &&
189762306a36Sopenharmony_ci	    time_is_before_jiffies(
189862306a36Sopenharmony_ci		    bfqq->budget_timeout +
189962306a36Sopenharmony_ci		    msecs_to_jiffies(10000))) {
190062306a36Sopenharmony_ci		hlist_del_init(&bfqq->burst_list_node);
190162306a36Sopenharmony_ci		bfq_clear_bfqq_in_large_burst(bfqq);
190262306a36Sopenharmony_ci	}
190362306a36Sopenharmony_ci
190462306a36Sopenharmony_ci	bfq_clear_bfqq_just_created(bfqq);
190562306a36Sopenharmony_ci
190662306a36Sopenharmony_ci	if (bfqd->low_latency) {
190762306a36Sopenharmony_ci		if (unlikely(time_is_after_jiffies(bfqq->split_time)))
190862306a36Sopenharmony_ci			/* wraparound */
190962306a36Sopenharmony_ci			bfqq->split_time =
191062306a36Sopenharmony_ci				jiffies - bfqd->bfq_wr_min_idle_time - 1;
191162306a36Sopenharmony_ci
191262306a36Sopenharmony_ci		if (time_is_before_jiffies(bfqq->split_time +
191362306a36Sopenharmony_ci					   bfqd->bfq_wr_min_idle_time)) {
191462306a36Sopenharmony_ci			bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq,
191562306a36Sopenharmony_ci							 old_wr_coeff,
191662306a36Sopenharmony_ci							 wr_or_deserves_wr,
191762306a36Sopenharmony_ci							 *interactive,
191862306a36Sopenharmony_ci							 in_burst,
191962306a36Sopenharmony_ci							 soft_rt);
192062306a36Sopenharmony_ci
192162306a36Sopenharmony_ci			if (old_wr_coeff != bfqq->wr_coeff)
192262306a36Sopenharmony_ci				bfqq->entity.prio_changed = 1;
192362306a36Sopenharmony_ci		}
192462306a36Sopenharmony_ci	}
192562306a36Sopenharmony_ci
192662306a36Sopenharmony_ci	bfqq->last_idle_bklogged = jiffies;
192762306a36Sopenharmony_ci	bfqq->service_from_backlogged = 0;
192862306a36Sopenharmony_ci	bfq_clear_bfqq_softrt_update(bfqq);
192962306a36Sopenharmony_ci
193062306a36Sopenharmony_ci	bfq_add_bfqq_busy(bfqq);
193162306a36Sopenharmony_ci
193262306a36Sopenharmony_ci	/*
193362306a36Sopenharmony_ci	 * Expire in-service queue if preemption may be needed for
193462306a36Sopenharmony_ci	 * guarantees or throughput. As for guarantees, we care
193562306a36Sopenharmony_ci	 * explicitly about two cases. The first is that bfqq has to
193662306a36Sopenharmony_ci	 * recover a service hole, as explained in the comments on
193762306a36Sopenharmony_ci	 * bfq_bfqq_update_budg_for_activation(), i.e., that
193862306a36Sopenharmony_ci	 * bfqq_wants_to_preempt is true. However, if bfqq does not
193962306a36Sopenharmony_ci	 * carry time-critical I/O, then bfqq's bandwidth is less
194062306a36Sopenharmony_ci	 * important than that of queues that carry time-critical I/O.
194162306a36Sopenharmony_ci	 * So, as a further constraint, we consider this case only if
194262306a36Sopenharmony_ci	 * bfqq is at least as weight-raised, i.e., at least as time
194362306a36Sopenharmony_ci	 * critical, as the in-service queue.
194462306a36Sopenharmony_ci	 *
194562306a36Sopenharmony_ci	 * The second case is that bfqq is in a higher priority class,
194662306a36Sopenharmony_ci	 * or has a higher weight than the in-service queue. If this
194762306a36Sopenharmony_ci	 * condition does not hold, we don't care because, even if
194862306a36Sopenharmony_ci	 * bfqq does not start to be served immediately, the resulting
194962306a36Sopenharmony_ci	 * delay for bfqq's I/O is however lower or much lower than
195062306a36Sopenharmony_ci	 * the ideal completion time to be guaranteed to bfqq's I/O.
195162306a36Sopenharmony_ci	 *
195262306a36Sopenharmony_ci	 * In both cases, preemption is needed only if, according to
195362306a36Sopenharmony_ci	 * the timestamps of both bfqq and of the in-service queue,
195462306a36Sopenharmony_ci	 * bfqq actually is the next queue to serve. So, to reduce
195562306a36Sopenharmony_ci	 * useless preemptions, the return value of
195662306a36Sopenharmony_ci	 * next_queue_may_preempt() is considered in the next compound
195762306a36Sopenharmony_ci	 * condition too. Yet next_queue_may_preempt() just checks a
195862306a36Sopenharmony_ci	 * simple, necessary condition for bfqq to be the next queue
195962306a36Sopenharmony_ci	 * to serve. In fact, to evaluate a sufficient condition, the
196062306a36Sopenharmony_ci	 * timestamps of the in-service queue would need to be
196162306a36Sopenharmony_ci	 * updated, and this operation is quite costly (see the
196262306a36Sopenharmony_ci	 * comments on bfq_bfqq_update_budg_for_activation()).
196362306a36Sopenharmony_ci	 *
196462306a36Sopenharmony_ci	 * As for throughput, we ask bfq_better_to_idle() whether we
196562306a36Sopenharmony_ci	 * still need to plug I/O dispatching. If bfq_better_to_idle()
196662306a36Sopenharmony_ci	 * says no, then plugging is not needed any longer, either to
196762306a36Sopenharmony_ci	 * boost throughput or to preserve service guarantees. Then
196862306a36Sopenharmony_ci	 * the best option is to stop plugging I/O, as not doing so
196962306a36Sopenharmony_ci	 * would certainly lower throughput. We may end up in this
197062306a36Sopenharmony_ci	 * case if: (1) upon a dispatch attempt, we detected that it
197162306a36Sopenharmony_ci	 * was better to plug I/O dispatch, and to wait for a new
197262306a36Sopenharmony_ci	 * request to arrive for the currently in-service queue, but
197362306a36Sopenharmony_ci	 * (2) this switch of bfqq to busy changes the scenario.
197462306a36Sopenharmony_ci	 */
197562306a36Sopenharmony_ci	if (bfqd->in_service_queue &&
197662306a36Sopenharmony_ci	    ((bfqq_wants_to_preempt &&
197762306a36Sopenharmony_ci	      bfqq->wr_coeff >= bfqd->in_service_queue->wr_coeff) ||
197862306a36Sopenharmony_ci	     bfq_bfqq_higher_class_or_weight(bfqq, bfqd->in_service_queue) ||
197962306a36Sopenharmony_ci	     !bfq_better_to_idle(bfqd->in_service_queue)) &&
198062306a36Sopenharmony_ci	    next_queue_may_preempt(bfqd))
198162306a36Sopenharmony_ci		bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
198262306a36Sopenharmony_ci				false, BFQQE_PREEMPTED);
198362306a36Sopenharmony_ci}
198462306a36Sopenharmony_ci
198562306a36Sopenharmony_cistatic void bfq_reset_inject_limit(struct bfq_data *bfqd,
198662306a36Sopenharmony_ci				   struct bfq_queue *bfqq)
198762306a36Sopenharmony_ci{
198862306a36Sopenharmony_ci	/* invalidate baseline total service time */
198962306a36Sopenharmony_ci	bfqq->last_serv_time_ns = 0;
199062306a36Sopenharmony_ci
199162306a36Sopenharmony_ci	/*
199262306a36Sopenharmony_ci	 * Reset pointer in case we are waiting for
199362306a36Sopenharmony_ci	 * some request completion.
199462306a36Sopenharmony_ci	 */
199562306a36Sopenharmony_ci	bfqd->waited_rq = NULL;
199662306a36Sopenharmony_ci
199762306a36Sopenharmony_ci	/*
199862306a36Sopenharmony_ci	 * If bfqq has a short think time, then start by setting the
199962306a36Sopenharmony_ci	 * inject limit to 0 prudentially, because the service time of
200062306a36Sopenharmony_ci	 * an injected I/O request may be higher than the think time
200162306a36Sopenharmony_ci	 * of bfqq, and therefore, if one request was injected when
200262306a36Sopenharmony_ci	 * bfqq remains empty, this injected request might delay the
200362306a36Sopenharmony_ci	 * service of the next I/O request for bfqq significantly. In
200462306a36Sopenharmony_ci	 * case bfqq can actually tolerate some injection, then the
200562306a36Sopenharmony_ci	 * adaptive update will however raise the limit soon. This
200662306a36Sopenharmony_ci	 * lucky circumstance holds exactly because bfqq has a short
200762306a36Sopenharmony_ci	 * think time, and thus, after remaining empty, is likely to
200862306a36Sopenharmony_ci	 * get new I/O enqueued---and then completed---before being
200962306a36Sopenharmony_ci	 * expired. This is the very pattern that gives the
201062306a36Sopenharmony_ci	 * limit-update algorithm the chance to measure the effect of
201162306a36Sopenharmony_ci	 * injection on request service times, and then to update the
201262306a36Sopenharmony_ci	 * limit accordingly.
201362306a36Sopenharmony_ci	 *
201462306a36Sopenharmony_ci	 * However, in the following special case, the inject limit is
201562306a36Sopenharmony_ci	 * left to 1 even if the think time is short: bfqq's I/O is
201662306a36Sopenharmony_ci	 * synchronized with that of some other queue, i.e., bfqq may
201762306a36Sopenharmony_ci	 * receive new I/O only after the I/O of the other queue is
201862306a36Sopenharmony_ci	 * completed. Keeping the inject limit to 1 allows the
201962306a36Sopenharmony_ci	 * blocking I/O to be served while bfqq is in service. And
202062306a36Sopenharmony_ci	 * this is very convenient both for bfqq and for overall
202162306a36Sopenharmony_ci	 * throughput, as explained in detail in the comments in
202262306a36Sopenharmony_ci	 * bfq_update_has_short_ttime().
202362306a36Sopenharmony_ci	 *
202462306a36Sopenharmony_ci	 * On the opposite end, if bfqq has a long think time, then
202562306a36Sopenharmony_ci	 * start directly with 1, because:
202662306a36Sopenharmony_ci	 * a) on the bright side, keeping at most one request in
202762306a36Sopenharmony_ci	 * service in the drive is unlikely to cause any harm to the
202862306a36Sopenharmony_ci	 * latency of bfqq's requests, as the service time of a single
202962306a36Sopenharmony_ci	 * request is likely to be lower than the think time of bfqq;
203062306a36Sopenharmony_ci	 * b) on the downside, after becoming empty, bfqq is likely to
203162306a36Sopenharmony_ci	 * expire before getting its next request. With this request
203262306a36Sopenharmony_ci	 * arrival pattern, it is very hard to sample total service
203362306a36Sopenharmony_ci	 * times and update the inject limit accordingly (see comments
203462306a36Sopenharmony_ci	 * on bfq_update_inject_limit()). So the limit is likely to be
203562306a36Sopenharmony_ci	 * never, or at least seldom, updated. As a consequence, by
203662306a36Sopenharmony_ci	 * setting the limit to 1, we make sure that some injection
203762306a36Sopenharmony_ci	 * can occur with bfqq. On the downside, this proactive step
203862306a36Sopenharmony_ci	 * further reduces chances to actually compute the baseline
203962306a36Sopenharmony_ci	 * total service time. Thus it reduces chances to execute the
204062306a36Sopenharmony_ci	 * limit-update algorithm and possibly raise the limit to more
204162306a36Sopenharmony_ci	 * than 1.
204262306a36Sopenharmony_ci	 */
204362306a36Sopenharmony_ci	if (bfq_bfqq_has_short_ttime(bfqq))
204462306a36Sopenharmony_ci		bfqq->inject_limit = 0;
204562306a36Sopenharmony_ci	else
204662306a36Sopenharmony_ci		bfqq->inject_limit = 1;
204762306a36Sopenharmony_ci
204862306a36Sopenharmony_ci	bfqq->decrease_time_jif = jiffies;
204962306a36Sopenharmony_ci}
205062306a36Sopenharmony_ci
205162306a36Sopenharmony_cistatic void bfq_update_io_intensity(struct bfq_queue *bfqq, u64 now_ns)
205262306a36Sopenharmony_ci{
205362306a36Sopenharmony_ci	u64 tot_io_time = now_ns - bfqq->io_start_time;
205462306a36Sopenharmony_ci
205562306a36Sopenharmony_ci	if (RB_EMPTY_ROOT(&bfqq->sort_list) && bfqq->dispatched == 0)
205662306a36Sopenharmony_ci		bfqq->tot_idle_time +=
205762306a36Sopenharmony_ci			now_ns - bfqq->ttime.last_end_request;
205862306a36Sopenharmony_ci
205962306a36Sopenharmony_ci	if (unlikely(bfq_bfqq_just_created(bfqq)))
206062306a36Sopenharmony_ci		return;
206162306a36Sopenharmony_ci
206262306a36Sopenharmony_ci	/*
206362306a36Sopenharmony_ci	 * Must be busy for at least about 80% of the time to be
206462306a36Sopenharmony_ci	 * considered I/O bound.
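	 * The check below flags bfqq as not I/O bound when its idle time
	 * exceeds 20% of the window (tot_idle_time * 5 > tot_io_time).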
206562306a36Sopenharmony_ci	 */
206662306a36Sopenharmony_ci	if (bfqq->tot_idle_time * 5 > tot_io_time)
206762306a36Sopenharmony_ci		bfq_clear_bfqq_IO_bound(bfqq);
206862306a36Sopenharmony_ci	else
206962306a36Sopenharmony_ci		bfq_mark_bfqq_IO_bound(bfqq);
207062306a36Sopenharmony_ci
207162306a36Sopenharmony_ci	/*
207262306a36Sopenharmony_ci	 * Keep an observation window of at most 200 ms in the past
207362306a36Sopenharmony_ci	 * from now.
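	 * When the window grows beyond that, both the window (by moving
	 * io_start_time forward) and tot_idle_time are halved, which
	 * shrinks the window while preserving the measured idle fraction.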
207462306a36Sopenharmony_ci	 */
207562306a36Sopenharmony_ci	if (tot_io_time > 200 * NSEC_PER_MSEC) {
207662306a36Sopenharmony_ci		bfqq->io_start_time = now_ns - (tot_io_time>>1);
207762306a36Sopenharmony_ci		bfqq->tot_idle_time >>= 1;
207862306a36Sopenharmony_ci	}
207962306a36Sopenharmony_ci}
208062306a36Sopenharmony_ci
208162306a36Sopenharmony_ci/*
208262306a36Sopenharmony_ci * Detect whether bfqq's I/O seems synchronized with that of some
208362306a36Sopenharmony_ci * other queue, i.e., whether bfqq, after remaining empty, happens to
208462306a36Sopenharmony_ci * receive new I/O only right after some I/O request of the other
208562306a36Sopenharmony_ci * queue has been completed. We call waker queue the other queue, and
208662306a36Sopenharmony_ci * we assume, for simplicity, that bfqq may have at most one waker
208762306a36Sopenharmony_ci * queue.
208862306a36Sopenharmony_ci *
208962306a36Sopenharmony_ci * A remarkable throughput boost can be reached by unconditionally
209062306a36Sopenharmony_ci * injecting the I/O of the waker queue, every time a new
209162306a36Sopenharmony_ci * bfq_dispatch_request happens to be invoked while I/O is being
209262306a36Sopenharmony_ci * plugged for bfqq.  In addition to boosting throughput, this
209362306a36Sopenharmony_ci * unblocks bfqq's I/O, thereby improving bandwidth and latency for
209462306a36Sopenharmony_ci * bfqq. Note that these same results may be achieved with the general
209562306a36Sopenharmony_ci * injection mechanism, but less effectively. For details on this
209662306a36Sopenharmony_ci * aspect, see the comments on the choice of the queue for injection
209762306a36Sopenharmony_ci * in bfq_select_queue().
209862306a36Sopenharmony_ci *
209962306a36Sopenharmony_ci * Turning back to the detection of a waker queue, a queue Q is deemed as a
210062306a36Sopenharmony_ci * waker queue for bfqq if, for three consecutive times, bfqq happens to become
210162306a36Sopenharmony_ci * non empty right after a request of Q has been completed within a given
210262306a36Sopenharmony_ci * timeout. In this respect, even if bfqq is empty, we do not check for a waker
210362306a36Sopenharmony_ci * if it still has some in-flight I/O. In fact, in this case bfqq is actually
210462306a36Sopenharmony_ci * still being served by the drive, and may receive new I/O on the completion
210562306a36Sopenharmony_ci * of some of the in-flight requests. In particular, on the first time, Q is
210662306a36Sopenharmony_ci * tentatively set as a candidate waker queue, while on the third consecutive
210762306a36Sopenharmony_ci * time that Q is detected, the field waker_bfqq is set to Q, to confirm that Q
210862306a36Sopenharmony_ci * is a waker queue for bfqq. These detection steps are performed only if bfqq
210962306a36Sopenharmony_ci * has a long think time, so as to make it more likely that bfqq's I/O is
211062306a36Sopenharmony_ci * actually being blocked by a synchronization. This last filter, plus the
211162306a36Sopenharmony_ci * above three-times requirement and time limit for detection, make false
211262306a36Sopenharmony_ci * positives less likely.
211362306a36Sopenharmony_ci *
211462306a36Sopenharmony_ci * NOTE
211562306a36Sopenharmony_ci *
211662306a36Sopenharmony_ci * The sooner a waker queue is detected, the sooner throughput can be
211762306a36Sopenharmony_ci * boosted by injecting I/O from the waker queue. Fortunately,
211862306a36Sopenharmony_ci * detection is likely to be actually fast, for the following
211962306a36Sopenharmony_ci * reasons. While blocked by synchronization, bfqq has a long think
212062306a36Sopenharmony_ci * time. This implies that bfqq's inject limit is at least equal to 1
212162306a36Sopenharmony_ci * (see the comments in bfq_update_inject_limit()). So, thanks to
212262306a36Sopenharmony_ci * injection, the waker queue is likely to be served during the very
212362306a36Sopenharmony_ci * first I/O-plugging time interval for bfqq. This triggers the first
212462306a36Sopenharmony_ci * step of the detection mechanism. Thanks again to injection, the
212562306a36Sopenharmony_ci * candidate waker queue is then likely to be confirmed no later than
212662306a36Sopenharmony_ci * during the next I/O-plugging interval for bfqq.
212762306a36Sopenharmony_ci *
212862306a36Sopenharmony_ci * ISSUE
212962306a36Sopenharmony_ci *
213062306a36Sopenharmony_ci * On queue merging all waker information is lost.
213162306a36Sopenharmony_ci */
213262306a36Sopenharmony_cistatic void bfq_check_waker(struct bfq_data *bfqd, struct bfq_queue *bfqq,
213362306a36Sopenharmony_ci			    u64 now_ns)
213462306a36Sopenharmony_ci{
213562306a36Sopenharmony_ci	char waker_name[MAX_BFQQ_NAME_LENGTH];
213662306a36Sopenharmony_ci
213762306a36Sopenharmony_ci	if (!bfqd->last_completed_rq_bfqq ||
213862306a36Sopenharmony_ci	    bfqd->last_completed_rq_bfqq == bfqq ||
213962306a36Sopenharmony_ci	    bfq_bfqq_has_short_ttime(bfqq) ||
214062306a36Sopenharmony_ci	    now_ns - bfqd->last_completion >= 4 * NSEC_PER_MSEC ||
214162306a36Sopenharmony_ci	    bfqd->last_completed_rq_bfqq == &bfqd->oom_bfqq ||
214262306a36Sopenharmony_ci	    bfqq == &bfqd->oom_bfqq)
214362306a36Sopenharmony_ci		return;
214462306a36Sopenharmony_ci
214562306a36Sopenharmony_ci	/*
214662306a36Sopenharmony_ci	 * We also reset the waker detection logic if too much time has passed
214762306a36Sopenharmony_ci	 * since the first detection. If wakeups are rare, pointless idling
214862306a36Sopenharmony_ci	 * doesn't hurt throughput that much. The condition below makes sure we
214962306a36Sopenharmony_ci	 * do not uselessly idle waiting for a blocking waker in more than 1/64 of cases.
215062306a36Sopenharmony_ci	 */
215162306a36Sopenharmony_ci	if (bfqd->last_completed_rq_bfqq !=
215262306a36Sopenharmony_ci	    bfqq->tentative_waker_bfqq ||
215362306a36Sopenharmony_ci	    now_ns > bfqq->waker_detection_started +
215462306a36Sopenharmony_ci					128 * (u64)bfqd->bfq_slice_idle) {
215562306a36Sopenharmony_ci		/*
215662306a36Sopenharmony_ci		 * First synchronization detected with a
215762306a36Sopenharmony_ci		 * candidate waker queue, or with a different
215862306a36Sopenharmony_ci		 * candidate waker queue from the current one.
215962306a36Sopenharmony_ci		 */
216062306a36Sopenharmony_ci		bfqq->tentative_waker_bfqq =
216162306a36Sopenharmony_ci			bfqd->last_completed_rq_bfqq;
216262306a36Sopenharmony_ci		bfqq->num_waker_detections = 1;
216362306a36Sopenharmony_ci		bfqq->waker_detection_started = now_ns;
216462306a36Sopenharmony_ci		bfq_bfqq_name(bfqq->tentative_waker_bfqq, waker_name,
216562306a36Sopenharmony_ci			      MAX_BFQQ_NAME_LENGTH);
216662306a36Sopenharmony_ci		bfq_log_bfqq(bfqd, bfqq, "set tentative waker %s", waker_name);
216762306a36Sopenharmony_ci	} else /* Same tentative waker queue detected again */
216862306a36Sopenharmony_ci		bfqq->num_waker_detections++;
216962306a36Sopenharmony_ci
217062306a36Sopenharmony_ci	if (bfqq->num_waker_detections == 3) {
217162306a36Sopenharmony_ci		bfqq->waker_bfqq = bfqd->last_completed_rq_bfqq;
217262306a36Sopenharmony_ci		bfqq->tentative_waker_bfqq = NULL;
217362306a36Sopenharmony_ci		bfq_bfqq_name(bfqq->waker_bfqq, waker_name,
217462306a36Sopenharmony_ci			      MAX_BFQQ_NAME_LENGTH);
217562306a36Sopenharmony_ci		bfq_log_bfqq(bfqd, bfqq, "set waker %s", waker_name);
217662306a36Sopenharmony_ci
217762306a36Sopenharmony_ci		/*
217862306a36Sopenharmony_ci		 * If the waker queue disappears, then
217962306a36Sopenharmony_ci		 * bfqq->waker_bfqq must be reset. To
218062306a36Sopenharmony_ci		 * this goal, we maintain in each
218162306a36Sopenharmony_ci		 * waker queue a list, woken_list, of
218262306a36Sopenharmony_ci		 * all the queues that reference the
218362306a36Sopenharmony_ci		 * waker queue through their
218462306a36Sopenharmony_ci		 * waker_bfqq pointer. When the waker
218562306a36Sopenharmony_ci		 * queue exits, the waker_bfqq pointer
218662306a36Sopenharmony_ci		 * of all the queues in the woken_list
218762306a36Sopenharmony_ci		 * is reset.
218862306a36Sopenharmony_ci		 *
218962306a36Sopenharmony_ci		 * In addition, if bfqq is already in
219062306a36Sopenharmony_ci		 * the woken_list of a waker queue,
219162306a36Sopenharmony_ci		 * then, before being inserted into
219262306a36Sopenharmony_ci		 * the woken_list of a new waker
219362306a36Sopenharmony_ci		 * queue, bfqq must be removed from
219462306a36Sopenharmony_ci		 * the woken_list of the old waker
219562306a36Sopenharmony_ci		 * queue.
219662306a36Sopenharmony_ci		 */
219762306a36Sopenharmony_ci		if (!hlist_unhashed(&bfqq->woken_list_node))
219862306a36Sopenharmony_ci			hlist_del_init(&bfqq->woken_list_node);
219962306a36Sopenharmony_ci		hlist_add_head(&bfqq->woken_list_node,
220062306a36Sopenharmony_ci			       &bfqd->last_completed_rq_bfqq->woken_list);
220162306a36Sopenharmony_ci	}
220262306a36Sopenharmony_ci}
220362306a36Sopenharmony_ci
220462306a36Sopenharmony_cistatic void bfq_add_request(struct request *rq)
220562306a36Sopenharmony_ci{
220662306a36Sopenharmony_ci	struct bfq_queue *bfqq = RQ_BFQQ(rq);
220762306a36Sopenharmony_ci	struct bfq_data *bfqd = bfqq->bfqd;
220862306a36Sopenharmony_ci	struct request *next_rq, *prev;
220962306a36Sopenharmony_ci	unsigned int old_wr_coeff = bfqq->wr_coeff;
221062306a36Sopenharmony_ci	bool interactive = false;
221162306a36Sopenharmony_ci	u64 now_ns = ktime_get_ns();
221262306a36Sopenharmony_ci
221362306a36Sopenharmony_ci	bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
221462306a36Sopenharmony_ci	bfqq->queued[rq_is_sync(rq)]++;
221562306a36Sopenharmony_ci	/*
221662306a36Sopenharmony_ci	 * Updating of 'bfqd->queued' is protected by 'bfqd->lock', however, it
221762306a36Sopenharmony_ci	 * may be read without holding the lock in bfq_has_work().
221862306a36Sopenharmony_ci	 */
221962306a36Sopenharmony_ci	WRITE_ONCE(bfqd->queued, bfqd->queued + 1);
222062306a36Sopenharmony_ci
222162306a36Sopenharmony_ci	if (bfq_bfqq_sync(bfqq) && RQ_BIC(rq)->requests <= 1) {
222262306a36Sopenharmony_ci		bfq_check_waker(bfqd, bfqq, now_ns);
222362306a36Sopenharmony_ci
222462306a36Sopenharmony_ci		/*
222562306a36Sopenharmony_ci		 * Periodically reset inject limit, to make sure that
222662306a36Sopenharmony_ci		 * the latter eventually drops in case workload
222762306a36Sopenharmony_ci		 * changes, see step (3) in the comments on
222862306a36Sopenharmony_ci		 * bfq_update_inject_limit().
222962306a36Sopenharmony_ci		 */
223062306a36Sopenharmony_ci		if (time_is_before_eq_jiffies(bfqq->decrease_time_jif +
223162306a36Sopenharmony_ci					     msecs_to_jiffies(1000)))
223262306a36Sopenharmony_ci			bfq_reset_inject_limit(bfqd, bfqq);
223362306a36Sopenharmony_ci
223462306a36Sopenharmony_ci		/*
223562306a36Sopenharmony_ci		 * The following conditions must hold to setup a new
223662306a36Sopenharmony_ci		 * sampling of total service time, and then a new
223762306a36Sopenharmony_ci		 * update of the inject limit:
223862306a36Sopenharmony_ci		 * - bfqq is in service, because the total service
223962306a36Sopenharmony_ci		 *   time is evaluated only for the I/O requests of
224062306a36Sopenharmony_ci		 *   the queues in service;
224162306a36Sopenharmony_ci		 * - this is the right occasion to compute or to
224262306a36Sopenharmony_ci		 *   lower the baseline total service time, because
224362306a36Sopenharmony_ci		 *   there are actually no requests in the drive,
224462306a36Sopenharmony_ci		 *   or
224562306a36Sopenharmony_ci		 *   the baseline total service time is available, and
224662306a36Sopenharmony_ci		 *   this is the right occasion to compute the other
224762306a36Sopenharmony_ci		 *   quantity needed to update the inject limit, i.e.,
224862306a36Sopenharmony_ci		 *   the total service time caused by the amount of
224962306a36Sopenharmony_ci		 *   injection allowed by the current value of the
225062306a36Sopenharmony_ci		 *   limit. It is the right occasion because injection
225162306a36Sopenharmony_ci		 *   has actually been performed during the service
225262306a36Sopenharmony_ci		 *   hole, and there are still in-flight requests,
225362306a36Sopenharmony_ci		 *   which are very likely to be exactly the injected
225462306a36Sopenharmony_ci		 *   requests, or part of them;
225562306a36Sopenharmony_ci		 * - the minimum interval for sampling the total
225662306a36Sopenharmony_ci		 *   service time and updating the inject limit has
225762306a36Sopenharmony_ci		 *   elapsed.
225862306a36Sopenharmony_ci		 */
225962306a36Sopenharmony_ci		if (bfqq == bfqd->in_service_queue &&
226062306a36Sopenharmony_ci		    (bfqd->tot_rq_in_driver == 0 ||
226162306a36Sopenharmony_ci		     (bfqq->last_serv_time_ns > 0 &&
226262306a36Sopenharmony_ci		      bfqd->rqs_injected && bfqd->tot_rq_in_driver > 0)) &&
226362306a36Sopenharmony_ci		    time_is_before_eq_jiffies(bfqq->decrease_time_jif +
226462306a36Sopenharmony_ci					      msecs_to_jiffies(10))) {
226562306a36Sopenharmony_ci			bfqd->last_empty_occupied_ns = ktime_get_ns();
226662306a36Sopenharmony_ci			/*
226762306a36Sopenharmony_ci			 * Start the state machine for measuring the
226862306a36Sopenharmony_ci			 * total service time of rq: setting
226962306a36Sopenharmony_ci			 * wait_dispatch will cause bfqd->waited_rq to
227062306a36Sopenharmony_ci			 * be set when rq will be dispatched.
227162306a36Sopenharmony_ci			 */
227262306a36Sopenharmony_ci			bfqd->wait_dispatch = true;
227362306a36Sopenharmony_ci			/*
227462306a36Sopenharmony_ci			 * If there is no I/O in service in the drive,
227562306a36Sopenharmony_ci			 * then possible injection occurred before the
227662306a36Sopenharmony_ci			 * arrival of rq will not affect the total
227762306a36Sopenharmony_ci			 * service time of rq. So the injection limit
227862306a36Sopenharmony_ci			 * must not be updated as a function of such
227962306a36Sopenharmony_ci			 * total service time, unless new injection
228062306a36Sopenharmony_ci			 * occurs before rq is completed. To have the
228162306a36Sopenharmony_ci			 * injection limit updated only in the latter
228262306a36Sopenharmony_ci			 * case, reset rqs_injected here (rqs_injected
228362306a36Sopenharmony_ci			 * will be set in case injection is performed
228462306a36Sopenharmony_ci			 * on bfqq before rq is completed).
228562306a36Sopenharmony_ci			 */
228662306a36Sopenharmony_ci			if (bfqd->tot_rq_in_driver == 0)
228762306a36Sopenharmony_ci				bfqd->rqs_injected = false;
228862306a36Sopenharmony_ci		}
228962306a36Sopenharmony_ci	}
229062306a36Sopenharmony_ci
229162306a36Sopenharmony_ci	if (bfq_bfqq_sync(bfqq))
229262306a36Sopenharmony_ci		bfq_update_io_intensity(bfqq, now_ns);
229362306a36Sopenharmony_ci
229462306a36Sopenharmony_ci	elv_rb_add(&bfqq->sort_list, rq);
229562306a36Sopenharmony_ci
229662306a36Sopenharmony_ci	/*
229762306a36Sopenharmony_ci	 * Check if this request is a better next-serve candidate.
229862306a36Sopenharmony_ci	 */
229962306a36Sopenharmony_ci	prev = bfqq->next_rq;
230062306a36Sopenharmony_ci	next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
230162306a36Sopenharmony_ci	bfqq->next_rq = next_rq;
230262306a36Sopenharmony_ci
230362306a36Sopenharmony_ci	/*
230462306a36Sopenharmony_ci	 * Adjust priority tree position, if next_rq changes.
230562306a36Sopenharmony_ci	 * See comments on bfq_pos_tree_add_move() for the unlikely().
230662306a36Sopenharmony_ci	 */
230762306a36Sopenharmony_ci	if (unlikely(!bfqd->nonrot_with_queueing && prev != bfqq->next_rq))
230862306a36Sopenharmony_ci		bfq_pos_tree_add_move(bfqd, bfqq);
230962306a36Sopenharmony_ci
231062306a36Sopenharmony_ci	if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */
231162306a36Sopenharmony_ci		bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff,
231262306a36Sopenharmony_ci						 rq, &interactive);
231362306a36Sopenharmony_ci	else {
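		/*
		 * bfqq is already busy. If low_latency is on, bfqq is not
		 * already weight-raised, the just-arrived request is async,
		 * and enough time has passed since last_wr_start_finish,
		 * then start an (async) weight-raising period for bfqq.
		 */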
231462306a36Sopenharmony_ci		if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
231562306a36Sopenharmony_ci		    time_is_before_jiffies(
231662306a36Sopenharmony_ci				bfqq->last_wr_start_finish +
231762306a36Sopenharmony_ci				bfqd->bfq_wr_min_inter_arr_async)) {
231862306a36Sopenharmony_ci			bfqq->wr_coeff = bfqd->bfq_wr_coeff;
231962306a36Sopenharmony_ci			bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
232062306a36Sopenharmony_ci
232162306a36Sopenharmony_ci			bfqd->wr_busy_queues++;
232262306a36Sopenharmony_ci			bfqq->entity.prio_changed = 1;
232362306a36Sopenharmony_ci		}
232462306a36Sopenharmony_ci		if (prev != bfqq->next_rq)
232562306a36Sopenharmony_ci			bfq_updated_next_req(bfqd, bfqq);
232662306a36Sopenharmony_ci	}
232762306a36Sopenharmony_ci
232862306a36Sopenharmony_ci	/*
232962306a36Sopenharmony_ci	 * Assign jiffies to last_wr_start_finish in the following
233062306a36Sopenharmony_ci	 * cases:
233162306a36Sopenharmony_ci	 *
233262306a36Sopenharmony_ci	 * . if bfqq is not going to be weight-raised, because, for
233362306a36Sopenharmony_ci	 *   non weight-raised queues, last_wr_start_finish stores the
233462306a36Sopenharmony_ci	 *   arrival time of the last request; as of now, this piece
233562306a36Sopenharmony_ci	 *   of information is used only for deciding whether to
233662306a36Sopenharmony_ci	 *   weight-raise async queues
233762306a36Sopenharmony_ci	 *
233862306a36Sopenharmony_ci	 * . if bfqq is not weight-raised, because, if bfqq is now
233962306a36Sopenharmony_ci	 *   switching to weight-raised, then last_wr_start_finish
234062306a36Sopenharmony_ci	 *   stores the time when weight-raising starts
234162306a36Sopenharmony_ci	 *
234262306a36Sopenharmony_ci	 * . if bfqq is interactive, because, regardless of whether
234362306a36Sopenharmony_ci	 *   bfqq is currently weight-raised, the weight-raising
234462306a36Sopenharmony_ci	 *   period must start or restart (this case is considered
234562306a36Sopenharmony_ci	 *   separately because it is not detected by the above
234662306a36Sopenharmony_ci	 *   conditions, if bfqq is already weight-raised)
234762306a36Sopenharmony_ci	 *
234862306a36Sopenharmony_ci	 * last_wr_start_finish has to be updated also if bfqq is soft
234962306a36Sopenharmony_ci	 * real-time, because the weight-raising period is constantly
235062306a36Sopenharmony_ci	 * restarted on idle-to-busy transitions for these queues, but
235162306a36Sopenharmony_ci	 * this is already done in bfq_bfqq_handle_idle_busy_switch if
235262306a36Sopenharmony_ci	 * needed.
235362306a36Sopenharmony_ci	 */
235462306a36Sopenharmony_ci	if (bfqd->low_latency &&
235562306a36Sopenharmony_ci		(old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive))
235662306a36Sopenharmony_ci		bfqq->last_wr_start_finish = jiffies;
235762306a36Sopenharmony_ci}
235862306a36Sopenharmony_ci
235962306a36Sopenharmony_cistatic struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
236062306a36Sopenharmony_ci					  struct bio *bio,
236162306a36Sopenharmony_ci					  struct request_queue *q)
236262306a36Sopenharmony_ci{
236362306a36Sopenharmony_ci	struct bfq_queue *bfqq = bfqd->bio_bfqq;
236462306a36Sopenharmony_ci
236662306a36Sopenharmony_ci	if (bfqq)
236762306a36Sopenharmony_ci		return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
236862306a36Sopenharmony_ci
236962306a36Sopenharmony_ci	return NULL;
237062306a36Sopenharmony_ci}
237162306a36Sopenharmony_ci
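/*
 * Seek distance, in sectors, between the last known position (last_pos)
 * and the position of rq. A last_pos equal to 0 is treated as unknown,
 * in which case 0 is returned.
 */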
237262306a36Sopenharmony_cistatic sector_t get_sdist(sector_t last_pos, struct request *rq)
237362306a36Sopenharmony_ci{
237462306a36Sopenharmony_ci	if (last_pos)
237562306a36Sopenharmony_ci		return abs(blk_rq_pos(rq) - last_pos);
237662306a36Sopenharmony_ci
237762306a36Sopenharmony_ci	return 0;
237862306a36Sopenharmony_ci}
237962306a36Sopenharmony_ci
238062306a36Sopenharmony_cistatic void bfq_remove_request(struct request_queue *q,
238162306a36Sopenharmony_ci			       struct request *rq)
238262306a36Sopenharmony_ci{
238362306a36Sopenharmony_ci	struct bfq_queue *bfqq = RQ_BFQQ(rq);
238462306a36Sopenharmony_ci	struct bfq_data *bfqd = bfqq->bfqd;
238562306a36Sopenharmony_ci	const int sync = rq_is_sync(rq);
238662306a36Sopenharmony_ci
238762306a36Sopenharmony_ci	if (bfqq->next_rq == rq) {
238862306a36Sopenharmony_ci		bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
238962306a36Sopenharmony_ci		bfq_updated_next_req(bfqd, bfqq);
239062306a36Sopenharmony_ci	}
239162306a36Sopenharmony_ci
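	/*
	 * Unlink rq from the fifo list only if it is actually queued there,
	 * i.e., only if its queuelist node is not empty.
	 */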
239262306a36Sopenharmony_ci	if (rq->queuelist.prev != &rq->queuelist)
239362306a36Sopenharmony_ci		list_del_init(&rq->queuelist);
239462306a36Sopenharmony_ci	bfqq->queued[sync]--;
239562306a36Sopenharmony_ci	/*
239662306a36Sopenharmony_ci	 * Updating of 'bfqd->queued' is protected by 'bfqd->lock', however, it
239762306a36Sopenharmony_ci	 * may be read without holding the lock in bfq_has_work().
239862306a36Sopenharmony_ci	 */
239962306a36Sopenharmony_ci	WRITE_ONCE(bfqd->queued, bfqd->queued - 1);
240062306a36Sopenharmony_ci	elv_rb_del(&bfqq->sort_list, rq);
240162306a36Sopenharmony_ci
240262306a36Sopenharmony_ci	elv_rqhash_del(q, rq);
240362306a36Sopenharmony_ci	if (q->last_merge == rq)
240462306a36Sopenharmony_ci		q->last_merge = NULL;
240562306a36Sopenharmony_ci
240662306a36Sopenharmony_ci	if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
240762306a36Sopenharmony_ci		bfqq->next_rq = NULL;
240862306a36Sopenharmony_ci
240962306a36Sopenharmony_ci		if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) {
241062306a36Sopenharmony_ci			bfq_del_bfqq_busy(bfqq, false);
241162306a36Sopenharmony_ci			/*
241262306a36Sopenharmony_ci			 * bfqq emptied. In normal operation, when
241362306a36Sopenharmony_ci			 * bfqq is empty, bfqq->entity.service and
241462306a36Sopenharmony_ci			 * bfqq->entity.budget must contain,
241562306a36Sopenharmony_ci			 * respectively, the service received and the
241662306a36Sopenharmony_ci			 * budget used last time bfqq emptied. These
241762306a36Sopenharmony_ci			 * facts do not hold in this case, as at least
241862306a36Sopenharmony_ci			 * this last removal occurred while bfqq is
241962306a36Sopenharmony_ci			 * not in service. To avoid inconsistencies,
242062306a36Sopenharmony_ci			 * reset both bfqq->entity.service and
242162306a36Sopenharmony_ci			 * bfqq->entity.budget, if bfqq has still a
242262306a36Sopenharmony_ci			 * bfqq->entity.budget, if bfqq still has a
242362306a36Sopenharmony_ci			 */
242462306a36Sopenharmony_ci			bfqq->entity.budget = bfqq->entity.service = 0;
242562306a36Sopenharmony_ci		}
242662306a36Sopenharmony_ci
242762306a36Sopenharmony_ci		/*
242862306a36Sopenharmony_ci		 * Remove queue from request-position tree as it is empty.
242962306a36Sopenharmony_ci		 */
243062306a36Sopenharmony_ci		if (bfqq->pos_root) {
243162306a36Sopenharmony_ci			rb_erase(&bfqq->pos_node, bfqq->pos_root);
243262306a36Sopenharmony_ci			bfqq->pos_root = NULL;
243362306a36Sopenharmony_ci		}
243462306a36Sopenharmony_ci	} else {
243562306a36Sopenharmony_ci		/* see comments on bfq_pos_tree_add_move() for the unlikely() */
243662306a36Sopenharmony_ci		if (unlikely(!bfqd->nonrot_with_queueing))
243762306a36Sopenharmony_ci			bfq_pos_tree_add_move(bfqd, bfqq);
243862306a36Sopenharmony_ci	}
243962306a36Sopenharmony_ci
244062306a36Sopenharmony_ci	if (rq->cmd_flags & REQ_META)
244162306a36Sopenharmony_ci		bfqq->meta_pending--;
244362306a36Sopenharmony_ci}
244462306a36Sopenharmony_ci
244562306a36Sopenharmony_cistatic bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
244662306a36Sopenharmony_ci		unsigned int nr_segs)
244762306a36Sopenharmony_ci{
244862306a36Sopenharmony_ci	struct bfq_data *bfqd = q->elevator->elevator_data;
244962306a36Sopenharmony_ci	struct request *free = NULL;
245062306a36Sopenharmony_ci	/*
245162306a36Sopenharmony_ci	 * bfq_bic_lookup grabs the queue_lock: invoke it now and
245262306a36Sopenharmony_ci	 * store its return value for later use, to avoid nesting
245362306a36Sopenharmony_ci	 * queue_lock inside the bfqd->lock. We assume that the bic
245462306a36Sopenharmony_ci	 * returned by bfq_bic_lookup does not go away before
245562306a36Sopenharmony_ci	 * bfqd->lock is taken.
245662306a36Sopenharmony_ci	 */
245762306a36Sopenharmony_ci	struct bfq_io_cq *bic = bfq_bic_lookup(q);
245862306a36Sopenharmony_ci	bool ret;
245962306a36Sopenharmony_ci
246062306a36Sopenharmony_ci	spin_lock_irq(&bfqd->lock);
246162306a36Sopenharmony_ci
246262306a36Sopenharmony_ci	if (bic) {
246362306a36Sopenharmony_ci		/*
246462306a36Sopenharmony_ci		 * Make sure cgroup info is uptodate for current process before
246562306a36Sopenharmony_ci		 * considering the merge.
246662306a36Sopenharmony_ci		 */
246762306a36Sopenharmony_ci		bfq_bic_update_cgroup(bic, bio);
246862306a36Sopenharmony_ci
246962306a36Sopenharmony_ci		bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf),
247062306a36Sopenharmony_ci					     bfq_actuator_index(bfqd, bio));
247162306a36Sopenharmony_ci	} else {
247262306a36Sopenharmony_ci		bfqd->bio_bfqq = NULL;
247362306a36Sopenharmony_ci	}
247462306a36Sopenharmony_ci	bfqd->bio_bic = bic;
247562306a36Sopenharmony_ci
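	/*
	 * bfqd->bio_bfqq and bfqd->bio_bic are consumed by the merge hooks
	 * (e.g., bfq_find_rq_fmerge()) that blk_mq_sched_try_merge() may
	 * invoke below, all under bfqd->lock.
	 */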
247662306a36Sopenharmony_ci	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
247762306a36Sopenharmony_ci
247862306a36Sopenharmony_ci	spin_unlock_irq(&bfqd->lock);
247962306a36Sopenharmony_ci	if (free)
248062306a36Sopenharmony_ci		blk_mq_free_request(free);
248162306a36Sopenharmony_ci
248262306a36Sopenharmony_ci	return ret;
248362306a36Sopenharmony_ci}
248462306a36Sopenharmony_ci
248562306a36Sopenharmony_cistatic int bfq_request_merge(struct request_queue *q, struct request **req,
248662306a36Sopenharmony_ci			     struct bio *bio)
248762306a36Sopenharmony_ci{
248862306a36Sopenharmony_ci	struct bfq_data *bfqd = q->elevator->elevator_data;
248962306a36Sopenharmony_ci	struct request *__rq;
249062306a36Sopenharmony_ci
249162306a36Sopenharmony_ci	__rq = bfq_find_rq_fmerge(bfqd, bio, q);
249262306a36Sopenharmony_ci	if (__rq && elv_bio_merge_ok(__rq, bio)) {
249362306a36Sopenharmony_ci		*req = __rq;
249462306a36Sopenharmony_ci
249562306a36Sopenharmony_ci		if (blk_discard_mergable(__rq))
249662306a36Sopenharmony_ci			return ELEVATOR_DISCARD_MERGE;
249762306a36Sopenharmony_ci		return ELEVATOR_FRONT_MERGE;
249862306a36Sopenharmony_ci	}
249962306a36Sopenharmony_ci
250062306a36Sopenharmony_ci	return ELEVATOR_NO_MERGE;
250162306a36Sopenharmony_ci}
250262306a36Sopenharmony_ci
250362306a36Sopenharmony_cistatic void bfq_request_merged(struct request_queue *q, struct request *req,
250462306a36Sopenharmony_ci			       enum elv_merge type)
250562306a36Sopenharmony_ci{
250662306a36Sopenharmony_ci	if (type == ELEVATOR_FRONT_MERGE &&
250762306a36Sopenharmony_ci	    rb_prev(&req->rb_node) &&
250862306a36Sopenharmony_ci	    blk_rq_pos(req) <
250962306a36Sopenharmony_ci	    blk_rq_pos(container_of(rb_prev(&req->rb_node),
251062306a36Sopenharmony_ci				    struct request, rb_node))) {
251162306a36Sopenharmony_ci		struct bfq_queue *bfqq = RQ_BFQQ(req);
251262306a36Sopenharmony_ci		struct bfq_data *bfqd;
251362306a36Sopenharmony_ci		struct request *prev, *next_rq;
251462306a36Sopenharmony_ci
251562306a36Sopenharmony_ci		if (!bfqq)
251662306a36Sopenharmony_ci			return;
251762306a36Sopenharmony_ci
251862306a36Sopenharmony_ci		bfqd = bfqq->bfqd;
251962306a36Sopenharmony_ci
252062306a36Sopenharmony_ci		/* Reposition request in its sort_list */
252162306a36Sopenharmony_ci		elv_rb_del(&bfqq->sort_list, req);
252262306a36Sopenharmony_ci		elv_rb_add(&bfqq->sort_list, req);
252362306a36Sopenharmony_ci
252462306a36Sopenharmony_ci		/* Choose next request to be served for bfqq */
252562306a36Sopenharmony_ci		prev = bfqq->next_rq;
252662306a36Sopenharmony_ci		next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
252762306a36Sopenharmony_ci					 bfqd->last_position);
252862306a36Sopenharmony_ci		bfqq->next_rq = next_rq;
252962306a36Sopenharmony_ci		/*
253062306a36Sopenharmony_ci		 * If next_rq changes, update both the queue's budget to
253162306a36Sopenharmony_ci		 * fit the new request and the queue's position in its
253262306a36Sopenharmony_ci		 * rq_pos_tree.
253362306a36Sopenharmony_ci		 */
253462306a36Sopenharmony_ci		if (prev != bfqq->next_rq) {
253562306a36Sopenharmony_ci			bfq_updated_next_req(bfqd, bfqq);
253662306a36Sopenharmony_ci			/*
253762306a36Sopenharmony_ci			 * See comments on bfq_pos_tree_add_move() for
253862306a36Sopenharmony_ci			 * the unlikely().
253962306a36Sopenharmony_ci			 */
254062306a36Sopenharmony_ci			if (unlikely(!bfqd->nonrot_with_queueing))
254162306a36Sopenharmony_ci				bfq_pos_tree_add_move(bfqd, bfqq);
254262306a36Sopenharmony_ci		}
254362306a36Sopenharmony_ci	}
254462306a36Sopenharmony_ci}
254562306a36Sopenharmony_ci
254662306a36Sopenharmony_ci/*
254762306a36Sopenharmony_ci * This function is called to notify the scheduler that the requests
254862306a36Sopenharmony_ci * rq and 'next' have been merged, with 'next' going away.  BFQ
254962306a36Sopenharmony_ci * exploits this hook to address the following issue: if 'next' has a
255062306a36Sopenharmony_ci * fifo_time lower that rq, then the fifo_time of rq must be set to
255162306a36Sopenharmony_ci * fifo_time lower than rq's, then the fifo_time of rq must be set to
255262306a36Sopenharmony_ci *
255362306a36Sopenharmony_ci * NOTE: in this function we assume that rq is in a bfq_queue, based on
255462306a36Sopenharmony_ci * the fact that rq is picked from the hash table q->elevator->hash, which,
255562306a36Sopenharmony_ci * in its turn, is filled only with I/O requests present in
255662306a36Sopenharmony_ci * bfq_queues, while BFQ is in use for the request queue q. In fact,
255762306a36Sopenharmony_ci * the function that fills this hash table (elv_rqhash_add) is called
255862306a36Sopenharmony_ci * only by bfq_insert_request.
255962306a36Sopenharmony_ci */
256062306a36Sopenharmony_cistatic void bfq_requests_merged(struct request_queue *q, struct request *rq,
256162306a36Sopenharmony_ci				struct request *next)
256262306a36Sopenharmony_ci{
256362306a36Sopenharmony_ci	struct bfq_queue *bfqq = RQ_BFQQ(rq),
256462306a36Sopenharmony_ci		*next_bfqq = RQ_BFQQ(next);
256562306a36Sopenharmony_ci
256662306a36Sopenharmony_ci	if (!bfqq)
256762306a36Sopenharmony_ci		goto remove;
256862306a36Sopenharmony_ci
256962306a36Sopenharmony_ci	/*
257062306a36Sopenharmony_ci	 * If next and rq belong to the same bfq_queue and next is older
257162306a36Sopenharmony_ci	 * than rq, then reposition rq in the fifo (by substituting next
257262306a36Sopenharmony_ci	 * with rq). Otherwise, if next and rq belong to different
257362306a36Sopenharmony_ci	 * bfq_queues, never reposition rq: in fact, we would have to
257462306a36Sopenharmony_ci	 * reposition it with respect to next's position in its own fifo,
257562306a36Sopenharmony_ci	 * which would most certainly be too expensive with respect to
257662306a36Sopenharmony_ci	 * the benefits.
257762306a36Sopenharmony_ci	 */
257862306a36Sopenharmony_ci	if (bfqq == next_bfqq &&
257962306a36Sopenharmony_ci	    !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
258062306a36Sopenharmony_ci	    next->fifo_time < rq->fifo_time) {
258162306a36Sopenharmony_ci		list_del_init(&rq->queuelist);
258262306a36Sopenharmony_ci		list_replace_init(&next->queuelist, &rq->queuelist);
258362306a36Sopenharmony_ci		rq->fifo_time = next->fifo_time;
258462306a36Sopenharmony_ci	}
258562306a36Sopenharmony_ci
258662306a36Sopenharmony_ci	if (bfqq->next_rq == next)
258762306a36Sopenharmony_ci		bfqq->next_rq = rq;
258862306a36Sopenharmony_ci
258962306a36Sopenharmony_ci	bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
259062306a36Sopenharmony_ciremove:
259162306a36Sopenharmony_ci	/* Merged request may be in the IO scheduler. Remove it. */
259262306a36Sopenharmony_ci	if (!RB_EMPTY_NODE(&next->rb_node)) {
259362306a36Sopenharmony_ci		bfq_remove_request(next->q, next);
259462306a36Sopenharmony_ci		if (next_bfqq)
259562306a36Sopenharmony_ci			bfqg_stats_update_io_remove(bfqq_group(next_bfqq),
259662306a36Sopenharmony_ci						    next->cmd_flags);
259762306a36Sopenharmony_ci	}
259862306a36Sopenharmony_ci}
259962306a36Sopenharmony_ci
260062306a36Sopenharmony_ci/* Must be called with bfqq != NULL */
260162306a36Sopenharmony_cistatic void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
260262306a36Sopenharmony_ci{
260362306a36Sopenharmony_ci	/*
260462306a36Sopenharmony_ci	 * If bfqq has been enjoying interactive weight-raising, then
260562306a36Sopenharmony_ci	 * reset soft_rt_next_start. We do it for the following
260662306a36Sopenharmony_ci	 * reason. bfqq may have been conveying the I/O needed to load
260762306a36Sopenharmony_ci	 * a soft real-time application. Such an application actually
260862306a36Sopenharmony_ci	 * exhibits a soft real-time I/O pattern after it finishes
260962306a36Sopenharmony_ci	 * loading, and finally starts doing its job. But, if bfqq has
261062306a36Sopenharmony_ci	 * been receiving a lot of bandwidth so far (likely to happen
261162306a36Sopenharmony_ci	 * on a fast device), then soft_rt_next_start now contains a
261262306a36Sopenharmony_ci	 * high value. So, without this reset, bfqq would be
261362306a36Sopenharmony_ci	 * prevented from being possibly considered as soft_rt for a
261462306a36Sopenharmony_ci	 * very long time.
261562306a36Sopenharmony_ci	 */
261662306a36Sopenharmony_ci
261762306a36Sopenharmony_ci	if (bfqq->wr_cur_max_time !=
261862306a36Sopenharmony_ci	    bfqq->bfqd->bfq_wr_rt_max_time)
261962306a36Sopenharmony_ci		bfqq->soft_rt_next_start = jiffies;
262062306a36Sopenharmony_ci
262162306a36Sopenharmony_ci	if (bfq_bfqq_busy(bfqq))
262262306a36Sopenharmony_ci		bfqq->bfqd->wr_busy_queues--;
262362306a36Sopenharmony_ci	bfqq->wr_coeff = 1;
262462306a36Sopenharmony_ci	bfqq->wr_cur_max_time = 0;
262562306a36Sopenharmony_ci	bfqq->last_wr_start_finish = jiffies;
262662306a36Sopenharmony_ci	/*
262762306a36Sopenharmony_ci	 * Trigger a weight change on the next invocation of
262862306a36Sopenharmony_ci	 * __bfq_entity_update_weight_prio.
262962306a36Sopenharmony_ci	 */
263062306a36Sopenharmony_ci	bfqq->entity.prio_changed = 1;
263162306a36Sopenharmony_ci}
263262306a36Sopenharmony_ci
263362306a36Sopenharmony_civoid bfq_end_wr_async_queues(struct bfq_data *bfqd,
263462306a36Sopenharmony_ci			     struct bfq_group *bfqg)
263562306a36Sopenharmony_ci{
263662306a36Sopenharmony_ci	int i, j, k;
263762306a36Sopenharmony_ci
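	/*
	 * End weight-raising for every async queue of the group: for each
	 * actuator, all the per-priority async queues plus the idle-class
	 * queue.
	 */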
263862306a36Sopenharmony_ci	for (k = 0; k < bfqd->num_actuators; k++) {
263962306a36Sopenharmony_ci		for (i = 0; i < 2; i++)
264062306a36Sopenharmony_ci			for (j = 0; j < IOPRIO_NR_LEVELS; j++)
264162306a36Sopenharmony_ci				if (bfqg->async_bfqq[i][j][k])
264262306a36Sopenharmony_ci					bfq_bfqq_end_wr(bfqg->async_bfqq[i][j][k]);
264362306a36Sopenharmony_ci		if (bfqg->async_idle_bfqq[k])
264462306a36Sopenharmony_ci			bfq_bfqq_end_wr(bfqg->async_idle_bfqq[k]);
264562306a36Sopenharmony_ci	}
264662306a36Sopenharmony_ci}
264762306a36Sopenharmony_ci
264862306a36Sopenharmony_cistatic void bfq_end_wr(struct bfq_data *bfqd)
264962306a36Sopenharmony_ci{
265062306a36Sopenharmony_ci	struct bfq_queue *bfqq;
265162306a36Sopenharmony_ci	int i;
265262306a36Sopenharmony_ci
265362306a36Sopenharmony_ci	spin_lock_irq(&bfqd->lock);
265462306a36Sopenharmony_ci
265562306a36Sopenharmony_ci	for (i = 0; i < bfqd->num_actuators; i++) {
265662306a36Sopenharmony_ci		list_for_each_entry(bfqq, &bfqd->active_list[i], bfqq_list)
265762306a36Sopenharmony_ci			bfq_bfqq_end_wr(bfqq);
265862306a36Sopenharmony_ci	}
265962306a36Sopenharmony_ci	list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
266062306a36Sopenharmony_ci		bfq_bfqq_end_wr(bfqq);
266162306a36Sopenharmony_ci	bfq_end_wr_async(bfqd);
266262306a36Sopenharmony_ci
266362306a36Sopenharmony_ci	spin_unlock_irq(&bfqd->lock);
266462306a36Sopenharmony_ci}
266562306a36Sopenharmony_ci
266662306a36Sopenharmony_cistatic sector_t bfq_io_struct_pos(void *io_struct, bool request)
266762306a36Sopenharmony_ci{
266862306a36Sopenharmony_ci	if (request)
266962306a36Sopenharmony_ci		return blk_rq_pos(io_struct);
267062306a36Sopenharmony_ci	else
267162306a36Sopenharmony_ci		return ((struct bio *)io_struct)->bi_iter.bi_sector;
267262306a36Sopenharmony_ci}
267362306a36Sopenharmony_ci
267462306a36Sopenharmony_cistatic int bfq_rq_close_to_sector(void *io_struct, bool request,
267562306a36Sopenharmony_ci				  sector_t sector)
267662306a36Sopenharmony_ci{
267762306a36Sopenharmony_ci	return abs(bfq_io_struct_pos(io_struct, request) - sector) <=
267862306a36Sopenharmony_ci	       BFQQ_CLOSE_THR;
267962306a36Sopenharmony_ci}
268062306a36Sopenharmony_ci
268162306a36Sopenharmony_cistatic struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd,
268262306a36Sopenharmony_ci					 struct bfq_queue *bfqq,
268362306a36Sopenharmony_ci					 sector_t sector)
268462306a36Sopenharmony_ci{
268562306a36Sopenharmony_ci	struct rb_root *root = &bfqq_group(bfqq)->rq_pos_tree;
268662306a36Sopenharmony_ci	struct rb_node *parent, *node;
268762306a36Sopenharmony_ci	struct bfq_queue *__bfqq;
268862306a36Sopenharmony_ci
268962306a36Sopenharmony_ci	if (RB_EMPTY_ROOT(root))
269062306a36Sopenharmony_ci		return NULL;
269162306a36Sopenharmony_ci
269262306a36Sopenharmony_ci	/*
269362306a36Sopenharmony_ci	 * First, if we find a request starting at the end of the last
269462306a36Sopenharmony_ci	 * request, choose it.
269562306a36Sopenharmony_ci	 */
269662306a36Sopenharmony_ci	__bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
269762306a36Sopenharmony_ci	if (__bfqq)
269862306a36Sopenharmony_ci		return __bfqq;
269962306a36Sopenharmony_ci
270062306a36Sopenharmony_ci	/*
270162306a36Sopenharmony_ci	 * If the exact sector wasn't found, the parent of the NULL leaf
270262306a36Sopenharmony_ci	 * will contain the closest sector (rq_pos_tree sorted by
270362306a36Sopenharmony_ci	 * next_request position).
270462306a36Sopenharmony_ci	 */
270562306a36Sopenharmony_ci	__bfqq = rb_entry(parent, struct bfq_queue, pos_node);
270662306a36Sopenharmony_ci	if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
270762306a36Sopenharmony_ci		return __bfqq;
270862306a36Sopenharmony_ci
270962306a36Sopenharmony_ci	if (blk_rq_pos(__bfqq->next_rq) < sector)
271062306a36Sopenharmony_ci		node = rb_next(&__bfqq->pos_node);
271162306a36Sopenharmony_ci	else
271262306a36Sopenharmony_ci		node = rb_prev(&__bfqq->pos_node);
271362306a36Sopenharmony_ci	if (!node)
271462306a36Sopenharmony_ci		return NULL;
271562306a36Sopenharmony_ci
271662306a36Sopenharmony_ci	__bfqq = rb_entry(node, struct bfq_queue, pos_node);
271762306a36Sopenharmony_ci	if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
271862306a36Sopenharmony_ci		return __bfqq;
271962306a36Sopenharmony_ci
272062306a36Sopenharmony_ci	return NULL;
272162306a36Sopenharmony_ci}
272262306a36Sopenharmony_ci
272362306a36Sopenharmony_cistatic struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd,
272462306a36Sopenharmony_ci						   struct bfq_queue *cur_bfqq,
272562306a36Sopenharmony_ci						   sector_t sector)
272662306a36Sopenharmony_ci{
272762306a36Sopenharmony_ci	struct bfq_queue *bfqq;
272862306a36Sopenharmony_ci
272962306a36Sopenharmony_ci	/*
273062306a36Sopenharmony_ci	 * We shall notice if some of the queues are cooperating,
273162306a36Sopenharmony_ci	 * e.g., working closely on the same area of the device. In
273262306a36Sopenharmony_ci	 * that case, we can group them together and: 1) don't waste
273362306a36Sopenharmony_ci	 * time idling, and 2) serve the union of their requests in
273462306a36Sopenharmony_ci	 * the best possible order for throughput.
273562306a36Sopenharmony_ci	 */
273662306a36Sopenharmony_ci	bfqq = bfqq_find_close(bfqd, cur_bfqq, sector);
273762306a36Sopenharmony_ci	if (!bfqq || bfqq == cur_bfqq)
273862306a36Sopenharmony_ci		return NULL;
273962306a36Sopenharmony_ci
274062306a36Sopenharmony_ci	return bfqq;
274162306a36Sopenharmony_ci}
274262306a36Sopenharmony_ci
274362306a36Sopenharmony_cistatic struct bfq_queue *
274462306a36Sopenharmony_cibfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
274562306a36Sopenharmony_ci{
274662306a36Sopenharmony_ci	int process_refs, new_process_refs;
274762306a36Sopenharmony_ci	struct bfq_queue *__bfqq;
274862306a36Sopenharmony_ci
274962306a36Sopenharmony_ci	/*
275062306a36Sopenharmony_ci	 * If there are no process references on the new_bfqq, then it is
275162306a36Sopenharmony_ci	 * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
275262306a36Sopenharmony_ci	 * may have dropped their last reference (not just their last process
275362306a36Sopenharmony_ci	 * reference).
275462306a36Sopenharmony_ci	 */
275562306a36Sopenharmony_ci	if (!bfqq_process_refs(new_bfqq))
275662306a36Sopenharmony_ci		return NULL;
275762306a36Sopenharmony_ci
275862306a36Sopenharmony_ci	/* Avoid a circular list and skip interim queue merges. */
275962306a36Sopenharmony_ci	while ((__bfqq = new_bfqq->new_bfqq)) {
276062306a36Sopenharmony_ci		if (__bfqq == bfqq)
276162306a36Sopenharmony_ci			return NULL;
276262306a36Sopenharmony_ci		new_bfqq = __bfqq;
276362306a36Sopenharmony_ci	}
276462306a36Sopenharmony_ci
276562306a36Sopenharmony_ci	process_refs = bfqq_process_refs(bfqq);
276662306a36Sopenharmony_ci	new_process_refs = bfqq_process_refs(new_bfqq);
276762306a36Sopenharmony_ci	/*
276862306a36Sopenharmony_ci	 * If the process for the bfqq has gone away, there is no
276962306a36Sopenharmony_ci	 * sense in merging the queues.
277062306a36Sopenharmony_ci	 */
277162306a36Sopenharmony_ci	if (process_refs == 0 || new_process_refs == 0)
277262306a36Sopenharmony_ci		return NULL;
277362306a36Sopenharmony_ci
277462306a36Sopenharmony_ci	/*
277562306a36Sopenharmony_ci	 * Make sure merged queues belong to the same parent. Parents could
277662306a36Sopenharmony_ci	 * have changed since the time we decided the two queues are suitable
277762306a36Sopenharmony_ci	 * for merging.
277862306a36Sopenharmony_ci	 */
277962306a36Sopenharmony_ci	if (new_bfqq->entity.parent != bfqq->entity.parent)
278062306a36Sopenharmony_ci		return NULL;
278162306a36Sopenharmony_ci
278262306a36Sopenharmony_ci	bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
278362306a36Sopenharmony_ci		new_bfqq->pid);
278462306a36Sopenharmony_ci
278562306a36Sopenharmony_ci	/*
278662306a36Sopenharmony_ci	 * Merging is just a redirection: the requests of the process
278762306a36Sopenharmony_ci	 * owning one of the two queues are redirected to the other queue.
278862306a36Sopenharmony_ci	 * The latter queue, in its turn, is set as shared if this is the
278962306a36Sopenharmony_ci	 * first time that the requests of some process are redirected to
279062306a36Sopenharmony_ci	 * it.
279162306a36Sopenharmony_ci	 *
279262306a36Sopenharmony_ci	 * We redirect bfqq to new_bfqq and not the opposite, because
279362306a36Sopenharmony_ci	 * we are in the context of the process owning bfqq, thus we
279462306a36Sopenharmony_ci	 * have the io_cq of this process. So we can immediately
279562306a36Sopenharmony_ci	 * configure this io_cq to redirect the requests of the
279662306a36Sopenharmony_ci	 * process to new_bfqq. In contrast, the io_cq of new_bfqq is
279762306a36Sopenharmony_ci	 * not available any more (new_bfqq->bic == NULL).
279862306a36Sopenharmony_ci	 *
279962306a36Sopenharmony_ci	 * Anyway, even in case new_bfqq coincides with the in-service
280062306a36Sopenharmony_ci	 * queue, redirecting requests to the in-service queue is the
280162306a36Sopenharmony_ci	 * best option, as we feed the in-service queue with new
280262306a36Sopenharmony_ci	 * requests close to the last request served and, by doing so,
280362306a36Sopenharmony_ci	 * are likely to increase the throughput.
280462306a36Sopenharmony_ci	 */
280562306a36Sopenharmony_ci	bfqq->new_bfqq = new_bfqq;
280662306a36Sopenharmony_ci	/*
280762306a36Sopenharmony_ci	 * The above assignment schedules the following redirections:
280862306a36Sopenharmony_ci	 * each time some I/O for bfqq arrives, the process that
280962306a36Sopenharmony_ci	 * generated that I/O is disassociated from bfqq and
281062306a36Sopenharmony_ci	 * associated with new_bfqq. Here we increase new_bfqq->ref
281162306a36Sopenharmony_ci	 * in advance, adding the number of processes that are
281262306a36Sopenharmony_ci	 * expected to be associated with new_bfqq as they happen to
281362306a36Sopenharmony_ci	 * issue I/O.
281462306a36Sopenharmony_ci	 */
281562306a36Sopenharmony_ci	new_bfqq->ref += process_refs;
281662306a36Sopenharmony_ci	return new_bfqq;
281762306a36Sopenharmony_ci}
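
/*
 * Worked example of the scheduling above: suppose queue A is being set
 * up for a merge with queue B, and B has itself already been scheduled
 * for a merge with queue C (B->new_bfqq == C). The loop at the top of
 * bfq_setup_merge() skips the interim queue and makes A point directly
 * to C (A->new_bfqq = C), and C->ref grows by the number of process
 * references held on A, anticipating the processes that will be
 * re-associated with C as their I/O arrives. If, instead, following
 * the chain led back to A itself, the merge would be refused, to avoid
 * creating a cycle.
 */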
281862306a36Sopenharmony_ci
281962306a36Sopenharmony_cistatic bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
282062306a36Sopenharmony_ci					struct bfq_queue *new_bfqq)
282162306a36Sopenharmony_ci{
282262306a36Sopenharmony_ci	if (bfq_too_late_for_merging(new_bfqq))
282362306a36Sopenharmony_ci		return false;
282462306a36Sopenharmony_ci
282562306a36Sopenharmony_ci	if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) ||
282662306a36Sopenharmony_ci	    (bfqq->ioprio_class != new_bfqq->ioprio_class))
282762306a36Sopenharmony_ci		return false;
282862306a36Sopenharmony_ci
282962306a36Sopenharmony_ci	/*
283062306a36Sopenharmony_ci	 * If either of the queues has already been detected as seeky,
283162306a36Sopenharmony_ci	 * then merging it with the other queue is unlikely to lead to
283262306a36Sopenharmony_ci	 * sequential I/O.
283362306a36Sopenharmony_ci	 */
283462306a36Sopenharmony_ci	if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq))
283562306a36Sopenharmony_ci		return false;
283662306a36Sopenharmony_ci
283762306a36Sopenharmony_ci	/*
283862306a36Sopenharmony_ci	 * Interleaved I/O is known to be done by (some) applications
283962306a36Sopenharmony_ci	 * only for reads, so it does not make sense to merge async
284062306a36Sopenharmony_ci	 * queues.
284162306a36Sopenharmony_ci	 */
284262306a36Sopenharmony_ci	if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq))
284362306a36Sopenharmony_ci		return false;
284462306a36Sopenharmony_ci
284562306a36Sopenharmony_ci	return true;
284662306a36Sopenharmony_ci}
284762306a36Sopenharmony_ci
284862306a36Sopenharmony_cistatic bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
284962306a36Sopenharmony_ci					     struct bfq_queue *bfqq);
285062306a36Sopenharmony_ci
285162306a36Sopenharmony_cistatic struct bfq_queue *
285262306a36Sopenharmony_cibfq_setup_stable_merge(struct bfq_data *bfqd, struct bfq_queue *bfqq,
285362306a36Sopenharmony_ci		       struct bfq_queue *stable_merge_bfqq,
285462306a36Sopenharmony_ci		       struct bfq_iocq_bfqq_data *bfqq_data)
285562306a36Sopenharmony_ci{
285662306a36Sopenharmony_ci	int proc_ref = min(bfqq_process_refs(bfqq),
285762306a36Sopenharmony_ci			   bfqq_process_refs(stable_merge_bfqq));
285862306a36Sopenharmony_ci	struct bfq_queue *new_bfqq = NULL;
285962306a36Sopenharmony_ci
286062306a36Sopenharmony_ci	bfqq_data->stable_merge_bfqq = NULL;
286162306a36Sopenharmony_ci	if (idling_boosts_thr_without_issues(bfqd, bfqq) || proc_ref == 0)
286262306a36Sopenharmony_ci		goto out;
286362306a36Sopenharmony_ci
286462306a36Sopenharmony_ci	/* next function will take at least one ref */
286562306a36Sopenharmony_ci	new_bfqq = bfq_setup_merge(bfqq, stable_merge_bfqq);
286662306a36Sopenharmony_ci
286762306a36Sopenharmony_ci	if (new_bfqq) {
286862306a36Sopenharmony_ci		bfqq_data->stably_merged = true;
286962306a36Sopenharmony_ci		if (new_bfqq->bic) {
287062306a36Sopenharmony_ci			unsigned int new_a_idx = new_bfqq->actuator_idx;
287162306a36Sopenharmony_ci			struct bfq_iocq_bfqq_data *new_bfqq_data =
287262306a36Sopenharmony_ci				&new_bfqq->bic->bfqq_data[new_a_idx];
287362306a36Sopenharmony_ci
287462306a36Sopenharmony_ci			new_bfqq_data->stably_merged = true;
287562306a36Sopenharmony_ci		}
287662306a36Sopenharmony_ci	}
287762306a36Sopenharmony_ci
287862306a36Sopenharmony_ciout:
287962306a36Sopenharmony_ci	/* deschedule stable merge, because done or aborted here */
288062306a36Sopenharmony_ci	bfq_put_stable_ref(stable_merge_bfqq);
288162306a36Sopenharmony_ci
288262306a36Sopenharmony_ci	return new_bfqq;
288362306a36Sopenharmony_ci}
288462306a36Sopenharmony_ci
288562306a36Sopenharmony_ci/*
288662306a36Sopenharmony_ci * Attempt to schedule a merge of bfqq with the currently in-service
288762306a36Sopenharmony_ci * queue or with a close queue among the scheduled queues.  Return
288862306a36Sopenharmony_ci * NULL if no merge was scheduled, a pointer to the shared bfq_queue
288962306a36Sopenharmony_ci * structure otherwise.
289062306a36Sopenharmony_ci *
289162306a36Sopenharmony_ci * The OOM queue is not allowed to participate in cooperation: in fact, since
289262306a36Sopenharmony_ci * the requests temporarily redirected to the OOM queue could be redirected
289362306a36Sopenharmony_ci * again to dedicated queues at any time, the state needed to correctly
289462306a36Sopenharmony_ci * handle merging with the OOM queue would be quite complex and expensive
289562306a36Sopenharmony_ci * to maintain. Besides, in such a critical condition as an out of memory,
289662306a36Sopenharmony_ci * the benefits of queue merging may be of little relevance, or even negligible.
289762306a36Sopenharmony_ci *
289862306a36Sopenharmony_ci * WARNING: queue merging may impair fairness among non-weight raised
289962306a36Sopenharmony_ci * queues, for at least two reasons: 1) the original weight of a
290062306a36Sopenharmony_ci * merged queue may change during the merged state, 2) even if the
290162306a36Sopenharmony_ci * weight stays the same, a merged queue may be bloated with many more
290262306a36Sopenharmony_ci * requests than the ones produced by its originally-associated
290362306a36Sopenharmony_ci * process.
290462306a36Sopenharmony_ci */
290562306a36Sopenharmony_cistatic struct bfq_queue *
290662306a36Sopenharmony_cibfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
290762306a36Sopenharmony_ci		     void *io_struct, bool request, struct bfq_io_cq *bic)
290862306a36Sopenharmony_ci{
290962306a36Sopenharmony_ci	struct bfq_queue *in_service_bfqq, *new_bfqq;
291062306a36Sopenharmony_ci	unsigned int a_idx = bfqq->actuator_idx;
291162306a36Sopenharmony_ci	struct bfq_iocq_bfqq_data *bfqq_data = &bic->bfqq_data[a_idx];
291262306a36Sopenharmony_ci
291362306a36Sopenharmony_ci	/* if a merge has already been setup, then proceed with that first */
291462306a36Sopenharmony_ci	if (bfqq->new_bfqq)
291562306a36Sopenharmony_ci		return bfqq->new_bfqq;
291662306a36Sopenharmony_ci
291762306a36Sopenharmony_ci	/*
291862306a36Sopenharmony_ci	 * Check delayed stable merge for rotational or non-queueing
291962306a36Sopenharmony_ci	 * devs. For this branch to be executed, bfqq must not be
292062306a36Sopenharmony_ci	 * currently merged with some other queue (i.e., bfqq->bic
292162306a36Sopenharmony_ci	 * must be non null). If we considered also merged queues,
292262306a36Sopenharmony_ci	 * then we should also check whether bfqq has already been
292362306a36Sopenharmony_ci	 * merged with bic->stable_merge_bfqq. But this would be
292462306a36Sopenharmony_ci	 * costly and complicated.
292562306a36Sopenharmony_ci	 */
292662306a36Sopenharmony_ci	if (unlikely(!bfqd->nonrot_with_queueing)) {
292762306a36Sopenharmony_ci		/*
292862306a36Sopenharmony_ci		 * Make sure also that bfqq is sync, because
292962306a36Sopenharmony_ci		 * bic->stable_merge_bfqq may point to some queue (for
293062306a36Sopenharmony_ci		 * stable merging) also if bic is associated with a
293162306a36Sopenharmony_ci		 * sync queue, but this bfqq is async
293262306a36Sopenharmony_ci		 */
293362306a36Sopenharmony_ci		if (bfq_bfqq_sync(bfqq) && bfqq_data->stable_merge_bfqq &&
293462306a36Sopenharmony_ci		    !bfq_bfqq_just_created(bfqq) &&
293562306a36Sopenharmony_ci		    time_is_before_jiffies(bfqq->split_time +
293662306a36Sopenharmony_ci					  msecs_to_jiffies(bfq_late_stable_merging)) &&
293762306a36Sopenharmony_ci		    time_is_before_jiffies(bfqq->creation_time +
293862306a36Sopenharmony_ci					   msecs_to_jiffies(bfq_late_stable_merging))) {
293962306a36Sopenharmony_ci			struct bfq_queue *stable_merge_bfqq =
294062306a36Sopenharmony_ci				bfqq_data->stable_merge_bfqq;
294162306a36Sopenharmony_ci
294262306a36Sopenharmony_ci			return bfq_setup_stable_merge(bfqd, bfqq,
294362306a36Sopenharmony_ci						      stable_merge_bfqq,
294462306a36Sopenharmony_ci						      bfqq_data);
294562306a36Sopenharmony_ci		}
294662306a36Sopenharmony_ci	}
294762306a36Sopenharmony_ci
294862306a36Sopenharmony_ci	/*
294962306a36Sopenharmony_ci	 * Do not perform queue merging if the device is non
295062306a36Sopenharmony_ci	 * rotational and performs internal queueing. In fact, such a
295162306a36Sopenharmony_ci	 * device reaches a high speed through internal parallelism
295262306a36Sopenharmony_ci	 * and pipelining. This means that, to reach a high
295362306a36Sopenharmony_ci	 * throughput, it must have many requests enqueued at the same
295462306a36Sopenharmony_ci	 * time. But, in this configuration, the internal scheduling
295562306a36Sopenharmony_ci	 * algorithm of the device does exactly the job of queue
295662306a36Sopenharmony_ci	 * merging: it reorders requests so as to obtain as much as
295762306a36Sopenharmony_ci	 * possible a sequential I/O pattern. As a consequence, with
295862306a36Sopenharmony_ci	 * the workload generated by processes doing interleaved I/O,
295962306a36Sopenharmony_ci	 * the throughput reached by the device is likely to be the
296062306a36Sopenharmony_ci	 * same, with and without queue merging.
296162306a36Sopenharmony_ci	 *
296262306a36Sopenharmony_ci	 * Disabling merging also provides a remarkable benefit in
296362306a36Sopenharmony_ci	 * terms of throughput. Merging tends to make many workloads
296462306a36Sopenharmony_ci	 * artificially more uneven, because of shared queues
296562306a36Sopenharmony_ci	 * remaining non empty for incomparably more time than
296662306a36Sopenharmony_ci	 * non-merged queues. This may accentuate workload
296762306a36Sopenharmony_ci	 * asymmetries. For example, if one of the queues in a set of
296862306a36Sopenharmony_ci	 * merged queues has a higher weight than a normal queue, then
296962306a36Sopenharmony_ci	 * the shared queue may inherit such a high weight and, by
297062306a36Sopenharmony_ci	 * staying almost always active, may force BFQ to perform I/O
297162306a36Sopenharmony_ci	 * plugging most of the time. This evidently makes it harder
297262306a36Sopenharmony_ci	 * for BFQ to let the device reach a high throughput.
297362306a36Sopenharmony_ci	 *
297462306a36Sopenharmony_ci	 * Finally, the likely() macro below is used not because one
297562306a36Sopenharmony_ci	 * of the two branches is more likely than the other, but to
297662306a36Sopenharmony_ci	 * have the code path after the following if() executed as
297762306a36Sopenharmony_ci	 * fast as possible for the case of a non rotational device
297862306a36Sopenharmony_ci	 * with queueing. We want it because this is the fastest kind
297962306a36Sopenharmony_ci	 * of device. On the opposite end, the likely() may lengthen
298062306a36Sopenharmony_ci	 * the execution time of BFQ for the case of slower devices
298162306a36Sopenharmony_ci	 * (rotational or at least without queueing). But in this case
298262306a36Sopenharmony_ci	 * the execution time of BFQ matters very little, if not at
298362306a36Sopenharmony_ci	 * all.
298462306a36Sopenharmony_ci	 */
298562306a36Sopenharmony_ci	if (likely(bfqd->nonrot_with_queueing))
298662306a36Sopenharmony_ci		return NULL;
298762306a36Sopenharmony_ci
298862306a36Sopenharmony_ci	/*
298962306a36Sopenharmony_ci	 * Prevent bfqq from being merged if it has been created too
299062306a36Sopenharmony_ci	 * long ago. The idea is that true cooperating processes, and
299162306a36Sopenharmony_ci	 * thus their associated bfq_queues, are supposed to be
299262306a36Sopenharmony_ci	 * created shortly after each other. This is the case, e.g.,
299362306a36Sopenharmony_ci	 * for KVM/QEMU and dump I/O threads. Based on this
299462306a36Sopenharmony_ci	 * assumption, the following filtering greatly reduces the
299562306a36Sopenharmony_ci	 * probability that two non-cooperating processes, which just
299662306a36Sopenharmony_ci	 * happen to do close I/O for some short time interval, have
299762306a36Sopenharmony_ci	 * their queues merged by mistake.
299862306a36Sopenharmony_ci	 */
299962306a36Sopenharmony_ci	if (bfq_too_late_for_merging(bfqq))
300062306a36Sopenharmony_ci		return NULL;
300162306a36Sopenharmony_ci
300262306a36Sopenharmony_ci	if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
300362306a36Sopenharmony_ci		return NULL;
300462306a36Sopenharmony_ci
300562306a36Sopenharmony_ci	/* If there is only one backlogged queue, don't search. */
300662306a36Sopenharmony_ci	if (bfq_tot_busy_queues(bfqd) == 1)
300762306a36Sopenharmony_ci		return NULL;
300862306a36Sopenharmony_ci
300962306a36Sopenharmony_ci	in_service_bfqq = bfqd->in_service_queue;
301062306a36Sopenharmony_ci
301162306a36Sopenharmony_ci	if (in_service_bfqq && in_service_bfqq != bfqq &&
301262306a36Sopenharmony_ci	    likely(in_service_bfqq != &bfqd->oom_bfqq) &&
301362306a36Sopenharmony_ci	    bfq_rq_close_to_sector(io_struct, request,
301462306a36Sopenharmony_ci				   bfqd->in_serv_last_pos) &&
301562306a36Sopenharmony_ci	    bfqq->entity.parent == in_service_bfqq->entity.parent &&
301662306a36Sopenharmony_ci	    bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
301762306a36Sopenharmony_ci		new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
301862306a36Sopenharmony_ci		if (new_bfqq)
301962306a36Sopenharmony_ci			return new_bfqq;
302062306a36Sopenharmony_ci	}
302162306a36Sopenharmony_ci	/*
302262306a36Sopenharmony_ci	 * Check whether there is a cooperator among currently scheduled
302362306a36Sopenharmony_ci	 * queues. The only thing we need is that the bio/request is not
302462306a36Sopenharmony_ci	 * NULL, as we need it to establish whether a cooperator exists.
302562306a36Sopenharmony_ci	 */
302662306a36Sopenharmony_ci	new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
302762306a36Sopenharmony_ci			bfq_io_struct_pos(io_struct, request));
302862306a36Sopenharmony_ci
302962306a36Sopenharmony_ci	if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq) &&
303062306a36Sopenharmony_ci	    bfq_may_be_close_cooperator(bfqq, new_bfqq))
303162306a36Sopenharmony_ci		return bfq_setup_merge(bfqq, new_bfqq);
303262306a36Sopenharmony_ci
303362306a36Sopenharmony_ci	return NULL;
303462306a36Sopenharmony_ci}
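
/*
 * Summary of the decision order in bfq_setup_cooperator(): a merge
 * that is already scheduled is returned right away; on rotational or
 * non-queueing devices a delayed (stable) merge may be performed;
 * otherwise the attempt is aborted for fast non-rotational devices
 * with internal queueing, for queues created too long ago, for the OOM
 * queue, when no bio/request is available, or when only one queue is
 * backlogged. Only then are the in-service queue and, failing that, a
 * positionally close scheduled queue considered as merge candidates.
 */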
303562306a36Sopenharmony_ci
303662306a36Sopenharmony_cistatic void bfq_bfqq_save_state(struct bfq_queue *bfqq)
303762306a36Sopenharmony_ci{
303862306a36Sopenharmony_ci	struct bfq_io_cq *bic = bfqq->bic;
303962306a36Sopenharmony_ci	unsigned int a_idx = bfqq->actuator_idx;
304062306a36Sopenharmony_ci	struct bfq_iocq_bfqq_data *bfqq_data = &bic->bfqq_data[a_idx];
304162306a36Sopenharmony_ci
304262306a36Sopenharmony_ci	/*
304362306a36Sopenharmony_ci	 * If !bfqq->bic, the queue is already shared or its requests
304462306a36Sopenharmony_ci	 * have already been redirected to a shared queue; both idle window
304562306a36Sopenharmony_ci	 * and weight raising state have already been saved. Do nothing.
304662306a36Sopenharmony_ci	 */
304762306a36Sopenharmony_ci	if (!bic)
304862306a36Sopenharmony_ci		return;
304962306a36Sopenharmony_ci
305062306a36Sopenharmony_ci	bfqq_data->saved_last_serv_time_ns = bfqq->last_serv_time_ns;
305162306a36Sopenharmony_ci	bfqq_data->saved_inject_limit =	bfqq->inject_limit;
305262306a36Sopenharmony_ci	bfqq_data->saved_decrease_time_jif = bfqq->decrease_time_jif;
305362306a36Sopenharmony_ci
305462306a36Sopenharmony_ci	bfqq_data->saved_weight = bfqq->entity.orig_weight;
305562306a36Sopenharmony_ci	bfqq_data->saved_ttime = bfqq->ttime;
305662306a36Sopenharmony_ci	bfqq_data->saved_has_short_ttime =
305762306a36Sopenharmony_ci		bfq_bfqq_has_short_ttime(bfqq);
305862306a36Sopenharmony_ci	bfqq_data->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
305962306a36Sopenharmony_ci	bfqq_data->saved_io_start_time = bfqq->io_start_time;
306062306a36Sopenharmony_ci	bfqq_data->saved_tot_idle_time = bfqq->tot_idle_time;
306162306a36Sopenharmony_ci	bfqq_data->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
306262306a36Sopenharmony_ci	bfqq_data->was_in_burst_list =
306362306a36Sopenharmony_ci		!hlist_unhashed(&bfqq->burst_list_node);
306462306a36Sopenharmony_ci
306562306a36Sopenharmony_ci	if (unlikely(bfq_bfqq_just_created(bfqq) &&
306662306a36Sopenharmony_ci		     !bfq_bfqq_in_large_burst(bfqq) &&
306762306a36Sopenharmony_ci		     bfqq->bfqd->low_latency)) {
306862306a36Sopenharmony_ci		/*
306962306a36Sopenharmony_ci		 * bfqq being merged right after being created: bfqq
307062306a36Sopenharmony_ci		 * would have deserved interactive weight raising, but
307162306a36Sopenharmony_ci		 * did not make it to be set in a weight-raised state,
307262306a36Sopenharmony_ci		 * because of this early merge.	Store directly the
307362306a36Sopenharmony_ci		 * weight-raising state that would have been assigned
307462306a36Sopenharmony_ci		 * to bfqq, so that bfqq does not unjustly fail
307562306a36Sopenharmony_ci		 * to enjoy weight raising if it is split soon.
307662306a36Sopenharmony_ci		 */
307762306a36Sopenharmony_ci		bfqq_data->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff;
307862306a36Sopenharmony_ci		bfqq_data->saved_wr_start_at_switch_to_srt =
307962306a36Sopenharmony_ci			bfq_smallest_from_now();
308062306a36Sopenharmony_ci		bfqq_data->saved_wr_cur_max_time =
308162306a36Sopenharmony_ci			bfq_wr_duration(bfqq->bfqd);
308262306a36Sopenharmony_ci		bfqq_data->saved_last_wr_start_finish = jiffies;
308362306a36Sopenharmony_ci	} else {
308462306a36Sopenharmony_ci		bfqq_data->saved_wr_coeff = bfqq->wr_coeff;
308562306a36Sopenharmony_ci		bfqq_data->saved_wr_start_at_switch_to_srt =
308662306a36Sopenharmony_ci			bfqq->wr_start_at_switch_to_srt;
308762306a36Sopenharmony_ci		bfqq_data->saved_service_from_wr =
308862306a36Sopenharmony_ci			bfqq->service_from_wr;
308962306a36Sopenharmony_ci		bfqq_data->saved_last_wr_start_finish =
309062306a36Sopenharmony_ci			bfqq->last_wr_start_finish;
309162306a36Sopenharmony_ci		bfqq_data->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
309262306a36Sopenharmony_ci	}
309362306a36Sopenharmony_ci}
309462306a36Sopenharmony_ci
309562306a36Sopenharmony_ci
309662306a36Sopenharmony_cistatic void
309762306a36Sopenharmony_cibfq_reassign_last_bfqq(struct bfq_queue *cur_bfqq, struct bfq_queue *new_bfqq)
309862306a36Sopenharmony_ci{
309962306a36Sopenharmony_ci	if (cur_bfqq->entity.parent &&
310062306a36Sopenharmony_ci	    cur_bfqq->entity.parent->last_bfqq_created == cur_bfqq)
310162306a36Sopenharmony_ci		cur_bfqq->entity.parent->last_bfqq_created = new_bfqq;
310262306a36Sopenharmony_ci	else if (cur_bfqq->bfqd && cur_bfqq->bfqd->last_bfqq_created == cur_bfqq)
310362306a36Sopenharmony_ci		cur_bfqq->bfqd->last_bfqq_created = new_bfqq;
310462306a36Sopenharmony_ci}
310562306a36Sopenharmony_ci
310662306a36Sopenharmony_civoid bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
310762306a36Sopenharmony_ci{
310862306a36Sopenharmony_ci	/*
310962306a36Sopenharmony_ci	 * To prevent bfqq's service guarantees from being violated,
311062306a36Sopenharmony_ci	 * bfqq may be left busy, i.e., queued for service, even if
311162306a36Sopenharmony_ci	 * empty (see comments in __bfq_bfqq_expire() for
311262306a36Sopenharmony_ci	 * details). But, if no process will send requests to bfqq any
311362306a36Sopenharmony_ci	 * longer, then there is no point in keeping bfqq queued for
311462306a36Sopenharmony_ci	 * service. In addition, keeping bfqq queued for service, but
311562306a36Sopenharmony_ci	 * with no process ref any longer, may have caused bfqq to be
311662306a36Sopenharmony_ci	 * freed when dequeued from service. But this is assumed to
311762306a36Sopenharmony_ci	 * never happen.
311862306a36Sopenharmony_ci	 */
311962306a36Sopenharmony_ci	if (bfq_bfqq_busy(bfqq) && RB_EMPTY_ROOT(&bfqq->sort_list) &&
312062306a36Sopenharmony_ci	    bfqq != bfqd->in_service_queue)
312162306a36Sopenharmony_ci		bfq_del_bfqq_busy(bfqq, false);
312262306a36Sopenharmony_ci
312362306a36Sopenharmony_ci	bfq_reassign_last_bfqq(bfqq, NULL);
312462306a36Sopenharmony_ci
312562306a36Sopenharmony_ci	bfq_put_queue(bfqq);
312662306a36Sopenharmony_ci}
312762306a36Sopenharmony_ci
312862306a36Sopenharmony_cistatic void
312962306a36Sopenharmony_cibfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
313062306a36Sopenharmony_ci		struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
313162306a36Sopenharmony_ci{
313262306a36Sopenharmony_ci	bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
313362306a36Sopenharmony_ci		(unsigned long)new_bfqq->pid);
313462306a36Sopenharmony_ci	/* Save weight raising and idle window of the merged queues */
313562306a36Sopenharmony_ci	bfq_bfqq_save_state(bfqq);
313662306a36Sopenharmony_ci	bfq_bfqq_save_state(new_bfqq);
313762306a36Sopenharmony_ci	if (bfq_bfqq_IO_bound(bfqq))
313862306a36Sopenharmony_ci		bfq_mark_bfqq_IO_bound(new_bfqq);
313962306a36Sopenharmony_ci	bfq_clear_bfqq_IO_bound(bfqq);
314062306a36Sopenharmony_ci
314162306a36Sopenharmony_ci	/*
314262306a36Sopenharmony_ci	 * The processes associated with bfqq are cooperators of the
314362306a36Sopenharmony_ci	 * processes associated with new_bfqq. So, if bfqq has a
314462306a36Sopenharmony_ci	 * waker, then assume that all these processes will be happy
314562306a36Sopenharmony_ci	 * to let bfqq's waker freely inject I/O when they have no
314662306a36Sopenharmony_ci	 * I/O.
314762306a36Sopenharmony_ci	 */
314862306a36Sopenharmony_ci	if (bfqq->waker_bfqq && !new_bfqq->waker_bfqq &&
314962306a36Sopenharmony_ci	    bfqq->waker_bfqq != new_bfqq) {
315062306a36Sopenharmony_ci		new_bfqq->waker_bfqq = bfqq->waker_bfqq;
315162306a36Sopenharmony_ci		new_bfqq->tentative_waker_bfqq = NULL;
315262306a36Sopenharmony_ci
315362306a36Sopenharmony_ci		/*
315462306a36Sopenharmony_ci		 * If the waker queue disappears, then
315562306a36Sopenharmony_ci		 * new_bfqq->waker_bfqq must be reset. So insert
315662306a36Sopenharmony_ci		 * new_bfqq into the woken_list of the waker. See
315762306a36Sopenharmony_ci		 * bfq_check_waker for details.
315862306a36Sopenharmony_ci		 */
315962306a36Sopenharmony_ci		hlist_add_head(&new_bfqq->woken_list_node,
316062306a36Sopenharmony_ci			       &new_bfqq->waker_bfqq->woken_list);
316162306a36Sopenharmony_ci
316262306a36Sopenharmony_ci	}
316362306a36Sopenharmony_ci
316462306a36Sopenharmony_ci	/*
316562306a36Sopenharmony_ci	 * If bfqq is weight-raised, then let new_bfqq inherit
316662306a36Sopenharmony_ci	 * weight-raising. To reduce false positives, neglect the case
316762306a36Sopenharmony_ci	 * where bfqq has just been created, but has not yet made it
316862306a36Sopenharmony_ci	 * to be weight-raised (which may happen because EQM may merge
316962306a36Sopenharmony_ci	 * bfqq even before bfq_add_request is executed for the first
317062306a36Sopenharmony_ci	 * time for bfqq). Handling this case would however be very
317162306a36Sopenharmony_ci	 * easy, thanks to the flag just_created.
317262306a36Sopenharmony_ci	 */
317362306a36Sopenharmony_ci	if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) {
317462306a36Sopenharmony_ci		new_bfqq->wr_coeff = bfqq->wr_coeff;
317562306a36Sopenharmony_ci		new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time;
317662306a36Sopenharmony_ci		new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish;
317762306a36Sopenharmony_ci		new_bfqq->wr_start_at_switch_to_srt =
317862306a36Sopenharmony_ci			bfqq->wr_start_at_switch_to_srt;
317962306a36Sopenharmony_ci		if (bfq_bfqq_busy(new_bfqq))
318062306a36Sopenharmony_ci			bfqd->wr_busy_queues++;
318162306a36Sopenharmony_ci		new_bfqq->entity.prio_changed = 1;
318262306a36Sopenharmony_ci	}
318362306a36Sopenharmony_ci
318462306a36Sopenharmony_ci	if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */
318562306a36Sopenharmony_ci		bfqq->wr_coeff = 1;
318662306a36Sopenharmony_ci		bfqq->entity.prio_changed = 1;
318762306a36Sopenharmony_ci		if (bfq_bfqq_busy(bfqq))
318862306a36Sopenharmony_ci			bfqd->wr_busy_queues--;
318962306a36Sopenharmony_ci	}
319062306a36Sopenharmony_ci
319162306a36Sopenharmony_ci	bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d",
319262306a36Sopenharmony_ci		     bfqd->wr_busy_queues);
319362306a36Sopenharmony_ci
319462306a36Sopenharmony_ci	/*
319562306a36Sopenharmony_ci	 * Merge queues (that is, let bic redirect its requests to new_bfqq)
319662306a36Sopenharmony_ci	 */
319762306a36Sopenharmony_ci	bic_set_bfqq(bic, new_bfqq, true, bfqq->actuator_idx);
319862306a36Sopenharmony_ci	bfq_mark_bfqq_coop(new_bfqq);
319962306a36Sopenharmony_ci	/*
320062306a36Sopenharmony_ci	 * new_bfqq now belongs to at least two bics (it is a shared queue):
320162306a36Sopenharmony_ci	 * set new_bfqq->bic to NULL. bfqq either:
320262306a36Sopenharmony_ci	 * - does not belong to any bic any more, and hence bfqq->bic must
320362306a36Sopenharmony_ci	 *   be set to NULL, or
320462306a36Sopenharmony_ci	 * - is a queue whose owning bics have already been redirected to a
320562306a36Sopenharmony_ci	 *   different queue, hence the queue is destined to not belong to
320662306a36Sopenharmony_ci	 *   any bic soon and bfqq->bic is already NULL (therefore the next
320762306a36Sopenharmony_ci	 *   assignment causes no harm).
320862306a36Sopenharmony_ci	 */
320962306a36Sopenharmony_ci	new_bfqq->bic = NULL;
321062306a36Sopenharmony_ci	/*
321162306a36Sopenharmony_ci	 * If the queue is shared, the pid is the pid of one of the associated
321262306a36Sopenharmony_ci	 * processes. Which pid depends on the exact sequence of merge events
321362306a36Sopenharmony_ci	 * the queue underwent. So printing such a pid is useless and confusing
321462306a36Sopenharmony_ci	 * because it reports a random pid between those of the associated
321562306a36Sopenharmony_ci	 * processes.
321662306a36Sopenharmony_ci	 * We mark such a queue with a pid -1, and then print SHARED instead of
321762306a36Sopenharmony_ci	 * a pid in logging messages.
321862306a36Sopenharmony_ci	 */
321962306a36Sopenharmony_ci	new_bfqq->pid = -1;
322062306a36Sopenharmony_ci	bfqq->bic = NULL;
322162306a36Sopenharmony_ci
322262306a36Sopenharmony_ci	bfq_reassign_last_bfqq(bfqq, new_bfqq);
322362306a36Sopenharmony_ci
322462306a36Sopenharmony_ci	bfq_release_process_ref(bfqd, bfqq);
322562306a36Sopenharmony_ci}
322662306a36Sopenharmony_ci
322762306a36Sopenharmony_cistatic bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
322862306a36Sopenharmony_ci				struct bio *bio)
322962306a36Sopenharmony_ci{
323062306a36Sopenharmony_ci	struct bfq_data *bfqd = q->elevator->elevator_data;
323162306a36Sopenharmony_ci	bool is_sync = op_is_sync(bio->bi_opf);
323262306a36Sopenharmony_ci	struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq;
323362306a36Sopenharmony_ci
323462306a36Sopenharmony_ci	/*
323562306a36Sopenharmony_ci	 * Disallow merge of a sync bio into an async request.
323662306a36Sopenharmony_ci	 */
323762306a36Sopenharmony_ci	if (is_sync && !rq_is_sync(rq))
323862306a36Sopenharmony_ci		return false;
323962306a36Sopenharmony_ci
324062306a36Sopenharmony_ci	/*
324162306a36Sopenharmony_ci	 * Lookup the bfqq that this bio will be queued with. Allow
324262306a36Sopenharmony_ci	 * merge only if rq is queued there.
324362306a36Sopenharmony_ci	 */
324462306a36Sopenharmony_ci	if (!bfqq)
324562306a36Sopenharmony_ci		return false;
324662306a36Sopenharmony_ci
324762306a36Sopenharmony_ci	/*
324862306a36Sopenharmony_ci	 * We take advantage of this function to perform an early merge
324962306a36Sopenharmony_ci	 * of the queues of possible cooperating processes.
325062306a36Sopenharmony_ci	 */
325162306a36Sopenharmony_ci	new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false, bfqd->bio_bic);
325262306a36Sopenharmony_ci	if (new_bfqq) {
325362306a36Sopenharmony_ci		/*
325462306a36Sopenharmony_ci		 * bic still points to bfqq, then it has not yet been
325562306a36Sopenharmony_ci		 * redirected to some other bfq_queue, and a queue
325662306a36Sopenharmony_ci		 * merge between bfqq and new_bfqq can be safely
325762306a36Sopenharmony_ci		 * fulfilled, i.e., bic can be redirected to new_bfqq
325862306a36Sopenharmony_ci		 * and bfqq can be put.
325962306a36Sopenharmony_ci		 */
326062306a36Sopenharmony_ci		bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
326162306a36Sopenharmony_ci				new_bfqq);
326262306a36Sopenharmony_ci		/*
326362306a36Sopenharmony_ci		 * If we get here, bio will be queued into new_queue,
326462306a36Sopenharmony_ci		 * so use new_bfqq to decide whether bio and rq can be
326562306a36Sopenharmony_ci		 * merged.
326662306a36Sopenharmony_ci		 */
326762306a36Sopenharmony_ci		bfqq = new_bfqq;
326862306a36Sopenharmony_ci
326962306a36Sopenharmony_ci		/*
327062306a36Sopenharmony_ci		 * Change also bfqd->bio_bfqq, as
327162306a36Sopenharmony_ci		 * bfqd->bio_bic now points to new_bfqq, and
327262306a36Sopenharmony_ci		 * this function may be invoked again (and then may
327362306a36Sopenharmony_ci		 * use again bfqd->bio_bfqq).
327462306a36Sopenharmony_ci		 */
327562306a36Sopenharmony_ci		bfqd->bio_bfqq = bfqq;
327662306a36Sopenharmony_ci	}
327762306a36Sopenharmony_ci
327862306a36Sopenharmony_ci	return bfqq == RQ_BFQQ(rq);
327962306a36Sopenharmony_ci}
328062306a36Sopenharmony_ci
328162306a36Sopenharmony_ci/*
328262306a36Sopenharmony_ci * Set the maximum time for the in-service queue to consume its
328362306a36Sopenharmony_ci * budget. This prevents seeky processes from lowering the throughput.
328462306a36Sopenharmony_ci * In practice, a time-slice service scheme is used with seeky
328562306a36Sopenharmony_ci * processes.
328662306a36Sopenharmony_ci */
328762306a36Sopenharmony_cistatic void bfq_set_budget_timeout(struct bfq_data *bfqd,
328862306a36Sopenharmony_ci				   struct bfq_queue *bfqq)
328962306a36Sopenharmony_ci{
329062306a36Sopenharmony_ci	unsigned int timeout_coeff;
329162306a36Sopenharmony_ci
329262306a36Sopenharmony_ci	if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
329362306a36Sopenharmony_ci		timeout_coeff = 1;
329462306a36Sopenharmony_ci	else
329562306a36Sopenharmony_ci		timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
329662306a36Sopenharmony_ci
329762306a36Sopenharmony_ci	bfqd->last_budget_start = ktime_get();
329862306a36Sopenharmony_ci
329962306a36Sopenharmony_ci	bfqq->budget_timeout = jiffies +
330062306a36Sopenharmony_ci		bfqd->bfq_timeout * timeout_coeff;
330162306a36Sopenharmony_ci}
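
/*
 * Illustrative example of the timeout computation above (numbers
 * assumed for illustration only): for a non-weight-raised queue,
 * entity.weight equals entity.orig_weight, so timeout_coeff is 1 and
 * the queue may hold the device for at most bfq_timeout jiffies. If
 * interactive weight raising has, say, tripled the queue's weight,
 * timeout_coeff becomes 3 and the budget timeout stretches to
 * 3 * bfq_timeout. Soft real-time weight raising (wr_cur_max_time ==
 * bfq_wr_rt_max_time) deliberately keeps the coefficient at 1.
 */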
330262306a36Sopenharmony_ci
330362306a36Sopenharmony_cistatic void __bfq_set_in_service_queue(struct bfq_data *bfqd,
330462306a36Sopenharmony_ci				       struct bfq_queue *bfqq)
330562306a36Sopenharmony_ci{
330662306a36Sopenharmony_ci	if (bfqq) {
330762306a36Sopenharmony_ci		bfq_clear_bfqq_fifo_expire(bfqq);
330862306a36Sopenharmony_ci
330962306a36Sopenharmony_ci		bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8;
331062306a36Sopenharmony_ci
331162306a36Sopenharmony_ci		if (time_is_before_jiffies(bfqq->last_wr_start_finish) &&
331262306a36Sopenharmony_ci		    bfqq->wr_coeff > 1 &&
331362306a36Sopenharmony_ci		    bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
331462306a36Sopenharmony_ci		    time_is_before_jiffies(bfqq->budget_timeout)) {
331562306a36Sopenharmony_ci			/*
331662306a36Sopenharmony_ci			 * For soft real-time queues, move the start
331762306a36Sopenharmony_ci			 * of the weight-raising period forward by the
331862306a36Sopenharmony_ci			 * time the queue has not received any
331962306a36Sopenharmony_ci			 * service. Otherwise, a relatively long
332062306a36Sopenharmony_ci			 * service delay is likely to cause the
332162306a36Sopenharmony_ci			 * weight-raising period of the queue to end,
332262306a36Sopenharmony_ci			 * because of the short duration of the
332362306a36Sopenharmony_ci			 * weight-raising period of a soft real-time
332462306a36Sopenharmony_ci			 * queue.  It is worth noting that this move
332562306a36Sopenharmony_ci			 * is not so dangerous for the other queues,
332662306a36Sopenharmony_ci			 * because soft real-time queues are not
332762306a36Sopenharmony_ci			 * greedy.
332862306a36Sopenharmony_ci			 *
332962306a36Sopenharmony_ci			 * To not add a further variable, we use the
333062306a36Sopenharmony_ci			 * overloaded field budget_timeout to
333162306a36Sopenharmony_ci			 * determine for how long the queue has not
333262306a36Sopenharmony_ci			 * received service, i.e., how much time has
333362306a36Sopenharmony_ci			 * elapsed since the queue expired. However,
333462306a36Sopenharmony_ci			 * this is a little imprecise, because
333562306a36Sopenharmony_ci			 * budget_timeout is set to jiffies if bfqq
333662306a36Sopenharmony_ci			 * not only expires, but also remains with no
333762306a36Sopenharmony_ci			 * request.
333862306a36Sopenharmony_ci			 */
333962306a36Sopenharmony_ci			if (time_after(bfqq->budget_timeout,
334062306a36Sopenharmony_ci				       bfqq->last_wr_start_finish))
334162306a36Sopenharmony_ci				bfqq->last_wr_start_finish +=
334262306a36Sopenharmony_ci					jiffies - bfqq->budget_timeout;
334362306a36Sopenharmony_ci			else
334462306a36Sopenharmony_ci				bfqq->last_wr_start_finish = jiffies;
334562306a36Sopenharmony_ci		}
334662306a36Sopenharmony_ci
334762306a36Sopenharmony_ci		bfq_set_budget_timeout(bfqd, bfqq);
334862306a36Sopenharmony_ci		bfq_log_bfqq(bfqd, bfqq,
334962306a36Sopenharmony_ci			     "set_in_service_queue, cur-budget = %d",
335062306a36Sopenharmony_ci			     bfqq->entity.budget);
335162306a36Sopenharmony_ci	}
335262306a36Sopenharmony_ci
335362306a36Sopenharmony_ci	bfqd->in_service_queue = bfqq;
335462306a36Sopenharmony_ci	bfqd->in_serv_last_pos = 0;
335562306a36Sopenharmony_ci}
335662306a36Sopenharmony_ci
335762306a36Sopenharmony_ci/*
335862306a36Sopenharmony_ci * Get and set a new queue for service.
335962306a36Sopenharmony_ci */
336062306a36Sopenharmony_cistatic struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
336162306a36Sopenharmony_ci{
336262306a36Sopenharmony_ci	struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
336362306a36Sopenharmony_ci
336462306a36Sopenharmony_ci	__bfq_set_in_service_queue(bfqd, bfqq);
336562306a36Sopenharmony_ci	return bfqq;
336662306a36Sopenharmony_ci}
336762306a36Sopenharmony_ci
336862306a36Sopenharmony_cistatic void bfq_arm_slice_timer(struct bfq_data *bfqd)
336962306a36Sopenharmony_ci{
337062306a36Sopenharmony_ci	struct bfq_queue *bfqq = bfqd->in_service_queue;
337162306a36Sopenharmony_ci	u32 sl;
337262306a36Sopenharmony_ci
337362306a36Sopenharmony_ci	bfq_mark_bfqq_wait_request(bfqq);
337462306a36Sopenharmony_ci
337562306a36Sopenharmony_ci	/*
337662306a36Sopenharmony_ci	 * We don't want to idle for seeks, but we do want to allow
337762306a36Sopenharmony_ci	 * fair distribution of slice time for a process doing back-to-back
337862306a36Sopenharmony_ci	 * seeks. So allow a little bit of time for it to submit a new rq.
337962306a36Sopenharmony_ci	 */
338062306a36Sopenharmony_ci	sl = bfqd->bfq_slice_idle;
338162306a36Sopenharmony_ci	/*
338262306a36Sopenharmony_ci	 * Unless the queue is being weight-raised or the scenario is
338362306a36Sopenharmony_ci	 * asymmetric, grant only minimum idle time if the queue
338462306a36Sopenharmony_ci	 * is seeky. A long idling is preserved for a weight-raised
338562306a36Sopenharmony_ci	 * queue, or, more in general, in an asymmetric scenario,
338662306a36Sopenharmony_ci	 * because a long idling is needed for guaranteeing to a queue
338762306a36Sopenharmony_ci	 * its reserved share of the throughput (in particular, it is
338862306a36Sopenharmony_ci	 * needed if the queue has a higher weight than some other
338962306a36Sopenharmony_ci	 * queue).
339062306a36Sopenharmony_ci	 */
339162306a36Sopenharmony_ci	if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
339262306a36Sopenharmony_ci	    !bfq_asymmetric_scenario(bfqd, bfqq))
339362306a36Sopenharmony_ci		sl = min_t(u64, sl, BFQ_MIN_TT);
339462306a36Sopenharmony_ci	else if (bfqq->wr_coeff > 1)
339562306a36Sopenharmony_ci		sl = max_t(u32, sl, 20ULL * NSEC_PER_MSEC);
339662306a36Sopenharmony_ci
339762306a36Sopenharmony_ci	bfqd->last_idling_start = ktime_get();
339862306a36Sopenharmony_ci	bfqd->last_idling_start_jiffies = jiffies;
339962306a36Sopenharmony_ci
340062306a36Sopenharmony_ci	hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
340162306a36Sopenharmony_ci		      HRTIMER_MODE_REL);
340262306a36Sopenharmony_ci	bfqg_stats_set_start_idle_time(bfqq_group(bfqq));
340362306a36Sopenharmony_ci}
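
/*
 * Illustrative example of the idling-time choice above (default values
 * assumed for illustration only): with a slice_idle of about 8 ms, a
 * seeky, non-weight-raised queue in a symmetric scenario is granted
 * only the minimum idle time BFQ_MIN_TT (a couple of milliseconds),
 * since waiting longer for a random I/O pattern would mostly waste the
 * device; a weight-raised queue, in contrast, is granted at least
 * 20 ms, so that its reserved share of the throughput is preserved.
 */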
340462306a36Sopenharmony_ci
340562306a36Sopenharmony_ci/*
340662306a36Sopenharmony_ci * In autotuning mode, max_budget is dynamically recomputed as the
340762306a36Sopenharmony_ci * amount of sectors transferred in timeout at the estimated peak
340862306a36Sopenharmony_ci * rate. This enables BFQ to utilize a full timeslice with a full
340962306a36Sopenharmony_ci * budget, even if the in-service queue is served at peak rate. And
341062306a36Sopenharmony_ci * this maximises throughput with sequential workloads.
341162306a36Sopenharmony_ci */
341262306a36Sopenharmony_cistatic unsigned long bfq_calc_max_budget(struct bfq_data *bfqd)
341362306a36Sopenharmony_ci{
341462306a36Sopenharmony_ci	return (u64)bfqd->peak_rate * USEC_PER_MSEC *
341562306a36Sopenharmony_ci		jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT;
341662306a36Sopenharmony_ci}
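
/*
 * Unit check and worked example for the formula above (figures assumed
 * for illustration only): peak_rate is kept in sectors per usec,
 * left-shifted by BFQ_RATE_SHIFT (see the rate computation in
 * bfq_update_rate_reset()), so multiplying by USEC_PER_MSEC and by the
 * timeout expressed in msecs, and then shifting right by
 * BFQ_RATE_SHIFT, yields the number of sectors the device can transfer
 * at peak rate within one budget timeout. E.g., a device sustaining
 * roughly 0.4 sectors/usec (about 200 MB/s with 512-byte sectors) and
 * a 125 ms timeout would get a max budget of about
 * 0.4 * 1000 * 125 = 50000 sectors, i.e. roughly 25 MB.
 */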
341762306a36Sopenharmony_ci
341862306a36Sopenharmony_ci/*
341962306a36Sopenharmony_ci * Update parameters related to throughput and responsiveness, as a
342062306a36Sopenharmony_ci * function of the estimated peak rate. See comments on
342162306a36Sopenharmony_ci * bfq_calc_max_budget(), and on the ref_wr_duration array.
342262306a36Sopenharmony_ci */
342362306a36Sopenharmony_cistatic void update_thr_responsiveness_params(struct bfq_data *bfqd)
342462306a36Sopenharmony_ci{
342562306a36Sopenharmony_ci	if (bfqd->bfq_user_max_budget == 0) {
342662306a36Sopenharmony_ci		bfqd->bfq_max_budget =
342762306a36Sopenharmony_ci			bfq_calc_max_budget(bfqd);
342862306a36Sopenharmony_ci		bfq_log(bfqd, "new max_budget = %d", bfqd->bfq_max_budget);
342962306a36Sopenharmony_ci	}
343062306a36Sopenharmony_ci}
343162306a36Sopenharmony_ci
343262306a36Sopenharmony_cistatic void bfq_reset_rate_computation(struct bfq_data *bfqd,
343362306a36Sopenharmony_ci				       struct request *rq)
343462306a36Sopenharmony_ci{
343562306a36Sopenharmony_ci	if (rq != NULL) { /* new rq dispatch now, reset accordingly */
343662306a36Sopenharmony_ci		bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns();
343762306a36Sopenharmony_ci		bfqd->peak_rate_samples = 1;
343862306a36Sopenharmony_ci		bfqd->sequential_samples = 0;
343962306a36Sopenharmony_ci		bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size =
344062306a36Sopenharmony_ci			blk_rq_sectors(rq);
344162306a36Sopenharmony_ci	} else /* no new rq dispatched, just reset the number of samples */
344262306a36Sopenharmony_ci		bfqd->peak_rate_samples = 0; /* full re-init on next disp. */
344362306a36Sopenharmony_ci
344462306a36Sopenharmony_ci	bfq_log(bfqd,
344562306a36Sopenharmony_ci		"reset_rate_computation at end, sample %u/%u tot_sects %llu",
344662306a36Sopenharmony_ci		bfqd->peak_rate_samples, bfqd->sequential_samples,
344762306a36Sopenharmony_ci		bfqd->tot_sectors_dispatched);
344862306a36Sopenharmony_ci}
344962306a36Sopenharmony_ci
345062306a36Sopenharmony_cistatic void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
345162306a36Sopenharmony_ci{
345262306a36Sopenharmony_ci	u32 rate, weight, divisor;
345362306a36Sopenharmony_ci
345462306a36Sopenharmony_ci	/*
345562306a36Sopenharmony_ci	 * For the convergence property to hold (see comments on
345662306a36Sopenharmony_ci	 * bfq_update_peak_rate()) and for the assessment to be
345762306a36Sopenharmony_ci	 * reliable, a minimum number of samples must be present, and
345862306a36Sopenharmony_ci	 * a minimum amount of time must have elapsed. If not so, do
345962306a36Sopenharmony_ci	 * not compute new rate. Just reset parameters, to get ready
346062306a36Sopenharmony_ci	 * for a new evaluation attempt.
346162306a36Sopenharmony_ci	 */
346262306a36Sopenharmony_ci	if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES ||
346362306a36Sopenharmony_ci	    bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL)
346462306a36Sopenharmony_ci		goto reset_computation;
346562306a36Sopenharmony_ci
346662306a36Sopenharmony_ci	/*
346762306a36Sopenharmony_ci	 * If a new request completion has occurred after last
346862306a36Sopenharmony_ci	 * dispatch, then, to approximate the rate at which requests
346962306a36Sopenharmony_ci	 * have been served by the device, it is more precise to
347062306a36Sopenharmony_ci	 * extend the observation interval to the last completion.
347162306a36Sopenharmony_ci	 */
347262306a36Sopenharmony_ci	bfqd->delta_from_first =
347362306a36Sopenharmony_ci		max_t(u64, bfqd->delta_from_first,
347462306a36Sopenharmony_ci		      bfqd->last_completion - bfqd->first_dispatch);
347562306a36Sopenharmony_ci
347662306a36Sopenharmony_ci	/*
347762306a36Sopenharmony_ci	 * Rate computed in sects/usec, and not sects/nsec, for
347862306a36Sopenharmony_ci	 * precision issues.
347962306a36Sopenharmony_ci	 */
348062306a36Sopenharmony_ci	rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT,
348162306a36Sopenharmony_ci			div_u64(bfqd->delta_from_first, NSEC_PER_USEC));
348262306a36Sopenharmony_ci
348362306a36Sopenharmony_ci	/*
348462306a36Sopenharmony_ci	 * Peak rate not updated if:
348562306a36Sopenharmony_ci	 * - the percentage of sequential dispatches is below 3/4 of the
348662306a36Sopenharmony_ci	 *   total, and rate is below the current estimated peak rate
348762306a36Sopenharmony_ci	 * - rate is unreasonably high (> 20M sectors/sec)
348862306a36Sopenharmony_ci	 */
348962306a36Sopenharmony_ci	if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 &&
349062306a36Sopenharmony_ci	     rate <= bfqd->peak_rate) ||
349162306a36Sopenharmony_ci		rate > 20<<BFQ_RATE_SHIFT)
349262306a36Sopenharmony_ci		goto reset_computation;
349362306a36Sopenharmony_ci
349462306a36Sopenharmony_ci	/*
349562306a36Sopenharmony_ci	 * We have to update the peak rate, at last! To this purpose,
349662306a36Sopenharmony_ci	 * we use a low-pass filter. We compute the smoothing constant
349762306a36Sopenharmony_ci	 * of the filter as a function of the 'weight' of the new
349862306a36Sopenharmony_ci	 * measured rate.
349962306a36Sopenharmony_ci	 *
350062306a36Sopenharmony_ci	 * As can be seen in the next formulas, we define this weight as a
350162306a36Sopenharmony_ci	 * quantity proportional to how sequential the workload is,
350262306a36Sopenharmony_ci	 * and to how long the observation time interval is.
350362306a36Sopenharmony_ci	 *
350462306a36Sopenharmony_ci	 * The weight runs from 0 to 8. The maximum value of the
350562306a36Sopenharmony_ci	 * weight, 8, yields the minimum value for the smoothing
350662306a36Sopenharmony_ci	 * constant. At this minimum value for the smoothing constant,
350762306a36Sopenharmony_ci	 * the measured rate contributes for half of the next value of
350862306a36Sopenharmony_ci	 * the estimated peak rate.
350962306a36Sopenharmony_ci	 *
351062306a36Sopenharmony_ci	 * So, the first step is to compute the weight as a function
351162306a36Sopenharmony_ci	 * of how sequential the workload is. Note that the weight
351262306a36Sopenharmony_ci	 * cannot reach 9, because bfqd->sequential_samples cannot
351362306a36Sopenharmony_ci	 * become equal to bfqd->peak_rate_samples, which, in its
351462306a36Sopenharmony_ci	 * turn, holds true because bfqd->sequential_samples is not
351562306a36Sopenharmony_ci	 * incremented for the first sample.
351662306a36Sopenharmony_ci	 */
351762306a36Sopenharmony_ci	weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples;
351862306a36Sopenharmony_ci
351962306a36Sopenharmony_ci	/*
352062306a36Sopenharmony_ci	 * Second step: further refine the weight as a function of the
352162306a36Sopenharmony_ci	 * duration of the observation interval.
352262306a36Sopenharmony_ci	 */
352362306a36Sopenharmony_ci	weight = min_t(u32, 8,
352462306a36Sopenharmony_ci		       div_u64(weight * bfqd->delta_from_first,
352562306a36Sopenharmony_ci			       BFQ_RATE_REF_INTERVAL));
352662306a36Sopenharmony_ci
352762306a36Sopenharmony_ci	/*
352862306a36Sopenharmony_ci	 * Divisor ranging from 10, for minimum weight, to 2, for
352962306a36Sopenharmony_ci	 * maximum weight.
353062306a36Sopenharmony_ci	 */
353162306a36Sopenharmony_ci	divisor = 10 - weight;
353262306a36Sopenharmony_ci
353362306a36Sopenharmony_ci	/*
353462306a36Sopenharmony_ci	 * Finally, update peak rate:
353562306a36Sopenharmony_ci	 *
353662306a36Sopenharmony_ci	 * peak_rate = peak_rate * (divisor-1) / divisor  +  rate / divisor
353762306a36Sopenharmony_ci	 */
353862306a36Sopenharmony_ci	bfqd->peak_rate *= divisor-1;
353962306a36Sopenharmony_ci	bfqd->peak_rate /= divisor;
354062306a36Sopenharmony_ci	rate /= divisor; /* smoothing constant alpha = 1/divisor */
354162306a36Sopenharmony_ci
354262306a36Sopenharmony_ci	bfqd->peak_rate += rate;
354362306a36Sopenharmony_ci
354462306a36Sopenharmony_ci	/*
354562306a36Sopenharmony_ci	 * For a very slow device, bfqd->peak_rate can reach 0 (see
354662306a36Sopenharmony_ci	 * the minimum representable values reported in the comments
354762306a36Sopenharmony_ci	 * on BFQ_RATE_SHIFT). Push to 1 if this happens, to avoid
354862306a36Sopenharmony_ci	 * divisions by zero where bfqd->peak_rate is used as a
354962306a36Sopenharmony_ci	 * divisor.
355062306a36Sopenharmony_ci	 */
355162306a36Sopenharmony_ci	bfqd->peak_rate = max_t(u32, 1, bfqd->peak_rate);
355262306a36Sopenharmony_ci
355362306a36Sopenharmony_ci	update_thr_responsiveness_params(bfqd);
355462306a36Sopenharmony_ci
355562306a36Sopenharmony_cireset_computation:
355662306a36Sopenharmony_ci	bfq_reset_rate_computation(bfqd, rq);
355762306a36Sopenharmony_ci}
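
/*
 * Worked example of the low-pass filter above (numbers assumed for
 * illustration only): for a fully sequential sample set observed over
 * at least the reference interval, the weight evaluates to 8, the
 * divisor to 10 - 8 = 2, and the new estimate becomes
 * peak_rate/2 + measured_rate/2, i.e. the measurement contributes half
 * of the new value. For a mostly random workload observed over a short
 * interval, the weight stays near 0, the divisor near 10, and the
 * measurement only nudges the estimate by about one tenth.
 */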
355862306a36Sopenharmony_ci
355962306a36Sopenharmony_ci/*
356062306a36Sopenharmony_ci * Update the read/write peak rate (the main quantity used for
356162306a36Sopenharmony_ci * auto-tuning, see update_thr_responsiveness_params()).
356262306a36Sopenharmony_ci *
356362306a36Sopenharmony_ci * It is not trivial to estimate the peak rate (correctly): because of
356462306a36Sopenharmony_ci * the presence of sw and hw queues between the scheduler and the
356562306a36Sopenharmony_ci * device components that finally serve I/O requests, it is hard to
356662306a36Sopenharmony_ci * say exactly when a given dispatched request is served inside the
356762306a36Sopenharmony_ci * device, and for how long. As a consequence, it is hard to know
356862306a36Sopenharmony_ci * precisely at what rate a given set of requests is actually served
356962306a36Sopenharmony_ci * by the device.
357062306a36Sopenharmony_ci *
357162306a36Sopenharmony_ci * On the opposite end, the dispatch time of any request is trivially
357262306a36Sopenharmony_ci * available, and, from this piece of information, the "dispatch rate"
357362306a36Sopenharmony_ci * of requests can be immediately computed. So, the idea in the next
357462306a36Sopenharmony_ci * function is to use what is known, namely request dispatch times
357562306a36Sopenharmony_ci * (plus, when useful, request completion times), to estimate what is
357662306a36Sopenharmony_ci * unknown, namely in-device request service rate.
357762306a36Sopenharmony_ci *
357862306a36Sopenharmony_ci * The main issue is that, because of the above facts, the rate at
357962306a36Sopenharmony_ci * which a certain set of requests is dispatched over a certain time
358062306a36Sopenharmony_ci * interval can vary greatly with respect to the rate at which the
358162306a36Sopenharmony_ci * same requests are then served. But, since the size of any
358262306a36Sopenharmony_ci * intermediate queue is limited, and the service scheme is lossless
358362306a36Sopenharmony_ci * (no request is silently dropped), the following obvious convergence
358462306a36Sopenharmony_ci * property holds: the number of requests dispatched MUST become
358562306a36Sopenharmony_ci * closer and closer to the number of requests completed as the
358662306a36Sopenharmony_ci * observation interval grows. This is the key property used in
358762306a36Sopenharmony_ci * the next function to estimate the peak service rate as a function
358862306a36Sopenharmony_ci * of the observed dispatch rate. The function is assumed to be invoked
358962306a36Sopenharmony_ci * on every request dispatch.
359062306a36Sopenharmony_ci */
359162306a36Sopenharmony_cistatic void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
359262306a36Sopenharmony_ci{
359362306a36Sopenharmony_ci	u64 now_ns = ktime_get_ns();
359462306a36Sopenharmony_ci
359562306a36Sopenharmony_ci	if (bfqd->peak_rate_samples == 0) { /* first dispatch */
359662306a36Sopenharmony_ci		bfq_log(bfqd, "update_peak_rate: goto reset, samples %d",
359762306a36Sopenharmony_ci			bfqd->peak_rate_samples);
359862306a36Sopenharmony_ci		bfq_reset_rate_computation(bfqd, rq);
359962306a36Sopenharmony_ci		goto update_last_values; /* will add one sample */
360062306a36Sopenharmony_ci	}
360162306a36Sopenharmony_ci
360262306a36Sopenharmony_ci	/*
360362306a36Sopenharmony_ci	 * Device idle for very long: the observation interval lasting
360462306a36Sopenharmony_ci	 * up to this dispatch cannot be a valid observation interval
360562306a36Sopenharmony_ci	 * for computing a new peak rate (similarly to the late-
360662306a36Sopenharmony_ci	 * completion event in bfq_completed_request()). Go to
360762306a36Sopenharmony_ci	 * update_rate_and_reset to have the following three steps
360862306a36Sopenharmony_ci	 * taken:
360962306a36Sopenharmony_ci	 * - close the observation interval at the last (previous)
361062306a36Sopenharmony_ci	 *   request dispatch or completion
361162306a36Sopenharmony_ci	 * - compute rate, if possible, for that observation interval
361262306a36Sopenharmony_ci	 * - start a new observation interval with this dispatch
361362306a36Sopenharmony_ci	 */
361462306a36Sopenharmony_ci	if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC &&
361562306a36Sopenharmony_ci	    bfqd->tot_rq_in_driver == 0)
361662306a36Sopenharmony_ci		goto update_rate_and_reset;
361762306a36Sopenharmony_ci
361862306a36Sopenharmony_ci	/* Update sampling information */
361962306a36Sopenharmony_ci	bfqd->peak_rate_samples++;
362062306a36Sopenharmony_ci
362162306a36Sopenharmony_ci	if ((bfqd->tot_rq_in_driver > 0 ||
362262306a36Sopenharmony_ci		now_ns - bfqd->last_completion < BFQ_MIN_TT)
362362306a36Sopenharmony_ci	    && !BFQ_RQ_SEEKY(bfqd, bfqd->last_position, rq))
362462306a36Sopenharmony_ci		bfqd->sequential_samples++;
362562306a36Sopenharmony_ci
362662306a36Sopenharmony_ci	bfqd->tot_sectors_dispatched += blk_rq_sectors(rq);
362762306a36Sopenharmony_ci
362862306a36Sopenharmony_ci	/* Reset max observed rq size every 32 dispatches */
362962306a36Sopenharmony_ci	if (likely(bfqd->peak_rate_samples % 32))
363062306a36Sopenharmony_ci		bfqd->last_rq_max_size =
363162306a36Sopenharmony_ci			max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size);
363262306a36Sopenharmony_ci	else
363362306a36Sopenharmony_ci		bfqd->last_rq_max_size = blk_rq_sectors(rq);
363462306a36Sopenharmony_ci
363562306a36Sopenharmony_ci	bfqd->delta_from_first = now_ns - bfqd->first_dispatch;
363662306a36Sopenharmony_ci
363762306a36Sopenharmony_ci	/* Target observation interval not yet reached, go on sampling */
363862306a36Sopenharmony_ci	if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL)
363962306a36Sopenharmony_ci		goto update_last_values;
364062306a36Sopenharmony_ci
364162306a36Sopenharmony_ciupdate_rate_and_reset:
364262306a36Sopenharmony_ci	bfq_update_rate_reset(bfqd, rq);
364362306a36Sopenharmony_ciupdate_last_values:
364462306a36Sopenharmony_ci	bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
364562306a36Sopenharmony_ci	if (RQ_BFQQ(rq) == bfqd->in_service_queue)
364662306a36Sopenharmony_ci		bfqd->in_serv_last_pos = bfqd->last_position;
364762306a36Sopenharmony_ci	bfqd->last_dispatch = now_ns;
364862306a36Sopenharmony_ci}
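
/*
 * Illustrative recap of the sampling done above: every dispatch adds
 * one sample; the sample also counts as sequential if the drive is not
 * idle (requests still in flight, or a completion within BFQ_MIN_TT)
 * and the new request is not seeky with respect to the previous
 * dispatch position. A dispatch arriving after the device has been
 * fully idle for more than 100 ms first closes the current observation
 * interval (possibly updating the peak rate) and then starts a new
 * one. The interval is also closed, through bfq_update_rate_reset(),
 * once it reaches BFQ_RATE_REF_INTERVAL.
 */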
364962306a36Sopenharmony_ci
365062306a36Sopenharmony_ci/*
365162306a36Sopenharmony_ci * Remove request from internal lists.
365262306a36Sopenharmony_ci */
365362306a36Sopenharmony_cistatic void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
365462306a36Sopenharmony_ci{
365562306a36Sopenharmony_ci	struct bfq_queue *bfqq = RQ_BFQQ(rq);
365662306a36Sopenharmony_ci
365762306a36Sopenharmony_ci	/*
365862306a36Sopenharmony_ci	 * For consistency, the next instruction should have been
365962306a36Sopenharmony_ci	 * executed after removing the request from the queue and
366062306a36Sopenharmony_ci	 * dispatching it.  We execute instead this instruction before
366162306a36Sopenharmony_ci	 * bfq_remove_request() (and hence introduce a temporary
366262306a36Sopenharmony_ci	 * inconsistency), for efficiency.  In fact, should this
366362306a36Sopenharmony_ci	 * dispatch occur for a non in-service bfqq, this anticipated
366462306a36Sopenharmony_ci	 * increment prevents two counters related to bfqq->dispatched
366562306a36Sopenharmony_ci	 * from being, first, uselessly decremented, and then
366662306a36Sopenharmony_ci	 * incremented again when the (new) value of bfqq->dispatched
366762306a36Sopenharmony_ci	 * happens to be taken into account.
366862306a36Sopenharmony_ci	 */
366962306a36Sopenharmony_ci	bfqq->dispatched++;
367062306a36Sopenharmony_ci	bfq_update_peak_rate(q->elevator->elevator_data, rq);
367162306a36Sopenharmony_ci
367262306a36Sopenharmony_ci	bfq_remove_request(q, rq);
367362306a36Sopenharmony_ci}
367462306a36Sopenharmony_ci
367562306a36Sopenharmony_ci/*
367662306a36Sopenharmony_ci * There is a case where idling does not have to be performed for
367762306a36Sopenharmony_ci * throughput concerns, but to preserve the throughput share of
367862306a36Sopenharmony_ci * the process associated with bfqq.
367962306a36Sopenharmony_ci *
368062306a36Sopenharmony_ci * To introduce this case, we can note that allowing the drive
368162306a36Sopenharmony_ci * to enqueue more than one request at a time, and hence
368262306a36Sopenharmony_ci * delegating de facto final scheduling decisions to the
368362306a36Sopenharmony_ci * drive's internal scheduler, entails loss of control on the
368462306a36Sopenharmony_ci * actual request service order. In particular, the critical
368562306a36Sopenharmony_ci * situation is when requests from different processes happen
368662306a36Sopenharmony_ci * to be present, at the same time, in the internal queue(s)
368762306a36Sopenharmony_ci * of the drive. In such a situation, the drive, by deciding
368862306a36Sopenharmony_ci * the service order of the internally-queued requests, does
368962306a36Sopenharmony_ci * determine also the actual throughput distribution among
369062306a36Sopenharmony_ci * these processes. But the drive typically has no notion or
369162306a36Sopenharmony_ci * concern about per-process throughput distribution, and
369262306a36Sopenharmony_ci * makes its decisions only on a per-request basis. Therefore,
369362306a36Sopenharmony_ci * the service distribution enforced by the drive's internal
369462306a36Sopenharmony_ci * scheduler is likely to coincide with the desired throughput
369562306a36Sopenharmony_ci * distribution only in a completely symmetric, or favorably
369662306a36Sopenharmony_ci * skewed scenario where:
369762306a36Sopenharmony_ci * (i-a) each of these processes must get the same throughput as
369862306a36Sopenharmony_ci *	 the others,
369962306a36Sopenharmony_ci * (i-b) in case (i-a) does not hold, it holds that the process
370062306a36Sopenharmony_ci *       associated with bfqq must receive a throughput lower
370162306a36Sopenharmony_ci *	 than or equal to that of any of the other processes;
370262306a36Sopenharmony_ci * (ii)  the I/O of each process has the same properties, in
370362306a36Sopenharmony_ci *       terms of locality (sequential or random), direction
370462306a36Sopenharmony_ci *       (reads or writes), request sizes, greediness
370562306a36Sopenharmony_ci *       (from I/O-bound to sporadic), and so on;
370662306a36Sopenharmony_ci *
370762306a36Sopenharmony_ci * In fact, in such a scenario, the drive tends to treat the requests
370862306a36Sopenharmony_ci * of each process in about the same way as the requests of the
370962306a36Sopenharmony_ci * others, and thus to provide each of these processes with about the
371062306a36Sopenharmony_ci * same throughput.  This is exactly the desired throughput
371162306a36Sopenharmony_ci * distribution if (i-a) holds, or, if (i-b) holds instead, this is an
371262306a36Sopenharmony_ci * even more convenient distribution for (the process associated with)
371362306a36Sopenharmony_ci * bfqq.
371462306a36Sopenharmony_ci *
371562306a36Sopenharmony_ci * In contrast, in any asymmetric or unfavorable scenario, device
371662306a36Sopenharmony_ci * idling (I/O-dispatch plugging) is certainly needed to guarantee
371762306a36Sopenharmony_ci * that bfqq receives its assigned fraction of the device throughput
371862306a36Sopenharmony_ci * (see [1] for details).
371962306a36Sopenharmony_ci *
372062306a36Sopenharmony_ci * The problem is that idling may significantly reduce throughput with
372162306a36Sopenharmony_ci * certain combinations of types of I/O and devices. An important
372262306a36Sopenharmony_ci * example is sync random I/O on flash storage with command
372362306a36Sopenharmony_ci * queueing. So, unless bfqq falls in cases where idling also boosts
372462306a36Sopenharmony_ci * throughput, it is important to check conditions (i-a), (i-b) and
372562306a36Sopenharmony_ci * (ii) accurately, so as to avoid idling when not strictly needed for
372662306a36Sopenharmony_ci * service guarantees.
372762306a36Sopenharmony_ci *
372862306a36Sopenharmony_ci * Unfortunately, it is extremely difficult to thoroughly check
372962306a36Sopenharmony_ci * condition (ii). And, in case there are active groups, it becomes
373062306a36Sopenharmony_ci * very difficult to check conditions (i-a) and (i-b) too.  In fact,
373162306a36Sopenharmony_ci * if there are active groups, then, for conditions (i-a) or (i-b) to
373262306a36Sopenharmony_ci * become false 'indirectly', it is enough that an active group
373362306a36Sopenharmony_ci * contains more active processes or sub-groups than some other active
373462306a36Sopenharmony_ci * group. More precisely, for conditions (i-a) or (i-b) to become
373562306a36Sopenharmony_ci * false because of such a group, it is not even necessary that the
373662306a36Sopenharmony_ci * group is (still) active: it is sufficient that, even if the group
373762306a36Sopenharmony_ci * has become inactive, some of its descendant processes still have
373862306a36Sopenharmony_ci * some request already dispatched but still waiting for
373962306a36Sopenharmony_ci * completion. In fact, requests have still to be guaranteed their
374062306a36Sopenharmony_ci * share of the throughput even after being dispatched. In this
374162306a36Sopenharmony_ci * respect, it is easy to show that, if a group frequently becomes
374262306a36Sopenharmony_ci * inactive while still having in-flight requests, and if, when this
374362306a36Sopenharmony_ci * happens, the group is not considered in the calculation of whether
374462306a36Sopenharmony_ci * the scenario is asymmetric, then the group may fail to be
374562306a36Sopenharmony_ci * guaranteed its fair share of the throughput (basically because
374662306a36Sopenharmony_ci * idling may not be performed for the descendant processes of the
374762306a36Sopenharmony_ci * group, although it should have been). We address this issue with
374862306a36Sopenharmony_ci * the following bi-modal behavior, implemented in the function
374962306a36Sopenharmony_ci * bfq_asymmetric_scenario().
375062306a36Sopenharmony_ci *
375162306a36Sopenharmony_ci * If there are groups with requests waiting for completion
375262306a36Sopenharmony_ci * (as commented above, some of these groups may even be
375362306a36Sopenharmony_ci * already inactive), then the scenario is tagged as
375462306a36Sopenharmony_ci * asymmetric, conservatively, without checking any of the
375562306a36Sopenharmony_ci * conditions (i-a), (i-b) or (ii). So the device is idled for bfqq.
375662306a36Sopenharmony_ci * This behavior matches also the fact that groups are created
375762306a36Sopenharmony_ci * exactly if controlling I/O is a primary concern (to
375862306a36Sopenharmony_ci * preserve bandwidth and latency guarantees).
375962306a36Sopenharmony_ci *
376062306a36Sopenharmony_ci * On the opposite end, if there are no groups with requests waiting
376162306a36Sopenharmony_ci * for completion, then only conditions (i-a) and (i-b) are actually
376262306a36Sopenharmony_ci * controlled, i.e., provided that condition (i-a) or (i-b) holds,
376362306a36Sopenharmony_ci * idling is not performed, regardless of whether condition (ii)
376462306a36Sopenharmony_ci * holds.  In other words, only if conditions (i-a) and (i-b) do not
376562306a36Sopenharmony_ci * hold, then idling is allowed, and the device tends to be prevented
376662306a36Sopenharmony_ci * from queueing many requests, possibly of several processes. Since
376762306a36Sopenharmony_ci * there are no groups with requests waiting for completion, then, to
376862306a36Sopenharmony_ci * control conditions (i-a) and (i-b) it is enough to check just
376962306a36Sopenharmony_ci * whether all the queues with requests waiting for completion also
377062306a36Sopenharmony_ci * have the same weight.
377162306a36Sopenharmony_ci *
377262306a36Sopenharmony_ci * Not checking condition (ii) evidently exposes bfqq to the
377362306a36Sopenharmony_ci * risk of getting less throughput than its fair share.
377462306a36Sopenharmony_ci * However, for queues with the same weight, a further
377562306a36Sopenharmony_ci * mechanism, preemption, mitigates or even eliminates this
377662306a36Sopenharmony_ci * problem. And it does so without consequences on overall
377762306a36Sopenharmony_ci * throughput. This mechanism and its benefits are explained
377862306a36Sopenharmony_ci * in the next three paragraphs.
377962306a36Sopenharmony_ci *
378062306a36Sopenharmony_ci * Even if a queue, say Q, is expired when it remains idle, Q
378162306a36Sopenharmony_ci * can still preempt the new in-service queue if the next
378262306a36Sopenharmony_ci * request of Q arrives soon (see the comments on
378362306a36Sopenharmony_ci * bfq_bfqq_update_budg_for_activation). If all queues and
378462306a36Sopenharmony_ci * groups have the same weight, this form of preemption,
378562306a36Sopenharmony_ci * combined with the hole-recovery heuristic described in the
378662306a36Sopenharmony_ci * comments on function bfq_bfqq_update_budg_for_activation,
378762306a36Sopenharmony_ci * are enough to preserve a correct bandwidth distribution in
378862306a36Sopenharmony_ci * the mid term, even without idling. In fact, even if, by not
378962306a36Sopenharmony_ci * idling, we allow the internal queues of the device to contain
379062306a36Sopenharmony_ci * many requests, and thus to reorder requests, we can rather
379162306a36Sopenharmony_ci * safely assume that the internal scheduler still preserves a
379262306a36Sopenharmony_ci * minimum of mid-term fairness.
379362306a36Sopenharmony_ci *
379462306a36Sopenharmony_ci * More precisely, this preemption-based, idleless approach
379562306a36Sopenharmony_ci * provides fairness in terms of IOPS, and not sectors per
379662306a36Sopenharmony_ci * second. This can be seen with a simple example. Suppose
379762306a36Sopenharmony_ci * that there are two queues with the same weight, but that
379862306a36Sopenharmony_ci * the first queue receives requests of 8 sectors, while the
379962306a36Sopenharmony_ci * second queue receives requests of 1024 sectors. In
380062306a36Sopenharmony_ci * addition, suppose that each of the two queues contains at
380162306a36Sopenharmony_ci * most one request at a time, which implies that each queue
380262306a36Sopenharmony_ci * always remains idle after it is served. Finally, after
380362306a36Sopenharmony_ci * remaining idle, each queue receives very quickly a new
380462306a36Sopenharmony_ci * request. It follows that the two queues are served
380562306a36Sopenharmony_ci * alternatively, preempting each other if needed. This
380662306a36Sopenharmony_ci * implies that, although both queues have the same weight,
380762306a36Sopenharmony_ci * the queue with large requests receives a service that is
380862306a36Sopenharmony_ci * 1024/8 times as high as the service received by the other
380962306a36Sopenharmony_ci * queue.
381062306a36Sopenharmony_ci *
381162306a36Sopenharmony_ci * The motivation for using preemption instead of idling (for
381262306a36Sopenharmony_ci * queues with the same weight) is that, by not idling,
381362306a36Sopenharmony_ci * service guarantees are preserved (completely or at least in
381462306a36Sopenharmony_ci * part) without sacrificing any throughput. And, if
381562306a36Sopenharmony_ci * there is no active group, then the primary expectation for
381662306a36Sopenharmony_ci * this device is probably a high throughput.
381762306a36Sopenharmony_ci *
381862306a36Sopenharmony_ci * We are now left only with explaining the two sub-conditions in the
381962306a36Sopenharmony_ci * additional compound condition that is checked below for deciding
382062306a36Sopenharmony_ci * whether the scenario is asymmetric. To explain the first
382162306a36Sopenharmony_ci * sub-condition, we need to add that the function
382262306a36Sopenharmony_ci * bfq_asymmetric_scenario checks the weights of only
382362306a36Sopenharmony_ci * non-weight-raised queues, for efficiency reasons (see comments on
382462306a36Sopenharmony_ci * bfq_weights_tree_add()). Then the fact that bfqq is weight-raised
382562306a36Sopenharmony_ci * is checked explicitly here. More precisely, the compound condition
382662306a36Sopenharmony_ci * below takes into account also the fact that, even if bfqq is being
382762306a36Sopenharmony_ci * weight-raised, the scenario is still symmetric if all queues with
382862306a36Sopenharmony_ci * requests waiting for completion happen to be
382962306a36Sopenharmony_ci * weight-raised. Actually, we should be even more precise here, and
383062306a36Sopenharmony_ci * differentiate between interactive weight raising and soft real-time
383162306a36Sopenharmony_ci * weight raising.
383262306a36Sopenharmony_ci *
383362306a36Sopenharmony_ci * The second sub-condition checked in the compound condition is
383462306a36Sopenharmony_ci * whether there is a fair amount of already in-flight I/O not
383562306a36Sopenharmony_ci * belonging to bfqq. If so, I/O dispatching is to be plugged, for the
383662306a36Sopenharmony_ci * following reason. The drive may decide to serve in-flight
383762306a36Sopenharmony_ci * non-bfqq's I/O requests before bfqq's ones, thereby delaying the
383862306a36Sopenharmony_ci * arrival of new I/O requests for bfqq (recall that bfqq is sync). If
383962306a36Sopenharmony_ci * I/O-dispatching is not plugged, then, while bfqq remains empty, a
384062306a36Sopenharmony_ci * basically uncontrolled amount of I/O from other queues may be
384162306a36Sopenharmony_ci * dispatched too, possibly causing the service of bfqq's I/O to be
384262306a36Sopenharmony_ci * delayed even longer in the drive. This problem gets more and more
384362306a36Sopenharmony_ci * serious as the speed and the queue depth of the drive grow,
384462306a36Sopenharmony_ci * because, as these two quantities grow, the probability to find no
384562306a36Sopenharmony_ci * queue busy but many requests in flight grows too. By contrast,
384662306a36Sopenharmony_ci * plugging I/O dispatching minimizes the delay induced by already
384762306a36Sopenharmony_ci * in-flight I/O, and enables bfqq to recover the bandwidth it may
384862306a36Sopenharmony_ci * lose because of this delay.
384962306a36Sopenharmony_ci *
385062306a36Sopenharmony_ci * As a side note, it is worth considering that the above
385162306a36Sopenharmony_ci * device-idling countermeasures may however fail in the following
385262306a36Sopenharmony_ci * unlucky scenario: if I/O-dispatch plugging is (correctly) disabled
385362306a36Sopenharmony_ci * in a time period during which all symmetry sub-conditions hold, and
385462306a36Sopenharmony_ci * therefore the device is allowed to enqueue many requests, but at
385562306a36Sopenharmony_ci * some later point in time some sub-condition ceases to hold, then it
385662306a36Sopenharmony_ci * may become impossible to make requests be served in the desired
385762306a36Sopenharmony_ci * order until all the requests already queued in the device have been
385862306a36Sopenharmony_ci * served. The last sub-condition commented above somewhat mitigates
385962306a36Sopenharmony_ci * this problem for weight-raised queues.
386062306a36Sopenharmony_ci *
386162306a36Sopenharmony_ci * However, as an additional mitigation for this problem, we preserve
386262306a36Sopenharmony_ci * plugging for a special symmetric case that may suddenly turn into
386362306a36Sopenharmony_ci * asymmetric: the case where only bfqq is busy. In this case, not
386462306a36Sopenharmony_ci * expiring bfqq does not cause any harm to any other queues in terms
386562306a36Sopenharmony_ci * of service guarantees. In contrast, it avoids the following unlucky
386662306a36Sopenharmony_ci * sequence of events: (1) bfqq is expired, (2) a new queue (or more
386762306a36Sopenharmony_ci * than one) with a lower weight than bfqq becomes busy, (3) the new
386862306a36Sopenharmony_ci * queue is served until a new request arrives for bfqq, (4) when bfqq
386962306a36Sopenharmony_ci * is finally served, there are so many requests of the new queue in
387062306a36Sopenharmony_ci * the drive that the pending requests for bfqq take a lot of time to
387162306a36Sopenharmony_ci * be served. In particular, event (2) may cause even already
387262306a36Sopenharmony_ci * dispatched requests of bfqq to be delayed, inside the drive. So, to
387362306a36Sopenharmony_ci * avoid this series of events, the scenario is preventively declared
387462306a36Sopenharmony_ci * as asymmetric also if bfqq is the only busy queue.
387562306a36Sopenharmony_ci */
387662306a36Sopenharmony_cistatic bool idling_needed_for_service_guarantees(struct bfq_data *bfqd,
387762306a36Sopenharmony_ci						 struct bfq_queue *bfqq)
387862306a36Sopenharmony_ci{
387962306a36Sopenharmony_ci	int tot_busy_queues = bfq_tot_busy_queues(bfqd);
388062306a36Sopenharmony_ci
388162306a36Sopenharmony_ci	/* No point in idling for bfqq if it won't get requests any longer */
388262306a36Sopenharmony_ci	if (unlikely(!bfqq_process_refs(bfqq)))
388362306a36Sopenharmony_ci		return false;
388462306a36Sopenharmony_ci
388562306a36Sopenharmony_ci	return (bfqq->wr_coeff > 1 &&
388662306a36Sopenharmony_ci		(bfqd->wr_busy_queues < tot_busy_queues ||
388762306a36Sopenharmony_ci		 bfqd->tot_rq_in_driver >= bfqq->dispatched + 4)) ||
388862306a36Sopenharmony_ci		bfq_asymmetric_scenario(bfqd, bfqq) ||
388962306a36Sopenharmony_ci		tot_busy_queues == 1;
389062306a36Sopenharmony_ci}
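
/*
 * Illustrative user-space sketch (not compiled into this file) of the
 * IOPS-fairness example in the comments above: two equal-weight queues
 * served in strict alternation get the same number of requests, so
 * their sector throughput differs by the ratio of their request sizes
 * (1024/8 = 128 in the example). Names and numbers are hypothetical.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int size_a = 8, size_b = 1024;	/* sectors per request */
	unsigned int rounds = 1000;		/* alternating dispatches each */
	unsigned long sectors_a = (unsigned long)rounds * size_a;
	unsigned long sectors_b = (unsigned long)rounds * size_b;

	/* Same IOPS for both queues, very different bandwidth. */
	printf("A: %lu sectors, B: %lu sectors, B/A = %lu\n",
	       sectors_a, sectors_b, sectors_b / sectors_a);
	return 0;
}
#endif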
389162306a36Sopenharmony_ci
389262306a36Sopenharmony_cistatic bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq,
389362306a36Sopenharmony_ci			      enum bfqq_expiration reason)
389462306a36Sopenharmony_ci{
389562306a36Sopenharmony_ci	/*
389662306a36Sopenharmony_ci	 * If this bfqq is shared between multiple processes, check
389762306a36Sopenharmony_ci	 * to make sure that those processes are still issuing I/Os
389862306a36Sopenharmony_ci	 * within the mean seek distance. If not, it may be time to
389962306a36Sopenharmony_ci	 * break the queues apart again.
390062306a36Sopenharmony_ci	 */
390162306a36Sopenharmony_ci	if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
390262306a36Sopenharmony_ci		bfq_mark_bfqq_split_coop(bfqq);
390362306a36Sopenharmony_ci
390462306a36Sopenharmony_ci	/*
390562306a36Sopenharmony_ci	 * Consider queues with a higher finish virtual time than
390662306a36Sopenharmony_ci	 * bfqq. If idling_needed_for_service_guarantees(bfqq) returns
390762306a36Sopenharmony_ci	 * true, then bfqq's bandwidth would be violated if an
390862306a36Sopenharmony_ci	 * uncontrolled amount of I/O from these queues were
390962306a36Sopenharmony_ci	 * dispatched while bfqq is waiting for its new I/O to
391062306a36Sopenharmony_ci	 * arrive. This is exactly what may happen if this is a forced
391162306a36Sopenharmony_ci	 * expiration caused by a preemption attempt, and if bfqq is
391262306a36Sopenharmony_ci	 * not re-scheduled. To prevent this from happening, re-queue
391362306a36Sopenharmony_ci	 * bfqq if it needs I/O-dispatch plugging, even if it is
391462306a36Sopenharmony_ci	 * empty. By doing so, bfqq is guaranteed to be served before the
391562306a36Sopenharmony_ci	 * above queues (provided that bfqq is of course eligible).
391662306a36Sopenharmony_ci	 */
391762306a36Sopenharmony_ci	if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
391862306a36Sopenharmony_ci	    !(reason == BFQQE_PREEMPTED &&
391962306a36Sopenharmony_ci	      idling_needed_for_service_guarantees(bfqd, bfqq))) {
392062306a36Sopenharmony_ci		if (bfqq->dispatched == 0)
392162306a36Sopenharmony_ci			/*
392262306a36Sopenharmony_ci			 * Overloading budget_timeout field to store
392362306a36Sopenharmony_ci			 * the time at which the queue remains with no
392462306a36Sopenharmony_ci			 * backlog and no outstanding request; used by
392562306a36Sopenharmony_ci			 * the weight-raising mechanism.
392662306a36Sopenharmony_ci			 */
392762306a36Sopenharmony_ci			bfqq->budget_timeout = jiffies;
392862306a36Sopenharmony_ci
392962306a36Sopenharmony_ci		bfq_del_bfqq_busy(bfqq, true);
393062306a36Sopenharmony_ci	} else {
393162306a36Sopenharmony_ci		bfq_requeue_bfqq(bfqd, bfqq, true);
393262306a36Sopenharmony_ci		/*
393362306a36Sopenharmony_ci		 * Resort priority tree of potential close cooperators.
393462306a36Sopenharmony_ci		 * See comments on bfq_pos_tree_add_move() for the unlikely().
393562306a36Sopenharmony_ci		 */
393662306a36Sopenharmony_ci		if (unlikely(!bfqd->nonrot_with_queueing &&
393762306a36Sopenharmony_ci			     !RB_EMPTY_ROOT(&bfqq->sort_list)))
393862306a36Sopenharmony_ci			bfq_pos_tree_add_move(bfqd, bfqq);
393962306a36Sopenharmony_ci	}
394062306a36Sopenharmony_ci
394162306a36Sopenharmony_ci	/*
394262306a36Sopenharmony_ci	 * All in-service entities must have been properly deactivated
394362306a36Sopenharmony_ci	 * or requeued before executing the next function, which
394462306a36Sopenharmony_ci	 * resets all in-service entities as no more in service. This
394562306a36Sopenharmony_ci	 * may cause bfqq to be freed. If this happens, the next
394662306a36Sopenharmony_ci	 * function returns true.
394762306a36Sopenharmony_ci	 */
394862306a36Sopenharmony_ci	return __bfq_bfqd_reset_in_service(bfqd);
394962306a36Sopenharmony_ci}
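
/*
 * Illustrative user-space sketch (not compiled into this file) of the
 * decision taken in __bfq_bfqq_expire() above: an emptied queue is
 * normally removed from the busy list, but it is kept busy (requeued)
 * if the expiration is a preemption and I/O-dispatch plugging is
 * needed for its service guarantees. Names are hypothetical.
 */
#if 0
#include <stdbool.h>

static bool example_keep_queue_busy(bool has_queued_requests, bool preempted,
				    bool needs_idling_for_guarantees)
{
	/* Non-empty queues always stay busy: they are just requeued. */
	if (has_queued_requests)
		return true;
	/* Empty queues stay busy only in the preemption-plus-plugging case. */
	return preempted && needs_idling_for_guarantees;
}
#endif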
395062306a36Sopenharmony_ci
395162306a36Sopenharmony_ci/**
395262306a36Sopenharmony_ci * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
395362306a36Sopenharmony_ci * @bfqd: device data.
395462306a36Sopenharmony_ci * @bfqq: queue to update.
395562306a36Sopenharmony_ci * @reason: reason for expiration.
395662306a36Sopenharmony_ci *
395762306a36Sopenharmony_ci * Handle the feedback on @bfqq budget at queue expiration.
395862306a36Sopenharmony_ci * See the body for detailed comments.
395962306a36Sopenharmony_ci */
396062306a36Sopenharmony_cistatic void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
396162306a36Sopenharmony_ci				     struct bfq_queue *bfqq,
396262306a36Sopenharmony_ci				     enum bfqq_expiration reason)
396362306a36Sopenharmony_ci{
396462306a36Sopenharmony_ci	struct request *next_rq;
396562306a36Sopenharmony_ci	int budget, min_budget;
396662306a36Sopenharmony_ci
396762306a36Sopenharmony_ci	min_budget = bfq_min_budget(bfqd);
396862306a36Sopenharmony_ci
396962306a36Sopenharmony_ci	if (bfqq->wr_coeff == 1)
397062306a36Sopenharmony_ci		budget = bfqq->max_budget;
397162306a36Sopenharmony_ci	else /*
397262306a36Sopenharmony_ci	      * Use a constant, low budget for weight-raised queues,
397362306a36Sopenharmony_ci	      * to help achieve a low latency. Keep it slightly higher
397462306a36Sopenharmony_ci	      * than the minimum possible budget, to cause a little
397562306a36Sopenharmony_ci	      * bit fewer expirations.
397662306a36Sopenharmony_ci	      */
397762306a36Sopenharmony_ci		budget = 2 * min_budget;
397862306a36Sopenharmony_ci
397962306a36Sopenharmony_ci	bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d",
398062306a36Sopenharmony_ci		bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
398162306a36Sopenharmony_ci	bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d",
398262306a36Sopenharmony_ci		budget, bfq_min_budget(bfqd));
398362306a36Sopenharmony_ci	bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
398462306a36Sopenharmony_ci		bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
398562306a36Sopenharmony_ci
398662306a36Sopenharmony_ci	if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) {
398762306a36Sopenharmony_ci		switch (reason) {
398862306a36Sopenharmony_ci		/*
398962306a36Sopenharmony_ci		 * Caveat: in all the following cases we trade latency
399062306a36Sopenharmony_ci		 * for throughput.
399162306a36Sopenharmony_ci		 */
399262306a36Sopenharmony_ci		case BFQQE_TOO_IDLE:
399362306a36Sopenharmony_ci			/*
399462306a36Sopenharmony_ci			 * This is the only case where we may reduce
399562306a36Sopenharmony_ci			 * the budget: if there is no request of the
399662306a36Sopenharmony_ci			 * process still waiting for completion, then
399762306a36Sopenharmony_ci			 * we assume (tentatively) that the timer has
399862306a36Sopenharmony_ci			 * expired because the batch of requests of
399962306a36Sopenharmony_ci			 * the process could have been served with a
400062306a36Sopenharmony_ci			 * smaller budget.  Hence, betting that
400162306a36Sopenharmony_ci			 * process will behave in the same way when it
400262306a36Sopenharmony_ci			 * becomes backlogged again, we reduce its
400362306a36Sopenharmony_ci			 * next budget.  As long as we guess right,
400462306a36Sopenharmony_ci			 * this budget cut reduces the latency
400562306a36Sopenharmony_ci			 * experienced by the process.
400662306a36Sopenharmony_ci			 *
400762306a36Sopenharmony_ci			 * However, if there are still outstanding
400862306a36Sopenharmony_ci			 * requests, then the process may have not yet
400962306a36Sopenharmony_ci			 * issued its next request just because it is
401062306a36Sopenharmony_ci			 * still waiting for the completion of some of
401162306a36Sopenharmony_ci			 * the still outstanding ones.  So in this
401262306a36Sopenharmony_ci			 * subcase we do not reduce its budget, on the
401362306a36Sopenharmony_ci			 * contrary we increase it to possibly boost
401462306a36Sopenharmony_ci			 * the throughput, as discussed in the
401562306a36Sopenharmony_ci			 * comments to the BUDGET_TIMEOUT case.
401662306a36Sopenharmony_ci			 */
401762306a36Sopenharmony_ci			if (bfqq->dispatched > 0) /* still outstanding reqs */
401862306a36Sopenharmony_ci				budget = min(budget * 2, bfqd->bfq_max_budget);
401962306a36Sopenharmony_ci			else {
402062306a36Sopenharmony_ci				if (budget > 5 * min_budget)
402162306a36Sopenharmony_ci					budget -= 4 * min_budget;
402262306a36Sopenharmony_ci				else
402362306a36Sopenharmony_ci					budget = min_budget;
402462306a36Sopenharmony_ci			}
402562306a36Sopenharmony_ci			break;
402662306a36Sopenharmony_ci		case BFQQE_BUDGET_TIMEOUT:
402762306a36Sopenharmony_ci			/*
402862306a36Sopenharmony_ci			 * We double the budget here because it gives
402962306a36Sopenharmony_ci			 * the chance to boost the throughput if this
403062306a36Sopenharmony_ci			 * is not a seeky process (and has bumped into
403162306a36Sopenharmony_ci			 * this timeout because of, e.g., ZBR).
403262306a36Sopenharmony_ci			 */
403362306a36Sopenharmony_ci			budget = min(budget * 2, bfqd->bfq_max_budget);
403462306a36Sopenharmony_ci			break;
403562306a36Sopenharmony_ci		case BFQQE_BUDGET_EXHAUSTED:
403662306a36Sopenharmony_ci			/*
403762306a36Sopenharmony_ci			 * The process still has backlog, and did not
403862306a36Sopenharmony_ci			 * let either the budget timeout or the disk
403962306a36Sopenharmony_ci			 * idling timeout expire. Hence it is not
404062306a36Sopenharmony_ci			 * seeky, has a short thinktime and may be
404162306a36Sopenharmony_ci			 * happy with a higher budget too. So
404262306a36Sopenharmony_ci			 * definitely increase the budget of this good
404362306a36Sopenharmony_ci			 * candidate to boost the disk throughput.
404462306a36Sopenharmony_ci			 */
404562306a36Sopenharmony_ci			budget = min(budget * 4, bfqd->bfq_max_budget);
404662306a36Sopenharmony_ci			break;
404762306a36Sopenharmony_ci		case BFQQE_NO_MORE_REQUESTS:
404862306a36Sopenharmony_ci			/*
404962306a36Sopenharmony_ci			 * For queues that expire for this reason, it
405062306a36Sopenharmony_ci			 * is particularly important to keep the
405162306a36Sopenharmony_ci			 * budget close to the actual service they
405262306a36Sopenharmony_ci			 * need. Doing so reduces the timestamp
405362306a36Sopenharmony_ci			 * misalignment problem described in the
405462306a36Sopenharmony_ci			 * comments in the body of
405562306a36Sopenharmony_ci			 * __bfq_activate_entity. In fact, suppose
405662306a36Sopenharmony_ci			 * that a queue systematically expires for
405762306a36Sopenharmony_ci			 * BFQQE_NO_MORE_REQUESTS and presents a
405862306a36Sopenharmony_ci			 * new request in time to enjoy timestamp
405962306a36Sopenharmony_ci			 * back-shifting. The larger the budget of the
406062306a36Sopenharmony_ci			 * queue is with respect to the service the
406162306a36Sopenharmony_ci			 * queue actually requests in each service
406262306a36Sopenharmony_ci			 * slot, the more times the queue can be
406362306a36Sopenharmony_ci			 * reactivated with the same virtual finish
406462306a36Sopenharmony_ci			 * time. It follows that, even if this finish
406562306a36Sopenharmony_ci			 * time is pushed to the system virtual time
406662306a36Sopenharmony_ci			 * to reduce the consequent timestamp
406762306a36Sopenharmony_ci			 * misalignment, the queue unjustly enjoys for
406862306a36Sopenharmony_ci			 * many re-activations a lower finish time
406962306a36Sopenharmony_ci			 * than all newly activated queues.
407062306a36Sopenharmony_ci			 *
407162306a36Sopenharmony_ci			 * The service needed by bfqq is measured
407262306a36Sopenharmony_ci			 * quite precisely by bfqq->entity.service.
407362306a36Sopenharmony_ci			 * Since bfqq does not enjoy device idling,
407462306a36Sopenharmony_ci			 * bfqq->entity.service is equal to the number
407562306a36Sopenharmony_ci			 * of sectors that the process associated with
407662306a36Sopenharmony_ci			 * bfqq requested to read/write before waiting
407762306a36Sopenharmony_ci			 * for request completions, or blocking for
407862306a36Sopenharmony_ci			 * other reasons.
407962306a36Sopenharmony_ci			 */
408062306a36Sopenharmony_ci			budget = max_t(int, bfqq->entity.service, min_budget);
408162306a36Sopenharmony_ci			break;
408262306a36Sopenharmony_ci		default:
408362306a36Sopenharmony_ci			return;
408462306a36Sopenharmony_ci		}
408562306a36Sopenharmony_ci	} else if (!bfq_bfqq_sync(bfqq)) {
408662306a36Sopenharmony_ci		/*
408762306a36Sopenharmony_ci		 * Async queues get always the maximum possible
408862306a36Sopenharmony_ci		 * budget, as for them we do not care about latency
408962306a36Sopenharmony_ci		 * (in addition, their ability to dispatch is limited
409062306a36Sopenharmony_ci		 * by the charging factor).
409162306a36Sopenharmony_ci		 */
409262306a36Sopenharmony_ci		budget = bfqd->bfq_max_budget;
409362306a36Sopenharmony_ci	}
409462306a36Sopenharmony_ci
409562306a36Sopenharmony_ci	bfqq->max_budget = budget;
409662306a36Sopenharmony_ci
409762306a36Sopenharmony_ci	if (bfqd->budgets_assigned >= bfq_stats_min_budgets &&
409862306a36Sopenharmony_ci	    !bfqd->bfq_user_max_budget)
409962306a36Sopenharmony_ci		bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget);
410062306a36Sopenharmony_ci
410162306a36Sopenharmony_ci	/*
410262306a36Sopenharmony_ci	 * If there is still backlog, then assign a new budget, making
410362306a36Sopenharmony_ci	 * sure that it is large enough for the next request.  Since
410462306a36Sopenharmony_ci	 * the finish time of bfqq must be kept in sync with the
410562306a36Sopenharmony_ci	 * budget, be sure to call __bfq_bfqq_expire() *after* this
410662306a36Sopenharmony_ci	 * update.
410762306a36Sopenharmony_ci	 *
410862306a36Sopenharmony_ci	 * If there is no backlog, then no need to update the budget;
410962306a36Sopenharmony_ci	 * it will be updated on the arrival of a new request.
411062306a36Sopenharmony_ci	 */
411162306a36Sopenharmony_ci	next_rq = bfqq->next_rq;
411262306a36Sopenharmony_ci	if (next_rq)
411362306a36Sopenharmony_ci		bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
411462306a36Sopenharmony_ci					    bfq_serv_to_charge(next_rq, bfqq));
411562306a36Sopenharmony_ci
411662306a36Sopenharmony_ci	bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d",
411762306a36Sopenharmony_ci			next_rq ? blk_rq_sectors(next_rq) : 0,
411862306a36Sopenharmony_ci			bfqq->entity.budget);
411962306a36Sopenharmony_ci}
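
/*
 * Illustrative user-space sketch (not compiled into this file) of the
 * budget feedback applied above to sync, non-weight-raised queues.
 * Only three of the four expiration cases are shown, and the enum and
 * helpers are simplified stand-ins with hypothetical names, not kernel
 * code.
 */
#if 0
enum example_expiration {
	EX_TOO_IDLE,
	EX_BUDGET_TIMEOUT,
	EX_BUDGET_EXHAUSTED,
};

static int example_min(int a, int b)
{
	return a < b ? a : b;
}

static int example_next_budget(int budget, int min_budget, int max_budget,
			       int dispatched, enum example_expiration reason)
{
	switch (reason) {
	case EX_TOO_IDLE:
		if (dispatched > 0)	/* still outstanding reqs: grow */
			return example_min(budget * 2, max_budget);
		/* no outstanding reqs: shrink, betting the queue needs less */
		return budget > 5 * min_budget ? budget - 4 * min_budget
					       : min_budget;
	case EX_BUDGET_TIMEOUT:
		return example_min(budget * 2, max_budget);
	case EX_BUDGET_EXHAUSTED:
		return example_min(budget * 4, max_budget);
	}
	return budget;
}
#endif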
412062306a36Sopenharmony_ci
412162306a36Sopenharmony_ci/*
412262306a36Sopenharmony_ci * Return true if the process associated with bfqq is "slow". The slow
412362306a36Sopenharmony_ci * flag is used, in addition to the budget timeout, to reduce the
412462306a36Sopenharmony_ci * amount of service provided to seeky processes, and thus reduce
412562306a36Sopenharmony_ci * their chances to lower the throughput. More details in the comments
412662306a36Sopenharmony_ci * on the function bfq_bfqq_expire().
412762306a36Sopenharmony_ci *
412862306a36Sopenharmony_ci * An important observation is in order: as discussed in the comments
412962306a36Sopenharmony_ci * on the function bfq_update_peak_rate(), with devices with internal
413062306a36Sopenharmony_ci * queues, it is hard if ever possible to know when and for how long
413162306a36Sopenharmony_ci * an I/O request is processed by the device (apart from the trivial
413262306a36Sopenharmony_ci * I/O pattern where a new request is dispatched only after the
413362306a36Sopenharmony_ci * previous one has been completed). This makes it hard to evaluate
413462306a36Sopenharmony_ci * the real rate at which the I/O requests of each bfq_queue are
413562306a36Sopenharmony_ci * served.  In fact, for an I/O scheduler like BFQ, serving a
413662306a36Sopenharmony_ci * bfq_queue means just dispatching its requests during its service
413762306a36Sopenharmony_ci * slot (i.e., until the budget of the queue is exhausted, or the
413862306a36Sopenharmony_ci * queue remains idle, or, finally, a timeout fires). But, during the
413962306a36Sopenharmony_ci * service slot of a bfq_queue, around 100 ms at most, the device may
414062306a36Sopenharmony_ci * be even still processing requests of bfq_queues served in previous
414162306a36Sopenharmony_ci * service slots. On the opposite end, the requests of the in-service
414262306a36Sopenharmony_ci * bfq_queue may be completed after the service slot of the queue
414362306a36Sopenharmony_ci * finishes.
414462306a36Sopenharmony_ci *
414562306a36Sopenharmony_ci * Anyway, unless more sophisticated solutions are used
414662306a36Sopenharmony_ci * (where possible), the sum of the sizes of the requests dispatched
414762306a36Sopenharmony_ci * during the service slot of a bfq_queue is probably the only
414862306a36Sopenharmony_ci * approximation available for the service received by the bfq_queue
414962306a36Sopenharmony_ci * during its service slot. And this sum is the quantity used in this
415062306a36Sopenharmony_ci * function to evaluate the I/O speed of a process.
415162306a36Sopenharmony_ci */
415262306a36Sopenharmony_cistatic bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
415362306a36Sopenharmony_ci				 bool compensate, unsigned long *delta_ms)
415462306a36Sopenharmony_ci{
415562306a36Sopenharmony_ci	ktime_t delta_ktime;
415662306a36Sopenharmony_ci	u32 delta_usecs;
415762306a36Sopenharmony_ci	bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekyness */
415862306a36Sopenharmony_ci
415962306a36Sopenharmony_ci	if (!bfq_bfqq_sync(bfqq))
416062306a36Sopenharmony_ci		return false;
416162306a36Sopenharmony_ci
416262306a36Sopenharmony_ci	if (compensate)
416362306a36Sopenharmony_ci		delta_ktime = bfqd->last_idling_start;
416462306a36Sopenharmony_ci	else
416562306a36Sopenharmony_ci		delta_ktime = ktime_get();
416662306a36Sopenharmony_ci	delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start);
416762306a36Sopenharmony_ci	delta_usecs = ktime_to_us(delta_ktime);
416862306a36Sopenharmony_ci
416962306a36Sopenharmony_ci	/* don't use too short time intervals */
417062306a36Sopenharmony_ci	if (delta_usecs < 1000) {
417162306a36Sopenharmony_ci		if (blk_queue_nonrot(bfqd->queue))
417262306a36Sopenharmony_ci			 /*
417362306a36Sopenharmony_ci			  * give same worst-case guarantees as idling
417462306a36Sopenharmony_ci			  * for seeky
417562306a36Sopenharmony_ci			  */
417662306a36Sopenharmony_ci			*delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC;
417762306a36Sopenharmony_ci		else /* charge at least one seek */
417862306a36Sopenharmony_ci			*delta_ms = bfq_slice_idle / NSEC_PER_MSEC;
417962306a36Sopenharmony_ci
418062306a36Sopenharmony_ci		return slow;
418162306a36Sopenharmony_ci	}
418262306a36Sopenharmony_ci
418362306a36Sopenharmony_ci	*delta_ms = delta_usecs / USEC_PER_MSEC;
418462306a36Sopenharmony_ci
418562306a36Sopenharmony_ci	/*
418662306a36Sopenharmony_ci	 * Use only long (> 20ms) intervals to filter out excessive
418762306a36Sopenharmony_ci	 * spikes in service rate estimation.
418862306a36Sopenharmony_ci	 */
418962306a36Sopenharmony_ci	if (delta_usecs > 20000) {
419062306a36Sopenharmony_ci		/*
419162306a36Sopenharmony_ci		 * Caveat for rotational devices: processes doing I/O
419262306a36Sopenharmony_ci		 * in the slower disk zones tend to be slow(er) even
419362306a36Sopenharmony_ci		 * if not seeky. In this respect, the estimated peak
419462306a36Sopenharmony_ci		 * rate is likely to be an average over the disk
419562306a36Sopenharmony_ci		 * surface. Accordingly, to not be too harsh with
419662306a36Sopenharmony_ci		 * unlucky processes, a process is deemed slow only if
419762306a36Sopenharmony_ci		 * its rate has been lower than half of the estimated
419862306a36Sopenharmony_ci		 * peak rate.
419962306a36Sopenharmony_ci		 */
420062306a36Sopenharmony_ci		slow = bfqq->entity.service < bfqd->bfq_max_budget / 2;
420162306a36Sopenharmony_ci	}
420262306a36Sopenharmony_ci
420362306a36Sopenharmony_ci	bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow);
420462306a36Sopenharmony_ci
420562306a36Sopenharmony_ci	return slow;
420662306a36Sopenharmony_ci}
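
/*
 * Illustrative user-space sketch (not compiled into this file) of the
 * outcome of the test above, ignoring the *delta_ms side effect: for
 * short or middling intervals the initial seekiness guess is kept,
 * while for long intervals (> 20 ms) the queue is deemed slow if its
 * service stayed below half of the maximum budget. Names are
 * hypothetical.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool example_is_slow(uint32_t delta_usecs, int service,
			    int max_budget, bool seeky)
{
	if (delta_usecs > 20000)	/* long enough to trust the sample */
		return service < max_budget / 2;

	return seeky;			/* otherwise fall back on seekiness */
}
#endif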
420762306a36Sopenharmony_ci
420862306a36Sopenharmony_ci/*
420962306a36Sopenharmony_ci * To be deemed as soft real-time, an application must meet two
421062306a36Sopenharmony_ci * requirements. First, the application must not require an average
421162306a36Sopenharmony_ci * bandwidth higher than the approximate bandwidth required to play back or
421262306a36Sopenharmony_ci * record a compressed high-definition video.
421362306a36Sopenharmony_ci * The next function is invoked on the completion of the last request of a
421462306a36Sopenharmony_ci * batch, to compute the next-start time instant, soft_rt_next_start, such
421562306a36Sopenharmony_ci * that, if the next request of the application does not arrive before
421662306a36Sopenharmony_ci * soft_rt_next_start, then the above requirement on the bandwidth is met.
421762306a36Sopenharmony_ci *
421862306a36Sopenharmony_ci * The second requirement is that the request pattern of the application is
421962306a36Sopenharmony_ci * isochronous, i.e., that, after issuing a request or a batch of requests,
422062306a36Sopenharmony_ci * the application stops issuing new requests until all its pending requests
422162306a36Sopenharmony_ci * have been completed. After that, the application may issue a new batch,
422262306a36Sopenharmony_ci * and so on.
422362306a36Sopenharmony_ci * For this reason the next function is invoked to compute
422462306a36Sopenharmony_ci * soft_rt_next_start only for applications that meet this requirement,
422562306a36Sopenharmony_ci * whereas soft_rt_next_start is set to infinity for applications that do
422662306a36Sopenharmony_ci * not.
422762306a36Sopenharmony_ci *
422862306a36Sopenharmony_ci * Unfortunately, even a greedy (i.e., I/O-bound) application may
422962306a36Sopenharmony_ci * happen to meet, occasionally or systematically, both the above
423062306a36Sopenharmony_ci * bandwidth and isochrony requirements. This may happen at least in
423162306a36Sopenharmony_ci * the following circumstances. First, if the CPU load is high. The
423262306a36Sopenharmony_ci * application may stop issuing requests while the CPUs are busy
423362306a36Sopenharmony_ci * serving other processes, then restart, then stop again for a while,
423462306a36Sopenharmony_ci * and so on. The other circumstances are related to the storage
423562306a36Sopenharmony_ci * device: the storage device is highly loaded or reaches a low-enough
423662306a36Sopenharmony_ci * throughput with the I/O of the application (e.g., because the I/O
423762306a36Sopenharmony_ci * is random and/or the device is slow). In all these cases, the
423862306a36Sopenharmony_ci * I/O of the application may be simply slowed down enough to meet
423962306a36Sopenharmony_ci * the bandwidth and isochrony requirements. To reduce the probability
424062306a36Sopenharmony_ci * that greedy applications are deemed as soft real-time in these
424162306a36Sopenharmony_ci * corner cases, a further rule is used in the computation of
424262306a36Sopenharmony_ci * soft_rt_next_start: the return value of this function is forced to
424362306a36Sopenharmony_ci * be higher than the maximum between the following two quantities.
424462306a36Sopenharmony_ci *
424562306a36Sopenharmony_ci * (a) Current time plus: (1) the maximum time for which the arrival
424662306a36Sopenharmony_ci *     of a request is waited for when a sync queue becomes idle,
424762306a36Sopenharmony_ci *     namely bfqd->bfq_slice_idle, and (2) a few extra jiffies. We
424862306a36Sopenharmony_ci *     postpone for a moment the reason for adding a few extra
424962306a36Sopenharmony_ci *     jiffies; we get back to it after next item (b).  Lower-bounding
425062306a36Sopenharmony_ci *     the return value of this function with the current time plus
425162306a36Sopenharmony_ci *     bfqd->bfq_slice_idle tends to filter out greedy applications,
425262306a36Sopenharmony_ci *     because the latter issue their next request as soon as possible
425362306a36Sopenharmony_ci *     after the last one has been completed. In contrast, a soft
425462306a36Sopenharmony_ci *     real-time application spends some time processing data, after a
425562306a36Sopenharmony_ci *     batch of its requests has been completed.
425662306a36Sopenharmony_ci *
425762306a36Sopenharmony_ci * (b) Current value of bfqq->soft_rt_next_start. As pointed out
425862306a36Sopenharmony_ci *     above, greedy applications may happen to meet both the
425962306a36Sopenharmony_ci *     bandwidth and isochrony requirements under heavy CPU or
426062306a36Sopenharmony_ci *     storage-device load. In more detail, in these scenarios, these
426162306a36Sopenharmony_ci *     applications happen, only for limited time periods, to do I/O
426262306a36Sopenharmony_ci *     slowly enough to meet all the requirements described so far,
426362306a36Sopenharmony_ci *     including the filtering in above item (a). These slow-speed
426462306a36Sopenharmony_ci *     time intervals are usually interspersed between other time
426562306a36Sopenharmony_ci *     intervals during which these applications do I/O at a very high
426662306a36Sopenharmony_ci *     speed. Fortunately, exactly because of the high speed of the
426762306a36Sopenharmony_ci *     I/O in the high-speed intervals, the values returned by this
426862306a36Sopenharmony_ci *     function happen to be so high, near the end of any such
426962306a36Sopenharmony_ci *     high-speed interval, to be likely to fall *after* the end of
427062306a36Sopenharmony_ci *     the low-speed time interval that follows. These high values are
427162306a36Sopenharmony_ci *     stored in bfqq->soft_rt_next_start after each invocation of
427262306a36Sopenharmony_ci *     this function. As a consequence, if the last value of
427362306a36Sopenharmony_ci *     bfqq->soft_rt_next_start is constantly used to lower-bound the
427462306a36Sopenharmony_ci *     next value that this function may return, then, from the very
427562306a36Sopenharmony_ci *     beginning of a low-speed interval, bfqq->soft_rt_next_start is
427662306a36Sopenharmony_ci *     likely to be constantly kept so high that any I/O request
427762306a36Sopenharmony_ci *     issued during the low-speed interval is considered as arriving
427862306a36Sopenharmony_ci *     too soon for the application to be deemed as soft
427962306a36Sopenharmony_ci *     real-time. Then, in the high-speed interval that follows, the
428062306a36Sopenharmony_ci *     application will not be deemed as soft real-time, just because
428162306a36Sopenharmony_ci *     it will do I/O at a high speed. And so on.
428262306a36Sopenharmony_ci *
428362306a36Sopenharmony_ci * Getting back to the filtering in item (a), in the following two
428462306a36Sopenharmony_ci * cases this filtering might be easily passed by a greedy
428562306a36Sopenharmony_ci * application, if the reference quantity was just
428662306a36Sopenharmony_ci * bfqd->bfq_slice_idle:
428762306a36Sopenharmony_ci * 1) HZ is so low that the duration of a jiffy is comparable to or
428862306a36Sopenharmony_ci *    higher than bfqd->bfq_slice_idle. This happens, e.g., on slow
428962306a36Sopenharmony_ci *    devices with HZ=100. The time granularity may be so coarse
429062306a36Sopenharmony_ci *    that the approximation, in jiffies, of bfqd->bfq_slice_idle
429162306a36Sopenharmony_ci *    is rather lower than the exact value.
429262306a36Sopenharmony_ci * 2) jiffies, instead of increasing at a constant rate, may stop increasing
429362306a36Sopenharmony_ci *    for a while, then suddenly 'jump' by several units to recover the lost
429462306a36Sopenharmony_ci *    increments. This seems to happen, e.g., inside virtual machines.
429562306a36Sopenharmony_ci * To address this issue, in the filtering in (a) we do not use as a
429662306a36Sopenharmony_ci * reference time interval just bfqd->bfq_slice_idle, but
429762306a36Sopenharmony_ci * bfqd->bfq_slice_idle plus a few jiffies. In particular, we add the
429862306a36Sopenharmony_ci * minimum number of jiffies for which the filter seems to be quite
429962306a36Sopenharmony_ci * precise also in embedded systems and KVM/QEMU virtual machines.
430062306a36Sopenharmony_ci */
430162306a36Sopenharmony_cistatic unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
430262306a36Sopenharmony_ci						struct bfq_queue *bfqq)
430362306a36Sopenharmony_ci{
430462306a36Sopenharmony_ci	return max3(bfqq->soft_rt_next_start,
430562306a36Sopenharmony_ci		    bfqq->last_idle_bklogged +
430662306a36Sopenharmony_ci		    HZ * bfqq->service_from_backlogged /
430762306a36Sopenharmony_ci		    bfqd->bfq_wr_max_softrt_rate,
430862306a36Sopenharmony_ci		    jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
430962306a36Sopenharmony_ci}
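
/*
 * Illustrative user-space sketch (not compiled into this file) of the
 * arithmetic above, ignoring jiffies wraparound: the next-start instant
 * is the maximum of the previous value, the instant at which draining
 * the backlogged service at bfq_wr_max_softrt_rate would end, and "now"
 * plus the idle slice and a few extra ticks. Names are hypothetical and
 * all time values are in jiffies.
 */
#if 0
static unsigned long example_softrt_next_start(unsigned long prev_next_start,
					       unsigned long last_idle_bklogged,
					       unsigned long service_sectors,
					       unsigned long max_softrt_rate,
					       unsigned long now,
					       unsigned long slice_idle,
					       unsigned long hz)
{
	/* caller is assumed to guarantee max_softrt_rate > 0 */
	unsigned long bw_bound = last_idle_bklogged +
		hz * service_sectors / max_softrt_rate;
	unsigned long idle_bound = now + slice_idle + 4;
	unsigned long res = prev_next_start;

	if (bw_bound > res)
		res = bw_bound;
	if (idle_bound > res)
		res = idle_bound;
	return res;
}
#endif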
431062306a36Sopenharmony_ci
431162306a36Sopenharmony_ci/**
431262306a36Sopenharmony_ci * bfq_bfqq_expire - expire a queue.
431362306a36Sopenharmony_ci * @bfqd: device owning the queue.
431462306a36Sopenharmony_ci * @bfqq: the queue to expire.
431562306a36Sopenharmony_ci * @compensate: if true, compensate for the time spent idling.
431662306a36Sopenharmony_ci * @reason: the reason causing the expiration.
431762306a36Sopenharmony_ci *
431862306a36Sopenharmony_ci * If the process associated with bfqq does slow I/O (e.g., because it
431962306a36Sopenharmony_ci * issues random requests), we charge bfqq with the time it has been
432062306a36Sopenharmony_ci * in service instead of the service it has received (see
432162306a36Sopenharmony_ci * bfq_bfqq_charge_time for details on how this goal is achieved). As
432262306a36Sopenharmony_ci * a consequence, bfqq will typically get higher timestamps upon
432362306a36Sopenharmony_ci * reactivation, and hence it will be rescheduled as if it had
432462306a36Sopenharmony_ci * received more service than what it has actually received. In the
432562306a36Sopenharmony_ci * end, bfqq receives less service in proportion to how slowly its
432662306a36Sopenharmony_ci * associated process consumes its budgets (and hence how seriously it
432762306a36Sopenharmony_ci * tends to lower the throughput). In addition, this time-charging
432862306a36Sopenharmony_ci * strategy guarantees time fairness among slow processes. In
432962306a36Sopenharmony_ci * contrast, if the process associated with bfqq is not slow, we
433062306a36Sopenharmony_ci * charge bfqq exactly with the service it has received.
433162306a36Sopenharmony_ci *
433262306a36Sopenharmony_ci * Charging time to the first type of queues and the exact service to
433362306a36Sopenharmony_ci * the other has the effect of using the WF2Q+ policy to schedule the
433462306a36Sopenharmony_ci * former on a timeslice basis, without violating service domain
433562306a36Sopenharmony_ci * guarantees among the latter.
433662306a36Sopenharmony_ci */
433762306a36Sopenharmony_civoid bfq_bfqq_expire(struct bfq_data *bfqd,
433862306a36Sopenharmony_ci		     struct bfq_queue *bfqq,
433962306a36Sopenharmony_ci		     bool compensate,
434062306a36Sopenharmony_ci		     enum bfqq_expiration reason)
434162306a36Sopenharmony_ci{
434262306a36Sopenharmony_ci	bool slow;
434362306a36Sopenharmony_ci	unsigned long delta = 0;
434462306a36Sopenharmony_ci	struct bfq_entity *entity = &bfqq->entity;
434562306a36Sopenharmony_ci
434662306a36Sopenharmony_ci	/*
434762306a36Sopenharmony_ci	 * Check whether the process is slow (see bfq_bfqq_is_slow).
434862306a36Sopenharmony_ci	 */
434962306a36Sopenharmony_ci	slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, &delta);
435062306a36Sopenharmony_ci
435162306a36Sopenharmony_ci	/*
435262306a36Sopenharmony_ci	 * As above explained, charge slow (typically seeky) and
435362306a36Sopenharmony_ci	 * timed-out queues with the time and not the service
435462306a36Sopenharmony_ci	 * received, to favor sequential workloads.
435562306a36Sopenharmony_ci	 *
435662306a36Sopenharmony_ci	 * Processes doing I/O in the slower disk zones will tend to
435762306a36Sopenharmony_ci	 * be slow(er) even if not seeky. Therefore, since the
435862306a36Sopenharmony_ci	 * estimated peak rate is actually an average over the disk
435962306a36Sopenharmony_ci	 * surface, these processes may time out just for bad luck. To
436062306a36Sopenharmony_ci	 * avoid punishing them, do not charge time to processes that
436162306a36Sopenharmony_ci	 * succeeded in consuming at least 2/3 of their budget. This
436262306a36Sopenharmony_ci	 * allows BFQ to preserve enough elasticity to still perform
436362306a36Sopenharmony_ci	 * bandwidth, and not time, distribution with little unlucky
436462306a36Sopenharmony_ci	 * or quasi-sequential processes.
436562306a36Sopenharmony_ci	 */
436662306a36Sopenharmony_ci	if (bfqq->wr_coeff == 1 &&
436762306a36Sopenharmony_ci	    (slow ||
436862306a36Sopenharmony_ci	     (reason == BFQQE_BUDGET_TIMEOUT &&
436962306a36Sopenharmony_ci	      bfq_bfqq_budget_left(bfqq) >=  entity->budget / 3)))
437062306a36Sopenharmony_ci		bfq_bfqq_charge_time(bfqd, bfqq, delta);
437162306a36Sopenharmony_ci
437262306a36Sopenharmony_ci	if (bfqd->low_latency && bfqq->wr_coeff == 1)
437362306a36Sopenharmony_ci		bfqq->last_wr_start_finish = jiffies;
437462306a36Sopenharmony_ci
437562306a36Sopenharmony_ci	if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 &&
437662306a36Sopenharmony_ci	    RB_EMPTY_ROOT(&bfqq->sort_list)) {
437762306a36Sopenharmony_ci		/*
437862306a36Sopenharmony_ci		 * If we get here, and there are no outstanding
437962306a36Sopenharmony_ci		 * requests, then the request pattern is isochronous
438062306a36Sopenharmony_ci		 * (see the comments on the function
438162306a36Sopenharmony_ci		 * bfq_bfqq_softrt_next_start()). Therefore we can
438262306a36Sopenharmony_ci		 * compute soft_rt_next_start.
438362306a36Sopenharmony_ci		 *
438462306a36Sopenharmony_ci		 * If, instead, the queue still has outstanding
438562306a36Sopenharmony_ci		 * requests, then we have to wait for the completion
438662306a36Sopenharmony_ci		 * of all the outstanding requests to discover whether
438762306a36Sopenharmony_ci		 * the request pattern is actually isochronous.
438862306a36Sopenharmony_ci		 */
438962306a36Sopenharmony_ci		if (bfqq->dispatched == 0)
439062306a36Sopenharmony_ci			bfqq->soft_rt_next_start =
439162306a36Sopenharmony_ci				bfq_bfqq_softrt_next_start(bfqd, bfqq);
439262306a36Sopenharmony_ci		else if (bfqq->dispatched > 0) {
439362306a36Sopenharmony_ci			/*
439462306a36Sopenharmony_ci			 * Schedule an update of soft_rt_next_start to when
439562306a36Sopenharmony_ci			 * the task may be discovered to be isochronous.
439662306a36Sopenharmony_ci			 */
439762306a36Sopenharmony_ci			bfq_mark_bfqq_softrt_update(bfqq);
439862306a36Sopenharmony_ci		}
439962306a36Sopenharmony_ci	}
440062306a36Sopenharmony_ci
440162306a36Sopenharmony_ci	bfq_log_bfqq(bfqd, bfqq,
440262306a36Sopenharmony_ci		"expire (%d, slow %d, num_disp %d, short_ttime %d)", reason,
440362306a36Sopenharmony_ci		slow, bfqq->dispatched, bfq_bfqq_has_short_ttime(bfqq));
440462306a36Sopenharmony_ci
440562306a36Sopenharmony_ci	/*
440662306a36Sopenharmony_ci	 * bfqq expired, so no total service time needs to be computed
440762306a36Sopenharmony_ci	 * any longer: reset state machine for measuring total service
440862306a36Sopenharmony_ci	 * times.
440962306a36Sopenharmony_ci	 */
441062306a36Sopenharmony_ci	bfqd->rqs_injected = bfqd->wait_dispatch = false;
441162306a36Sopenharmony_ci	bfqd->waited_rq = NULL;
441262306a36Sopenharmony_ci
441362306a36Sopenharmony_ci	/*
441462306a36Sopenharmony_ci	 * Increase, decrease or leave budget unchanged according to
441562306a36Sopenharmony_ci	 * reason.
441662306a36Sopenharmony_ci	 */
441762306a36Sopenharmony_ci	__bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
441862306a36Sopenharmony_ci	if (__bfq_bfqq_expire(bfqd, bfqq, reason))
441962306a36Sopenharmony_ci		/* bfqq is gone, no more actions on it */
442062306a36Sopenharmony_ci		return;
442162306a36Sopenharmony_ci
442262306a36Sopenharmony_ci	/* mark bfqq as waiting a request only if a bic still points to it */
442362306a36Sopenharmony_ci	if (!bfq_bfqq_busy(bfqq) &&
442462306a36Sopenharmony_ci	    reason != BFQQE_BUDGET_TIMEOUT &&
442562306a36Sopenharmony_ci	    reason != BFQQE_BUDGET_EXHAUSTED) {
442662306a36Sopenharmony_ci		bfq_mark_bfqq_non_blocking_wait_rq(bfqq);
442762306a36Sopenharmony_ci		/*
442862306a36Sopenharmony_ci		 * Not setting service to 0, because, if the next rq
442962306a36Sopenharmony_ci		 * arrives in time, the queue will go on receiving
443062306a36Sopenharmony_ci		 * service with this same budget (as if it never expired)
443162306a36Sopenharmony_ci		 */
443262306a36Sopenharmony_ci	} else
443362306a36Sopenharmony_ci		entity->service = 0;
443462306a36Sopenharmony_ci
443562306a36Sopenharmony_ci	/*
443662306a36Sopenharmony_ci	 * Reset the received-service counter for every parent entity.
443762306a36Sopenharmony_ci	 * Differently from what happens with bfqq->entity.service,
443862306a36Sopenharmony_ci	 * the resetting of this counter never needs to be postponed
443962306a36Sopenharmony_ci	 * for parent entities. In fact, in case bfqq may have a
444062306a36Sopenharmony_ci	 * chance to go on being served using the last, partially
444162306a36Sopenharmony_ci	 * consumed budget, bfqq->entity.service needs to be kept,
444262306a36Sopenharmony_ci	 * because if bfqq then actually goes on being served using
444362306a36Sopenharmony_ci	 * the same budget, the last value of bfqq->entity.service is
444462306a36Sopenharmony_ci	 * needed to properly decrement bfqq->entity.budget by the
444562306a36Sopenharmony_ci	 * portion already consumed. In contrast, it is not necessary
444662306a36Sopenharmony_ci	 * to keep entity->service for parent entities too, because
444762306a36Sopenharmony_ci	 * the bubble up of the new value of bfqq->entity.budget will
444862306a36Sopenharmony_ci	 * make sure that the budgets of parent entities are correct,
444962306a36Sopenharmony_ci	 * even in case bfqq and thus parent entities go on receiving
445062306a36Sopenharmony_ci	 * service with the same budget.
445162306a36Sopenharmony_ci	 */
445262306a36Sopenharmony_ci	entity = entity->parent;
445362306a36Sopenharmony_ci	for_each_entity(entity)
445462306a36Sopenharmony_ci		entity->service = 0;
445562306a36Sopenharmony_ci}
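
/*
 * Illustrative user-space sketch (not compiled into this file) of the
 * time-charging rule applied at the beginning of bfq_bfqq_expire():
 * a non-weight-raised queue is charged time instead of service if it
 * is slow, or if it timed out after consuming at most 2/3 of its
 * budget. Names are hypothetical.
 */
#if 0
#include <stdbool.h>

static bool example_charge_time(bool weight_raised, bool slow,
				bool budget_timeout, int budget,
				int budget_left)
{
	if (weight_raised)
		return false;

	return slow || (budget_timeout && budget_left >= budget / 3);
}
#endif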
445662306a36Sopenharmony_ci
445762306a36Sopenharmony_ci/*
445862306a36Sopenharmony_ci * Budget timeout is not implemented through a dedicated timer, but
445962306a36Sopenharmony_ci * just checked on request arrivals and completions, as well as on
446062306a36Sopenharmony_ci * idle timer expirations.
446162306a36Sopenharmony_ci */
446262306a36Sopenharmony_cistatic bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
446362306a36Sopenharmony_ci{
446462306a36Sopenharmony_ci	return time_is_before_eq_jiffies(bfqq->budget_timeout);
446562306a36Sopenharmony_ci}
446662306a36Sopenharmony_ci
446762306a36Sopenharmony_ci/*
446862306a36Sopenharmony_ci * If we expire a queue that is actively waiting (i.e., with the
446962306a36Sopenharmony_ci * device idled) for the arrival of a new request, then we may incur
447062306a36Sopenharmony_ci * the timestamp misalignment problem described in the body of the
447162306a36Sopenharmony_ci * function __bfq_activate_entity. Hence we return true only if this
447262306a36Sopenharmony_ci * condition does not hold, or if the queue is slow enough to deserve
447362306a36Sopenharmony_ci * only to be kicked off for preserving a high throughput.
447462306a36Sopenharmony_ci */
447562306a36Sopenharmony_cistatic bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
447662306a36Sopenharmony_ci{
447762306a36Sopenharmony_ci	bfq_log_bfqq(bfqq->bfqd, bfqq,
447862306a36Sopenharmony_ci		"may_budget_timeout: wait_request %d left %d timeout %d",
447962306a36Sopenharmony_ci		bfq_bfqq_wait_request(bfqq),
448062306a36Sopenharmony_ci			bfq_bfqq_budget_left(bfqq) >=  bfqq->entity.budget / 3,
448162306a36Sopenharmony_ci		bfq_bfqq_budget_timeout(bfqq));
448262306a36Sopenharmony_ci
448362306a36Sopenharmony_ci	return (!bfq_bfqq_wait_request(bfqq) ||
448462306a36Sopenharmony_ci		bfq_bfqq_budget_left(bfqq) >=  bfqq->entity.budget / 3)
448562306a36Sopenharmony_ci		&&
448662306a36Sopenharmony_ci		bfq_bfqq_budget_timeout(bfqq);
448762306a36Sopenharmony_ci}
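/*
 * Editor's illustrative sketch, not part of BFQ: a minimal userspace
 * model of the expiration condition computed just above. The helper
 * name and its parameters are invented for the example; only the
 * boolean structure mirrors the real check.
 */
#if 0
#include <stdbool.h>

static bool may_expire_for_budget_timeout(bool waiting_for_new_request,
					  long budget_left, long budget,
					  bool budget_timed_out)
{
	/*
	 * Expire on budget timeout only if the queue is not idling
	 * for a new request, or if it still holds at least a third
	 * of its budget (i.e., it is slow enough that kicking it
	 * off preserves throughput).
	 */
	return (!waiting_for_new_request || budget_left >= budget / 3) &&
		budget_timed_out;
}
#endif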
448862306a36Sopenharmony_ci
448962306a36Sopenharmony_cistatic bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
449062306a36Sopenharmony_ci					     struct bfq_queue *bfqq)
449162306a36Sopenharmony_ci{
449262306a36Sopenharmony_ci	bool rot_without_queueing =
449362306a36Sopenharmony_ci		!blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag,
449462306a36Sopenharmony_ci		bfqq_sequential_and_IO_bound,
449562306a36Sopenharmony_ci		idling_boosts_thr;
449662306a36Sopenharmony_ci
449762306a36Sopenharmony_ci	/* No point in idling for bfqq if it won't get requests any longer */
449862306a36Sopenharmony_ci	if (unlikely(!bfqq_process_refs(bfqq)))
449962306a36Sopenharmony_ci		return false;
450062306a36Sopenharmony_ci
450162306a36Sopenharmony_ci	bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) &&
450262306a36Sopenharmony_ci		bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq);
450362306a36Sopenharmony_ci
450462306a36Sopenharmony_ci	/*
450562306a36Sopenharmony_ci	 * The next variable takes into account the cases where idling
450662306a36Sopenharmony_ci	 * boosts the throughput.
450762306a36Sopenharmony_ci	 *
450862306a36Sopenharmony_ci	 * The value of the variable is computed considering, first, that
450962306a36Sopenharmony_ci	 * idling is virtually always beneficial for the throughput if:
451062306a36Sopenharmony_ci	 * (a) the device is not NCQ-capable and rotational, or
451162306a36Sopenharmony_ci	 * (b) regardless of the presence of NCQ, the device is rotational and
451262306a36Sopenharmony_ci	 *     the request pattern for bfqq is I/O-bound and sequential, or
451362306a36Sopenharmony_ci	 * (c) regardless of whether it is rotational, the device is
451462306a36Sopenharmony_ci	 *     not NCQ-capable and the request pattern for bfqq is
451562306a36Sopenharmony_ci	 *     I/O-bound and sequential.
451662306a36Sopenharmony_ci	 *
451762306a36Sopenharmony_ci	 * Secondly, and in contrast to the above item (b), idling an
451862306a36Sopenharmony_ci	 * NCQ-capable flash-based device would not boost the
451962306a36Sopenharmony_ci	 * throughput even with sequential I/O; rather it would lower
452062306a36Sopenharmony_ci	 * the throughput in proportion to how fast the device
452162306a36Sopenharmony_ci	 * is. Accordingly, the next variable is true if any of the
452262306a36Sopenharmony_ci	 * above conditions (a), (b) or (c) is true, and, in
452362306a36Sopenharmony_ci	 * particular, happens to be false if bfqd is an NCQ-capable
452462306a36Sopenharmony_ci	 * flash-based device.
452562306a36Sopenharmony_ci	 */
452662306a36Sopenharmony_ci	idling_boosts_thr = rot_without_queueing ||
452762306a36Sopenharmony_ci		((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) &&
452862306a36Sopenharmony_ci		 bfqq_sequential_and_IO_bound);
452962306a36Sopenharmony_ci
453062306a36Sopenharmony_ci	/*
453162306a36Sopenharmony_ci	 * The return value of this function is equal to that of
453262306a36Sopenharmony_ci	 * idling_boosts_thr, unless a special case holds. In this
453362306a36Sopenharmony_ci	 * special case, described below, idling may cause problems to
453462306a36Sopenharmony_ci	 * weight-raised queues.
453562306a36Sopenharmony_ci	 *
453662306a36Sopenharmony_ci	 * When the request pool is saturated (e.g., in the presence
453762306a36Sopenharmony_ci	 * of write hogs), if the processes associated with
453862306a36Sopenharmony_ci	 * non-weight-raised queues ask for requests at a lower rate,
453962306a36Sopenharmony_ci	 * then processes associated with weight-raised queues have a
454062306a36Sopenharmony_ci	 * higher probability to get a request from the pool
454162306a36Sopenharmony_ci	 * immediately (or at least soon) when they need one. Thus
454262306a36Sopenharmony_ci	 * they have a higher probability to actually get a fraction
454362306a36Sopenharmony_ci	 * of the device throughput proportional to their high
454462306a36Sopenharmony_ci	 * weight. This is especially true with NCQ-capable drives,
454562306a36Sopenharmony_ci	 * which enqueue several requests in advance, and further
454662306a36Sopenharmony_ci	 * reorder internally-queued requests.
454762306a36Sopenharmony_ci	 *
454862306a36Sopenharmony_ci	 * For this reason, we force to false the return value if
454962306a36Sopenharmony_ci	 * there are weight-raised busy queues. In this case, and if
455062306a36Sopenharmony_ci	 * bfqq is not weight-raised, this guarantees that the device
455162306a36Sopenharmony_ci	 * is not idled for bfqq (if, instead, bfqq is weight-raised,
455262306a36Sopenharmony_ci	 * then idling will be guaranteed by another variable, see
455362306a36Sopenharmony_ci	 * below). Combined with the timestamping rules of BFQ (see
455462306a36Sopenharmony_ci	 * [1] for details), this behavior causes bfqq, and hence any
455562306a36Sopenharmony_ci	 * sync non-weight-raised queue, to get a lower number of
455662306a36Sopenharmony_ci	 * requests served, and thus to ask for a lower number of
455762306a36Sopenharmony_ci	 * requests from the request pool, before the busy
455862306a36Sopenharmony_ci	 * weight-raised queues get served again. This often mitigates
455962306a36Sopenharmony_ci	 * starvation problems in the presence of heavy write
456062306a36Sopenharmony_ci	 * workloads and NCQ, thereby guaranteeing a higher
456162306a36Sopenharmony_ci	 * application and system responsiveness in these hostile
456262306a36Sopenharmony_ci	 * scenarios.
456362306a36Sopenharmony_ci	 */
456462306a36Sopenharmony_ci	return idling_boosts_thr &&
456562306a36Sopenharmony_ci		bfqd->wr_busy_queues == 0;
456662306a36Sopenharmony_ci}
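/*
 * Editor's illustrative sketch, not part of BFQ: the truth-value
 * combination computed above, restated as a standalone predicate over
 * plain booleans. All names below are invented for the example.
 */
#if 0
#include <stdbool.h>

static bool idling_boosts_throughput(bool rotational, bool ncq_capable,
				     bool seq_io_bound, bool any_wr_busy_queue)
{
	/* (a) rotational device without internal queueing (no NCQ) */
	bool rot_without_queueing = rotational && !ncq_capable;
	/* (b)/(c) rotational or non-NCQ device, with sequential I/O-bound queue */
	bool idling_boosts_thr = rot_without_queueing ||
		((rotational || !ncq_capable) && seq_io_bound);

	/*
	 * Even when idling boosts throughput, skip it if some
	 * weight-raised queue is busy, so that non-weight-raised
	 * queues do not monopolize the request pool.
	 */
	return idling_boosts_thr && !any_wr_busy_queue;
}
#endif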
456762306a36Sopenharmony_ci
456862306a36Sopenharmony_ci/*
456962306a36Sopenharmony_ci * For a queue that becomes empty, device idling is allowed only if
457062306a36Sopenharmony_ci * this function returns true for that queue. As a consequence, since
457162306a36Sopenharmony_ci * device idling plays a critical role for both throughput boosting
457262306a36Sopenharmony_ci * and service guarantees, the return value of this function plays a
457362306a36Sopenharmony_ci * critical role as well.
457462306a36Sopenharmony_ci *
457562306a36Sopenharmony_ci * In a nutshell, this function returns true only if idling is
457662306a36Sopenharmony_ci * beneficial for throughput or, even if detrimental for throughput,
457762306a36Sopenharmony_ci * idling is however necessary to preserve service guarantees (low
457862306a36Sopenharmony_ci * latency, desired throughput distribution, ...). In particular, on
457962306a36Sopenharmony_ci * NCQ-capable devices, this function tries to return false, so as to
458062306a36Sopenharmony_ci * help keep the drives' internal queues full, whenever this helps the
458162306a36Sopenharmony_ci * device boost the throughput without causing any service-guarantee
458262306a36Sopenharmony_ci * issue.
458362306a36Sopenharmony_ci *
458462306a36Sopenharmony_ci * Most of the issues taken into account to get the return value of
458562306a36Sopenharmony_ci * this function are not trivial. We discuss these issues in the two
458662306a36Sopenharmony_ci * functions providing the main pieces of information needed by this
458762306a36Sopenharmony_ci * function.
458862306a36Sopenharmony_ci */
458962306a36Sopenharmony_cistatic bool bfq_better_to_idle(struct bfq_queue *bfqq)
459062306a36Sopenharmony_ci{
459162306a36Sopenharmony_ci	struct bfq_data *bfqd = bfqq->bfqd;
459262306a36Sopenharmony_ci	bool idling_boosts_thr_with_no_issue, idling_needed_for_service_guar;
459362306a36Sopenharmony_ci
459462306a36Sopenharmony_ci	/* No point in idling for bfqq if it won't get requests any longer */
459562306a36Sopenharmony_ci	if (unlikely(!bfqq_process_refs(bfqq)))
459662306a36Sopenharmony_ci		return false;
459762306a36Sopenharmony_ci
459862306a36Sopenharmony_ci	if (unlikely(bfqd->strict_guarantees))
459962306a36Sopenharmony_ci		return true;
460062306a36Sopenharmony_ci
460162306a36Sopenharmony_ci	/*
460262306a36Sopenharmony_ci	 * Idling is performed only if slice_idle > 0. In addition, we
460362306a36Sopenharmony_ci	 * do not idle if
460462306a36Sopenharmony_ci	 * (a) bfqq is async
460562306a36Sopenharmony_ci	 * (b) bfqq is in the idle io prio class: in this case we do
460662306a36Sopenharmony_ci	 * not idle because we want to minimize the bandwidth that
460762306a36Sopenharmony_ci	 * queues in this class can steal from higher-priority queues
460862306a36Sopenharmony_ci	 */
460962306a36Sopenharmony_ci	if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) ||
461062306a36Sopenharmony_ci	   bfq_class_idle(bfqq))
461162306a36Sopenharmony_ci		return false;
461262306a36Sopenharmony_ci
461362306a36Sopenharmony_ci	idling_boosts_thr_with_no_issue =
461462306a36Sopenharmony_ci		idling_boosts_thr_without_issues(bfqd, bfqq);
461562306a36Sopenharmony_ci
461662306a36Sopenharmony_ci	idling_needed_for_service_guar =
461762306a36Sopenharmony_ci		idling_needed_for_service_guarantees(bfqd, bfqq);
461862306a36Sopenharmony_ci
461962306a36Sopenharmony_ci	/*
462062306a36Sopenharmony_ci	 * We have now the two components we need to compute the
462162306a36Sopenharmony_ci	 * return value of the function, which is true only if idling
462262306a36Sopenharmony_ci	 * either boosts the throughput (without issues), or is
462362306a36Sopenharmony_ci	 * necessary to preserve service guarantees.
462462306a36Sopenharmony_ci	 */
462562306a36Sopenharmony_ci	return idling_boosts_thr_with_no_issue ||
462662306a36Sopenharmony_ci		idling_needed_for_service_guar;
462762306a36Sopenharmony_ci}
462862306a36Sopenharmony_ci
462962306a36Sopenharmony_ci/*
463062306a36Sopenharmony_ci * If the in-service queue is empty but the function bfq_better_to_idle
463162306a36Sopenharmony_ci * returns true, then:
463262306a36Sopenharmony_ci * 1) the queue must remain in service and cannot be expired, and
463362306a36Sopenharmony_ci * 2) the device must be idled to wait for the possible arrival of a new
463462306a36Sopenharmony_ci *    request for the queue.
463562306a36Sopenharmony_ci * See the comments on the function bfq_better_to_idle for the reasons
463662306a36Sopenharmony_ci * why performing device idling is the best choice to boost the throughput
463762306a36Sopenharmony_ci * and preserve service guarantees when bfq_better_to_idle itself
463862306a36Sopenharmony_ci * returns true.
463962306a36Sopenharmony_ci */
464062306a36Sopenharmony_cistatic bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
464162306a36Sopenharmony_ci{
464262306a36Sopenharmony_ci	return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_better_to_idle(bfqq);
464362306a36Sopenharmony_ci}
464462306a36Sopenharmony_ci
464562306a36Sopenharmony_ci/*
464662306a36Sopenharmony_ci * This function chooses the queue from which to pick the next extra
464762306a36Sopenharmony_ci * I/O request to inject, if it finds a compatible queue. See the
464862306a36Sopenharmony_ci * comments on bfq_update_inject_limit() for details on the injection
464962306a36Sopenharmony_ci * mechanism, and for the definitions of the quantities mentioned
465062306a36Sopenharmony_ci * below.
465162306a36Sopenharmony_ci */
465262306a36Sopenharmony_cistatic struct bfq_queue *
465362306a36Sopenharmony_cibfq_choose_bfqq_for_injection(struct bfq_data *bfqd)
465462306a36Sopenharmony_ci{
465562306a36Sopenharmony_ci	struct bfq_queue *bfqq, *in_serv_bfqq = bfqd->in_service_queue;
465662306a36Sopenharmony_ci	unsigned int limit = in_serv_bfqq->inject_limit;
465762306a36Sopenharmony_ci	int i;
465862306a36Sopenharmony_ci
465962306a36Sopenharmony_ci	/*
466062306a36Sopenharmony_ci	 * If
466162306a36Sopenharmony_ci	 * - bfqq is not weight-raised and therefore does not carry
466262306a36Sopenharmony_ci	 *   time-critical I/O,
466362306a36Sopenharmony_ci	 * or
466462306a36Sopenharmony_ci	 * - regardless of whether bfqq is weight-raised, bfqq has
466562306a36Sopenharmony_ci	 *   however a long think time, during which it can absorb the
466662306a36Sopenharmony_ci	 *   effect of an appropriate number of extra I/O requests
466762306a36Sopenharmony_ci	 *   from other queues (see bfq_update_inject_limit for
466862306a36Sopenharmony_ci	 *   details on the computation of this number);
466962306a36Sopenharmony_ci	 * then injection can be performed without restrictions.
467062306a36Sopenharmony_ci	 */
467162306a36Sopenharmony_ci	bool in_serv_always_inject = in_serv_bfqq->wr_coeff == 1 ||
467262306a36Sopenharmony_ci		!bfq_bfqq_has_short_ttime(in_serv_bfqq);
467362306a36Sopenharmony_ci
467462306a36Sopenharmony_ci	/*
467562306a36Sopenharmony_ci	 * If
467662306a36Sopenharmony_ci	 * - the baseline total service time could not be sampled yet,
467762306a36Sopenharmony_ci	 *   so the inject limit happens to be still 0, and
467862306a36Sopenharmony_ci	 * - a lot of time has elapsed since the plugging of I/O
467962306a36Sopenharmony_ci	 *   dispatching started, so drive speed is being wasted
468062306a36Sopenharmony_ci	 *   significantly;
468162306a36Sopenharmony_ci	 * then temporarily raise inject limit to one request.
468262306a36Sopenharmony_ci	 */
468362306a36Sopenharmony_ci	if (limit == 0 && in_serv_bfqq->last_serv_time_ns == 0 &&
468462306a36Sopenharmony_ci	    bfq_bfqq_wait_request(in_serv_bfqq) &&
468562306a36Sopenharmony_ci	    time_is_before_eq_jiffies(bfqd->last_idling_start_jiffies +
468662306a36Sopenharmony_ci				      bfqd->bfq_slice_idle)
468762306a36Sopenharmony_ci		)
468862306a36Sopenharmony_ci		limit = 1;
468962306a36Sopenharmony_ci
469062306a36Sopenharmony_ci	if (bfqd->tot_rq_in_driver >= limit)
469162306a36Sopenharmony_ci		return NULL;
469262306a36Sopenharmony_ci
469362306a36Sopenharmony_ci	/*
469462306a36Sopenharmony_ci	 * Linear search of the source queue for injection; but, with
469562306a36Sopenharmony_ci	 * a high probability, very few steps are needed to find a
469662306a36Sopenharmony_ci	 * candidate queue, i.e., a queue with enough budget left for
469762306a36Sopenharmony_ci	 * its next request. In fact:
469862306a36Sopenharmony_ci	 * - BFQ dynamically updates the budget of every queue so as
469962306a36Sopenharmony_ci	 *   to accommodate the expected backlog of the queue;
470062306a36Sopenharmony_ci	 * - if a queue gets all its requests dispatched as injected
470162306a36Sopenharmony_ci	 *   service, then the queue is removed from the active list
470262306a36Sopenharmony_ci	 *   (and re-added only if it gets new requests, but then it
470362306a36Sopenharmony_ci	 *   is assigned again enough budget for its new backlog).
470462306a36Sopenharmony_ci	 */
470562306a36Sopenharmony_ci	for (i = 0; i < bfqd->num_actuators; i++) {
470662306a36Sopenharmony_ci		list_for_each_entry(bfqq, &bfqd->active_list[i], bfqq_list)
470762306a36Sopenharmony_ci			if (!RB_EMPTY_ROOT(&bfqq->sort_list) &&
470862306a36Sopenharmony_ci				(in_serv_always_inject || bfqq->wr_coeff > 1) &&
470962306a36Sopenharmony_ci				bfq_serv_to_charge(bfqq->next_rq, bfqq) <=
471062306a36Sopenharmony_ci				bfq_bfqq_budget_left(bfqq)) {
471162306a36Sopenharmony_ci			/*
471262306a36Sopenharmony_ci			 * Allow for only one large in-flight request
471362306a36Sopenharmony_ci			 * on non-rotational devices, for the
471462306a36Sopenharmony_ci			 * following reason. On non-rotational drives,
471562306a36Sopenharmony_ci			 * large requests take much longer than
471662306a36Sopenharmony_ci			 * smaller requests to be served. In addition,
471762306a36Sopenharmony_ci			 * the drive prefers to serve large requests
471862306a36Sopenharmony_ci			 * over small ones, if it can choose. So,
471962306a36Sopenharmony_ci			 * having more than one large request queued
472062306a36Sopenharmony_ci			 * in the drive may easily make the next
472162306a36Sopenharmony_ci			 * request of the in-service queue wait so
472262306a36Sopenharmony_ci			 * long as to break bfqq's service guarantees. On
472362306a36Sopenharmony_ci			 * the bright side, large requests let the
472462306a36Sopenharmony_ci			 * drive reach a very high throughput, even if
472562306a36Sopenharmony_ci			 * there is only one in-flight large request
472662306a36Sopenharmony_ci			 * at a time.
472762306a36Sopenharmony_ci			 */
472862306a36Sopenharmony_ci			if (blk_queue_nonrot(bfqd->queue) &&
472962306a36Sopenharmony_ci			    blk_rq_sectors(bfqq->next_rq) >=
473062306a36Sopenharmony_ci			    BFQQ_SECT_THR_NONROT &&
473162306a36Sopenharmony_ci			    bfqd->tot_rq_in_driver >= 1)
473262306a36Sopenharmony_ci				continue;
473362306a36Sopenharmony_ci			else {
473462306a36Sopenharmony_ci				bfqd->rqs_injected = true;
473562306a36Sopenharmony_ci				return bfqq;
473662306a36Sopenharmony_ci			}
473762306a36Sopenharmony_ci		}
473862306a36Sopenharmony_ci	}
473962306a36Sopenharmony_ci
474062306a36Sopenharmony_ci	return NULL;
474162306a36Sopenharmony_ci}
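/*
 * Editor's illustrative sketch, not part of BFQ: the per-candidate
 * acceptance test applied inside the search loop above, over a
 * simplified queue model. The struct and all names are invented for
 * the example.
 */
#if 0
#include <stdbool.h>

struct candidate {
	bool has_requests;	/* non-empty sort list */
	bool weight_raised;	/* wr_coeff > 1 */
	long service_to_charge;	/* cost of its next request */
	long budget_left;
	long next_rq_sectors;
};

static bool accept_for_injection(const struct candidate *c,
				 bool in_serv_always_inject,
				 bool nonrot_device, long sect_thr_nonrot,
				 int tot_rq_in_driver)
{
	/* must have I/O, be allowed to inject, and afford its next request */
	if (!c->has_requests ||
	    !(in_serv_always_inject || c->weight_raised) ||
	    c->service_to_charge > c->budget_left)
		return false;

	/* on non-rotational devices, allow only one large in-flight request */
	if (nonrot_device && c->next_rq_sectors >= sect_thr_nonrot &&
	    tot_rq_in_driver >= 1)
		return false;

	return true;
}
#endif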
474262306a36Sopenharmony_ci
474362306a36Sopenharmony_cistatic struct bfq_queue *
474462306a36Sopenharmony_cibfq_find_active_bfqq_for_actuator(struct bfq_data *bfqd, int idx)
474562306a36Sopenharmony_ci{
474662306a36Sopenharmony_ci	struct bfq_queue *bfqq;
474762306a36Sopenharmony_ci
474862306a36Sopenharmony_ci	if (bfqd->in_service_queue &&
474962306a36Sopenharmony_ci	    bfqd->in_service_queue->actuator_idx == idx)
475062306a36Sopenharmony_ci		return bfqd->in_service_queue;
475162306a36Sopenharmony_ci
475262306a36Sopenharmony_ci	list_for_each_entry(bfqq, &bfqd->active_list[idx], bfqq_list) {
475362306a36Sopenharmony_ci		if (!RB_EMPTY_ROOT(&bfqq->sort_list) &&
475462306a36Sopenharmony_ci			bfq_serv_to_charge(bfqq->next_rq, bfqq) <=
475562306a36Sopenharmony_ci				bfq_bfqq_budget_left(bfqq)) {
475662306a36Sopenharmony_ci			return bfqq;
475762306a36Sopenharmony_ci		}
475862306a36Sopenharmony_ci	}
475962306a36Sopenharmony_ci
476062306a36Sopenharmony_ci	return NULL;
476162306a36Sopenharmony_ci}
476262306a36Sopenharmony_ci
476362306a36Sopenharmony_ci/*
476462306a36Sopenharmony_ci * Perform a linear scan of each actuator, until an actuator is found
476562306a36Sopenharmony_ci * for which the following three conditions hold: the load of the
476662306a36Sopenharmony_ci * actuator is below the threshold (see comments on
476762306a36Sopenharmony_ci * actuator_load_threshold for details) and lower than that of the
476862306a36Sopenharmony_ci * next actuator (comments on this extra condition below), and there
476962306a36Sopenharmony_ci * is a queue that contains I/O for that actuator. On success, return
477062306a36Sopenharmony_ci * that queue.
477162306a36Sopenharmony_ci *
477262306a36Sopenharmony_ci * Performing a plain linear scan entails a prioritization among
477362306a36Sopenharmony_ci * actuators. The extra condition above breaks this prioritization and
477462306a36Sopenharmony_ci * tends to distribute injection uniformly across actuators.
477562306a36Sopenharmony_ci */
477662306a36Sopenharmony_cistatic struct bfq_queue *
477762306a36Sopenharmony_cibfq_find_bfqq_for_underused_actuator(struct bfq_data *bfqd)
477862306a36Sopenharmony_ci{
477962306a36Sopenharmony_ci	int i;
478062306a36Sopenharmony_ci
478162306a36Sopenharmony_ci	for (i = 0 ; i < bfqd->num_actuators; i++) {
478262306a36Sopenharmony_ci		if (bfqd->rq_in_driver[i] < bfqd->actuator_load_threshold &&
478362306a36Sopenharmony_ci		    (i == bfqd->num_actuators - 1 ||
478462306a36Sopenharmony_ci		     bfqd->rq_in_driver[i] < bfqd->rq_in_driver[i+1])) {
478562306a36Sopenharmony_ci			struct bfq_queue *bfqq =
478662306a36Sopenharmony_ci				bfq_find_active_bfqq_for_actuator(bfqd, i);
478762306a36Sopenharmony_ci
478862306a36Sopenharmony_ci			if (bfqq)
478962306a36Sopenharmony_ci				return bfqq;
479062306a36Sopenharmony_ci		}
479162306a36Sopenharmony_ci	}
479262306a36Sopenharmony_ci
479362306a36Sopenharmony_ci	return NULL;
479462306a36Sopenharmony_ci}
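/*
 * Editor's illustrative sketch, not part of BFQ: the underused-actuator
 * scan above, modeled over plain arrays. The "lower than the next
 * actuator" check is what spreads injection across actuators instead
 * of always favoring the lowest index. Names are invented for the
 * example.
 */
#if 0
static int pick_underused_actuator(const int *rq_in_driver, int num_actuators,
				   int load_threshold)
{
	int i;

	for (i = 0; i < num_actuators; i++) {
		if (rq_in_driver[i] < load_threshold &&
		    (i == num_actuators - 1 ||
		     rq_in_driver[i] < rq_in_driver[i + 1]))
			return i; /* caller then looks for a queue with I/O for i */
	}

	return -1; /* no underused actuator found */
}
#endif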
479562306a36Sopenharmony_ci
479662306a36Sopenharmony_ci
479762306a36Sopenharmony_ci/*
479862306a36Sopenharmony_ci * Select a queue for service.  If we have a current queue in service,
479962306a36Sopenharmony_ci * check whether to continue servicing it, or retrieve and set a new one.
480062306a36Sopenharmony_ci */
480162306a36Sopenharmony_cistatic struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
480262306a36Sopenharmony_ci{
480362306a36Sopenharmony_ci	struct bfq_queue *bfqq, *inject_bfqq;
480462306a36Sopenharmony_ci	struct request *next_rq;
480562306a36Sopenharmony_ci	enum bfqq_expiration reason = BFQQE_BUDGET_TIMEOUT;
480662306a36Sopenharmony_ci
480762306a36Sopenharmony_ci	bfqq = bfqd->in_service_queue;
480862306a36Sopenharmony_ci	if (!bfqq)
480962306a36Sopenharmony_ci		goto new_queue;
481062306a36Sopenharmony_ci
481162306a36Sopenharmony_ci	bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
481262306a36Sopenharmony_ci
481362306a36Sopenharmony_ci	/*
481462306a36Sopenharmony_ci	 * Do not expire bfqq for budget timeout if bfqq may be about
481562306a36Sopenharmony_ci	 * to enjoy device idling. The reason why, in this case, we
481662306a36Sopenharmony_ci	 * prevent bfqq from expiring is the same as in the comments
481762306a36Sopenharmony_ci	 * on the case where bfq_bfqq_must_idle() returns true, in
481862306a36Sopenharmony_ci	 * bfq_completed_request().
481962306a36Sopenharmony_ci	 */
482062306a36Sopenharmony_ci	if (bfq_may_expire_for_budg_timeout(bfqq) &&
482162306a36Sopenharmony_ci	    !bfq_bfqq_must_idle(bfqq))
482262306a36Sopenharmony_ci		goto expire;
482362306a36Sopenharmony_ci
482462306a36Sopenharmony_cicheck_queue:
482562306a36Sopenharmony_ci	/*
482662306a36Sopenharmony_ci	 *  If some actuator is underutilized, but the in-service
482762306a36Sopenharmony_ci	 *  queue does not contain I/O for that actuator, then try to
482862306a36Sopenharmony_ci	 *  inject I/O for that actuator.
482962306a36Sopenharmony_ci	 */
483062306a36Sopenharmony_ci	inject_bfqq = bfq_find_bfqq_for_underused_actuator(bfqd);
483162306a36Sopenharmony_ci	if (inject_bfqq && inject_bfqq != bfqq)
483262306a36Sopenharmony_ci		return inject_bfqq;
483362306a36Sopenharmony_ci
483462306a36Sopenharmony_ci	/*
483562306a36Sopenharmony_ci	 * This loop is rarely executed more than once. Even when it
483662306a36Sopenharmony_ci	 * happens, it is much more convenient to re-execute this loop
483762306a36Sopenharmony_ci	 * than to return NULL and trigger a new dispatch to get a
483862306a36Sopenharmony_ci	 * request served.
483962306a36Sopenharmony_ci	 */
484062306a36Sopenharmony_ci	next_rq = bfqq->next_rq;
484162306a36Sopenharmony_ci	/*
484262306a36Sopenharmony_ci	 * If bfqq has requests queued and it has enough budget left to
484362306a36Sopenharmony_ci	 * serve them, keep the queue, otherwise expire it.
484462306a36Sopenharmony_ci	 */
484562306a36Sopenharmony_ci	if (next_rq) {
484662306a36Sopenharmony_ci		if (bfq_serv_to_charge(next_rq, bfqq) >
484762306a36Sopenharmony_ci			bfq_bfqq_budget_left(bfqq)) {
484862306a36Sopenharmony_ci			/*
484962306a36Sopenharmony_ci			 * Expire the queue for budget exhaustion,
485062306a36Sopenharmony_ci			 * which makes sure that the next budget is
485162306a36Sopenharmony_ci			 * enough to serve the next request, even if
485262306a36Sopenharmony_ci			 * it comes from the fifo expired path.
485362306a36Sopenharmony_ci			 */
485462306a36Sopenharmony_ci			reason = BFQQE_BUDGET_EXHAUSTED;
485562306a36Sopenharmony_ci			goto expire;
485662306a36Sopenharmony_ci		} else {
485762306a36Sopenharmony_ci			/*
485862306a36Sopenharmony_ci			 * The idle timer may be pending because we may
485962306a36Sopenharmony_ci			 * not disable disk idling even when a new request
486062306a36Sopenharmony_ci			 * arrives.
486162306a36Sopenharmony_ci			 */
486262306a36Sopenharmony_ci			if (bfq_bfqq_wait_request(bfqq)) {
486362306a36Sopenharmony_ci				/*
486462306a36Sopenharmony_ci				 * If we get here: 1) at least one new request
486562306a36Sopenharmony_ci				 * has arrived but we have not disabled the
486662306a36Sopenharmony_ci				 * timer because the request was too small,
486762306a36Sopenharmony_ci				 * 2) then the block layer has unplugged
486862306a36Sopenharmony_ci				 * the device, causing the dispatch to be
486962306a36Sopenharmony_ci				 * invoked.
487062306a36Sopenharmony_ci				 *
487162306a36Sopenharmony_ci				 * Since the device is unplugged, now the
487262306a36Sopenharmony_ci				 * requests are probably large enough to
487362306a36Sopenharmony_ci				 * provide a reasonable throughput.
487462306a36Sopenharmony_ci				 * So we disable idling.
487562306a36Sopenharmony_ci				 */
487662306a36Sopenharmony_ci				bfq_clear_bfqq_wait_request(bfqq);
487762306a36Sopenharmony_ci				hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
487862306a36Sopenharmony_ci			}
487962306a36Sopenharmony_ci			goto keep_queue;
488062306a36Sopenharmony_ci		}
488162306a36Sopenharmony_ci	}
488262306a36Sopenharmony_ci
488362306a36Sopenharmony_ci	/*
488462306a36Sopenharmony_ci	 * No requests pending. However, if the in-service queue is idling
488562306a36Sopenharmony_ci	 * for a new request, or has requests waiting for a completion and
488662306a36Sopenharmony_ci	 * may idle after their completion, then keep it anyway.
488762306a36Sopenharmony_ci	 *
488862306a36Sopenharmony_ci	 * Yet, inject service from other queues if it boosts
488962306a36Sopenharmony_ci	 * throughput and is possible.
489062306a36Sopenharmony_ci	 */
489162306a36Sopenharmony_ci	if (bfq_bfqq_wait_request(bfqq) ||
489262306a36Sopenharmony_ci	    (bfqq->dispatched != 0 && bfq_better_to_idle(bfqq))) {
489362306a36Sopenharmony_ci		unsigned int act_idx = bfqq->actuator_idx;
489462306a36Sopenharmony_ci		struct bfq_queue *async_bfqq = NULL;
489562306a36Sopenharmony_ci		struct bfq_queue *blocked_bfqq =
489662306a36Sopenharmony_ci			!hlist_empty(&bfqq->woken_list) ?
489762306a36Sopenharmony_ci			container_of(bfqq->woken_list.first,
489862306a36Sopenharmony_ci				     struct bfq_queue,
489962306a36Sopenharmony_ci				     woken_list_node)
490062306a36Sopenharmony_ci			: NULL;
490162306a36Sopenharmony_ci
490262306a36Sopenharmony_ci		if (bfqq->bic && bfqq->bic->bfqq[0][act_idx] &&
490362306a36Sopenharmony_ci		    bfq_bfqq_busy(bfqq->bic->bfqq[0][act_idx]) &&
490462306a36Sopenharmony_ci		    bfqq->bic->bfqq[0][act_idx]->next_rq)
490562306a36Sopenharmony_ci			async_bfqq = bfqq->bic->bfqq[0][act_idx];
490662306a36Sopenharmony_ci		/*
490762306a36Sopenharmony_ci		 * The next four mutually-exclusive ifs decide
490862306a36Sopenharmony_ci		 * whether to try injection, and choose the queue to
490962306a36Sopenharmony_ci		 * pick an I/O request from.
491062306a36Sopenharmony_ci		 *
491162306a36Sopenharmony_ci		 * The first if checks whether the process associated
491262306a36Sopenharmony_ci		 * with bfqq has also async I/O pending. If so, it
491362306a36Sopenharmony_ci		 * injects such I/O unconditionally. Injecting async
491462306a36Sopenharmony_ci		 * I/O from the same process can cause no harm to the
491562306a36Sopenharmony_ci		 * process. On the contrary, it can only increase
491662306a36Sopenharmony_ci		 * bandwidth and reduce latency for the process.
491762306a36Sopenharmony_ci		 *
491862306a36Sopenharmony_ci		 * The second if checks whether there happens to be a
491962306a36Sopenharmony_ci		 * non-empty waker queue for bfqq, i.e., a queue whose
492062306a36Sopenharmony_ci		 * I/O needs to be completed for bfqq to receive new
492162306a36Sopenharmony_ci		 * I/O. This happens, e.g., if bfqq is associated with
492262306a36Sopenharmony_ci		 * a process that does some sync. A sync generates
492362306a36Sopenharmony_ci		 * extra blocking I/O, which must be completed before
492462306a36Sopenharmony_ci		 * the process associated with bfqq can go on with its
492562306a36Sopenharmony_ci		 * I/O. If the I/O of the waker queue is not served,
492662306a36Sopenharmony_ci		 * then bfqq remains empty, and no I/O is dispatched,
492762306a36Sopenharmony_ci		 * until the idle timeout fires for bfqq. This is
492862306a36Sopenharmony_ci		 * likely to result in lower bandwidth and higher
492962306a36Sopenharmony_ci		 * latencies for bfqq, and in a severe loss of total
493062306a36Sopenharmony_ci		 * throughput. The best action to take is therefore to
493162306a36Sopenharmony_ci		 * serve the waker queue as soon as possible. So do it
493262306a36Sopenharmony_ci		 * (without relying on the third alternative below for
493362306a36Sopenharmony_ci		 * eventually serving waker_bfqq's I/O; see the last
493462306a36Sopenharmony_ci		 * paragraph for further details). This systematic
493562306a36Sopenharmony_ci		 * injection of I/O from the waker queue does not
493662306a36Sopenharmony_ci		 * cause any delay to bfqq's I/O. On the contrary,
493762306a36Sopenharmony_ci		 * bfqq's next I/O is brought forward dramatically,
493862306a36Sopenharmony_ci		 * for it is not blocked for milliseconds.
493962306a36Sopenharmony_ci		 *
494062306a36Sopenharmony_ci		 * The third if checks whether there is a queue woken
494162306a36Sopenharmony_ci		 * by bfqq, and currently with pending I/O. Such a
494262306a36Sopenharmony_ci		 * woken queue does not steal bandwidth from bfqq,
494362306a36Sopenharmony_ci		 * because it soon runs out of I/O if bfqq is not
494462306a36Sopenharmony_ci		 * served. So there is virtually no risk of loss of
494562306a36Sopenharmony_ci		 * bandwidth for bfqq if this woken queue has I/O
494662306a36Sopenharmony_ci		 * dispatched while bfqq is waiting for new I/O.
494762306a36Sopenharmony_ci		 *
494862306a36Sopenharmony_ci		 * The fourth if checks whether bfqq is a queue for
494962306a36Sopenharmony_ci		 * which it is better to avoid injection. It is so if
495062306a36Sopenharmony_ci		 * bfqq delivers more throughput when served without
495162306a36Sopenharmony_ci		 * any further I/O from other queues in the middle, or
495262306a36Sopenharmony_ci		 * if the service times of bfqq's I/O requests both
495362306a36Sopenharmony_ci		 * count more than overall throughput, and may be
495462306a36Sopenharmony_ci		 * easily increased by injection (this happens if bfqq
495562306a36Sopenharmony_ci		 * has a short think time). If none of these
495662306a36Sopenharmony_ci		 * conditions holds, then a candidate queue for
495762306a36Sopenharmony_ci		 * injection is looked for through
495862306a36Sopenharmony_ci		 * bfq_choose_bfqq_for_injection(). Note that the
495962306a36Sopenharmony_ci		 * latter may return NULL (for example if the inject
496062306a36Sopenharmony_ci		 * limit for bfqq is currently 0).
496162306a36Sopenharmony_ci		 *
496262306a36Sopenharmony_ci		 * NOTE: motivation for the second alternative
496362306a36Sopenharmony_ci		 *
496462306a36Sopenharmony_ci		 * Thanks to the way the inject limit is updated in
496562306a36Sopenharmony_ci		 * bfq_update_has_short_ttime(), it is rather likely
496662306a36Sopenharmony_ci		 * that, if I/O is being plugged for bfqq and the
496762306a36Sopenharmony_ci		 * waker queue has pending I/O requests that are
496862306a36Sopenharmony_ci		 * blocking bfqq's I/O, then the fourth alternative
496962306a36Sopenharmony_ci		 * above lets the waker queue get served before the
497062306a36Sopenharmony_ci		 * I/O-plugging timeout fires. So one may deem the
497162306a36Sopenharmony_ci		 * second alternative superfluous. It is not, because
497262306a36Sopenharmony_ci		 * the fourth alternative may be way less effective in
497362306a36Sopenharmony_ci		 * case of a synchronization. For two main
497462306a36Sopenharmony_ci		 * reasons. First, throughput may be low because the
497562306a36Sopenharmony_ci		 * inject limit may be too low to guarantee the same
497662306a36Sopenharmony_ci		 * amount of injected I/O, from the waker queue or
497762306a36Sopenharmony_ci		 * other queues, that the second alternative
497862306a36Sopenharmony_ci		 * guarantees (the second alternative unconditionally
497962306a36Sopenharmony_ci		 * injects a pending I/O request of the waker queue
498062306a36Sopenharmony_ci		 * for each bfq_dispatch_request()). Second, with the
498162306a36Sopenharmony_ci		 * fourth alternative, the duration of the plugging,
498262306a36Sopenharmony_ci		 * i.e., the time before bfqq finally receives new I/O,
498362306a36Sopenharmony_ci		 * may not be minimized, because the waker queue may
498462306a36Sopenharmony_ci		 * happen to be served only after other queues.
498562306a36Sopenharmony_ci		 */
498662306a36Sopenharmony_ci		if (async_bfqq &&
498762306a36Sopenharmony_ci		    icq_to_bic(async_bfqq->next_rq->elv.icq) == bfqq->bic &&
498862306a36Sopenharmony_ci		    bfq_serv_to_charge(async_bfqq->next_rq, async_bfqq) <=
498962306a36Sopenharmony_ci		    bfq_bfqq_budget_left(async_bfqq))
499062306a36Sopenharmony_ci			bfqq = async_bfqq;
499162306a36Sopenharmony_ci		else if (bfqq->waker_bfqq &&
499262306a36Sopenharmony_ci			   bfq_bfqq_busy(bfqq->waker_bfqq) &&
499362306a36Sopenharmony_ci			   bfqq->waker_bfqq->next_rq &&
499462306a36Sopenharmony_ci			   bfq_serv_to_charge(bfqq->waker_bfqq->next_rq,
499562306a36Sopenharmony_ci					      bfqq->waker_bfqq) <=
499662306a36Sopenharmony_ci			   bfq_bfqq_budget_left(bfqq->waker_bfqq)
499762306a36Sopenharmony_ci			)
499862306a36Sopenharmony_ci			bfqq = bfqq->waker_bfqq;
499962306a36Sopenharmony_ci		else if (blocked_bfqq &&
500062306a36Sopenharmony_ci			   bfq_bfqq_busy(blocked_bfqq) &&
500162306a36Sopenharmony_ci			   blocked_bfqq->next_rq &&
500262306a36Sopenharmony_ci			   bfq_serv_to_charge(blocked_bfqq->next_rq,
500362306a36Sopenharmony_ci					      blocked_bfqq) <=
500462306a36Sopenharmony_ci			   bfq_bfqq_budget_left(blocked_bfqq)
500562306a36Sopenharmony_ci			)
500662306a36Sopenharmony_ci			bfqq = blocked_bfqq;
500762306a36Sopenharmony_ci		else if (!idling_boosts_thr_without_issues(bfqd, bfqq) &&
500862306a36Sopenharmony_ci			 (bfqq->wr_coeff == 1 || bfqd->wr_busy_queues > 1 ||
500962306a36Sopenharmony_ci			  !bfq_bfqq_has_short_ttime(bfqq)))
501062306a36Sopenharmony_ci			bfqq = bfq_choose_bfqq_for_injection(bfqd);
501162306a36Sopenharmony_ci		else
501262306a36Sopenharmony_ci			bfqq = NULL;
501362306a36Sopenharmony_ci
501462306a36Sopenharmony_ci		goto keep_queue;
501562306a36Sopenharmony_ci	}
501662306a36Sopenharmony_ci
501762306a36Sopenharmony_ci	reason = BFQQE_NO_MORE_REQUESTS;
501862306a36Sopenharmony_ciexpire:
501962306a36Sopenharmony_ci	bfq_bfqq_expire(bfqd, bfqq, false, reason);
502062306a36Sopenharmony_cinew_queue:
502162306a36Sopenharmony_ci	bfqq = bfq_set_in_service_queue(bfqd);
502262306a36Sopenharmony_ci	if (bfqq) {
502362306a36Sopenharmony_ci		bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue");
502462306a36Sopenharmony_ci		goto check_queue;
502562306a36Sopenharmony_ci	}
502662306a36Sopenharmony_cikeep_queue:
502762306a36Sopenharmony_ci	if (bfqq)
502862306a36Sopenharmony_ci		bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue");
502962306a36Sopenharmony_ci	else
503062306a36Sopenharmony_ci		bfq_log(bfqd, "select_queue: no queue returned");
503162306a36Sopenharmony_ci
503262306a36Sopenharmony_ci	return bfqq;
503362306a36Sopenharmony_ci}
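/*
 * Editor's illustrative sketch, not part of BFQ: the priority order of
 * the four mutually-exclusive injection choices evaluated above, over
 * a simplified model. All names are invented for the example, and the
 * budget/busy checks guarding each choice are folded into the
 * corresponding non-NULL argument.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>

struct q; /* opaque stand-in for a bfq_queue */

static struct q *pick_injection_source(struct q *async_same_process,
				       struct q *busy_waker,
				       struct q *busy_woken,
				       struct q *(*generic_choice)(void),
				       bool idling_pointless_for_thr)
{
	if (async_same_process)		/* 1: async I/O of the same process */
		return async_same_process;
	if (busy_waker)			/* 2: waker queue blocking bfqq's I/O */
		return busy_waker;
	if (busy_woken)			/* 3: a queue woken by bfqq */
		return busy_woken;
	if (idling_pointless_for_thr)	/* 4: generic injection candidate */
		return generic_choice();
	return NULL;			/* keep idling, dispatch nothing */
}
#endif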
503462306a36Sopenharmony_ci
503562306a36Sopenharmony_cistatic void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
503662306a36Sopenharmony_ci{
503762306a36Sopenharmony_ci	struct bfq_entity *entity = &bfqq->entity;
503862306a36Sopenharmony_ci
503962306a36Sopenharmony_ci	if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
504062306a36Sopenharmony_ci		bfq_log_bfqq(bfqd, bfqq,
504162306a36Sopenharmony_ci			"raising period dur %u/%u msec, old coeff %u, w %d(%d)",
504262306a36Sopenharmony_ci			jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
504362306a36Sopenharmony_ci			jiffies_to_msecs(bfqq->wr_cur_max_time),
504462306a36Sopenharmony_ci			bfqq->wr_coeff,
504562306a36Sopenharmony_ci			bfqq->entity.weight, bfqq->entity.orig_weight);
504662306a36Sopenharmony_ci
504762306a36Sopenharmony_ci		if (entity->prio_changed)
504862306a36Sopenharmony_ci			bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
504962306a36Sopenharmony_ci
505062306a36Sopenharmony_ci		/*
505162306a36Sopenharmony_ci		 * If the queue was activated in a burst, or too much
505262306a36Sopenharmony_ci		 * time has elapsed since the beginning of this
505362306a36Sopenharmony_ci		 * weight-raising period, then end weight raising.
505462306a36Sopenharmony_ci		 */
505562306a36Sopenharmony_ci		if (bfq_bfqq_in_large_burst(bfqq))
505662306a36Sopenharmony_ci			bfq_bfqq_end_wr(bfqq);
505762306a36Sopenharmony_ci		else if (time_is_before_jiffies(bfqq->last_wr_start_finish +
505862306a36Sopenharmony_ci						bfqq->wr_cur_max_time)) {
505962306a36Sopenharmony_ci			if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time ||
506062306a36Sopenharmony_ci			time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt +
506162306a36Sopenharmony_ci					       bfq_wr_duration(bfqd))) {
506262306a36Sopenharmony_ci				/*
506362306a36Sopenharmony_ci				 * Either in interactive weight
506462306a36Sopenharmony_ci				 * raising, or in soft_rt weight
506562306a36Sopenharmony_ci				 * raising with the
506662306a36Sopenharmony_ci				 * interactive-weight-raising period
506762306a36Sopenharmony_ci				 * elapsed (so no switch back to
506862306a36Sopenharmony_ci				 * interactive weight raising).
506962306a36Sopenharmony_ci				 */
507062306a36Sopenharmony_ci				bfq_bfqq_end_wr(bfqq);
507162306a36Sopenharmony_ci			} else { /*
507262306a36Sopenharmony_ci				  * soft_rt finishing while still in
507362306a36Sopenharmony_ci				  * interactive period, switch back to
507462306a36Sopenharmony_ci				  * interactive weight raising
507562306a36Sopenharmony_ci				  */
507662306a36Sopenharmony_ci				switch_back_to_interactive_wr(bfqq, bfqd);
507762306a36Sopenharmony_ci				bfqq->entity.prio_changed = 1;
507862306a36Sopenharmony_ci			}
507962306a36Sopenharmony_ci		}
508062306a36Sopenharmony_ci		if (bfqq->wr_coeff > 1 &&
508162306a36Sopenharmony_ci		    bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time &&
508262306a36Sopenharmony_ci		    bfqq->service_from_wr > max_service_from_wr) {
508362306a36Sopenharmony_ci			/* see comments on max_service_from_wr */
508462306a36Sopenharmony_ci			bfq_bfqq_end_wr(bfqq);
508562306a36Sopenharmony_ci		}
508662306a36Sopenharmony_ci	}
508762306a36Sopenharmony_ci	/*
508862306a36Sopenharmony_ci	 * To improve latency (for this or other queues), immediately
508962306a36Sopenharmony_ci	 * update weight both if it must be raised and if it must be
509062306a36Sopenharmony_ci	 * lowered. Since entity may be on some active tree here, and
509162306a36Sopenharmony_ci	 * might have a pending change of its ioprio class, invoke
509262306a36Sopenharmony_ci	 * next function with the last parameter unset (see the
509362306a36Sopenharmony_ci	 * comments on the function).
509462306a36Sopenharmony_ci	 */
509562306a36Sopenharmony_ci	if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1))
509662306a36Sopenharmony_ci		__bfq_entity_update_weight_prio(bfq_entity_service_tree(entity),
509762306a36Sopenharmony_ci						entity, false);
509862306a36Sopenharmony_ci}
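/*
 * Editor's illustrative sketch, not part of BFQ: the conditions,
 * checked above, under which weight raising ends for a queue. The
 * names and the plain-integer time model are invented for the
 * example.
 */
#if 0
#include <stdbool.h>

static bool wr_should_end(bool in_large_burst,
			  long now, long wr_start, long wr_max_time,
			  bool in_soft_rt_wr, bool interactive_period_elapsed,
			  long service_from_wr, long max_service_from_wr)
{
	if (in_large_burst)
		return true;	/* queues activated in a burst lose the raising */
	if (now - wr_start > wr_max_time &&
	    (!in_soft_rt_wr || interactive_period_elapsed))
		return true;	/* raising period over, no switch back possible */
	if (!in_soft_rt_wr && service_from_wr > max_service_from_wr)
		return true;	/* cap on total service received while raised */
	return false;
}
#endif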
509962306a36Sopenharmony_ci
510062306a36Sopenharmony_ci/*
510162306a36Sopenharmony_ci * Dispatch next request from bfqq.
510262306a36Sopenharmony_ci */
510362306a36Sopenharmony_cistatic struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
510462306a36Sopenharmony_ci						 struct bfq_queue *bfqq)
510562306a36Sopenharmony_ci{
510662306a36Sopenharmony_ci	struct request *rq = bfqq->next_rq;
510762306a36Sopenharmony_ci	unsigned long service_to_charge;
510862306a36Sopenharmony_ci
510962306a36Sopenharmony_ci	service_to_charge = bfq_serv_to_charge(rq, bfqq);
511062306a36Sopenharmony_ci
511162306a36Sopenharmony_ci	bfq_bfqq_served(bfqq, service_to_charge);
511262306a36Sopenharmony_ci
511362306a36Sopenharmony_ci	if (bfqq == bfqd->in_service_queue && bfqd->wait_dispatch) {
511462306a36Sopenharmony_ci		bfqd->wait_dispatch = false;
511562306a36Sopenharmony_ci		bfqd->waited_rq = rq;
511662306a36Sopenharmony_ci	}
511762306a36Sopenharmony_ci
511862306a36Sopenharmony_ci	bfq_dispatch_remove(bfqd->queue, rq);
511962306a36Sopenharmony_ci
512062306a36Sopenharmony_ci	if (bfqq != bfqd->in_service_queue)
512162306a36Sopenharmony_ci		return rq;
512262306a36Sopenharmony_ci
512362306a36Sopenharmony_ci	/*
512462306a36Sopenharmony_ci	 * If weight raising has to terminate for bfqq, then next
512562306a36Sopenharmony_ci	 * function causes an immediate update of bfqq's weight,
512662306a36Sopenharmony_ci	 * without waiting for next activation. As a consequence, on
512762306a36Sopenharmony_ci	 * expiration, bfqq will be timestamped as if it had never been
512862306a36Sopenharmony_ci	 * weight-raised during this service slot, even if it has
512962306a36Sopenharmony_ci	 * received part or even most of the service as a
513062306a36Sopenharmony_ci	 * weight-raised queue. This inflates bfqq's timestamps, which
513162306a36Sopenharmony_ci	 * is beneficial, as bfqq is then more willing to leave the
513262306a36Sopenharmony_ci	 * device immediately to possible other weight-raised queues.
513362306a36Sopenharmony_ci	 */
513462306a36Sopenharmony_ci	bfq_update_wr_data(bfqd, bfqq);
513562306a36Sopenharmony_ci
513662306a36Sopenharmony_ci	/*
513762306a36Sopenharmony_ci	 * Expire bfqq, pretending that its budget expired, if bfqq
513862306a36Sopenharmony_ci	 * belongs to CLASS_IDLE and other queues are waiting for
513962306a36Sopenharmony_ci	 * service.
514062306a36Sopenharmony_ci	 */
514162306a36Sopenharmony_ci	if (bfq_tot_busy_queues(bfqd) > 1 && bfq_class_idle(bfqq))
514262306a36Sopenharmony_ci		bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED);
514362306a36Sopenharmony_ci
514462306a36Sopenharmony_ci	return rq;
514562306a36Sopenharmony_ci}
514662306a36Sopenharmony_ci
514762306a36Sopenharmony_cistatic bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
514862306a36Sopenharmony_ci{
514962306a36Sopenharmony_ci	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
515062306a36Sopenharmony_ci
515162306a36Sopenharmony_ci	/*
515262306a36Sopenharmony_ci	 * Avoiding lock: a race on bfqd->queued should cause at
515362306a36Sopenharmony_ci	 * most a call to dispatch for nothing
515462306a36Sopenharmony_ci	 */
515562306a36Sopenharmony_ci	return !list_empty_careful(&bfqd->dispatch) ||
515662306a36Sopenharmony_ci		READ_ONCE(bfqd->queued);
515762306a36Sopenharmony_ci}
515862306a36Sopenharmony_ci
515962306a36Sopenharmony_cistatic struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
516062306a36Sopenharmony_ci{
516162306a36Sopenharmony_ci	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
516262306a36Sopenharmony_ci	struct request *rq = NULL;
516362306a36Sopenharmony_ci	struct bfq_queue *bfqq = NULL;
516462306a36Sopenharmony_ci
516562306a36Sopenharmony_ci	if (!list_empty(&bfqd->dispatch)) {
516662306a36Sopenharmony_ci		rq = list_first_entry(&bfqd->dispatch, struct request,
516762306a36Sopenharmony_ci				      queuelist);
516862306a36Sopenharmony_ci		list_del_init(&rq->queuelist);
516962306a36Sopenharmony_ci
517062306a36Sopenharmony_ci		bfqq = RQ_BFQQ(rq);
517162306a36Sopenharmony_ci
517262306a36Sopenharmony_ci		if (bfqq) {
517362306a36Sopenharmony_ci			/*
517462306a36Sopenharmony_ci			 * Increment counters here, because this
517562306a36Sopenharmony_ci			 * dispatch does not follow the standard
517662306a36Sopenharmony_ci			 * dispatch flow (where counters are
517762306a36Sopenharmony_ci			 * incremented)
517862306a36Sopenharmony_ci			 */
517962306a36Sopenharmony_ci			bfqq->dispatched++;
518062306a36Sopenharmony_ci
518162306a36Sopenharmony_ci			goto inc_in_driver_start_rq;
518262306a36Sopenharmony_ci		}
518362306a36Sopenharmony_ci
518462306a36Sopenharmony_ci		/*
518562306a36Sopenharmony_ci		 * We exploit the bfq_finish_requeue_request hook to
518662306a36Sopenharmony_ci		 * decrement tot_rq_in_driver, but
518762306a36Sopenharmony_ci		 * bfq_finish_requeue_request will not be invoked on
518862306a36Sopenharmony_ci		 * this request. So, to avoid unbalance, just start
518962306a36Sopenharmony_ci		 * this request, without incrementing tot_rq_in_driver. As
519062306a36Sopenharmony_ci		 * a negative consequence, tot_rq_in_driver is deceptively
519162306a36Sopenharmony_ci		 * lower than it should be while this request is in
519262306a36Sopenharmony_ci		 * service. This may cause bfq_schedule_dispatch to be
519362306a36Sopenharmony_ci		 * invoked uselessly.
519462306a36Sopenharmony_ci		 *
519562306a36Sopenharmony_ci		 * As for implementing an exact solution, the
519662306a36Sopenharmony_ci		 * bfq_finish_requeue_request hook, if defined, is
519762306a36Sopenharmony_ci		 * probably invoked also on this request. So, by
519862306a36Sopenharmony_ci		 * exploiting this hook, we could 1) increment
519962306a36Sopenharmony_ci		 * tot_rq_in_driver here, and 2) decrement it in
520062306a36Sopenharmony_ci		 * bfq_finish_requeue_request. Such a solution would
520162306a36Sopenharmony_ci		 * let the value of the counter be always accurate,
520262306a36Sopenharmony_ci		 * but it would entail using an extra interface
520362306a36Sopenharmony_ci		 * function. This cost seems higher than the benefit,
520462306a36Sopenharmony_ci		 * given that the frequency of non-elevator-private
520562306a36Sopenharmony_ci		 * requests is very low.
520662306a36Sopenharmony_ci		 */
520762306a36Sopenharmony_ci		goto start_rq;
520862306a36Sopenharmony_ci	}
520962306a36Sopenharmony_ci
521062306a36Sopenharmony_ci	bfq_log(bfqd, "dispatch requests: %d busy queues",
521162306a36Sopenharmony_ci		bfq_tot_busy_queues(bfqd));
521262306a36Sopenharmony_ci
521362306a36Sopenharmony_ci	if (bfq_tot_busy_queues(bfqd) == 0)
521462306a36Sopenharmony_ci		goto exit;
521562306a36Sopenharmony_ci
521662306a36Sopenharmony_ci	/*
521762306a36Sopenharmony_ci	 * Force device to serve one request at a time if
521862306a36Sopenharmony_ci	 * strict_guarantees is true. Forcing this service scheme is
521962306a36Sopenharmony_ci	 * currently the ONLY way to guarantee that the request
522062306a36Sopenharmony_ci	 * service order enforced by the scheduler is respected by a
522162306a36Sopenharmony_ci	 * queueing device. Otherwise the device is free even to make
522262306a36Sopenharmony_ci	 * some unlucky request wait for as long as the device
522362306a36Sopenharmony_ci	 * wishes.
522462306a36Sopenharmony_ci	 *
522562306a36Sopenharmony_ci	 * Of course, serving one request at a time may cause loss of
522662306a36Sopenharmony_ci	 * throughput.
522762306a36Sopenharmony_ci	 */
522862306a36Sopenharmony_ci	if (bfqd->strict_guarantees && bfqd->tot_rq_in_driver > 0)
522962306a36Sopenharmony_ci		goto exit;
523062306a36Sopenharmony_ci
523162306a36Sopenharmony_ci	bfqq = bfq_select_queue(bfqd);
523262306a36Sopenharmony_ci	if (!bfqq)
523362306a36Sopenharmony_ci		goto exit;
523462306a36Sopenharmony_ci
523562306a36Sopenharmony_ci	rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq);
523662306a36Sopenharmony_ci
523762306a36Sopenharmony_ci	if (rq) {
523862306a36Sopenharmony_ciinc_in_driver_start_rq:
523962306a36Sopenharmony_ci		bfqd->rq_in_driver[bfqq->actuator_idx]++;
524062306a36Sopenharmony_ci		bfqd->tot_rq_in_driver++;
524162306a36Sopenharmony_cistart_rq:
524262306a36Sopenharmony_ci		rq->rq_flags |= RQF_STARTED;
524362306a36Sopenharmony_ci	}
524462306a36Sopenharmony_ciexit:
524562306a36Sopenharmony_ci	return rq;
524662306a36Sopenharmony_ci}
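/*
 * Editor's illustrative sketch, not part of BFQ: the high-level order
 * of checks performed by the dispatch path above, as a simplified
 * decision function. The enum and all names are invented for the
 * example.
 */
#if 0
#include <stdbool.h>

enum dispatch_outcome {
	TAKE_FROM_DISPATCH_LIST,	/* requests that bypassed the scheduler */
	NOTHING_TO_DO,			/* no busy queues */
	WAIT_STRICT_GUARANTEES,		/* serve one request at a time */
	SERVE_SELECTED_QUEUE,		/* normal path: select a queue */
};

static enum dispatch_outcome dispatch_decision(bool dispatch_list_nonempty,
					       int tot_busy_queues,
					       bool strict_guarantees,
					       int tot_rq_in_driver)
{
	if (dispatch_list_nonempty)
		return TAKE_FROM_DISPATCH_LIST;
	if (tot_busy_queues == 0)
		return NOTHING_TO_DO;
	if (strict_guarantees && tot_rq_in_driver > 0)
		return WAIT_STRICT_GUARANTEES;
	return SERVE_SELECTED_QUEUE;
}
#endif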
524762306a36Sopenharmony_ci
524862306a36Sopenharmony_ci#ifdef CONFIG_BFQ_CGROUP_DEBUG
524962306a36Sopenharmony_cistatic void bfq_update_dispatch_stats(struct request_queue *q,
525062306a36Sopenharmony_ci				      struct request *rq,
525162306a36Sopenharmony_ci				      struct bfq_queue *in_serv_queue,
525262306a36Sopenharmony_ci				      bool idle_timer_disabled)
525362306a36Sopenharmony_ci{
525462306a36Sopenharmony_ci	struct bfq_queue *bfqq = rq ? RQ_BFQQ(rq) : NULL;
525562306a36Sopenharmony_ci
525662306a36Sopenharmony_ci	if (!idle_timer_disabled && !bfqq)
525762306a36Sopenharmony_ci		return;
525862306a36Sopenharmony_ci
525962306a36Sopenharmony_ci	/*
526062306a36Sopenharmony_ci	 * rq and bfqq are guaranteed to exist until this function
526162306a36Sopenharmony_ci	 * ends, for the following reasons. First, rq can be
526262306a36Sopenharmony_ci	 * dispatched to the device, and then can be completed and
526362306a36Sopenharmony_ci	 * freed, only after this function ends. Second, rq cannot be
526462306a36Sopenharmony_ci	 * merged (and thus freed because of a merge) any longer,
526562306a36Sopenharmony_ci	 * because it has already started. Thus rq cannot be freed
526662306a36Sopenharmony_ci	 * before this function ends, and, since rq has a reference to
526762306a36Sopenharmony_ci	 * bfqq, the same guarantee holds for bfqq too.
526862306a36Sopenharmony_ci	 *
526962306a36Sopenharmony_ci	 * In addition, the following queue lock guarantees that
527062306a36Sopenharmony_ci	 * bfqq_group(bfqq) exists as well.
527162306a36Sopenharmony_ci	 */
527262306a36Sopenharmony_ci	spin_lock_irq(&q->queue_lock);
527362306a36Sopenharmony_ci	if (idle_timer_disabled)
527462306a36Sopenharmony_ci		/*
527562306a36Sopenharmony_ci		 * Since the idle timer has been disabled,
527662306a36Sopenharmony_ci		 * in_serv_queue contained some request when
527762306a36Sopenharmony_ci		 * __bfq_dispatch_request was invoked above, which
527862306a36Sopenharmony_ci		 * implies that rq was picked exactly from
527962306a36Sopenharmony_ci		 * in_serv_queue. Thus in_serv_queue == bfqq, and is
528062306a36Sopenharmony_ci		 * therefore guaranteed to exist because of the above
528162306a36Sopenharmony_ci		 * arguments.
528262306a36Sopenharmony_ci		 */
528362306a36Sopenharmony_ci		bfqg_stats_update_idle_time(bfqq_group(in_serv_queue));
528462306a36Sopenharmony_ci	if (bfqq) {
528562306a36Sopenharmony_ci		struct bfq_group *bfqg = bfqq_group(bfqq);
528662306a36Sopenharmony_ci
528762306a36Sopenharmony_ci		bfqg_stats_update_avg_queue_size(bfqg);
528862306a36Sopenharmony_ci		bfqg_stats_set_start_empty_time(bfqg);
528962306a36Sopenharmony_ci		bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
529062306a36Sopenharmony_ci	}
529162306a36Sopenharmony_ci	spin_unlock_irq(&q->queue_lock);
529262306a36Sopenharmony_ci}
529362306a36Sopenharmony_ci#else
529462306a36Sopenharmony_cistatic inline void bfq_update_dispatch_stats(struct request_queue *q,
529562306a36Sopenharmony_ci					     struct request *rq,
529662306a36Sopenharmony_ci					     struct bfq_queue *in_serv_queue,
529762306a36Sopenharmony_ci					     bool idle_timer_disabled) {}
529862306a36Sopenharmony_ci#endif /* CONFIG_BFQ_CGROUP_DEBUG */
529962306a36Sopenharmony_ci
530062306a36Sopenharmony_cistatic struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
530162306a36Sopenharmony_ci{
530262306a36Sopenharmony_ci	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
530362306a36Sopenharmony_ci	struct request *rq;
530462306a36Sopenharmony_ci	struct bfq_queue *in_serv_queue;
530562306a36Sopenharmony_ci	bool waiting_rq, idle_timer_disabled = false;
530662306a36Sopenharmony_ci
530762306a36Sopenharmony_ci	spin_lock_irq(&bfqd->lock);
530862306a36Sopenharmony_ci
530962306a36Sopenharmony_ci	in_serv_queue = bfqd->in_service_queue;
531062306a36Sopenharmony_ci	waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
531162306a36Sopenharmony_ci
531262306a36Sopenharmony_ci	rq = __bfq_dispatch_request(hctx);
531362306a36Sopenharmony_ci	if (in_serv_queue == bfqd->in_service_queue) {
531462306a36Sopenharmony_ci		idle_timer_disabled =
531562306a36Sopenharmony_ci			waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
531662306a36Sopenharmony_ci	}
531762306a36Sopenharmony_ci
531862306a36Sopenharmony_ci	spin_unlock_irq(&bfqd->lock);
531962306a36Sopenharmony_ci	bfq_update_dispatch_stats(hctx->queue, rq,
532062306a36Sopenharmony_ci			idle_timer_disabled ? in_serv_queue : NULL,
532162306a36Sopenharmony_ci				idle_timer_disabled);
532262306a36Sopenharmony_ci
532362306a36Sopenharmony_ci	return rq;
532462306a36Sopenharmony_ci}
532562306a36Sopenharmony_ci
532662306a36Sopenharmony_ci/*
532762306a36Sopenharmony_ci * Task holds one reference to the queue, dropped when task exits.  Each rq
532862306a36Sopenharmony_ci * in-flight on this queue also holds a reference, dropped when rq is freed.
532962306a36Sopenharmony_ci *
533062306a36Sopenharmony_ci * Scheduler lock must be held here. Recall not to use bfqq after calling
533162306a36Sopenharmony_ci * this function on it.
533262306a36Sopenharmony_ci */
533362306a36Sopenharmony_civoid bfq_put_queue(struct bfq_queue *bfqq)
533462306a36Sopenharmony_ci{
533562306a36Sopenharmony_ci	struct bfq_queue *item;
533662306a36Sopenharmony_ci	struct hlist_node *n;
533762306a36Sopenharmony_ci	struct bfq_group *bfqg = bfqq_group(bfqq);
533862306a36Sopenharmony_ci
533962306a36Sopenharmony_ci	bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", bfqq, bfqq->ref);
534062306a36Sopenharmony_ci
534162306a36Sopenharmony_ci	bfqq->ref--;
534262306a36Sopenharmony_ci	if (bfqq->ref)
534362306a36Sopenharmony_ci		return;
534462306a36Sopenharmony_ci
534562306a36Sopenharmony_ci	if (!hlist_unhashed(&bfqq->burst_list_node)) {
534662306a36Sopenharmony_ci		hlist_del_init(&bfqq->burst_list_node);
534762306a36Sopenharmony_ci		/*
534862306a36Sopenharmony_ci		 * Decrement also burst size after the removal, if the
534962306a36Sopenharmony_ci		 * process associated with bfqq is exiting, and thus
535062306a36Sopenharmony_ci		 * does not contribute to the burst any longer. This
535162306a36Sopenharmony_ci		 * decrement helps filter out false positives of large
535262306a36Sopenharmony_ci		 * bursts, when some short-lived process (often due to
535362306a36Sopenharmony_ci		 * the execution of commands by some service) happens
535462306a36Sopenharmony_ci		 * to start and exit while a complex application is
535562306a36Sopenharmony_ci		 * starting, and thus spawning several processes that
535662306a36Sopenharmony_ci		 * do I/O (and that *must not* be treated as a large
535762306a36Sopenharmony_ci		 * burst, see comments on bfq_handle_burst).
535862306a36Sopenharmony_ci		 *
535962306a36Sopenharmony_ci		 * In particular, the decrement is performed only if:
536062306a36Sopenharmony_ci		 * 1) bfqq is not a merged queue, because, if it is,
536162306a36Sopenharmony_ci		 * then this freeing of bfqq is not triggered by the exit
536262306a36Sopenharmony_ci		 * of the process bfqq is associated with, but exactly
536362306a36Sopenharmony_ci		 * by the fact that bfqq has just been merged.
536462306a36Sopenharmony_ci		 * 2) burst_size is greater than 0, to handle
536562306a36Sopenharmony_ci		 * unbalanced decrements. Unbalanced decrements may
536662306a36Sopenharmony_ci		 * happen in the following case: bfqq is inserted into
536762306a36Sopenharmony_ci		 * the current burst list--without incrementing
536862306a36Sopenharmony_ci		 * burst_size--because of a split, but the current
536962306a36Sopenharmony_ci		 * burst list is not the burst list bfqq belonged to
537062306a36Sopenharmony_ci		 * (see comments on the case of a split in
537162306a36Sopenharmony_ci		 * bfq_set_request).
537262306a36Sopenharmony_ci		 */
537362306a36Sopenharmony_ci		if (bfqq->bic && bfqq->bfqd->burst_size > 0)
537462306a36Sopenharmony_ci			bfqq->bfqd->burst_size--;
537562306a36Sopenharmony_ci	}
537662306a36Sopenharmony_ci
537762306a36Sopenharmony_ci	/*
537862306a36Sopenharmony_ci	 * bfqq does not exist any longer, so it cannot be woken by
537962306a36Sopenharmony_ci	 * any other queue, and cannot wake any other queue. Then bfqq
538062306a36Sopenharmony_ci	 * must be removed from the woken list of its possible waker
538162306a36Sopenharmony_ci	 * queue, and all queues in the woken list of bfqq must stop
538262306a36Sopenharmony_ci	 * having a waker queue. Strictly speaking, these updates
538362306a36Sopenharmony_ci	 * should be performed when bfqq is left with no I/O source
538462306a36Sopenharmony_ci	 * attached to it, which happens before bfqq gets freed. In
538562306a36Sopenharmony_ci	 * particular, this happens when the last process associated
538662306a36Sopenharmony_ci	 * with bfqq exits or gets associated with a different
538762306a36Sopenharmony_ci	 * queue. However, both events lead to bfqq being freed soon,
538862306a36Sopenharmony_ci	 * and dangling references would come out only after bfqq gets
538962306a36Sopenharmony_ci	 * freed. So these updates are done here, as a simple and safe
539062306a36Sopenharmony_ci	 * way to handle all cases.
539162306a36Sopenharmony_ci	 */
539262306a36Sopenharmony_ci	/* remove bfqq from woken list */
539362306a36Sopenharmony_ci	if (!hlist_unhashed(&bfqq->woken_list_node))
539462306a36Sopenharmony_ci		hlist_del_init(&bfqq->woken_list_node);
539562306a36Sopenharmony_ci
539662306a36Sopenharmony_ci	/* reset waker for all queues in woken list */
539762306a36Sopenharmony_ci	hlist_for_each_entry_safe(item, n, &bfqq->woken_list,
539862306a36Sopenharmony_ci				  woken_list_node) {
539962306a36Sopenharmony_ci		item->waker_bfqq = NULL;
540062306a36Sopenharmony_ci		hlist_del_init(&item->woken_list_node);
540162306a36Sopenharmony_ci	}
540262306a36Sopenharmony_ci
540362306a36Sopenharmony_ci	if (bfqq->bfqd->last_completed_rq_bfqq == bfqq)
540462306a36Sopenharmony_ci		bfqq->bfqd->last_completed_rq_bfqq = NULL;
540562306a36Sopenharmony_ci
540662306a36Sopenharmony_ci	WARN_ON_ONCE(!list_empty(&bfqq->fifo));
540762306a36Sopenharmony_ci	WARN_ON_ONCE(!RB_EMPTY_ROOT(&bfqq->sort_list));
540862306a36Sopenharmony_ci	WARN_ON_ONCE(bfqq->dispatched);
540962306a36Sopenharmony_ci
541062306a36Sopenharmony_ci	kmem_cache_free(bfq_pool, bfqq);
541162306a36Sopenharmony_ci	bfqg_and_blkg_put(bfqg);
541262306a36Sopenharmony_ci}
541362306a36Sopenharmony_ci
541462306a36Sopenharmony_cistatic void bfq_put_stable_ref(struct bfq_queue *bfqq)
541562306a36Sopenharmony_ci{
541662306a36Sopenharmony_ci	bfqq->stable_ref--;
541762306a36Sopenharmony_ci	bfq_put_queue(bfqq);
541862306a36Sopenharmony_ci}
541962306a36Sopenharmony_ci
542062306a36Sopenharmony_civoid bfq_put_cooperator(struct bfq_queue *bfqq)
542162306a36Sopenharmony_ci{
542262306a36Sopenharmony_ci	struct bfq_queue *__bfqq, *next;
542362306a36Sopenharmony_ci
542462306a36Sopenharmony_ci	/*
542562306a36Sopenharmony_ci	 * If this queue was scheduled to merge with another queue, be
542662306a36Sopenharmony_ci	 * sure to drop the reference taken on that queue (and others in
542762306a36Sopenharmony_ci	 * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
542862306a36Sopenharmony_ci	 */
542962306a36Sopenharmony_ci	__bfqq = bfqq->new_bfqq;
543062306a36Sopenharmony_ci	while (__bfqq) {
543162306a36Sopenharmony_ci		next = __bfqq->new_bfqq;
543262306a36Sopenharmony_ci		bfq_put_queue(__bfqq);
543362306a36Sopenharmony_ci		__bfqq = next;
543462306a36Sopenharmony_ci	}
543562306a36Sopenharmony_ci}
543662306a36Sopenharmony_ci
543762306a36Sopenharmony_cistatic void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
543862306a36Sopenharmony_ci{
543962306a36Sopenharmony_ci	if (bfqq == bfqd->in_service_queue) {
544062306a36Sopenharmony_ci		__bfq_bfqq_expire(bfqd, bfqq, BFQQE_BUDGET_TIMEOUT);
544162306a36Sopenharmony_ci		bfq_schedule_dispatch(bfqd);
544262306a36Sopenharmony_ci	}
544362306a36Sopenharmony_ci
544462306a36Sopenharmony_ci	bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
544562306a36Sopenharmony_ci
544662306a36Sopenharmony_ci	bfq_put_cooperator(bfqq);
544762306a36Sopenharmony_ci
544862306a36Sopenharmony_ci	bfq_release_process_ref(bfqd, bfqq);
544962306a36Sopenharmony_ci}
545062306a36Sopenharmony_ci
545162306a36Sopenharmony_cistatic void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync,
545262306a36Sopenharmony_ci			      unsigned int actuator_idx)
545362306a36Sopenharmony_ci{
545462306a36Sopenharmony_ci	struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync, actuator_idx);
545562306a36Sopenharmony_ci	struct bfq_data *bfqd;
545662306a36Sopenharmony_ci
545762306a36Sopenharmony_ci	if (bfqq)
545862306a36Sopenharmony_ci		bfqd = bfqq->bfqd; /* NULL if scheduler already exited */
545962306a36Sopenharmony_ci
546062306a36Sopenharmony_ci	if (bfqq && bfqd) {
546162306a36Sopenharmony_ci		bic_set_bfqq(bic, NULL, is_sync, actuator_idx);
546262306a36Sopenharmony_ci		bfq_exit_bfqq(bfqd, bfqq);
546362306a36Sopenharmony_ci	}
546462306a36Sopenharmony_ci}
546562306a36Sopenharmony_ci
546662306a36Sopenharmony_cistatic void bfq_exit_icq(struct io_cq *icq)
546762306a36Sopenharmony_ci{
546862306a36Sopenharmony_ci	struct bfq_io_cq *bic = icq_to_bic(icq);
546962306a36Sopenharmony_ci	struct bfq_data *bfqd = bic_to_bfqd(bic);
547062306a36Sopenharmony_ci	unsigned long flags;
547162306a36Sopenharmony_ci	unsigned int act_idx;
547262306a36Sopenharmony_ci	/*
547362306a36Sopenharmony_ci	 * If bfqd and thus bfqd->num_actuators is not available any
547462306a36Sopenharmony_ci	 * longer, then cycle over all possible per-actuator bfqqs in
547562306a36Sopenharmony_ci	 * the next loop. We rely on bic being zeroed on creation, and
547662306a36Sopenharmony_ci	 * therefore on its unused per-actuator fields being NULL.
547762306a36Sopenharmony_ci	 */
547862306a36Sopenharmony_ci	unsigned int num_actuators = BFQ_MAX_ACTUATORS;
547962306a36Sopenharmony_ci	struct bfq_iocq_bfqq_data *bfqq_data = bic->bfqq_data;
548062306a36Sopenharmony_ci
548162306a36Sopenharmony_ci	/*
548262306a36Sopenharmony_ci	 * bfqd is NULL if scheduler already exited, and in that case
548362306a36Sopenharmony_ci	 * this is the last time these queues are accessed.
548462306a36Sopenharmony_ci	 */
548562306a36Sopenharmony_ci	if (bfqd) {
548662306a36Sopenharmony_ci		spin_lock_irqsave(&bfqd->lock, flags);
548762306a36Sopenharmony_ci		num_actuators = bfqd->num_actuators;
548862306a36Sopenharmony_ci	}
548962306a36Sopenharmony_ci
549062306a36Sopenharmony_ci	for (act_idx = 0; act_idx < num_actuators; act_idx++) {
549162306a36Sopenharmony_ci		if (bfqq_data[act_idx].stable_merge_bfqq)
549262306a36Sopenharmony_ci			bfq_put_stable_ref(bfqq_data[act_idx].stable_merge_bfqq);
549362306a36Sopenharmony_ci
549462306a36Sopenharmony_ci		bfq_exit_icq_bfqq(bic, true, act_idx);
549562306a36Sopenharmony_ci		bfq_exit_icq_bfqq(bic, false, act_idx);
549662306a36Sopenharmony_ci	}
549762306a36Sopenharmony_ci
549862306a36Sopenharmony_ci	if (bfqd)
549962306a36Sopenharmony_ci		spin_unlock_irqrestore(&bfqd->lock, flags);
550062306a36Sopenharmony_ci}
550162306a36Sopenharmony_ci
550262306a36Sopenharmony_ci/*
550362306a36Sopenharmony_ci * Update the entity prio values; note that the new values will not
550462306a36Sopenharmony_ci * be used until the next (re)activation.
550562306a36Sopenharmony_ci */
550662306a36Sopenharmony_cistatic void
550762306a36Sopenharmony_cibfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
550862306a36Sopenharmony_ci{
550962306a36Sopenharmony_ci	struct task_struct *tsk = current;
551062306a36Sopenharmony_ci	int ioprio_class;
551162306a36Sopenharmony_ci	struct bfq_data *bfqd = bfqq->bfqd;
551262306a36Sopenharmony_ci
551362306a36Sopenharmony_ci	if (!bfqd)
551462306a36Sopenharmony_ci		return;
551562306a36Sopenharmony_ci
551662306a36Sopenharmony_ci	ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
551762306a36Sopenharmony_ci	switch (ioprio_class) {
551862306a36Sopenharmony_ci	default:
551962306a36Sopenharmony_ci		pr_err("bdi %s: bfq: bad prio class %d\n",
552062306a36Sopenharmony_ci			bdi_dev_name(bfqq->bfqd->queue->disk->bdi),
552162306a36Sopenharmony_ci			ioprio_class);
552262306a36Sopenharmony_ci		fallthrough;
552362306a36Sopenharmony_ci	case IOPRIO_CLASS_NONE:
552462306a36Sopenharmony_ci		/*
552562306a36Sopenharmony_ci		 * No prio set, inherit CPU scheduling settings.
552662306a36Sopenharmony_ci		 */
552762306a36Sopenharmony_ci		bfqq->new_ioprio = task_nice_ioprio(tsk);
552862306a36Sopenharmony_ci		bfqq->new_ioprio_class = task_nice_ioclass(tsk);
552962306a36Sopenharmony_ci		break;
553062306a36Sopenharmony_ci	case IOPRIO_CLASS_RT:
553162306a36Sopenharmony_ci		bfqq->new_ioprio = IOPRIO_PRIO_LEVEL(bic->ioprio);
553262306a36Sopenharmony_ci		bfqq->new_ioprio_class = IOPRIO_CLASS_RT;
553362306a36Sopenharmony_ci		break;
553462306a36Sopenharmony_ci	case IOPRIO_CLASS_BE:
553562306a36Sopenharmony_ci		bfqq->new_ioprio = IOPRIO_PRIO_LEVEL(bic->ioprio);
553662306a36Sopenharmony_ci		bfqq->new_ioprio_class = IOPRIO_CLASS_BE;
553762306a36Sopenharmony_ci		break;
553862306a36Sopenharmony_ci	case IOPRIO_CLASS_IDLE:
553962306a36Sopenharmony_ci		bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE;
554062306a36Sopenharmony_ci		bfqq->new_ioprio = IOPRIO_NR_LEVELS - 1;
554162306a36Sopenharmony_ci		break;
554262306a36Sopenharmony_ci	}
554362306a36Sopenharmony_ci
554462306a36Sopenharmony_ci	if (bfqq->new_ioprio >= IOPRIO_NR_LEVELS) {
554562306a36Sopenharmony_ci		pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
554662306a36Sopenharmony_ci			bfqq->new_ioprio);
554762306a36Sopenharmony_ci		bfqq->new_ioprio = IOPRIO_NR_LEVELS - 1;
554862306a36Sopenharmony_ci	}
554962306a36Sopenharmony_ci
555062306a36Sopenharmony_ci	bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
555162306a36Sopenharmony_ci	bfq_log_bfqq(bfqd, bfqq, "new_ioprio %d new_weight %d",
555262306a36Sopenharmony_ci		     bfqq->new_ioprio, bfqq->entity.new_weight);
555362306a36Sopenharmony_ci	bfqq->entity.prio_changed = 1;
555462306a36Sopenharmony_ci}
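
/*
 * Illustrative example for the weight computed above (assumptions:
 * IOPRIO_NR_LEVELS == 8 and the linear conversion performed by
 * bfq_ioprio_to_weight(), i.e., weight = (IOPRIO_NR_LEVELS - ioprio) *
 * coefficient). Under these assumptions a queue at prio level 0 gets
 * weight 8 * coefficient, while one at the default best-effort level 4
 * gets 4 * coefficient, i.e., half the B-WF2Q+ throughput share. The
 * absolute figures depend on the conversion actually compiled in; only
 * the relative ordering matters here.
 */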
555562306a36Sopenharmony_ci
555662306a36Sopenharmony_cistatic struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
555762306a36Sopenharmony_ci				       struct bio *bio, bool is_sync,
555862306a36Sopenharmony_ci				       struct bfq_io_cq *bic,
555962306a36Sopenharmony_ci				       bool respawn);
556062306a36Sopenharmony_ci
556162306a36Sopenharmony_cistatic void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
556262306a36Sopenharmony_ci{
556362306a36Sopenharmony_ci	struct bfq_data *bfqd = bic_to_bfqd(bic);
556462306a36Sopenharmony_ci	struct bfq_queue *bfqq;
556562306a36Sopenharmony_ci	int ioprio = bic->icq.ioc->ioprio;
556662306a36Sopenharmony_ci
556762306a36Sopenharmony_ci	/*
556862306a36Sopenharmony_ci	 * This condition may trigger on a newly created bic, be sure to
556962306a36Sopenharmony_ci	 * drop the lock before returning.
557062306a36Sopenharmony_ci	 */
557162306a36Sopenharmony_ci	if (unlikely(!bfqd) || likely(bic->ioprio == ioprio))
557262306a36Sopenharmony_ci		return;
557362306a36Sopenharmony_ci
557462306a36Sopenharmony_ci	bic->ioprio = ioprio;
557562306a36Sopenharmony_ci
557662306a36Sopenharmony_ci	bfqq = bic_to_bfqq(bic, false, bfq_actuator_index(bfqd, bio));
557762306a36Sopenharmony_ci	if (bfqq) {
557862306a36Sopenharmony_ci		struct bfq_queue *old_bfqq = bfqq;
557962306a36Sopenharmony_ci
558062306a36Sopenharmony_ci		bfqq = bfq_get_queue(bfqd, bio, false, bic, true);
558162306a36Sopenharmony_ci		bic_set_bfqq(bic, bfqq, false, bfq_actuator_index(bfqd, bio));
558262306a36Sopenharmony_ci		bfq_release_process_ref(bfqd, old_bfqq);
558362306a36Sopenharmony_ci	}
558462306a36Sopenharmony_ci
558562306a36Sopenharmony_ci	bfqq = bic_to_bfqq(bic, true, bfq_actuator_index(bfqd, bio));
558662306a36Sopenharmony_ci	if (bfqq)
558762306a36Sopenharmony_ci		bfq_set_next_ioprio_data(bfqq, bic);
558862306a36Sopenharmony_ci}
558962306a36Sopenharmony_ci
559062306a36Sopenharmony_cistatic void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
559162306a36Sopenharmony_ci			  struct bfq_io_cq *bic, pid_t pid, int is_sync,
559262306a36Sopenharmony_ci			  unsigned int act_idx)
559362306a36Sopenharmony_ci{
559462306a36Sopenharmony_ci	u64 now_ns = ktime_get_ns();
559562306a36Sopenharmony_ci
559662306a36Sopenharmony_ci	bfqq->actuator_idx = act_idx;
559762306a36Sopenharmony_ci	RB_CLEAR_NODE(&bfqq->entity.rb_node);
559862306a36Sopenharmony_ci	INIT_LIST_HEAD(&bfqq->fifo);
559962306a36Sopenharmony_ci	INIT_HLIST_NODE(&bfqq->burst_list_node);
560062306a36Sopenharmony_ci	INIT_HLIST_NODE(&bfqq->woken_list_node);
560162306a36Sopenharmony_ci	INIT_HLIST_HEAD(&bfqq->woken_list);
560262306a36Sopenharmony_ci
560362306a36Sopenharmony_ci	bfqq->ref = 0;
560462306a36Sopenharmony_ci	bfqq->bfqd = bfqd;
560562306a36Sopenharmony_ci
560662306a36Sopenharmony_ci	if (bic)
560762306a36Sopenharmony_ci		bfq_set_next_ioprio_data(bfqq, bic);
560862306a36Sopenharmony_ci
560962306a36Sopenharmony_ci	if (is_sync) {
561062306a36Sopenharmony_ci		/*
561162306a36Sopenharmony_ci		 * No need to mark as has_short_ttime if in
561262306a36Sopenharmony_ci		 * idle_class, because no device idling is performed
561362306a36Sopenharmony_ci		 * for queues in idle class
561462306a36Sopenharmony_ci		 */
561562306a36Sopenharmony_ci		if (!bfq_class_idle(bfqq))
561662306a36Sopenharmony_ci			/* tentatively mark as has_short_ttime */
561762306a36Sopenharmony_ci			bfq_mark_bfqq_has_short_ttime(bfqq);
561862306a36Sopenharmony_ci		bfq_mark_bfqq_sync(bfqq);
561962306a36Sopenharmony_ci		bfq_mark_bfqq_just_created(bfqq);
562062306a36Sopenharmony_ci	} else
562162306a36Sopenharmony_ci		bfq_clear_bfqq_sync(bfqq);
562262306a36Sopenharmony_ci
562362306a36Sopenharmony_ci	/* set end request to minus infinity from now */
562462306a36Sopenharmony_ci	bfqq->ttime.last_end_request = now_ns + 1;
562562306a36Sopenharmony_ci
562662306a36Sopenharmony_ci	bfqq->creation_time = jiffies;
562762306a36Sopenharmony_ci
562862306a36Sopenharmony_ci	bfqq->io_start_time = now_ns;
562962306a36Sopenharmony_ci
563062306a36Sopenharmony_ci	bfq_mark_bfqq_IO_bound(bfqq);
563162306a36Sopenharmony_ci
563262306a36Sopenharmony_ci	bfqq->pid = pid;
563362306a36Sopenharmony_ci
563462306a36Sopenharmony_ci	/* Tentative initial value to trade off between thr and lat */
563562306a36Sopenharmony_ci	bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
563662306a36Sopenharmony_ci	bfqq->budget_timeout = bfq_smallest_from_now();
563762306a36Sopenharmony_ci
563862306a36Sopenharmony_ci	bfqq->wr_coeff = 1;
563962306a36Sopenharmony_ci	bfqq->last_wr_start_finish = jiffies;
564062306a36Sopenharmony_ci	bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now();
564162306a36Sopenharmony_ci	bfqq->split_time = bfq_smallest_from_now();
564262306a36Sopenharmony_ci
564362306a36Sopenharmony_ci	/*
564462306a36Sopenharmony_ci	 * To not forget the possibly high bandwidth consumed by a
564562306a36Sopenharmony_ci	 * process/queue in the recent past,
564662306a36Sopenharmony_ci	 * bfq_bfqq_softrt_next_start() returns a value at least equal
564762306a36Sopenharmony_ci	 * to the current value of bfqq->soft_rt_next_start (see
564862306a36Sopenharmony_ci	 * comments on bfq_bfqq_softrt_next_start).  Set
564962306a36Sopenharmony_ci	 * soft_rt_next_start to now, to mean that bfqq has consumed
565062306a36Sopenharmony_ci	 * no bandwidth so far.
565162306a36Sopenharmony_ci	 */
565262306a36Sopenharmony_ci	bfqq->soft_rt_next_start = jiffies;
565362306a36Sopenharmony_ci
565462306a36Sopenharmony_ci	/* first request is almost certainly seeky */
565562306a36Sopenharmony_ci	bfqq->seek_history = 1;
565662306a36Sopenharmony_ci
565762306a36Sopenharmony_ci	bfqq->decrease_time_jif = jiffies;
565862306a36Sopenharmony_ci}
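
/*
 * Worked example for the tentative initial budget set above
 * (hypothetical numbers, for illustration only): if bfq_max_budget()
 * currently evaluates to 16384 sectors, the new queue starts with
 * max_budget = (2 * 16384) / 3 = 10922 sectors, i.e., roughly 5.3 MiB
 * with 512-byte sectors. The value is then adjusted at run time,
 * based on the behavior the queue actually exhibits.
 */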
565962306a36Sopenharmony_ci
566062306a36Sopenharmony_cistatic struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
566162306a36Sopenharmony_ci					       struct bfq_group *bfqg,
566262306a36Sopenharmony_ci					       int ioprio_class, int ioprio, int act_idx)
566362306a36Sopenharmony_ci{
566462306a36Sopenharmony_ci	switch (ioprio_class) {
566562306a36Sopenharmony_ci	case IOPRIO_CLASS_RT:
566662306a36Sopenharmony_ci		return &bfqg->async_bfqq[0][ioprio][act_idx];
566762306a36Sopenharmony_ci	case IOPRIO_CLASS_NONE:
566862306a36Sopenharmony_ci		ioprio = IOPRIO_BE_NORM;
566962306a36Sopenharmony_ci		fallthrough;
567062306a36Sopenharmony_ci	case IOPRIO_CLASS_BE:
567162306a36Sopenharmony_ci		return &bfqg->async_bfqq[1][ioprio][act_idx];
567262306a36Sopenharmony_ci	case IOPRIO_CLASS_IDLE:
567362306a36Sopenharmony_ci		return &bfqg->async_idle_bfqq[act_idx];
567462306a36Sopenharmony_ci	default:
567562306a36Sopenharmony_ci		return NULL;
567662306a36Sopenharmony_ci	}
567762306a36Sopenharmony_ci}
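
/*
 * Usage example for the lookup above (values are only illustrative):
 * an async request with IOPRIO_CLASS_RT, level 3, targeting actuator 0
 * resolves to &bfqg->async_bfqq[0][3][0]; a request with
 * IOPRIO_CLASS_NONE falls back to the best-effort queue at
 * IOPRIO_BE_NORM, i.e., &bfqg->async_bfqq[1][IOPRIO_BE_NORM][act_idx];
 * idle-class requests all share the single per-actuator
 * async_idle_bfqq slot.
 */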
567862306a36Sopenharmony_ci
567962306a36Sopenharmony_cistatic struct bfq_queue *
568062306a36Sopenharmony_cibfq_do_early_stable_merge(struct bfq_data *bfqd, struct bfq_queue *bfqq,
568162306a36Sopenharmony_ci			  struct bfq_io_cq *bic,
568262306a36Sopenharmony_ci			  struct bfq_queue *last_bfqq_created)
568362306a36Sopenharmony_ci{
568462306a36Sopenharmony_ci	unsigned int a_idx = last_bfqq_created->actuator_idx;
568562306a36Sopenharmony_ci	struct bfq_queue *new_bfqq =
568662306a36Sopenharmony_ci		bfq_setup_merge(bfqq, last_bfqq_created);
568762306a36Sopenharmony_ci
568862306a36Sopenharmony_ci	if (!new_bfqq)
568962306a36Sopenharmony_ci		return bfqq;
569062306a36Sopenharmony_ci
569162306a36Sopenharmony_ci	if (new_bfqq->bic)
569262306a36Sopenharmony_ci		new_bfqq->bic->bfqq_data[a_idx].stably_merged = true;
569362306a36Sopenharmony_ci	bic->bfqq_data[a_idx].stably_merged = true;
569462306a36Sopenharmony_ci
569562306a36Sopenharmony_ci	/*
569662306a36Sopenharmony_ci	 * Reusing merge functions. This implies that
569762306a36Sopenharmony_ci	 * bfqq->bic must be set too, for
569862306a36Sopenharmony_ci	 * bfq_merge_bfqqs to correctly save bfqq's
569962306a36Sopenharmony_ci	 * state before killing it.
570062306a36Sopenharmony_ci	 */
570162306a36Sopenharmony_ci	bfqq->bic = bic;
570262306a36Sopenharmony_ci	bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq);
570362306a36Sopenharmony_ci
570462306a36Sopenharmony_ci	return new_bfqq;
570562306a36Sopenharmony_ci}
570662306a36Sopenharmony_ci
570762306a36Sopenharmony_ci/*
570862306a36Sopenharmony_ci * Many throughput-sensitive workloads are made of several parallel
570962306a36Sopenharmony_ci * I/O flows, with all flows generated by the same application, or
571062306a36Sopenharmony_ci * more generically by the same task (e.g., system boot). The most
571162306a36Sopenharmony_ci * counterproductive action with these workloads is plugging I/O
571262306a36Sopenharmony_ci * dispatch when one of the bfq_queues associated with these flows
571362306a36Sopenharmony_ci * remains temporarily empty.
571462306a36Sopenharmony_ci *
571562306a36Sopenharmony_ci * To avoid this plugging, BFQ has been using a burst-handling
571662306a36Sopenharmony_ci * mechanism for years now. This mechanism has proven effective for
571762306a36Sopenharmony_ci * throughput, and not detrimental for service guarantees. The
571862306a36Sopenharmony_ci * following function pushes this mechanism a little bit further,
571962306a36Sopenharmony_ci * building on the following two facts.
572062306a36Sopenharmony_ci *
572162306a36Sopenharmony_ci * First, all the I/O flows of the same application or task
572262306a36Sopenharmony_ci * contribute to the execution/completion of that common application
572362306a36Sopenharmony_ci * or task. So the performance figures that matter are total
572462306a36Sopenharmony_ci * throughput of the flows and task-wide I/O latency.  In particular,
572562306a36Sopenharmony_ci * these flows do not need to be protected from each other, in terms
572662306a36Sopenharmony_ci * of individual bandwidth or latency.
572762306a36Sopenharmony_ci *
572862306a36Sopenharmony_ci * Second, the above fact holds regardless of the number of flows.
572962306a36Sopenharmony_ci *
573062306a36Sopenharmony_ci * Putting these two facts together, this function stably merges the
573162306a36Sopenharmony_ci * bfq_queues associated with these I/O flows, i.e., with the
573262306a36Sopenharmony_ci * processes that generate these I/O flows, regardless of how many
573362306a36Sopenharmony_ci * processes are involved.
573462306a36Sopenharmony_ci *
573562306a36Sopenharmony_ci * To decide whether a set of bfq_queues is actually associated with
573662306a36Sopenharmony_ci * the I/O flows of a common application or task, and to merge these
573762306a36Sopenharmony_ci * queues stably, this function operates as follows: given a bfq_queue,
573862306a36Sopenharmony_ci * say Q2, currently being created, and the last bfq_queue, say Q1,
573962306a36Sopenharmony_ci * created before Q2, Q2 is merged stably with Q1 if
574062306a36Sopenharmony_ci * - very little time has elapsed since when Q1 was created
574162306a36Sopenharmony_ci * - Q2 has the same ioprio as Q1
574262306a36Sopenharmony_ci * - Q2 belongs to the same group as Q1
574362306a36Sopenharmony_ci *
574462306a36Sopenharmony_ci * Merging bfq_queues also reduces scheduling overhead. A fio test
574562306a36Sopenharmony_ci * with ten random readers on /dev/nullb shows a throughput boost of
574662306a36Sopenharmony_ci * 40%, on a quad-core. Since BFQ's execution time amounts to ~50% of
574762306a36Sopenharmony_ci * the total per-request processing time, the above throughput boost
574862306a36Sopenharmony_ci * implies that BFQ's overhead is reduced by more than 50%.
574962306a36Sopenharmony_ci *
575062306a36Sopenharmony_ci * This new mechanism most certainly obsoletes the current
575162306a36Sopenharmony_ci * burst-handling heuristics. We keep those heuristics for the moment.
575262306a36Sopenharmony_ci */
575362306a36Sopenharmony_cistatic struct bfq_queue *bfq_do_or_sched_stable_merge(struct bfq_data *bfqd,
575462306a36Sopenharmony_ci						      struct bfq_queue *bfqq,
575562306a36Sopenharmony_ci						      struct bfq_io_cq *bic)
575662306a36Sopenharmony_ci{
575762306a36Sopenharmony_ci	struct bfq_queue **source_bfqq = bfqq->entity.parent ?
575862306a36Sopenharmony_ci		&bfqq->entity.parent->last_bfqq_created :
575962306a36Sopenharmony_ci		&bfqd->last_bfqq_created;
576062306a36Sopenharmony_ci
576162306a36Sopenharmony_ci	struct bfq_queue *last_bfqq_created = *source_bfqq;
576262306a36Sopenharmony_ci
576362306a36Sopenharmony_ci	/*
576462306a36Sopenharmony_ci	 * If last_bfqq_created has not been set yet, then init it. If
576562306a36Sopenharmony_ci	 * it has been set already, but too long ago, then move it
576662306a36Sopenharmony_ci	 * forward to bfqq. Finally, move also if bfqq belongs to a
576762306a36Sopenharmony_ci	 * different group than last_bfqq_created, or if bfqq has a
576862306a36Sopenharmony_ci	 * different ioprio, ioprio_class or actuator_idx. If none of
576962306a36Sopenharmony_ci	 * these conditions holds true, then try an early stable merge
577062306a36Sopenharmony_ci	 * or schedule a delayed stable merge. As for the condition on
577162306a36Sopenharmony_ci	 * actuator_idx, the reason is that, if queues associated with
577262306a36Sopenharmony_ci	 * different actuators are merged, then control is lost on
577362306a36Sopenharmony_ci	 * each actuator. Therefore some actuator may be
577462306a36Sopenharmony_ci	 * underutilized, and throughput may decrease.
577562306a36Sopenharmony_ci	 *
577662306a36Sopenharmony_ci	 * A delayed merge is scheduled (instead of performing an
577762306a36Sopenharmony_ci	 * early merge), in case bfqq might soon prove to be more
577862306a36Sopenharmony_ci	 * throughput-beneficial if not merged. Currently this is
577962306a36Sopenharmony_ci	 * possible only if bfqd is rotational with no queueing. For
578062306a36Sopenharmony_ci	 * such a drive, not merging bfqq is better for throughput if
578162306a36Sopenharmony_ci	 * bfqq happens to contain sequential I/O. So, we wait a
578262306a36Sopenharmony_ci	 * little bit for enough I/O to flow through bfqq. After that,
578362306a36Sopenharmony_ci	 * if such an I/O is sequential, then the merge is
578462306a36Sopenharmony_ci	 * canceled. Otherwise the merge is finally performed.
578562306a36Sopenharmony_ci	 */
578662306a36Sopenharmony_ci	if (!last_bfqq_created ||
578762306a36Sopenharmony_ci	    time_before(last_bfqq_created->creation_time +
578862306a36Sopenharmony_ci			msecs_to_jiffies(bfq_activation_stable_merging),
578962306a36Sopenharmony_ci			bfqq->creation_time) ||
579062306a36Sopenharmony_ci		bfqq->entity.parent != last_bfqq_created->entity.parent ||
579162306a36Sopenharmony_ci		bfqq->ioprio != last_bfqq_created->ioprio ||
579262306a36Sopenharmony_ci		bfqq->ioprio_class != last_bfqq_created->ioprio_class ||
579362306a36Sopenharmony_ci		bfqq->actuator_idx != last_bfqq_created->actuator_idx)
579462306a36Sopenharmony_ci		*source_bfqq = bfqq;
579562306a36Sopenharmony_ci	else if (time_after_eq(last_bfqq_created->creation_time +
579662306a36Sopenharmony_ci				 bfqd->bfq_burst_interval,
579762306a36Sopenharmony_ci				 bfqq->creation_time)) {
579862306a36Sopenharmony_ci		if (likely(bfqd->nonrot_with_queueing))
579962306a36Sopenharmony_ci			/*
580062306a36Sopenharmony_ci			 * With this type of drive, leaving
580162306a36Sopenharmony_ci			 * bfqq alone may provide no
580262306a36Sopenharmony_ci			 * throughput benefits compared with
580362306a36Sopenharmony_ci			 * merging bfqq. So merge bfqq now.
580462306a36Sopenharmony_ci			 */
580562306a36Sopenharmony_ci			bfqq = bfq_do_early_stable_merge(bfqd, bfqq,
580662306a36Sopenharmony_ci							 bic,
580762306a36Sopenharmony_ci							 last_bfqq_created);
580862306a36Sopenharmony_ci		else { /* schedule tentative stable merge */
580962306a36Sopenharmony_ci			/*
581062306a36Sopenharmony_ci			 * get reference on last_bfqq_created,
581162306a36Sopenharmony_ci			 * to prevent it from being freed,
581262306a36Sopenharmony_ci			 * until we decide whether to merge
581362306a36Sopenharmony_ci			 */
581462306a36Sopenharmony_ci			last_bfqq_created->ref++;
581562306a36Sopenharmony_ci			/*
581662306a36Sopenharmony_ci			 * need to keep track of stable refs, to
581762306a36Sopenharmony_ci			 * compute process refs correctly
581862306a36Sopenharmony_ci			 */
581962306a36Sopenharmony_ci			last_bfqq_created->stable_ref++;
582062306a36Sopenharmony_ci			/*
582162306a36Sopenharmony_ci			 * Record the bfqq to merge to.
582262306a36Sopenharmony_ci			 */
582362306a36Sopenharmony_ci			bic->bfqq_data[last_bfqq_created->actuator_idx].stable_merge_bfqq =
582462306a36Sopenharmony_ci				last_bfqq_created;
582562306a36Sopenharmony_ci		}
582662306a36Sopenharmony_ci	}
582762306a36Sopenharmony_ci
582862306a36Sopenharmony_ci	return bfqq;
582962306a36Sopenharmony_ci}
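
/*
 * Illustrative scenario for the logic above (timings are hypothetical):
 * process P1 creates queue Q1 and, shortly afterwards and well within
 * bfqd->bfq_burst_interval, process P2 creates Q2 with the same ioprio,
 * ioprio_class, group and actuator_idx. On a non-rotational drive with
 * internal queueing, Q2 is merged right away into Q1's merge chain by
 * bfq_do_early_stable_merge(). On a rotational drive with no queueing,
 * Q1 is instead only recorded in bic->bfqq_data[].stable_merge_bfqq
 * (taking both a normal and a stable reference), and the actual merge
 * decision is deferred until enough of Q2's I/O has been observed.
 */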
583062306a36Sopenharmony_ci
583162306a36Sopenharmony_ci
583262306a36Sopenharmony_cistatic struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
583362306a36Sopenharmony_ci				       struct bio *bio, bool is_sync,
583462306a36Sopenharmony_ci				       struct bfq_io_cq *bic,
583562306a36Sopenharmony_ci				       bool respawn)
583662306a36Sopenharmony_ci{
583762306a36Sopenharmony_ci	const int ioprio = IOPRIO_PRIO_LEVEL(bic->ioprio);
583862306a36Sopenharmony_ci	const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
583962306a36Sopenharmony_ci	struct bfq_queue **async_bfqq = NULL;
584062306a36Sopenharmony_ci	struct bfq_queue *bfqq;
584162306a36Sopenharmony_ci	struct bfq_group *bfqg;
584262306a36Sopenharmony_ci
584362306a36Sopenharmony_ci	bfqg = bfq_bio_bfqg(bfqd, bio);
584462306a36Sopenharmony_ci	if (!is_sync) {
584562306a36Sopenharmony_ci		async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
584662306a36Sopenharmony_ci						  ioprio,
584762306a36Sopenharmony_ci						  bfq_actuator_index(bfqd, bio));
584862306a36Sopenharmony_ci		bfqq = *async_bfqq;
584962306a36Sopenharmony_ci		if (bfqq)
585062306a36Sopenharmony_ci			goto out;
585162306a36Sopenharmony_ci	}
585262306a36Sopenharmony_ci
585362306a36Sopenharmony_ci	bfqq = kmem_cache_alloc_node(bfq_pool,
585462306a36Sopenharmony_ci				     GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
585562306a36Sopenharmony_ci				     bfqd->queue->node);
585662306a36Sopenharmony_ci
585762306a36Sopenharmony_ci	if (bfqq) {
585862306a36Sopenharmony_ci		bfq_init_bfqq(bfqd, bfqq, bic, current->pid,
585962306a36Sopenharmony_ci			      is_sync, bfq_actuator_index(bfqd, bio));
586062306a36Sopenharmony_ci		bfq_init_entity(&bfqq->entity, bfqg);
586162306a36Sopenharmony_ci		bfq_log_bfqq(bfqd, bfqq, "allocated");
586262306a36Sopenharmony_ci	} else {
586362306a36Sopenharmony_ci		bfqq = &bfqd->oom_bfqq;
586462306a36Sopenharmony_ci		bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
586562306a36Sopenharmony_ci		goto out;
586662306a36Sopenharmony_ci	}
586762306a36Sopenharmony_ci
586862306a36Sopenharmony_ci	/*
586962306a36Sopenharmony_ci	 * Pin the queue now that it's allocated, scheduler exit will
587062306a36Sopenharmony_ci	 * prune it.
587162306a36Sopenharmony_ci	 */
587262306a36Sopenharmony_ci	if (async_bfqq) {
587362306a36Sopenharmony_ci		bfqq->ref++; /*
587462306a36Sopenharmony_ci			      * Extra group reference, w.r.t. sync
587562306a36Sopenharmony_ci			      * queue. This extra reference is removed
587662306a36Sopenharmony_ci			      * only if bfqq->bfqg disappears, to
587762306a36Sopenharmony_ci			      * guarantee that this queue is not freed
587862306a36Sopenharmony_ci			      * until its group goes away.
587962306a36Sopenharmony_ci			      */
588062306a36Sopenharmony_ci		bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
588162306a36Sopenharmony_ci			     bfqq, bfqq->ref);
588262306a36Sopenharmony_ci		*async_bfqq = bfqq;
588362306a36Sopenharmony_ci	}
588462306a36Sopenharmony_ci
588562306a36Sopenharmony_ciout:
588662306a36Sopenharmony_ci	bfqq->ref++; /* get a process reference to this queue */
588762306a36Sopenharmony_ci
588862306a36Sopenharmony_ci	if (bfqq != &bfqd->oom_bfqq && is_sync && !respawn)
588962306a36Sopenharmony_ci		bfqq = bfq_do_or_sched_stable_merge(bfqd, bfqq, bic);
589062306a36Sopenharmony_ci	return bfqq;
589162306a36Sopenharmony_ci}
589262306a36Sopenharmony_ci
589362306a36Sopenharmony_cistatic void bfq_update_io_thinktime(struct bfq_data *bfqd,
589462306a36Sopenharmony_ci				    struct bfq_queue *bfqq)
589562306a36Sopenharmony_ci{
589662306a36Sopenharmony_ci	struct bfq_ttime *ttime = &bfqq->ttime;
589762306a36Sopenharmony_ci	u64 elapsed;
589862306a36Sopenharmony_ci
589962306a36Sopenharmony_ci	/*
590062306a36Sopenharmony_ci	 * We are really interested in how long it takes for the queue to
590162306a36Sopenharmony_ci	 * become busy when there is no outstanding I/O for this queue. So
590262306a36Sopenharmony_ci	 * ignore cases in which the bfq queue already has I/O queued.
590362306a36Sopenharmony_ci	 */
590462306a36Sopenharmony_ci	if (bfqq->dispatched || bfq_bfqq_busy(bfqq))
590562306a36Sopenharmony_ci		return;
590662306a36Sopenharmony_ci	elapsed = ktime_get_ns() - bfqq->ttime.last_end_request;
590762306a36Sopenharmony_ci	elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle);
590862306a36Sopenharmony_ci
590962306a36Sopenharmony_ci	ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
591062306a36Sopenharmony_ci	ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed,  8);
591162306a36Sopenharmony_ci	ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
591262306a36Sopenharmony_ci				     ttime->ttime_samples);
591362306a36Sopenharmony_ci}
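
/*
 * Worked example for the think-time average above (numbers are purely
 * illustrative): starting from a fresh queue (ttime_samples == 0,
 * ttime_total == 0), a first idle gap of elapsed = 1 ms gives
 * ttime_samples = (7*0 + 256)/8 = 32 and
 * ttime_total = (7*0 + 256*1000000)/8 = 32000000 ns, hence
 * ttime_mean of about 1 ms. Each later sample is folded in with weight
 * 1/8, so the mean tracks recent think times while smoothing isolated
 * spikes; the 256/8 fixed-point scaling cancels out in the mean.
 */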
591462306a36Sopenharmony_ci
591562306a36Sopenharmony_cistatic void
591662306a36Sopenharmony_cibfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
591762306a36Sopenharmony_ci		       struct request *rq)
591862306a36Sopenharmony_ci{
591962306a36Sopenharmony_ci	bfqq->seek_history <<= 1;
592062306a36Sopenharmony_ci	bfqq->seek_history |= BFQ_RQ_SEEKY(bfqd, bfqq->last_request_pos, rq);
592162306a36Sopenharmony_ci
592262306a36Sopenharmony_ci	if (bfqq->wr_coeff > 1 &&
592362306a36Sopenharmony_ci	    bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
592462306a36Sopenharmony_ci	    BFQQ_TOTALLY_SEEKY(bfqq)) {
592562306a36Sopenharmony_ci		if (time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt +
592662306a36Sopenharmony_ci					   bfq_wr_duration(bfqd))) {
592762306a36Sopenharmony_ci			/*
592862306a36Sopenharmony_ci			 * In soft_rt weight raising with the
592962306a36Sopenharmony_ci			 * interactive-weight-raising period
593062306a36Sopenharmony_ci			 * elapsed (so no switch back to
593162306a36Sopenharmony_ci			 * interactive weight raising).
593262306a36Sopenharmony_ci			 */
593362306a36Sopenharmony_ci			bfq_bfqq_end_wr(bfqq);
593462306a36Sopenharmony_ci		} else { /*
593562306a36Sopenharmony_ci			  * stopping soft_rt weight raising
593662306a36Sopenharmony_ci			  * while still in interactive period,
593762306a36Sopenharmony_ci			  * switch back to interactive weight
593862306a36Sopenharmony_ci			  * raising
593962306a36Sopenharmony_ci			  */
594062306a36Sopenharmony_ci			switch_back_to_interactive_wr(bfqq, bfqd);
594162306a36Sopenharmony_ci			bfqq->entity.prio_changed = 1;
594262306a36Sopenharmony_ci		}
594362306a36Sopenharmony_ci	}
594462306a36Sopenharmony_ci}
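
/*
 * Illustrative note on the history updated above: seek_history acts as
 * a shift register of per-request seekiness samples. Each new request
 * shifts the register left and sets the low bit if BFQ_RQ_SEEKY()
 * judged it far from the previous request's position. Macros such as
 * BFQQ_SEEKY() and BFQQ_TOTALLY_SEEKY() then classify the queue from
 * how many of the recent bits are set: a queue issuing purely
 * sequential I/O quickly drains the register to zero, while a random
 * reader keeps it (almost) fully set.
 */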
594562306a36Sopenharmony_ci
594662306a36Sopenharmony_cistatic void bfq_update_has_short_ttime(struct bfq_data *bfqd,
594762306a36Sopenharmony_ci				       struct bfq_queue *bfqq,
594862306a36Sopenharmony_ci				       struct bfq_io_cq *bic)
594962306a36Sopenharmony_ci{
595062306a36Sopenharmony_ci	bool has_short_ttime = true, state_changed;
595162306a36Sopenharmony_ci
595262306a36Sopenharmony_ci	/*
595362306a36Sopenharmony_ci	 * No need to update has_short_ttime if bfqq is async or in
595462306a36Sopenharmony_ci	 * idle io prio class, or if bfq_slice_idle is zero, because
595562306a36Sopenharmony_ci	 * no device idling is performed for bfqq in this case.
595662306a36Sopenharmony_ci	 */
595762306a36Sopenharmony_ci	if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq) ||
595862306a36Sopenharmony_ci	    bfqd->bfq_slice_idle == 0)
595962306a36Sopenharmony_ci		return;
596062306a36Sopenharmony_ci
596162306a36Sopenharmony_ci	/* Idle window just restored, statistics are meaningless. */
596262306a36Sopenharmony_ci	if (time_is_after_eq_jiffies(bfqq->split_time +
596362306a36Sopenharmony_ci				     bfqd->bfq_wr_min_idle_time))
596462306a36Sopenharmony_ci		return;
596562306a36Sopenharmony_ci
596662306a36Sopenharmony_ci	/* Think time is infinite if no process is linked to
596762306a36Sopenharmony_ci	 * bfqq. Otherwise check average think time to decide whether
596862306a36Sopenharmony_ci	 * to mark as has_short_ttime. To this goal, compare average
596962306a36Sopenharmony_ci	 * think time with half the I/O-plugging timeout.
597062306a36Sopenharmony_ci	 */
597162306a36Sopenharmony_ci	if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
597262306a36Sopenharmony_ci	    (bfq_sample_valid(bfqq->ttime.ttime_samples) &&
597362306a36Sopenharmony_ci	     bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle>>1))
597462306a36Sopenharmony_ci		has_short_ttime = false;
597562306a36Sopenharmony_ci
597662306a36Sopenharmony_ci	state_changed = has_short_ttime != bfq_bfqq_has_short_ttime(bfqq);
597762306a36Sopenharmony_ci
597862306a36Sopenharmony_ci	if (has_short_ttime)
597962306a36Sopenharmony_ci		bfq_mark_bfqq_has_short_ttime(bfqq);
598062306a36Sopenharmony_ci	else
598162306a36Sopenharmony_ci		bfq_clear_bfqq_has_short_ttime(bfqq);
598262306a36Sopenharmony_ci
598362306a36Sopenharmony_ci	/*
598462306a36Sopenharmony_ci	 * Until the base value for the total service time gets
598562306a36Sopenharmony_ci	 * finally computed for bfqq, the inject limit does depend on
598662306a36Sopenharmony_ci	 * the think-time state (short|long). In particular, the limit
598762306a36Sopenharmony_ci	 * is 0 or 1 if the think time is deemed, respectively, as
598862306a36Sopenharmony_ci	 * short or long (details in the comments in
598962306a36Sopenharmony_ci	 * bfq_update_inject_limit()). Accordingly, the next
599062306a36Sopenharmony_ci	 * instructions reset the inject limit if the think-time state
599162306a36Sopenharmony_ci	 * has changed and the above base value is still to be
599262306a36Sopenharmony_ci	 * computed.
599362306a36Sopenharmony_ci	 *
599462306a36Sopenharmony_ci	 * However, the reset is performed only if more than 100 ms
599562306a36Sopenharmony_ci	 * have elapsed since the last update of the inject limit, or
599662306a36Sopenharmony_ci	 * (inclusive) if the change is from short to long think
599762306a36Sopenharmony_ci	 * time. The reason for this waiting is as follows.
599862306a36Sopenharmony_ci	 *
599962306a36Sopenharmony_ci	 * bfqq may have a long think time because of a
600062306a36Sopenharmony_ci	 * synchronization with some other queue, i.e., because the
600162306a36Sopenharmony_ci	 * I/O of some other queue may need to be completed for bfqq
600262306a36Sopenharmony_ci	 * to receive new I/O. Details in the comments on the choice
600362306a36Sopenharmony_ci	 * of the queue for injection in bfq_select_queue().
600462306a36Sopenharmony_ci	 *
600562306a36Sopenharmony_ci	 * As stressed in those comments, if such a synchronization is
600662306a36Sopenharmony_ci	 * actually in place, then, without injection on bfqq, the
600762306a36Sopenharmony_ci	 * blocking I/O cannot happen to be served while bfqq is in
600862306a36Sopenharmony_ci	 * service. As a consequence, if bfqq is granted
600962306a36Sopenharmony_ci	 * I/O-dispatch-plugging, then bfqq remains empty, and no I/O
601062306a36Sopenharmony_ci	 * is dispatched, until the idle timeout fires. This is likely
601162306a36Sopenharmony_ci	 * to result in lower bandwidth and higher latencies for bfqq,
601262306a36Sopenharmony_ci	 * and in a severe loss of total throughput.
601362306a36Sopenharmony_ci	 *
601462306a36Sopenharmony_ci	 * On the opposite end, a non-zero inject limit may allow the
601562306a36Sopenharmony_ci	 * I/O that blocks bfqq to be executed soon, and therefore
601662306a36Sopenharmony_ci	 * bfqq to receive new I/O soon.
601762306a36Sopenharmony_ci	 *
601862306a36Sopenharmony_ci	 * But, if the blocking gets actually eliminated, then the
601962306a36Sopenharmony_ci	 * next think-time sample for bfqq may be very low. This in
602062306a36Sopenharmony_ci	 * turn may cause bfqq's think time to be deemed
602162306a36Sopenharmony_ci	 * short. Without the 100 ms barrier, this new state change
602262306a36Sopenharmony_ci	 * would cause the body of the next if to be executed
602362306a36Sopenharmony_ci	 * immediately. But this would set to 0 the inject
602462306a36Sopenharmony_ci	 * limit. Without injection, the blocking I/O would cause the
602562306a36Sopenharmony_ci	 * think time of bfqq to become long again, and therefore the
602662306a36Sopenharmony_ci	 * inject limit to be raised again, and so on. The only effect
602762306a36Sopenharmony_ci	 * of such a steady oscillation between the two think-time
602862306a36Sopenharmony_ci	 * states would be to prevent effective injection on bfqq.
602962306a36Sopenharmony_ci	 *
603062306a36Sopenharmony_ci	 * In contrast, if the inject limit is not reset during such a
603162306a36Sopenharmony_ci	 * long time interval as 100 ms, then the number of short
603262306a36Sopenharmony_ci	 * think time samples can grow significantly before the reset
603362306a36Sopenharmony_ci	 * is performed. As a consequence, the think time state can
603462306a36Sopenharmony_ci	 * become stable before the reset. Therefore there will be no
603562306a36Sopenharmony_ci	 * state change when the 100 ms elapse, and no reset of the
603662306a36Sopenharmony_ci	 * inject limit. The inject limit remains steadily equal to 1
603762306a36Sopenharmony_ci	 * both during and after the 100 ms. So injection can be
603862306a36Sopenharmony_ci	 * performed at all times, and throughput gets boosted.
603962306a36Sopenharmony_ci	 *
604062306a36Sopenharmony_ci	 * An inject limit equal to 1 is however in conflict, in
604162306a36Sopenharmony_ci	 * general, with the fact that the think time of bfqq is
604262306a36Sopenharmony_ci	 * short, because injection may be likely to delay bfqq's I/O
604362306a36Sopenharmony_ci	 * (as explained in the comments in
604462306a36Sopenharmony_ci	 * bfq_update_inject_limit()). But this does not happen in
604562306a36Sopenharmony_ci	 * this special case, because bfqq's low think time is due to
604662306a36Sopenharmony_ci	 * an effective handling of a synchronization, through
604762306a36Sopenharmony_ci	 * injection. In this special case, bfqq's I/O does not get
604862306a36Sopenharmony_ci	 * delayed by injection; on the contrary, bfqq's I/O is
604962306a36Sopenharmony_ci	 * brought forward, because it is not blocked for
605062306a36Sopenharmony_ci	 * milliseconds.
605162306a36Sopenharmony_ci	 *
605262306a36Sopenharmony_ci	 * In addition, serving the blocking I/O much sooner, and much
605362306a36Sopenharmony_ci	 * more frequently than once per I/O-plugging timeout, makes
605462306a36Sopenharmony_ci	 * it much quicker to detect a waker queue (the concept of
605562306a36Sopenharmony_ci	 * waker queue is defined in the comments in
605662306a36Sopenharmony_ci	 * bfq_add_request()). This makes it possible to start sooner
605762306a36Sopenharmony_ci	 * to boost throughput more effectively, by injecting the I/O
605862306a36Sopenharmony_ci	 * of the waker queue unconditionally on every
605962306a36Sopenharmony_ci	 * bfq_dispatch_request().
606062306a36Sopenharmony_ci	 *
606162306a36Sopenharmony_ci	 * One last, important benefit of not resetting the inject
606262306a36Sopenharmony_ci	 * limit before 100 ms is that, during this time interval, the
606362306a36Sopenharmony_ci	 * base value for the total service time is likely to get
606462306a36Sopenharmony_ci	 * finally computed for bfqq, freeing the inject limit from
606562306a36Sopenharmony_ci	 * its relation with the think time.
606662306a36Sopenharmony_ci	 */
606762306a36Sopenharmony_ci	if (state_changed && bfqq->last_serv_time_ns == 0 &&
606862306a36Sopenharmony_ci	    (time_is_before_eq_jiffies(bfqq->decrease_time_jif +
606962306a36Sopenharmony_ci				      msecs_to_jiffies(100)) ||
607062306a36Sopenharmony_ci	     !has_short_ttime))
607162306a36Sopenharmony_ci		bfq_reset_inject_limit(bfqd, bfqq);
607262306a36Sopenharmony_ci}
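
/*
 * Illustrative numbers for the short/long classification above
 * (assuming the default bfq_slice_idle of 8 ms): a queue whose average
 * think time stays at or below 4 ms (half the idling slice) keeps the
 * has_short_ttime flag, and BFQ keeps idling for it; once the average
 * exceeds 4 ms, or no process is linked to the queue any longer, the
 * flag is cleared. The 100 ms guard discussed above then decides
 * whether such a state change may also reset the inject limit.
 */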
607362306a36Sopenharmony_ci
607462306a36Sopenharmony_ci/*
607562306a36Sopenharmony_ci * Called when a new fs request (rq) is added to bfqq.  Check if there's
607662306a36Sopenharmony_ci * something we should do about it.
607762306a36Sopenharmony_ci */
607862306a36Sopenharmony_cistatic void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
607962306a36Sopenharmony_ci			    struct request *rq)
608062306a36Sopenharmony_ci{
608162306a36Sopenharmony_ci	if (rq->cmd_flags & REQ_META)
608262306a36Sopenharmony_ci		bfqq->meta_pending++;
608362306a36Sopenharmony_ci
608462306a36Sopenharmony_ci	bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
608562306a36Sopenharmony_ci
608662306a36Sopenharmony_ci	if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
608762306a36Sopenharmony_ci		bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
608862306a36Sopenharmony_ci				 blk_rq_sectors(rq) < 32;
608962306a36Sopenharmony_ci		bool budget_timeout = bfq_bfqq_budget_timeout(bfqq);
609062306a36Sopenharmony_ci
609162306a36Sopenharmony_ci		/*
609262306a36Sopenharmony_ci		 * There is just this request queued: if
609362306a36Sopenharmony_ci		 * - the request is small, and
609462306a36Sopenharmony_ci		 * - we are idling to boost throughput, and
609562306a36Sopenharmony_ci		 * - the queue is not to be expired,
609662306a36Sopenharmony_ci		 * then just exit.
609762306a36Sopenharmony_ci		 *
609862306a36Sopenharmony_ci		 * In this way, if the device is being idled to wait
609962306a36Sopenharmony_ci		 * for a new request from the in-service queue, we
610062306a36Sopenharmony_ci		 * avoid unplugging the device and committing the
610162306a36Sopenharmony_ci		 * device to serve just a small request. In contrast
610262306a36Sopenharmony_ci		 * we wait for the block layer to decide when to
610362306a36Sopenharmony_ci		 * unplug the device: hopefully, new requests will be
610462306a36Sopenharmony_ci		 * merged to this one quickly, then the device will be
610562306a36Sopenharmony_ci		 * unplugged and larger requests will be dispatched.
610662306a36Sopenharmony_ci		 */
610762306a36Sopenharmony_ci		if (small_req && idling_boosts_thr_without_issues(bfqd, bfqq) &&
610862306a36Sopenharmony_ci		    !budget_timeout)
610962306a36Sopenharmony_ci			return;
611062306a36Sopenharmony_ci
611162306a36Sopenharmony_ci		/*
611262306a36Sopenharmony_ci		 * A large enough request arrived, or idling is being
611362306a36Sopenharmony_ci		 * performed to preserve service guarantees, or
611462306a36Sopenharmony_ci		 * finally the queue is to be expired: in all these
611562306a36Sopenharmony_ci		 * cases disk idling is to be stopped, so clear
611662306a36Sopenharmony_ci		 * wait_request flag and reset timer.
611762306a36Sopenharmony_ci		 */
611862306a36Sopenharmony_ci		bfq_clear_bfqq_wait_request(bfqq);
611962306a36Sopenharmony_ci		hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
612062306a36Sopenharmony_ci
612162306a36Sopenharmony_ci		/*
612262306a36Sopenharmony_ci		 * The queue is not empty, because a new request just
612362306a36Sopenharmony_ci		 * arrived. Hence we can safely expire the queue, in
612462306a36Sopenharmony_ci		 * case of budget timeout, without risking that the
612562306a36Sopenharmony_ci		 * timestamps of the queue are not updated correctly.
612662306a36Sopenharmony_ci		 * See [1] for more details.
612762306a36Sopenharmony_ci		 */
612862306a36Sopenharmony_ci		if (budget_timeout)
612962306a36Sopenharmony_ci			bfq_bfqq_expire(bfqd, bfqq, false,
613062306a36Sopenharmony_ci					BFQQE_BUDGET_TIMEOUT);
613162306a36Sopenharmony_ci	}
613262306a36Sopenharmony_ci}
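
/*
 * Worked example for the "small request" test above: blk_rq_sectors()
 * counts 512-byte sectors, so the 32-sector bound corresponds to
 * requests smaller than 16 KiB. A lone 4 KiB read (8 sectors) arriving
 * while the device is being idled therefore does not force the idle
 * timer to be cancelled (as long as idling is still deemed beneficial
 * and the budget timeout has not expired), whereas a 128 KiB request
 * (256 sectors) does cancel it.
 */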
613362306a36Sopenharmony_ci
613462306a36Sopenharmony_cistatic void bfqq_request_allocated(struct bfq_queue *bfqq)
613562306a36Sopenharmony_ci{
613662306a36Sopenharmony_ci	struct bfq_entity *entity = &bfqq->entity;
613762306a36Sopenharmony_ci
613862306a36Sopenharmony_ci	for_each_entity(entity)
613962306a36Sopenharmony_ci		entity->allocated++;
614062306a36Sopenharmony_ci}
614162306a36Sopenharmony_ci
614262306a36Sopenharmony_cistatic void bfqq_request_freed(struct bfq_queue *bfqq)
614362306a36Sopenharmony_ci{
614462306a36Sopenharmony_ci	struct bfq_entity *entity = &bfqq->entity;
614562306a36Sopenharmony_ci
614662306a36Sopenharmony_ci	for_each_entity(entity)
614762306a36Sopenharmony_ci		entity->allocated--;
614862306a36Sopenharmony_ci}
614962306a36Sopenharmony_ci
615062306a36Sopenharmony_ci/* returns true if it causes the idle timer to be disabled */
615162306a36Sopenharmony_cistatic bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
615262306a36Sopenharmony_ci{
615362306a36Sopenharmony_ci	struct bfq_queue *bfqq = RQ_BFQQ(rq),
615462306a36Sopenharmony_ci		*new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true,
615562306a36Sopenharmony_ci						 RQ_BIC(rq));
615662306a36Sopenharmony_ci	bool waiting, idle_timer_disabled = false;
615762306a36Sopenharmony_ci
615862306a36Sopenharmony_ci	if (new_bfqq) {
615962306a36Sopenharmony_ci		/*
616062306a36Sopenharmony_ci		 * Release the request's reference to the old bfqq
616162306a36Sopenharmony_ci		 * and make sure one is taken to the shared queue.
616262306a36Sopenharmony_ci		 */
616362306a36Sopenharmony_ci		bfqq_request_allocated(new_bfqq);
616462306a36Sopenharmony_ci		bfqq_request_freed(bfqq);
616562306a36Sopenharmony_ci		new_bfqq->ref++;
616662306a36Sopenharmony_ci		/*
616762306a36Sopenharmony_ci		 * If the bic associated with the process
616862306a36Sopenharmony_ci		 * issuing this request still points to bfqq
616962306a36Sopenharmony_ci		 * (and thus has not been already redirected
617062306a36Sopenharmony_ci		 * to new_bfqq or even some other bfq_queue),
617162306a36Sopenharmony_ci		 * then complete the merge and redirect it to
617262306a36Sopenharmony_ci		 * new_bfqq.
617362306a36Sopenharmony_ci		 */
617462306a36Sopenharmony_ci		if (bic_to_bfqq(RQ_BIC(rq), true,
617562306a36Sopenharmony_ci				bfq_actuator_index(bfqd, rq->bio)) == bfqq)
617662306a36Sopenharmony_ci			bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
617762306a36Sopenharmony_ci					bfqq, new_bfqq);
617862306a36Sopenharmony_ci
617962306a36Sopenharmony_ci		bfq_clear_bfqq_just_created(bfqq);
618062306a36Sopenharmony_ci		/*
618162306a36Sopenharmony_ci		 * rq is about to be enqueued into new_bfqq,
618262306a36Sopenharmony_ci		 * release rq reference on bfqq
618362306a36Sopenharmony_ci		 */
618462306a36Sopenharmony_ci		bfq_put_queue(bfqq);
618562306a36Sopenharmony_ci		rq->elv.priv[1] = new_bfqq;
618662306a36Sopenharmony_ci		bfqq = new_bfqq;
618762306a36Sopenharmony_ci	}
618862306a36Sopenharmony_ci
618962306a36Sopenharmony_ci	bfq_update_io_thinktime(bfqd, bfqq);
619062306a36Sopenharmony_ci	bfq_update_has_short_ttime(bfqd, bfqq, RQ_BIC(rq));
619162306a36Sopenharmony_ci	bfq_update_io_seektime(bfqd, bfqq, rq);
619262306a36Sopenharmony_ci
619362306a36Sopenharmony_ci	waiting = bfqq && bfq_bfqq_wait_request(bfqq);
619462306a36Sopenharmony_ci	bfq_add_request(rq);
619562306a36Sopenharmony_ci	idle_timer_disabled = waiting && !bfq_bfqq_wait_request(bfqq);
619662306a36Sopenharmony_ci
619762306a36Sopenharmony_ci	rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
619862306a36Sopenharmony_ci	list_add_tail(&rq->queuelist, &bfqq->fifo);
619962306a36Sopenharmony_ci
620062306a36Sopenharmony_ci	bfq_rq_enqueued(bfqd, bfqq, rq);
620162306a36Sopenharmony_ci
620262306a36Sopenharmony_ci	return idle_timer_disabled;
620362306a36Sopenharmony_ci}
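
/*
 * Note on the fifo handling above (a descriptive sketch, not extra
 * logic): each request gets a fifo_time equal to "now" plus the
 * per-direction expiry bfqd->bfq_fifo_expire[rq_is_sync(rq)], and is
 * appended to bfqq->fifo. At dispatch time, a request whose fifo_time
 * has already passed is preferred over the position-sorted order, so
 * that no request starves inside a busy queue.
 */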
620462306a36Sopenharmony_ci
620562306a36Sopenharmony_ci#ifdef CONFIG_BFQ_CGROUP_DEBUG
620662306a36Sopenharmony_cistatic void bfq_update_insert_stats(struct request_queue *q,
620762306a36Sopenharmony_ci				    struct bfq_queue *bfqq,
620862306a36Sopenharmony_ci				    bool idle_timer_disabled,
620962306a36Sopenharmony_ci				    blk_opf_t cmd_flags)
621062306a36Sopenharmony_ci{
621162306a36Sopenharmony_ci	if (!bfqq)
621262306a36Sopenharmony_ci		return;
621362306a36Sopenharmony_ci
621462306a36Sopenharmony_ci	/*
621562306a36Sopenharmony_ci	 * bfqq still exists, because it can disappear only after
621662306a36Sopenharmony_ci	 * either it is merged with another queue, or the process it
621762306a36Sopenharmony_ci	 * is associated with exits. But both actions must be taken by
621862306a36Sopenharmony_ci	 * the same process currently executing this flow of
621962306a36Sopenharmony_ci	 * instructions.
622062306a36Sopenharmony_ci	 *
622162306a36Sopenharmony_ci	 * In addition, the following queue lock guarantees that
622262306a36Sopenharmony_ci	 * bfqq_group(bfqq) exists as well.
622362306a36Sopenharmony_ci	 */
622462306a36Sopenharmony_ci	spin_lock_irq(&q->queue_lock);
622562306a36Sopenharmony_ci	bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
622662306a36Sopenharmony_ci	if (idle_timer_disabled)
622762306a36Sopenharmony_ci		bfqg_stats_update_idle_time(bfqq_group(bfqq));
622862306a36Sopenharmony_ci	spin_unlock_irq(&q->queue_lock);
622962306a36Sopenharmony_ci}
623062306a36Sopenharmony_ci#else
623162306a36Sopenharmony_cistatic inline void bfq_update_insert_stats(struct request_queue *q,
623262306a36Sopenharmony_ci					   struct bfq_queue *bfqq,
623362306a36Sopenharmony_ci					   bool idle_timer_disabled,
623462306a36Sopenharmony_ci					   blk_opf_t cmd_flags) {}
623562306a36Sopenharmony_ci#endif /* CONFIG_BFQ_CGROUP_DEBUG */
623662306a36Sopenharmony_ci
623762306a36Sopenharmony_cistatic struct bfq_queue *bfq_init_rq(struct request *rq);
623862306a36Sopenharmony_ci
623962306a36Sopenharmony_cistatic void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
624062306a36Sopenharmony_ci			       blk_insert_t flags)
624162306a36Sopenharmony_ci{
624262306a36Sopenharmony_ci	struct request_queue *q = hctx->queue;
624362306a36Sopenharmony_ci	struct bfq_data *bfqd = q->elevator->elevator_data;
624462306a36Sopenharmony_ci	struct bfq_queue *bfqq;
624562306a36Sopenharmony_ci	bool idle_timer_disabled = false;
624662306a36Sopenharmony_ci	blk_opf_t cmd_flags;
624762306a36Sopenharmony_ci	LIST_HEAD(free);
624862306a36Sopenharmony_ci
624962306a36Sopenharmony_ci#ifdef CONFIG_BFQ_GROUP_IOSCHED
625062306a36Sopenharmony_ci	if (!cgroup_subsys_on_dfl(io_cgrp_subsys) && rq->bio)
625162306a36Sopenharmony_ci		bfqg_stats_update_legacy_io(q, rq);
625262306a36Sopenharmony_ci#endif
625362306a36Sopenharmony_ci	spin_lock_irq(&bfqd->lock);
625462306a36Sopenharmony_ci	bfqq = bfq_init_rq(rq);
625562306a36Sopenharmony_ci	if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
625662306a36Sopenharmony_ci		spin_unlock_irq(&bfqd->lock);
625762306a36Sopenharmony_ci		blk_mq_free_requests(&free);
625862306a36Sopenharmony_ci		return;
625962306a36Sopenharmony_ci	}
626062306a36Sopenharmony_ci
626162306a36Sopenharmony_ci	trace_block_rq_insert(rq);
626262306a36Sopenharmony_ci
626362306a36Sopenharmony_ci	if (flags & BLK_MQ_INSERT_AT_HEAD) {
626462306a36Sopenharmony_ci		list_add(&rq->queuelist, &bfqd->dispatch);
626562306a36Sopenharmony_ci	} else if (!bfqq) {
626662306a36Sopenharmony_ci		list_add_tail(&rq->queuelist, &bfqd->dispatch);
626762306a36Sopenharmony_ci	} else {
626862306a36Sopenharmony_ci		idle_timer_disabled = __bfq_insert_request(bfqd, rq);
626962306a36Sopenharmony_ci		/*
627062306a36Sopenharmony_ci		 * Update bfqq, because, if a queue merge has occurred
627162306a36Sopenharmony_ci		 * in __bfq_insert_request, then rq has been
627262306a36Sopenharmony_ci		 * redirected into a new queue.
627362306a36Sopenharmony_ci		 */
627462306a36Sopenharmony_ci		bfqq = RQ_BFQQ(rq);
627562306a36Sopenharmony_ci
627662306a36Sopenharmony_ci		if (rq_mergeable(rq)) {
627762306a36Sopenharmony_ci			elv_rqhash_add(q, rq);
627862306a36Sopenharmony_ci			if (!q->last_merge)
627962306a36Sopenharmony_ci				q->last_merge = rq;
628062306a36Sopenharmony_ci		}
628162306a36Sopenharmony_ci	}
628262306a36Sopenharmony_ci
628362306a36Sopenharmony_ci	/*
628462306a36Sopenharmony_ci	 * Cache cmd_flags before releasing scheduler lock, because rq
628562306a36Sopenharmony_ci	 * may disappear afterwards (for example, because of a request
628662306a36Sopenharmony_ci	 * merge).
628762306a36Sopenharmony_ci	 */
628862306a36Sopenharmony_ci	cmd_flags = rq->cmd_flags;
628962306a36Sopenharmony_ci	spin_unlock_irq(&bfqd->lock);
629062306a36Sopenharmony_ci
629162306a36Sopenharmony_ci	bfq_update_insert_stats(q, bfqq, idle_timer_disabled,
629262306a36Sopenharmony_ci				cmd_flags);
629362306a36Sopenharmony_ci}
629462306a36Sopenharmony_ci
629562306a36Sopenharmony_cistatic void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
629662306a36Sopenharmony_ci				struct list_head *list,
629762306a36Sopenharmony_ci				blk_insert_t flags)
629862306a36Sopenharmony_ci{
629962306a36Sopenharmony_ci	while (!list_empty(list)) {
630062306a36Sopenharmony_ci		struct request *rq;
630162306a36Sopenharmony_ci
630262306a36Sopenharmony_ci		rq = list_first_entry(list, struct request, queuelist);
630362306a36Sopenharmony_ci		list_del_init(&rq->queuelist);
630462306a36Sopenharmony_ci		bfq_insert_request(hctx, rq, flags);
630562306a36Sopenharmony_ci	}
630662306a36Sopenharmony_ci}
630762306a36Sopenharmony_ci
630862306a36Sopenharmony_cistatic void bfq_update_hw_tag(struct bfq_data *bfqd)
630962306a36Sopenharmony_ci{
631062306a36Sopenharmony_ci	struct bfq_queue *bfqq = bfqd->in_service_queue;
631162306a36Sopenharmony_ci
631262306a36Sopenharmony_ci	bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver,
631362306a36Sopenharmony_ci				       bfqd->tot_rq_in_driver);
631462306a36Sopenharmony_ci
631562306a36Sopenharmony_ci	if (bfqd->hw_tag == 1)
631662306a36Sopenharmony_ci		return;
631762306a36Sopenharmony_ci
631862306a36Sopenharmony_ci	/*
631962306a36Sopenharmony_ci	 * This sample is valid if the number of outstanding requests
632062306a36Sopenharmony_ci	 * is large enough to allow queueing behavior.  Note that the
632162306a36Sopenharmony_ci	 * sum is not exact, as it does not take into account deactivated
632262306a36Sopenharmony_ci	 * requests.
632362306a36Sopenharmony_ci	 */
632462306a36Sopenharmony_ci	if (bfqd->tot_rq_in_driver + bfqd->queued <= BFQ_HW_QUEUE_THRESHOLD)
632562306a36Sopenharmony_ci		return;
632662306a36Sopenharmony_ci
632762306a36Sopenharmony_ci	/*
632862306a36Sopenharmony_ci	 * If the active queue doesn't have enough requests and can idle,
632962306a36Sopenharmony_ci	 * bfq might not dispatch enough requests to the hardware. Don't
633062306a36Sopenharmony_ci	 * zero hw_tag in this case.
633162306a36Sopenharmony_ci	 */
633262306a36Sopenharmony_ci	if (bfqq && bfq_bfqq_has_short_ttime(bfqq) &&
633362306a36Sopenharmony_ci	    bfqq->dispatched + bfqq->queued[0] + bfqq->queued[1] <
633462306a36Sopenharmony_ci	    BFQ_HW_QUEUE_THRESHOLD &&
633562306a36Sopenharmony_ci	    bfqd->tot_rq_in_driver < BFQ_HW_QUEUE_THRESHOLD)
633662306a36Sopenharmony_ci		return;
633762306a36Sopenharmony_ci
633862306a36Sopenharmony_ci	if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
633962306a36Sopenharmony_ci		return;
634062306a36Sopenharmony_ci
634162306a36Sopenharmony_ci	bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
634262306a36Sopenharmony_ci	bfqd->max_rq_in_driver = 0;
634362306a36Sopenharmony_ci	bfqd->hw_tag_samples = 0;
634462306a36Sopenharmony_ci
634562306a36Sopenharmony_ci	bfqd->nonrot_with_queueing =
634662306a36Sopenharmony_ci		blk_queue_nonrot(bfqd->queue) && bfqd->hw_tag;
634762306a36Sopenharmony_ci}
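
/*
 * Illustrative reading of the sampling above (thresholds are those
 * implied by the BFQ_HW_QUEUE_* constants, whatever their values in
 * this tree): a sample is counted only when enough requests are
 * outstanding to make queueing observable; after BFQ_HW_QUEUE_SAMPLES
 * such samples, hw_tag is set if and only if the maximum number of
 * requests observed in the driver exceeded BFQ_HW_QUEUE_THRESHOLD. For
 * instance, a command-queueing SATA disk that regularly keeps many
 * requests in flight ends up with hw_tag == 1, while a device that
 * never holds more than a couple of requests is treated as
 * non-queueing, which in turn determines nonrot_with_queueing and thus
 * affects the stable-merge policy above.
 */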
634862306a36Sopenharmony_ci
634962306a36Sopenharmony_cistatic void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
635062306a36Sopenharmony_ci{
635162306a36Sopenharmony_ci	u64 now_ns;
635262306a36Sopenharmony_ci	u32 delta_us;
635362306a36Sopenharmony_ci
635462306a36Sopenharmony_ci	bfq_update_hw_tag(bfqd);
635562306a36Sopenharmony_ci
635662306a36Sopenharmony_ci	bfqd->rq_in_driver[bfqq->actuator_idx]--;
635762306a36Sopenharmony_ci	bfqd->tot_rq_in_driver--;
635862306a36Sopenharmony_ci	bfqq->dispatched--;
635962306a36Sopenharmony_ci
636062306a36Sopenharmony_ci	if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
636162306a36Sopenharmony_ci		/*
636262306a36Sopenharmony_ci		 * Set budget_timeout (which we overload to store the
636362306a36Sopenharmony_ci		 * time at which the queue remains with no backlog and
636462306a36Sopenharmony_ci		 * no outstanding request; used by the weight-raising
636562306a36Sopenharmony_ci		 * mechanism).
636662306a36Sopenharmony_ci		 */
636762306a36Sopenharmony_ci		bfqq->budget_timeout = jiffies;
636862306a36Sopenharmony_ci
636962306a36Sopenharmony_ci		bfq_del_bfqq_in_groups_with_pending_reqs(bfqq);
637062306a36Sopenharmony_ci		bfq_weights_tree_remove(bfqq);
637162306a36Sopenharmony_ci	}
637262306a36Sopenharmony_ci
637362306a36Sopenharmony_ci	now_ns = ktime_get_ns();
637462306a36Sopenharmony_ci
637562306a36Sopenharmony_ci	bfqq->ttime.last_end_request = now_ns;
637662306a36Sopenharmony_ci
637762306a36Sopenharmony_ci	/*
637862306a36Sopenharmony_ci	 * Use us instead of ns, to get reasonable precision when
637962306a36Sopenharmony_ci	 * computing the rate in the next check.
638062306a36Sopenharmony_ci	 */
638162306a36Sopenharmony_ci	delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC);
638262306a36Sopenharmony_ci
638362306a36Sopenharmony_ci	/*
638462306a36Sopenharmony_ci	 * If the request took rather long to complete, and, according
638562306a36Sopenharmony_ci	 * to the maximum request size recorded, this completion latency
638662306a36Sopenharmony_ci	 * implies that the request was certainly served at a very low
638762306a36Sopenharmony_ci	 * rate (less than 1M sectors/sec), then the whole observation
638862306a36Sopenharmony_ci	 * interval that lasts up to this time instant cannot be a
638962306a36Sopenharmony_ci	 * valid time interval for computing a new peak rate.  Invoke
639062306a36Sopenharmony_ci	 * bfq_update_rate_reset to have the following three steps
639162306a36Sopenharmony_ci	 * taken:
639262306a36Sopenharmony_ci	 * - close the observation interval at the last (previous)
639362306a36Sopenharmony_ci	 *   request dispatch or completion
639462306a36Sopenharmony_ci	 * - compute rate, if possible, for that observation interval
639562306a36Sopenharmony_ci	 * - reset to zero samples, which will trigger a proper
639662306a36Sopenharmony_ci	 *   re-initialization of the observation interval on next
639762306a36Sopenharmony_ci	 *   dispatch
639862306a36Sopenharmony_ci	 */
639962306a36Sopenharmony_ci	if (delta_us > BFQ_MIN_TT/NSEC_PER_USEC &&
640062306a36Sopenharmony_ci	   (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us <
640162306a36Sopenharmony_ci			1UL<<(BFQ_RATE_SHIFT - 10))
640262306a36Sopenharmony_ci		bfq_update_rate_reset(bfqd, NULL);
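	/*
	 * For instance, assuming BFQ_RATE_SHIFT is 16, a 512-sector
	 * request completing one second after the previous completion
	 * gives (512 << 16) / 10^6 = 33, which is below 1 << 6 = 64, so
	 * the check above closes the current observation interval and
	 * resets the peak-rate samples.
	 */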
640362306a36Sopenharmony_ci	bfqd->last_completion = now_ns;
640462306a36Sopenharmony_ci	/*
640562306a36Sopenharmony_ci	 * Shared queues are likely to receive I/O at a high
640662306a36Sopenharmony_ci	 * rate. This may deceptively let them be considered as wakers
640762306a36Sopenharmony_ci	 * of other queues. But a false waker will unjustly steal
640862306a36Sopenharmony_ci	 * bandwidth from its supposedly woken queue. So considering
640962306a36Sopenharmony_ci	 * also shared queues in the waking mechanism may cause more
641062306a36Sopenharmony_ci	 * control trouble than throughput benefit. Hence, reset
641162306a36Sopenharmony_ci	 * last_completed_rq_bfqq if bfqq is a shared queue.
641262306a36Sopenharmony_ci	 */
641362306a36Sopenharmony_ci	if (!bfq_bfqq_coop(bfqq))
641462306a36Sopenharmony_ci		bfqd->last_completed_rq_bfqq = bfqq;
641562306a36Sopenharmony_ci	else
641662306a36Sopenharmony_ci		bfqd->last_completed_rq_bfqq = NULL;
641762306a36Sopenharmony_ci
641862306a36Sopenharmony_ci	/*
641962306a36Sopenharmony_ci	 * If we are waiting to discover whether the request pattern
642062306a36Sopenharmony_ci	 * of the task associated with the queue is actually
642162306a36Sopenharmony_ci	 * isochronous, and both requisites for this condition to hold
642262306a36Sopenharmony_ci	 * are now satisfied, then compute soft_rt_next_start (see the
642362306a36Sopenharmony_ci	 * comments on the function bfq_bfqq_softrt_next_start()). We
642462306a36Sopenharmony_ci	 * do not compute soft_rt_next_start if bfqq is in interactive
642562306a36Sopenharmony_ci	 * weight raising (see the comments in bfq_bfqq_expire() for
642662306a36Sopenharmony_ci	 * an explanation). We schedule this delayed update when bfqq
642762306a36Sopenharmony_ci	 * expires, if it still has in-flight requests.
642862306a36Sopenharmony_ci	 */
642962306a36Sopenharmony_ci	if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
643062306a36Sopenharmony_ci	    RB_EMPTY_ROOT(&bfqq->sort_list) &&
643162306a36Sopenharmony_ci	    bfqq->wr_coeff != bfqd->bfq_wr_coeff)
643262306a36Sopenharmony_ci		bfqq->soft_rt_next_start =
643362306a36Sopenharmony_ci			bfq_bfqq_softrt_next_start(bfqd, bfqq);
643462306a36Sopenharmony_ci
643562306a36Sopenharmony_ci	/*
643662306a36Sopenharmony_ci	 * If this is the in-service queue, check if it needs to be expired,
643762306a36Sopenharmony_ci	 * or if we want to idle in case it has no pending requests.
643862306a36Sopenharmony_ci	 */
643962306a36Sopenharmony_ci	if (bfqd->in_service_queue == bfqq) {
644062306a36Sopenharmony_ci		if (bfq_bfqq_must_idle(bfqq)) {
644162306a36Sopenharmony_ci			if (bfqq->dispatched == 0)
644262306a36Sopenharmony_ci				bfq_arm_slice_timer(bfqd);
644362306a36Sopenharmony_ci			/*
644462306a36Sopenharmony_ci			 * If we get here, we do not expire bfqq, even
644562306a36Sopenharmony_ci			 * if bfqq was in budget timeout or had no
644662306a36Sopenharmony_ci			 * more requests (as checked by the next
644762306a36Sopenharmony_ci			 * conditional statements). The reason for
644862306a36Sopenharmony_ci			 * not expiring bfqq is as follows.
644962306a36Sopenharmony_ci			 *
645062306a36Sopenharmony_ci			 * Here bfqq->dispatched > 0 holds, but
645162306a36Sopenharmony_ci			 * bfq_bfqq_must_idle() returned true. This
645262306a36Sopenharmony_ci			 * implies that, even if no request arrives
645362306a36Sopenharmony_ci			 * for bfqq before bfqq->dispatched reaches 0,
645462306a36Sopenharmony_ci			 * bfqq will, however, not be expired on the
645562306a36Sopenharmony_ci			 * completion event that causes bfqq->dispatched
645662306a36Sopenharmony_ci			 * to reach zero. In contrast, on this event,
645762306a36Sopenharmony_ci			 * bfqq will start enjoying device idling
645862306a36Sopenharmony_ci			 * (I/O-dispatch plugging).
645962306a36Sopenharmony_ci			 *
646062306a36Sopenharmony_ci			 * But, if we expired bfqq here, bfqq would
646162306a36Sopenharmony_ci			 * not have the chance to enjoy device idling
646262306a36Sopenharmony_ci			 * when bfqq->dispatched finally reaches
646362306a36Sopenharmony_ci			 * zero. This would expose bfqq to violation
646462306a36Sopenharmony_ci			 * of its reserved service guarantees.
646562306a36Sopenharmony_ci			 */
646662306a36Sopenharmony_ci			return;
646762306a36Sopenharmony_ci		} else if (bfq_may_expire_for_budg_timeout(bfqq))
646862306a36Sopenharmony_ci			bfq_bfqq_expire(bfqd, bfqq, false,
646962306a36Sopenharmony_ci					BFQQE_BUDGET_TIMEOUT);
647062306a36Sopenharmony_ci		else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
647162306a36Sopenharmony_ci			 (bfqq->dispatched == 0 ||
647262306a36Sopenharmony_ci			  !bfq_better_to_idle(bfqq)))
647362306a36Sopenharmony_ci			bfq_bfqq_expire(bfqd, bfqq, false,
647462306a36Sopenharmony_ci					BFQQE_NO_MORE_REQUESTS);
647562306a36Sopenharmony_ci	}
647662306a36Sopenharmony_ci
647762306a36Sopenharmony_ci	if (!bfqd->tot_rq_in_driver)
647862306a36Sopenharmony_ci		bfq_schedule_dispatch(bfqd);
647962306a36Sopenharmony_ci}
648062306a36Sopenharmony_ci
648162306a36Sopenharmony_ci/*
648262306a36Sopenharmony_ci * The processes associated with bfqq may happen to generate their
648362306a36Sopenharmony_ci * cumulative I/O at a lower rate than the rate at which the device
648462306a36Sopenharmony_ci * could serve the same I/O. This is rather probable, e.g., if only
648562306a36Sopenharmony_ci * one process is associated with bfqq and the device is an SSD. It
648662306a36Sopenharmony_ci * results in bfqq becoming often empty while in service. In this
648762306a36Sopenharmony_ci * respect, if BFQ is allowed to switch to another queue when bfqq
648862306a36Sopenharmony_ci * remains empty, then the device goes on being fed with I/O requests,
648962306a36Sopenharmony_ci * and the throughput is not affected. In contrast, if BFQ is not
649062306a36Sopenharmony_ci * allowed to switch to another queue---because bfqq is sync and
649162306a36Sopenharmony_ci * I/O-dispatch needs to be plugged while bfqq is temporarily
649262306a36Sopenharmony_ci * empty---then, during the service of bfqq, there will be frequent
649362306a36Sopenharmony_ci * "service holes", i.e., time intervals during which bfqq gets empty
649462306a36Sopenharmony_ci * and the device can only consume the I/O already queued in its
649562306a36Sopenharmony_ci * hardware queues. During service holes, the device may even end up
649662306a36Sopenharmony_ci * idle. In the end, during the service of bfqq, the device
649762306a36Sopenharmony_ci * is driven at a lower speed than the one it can reach with the kind
649862306a36Sopenharmony_ci * of I/O flowing through bfqq.
649962306a36Sopenharmony_ci *
650062306a36Sopenharmony_ci * To counter this loss of throughput, BFQ implements a "request
650162306a36Sopenharmony_ci * injection mechanism", which tries to fill the above service holes
650262306a36Sopenharmony_ci * with I/O requests taken from other queues. The hard part in this
650362306a36Sopenharmony_ci * mechanism is finding the right amount of I/O to inject, so as to
650462306a36Sopenharmony_ci * both boost throughput and not break bfqq's bandwidth and latency
650562306a36Sopenharmony_ci * guarantees. In this respect, the mechanism maintains a per-queue
650662306a36Sopenharmony_ci * inject limit, computed as below. While bfqq is empty, the injection
650762306a36Sopenharmony_ci * mechanism dispatches extra I/O requests only until the total number
650862306a36Sopenharmony_ci * of I/O requests in flight---i.e., already dispatched but not yet
650962306a36Sopenharmony_ci * completed---remains lower than this limit.
651062306a36Sopenharmony_ci *
651162306a36Sopenharmony_ci * A first definition comes in handy to introduce the algorithm by
651262306a36Sopenharmony_ci * which the inject limit is computed.  We define as first request for
651362306a36Sopenharmony_ci * bfqq, an I/O request for bfqq that arrives while bfqq is in
651462306a36Sopenharmony_ci * service, and causes bfqq to switch from empty to non-empty. The
651562306a36Sopenharmony_ci * algorithm updates the limit as a function of the effect of
651662306a36Sopenharmony_ci * injection on the service times of only the first requests of
651762306a36Sopenharmony_ci * bfqq. The reason for this restriction is that these are the
651862306a36Sopenharmony_ci * requests whose service time is affected most, because they are the
651962306a36Sopenharmony_ci * first to arrive after injection possibly occurred.
652062306a36Sopenharmony_ci *
652162306a36Sopenharmony_ci * To evaluate the effect of injection, the algorithm measures the
652262306a36Sopenharmony_ci * "total service time" of first requests. We define as total service
652362306a36Sopenharmony_ci * time of an I/O request, the time that elapses since when the
652462306a36Sopenharmony_ci * request is enqueued into bfqq, to when it is completed. This
652562306a36Sopenharmony_ci * quantity allows the whole effect of injection to be measured. It is
652662306a36Sopenharmony_ci * easy to see why. Suppose that some requests of other queues are
652762306a36Sopenharmony_ci * actually injected while bfqq is empty, and that a new request R
652862306a36Sopenharmony_ci * then arrives for bfqq. If the device does start to serve all or
652962306a36Sopenharmony_ci * part of the injected requests during the service hole, then,
653062306a36Sopenharmony_ci * because of this extra service, it may delay the next invocation of
653162306a36Sopenharmony_ci * the dispatch hook of BFQ. Then, even after R gets eventually
653262306a36Sopenharmony_ci * dispatched, the device may delay the actual service of R if it is
653362306a36Sopenharmony_ci * still busy serving the extra requests, or if it decides to serve,
653462306a36Sopenharmony_ci * before R, some extra request still present in its queues. As a
653562306a36Sopenharmony_ci * conclusion, the cumulative extra delay caused by injection can be
653662306a36Sopenharmony_ci * easily evaluated by just comparing the total service time of first
653762306a36Sopenharmony_ci * requests with and without injection.
653862306a36Sopenharmony_ci *
653962306a36Sopenharmony_ci * The limit-update algorithm works as follows. On the arrival of a
654062306a36Sopenharmony_ci * first request of bfqq, the algorithm measures the total time of the
654162306a36Sopenharmony_ci * request only if one of the three cases below holds, and, for each
654262306a36Sopenharmony_ci * case, it updates the limit as described below:
654362306a36Sopenharmony_ci *
654462306a36Sopenharmony_ci * (1) If there is no in-flight request. This gives a baseline for the
654562306a36Sopenharmony_ci *     total service time of the requests of bfqq. If the baseline has
654662306a36Sopenharmony_ci *     not been computed yet, then, after computing it, the limit is
654762306a36Sopenharmony_ci *     set to 1, to start boosting throughput, and to prepare the
654862306a36Sopenharmony_ci *     ground for the next case. If the baseline has already been
654962306a36Sopenharmony_ci *     computed, then it is updated, in case it turns out to be lower
655062306a36Sopenharmony_ci *     than the previous value.
655162306a36Sopenharmony_ci *
655262306a36Sopenharmony_ci * (2) If the limit is higher than 0 and there are in-flight
655362306a36Sopenharmony_ci *     requests. By comparing the total service time in this case with
655462306a36Sopenharmony_ci *     the above baseline, it is possible to know to what extent the
655562306a36Sopenharmony_ci *     current value of the limit is inflating the total service
655662306a36Sopenharmony_ci *     time. If the inflation is below a certain threshold, then bfqq
655762306a36Sopenharmony_ci *     is assumed to be suffering from no perceivable loss of its
655862306a36Sopenharmony_ci *     service guarantees, and the limit is even tentatively
655962306a36Sopenharmony_ci *     increased. If the inflation is above the threshold, then the
656062306a36Sopenharmony_ci *     limit is decreased. Due to the lack of any hysteresis, this
656162306a36Sopenharmony_ci *     logic makes the limit oscillate even in steady workload
656262306a36Sopenharmony_ci *     conditions. Yet we opted for it, because it is fast in reaching
656362306a36Sopenharmony_ci *     the best value for the limit, as a function of the current I/O
656462306a36Sopenharmony_ci *     workload. To reduce oscillations, this step is disabled for a
656562306a36Sopenharmony_ci *     short time interval after the limit happens to be decreased.
656662306a36Sopenharmony_ci *
656762306a36Sopenharmony_ci * (3) Periodically, after resetting the limit, to make sure that the
656862306a36Sopenharmony_ci *     limit eventually drops in case the workload changes. This is
656962306a36Sopenharmony_ci *     needed because, after the limit has gone safely up for a
657062306a36Sopenharmony_ci *     certain workload, it is impossible to guess whether the
657162306a36Sopenharmony_ci *     baseline total service time may have changed, without measuring
657262306a36Sopenharmony_ci *     it again without injection. A more effective version of this
657362306a36Sopenharmony_ci *     step might be to just sample the baseline, by interrupting
657462306a36Sopenharmony_ci *     injection only once, and then to reset/lower the limit only if
657562306a36Sopenharmony_ci *     the total service time with the current limit does happen to be
657662306a36Sopenharmony_ci *     too large.
657762306a36Sopenharmony_ci *
657862306a36Sopenharmony_ci * More details on each step are provided in the comments on the
657962306a36Sopenharmony_ci * pieces of code that implement these steps: the branch handling the
658062306a36Sopenharmony_ci * transition from empty to non empty in bfq_add_request(), the branch
658162306a36Sopenharmony_ci * handling injection in bfq_select_queue(), and the function
658262306a36Sopenharmony_ci * bfq_choose_bfqq_for_injection(). These comments also explain some
658362306a36Sopenharmony_ci * exceptions, made by the injection mechanism in some special cases.
658462306a36Sopenharmony_ci */
658562306a36Sopenharmony_cistatic void bfq_update_inject_limit(struct bfq_data *bfqd,
658662306a36Sopenharmony_ci				    struct bfq_queue *bfqq)
658762306a36Sopenharmony_ci{
658862306a36Sopenharmony_ci	u64 tot_time_ns = ktime_get_ns() - bfqd->last_empty_occupied_ns;
658962306a36Sopenharmony_ci	unsigned int old_limit = bfqq->inject_limit;
659062306a36Sopenharmony_ci
659162306a36Sopenharmony_ci	if (bfqq->last_serv_time_ns > 0 && bfqd->rqs_injected) {
659262306a36Sopenharmony_ci		u64 threshold = (bfqq->last_serv_time_ns * 3)>>1;
659362306a36Sopenharmony_ci
659462306a36Sopenharmony_ci		if (tot_time_ns >= threshold && old_limit > 0) {
659562306a36Sopenharmony_ci			bfqq->inject_limit--;
659662306a36Sopenharmony_ci			bfqq->decrease_time_jif = jiffies;
659762306a36Sopenharmony_ci		} else if (tot_time_ns < threshold &&
659862306a36Sopenharmony_ci			   old_limit <= bfqd->max_rq_in_driver)
659962306a36Sopenharmony_ci			bfqq->inject_limit++;
660062306a36Sopenharmony_ci	}
660162306a36Sopenharmony_ci
660262306a36Sopenharmony_ci	/*
660362306a36Sopenharmony_ci	 * Either we still have to compute the base value for the
660462306a36Sopenharmony_ci	 * total service time, and there seem to be the right
660562306a36Sopenharmony_ci	 * conditions to do it, or we can lower the last base value
660662306a36Sopenharmony_ci	 * computed.
660762306a36Sopenharmony_ci	 *
660862306a36Sopenharmony_ci	 * NOTE: (bfqd->tot_rq_in_driver == 1) means that there is no I/O
660962306a36Sopenharmony_ci	 * request in flight, because this function is in the code
661062306a36Sopenharmony_ci	 * path that handles the completion of a request of bfqq, and,
661162306a36Sopenharmony_ci	 * in particular, this function is executed before
661262306a36Sopenharmony_ci	 * bfqd->tot_rq_in_driver is decremented in such a code path.
661362306a36Sopenharmony_ci	 */
661462306a36Sopenharmony_ci	if ((bfqq->last_serv_time_ns == 0 && bfqd->tot_rq_in_driver == 1) ||
661562306a36Sopenharmony_ci	    tot_time_ns < bfqq->last_serv_time_ns) {
661662306a36Sopenharmony_ci		if (bfqq->last_serv_time_ns == 0) {
661762306a36Sopenharmony_ci			/*
661862306a36Sopenharmony_ci			 * Now we certainly have a base value: make sure we
661962306a36Sopenharmony_ci			 * start trying injection.
662062306a36Sopenharmony_ci			 */
662162306a36Sopenharmony_ci			bfqq->inject_limit = max_t(unsigned int, 1, old_limit);
662262306a36Sopenharmony_ci		}
662362306a36Sopenharmony_ci		bfqq->last_serv_time_ns = tot_time_ns;
662462306a36Sopenharmony_ci	} else if (!bfqd->rqs_injected && bfqd->tot_rq_in_driver == 1)
662562306a36Sopenharmony_ci		/*
662662306a36Sopenharmony_ci		 * No I/O injected and no request still in service in
662762306a36Sopenharmony_ci		 * the drive: these are the exact conditions for
662862306a36Sopenharmony_ci		 * computing the base value of the total service time
662962306a36Sopenharmony_ci		 * for bfqq. So let's update this value, because it is
663062306a36Sopenharmony_ci		 * rather variable. For example, it varies if the size
663162306a36Sopenharmony_ci		 * or the spatial locality of the I/O requests in bfqq
663262306a36Sopenharmony_ci		 * change.
663362306a36Sopenharmony_ci		 */
663462306a36Sopenharmony_ci		bfqq->last_serv_time_ns = tot_time_ns;
663562306a36Sopenharmony_ci
663662306a36Sopenharmony_ci
663762306a36Sopenharmony_ci	/* update complete, not waiting for any request completion any longer */
663862306a36Sopenharmony_ci	bfqd->waited_rq = NULL;
663962306a36Sopenharmony_ci	bfqd->rqs_injected = false;
664062306a36Sopenharmony_ci}
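
/*
 * A small worked example of the update rules in
 * bfq_update_inject_limit() above. Suppose the recorded baseline is
 * last_serv_time_ns = 200 us, so the tolerance threshold is
 * (200 us * 3) >> 1 = 300 us:
 * - if a first request completes with tot_time_ns = 350 us while
 *   injection was active and the limit is positive, the limit is
 *   decreased by one and decrease_time_jif is refreshed;
 * - if instead tot_time_ns = 250 us and the current limit does not
 *   exceed max_rq_in_driver, the limit is increased by one;
 * - if no I/O was injected and exactly one request was in the drive,
 *   tot_time_ns simply becomes the new baseline.
 */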
664162306a36Sopenharmony_ci
664262306a36Sopenharmony_ci/*
664362306a36Sopenharmony_ci * Handle either a requeue or a finish for rq. The things to do are
664462306a36Sopenharmony_ci * the same in both cases: all references to rq are to be dropped. In
664562306a36Sopenharmony_ci * particular, rq is considered completed from the point of view of
664662306a36Sopenharmony_ci * the scheduler.
664762306a36Sopenharmony_ci */
664862306a36Sopenharmony_cistatic void bfq_finish_requeue_request(struct request *rq)
664962306a36Sopenharmony_ci{
665062306a36Sopenharmony_ci	struct bfq_queue *bfqq = RQ_BFQQ(rq);
665162306a36Sopenharmony_ci	struct bfq_data *bfqd;
665262306a36Sopenharmony_ci	unsigned long flags;
665362306a36Sopenharmony_ci
665462306a36Sopenharmony_ci	/*
665562306a36Sopenharmony_ci	 * rq either is not associated with any icq, or is an already
665662306a36Sopenharmony_ci	 * requeued request that has not (yet) been re-inserted into
665762306a36Sopenharmony_ci	 * a bfq_queue.
665862306a36Sopenharmony_ci	 */
665962306a36Sopenharmony_ci	if (!rq->elv.icq || !bfqq)
666062306a36Sopenharmony_ci		return;
666162306a36Sopenharmony_ci
666262306a36Sopenharmony_ci	bfqd = bfqq->bfqd;
666362306a36Sopenharmony_ci
666462306a36Sopenharmony_ci	if (rq->rq_flags & RQF_STARTED)
666562306a36Sopenharmony_ci		bfqg_stats_update_completion(bfqq_group(bfqq),
666662306a36Sopenharmony_ci					     rq->start_time_ns,
666762306a36Sopenharmony_ci					     rq->io_start_time_ns,
666862306a36Sopenharmony_ci					     rq->cmd_flags);
666962306a36Sopenharmony_ci
667062306a36Sopenharmony_ci	spin_lock_irqsave(&bfqd->lock, flags);
667162306a36Sopenharmony_ci	if (likely(rq->rq_flags & RQF_STARTED)) {
667262306a36Sopenharmony_ci		if (rq == bfqd->waited_rq)
667362306a36Sopenharmony_ci			bfq_update_inject_limit(bfqd, bfqq);
667462306a36Sopenharmony_ci
667562306a36Sopenharmony_ci		bfq_completed_request(bfqq, bfqd);
667662306a36Sopenharmony_ci	}
667762306a36Sopenharmony_ci	bfqq_request_freed(bfqq);
667862306a36Sopenharmony_ci	bfq_put_queue(bfqq);
667962306a36Sopenharmony_ci	RQ_BIC(rq)->requests--;
668062306a36Sopenharmony_ci	spin_unlock_irqrestore(&bfqd->lock, flags);
668162306a36Sopenharmony_ci
668262306a36Sopenharmony_ci	/*
668362306a36Sopenharmony_ci	 * Reset private fields. In case of a requeue, this allows
668462306a36Sopenharmony_ci	 * this function to correctly do nothing if it is spuriously
668562306a36Sopenharmony_ci	 * invoked again on this same request (see the check at the
668662306a36Sopenharmony_ci	 * beginning of the function). Probably, a better general
668762306a36Sopenharmony_ci	 * design would be to prevent blk-mq from invoking the requeue
668862306a36Sopenharmony_ci	 * or finish hooks of an elevator, for a request that is not
668962306a36Sopenharmony_ci	 * referred by that elevator.
669062306a36Sopenharmony_ci	 *
669162306a36Sopenharmony_ci	 * Resetting the following fields would break the
669262306a36Sopenharmony_ci	 * request-insertion logic if rq is re-inserted into a bfq
669362306a36Sopenharmony_ci	 * internal queue, without a re-preparation. Here we assume
669462306a36Sopenharmony_ci	 * that re-insertions of requeued requests, without
669562306a36Sopenharmony_ci	 * re-preparation, can happen only for pass_through or at_head
669662306a36Sopenharmony_ci	 * requests (which are not re-inserted into bfq internal
669762306a36Sopenharmony_ci	 * queues).
669862306a36Sopenharmony_ci	 */
669962306a36Sopenharmony_ci	rq->elv.priv[0] = NULL;
670062306a36Sopenharmony_ci	rq->elv.priv[1] = NULL;
670162306a36Sopenharmony_ci}
670262306a36Sopenharmony_ci
670362306a36Sopenharmony_cistatic void bfq_finish_request(struct request *rq)
670462306a36Sopenharmony_ci{
670562306a36Sopenharmony_ci	bfq_finish_requeue_request(rq);
670662306a36Sopenharmony_ci
670762306a36Sopenharmony_ci	if (rq->elv.icq) {
670862306a36Sopenharmony_ci		put_io_context(rq->elv.icq->ioc);
670962306a36Sopenharmony_ci		rq->elv.icq = NULL;
671062306a36Sopenharmony_ci	}
671162306a36Sopenharmony_ci}
671262306a36Sopenharmony_ci
671362306a36Sopenharmony_ci/*
671462306a36Sopenharmony_ci * Removes the association between the current task and bfqq, assuming
671562306a36Sopenharmony_ci * that bic points to the bfq iocontext of the task.
671662306a36Sopenharmony_ci * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
671762306a36Sopenharmony_ci * was the last process referring to that bfqq.
671862306a36Sopenharmony_ci */
671962306a36Sopenharmony_cistatic struct bfq_queue *
672062306a36Sopenharmony_cibfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
672162306a36Sopenharmony_ci{
672262306a36Sopenharmony_ci	bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
672362306a36Sopenharmony_ci
672462306a36Sopenharmony_ci	if (bfqq_process_refs(bfqq) == 1) {
672562306a36Sopenharmony_ci		bfqq->pid = current->pid;
672662306a36Sopenharmony_ci		bfq_clear_bfqq_coop(bfqq);
672762306a36Sopenharmony_ci		bfq_clear_bfqq_split_coop(bfqq);
672862306a36Sopenharmony_ci		return bfqq;
672962306a36Sopenharmony_ci	}
673062306a36Sopenharmony_ci
673162306a36Sopenharmony_ci	bic_set_bfqq(bic, NULL, true, bfqq->actuator_idx);
673262306a36Sopenharmony_ci
673362306a36Sopenharmony_ci	bfq_put_cooperator(bfqq);
673462306a36Sopenharmony_ci
673562306a36Sopenharmony_ci	bfq_release_process_ref(bfqq->bfqd, bfqq);
673662306a36Sopenharmony_ci	return NULL;
673762306a36Sopenharmony_ci}
673862306a36Sopenharmony_ci
673962306a36Sopenharmony_cistatic struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
674062306a36Sopenharmony_ci						   struct bfq_io_cq *bic,
674162306a36Sopenharmony_ci						   struct bio *bio,
674262306a36Sopenharmony_ci						   bool split, bool is_sync,
674362306a36Sopenharmony_ci						   bool *new_queue)
674462306a36Sopenharmony_ci{
674562306a36Sopenharmony_ci	unsigned int act_idx = bfq_actuator_index(bfqd, bio);
674662306a36Sopenharmony_ci	struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync, act_idx);
674762306a36Sopenharmony_ci	struct bfq_iocq_bfqq_data *bfqq_data = &bic->bfqq_data[act_idx];
674862306a36Sopenharmony_ci
674962306a36Sopenharmony_ci	if (likely(bfqq && bfqq != &bfqd->oom_bfqq))
675062306a36Sopenharmony_ci		return bfqq;
675162306a36Sopenharmony_ci
675262306a36Sopenharmony_ci	if (new_queue)
675362306a36Sopenharmony_ci		*new_queue = true;
675462306a36Sopenharmony_ci
675562306a36Sopenharmony_ci	if (bfqq)
675662306a36Sopenharmony_ci		bfq_put_queue(bfqq);
675762306a36Sopenharmony_ci	bfqq = bfq_get_queue(bfqd, bio, is_sync, bic, split);
675862306a36Sopenharmony_ci
675962306a36Sopenharmony_ci	bic_set_bfqq(bic, bfqq, is_sync, act_idx);
676062306a36Sopenharmony_ci	if (split && is_sync) {
676162306a36Sopenharmony_ci		if ((bfqq_data->was_in_burst_list && bfqd->large_burst) ||
676262306a36Sopenharmony_ci		    bfqq_data->saved_in_large_burst)
676362306a36Sopenharmony_ci			bfq_mark_bfqq_in_large_burst(bfqq);
676462306a36Sopenharmony_ci		else {
676562306a36Sopenharmony_ci			bfq_clear_bfqq_in_large_burst(bfqq);
676662306a36Sopenharmony_ci			if (bfqq_data->was_in_burst_list)
676762306a36Sopenharmony_ci				/*
676862306a36Sopenharmony_ci				 * If bfqq was in the current
676962306a36Sopenharmony_ci				 * burst list before being
677062306a36Sopenharmony_ci				 * merged, then we have to add
677162306a36Sopenharmony_ci				 * it back. And we do not need
677262306a36Sopenharmony_ci				 * to increase burst_size, as
677362306a36Sopenharmony_ci				 * we did not decrement
677462306a36Sopenharmony_ci				 * burst_size when we removed
677562306a36Sopenharmony_ci				 * bfqq from the burst list as
677662306a36Sopenharmony_ci				 * a consequence of a merge
677762306a36Sopenharmony_ci				 * (see comments in
677862306a36Sopenharmony_ci				 * bfq_put_queue). In this
677962306a36Sopenharmony_ci				 * respect, it would be rather
678062306a36Sopenharmony_ci				 * costly to know whether the
678162306a36Sopenharmony_ci				 * current burst list is still
678262306a36Sopenharmony_ci				 * the same burst list from
678362306a36Sopenharmony_ci				 * which bfqq was removed on
678462306a36Sopenharmony_ci				 * the merge. To avoid this
678562306a36Sopenharmony_ci				 * cost, if bfqq was in a
678662306a36Sopenharmony_ci				 * burst list, then we add
678762306a36Sopenharmony_ci				 * bfqq to the current burst
678862306a36Sopenharmony_ci				 * list without any further
678962306a36Sopenharmony_ci				 * check. This can cause
679062306a36Sopenharmony_ci				 * inappropriate insertions,
679162306a36Sopenharmony_ci				 * but rarely enough to not
679262306a36Sopenharmony_ci				 * harm the detection of large
679362306a36Sopenharmony_ci				 * bursts significantly.
679462306a36Sopenharmony_ci				 */
679562306a36Sopenharmony_ci				hlist_add_head(&bfqq->burst_list_node,
679662306a36Sopenharmony_ci					       &bfqd->burst_list);
679762306a36Sopenharmony_ci		}
679862306a36Sopenharmony_ci		bfqq->split_time = jiffies;
679962306a36Sopenharmony_ci	}
680062306a36Sopenharmony_ci
680162306a36Sopenharmony_ci	return bfqq;
680262306a36Sopenharmony_ci}
680362306a36Sopenharmony_ci
680462306a36Sopenharmony_ci/*
680562306a36Sopenharmony_ci * Only reset private fields. The actual request preparation will be
680662306a36Sopenharmony_ci * performed by bfq_init_rq, when rq is either inserted or merged. See
680762306a36Sopenharmony_ci * comments on bfq_init_rq for the reason behind this delayed
680862306a36Sopenharmony_ci * preparation.
680962306a36Sopenharmony_ci */
681062306a36Sopenharmony_cistatic void bfq_prepare_request(struct request *rq)
681162306a36Sopenharmony_ci{
681262306a36Sopenharmony_ci	rq->elv.icq = ioc_find_get_icq(rq->q);
681362306a36Sopenharmony_ci
681462306a36Sopenharmony_ci	/*
681562306a36Sopenharmony_ci	 * Regardless of whether we have an icq attached, we have to
681662306a36Sopenharmony_ci	 * clear the scheduler pointers, as they might point to
681762306a36Sopenharmony_ci	 * previously allocated bic/bfqq structs.
681862306a36Sopenharmony_ci	 */
681962306a36Sopenharmony_ci	rq->elv.priv[0] = rq->elv.priv[1] = NULL;
682062306a36Sopenharmony_ci}
682162306a36Sopenharmony_ci
682262306a36Sopenharmony_ci/*
682362306a36Sopenharmony_ci * If needed, init rq, allocate bfq data structures associated with
682462306a36Sopenharmony_ci * rq, and increment reference counters in the destination bfq_queue
682562306a36Sopenharmony_ci * for rq. Return the destination bfq_queue for rq, or NULL if rq is
682662306a36Sopenharmony_ci * not associated with any bfq_queue.
682762306a36Sopenharmony_ci *
682862306a36Sopenharmony_ci * This function is invoked by the functions that perform rq insertion
682962306a36Sopenharmony_ci * or merging. One may have expected the above preparation operations
683062306a36Sopenharmony_ci * to be performed in bfq_prepare_request, and not delayed to when rq
683162306a36Sopenharmony_ci * is inserted or merged. The rationale behind this delayed
683262306a36Sopenharmony_ci * preparation is that, after the prepare_request hook is invoked for
683362306a36Sopenharmony_ci * rq, rq may still be transformed into a request with no icq, i.e., a
683462306a36Sopenharmony_ci * request not associated with any queue. No bfq hook is invoked to
683562306a36Sopenharmony_ci * signal this transformation. As a consequence, should these
683662306a36Sopenharmony_ci * preparation operations be performed when the prepare_request hook
683762306a36Sopenharmony_ci * is invoked, and should rq be transformed one moment later, bfq
683862306a36Sopenharmony_ci * would end up in an inconsistent state, because it would have
683962306a36Sopenharmony_ci * incremented some queue counters for an rq destined to
684062306a36Sopenharmony_ci * transformation, without any chance to correctly lower these
684162306a36Sopenharmony_ci * counters back. In contrast, no transformation can still happen for
684262306a36Sopenharmony_ci * rq after rq has been inserted or merged. So, it is safe to execute
684362306a36Sopenharmony_ci * these preparation operations when rq is finally inserted or merged.
684462306a36Sopenharmony_ci */
684562306a36Sopenharmony_cistatic struct bfq_queue *bfq_init_rq(struct request *rq)
684662306a36Sopenharmony_ci{
684762306a36Sopenharmony_ci	struct request_queue *q = rq->q;
684862306a36Sopenharmony_ci	struct bio *bio = rq->bio;
684962306a36Sopenharmony_ci	struct bfq_data *bfqd = q->elevator->elevator_data;
685062306a36Sopenharmony_ci	struct bfq_io_cq *bic;
685162306a36Sopenharmony_ci	const int is_sync = rq_is_sync(rq);
685262306a36Sopenharmony_ci	struct bfq_queue *bfqq;
685362306a36Sopenharmony_ci	bool new_queue = false;
685462306a36Sopenharmony_ci	bool bfqq_already_existing = false, split = false;
685562306a36Sopenharmony_ci	unsigned int a_idx = bfq_actuator_index(bfqd, bio);
685662306a36Sopenharmony_ci
685762306a36Sopenharmony_ci	if (unlikely(!rq->elv.icq))
685862306a36Sopenharmony_ci		return NULL;
685962306a36Sopenharmony_ci
686062306a36Sopenharmony_ci	/*
686162306a36Sopenharmony_ci	 * Assuming that RQ_BFQQ(rq) is set only if everything is set
686262306a36Sopenharmony_ci	 * for this rq. This holds true, because this function is
686362306a36Sopenharmony_ci	 * invoked only for insertion or merging, and, after such
686462306a36Sopenharmony_ci	 * events, a request cannot be manipulated any longer before
686562306a36Sopenharmony_ci	 * being removed from bfq.
686662306a36Sopenharmony_ci	 */
686762306a36Sopenharmony_ci	if (RQ_BFQQ(rq))
686862306a36Sopenharmony_ci		return RQ_BFQQ(rq);
686962306a36Sopenharmony_ci
687062306a36Sopenharmony_ci	bic = icq_to_bic(rq->elv.icq);
687162306a36Sopenharmony_ci
687262306a36Sopenharmony_ci	bfq_check_ioprio_change(bic, bio);
687362306a36Sopenharmony_ci
687462306a36Sopenharmony_ci	bfq_bic_update_cgroup(bic, bio);
687562306a36Sopenharmony_ci
687662306a36Sopenharmony_ci	bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
687762306a36Sopenharmony_ci					 &new_queue);
687862306a36Sopenharmony_ci
687962306a36Sopenharmony_ci	if (likely(!new_queue)) {
688062306a36Sopenharmony_ci		/* If the queue was seeky for too long, break it apart. */
688162306a36Sopenharmony_ci		if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq) &&
688262306a36Sopenharmony_ci			!bic->bfqq_data[a_idx].stably_merged) {
688362306a36Sopenharmony_ci			struct bfq_queue *old_bfqq = bfqq;
688462306a36Sopenharmony_ci
688562306a36Sopenharmony_ci			/* Update bic before losing reference to bfqq */
688662306a36Sopenharmony_ci			if (bfq_bfqq_in_large_burst(bfqq))
688762306a36Sopenharmony_ci				bic->bfqq_data[a_idx].saved_in_large_burst =
688862306a36Sopenharmony_ci					true;
688962306a36Sopenharmony_ci
689062306a36Sopenharmony_ci			bfqq = bfq_split_bfqq(bic, bfqq);
689162306a36Sopenharmony_ci			split = true;
689262306a36Sopenharmony_ci
689362306a36Sopenharmony_ci			if (!bfqq) {
689462306a36Sopenharmony_ci				bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
689562306a36Sopenharmony_ci								 true, is_sync,
689662306a36Sopenharmony_ci								 NULL);
689762306a36Sopenharmony_ci				if (unlikely(bfqq == &bfqd->oom_bfqq))
689862306a36Sopenharmony_ci					bfqq_already_existing = true;
689962306a36Sopenharmony_ci			} else
690062306a36Sopenharmony_ci				bfqq_already_existing = true;
690162306a36Sopenharmony_ci
690262306a36Sopenharmony_ci			if (!bfqq_already_existing) {
690362306a36Sopenharmony_ci				bfqq->waker_bfqq = old_bfqq->waker_bfqq;
690462306a36Sopenharmony_ci				bfqq->tentative_waker_bfqq = NULL;
690562306a36Sopenharmony_ci
690662306a36Sopenharmony_ci				/*
690762306a36Sopenharmony_ci				 * If the waker queue disappears, then
690862306a36Sopenharmony_ci				 * new_bfqq->waker_bfqq must be
690962306a36Sopenharmony_ci				 * reset. So insert new_bfqq into the
691062306a36Sopenharmony_ci				 * woken_list of the waker. See
691162306a36Sopenharmony_ci				 * bfq_check_waker for details.
691262306a36Sopenharmony_ci				 */
691362306a36Sopenharmony_ci				if (bfqq->waker_bfqq)
691462306a36Sopenharmony_ci					hlist_add_head(&bfqq->woken_list_node,
691562306a36Sopenharmony_ci						       &bfqq->waker_bfqq->woken_list);
691662306a36Sopenharmony_ci			}
691762306a36Sopenharmony_ci		}
691862306a36Sopenharmony_ci	}
691962306a36Sopenharmony_ci
692062306a36Sopenharmony_ci	bfqq_request_allocated(bfqq);
692162306a36Sopenharmony_ci	bfqq->ref++;
692262306a36Sopenharmony_ci	bic->requests++;
692362306a36Sopenharmony_ci	bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d",
692462306a36Sopenharmony_ci		     rq, bfqq, bfqq->ref);
692562306a36Sopenharmony_ci
692662306a36Sopenharmony_ci	rq->elv.priv[0] = bic;
692762306a36Sopenharmony_ci	rq->elv.priv[1] = bfqq;
692862306a36Sopenharmony_ci
692962306a36Sopenharmony_ci	/*
693062306a36Sopenharmony_ci	 * If a bfq_queue has only one process reference, it is owned
693162306a36Sopenharmony_ci	 * by only this bic: we can then set bfqq->bic = bic. In
693262306a36Sopenharmony_ci	 * addition, if the queue has also just been split, we have to
693362306a36Sopenharmony_ci	 * resume its state.
693462306a36Sopenharmony_ci	 */
693562306a36Sopenharmony_ci	if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
693662306a36Sopenharmony_ci		bfqq->bic = bic;
693762306a36Sopenharmony_ci		if (split) {
693862306a36Sopenharmony_ci			/*
693962306a36Sopenharmony_ci			 * The queue has just been split from a shared
694062306a36Sopenharmony_ci			 * queue: restore the idle window and the
694162306a36Sopenharmony_ci			 * possible weight raising period.
694262306a36Sopenharmony_ci			 */
694362306a36Sopenharmony_ci			bfq_bfqq_resume_state(bfqq, bfqd, bic,
694462306a36Sopenharmony_ci					      bfqq_already_existing);
694562306a36Sopenharmony_ci		}
694662306a36Sopenharmony_ci	}
694762306a36Sopenharmony_ci
694862306a36Sopenharmony_ci	/*
694962306a36Sopenharmony_ci	 * Consider bfqq as possibly belonging to a burst of newly
695062306a36Sopenharmony_ci	 * created queues only if:
695162306a36Sopenharmony_ci	 * 1) A burst is actually happening (bfqd->burst_size > 0)
695262306a36Sopenharmony_ci	 * or
695362306a36Sopenharmony_ci	 * 2) There is no other active queue. In fact, if, in
695462306a36Sopenharmony_ci	 *    contrast, there are active queues not belonging to the
695562306a36Sopenharmony_ci	 *    possible burst bfqq may belong to, then there is no gain
695662306a36Sopenharmony_ci	 *    in considering bfqq as belonging to a burst, and
695762306a36Sopenharmony_ci	 *    therefore in not weight-raising bfqq. See comments on
695862306a36Sopenharmony_ci	 *    bfq_handle_burst().
695962306a36Sopenharmony_ci	 *
696062306a36Sopenharmony_ci	 * This filtering also helps eliminating false positives,
696162306a36Sopenharmony_ci	 * occurring when bfqq does not belong to an actual large
696262306a36Sopenharmony_ci	 * burst, but some background task (e.g., a service) happens
696362306a36Sopenharmony_ci	 * to trigger the creation of new queues very close to when
696462306a36Sopenharmony_ci	 * bfqq and its possible companion queues are created. See
696562306a36Sopenharmony_ci	 * comments on bfq_handle_burst() for further details also on
696662306a36Sopenharmony_ci	 * this issue.
696762306a36Sopenharmony_ci	 */
696862306a36Sopenharmony_ci	if (unlikely(bfq_bfqq_just_created(bfqq) &&
696962306a36Sopenharmony_ci		     (bfqd->burst_size > 0 ||
697062306a36Sopenharmony_ci		      bfq_tot_busy_queues(bfqd) == 0)))
697162306a36Sopenharmony_ci		bfq_handle_burst(bfqd, bfqq);
697262306a36Sopenharmony_ci
697362306a36Sopenharmony_ci	return bfqq;
697462306a36Sopenharmony_ci}
697562306a36Sopenharmony_ci
697662306a36Sopenharmony_cistatic void
697762306a36Sopenharmony_cibfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq)
697862306a36Sopenharmony_ci{
697962306a36Sopenharmony_ci	enum bfqq_expiration reason;
698062306a36Sopenharmony_ci	unsigned long flags;
698162306a36Sopenharmony_ci
698262306a36Sopenharmony_ci	spin_lock_irqsave(&bfqd->lock, flags);
698362306a36Sopenharmony_ci
698462306a36Sopenharmony_ci	/*
698562306a36Sopenharmony_ci	 * Considering that bfqq may be in a race, we should first check
698662306a36Sopenharmony_ci	 * whether bfqq is in service before doing anything on it. If the
698762306a36Sopenharmony_ci	 * bfqq in the race is not in service, it has already been
698862306a36Sopenharmony_ci	 * expired through __bfq_bfqq_expire() and its wait_request flag
698962306a36Sopenharmony_ci	 * has been cleared in __bfq_bfqd_reset_in_service().
699062306a36Sopenharmony_ci	 */
699162306a36Sopenharmony_ci	if (bfqq != bfqd->in_service_queue) {
699262306a36Sopenharmony_ci		spin_unlock_irqrestore(&bfqd->lock, flags);
699362306a36Sopenharmony_ci		return;
699462306a36Sopenharmony_ci	}
699562306a36Sopenharmony_ci
699662306a36Sopenharmony_ci	bfq_clear_bfqq_wait_request(bfqq);
699762306a36Sopenharmony_ci
699862306a36Sopenharmony_ci	if (bfq_bfqq_budget_timeout(bfqq))
699962306a36Sopenharmony_ci		/*
700062306a36Sopenharmony_ci		 * Also here the queue can be safely expired
700162306a36Sopenharmony_ci		 * for budget timeout without wasting
700262306a36Sopenharmony_ci		 * guarantees
700362306a36Sopenharmony_ci		 */
700462306a36Sopenharmony_ci		reason = BFQQE_BUDGET_TIMEOUT;
700562306a36Sopenharmony_ci	else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
700662306a36Sopenharmony_ci		/*
700762306a36Sopenharmony_ci		 * The queue may not be empty upon timer expiration,
700862306a36Sopenharmony_ci		 * because we may not disable the timer when the
700962306a36Sopenharmony_ci		 * first request of the in-service queue arrives
701062306a36Sopenharmony_ci		 * during disk idling.
701162306a36Sopenharmony_ci		 */
701262306a36Sopenharmony_ci		reason = BFQQE_TOO_IDLE;
701362306a36Sopenharmony_ci	else
701462306a36Sopenharmony_ci		goto schedule_dispatch;
701562306a36Sopenharmony_ci
701662306a36Sopenharmony_ci	bfq_bfqq_expire(bfqd, bfqq, true, reason);
701762306a36Sopenharmony_ci
701862306a36Sopenharmony_cischedule_dispatch:
701962306a36Sopenharmony_ci	bfq_schedule_dispatch(bfqd);
702062306a36Sopenharmony_ci	spin_unlock_irqrestore(&bfqd->lock, flags);
702162306a36Sopenharmony_ci}
702262306a36Sopenharmony_ci
702362306a36Sopenharmony_ci/*
702462306a36Sopenharmony_ci * Handler of the expiration of the timer running if the in-service queue
702562306a36Sopenharmony_ci * is idling inside its time slice.
702662306a36Sopenharmony_ci */
702762306a36Sopenharmony_cistatic enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
702862306a36Sopenharmony_ci{
702962306a36Sopenharmony_ci	struct bfq_data *bfqd = container_of(timer, struct bfq_data,
703062306a36Sopenharmony_ci					     idle_slice_timer);
703162306a36Sopenharmony_ci	struct bfq_queue *bfqq = bfqd->in_service_queue;
703262306a36Sopenharmony_ci
703362306a36Sopenharmony_ci	/*
703462306a36Sopenharmony_ci	 * Theoretical race here: the in-service queue can be NULL or
703562306a36Sopenharmony_ci	 * different from the queue that was idling if a new request
703662306a36Sopenharmony_ci	 * arrives for the current queue and there is a full dispatch
703762306a36Sopenharmony_ci	 * cycle that changes the in-service queue.  This can hardly
703862306a36Sopenharmony_ci	 * happen, but in the worst case we just expire a queue too
703962306a36Sopenharmony_ci	 * early.
704062306a36Sopenharmony_ci	 */
704162306a36Sopenharmony_ci	if (bfqq)
704262306a36Sopenharmony_ci		bfq_idle_slice_timer_body(bfqd, bfqq);
704362306a36Sopenharmony_ci
704462306a36Sopenharmony_ci	return HRTIMER_NORESTART;
704562306a36Sopenharmony_ci}
704662306a36Sopenharmony_ci
704762306a36Sopenharmony_cistatic void __bfq_put_async_bfqq(struct bfq_data *bfqd,
704862306a36Sopenharmony_ci				 struct bfq_queue **bfqq_ptr)
704962306a36Sopenharmony_ci{
705062306a36Sopenharmony_ci	struct bfq_queue *bfqq = *bfqq_ptr;
705162306a36Sopenharmony_ci
705262306a36Sopenharmony_ci	bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
705362306a36Sopenharmony_ci	if (bfqq) {
705462306a36Sopenharmony_ci		bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
705562306a36Sopenharmony_ci
705662306a36Sopenharmony_ci		bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
705762306a36Sopenharmony_ci			     bfqq, bfqq->ref);
705862306a36Sopenharmony_ci		bfq_put_queue(bfqq);
705962306a36Sopenharmony_ci		*bfqq_ptr = NULL;
706062306a36Sopenharmony_ci	}
706162306a36Sopenharmony_ci}
706262306a36Sopenharmony_ci
706362306a36Sopenharmony_ci/*
706462306a36Sopenharmony_ci * Release all the bfqg references to its async queues.  If we are
706562306a36Sopenharmony_ci * deallocating the group, these queues may still contain requests, so
706662306a36Sopenharmony_ci * we reparent them to the root cgroup (i.e., the only one that will
706762306a36Sopenharmony_ci * exist for sure until all the requests on a device are gone).
706862306a36Sopenharmony_ci */
706962306a36Sopenharmony_civoid bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
707062306a36Sopenharmony_ci{
707162306a36Sopenharmony_ci	int i, j, k;
707262306a36Sopenharmony_ci
707362306a36Sopenharmony_ci	for (k = 0; k < bfqd->num_actuators; k++) {
707462306a36Sopenharmony_ci		for (i = 0; i < 2; i++)
707562306a36Sopenharmony_ci			for (j = 0; j < IOPRIO_NR_LEVELS; j++)
707662306a36Sopenharmony_ci				__bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j][k]);
707762306a36Sopenharmony_ci
707862306a36Sopenharmony_ci		__bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq[k]);
707962306a36Sopenharmony_ci	}
708062306a36Sopenharmony_ci}
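
/*
 * For example, with the usual 8 priority levels per class, the loops in
 * bfq_put_async_queues() above visit 2 * 8 = 16 per-level async queue
 * pointers plus one idle-class pointer for each actuator, i.e., up to
 * 17 async queues per actuator, each reparented to the root group and
 * released if it exists.
 */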
708162306a36Sopenharmony_ci
708262306a36Sopenharmony_ci/*
708362306a36Sopenharmony_ci * See the comments on bfq_limit_depth for the purpose of
708462306a36Sopenharmony_ci * the depths set in the function.
708562306a36Sopenharmony_ci */
708662306a36Sopenharmony_cistatic void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
708762306a36Sopenharmony_ci{
708862306a36Sopenharmony_ci	unsigned int depth = 1U << bt->sb.shift;
708962306a36Sopenharmony_ci
709062306a36Sopenharmony_ci	bfqd->full_depth_shift = bt->sb.shift;
709162306a36Sopenharmony_ci	/*
709262306a36Sopenharmony_ci	 * In-word depths if no bfq_queue is being weight-raised:
709362306a36Sopenharmony_ci	 * leaving 25% of the tags for sync reads only.
709462306a36Sopenharmony_ci	 *
709562306a36Sopenharmony_ci	 * In next formulas, right-shift the value
709662306a36Sopenharmony_ci	 * (1U<<bt->sb.shift), instead of computing directly
709762306a36Sopenharmony_ci	 * (1U<<(bt->sb.shift - something)), to be robust against
709862306a36Sopenharmony_ci	 * any possible value of bt->sb.shift, without having to
709962306a36Sopenharmony_ci	 * limit 'something'.
710062306a36Sopenharmony_ci	 */
710162306a36Sopenharmony_ci	/* no more than 50% of tags for async I/O */
710262306a36Sopenharmony_ci	bfqd->word_depths[0][0] = max(depth >> 1, 1U);
710362306a36Sopenharmony_ci	/*
710462306a36Sopenharmony_ci	 * no more than 75% of tags for sync writes (25% extra tags
710562306a36Sopenharmony_ci	 * w.r.t. async I/O, to prevent async I/O from starving sync
710662306a36Sopenharmony_ci	 * writes)
710762306a36Sopenharmony_ci	 */
710862306a36Sopenharmony_ci	bfqd->word_depths[0][1] = max((depth * 3) >> 2, 1U);
710962306a36Sopenharmony_ci
711062306a36Sopenharmony_ci	/*
711162306a36Sopenharmony_ci	 * In-word depths in case some bfq_queue is being weight-
711262306a36Sopenharmony_ci	 * raised: leaving ~63% of tags for sync reads. This is the
711362306a36Sopenharmony_ci	 * highest percentage for which, in our tests, application
711462306a36Sopenharmony_ci	 * start-up times didn't suffer from any regression due to tag
711562306a36Sopenharmony_ci	 * shortage.
711662306a36Sopenharmony_ci	 */
711762306a36Sopenharmony_ci	/* no more than ~18% of tags for async I/O */
711862306a36Sopenharmony_ci	bfqd->word_depths[1][0] = max((depth * 3) >> 4, 1U);
711962306a36Sopenharmony_ci	/* no more than ~37% of tags for sync writes (~20% extra tags) */
712062306a36Sopenharmony_ci	bfqd->word_depths[1][1] = max((depth * 6) >> 4, 1U);
712162306a36Sopenharmony_ci}
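
/*
 * For instance, with bt->sb.shift == 6 (a per-word depth of 64), the
 * formulas in bfq_update_depths() above yield:
 *   word_depths[0][0] = 32  (async I/O, no queue weight-raised)
 *   word_depths[0][1] = 48  (sync writes, no queue weight-raised)
 *   word_depths[1][0] = 12  (async I/O, some queue weight-raised)
 *   word_depths[1][1] = 24  (sync writes, some queue weight-raised)
 * Sync reads are not limited by these values (see bfq_limit_depth).
 */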
712262306a36Sopenharmony_ci
712362306a36Sopenharmony_cistatic void bfq_depth_updated(struct blk_mq_hw_ctx *hctx)
712462306a36Sopenharmony_ci{
712562306a36Sopenharmony_ci	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
712662306a36Sopenharmony_ci	struct blk_mq_tags *tags = hctx->sched_tags;
712762306a36Sopenharmony_ci
712862306a36Sopenharmony_ci	bfq_update_depths(bfqd, &tags->bitmap_tags);
712962306a36Sopenharmony_ci	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, 1);
713062306a36Sopenharmony_ci}
713162306a36Sopenharmony_ci
713262306a36Sopenharmony_cistatic int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
713362306a36Sopenharmony_ci{
713462306a36Sopenharmony_ci	bfq_depth_updated(hctx);
713562306a36Sopenharmony_ci	return 0;
713662306a36Sopenharmony_ci}
713762306a36Sopenharmony_ci
713862306a36Sopenharmony_cistatic void bfq_exit_queue(struct elevator_queue *e)
713962306a36Sopenharmony_ci{
714062306a36Sopenharmony_ci	struct bfq_data *bfqd = e->elevator_data;
714162306a36Sopenharmony_ci	struct bfq_queue *bfqq, *n;
714262306a36Sopenharmony_ci	unsigned int actuator;
714362306a36Sopenharmony_ci
714462306a36Sopenharmony_ci	hrtimer_cancel(&bfqd->idle_slice_timer);
714562306a36Sopenharmony_ci
714662306a36Sopenharmony_ci	spin_lock_irq(&bfqd->lock);
714762306a36Sopenharmony_ci	list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
714862306a36Sopenharmony_ci		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
714962306a36Sopenharmony_ci	spin_unlock_irq(&bfqd->lock);
715062306a36Sopenharmony_ci
715162306a36Sopenharmony_ci	for (actuator = 0; actuator < bfqd->num_actuators; actuator++)
715262306a36Sopenharmony_ci		WARN_ON_ONCE(bfqd->rq_in_driver[actuator]);
715362306a36Sopenharmony_ci	WARN_ON_ONCE(bfqd->tot_rq_in_driver);
715462306a36Sopenharmony_ci
715562306a36Sopenharmony_ci	hrtimer_cancel(&bfqd->idle_slice_timer);
715662306a36Sopenharmony_ci
715762306a36Sopenharmony_ci	/* release oom-queue reference to root group */
715862306a36Sopenharmony_ci	bfqg_and_blkg_put(bfqd->root_group);
715962306a36Sopenharmony_ci
716062306a36Sopenharmony_ci#ifdef CONFIG_BFQ_GROUP_IOSCHED
716162306a36Sopenharmony_ci	blkcg_deactivate_policy(bfqd->queue->disk, &blkcg_policy_bfq);
716262306a36Sopenharmony_ci#else
716362306a36Sopenharmony_ci	spin_lock_irq(&bfqd->lock);
716462306a36Sopenharmony_ci	bfq_put_async_queues(bfqd, bfqd->root_group);
716562306a36Sopenharmony_ci	kfree(bfqd->root_group);
716662306a36Sopenharmony_ci	spin_unlock_irq(&bfqd->lock);
716762306a36Sopenharmony_ci#endif
716862306a36Sopenharmony_ci
716962306a36Sopenharmony_ci	blk_stat_disable_accounting(bfqd->queue);
717062306a36Sopenharmony_ci	clear_bit(ELEVATOR_FLAG_DISABLE_WBT, &e->flags);
717162306a36Sopenharmony_ci	wbt_enable_default(bfqd->queue->disk);
717262306a36Sopenharmony_ci
717362306a36Sopenharmony_ci	kfree(bfqd);
717462306a36Sopenharmony_ci}
717562306a36Sopenharmony_ci
717662306a36Sopenharmony_cistatic void bfq_init_root_group(struct bfq_group *root_group,
717762306a36Sopenharmony_ci				struct bfq_data *bfqd)
717862306a36Sopenharmony_ci{
717962306a36Sopenharmony_ci	int i;
718062306a36Sopenharmony_ci
718162306a36Sopenharmony_ci#ifdef CONFIG_BFQ_GROUP_IOSCHED
718262306a36Sopenharmony_ci	root_group->entity.parent = NULL;
718362306a36Sopenharmony_ci	root_group->my_entity = NULL;
718462306a36Sopenharmony_ci	root_group->bfqd = bfqd;
718562306a36Sopenharmony_ci#endif
718662306a36Sopenharmony_ci	root_group->rq_pos_tree = RB_ROOT;
718762306a36Sopenharmony_ci	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
718862306a36Sopenharmony_ci		root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
718962306a36Sopenharmony_ci	root_group->sched_data.bfq_class_idle_last_service = jiffies;
719062306a36Sopenharmony_ci}
719162306a36Sopenharmony_ci
719262306a36Sopenharmony_cistatic int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
719362306a36Sopenharmony_ci{
719462306a36Sopenharmony_ci	struct bfq_data *bfqd;
719562306a36Sopenharmony_ci	struct elevator_queue *eq;
719662306a36Sopenharmony_ci	unsigned int i;
719762306a36Sopenharmony_ci	struct blk_independent_access_ranges *ia_ranges = q->disk->ia_ranges;
719862306a36Sopenharmony_ci
719962306a36Sopenharmony_ci	eq = elevator_alloc(q, e);
720062306a36Sopenharmony_ci	if (!eq)
720162306a36Sopenharmony_ci		return -ENOMEM;
720262306a36Sopenharmony_ci
720362306a36Sopenharmony_ci	bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
720462306a36Sopenharmony_ci	if (!bfqd) {
720562306a36Sopenharmony_ci		kobject_put(&eq->kobj);
720662306a36Sopenharmony_ci		return -ENOMEM;
720762306a36Sopenharmony_ci	}
720862306a36Sopenharmony_ci	eq->elevator_data = bfqd;
720962306a36Sopenharmony_ci
721062306a36Sopenharmony_ci	spin_lock_irq(&q->queue_lock);
721162306a36Sopenharmony_ci	q->elevator = eq;
721262306a36Sopenharmony_ci	spin_unlock_irq(&q->queue_lock);
721362306a36Sopenharmony_ci
721462306a36Sopenharmony_ci	/*
721562306a36Sopenharmony_ci	 * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
721662306a36Sopenharmony_ci	 * Grab a permanent reference to it, so that the normal code flow
721762306a36Sopenharmony_ci	 * will not attempt to free it.
721862306a36Sopenharmony_ci	 * Set zero as actuator index: we will pretend that
721962306a36Sopenharmony_ci	 * all I/O requests are for the same actuator.
722062306a36Sopenharmony_ci	 */
722162306a36Sopenharmony_ci	bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0, 0);
722262306a36Sopenharmony_ci	bfqd->oom_bfqq.ref++;
722362306a36Sopenharmony_ci	bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO;
722462306a36Sopenharmony_ci	bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE;
722562306a36Sopenharmony_ci	bfqd->oom_bfqq.entity.new_weight =
722662306a36Sopenharmony_ci		bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio);
722762306a36Sopenharmony_ci
722862306a36Sopenharmony_ci	/* oom_bfqq does not participate in bursts */
722962306a36Sopenharmony_ci	bfq_clear_bfqq_just_created(&bfqd->oom_bfqq);
723062306a36Sopenharmony_ci
723162306a36Sopenharmony_ci	/*
723262306a36Sopenharmony_ci	 * Trigger weight initialization, according to ioprio, at the
723362306a36Sopenharmony_ci	 * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio
723462306a36Sopenharmony_ci	 * class won't be changed any more.
723562306a36Sopenharmony_ci	 */
723662306a36Sopenharmony_ci	bfqd->oom_bfqq.entity.prio_changed = 1;
723762306a36Sopenharmony_ci
723862306a36Sopenharmony_ci	bfqd->queue = q;
723962306a36Sopenharmony_ci
724062306a36Sopenharmony_ci	bfqd->num_actuators = 1;
724162306a36Sopenharmony_ci	/*
724262306a36Sopenharmony_ci	 * If the disk supports multiple actuators, copy independent
724362306a36Sopenharmony_ci	 * access ranges from the request queue structure.
724462306a36Sopenharmony_ci	 */
724562306a36Sopenharmony_ci	spin_lock_irq(&q->queue_lock);
724662306a36Sopenharmony_ci	if (ia_ranges) {
724762306a36Sopenharmony_ci		/*
724862306a36Sopenharmony_ci		 * Check if the disk ia_ranges size exceeds the current bfq
724962306a36Sopenharmony_ci		 * actuator limit.
725062306a36Sopenharmony_ci		 */
725162306a36Sopenharmony_ci		if (ia_ranges->nr_ia_ranges > BFQ_MAX_ACTUATORS) {
725262306a36Sopenharmony_ci			pr_crit("nr_ia_ranges higher than act limit: iars=%d, max=%d.\n",
725362306a36Sopenharmony_ci				ia_ranges->nr_ia_ranges, BFQ_MAX_ACTUATORS);
725462306a36Sopenharmony_ci			pr_crit("Falling back to single actuator mode.\n");
725562306a36Sopenharmony_ci		} else {
725662306a36Sopenharmony_ci			bfqd->num_actuators = ia_ranges->nr_ia_ranges;
725762306a36Sopenharmony_ci
725862306a36Sopenharmony_ci			for (i = 0; i < bfqd->num_actuators; i++) {
725962306a36Sopenharmony_ci				bfqd->sector[i] = ia_ranges->ia_range[i].sector;
726062306a36Sopenharmony_ci				bfqd->nr_sectors[i] =
726162306a36Sopenharmony_ci					ia_ranges->ia_range[i].nr_sectors;
726262306a36Sopenharmony_ci			}
726362306a36Sopenharmony_ci		}
726462306a36Sopenharmony_ci	}
726562306a36Sopenharmony_ci
726662306a36Sopenharmony_ci	/* Otherwise use single-actuator dev info */
726762306a36Sopenharmony_ci	if (bfqd->num_actuators == 1) {
726862306a36Sopenharmony_ci		bfqd->sector[0] = 0;
726962306a36Sopenharmony_ci		bfqd->nr_sectors[0] = get_capacity(q->disk);
727062306a36Sopenharmony_ci	}
727162306a36Sopenharmony_ci	spin_unlock_irq(&q->queue_lock);
727262306a36Sopenharmony_ci

	INIT_LIST_HEAD(&bfqd->dispatch);

	hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	bfqd->idle_slice_timer.function = bfq_idle_slice_timer;

	bfqd->queue_weights_tree = RB_ROOT_CACHED;
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	bfqd->num_groups_with_pending_reqs = 0;
#endif

	INIT_LIST_HEAD(&bfqd->active_list[0]);
	INIT_LIST_HEAD(&bfqd->active_list[1]);
	INIT_LIST_HEAD(&bfqd->idle_list);
	INIT_HLIST_HEAD(&bfqd->burst_list);

	bfqd->hw_tag = -1;
	bfqd->nonrot_with_queueing = blk_queue_nonrot(bfqd->queue);

	bfqd->bfq_max_budget = bfq_default_max_budget;

	bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
	bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
	bfqd->bfq_back_max = bfq_back_max;
	bfqd->bfq_back_penalty = bfq_back_penalty;
	bfqd->bfq_slice_idle = bfq_slice_idle;
	bfqd->bfq_timeout = bfq_timeout;

	bfqd->bfq_large_burst_thresh = 8;
	bfqd->bfq_burst_interval = msecs_to_jiffies(180);

	bfqd->low_latency = true;

	/*
	 * Trade-off between responsiveness and fairness.
	 */
	bfqd->bfq_wr_coeff = 30;
	bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
	bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
	bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
	bfqd->bfq_wr_max_softrt_rate = 7000; /*
					      * Approximate rate required
					      * to play back or record a
					      * high-definition compressed
					      * video.
					      */
	bfqd->wr_busy_queues = 0;

	/*
	 * Begin by assuming, optimistically, that the device peak
	 * rate is equal to 2/3 of the highest reference rate.
	 */
	bfqd->rate_dur_prod = ref_rate[blk_queue_nonrot(bfqd->queue)] *
		ref_wr_duration[blk_queue_nonrot(bfqd->queue)];
	bfqd->peak_rate = ref_rate[blk_queue_nonrot(bfqd->queue)] * 2 / 3;

	/* see the comments on the definition of the next field inside bfq_data */
	bfqd->actuator_load_threshold = 4;

	spin_lock_init(&bfqd->lock);

	/*
	 * The invocation of the next bfq_create_group_hierarchy
	 * function is the head of a chain of function calls
	 * (bfq_create_group_hierarchy->blkcg_activate_policy->
	 * blk_mq_freeze_queue) that may lead to the invocation of the
	 * has_work hook function. For this reason,
	 * bfq_create_group_hierarchy is invoked only after all
	 * scheduler data has been initialized, apart from the fields
	 * that can be initialized only after invoking
	 * bfq_create_group_hierarchy. This, in particular, enables
	 * has_work to correctly return false. Of course, to avoid
	 * other inconsistencies, the blk-mq stack must then refrain
	 * from invoking further scheduler hooks before this init
	 * function is finished.
	 */
	bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
	if (!bfqd->root_group)
		goto out_free;
	bfq_init_root_group(bfqd->root_group, bfqd);
	bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
	/* We dispatch from the whole request queue instead of from a single hw queue */
	blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);

	set_bit(ELEVATOR_FLAG_DISABLE_WBT, &eq->flags);
	wbt_disable_default(q->disk);
	blk_stat_enable_accounting(q);

	return 0;

out_free:
	kfree(bfqd);
	kobject_put(&eq->kobj);
	return -ENOMEM;
}

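/*
 * Slab cache management for struct bfq_queue objects (bfq_pool): the cache
 * is created at module init and destroyed at module exit.
 */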
static void bfq_slab_kill(void)
{
	kmem_cache_destroy(bfq_pool);
}

static int __init bfq_slab_setup(void)
{
	bfq_pool = KMEM_CACHE(bfq_queue, 0);
	if (!bfq_pool)
		return -ENOMEM;
	return 0;
}

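/* Helpers for the sysfs tunables below: print/parse a single unsigned value. */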
static ssize_t bfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%u\n", var);
}

static int bfq_var_store(unsigned long *var, const char *page)
{
	unsigned long new_val;
	int ret = kstrtoul(page, 10, &new_val);

	if (ret)
		return ret;
	*var = new_val;
	return 0;
}

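/*
 * Generate the show function for a tunable. __CONV selects the conversion
 * applied before printing: 0 = raw value, 1 = jiffies to msecs,
 * 2 = nsecs to msecs.
 */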
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct bfq_data *bfqd = e->elevator_data;			\
	u64 __data = __VAR;						\
	if (__CONV == 1)						\
		__data = jiffies_to_msecs(__data);			\
	else if (__CONV == 2)						\
		__data = div_u64(__data, NSEC_PER_MSEC);		\
	return bfq_var_show(__data, (page));				\
}
SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2);
SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2);
SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2);
SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1);
SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0);
SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
#undef SHOW_FUNCTION

#define USEC_SHOW_FUNCTION(__FUNC, __VAR)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct bfq_data *bfqd = e->elevator_data;			\
	u64 __data = __VAR;						\
	__data = div_u64(__data, NSEC_PER_USEC);			\
	return bfq_var_show(__data, (page));				\
}
USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle);
#undef USEC_SHOW_FUNCTION

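/*
 * Generate the store function for a tunable: parse the value, clamp it to
 * [MIN, MAX], then convert according to __CONV (0 = raw, 1 = msecs to
 * jiffies, 2 = msecs to nsecs) before storing it.
 */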
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t								\
__FUNC(struct elevator_queue *e, const char *page, size_t count)	\
{									\
	struct bfq_data *bfqd = e->elevator_data;			\
	unsigned long __data, __min = (MIN), __max = (MAX);		\
	int ret;							\
									\
	ret = bfq_var_store(&__data, (page));				\
	if (ret)							\
		return ret;						\
	if (__data < __min)						\
		__data = __min;						\
	else if (__data > __max)					\
		__data = __max;						\
	if (__CONV == 1)						\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else if (__CONV == 2)						\
		*(__PTR) = (u64)__data * NSEC_PER_MSEC;			\
	else								\
		*(__PTR) = __data;					\
	return count;							\
}
STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
		INT_MAX, 2);
STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
		INT_MAX, 2);
STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
		INT_MAX, 0);
STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2);
#undef STORE_FUNCTION

#define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\
{									\
	struct bfq_data *bfqd = e->elevator_data;			\
	unsigned long __data, __min = (MIN), __max = (MAX);		\
	int ret;							\
									\
	ret = bfq_var_store(&__data, (page));				\
	if (ret)							\
		return ret;						\
	if (__data < __min)						\
		__data = __min;						\
	else if (__data > __max)					\
		__data = __max;						\
	*(__PTR) = (u64)__data * NSEC_PER_USEC;				\
	return count;							\
}
USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0,
		    UINT_MAX);
#undef USEC_STORE_FUNCTION

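/*
 * Writing 0 to max_budget re-enables automatic budget computation from the
 * estimated peak rate (bfq_calc_max_budget); any other value is used as is,
 * capped at INT_MAX.
 */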
static ssize_t bfq_max_budget_store(struct elevator_queue *e,
				    const char *page, size_t count)
{
	struct bfq_data *bfqd = e->elevator_data;
	unsigned long __data;
	int ret;

	ret = bfq_var_store(&__data, (page));
	if (ret)
		return ret;

	if (__data == 0)
		bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
	else {
		if (__data > INT_MAX)
			__data = INT_MAX;
		bfqd->bfq_max_budget = __data;
	}

	bfqd->bfq_user_max_budget = __data;

	return count;
}

/*
 * The name is kept for compatibility with the corresponding cfq parameter,
 * but this timeout is used for both sync and async requests.
 */
static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
				      const char *page, size_t count)
{
	struct bfq_data *bfqd = e->elevator_data;
	unsigned long __data;
	int ret;

	ret = bfq_var_store(&__data, (page));
	if (ret)
		return ret;

	if (__data < 1)
		__data = 1;
	else if (__data > INT_MAX)
		__data = INT_MAX;

	bfqd->bfq_timeout = msecs_to_jiffies(__data);
	if (bfqd->bfq_user_max_budget == 0)
		bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);

	return count;
}

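/*
 * When strict_guarantees is being enabled, slice_idle is bumped to at least
 * 8 ms, as too small an idling window would undermine the guarantees.
 */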
static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e,
				     const char *page, size_t count)
{
	struct bfq_data *bfqd = e->elevator_data;
	unsigned long __data;
	int ret;

	ret = bfq_var_store(&__data, (page));
	if (ret)
		return ret;

	if (__data > 1)
		__data = 1;
	if (!bfqd->strict_guarantees && __data == 1
	    && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC)
		bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC;

	bfqd->strict_guarantees = __data;

	return count;
}

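/*
 * Turning low_latency off also ends any weight raising currently in
 * progress (bfq_end_wr).
 */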
static ssize_t bfq_low_latency_store(struct elevator_queue *e,
				     const char *page, size_t count)
{
	struct bfq_data *bfqd = e->elevator_data;
	unsigned long __data;
	int ret;

	ret = bfq_var_store(&__data, (page));
	if (ret)
		return ret;

	if (__data > 1)
		__data = 1;
	if (__data == 0 && bfqd->low_latency != 0)
		bfq_end_wr(bfqd);
	bfqd->low_latency = __data;

	return count;
}

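/*
 * Tunables exported through sysfs; each BFQ_ATTR entry wires together the
 * show and store handlers defined above.
 */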
#define BFQ_ATTR(name) \
	__ATTR(name, 0644, bfq_##name##_show, bfq_##name##_store)

static struct elv_fs_entry bfq_attrs[] = {
	BFQ_ATTR(fifo_expire_sync),
	BFQ_ATTR(fifo_expire_async),
	BFQ_ATTR(back_seek_max),
	BFQ_ATTR(back_seek_penalty),
	BFQ_ATTR(slice_idle),
	BFQ_ATTR(slice_idle_us),
	BFQ_ATTR(max_budget),
	BFQ_ATTR(timeout_sync),
	BFQ_ATTR(strict_guarantees),
	BFQ_ATTR(low_latency),
	__ATTR_NULL
};

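/* blk-mq elevator descriptor: scheduler hooks, icq sizing and sysfs attributes. */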
static struct elevator_type iosched_bfq_mq = {
	.ops = {
		.limit_depth		= bfq_limit_depth,
		.prepare_request	= bfq_prepare_request,
		.requeue_request	= bfq_finish_requeue_request,
		.finish_request		= bfq_finish_request,
		.exit_icq		= bfq_exit_icq,
		.insert_requests	= bfq_insert_requests,
		.dispatch_request	= bfq_dispatch_request,
		.next_request		= elv_rb_latter_request,
		.former_request		= elv_rb_former_request,
		.allow_merge		= bfq_allow_bio_merge,
		.bio_merge		= bfq_bio_merge,
		.request_merge		= bfq_request_merge,
		.requests_merged	= bfq_requests_merged,
		.request_merged		= bfq_request_merged,
		.has_work		= bfq_has_work,
		.depth_updated		= bfq_depth_updated,
		.init_hctx		= bfq_init_hctx,
		.init_sched		= bfq_init_queue,
		.exit_sched		= bfq_exit_queue,
	},

	.icq_size =		sizeof(struct bfq_io_cq),
	.icq_align =		__alignof__(struct bfq_io_cq),
	.elevator_attrs =	bfq_attrs,
	.elevator_name =	"bfq",
	.elevator_owner =	THIS_MODULE,
};
MODULE_ALIAS("bfq-iosched");

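/*
 * Module init: register the blkcg policy (when group scheduling is
 * configured), set up the bfq_queue slab cache, initialize the reference
 * weight-raising durations, and finally register the elevator.
 */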
static int __init bfq_init(void)
{
	int ret;

#ifdef CONFIG_BFQ_GROUP_IOSCHED
	ret = blkcg_policy_register(&blkcg_policy_bfq);
	if (ret)
		return ret;
#endif

	ret = -ENOMEM;
	if (bfq_slab_setup())
		goto err_pol_unreg;

	/*
	 * Times to load large popular applications for the typical
	 * systems installed on the reference devices (see the
	 * comments before the definition of the next
	 * array). Actually, we use slightly lower values, as the
	 * estimated peak rate tends to be smaller than the actual
	 * peak rate. The reason for this last fact is that estimates
	 * are computed over much shorter time intervals than the long
	 * intervals typically used for benchmarking. Why? First, to
	 * adapt more quickly to variations. Second, because an I/O
	 * scheduler cannot rely on a peak-rate-evaluation workload to
	 * be run for a long time.
	 */
	ref_wr_duration[0] = msecs_to_jiffies(7000); /* actually 8 sec */
	ref_wr_duration[1] = msecs_to_jiffies(2500); /* actually 3 sec */

	ret = elv_register(&iosched_bfq_mq);
	if (ret)
		goto slab_kill;

	return 0;

slab_kill:
	bfq_slab_kill();
err_pol_unreg:
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	blkcg_policy_unregister(&blkcg_policy_bfq);
#endif
	return ret;
}

static void __exit bfq_exit(void)
{
	elv_unregister(&iosched_bfq_mq);
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	blkcg_policy_unregister(&blkcg_policy_bfq);
#endif
	bfq_slab_kill();
}

module_init(bfq_init);
module_exit(bfq_exit);

MODULE_AUTHOR("Paolo Valente");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ Budget Fair Queueing I/O Scheduler");