cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

bfq-iosched.c (264983B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * Budget Fair Queueing (BFQ) I/O scheduler.
      4 *
      5 * Based on ideas and code from CFQ:
      6 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
      7 *
      8 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
      9 *		      Paolo Valente <paolo.valente@unimore.it>
     10 *
     11 * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
     12 *                    Arianna Avanzini <avanzini@google.com>
     13 *
     14 * Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org>
     15 *
     16 * BFQ is a proportional-share I/O scheduler, with some extra
     17 * low-latency capabilities. BFQ also supports full hierarchical
 * scheduling through cgroups. The next paragraphs provide an
 * introduction to BFQ's inner workings. Details on BFQ benefits, usage,
 * and limitations can be found in Documentation/block/bfq-iosched.rst.
     21 *
     22 * BFQ is a proportional-share storage-I/O scheduling algorithm based
     23 * on the slice-by-slice service scheme of CFQ. But BFQ assigns
     24 * budgets, measured in number of sectors, to processes instead of
 * time slices. The device is granted to the in-service process not
 * for a given time slice, but until it has exhausted its assigned
     27 * budget. This change from the time to the service domain enables BFQ
     28 * to distribute the device throughput among processes as desired,
     29 * without any distortion due to throughput fluctuations, or to device
     30 * internal queueing. BFQ uses an ad hoc internal scheduler, called
     31 * B-WF2Q+, to schedule processes according to their budgets. More
     32 * precisely, BFQ schedules queues associated with processes. Each
     33 * process/queue is assigned a user-configurable weight, and B-WF2Q+
     34 * guarantees that each queue receives a fraction of the throughput
     35 * proportional to its weight. Thanks to the accurate policy of
     36 * B-WF2Q+, BFQ can afford to assign high budgets to I/O-bound
     37 * processes issuing sequential requests (to boost the throughput),
     38 * and yet guarantee a low latency to interactive and soft real-time
     39 * applications.
     40 *
     41 * In particular, to provide these low-latency guarantees, BFQ
     42 * explicitly privileges the I/O of two classes of time-sensitive
     43 * applications: interactive and soft real-time. In more detail, BFQ
     44 * behaves this way if the low_latency parameter is set (default
     45 * configuration). This feature enables BFQ to provide applications in
     46 * these classes with a very low latency.
     47 *
     48 * To implement this feature, BFQ constantly tries to detect whether
     49 * the I/O requests in a bfq_queue come from an interactive or a soft
     50 * real-time application. For brevity, in these cases, the queue is
     51 * said to be interactive or soft real-time. In both cases, BFQ
     52 * privileges the service of the queue, over that of non-interactive
     53 * and non-soft-real-time queues. This privileging is performed,
 * mainly, by raising the weight of the queue. So, for brevity, we
 * simply call weight-raising periods the time periods during which a
 * queue is privileged because it is deemed interactive or soft real-time.
     57 *
     58 * The detection of soft real-time queues/applications is described in
     59 * detail in the comments on the function
     60 * bfq_bfqq_softrt_next_start. On the other hand, the detection of an
     61 * interactive queue works as follows: a queue is deemed interactive
 * if it is constantly non-empty only for a limited time interval,
 * after which it does become empty. The queue may be deemed
 * interactive again (for a limited time) if it restarts being
 * constantly non-empty, provided that this happens only after the
     66 * queue has remained empty for a given minimum idle time.
     67 *
 * By default, BFQ automatically computes the above maximum time
     69 * interval, i.e., the time interval after which a constantly
     70 * non-empty queue stops being deemed interactive. Since a queue is
     71 * weight-raised while it is deemed interactive, this maximum time
     72 * interval happens to coincide with the (maximum) duration of the
     73 * weight-raising for interactive queues.
     74 *
 * Finally, BFQ also features additional heuristics for preserving
 * both a low latency and a high throughput on NCQ-capable, rotational
 * or flash-based devices, and for getting the job done quickly for
 * applications consisting of many I/O-bound processes.
     79 *
     80 * NOTE: if the main or only goal, with a given device, is to achieve
     81 * the maximum-possible throughput at all times, then do switch off
     82 * all low-latency heuristics for that device, by setting low_latency
     83 * to 0.
     84 *
 * BFQ is described in [1], where a reference to the initial,
 * more theoretical paper on BFQ can also be found. The interested reader
     87 * can find in the latter paper full details on the main algorithm, as
     88 * well as formulas of the guarantees and formal proofs of all the
     89 * properties.  With respect to the version of BFQ presented in these
     90 * papers, this implementation adds a few more heuristics, such as the
     91 * ones that guarantee a low latency to interactive and soft real-time
     92 * applications, and a hierarchical extension based on H-WF2Q+.
     93 *
     94 * B-WF2Q+ is based on WF2Q+, which is described in [2], together with
     95 * H-WF2Q+, while the augmented tree used here to implement B-WF2Q+
     96 * with O(log N) complexity derives from the one introduced with EEVDF
     97 * in [3].
     98 *
     99 * [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O
    100 *     Scheduler", Proceedings of the First Workshop on Mobile System
    101 *     Technologies (MST-2015), May 2015.
    102 *     http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf
    103 *
    104 * [2] Jon C.R. Bennett and H. Zhang, "Hierarchical Packet Fair Queueing
    105 *     Algorithms", IEEE/ACM Transactions on Networking, 5(5):675-689,
 *     Oct 1997.
 *     http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
 *
    110 * [3] I. Stoica and H. Abdel-Wahab, "Earliest Eligible Virtual Deadline
    111 *     First: A Flexible and Accurate Mechanism for Proportional Share
    112 *     Resource Allocation", technical report.
 *     http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
    115 */
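
/*
 * As a minimal worked example of the proportional share that B-WF2Q+
 * provides: if three queues with weights 100, 200 and 700 are
 * continuously backlogged, they receive 10%, 20% and 70% of the
 * device throughput, respectively. Since budgets are measured in
 * sectors, a queue that has consumed a fraction
 * w_i / (w_1 + ... + w_N) of the sectors served has received exactly
 * its reserved share, whatever the device throughput happens to be.
 */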
    116#include <linux/module.h>
    117#include <linux/slab.h>
    118#include <linux/blkdev.h>
    119#include <linux/cgroup.h>
    120#include <linux/ktime.h>
    121#include <linux/rbtree.h>
    122#include <linux/ioprio.h>
    123#include <linux/sbitmap.h>
    124#include <linux/delay.h>
    125#include <linux/backing-dev.h>
    126
    127#include <trace/events/block.h>
    128
    129#include "elevator.h"
    130#include "blk.h"
    131#include "blk-mq.h"
    132#include "blk-mq-tag.h"
    133#include "blk-mq-sched.h"
    134#include "bfq-iosched.h"
    135#include "blk-wbt.h"
    136
    137#define BFQ_BFQQ_FNS(name)						\
    138void bfq_mark_bfqq_##name(struct bfq_queue *bfqq)			\
    139{									\
    140	__set_bit(BFQQF_##name, &(bfqq)->flags);			\
    141}									\
    142void bfq_clear_bfqq_##name(struct bfq_queue *bfqq)			\
    143{									\
    144	__clear_bit(BFQQF_##name, &(bfqq)->flags);		\
    145}									\
    146int bfq_bfqq_##name(const struct bfq_queue *bfqq)			\
    147{									\
    148	return test_bit(BFQQF_##name, &(bfqq)->flags);		\
    149}
    150
    151BFQ_BFQQ_FNS(just_created);
    152BFQ_BFQQ_FNS(busy);
    153BFQ_BFQQ_FNS(wait_request);
    154BFQ_BFQQ_FNS(non_blocking_wait_rq);
    155BFQ_BFQQ_FNS(fifo_expire);
    156BFQ_BFQQ_FNS(has_short_ttime);
    157BFQ_BFQQ_FNS(sync);
    158BFQ_BFQQ_FNS(IO_bound);
    159BFQ_BFQQ_FNS(in_large_burst);
    160BFQ_BFQQ_FNS(coop);
    161BFQ_BFQQ_FNS(split_coop);
    162BFQ_BFQQ_FNS(softrt_update);
#undef BFQ_BFQQ_FNS
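
/*
 * For instance, BFQ_BFQQ_FNS(busy) above expands to the three helpers
 * bfq_mark_bfqq_busy(), bfq_clear_bfqq_busy() and bfq_bfqq_busy(),
 * which set, clear and test the BFQQF_busy bit, and are used along
 * the lines of the following sketch:
 *
 *	bfq_mark_bfqq_busy(bfqq);
 *	if (bfq_bfqq_busy(bfqq))
 *		bfq_clear_bfqq_busy(bfqq);
 */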
    164
    165/* Expiration time of async (0) and sync (1) requests, in ns. */
    166static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
    167
    168/* Maximum backwards seek (magic number lifted from CFQ), in KiB. */
    169static const int bfq_back_max = 16 * 1024;
    170
    171/* Penalty of a backwards seek, in number of sectors. */
    172static const int bfq_back_penalty = 2;
    173
    174/* Idling period duration, in ns. */
    175static u64 bfq_slice_idle = NSEC_PER_SEC / 125;
    176
    177/* Minimum number of assigned budgets for which stats are safe to compute. */
    178static const int bfq_stats_min_budgets = 194;
    179
/* Default maximum budget value, in sectors. */
    181static const int bfq_default_max_budget = 16 * 1024;
    182
    183/*
    184 * When a sync request is dispatched, the queue that contains that
    185 * request, and all the ancestor entities of that queue, are charged
    186 * with the number of sectors of the request. In contrast, if the
    187 * request is async, then the queue and its ancestor entities are
    188 * charged with the number of sectors of the request, multiplied by
    189 * the factor below. This throttles the bandwidth for async I/O,
 * w.r.t. sync I/O, and it is done to counter the tendency of async
 * writes to steal I/O throughput from reads.
    192 *
    193 * The current value of this parameter is the result of a tuning with
    194 * several hardware and software configurations. We tried to find the
    195 * lowest value for which writes do not cause noticeable problems to
 * reads. In fact, the lower this parameter, the more stable the I/O
 * control, in the following respect. The lower this parameter is, the
 * less the bandwidth enjoyed by a group decreases
 * - when the group does writes, w.r.t. when it does reads;
 * - when other groups do reads, w.r.t. when they do writes.
    201 */
    202static const int bfq_async_charge_factor = 3;
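
/*
 * For example, with the factor above, an async request of 8 sectors
 * is charged 8 * 3 = 24 sectors to its queue and to the queue's
 * ancestor entities, while a sync request of the same size is
 * charged just 8 sectors (see bfq_serv_to_charge() below).
 */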
    203
/* Default timeout value, in jiffies, approximating the CFQ default. */
    205const int bfq_timeout = HZ / 8;
    206
    207/*
    208 * Time limit for merging (see comments in bfq_setup_cooperator). Set
    209 * to the slowest value that, in our tests, proved to be effective in
    210 * removing false positives, while not causing true positives to miss
    211 * queue merging.
    212 *
    213 * As can be deduced from the low time limit below, queue merging, if
    214 * successful, happens at the very beginning of the I/O of the involved
    215 * cooperating processes, as a consequence of the arrival of the very
    216 * first requests from each cooperator.  After that, there is very
    217 * little chance to find cooperators.
    218 */
    219static const unsigned long bfq_merge_time_limit = HZ/10;
    220
    221static struct kmem_cache *bfq_pool;
    222
    223/* Below this threshold (in ns), we consider thinktime immediate. */
    224#define BFQ_MIN_TT		(2 * NSEC_PER_MSEC)
    225
    226/* hw_tag detection: parallel requests threshold and min samples needed. */
    227#define BFQ_HW_QUEUE_THRESHOLD	3
    228#define BFQ_HW_QUEUE_SAMPLES	32
    229
    230#define BFQQ_SEEK_THR		(sector_t)(8 * 100)
    231#define BFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
    232#define BFQ_RQ_SEEKY(bfqd, last_pos, rq) \
    233	(get_sdist(last_pos, rq) >			\
    234	 BFQQ_SEEK_THR &&				\
    235	 (!blk_queue_nonrot(bfqd->queue) ||		\
    236	  blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT))
    237#define BFQQ_CLOSE_THR		(sector_t)(8 * 1024)
    238#define BFQQ_SEEKY(bfqq)	(hweight32(bfqq->seek_history) > 19)
    239/*
    240 * Sync random I/O is likely to be confused with soft real-time I/O,
 * because it is characterized by limited throughput and an apparently
 * isochronous arrival pattern. To avoid false positives, queues
    243 * containing only random (seeky) I/O are prevented from being tagged
    244 * as soft real-time.
    245 */
    246#define BFQQ_TOTALLY_SEEKY(bfqq)	(bfqq->seek_history == -1)
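
/*
 * seek_history is a 32-bit window holding one seekiness bit per
 * recently arrived request. On each arrival it is updated along the
 * lines of the following sketch (see bfq_update_io_seektime() later
 * in this file):
 *
 *	bfqq->seek_history <<= 1;
 *	bfqq->seek_history |= BFQ_RQ_SEEKY(bfqd, bfqq->last_request_pos, rq);
 *
 * BFQQ_SEEKY() thus deems a queue seeky if more than 19 of its last
 * 32 requests were seeky, and BFQQ_TOTALLY_SEEKY() if all 32 were
 * (all bits set, i.e., seek_history == -1).
 */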
    247
    248/* Min number of samples required to perform peak-rate update */
    249#define BFQ_RATE_MIN_SAMPLES	32
    250/* Min observation time interval required to perform a peak-rate update (ns) */
    251#define BFQ_RATE_MIN_INTERVAL	(300*NSEC_PER_MSEC)
    252/* Target observation time interval for a peak-rate update (ns) */
    253#define BFQ_RATE_REF_INTERVAL	NSEC_PER_SEC
    254
    255/*
    256 * Shift used for peak-rate fixed precision calculations.
    257 * With
    258 * - the current shift: 16 positions
    259 * - the current type used to store rate: u32
    260 * - the current unit of measure for rate: [sectors/usec], or, more precisely,
    261 *   [(sectors/usec) / 2^BFQ_RATE_SHIFT] to take into account the shift,
    262 * the range of rates that can be stored is
    263 * [1 / 2^BFQ_RATE_SHIFT, 2^(32 - BFQ_RATE_SHIFT)] sectors/usec =
    264 * [1 / 2^16, 2^16] sectors/usec = [15e-6, 65536] sectors/usec =
    265 * [15, 65G] sectors/sec
    266 * Which, assuming a sector size of 512B, corresponds to a range of
    267 * [7.5K, 33T] B/sec
    268 */
    269#define BFQ_RATE_SHIFT		16
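
/*
 * Minimal illustrative helper (not used by the scheduler) showing how
 * a rate stored in the above fixed-point format maps back to bytes
 * per second:
 */
static inline u64 bfq_rate_to_bps_example(u32 rate)
{
	/* rate is in (sectors/usec) << BFQ_RATE_SHIFT; a sector is 512 B */
	return (u64)rate * 512 * USEC_PER_SEC >> BFQ_RATE_SHIFT;
}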
    270
    271/*
    272 * When configured for computing the duration of the weight-raising
    273 * for interactive queues automatically (see the comments at the
    274 * beginning of this file), BFQ does it using the following formula:
    275 * duration = (ref_rate / r) * ref_wr_duration,
    276 * where r is the peak rate of the device, and ref_rate and
    277 * ref_wr_duration are two reference parameters.  In particular,
    278 * ref_rate is the peak rate of the reference storage device (see
    279 * below), and ref_wr_duration is about the maximum time needed, with
    280 * BFQ and while reading two files in parallel, to load typical large
    281 * applications on the reference device (see the comments on
    282 * max_service_from_wr below, for more details on how ref_wr_duration
    283 * is obtained).  In practice, the slower/faster the device at hand
    284 * is, the more/less it takes to load applications with respect to the
    285 * reference device.  Accordingly, the longer/shorter BFQ grants
    286 * weight raising to interactive applications.
    287 *
    288 * BFQ uses two different reference pairs (ref_rate, ref_wr_duration),
    289 * depending on whether the device is rotational or non-rotational.
    290 *
    291 * In the following definitions, ref_rate[0] and ref_wr_duration[0]
    292 * are the reference values for a rotational device, whereas
    293 * ref_rate[1] and ref_wr_duration[1] are the reference values for a
    294 * non-rotational device. The reference rates are not the actual peak
    295 * rates of the devices used as a reference, but slightly lower
    296 * values. The reason for using slightly lower values is that the
    297 * peak-rate estimator tends to yield slightly lower values than the
    298 * actual peak rate (it can yield the actual peak rate only if there
    299 * is only one process doing I/O, and the process does sequential
    300 * I/O).
    301 *
    302 * The reference peak rates are measured in sectors/usec, left-shifted
    303 * by BFQ_RATE_SHIFT.
    304 */
    305static int ref_rate[2] = {14000, 33000};
    306/*
    307 * To improve readability, a conversion function is used to initialize
    308 * the following array, which entails that the array can be
    309 * initialized only in a function.
    310 */
    311static int ref_wr_duration[2];
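
/*
 * For example, on a rotational device whose estimated peak rate r is
 * half of ref_rate[0], the formula above yields a weight-raising
 * duration twice ref_wr_duration[0]: a device that is twice as slow
 * is granted twice as much time to load the same applications.
 */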
    312
    313/*
    314 * BFQ uses the above-detailed, time-based weight-raising mechanism to
    315 * privilege interactive tasks. This mechanism is vulnerable to the
    316 * following false positives: I/O-bound applications that will go on
    317 * doing I/O for much longer than the duration of weight
    318 * raising. These applications have basically no benefit from being
    319 * weight-raised at the beginning of their I/O. On the opposite end,
    320 * while being weight-raised, these applications
 * a) unjustly steal throughput from applications that may actually need
    322 * low latency;
    323 * b) make BFQ uselessly perform device idling; device idling results
    324 * in loss of device throughput with most flash-based storage, and may
    325 * increase latencies when used purposelessly.
    326 *
    327 * BFQ tries to reduce these problems, by adopting the following
    328 * countermeasure. To introduce this countermeasure, we need first to
    329 * finish explaining how the duration of weight-raising for
    330 * interactive tasks is computed.
    331 *
    332 * For a bfq_queue deemed as interactive, the duration of weight
    333 * raising is dynamically adjusted, as a function of the estimated
    334 * peak rate of the device, so as to be equal to the time needed to
 * execute the 'largest' interactive task we have benchmarked so far. By
    336 * largest task, we mean the task for which each involved process has
    337 * to do more I/O than for any of the other tasks we benchmarked. This
    338 * reference interactive task is the start-up of LibreOffice Writer,
    339 * and in this task each process/bfq_queue needs to have at most ~110K
    340 * sectors transferred.
    341 *
    342 * This last piece of information enables BFQ to reduce the actual
    343 * duration of weight-raising for at least one class of I/O-bound
    344 * applications: those doing sequential or quasi-sequential I/O. An
    345 * example is file copy. In fact, once started, the main I/O-bound
    346 * processes of these applications usually consume the above 110K
    347 * sectors in much less time than the processes of an application that
    348 * is starting, because these I/O-bound processes will greedily devote
    349 * almost all their CPU cycles only to their target,
    350 * throughput-friendly I/O operations. This is even more true if BFQ
    351 * happens to be underestimating the device peak rate, and thus
    352 * overestimating the duration of weight raising. But, according to
 * our measurements, once they have transferred 110K sectors, these processes
    354 * have no right to be weight-raised any longer.
    355 *
 * Based on the last consideration, BFQ ends weight-raising for a
    357 * bfq_queue if the latter happens to have received an amount of
    358 * service at least equal to the following constant. The constant is
    359 * set to slightly more than 110K, to have a minimum safety margin.
    360 *
    361 * This early ending of weight-raising reduces the amount of time
    362 * during which interactive false positives cause the two problems
    363 * described at the beginning of these comments.
    364 */
    365static const unsigned long max_service_from_wr = 120000;
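
/*
 * With 512 B sectors, the constant above corresponds to
 * 120000 * 512 B ~= 58.6 MiB of service: once a weight-raised queue
 * has transferred that much, its weight-raising is ended even if its
 * weight-raising period has not elapsed yet.
 */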
    366
    367/*
    368 * Maximum time between the creation of two queues, for stable merge
    369 * to be activated (in ms)
    370 */
    371static const unsigned long bfq_activation_stable_merging = 600;
    372/*
    373 * Minimum time to be waited before evaluating delayed stable merge (in ms)
    374 */
    375static const unsigned long bfq_late_stable_merging = 600;
    376
    377#define RQ_BIC(rq)		((struct bfq_io_cq *)((rq)->elv.priv[0]))
    378#define RQ_BFQQ(rq)		((rq)->elv.priv[1])
    379
    380struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync)
    381{
    382	return bic->bfqq[is_sync];
    383}
    384
    385static void bfq_put_stable_ref(struct bfq_queue *bfqq);
    386
    387void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync)
    388{
    389	/*
    390	 * If bfqq != NULL, then a non-stable queue merge between
    391	 * bic->bfqq and bfqq is happening here. This causes troubles
    392	 * in the following case: bic->bfqq has also been scheduled
    393	 * for a possible stable merge with bic->stable_merge_bfqq,
    394	 * and bic->stable_merge_bfqq == bfqq happens to
    395	 * hold. Troubles occur because bfqq may then undergo a split,
    396	 * thereby becoming eligible for a stable merge. Yet, if
    397	 * bic->stable_merge_bfqq points exactly to bfqq, then bfqq
    398	 * would be stably merged with itself. To avoid this anomaly,
    399	 * we cancel the stable merge if
    400	 * bic->stable_merge_bfqq == bfqq.
    401	 */
    402	bic->bfqq[is_sync] = bfqq;
    403
    404	if (bfqq && bic->stable_merge_bfqq == bfqq) {
    405		/*
    406		 * Actually, these same instructions are executed also
    407		 * in bfq_setup_cooperator, in case of abort or actual
    408		 * execution of a stable merge. We could avoid
    409		 * repeating these instructions there too, but if we
    410		 * did so, we would nest even more complexity in this
    411		 * function.
    412		 */
    413		bfq_put_stable_ref(bic->stable_merge_bfqq);
    414
    415		bic->stable_merge_bfqq = NULL;
    416	}
    417}
    418
    419struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
    420{
    421	return bic->icq.q->elevator->elevator_data;
    422}
    423
    424/**
    425 * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
    426 * @icq: the iocontext queue.
    427 */
    428static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
    429{
    430	/* bic->icq is the first member, %NULL will convert to %NULL */
    431	return container_of(icq, struct bfq_io_cq, icq);
    432}
    433
    434/**
 * bfq_bic_lookup - search current->io_context for a bic associated with @q.
    436 * @q: the request queue.
    437 */
    438static struct bfq_io_cq *bfq_bic_lookup(struct request_queue *q)
    439{
    440	struct bfq_io_cq *icq;
    441	unsigned long flags;
    442
    443	if (!current->io_context)
    444		return NULL;
    445
    446	spin_lock_irqsave(&q->queue_lock, flags);
    447	icq = icq_to_bic(ioc_lookup_icq(q));
    448	spin_unlock_irqrestore(&q->queue_lock, flags);
    449
    450	return icq;
    451}
    452
    453/*
    454 * Scheduler run of queue, if there are requests pending and no one in the
    455 * driver that will restart queueing.
    456 */
    457void bfq_schedule_dispatch(struct bfq_data *bfqd)
    458{
    459	lockdep_assert_held(&bfqd->lock);
    460
    461	if (bfqd->queued != 0) {
    462		bfq_log(bfqd, "schedule dispatch");
    463		blk_mq_run_hw_queues(bfqd->queue, true);
    464	}
    465}
    466
    467#define bfq_class_idle(bfqq)	((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
    468
    469#define bfq_sample_valid(samples)	((samples) > 80)
    470
    471/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
    473 * We choose the request that is closer to the head right now.  Distance
    474 * behind the head is penalized and only allowed to a certain extent.
    475 */
    476static struct request *bfq_choose_req(struct bfq_data *bfqd,
    477				      struct request *rq1,
    478				      struct request *rq2,
    479				      sector_t last)
    480{
    481	sector_t s1, s2, d1 = 0, d2 = 0;
    482	unsigned long back_max;
    483#define BFQ_RQ1_WRAP	0x01 /* request 1 wraps */
    484#define BFQ_RQ2_WRAP	0x02 /* request 2 wraps */
    485	unsigned int wrap = 0; /* bit mask: requests behind the disk head? */
    486
    487	if (!rq1 || rq1 == rq2)
    488		return rq2;
    489	if (!rq2)
    490		return rq1;
    491
    492	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
    493		return rq1;
    494	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
    495		return rq2;
    496	if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
    497		return rq1;
    498	else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
    499		return rq2;
    500
    501	s1 = blk_rq_pos(rq1);
    502	s2 = blk_rq_pos(rq2);
    503
    504	/*
    505	 * By definition, 1KiB is 2 sectors.
    506	 */
    507	back_max = bfqd->bfq_back_max * 2;
    508
    509	/*
    510	 * Strict one way elevator _except_ in the case where we allow
    511	 * short backward seeks which are biased as twice the cost of a
    512	 * similar forward seek.
    513	 */
    514	if (s1 >= last)
    515		d1 = s1 - last;
    516	else if (s1 + back_max >= last)
    517		d1 = (last - s1) * bfqd->bfq_back_penalty;
    518	else
    519		wrap |= BFQ_RQ1_WRAP;
    520
    521	if (s2 >= last)
    522		d2 = s2 - last;
    523	else if (s2 + back_max >= last)
    524		d2 = (last - s2) * bfqd->bfq_back_penalty;
    525	else
    526		wrap |= BFQ_RQ2_WRAP;
    527
    528	/* Found required data */
    529
    530	/*
    531	 * By doing switch() on the bit mask "wrap" we avoid having to
    532	 * check two variables for all permutations: --> faster!
    533	 */
    534	switch (wrap) {
    535	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
    536		if (d1 < d2)
    537			return rq1;
    538		else if (d2 < d1)
    539			return rq2;
    540
    541		if (s1 >= s2)
    542			return rq1;
    543		else
    544			return rq2;
    545
    546	case BFQ_RQ2_WRAP:
    547		return rq1;
    548	case BFQ_RQ1_WRAP:
    549		return rq2;
    550	case BFQ_RQ1_WRAP|BFQ_RQ2_WRAP: /* both rqs wrapped */
    551	default:
    552		/*
    553		 * Since both rqs are wrapped,
    554		 * start with the one that's further behind head
    555		 * (--> only *one* back seek required),
    556		 * since back seek takes more time than forward.
    557		 */
    558		if (s1 <= s2)
    559			return rq1;
    560		else
    561			return rq2;
    562	}
    563}
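
/*
 * Worked example for the function above: with last = 1000, a forward
 * request at sector 1100 gets d1 = 100, while a backward request at
 * sector 900 (within back_max) gets d2 = (1000 - 900) *
 * bfq_back_penalty = 200 with the default penalty of 2. The forward
 * request wins although both are 100 sectors away from the head.
 */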
    564
    565#define BFQ_LIMIT_INLINE_DEPTH 16
    566
    567#ifdef CONFIG_BFQ_GROUP_IOSCHED
    568static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
    569{
    570	struct bfq_data *bfqd = bfqq->bfqd;
    571	struct bfq_entity *entity = &bfqq->entity;
    572	struct bfq_entity *inline_entities[BFQ_LIMIT_INLINE_DEPTH];
    573	struct bfq_entity **entities = inline_entities;
    574	int depth, level, alloc_depth = BFQ_LIMIT_INLINE_DEPTH;
    575	int class_idx = bfqq->ioprio_class - 1;
    576	struct bfq_sched_data *sched_data;
    577	unsigned long wsum;
    578	bool ret = false;
    579
    580	if (!entity->on_st_or_in_serv)
    581		return false;
    582
    583retry:
    584	spin_lock_irq(&bfqd->lock);
    585	/* +1 for bfqq entity, root cgroup not included */
    586	depth = bfqg_to_blkg(bfqq_group(bfqq))->blkcg->css.cgroup->level + 1;
    587	if (depth > alloc_depth) {
    588		spin_unlock_irq(&bfqd->lock);
    589		if (entities != inline_entities)
    590			kfree(entities);
    591		entities = kmalloc_array(depth, sizeof(*entities), GFP_NOIO);
    592		if (!entities)
    593			return false;
    594		alloc_depth = depth;
    595		goto retry;
    596	}
    597
    598	sched_data = entity->sched_data;
    599	/* Gather our ancestors as we need to traverse them in reverse order */
    600	level = 0;
    601	for_each_entity(entity) {
    602		/*
    603		 * If at some level entity is not even active, allow request
    604		 * queueing so that BFQ knows there's work to do and activate
    605		 * entities.
    606		 */
    607		if (!entity->on_st_or_in_serv)
    608			goto out;
    609		/* Uh, more parents than cgroup subsystem thinks? */
    610		if (WARN_ON_ONCE(level >= depth))
    611			break;
    612		entities[level++] = entity;
    613	}
    614	WARN_ON_ONCE(level != depth);
    615	for (level--; level >= 0; level--) {
    616		entity = entities[level];
    617		if (level > 0) {
    618			wsum = bfq_entity_service_tree(entity)->wsum;
    619		} else {
    620			int i;
    621			/*
    622			 * For bfqq itself we take into account service trees
    623			 * of all higher priority classes and multiply their
			 * weights so that a low-prio queue from a higher
			 * class gets more requests than a high-prio queue
			 * from a lower class.
    627			 */
    628			wsum = 0;
    629			for (i = 0; i <= class_idx; i++) {
    630				wsum = wsum * IOPRIO_BE_NR +
    631					sched_data->service_tree[i].wsum;
    632			}
    633		}
    634		limit = DIV_ROUND_CLOSEST(limit * entity->weight, wsum);
    635		if (entity->allocated >= limit) {
    636			bfq_log_bfqq(bfqq->bfqd, bfqq,
    637				"too many requests: allocated %d limit %d level %d",
    638				entity->allocated, limit, level);
    639			ret = true;
    640			break;
    641		}
    642	}
    643out:
    644	spin_unlock_irq(&bfqd->lock);
    645	if (entities != inline_entities)
    646		kfree(entities);
    647	return ret;
    648}
    649#else
    650static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
    651{
    652	return false;
    653}
    654#endif
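
/*
 * Minimal worked example of the per-level scaling above (numbers
 * hypothetical): with limit = 64 at the root, a group entity holding
 * 1/4 of the weight at its level (weight 100, wsum 400) passes
 * limit = DIV_ROUND_CLOSEST(64 * 100, 400) = 16 down to the next
 * level; a queue holding half of the weight there may then have at
 * most 8 requests allocated before bfqq_request_over_limit() returns
 * true.
 */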
    655
    656/*
    657 * Async I/O can easily starve sync I/O (both sync reads and sync
    658 * writes), by consuming all tags. Similarly, storms of sync writes,
    659 * such as those that sync(2) may trigger, can starve sync reads.
    660 * Limit depths of async I/O and sync writes so as to counter both
    661 * problems.
    662 *
 * Also if a bfq queue or its parent cgroup consumes more tags than would be
 * appropriate for its weight, we trim the available tag depth to 1. This
    665 * avoids a situation where one cgroup can starve another cgroup from tags and
    666 * thus block service differentiation among cgroups. Note that because the
    667 * queue / cgroup already has many requests allocated and queued, this does not
    668 * significantly affect service guarantees coming from the BFQ scheduling
    669 * algorithm.
    670 */
    671static void bfq_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
    672{
    673	struct bfq_data *bfqd = data->q->elevator->elevator_data;
    674	struct bfq_io_cq *bic = bfq_bic_lookup(data->q);
    675	struct bfq_queue *bfqq = bic ? bic_to_bfqq(bic, op_is_sync(op)) : NULL;
    676	int depth;
    677	unsigned limit = data->q->nr_requests;
    678
    679	/* Sync reads have full depth available */
    680	if (op_is_sync(op) && !op_is_write(op)) {
    681		depth = 0;
    682	} else {
    683		depth = bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(op)];
    684		limit = (limit * depth) >> bfqd->full_depth_shift;
    685	}
    686
    687	/*
    688	 * Does queue (or any parent entity) exceed number of requests that
    689	 * should be available to it? Heavily limit depth so that it cannot
    690	 * consume more available requests and thus starve other entities.
    691	 */
    692	if (bfqq && bfqq_request_over_limit(bfqq, limit))
    693		depth = 1;
    694
    695	bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u",
    696		__func__, bfqd->wr_busy_queues, op_is_sync(op), depth);
    697	if (depth)
    698		data->shallow_depth = depth;
    699}
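
/*
 * For instance (hypothetical values): with nr_requests = 64, a word
 * depth of 48 and full_depth_shift = 6, an async request gets
 * depth = 48 and limit = (64 * 48) >> 6 = 48 tags, whereas a sync
 * read keeps depth = 0, i.e., the full depth. A queue over its
 * scaled limit has its depth collapsed to 1.
 */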
    700
    701static struct bfq_queue *
    702bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
    703		     sector_t sector, struct rb_node **ret_parent,
    704		     struct rb_node ***rb_link)
    705{
    706	struct rb_node **p, *parent;
    707	struct bfq_queue *bfqq = NULL;
    708
    709	parent = NULL;
    710	p = &root->rb_node;
    711	while (*p) {
    712		struct rb_node **n;
    713
    714		parent = *p;
    715		bfqq = rb_entry(parent, struct bfq_queue, pos_node);
    716
    717		/*
    718		 * Sort strictly based on sector. Smallest to the left,
    719		 * largest to the right.
    720		 */
    721		if (sector > blk_rq_pos(bfqq->next_rq))
    722			n = &(*p)->rb_right;
    723		else if (sector < blk_rq_pos(bfqq->next_rq))
    724			n = &(*p)->rb_left;
    725		else
    726			break;
    727		p = n;
    728		bfqq = NULL;
    729	}
    730
    731	*ret_parent = parent;
    732	if (rb_link)
    733		*rb_link = p;
    734
    735	bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
    736		(unsigned long long)sector,
    737		bfqq ? bfqq->pid : 0);
    738
    739	return bfqq;
    740}
    741
    742static bool bfq_too_late_for_merging(struct bfq_queue *bfqq)
    743{
    744	return bfqq->service_from_backlogged > 0 &&
    745		time_is_before_jiffies(bfqq->first_IO_time +
    746				       bfq_merge_time_limit);
    747}
    748
    749/*
 * The following function is marked as __cold not because it is
 * actually cold, but for the same performance goal described in the
    752 * comments on the likely() at the beginning of
    753 * bfq_setup_cooperator(). Unexpectedly, to reach an even lower
    754 * execution time for the case where this function is not invoked, we
    755 * had to add an unlikely() in each involved if().
    756 */
    757void __cold
    758bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
    759{
    760	struct rb_node **p, *parent;
    761	struct bfq_queue *__bfqq;
    762
    763	if (bfqq->pos_root) {
    764		rb_erase(&bfqq->pos_node, bfqq->pos_root);
    765		bfqq->pos_root = NULL;
    766	}
    767
    768	/* oom_bfqq does not participate in queue merging */
    769	if (bfqq == &bfqd->oom_bfqq)
    770		return;
    771
    772	/*
    773	 * bfqq cannot be merged any longer (see comments in
    774	 * bfq_setup_cooperator): no point in adding bfqq into the
    775	 * position tree.
    776	 */
    777	if (bfq_too_late_for_merging(bfqq))
    778		return;
    779
    780	if (bfq_class_idle(bfqq))
    781		return;
    782	if (!bfqq->next_rq)
    783		return;
    784
    785	bfqq->pos_root = &bfqq_group(bfqq)->rq_pos_tree;
    786	__bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
    787			blk_rq_pos(bfqq->next_rq), &parent, &p);
    788	if (!__bfqq) {
    789		rb_link_node(&bfqq->pos_node, parent, p);
    790		rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
    791	} else
    792		bfqq->pos_root = NULL;
    793}
    794
    795/*
    796 * The following function returns false either if every active queue
    797 * must receive the same share of the throughput (symmetric scenario),
    798 * or, as a special case, if bfqq must receive a share of the
    799 * throughput lower than or equal to the share that every other active
    800 * queue must receive.  If bfqq does sync I/O, then these are the only
    801 * two cases where bfqq happens to be guaranteed its share of the
    802 * throughput even if I/O dispatching is not plugged when bfqq remains
    803 * temporarily empty (for more details, see the comments in the
    804 * function bfq_better_to_idle()). For this reason, the return value
    805 * of this function is used to check whether I/O-dispatch plugging can
    806 * be avoided.
    807 *
    808 * The above first case (symmetric scenario) occurs when:
    809 * 1) all active queues have the same weight,
    810 * 2) all active queues belong to the same I/O-priority class,
    811 * 3) all active groups at the same level in the groups tree have the same
    812 *    weight,
    813 * 4) all active groups at the same level in the groups tree have the same
    814 *    number of children.
    815 *
    816 * Unfortunately, keeping the necessary state for evaluating exactly
    817 * the last two symmetry sub-conditions above would be quite complex
    818 * and time consuming. Therefore this function evaluates, instead,
    819 * only the following stronger three sub-conditions, for which it is
    820 * much easier to maintain the needed state:
    821 * 1) all active queues have the same weight,
    822 * 2) all active queues belong to the same I/O-priority class,
    823 * 3) there are no active groups.
    824 * In particular, the last condition is always true if hierarchical
    825 * support or the cgroups interface are not enabled, thus no state
    826 * needs to be maintained in this case.
    827 */
    828static bool bfq_asymmetric_scenario(struct bfq_data *bfqd,
    829				   struct bfq_queue *bfqq)
    830{
    831	bool smallest_weight = bfqq &&
    832		bfqq->weight_counter &&
    833		bfqq->weight_counter ==
    834		container_of(
    835			rb_first_cached(&bfqd->queue_weights_tree),
    836			struct bfq_weight_counter,
    837			weights_node);
    838
    839	/*
    840	 * For queue weights to differ, queue_weights_tree must contain
    841	 * at least two nodes.
    842	 */
    843	bool varied_queue_weights = !smallest_weight &&
    844		!RB_EMPTY_ROOT(&bfqd->queue_weights_tree.rb_root) &&
    845		(bfqd->queue_weights_tree.rb_root.rb_node->rb_left ||
    846		 bfqd->queue_weights_tree.rb_root.rb_node->rb_right);
    847
    848	bool multiple_classes_busy =
    849		(bfqd->busy_queues[0] && bfqd->busy_queues[1]) ||
    850		(bfqd->busy_queues[0] && bfqd->busy_queues[2]) ||
    851		(bfqd->busy_queues[1] && bfqd->busy_queues[2]);
    852
    853	return varied_queue_weights || multiple_classes_busy
    854#ifdef CONFIG_BFQ_GROUP_IOSCHED
    855	       || bfqd->num_groups_with_pending_reqs > 0
    856#endif
    857		;
    858}
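
/*
 * Example: two busy best-effort queues with weights 100 and 200 make
 * queue_weights_tree hold two counters, so varied_queue_weights is
 * true and the scenario is asymmetric. If both queues had weight 100,
 * the tree would hold a single counter and, absent busy queues of
 * other classes or active groups, the scenario would be symmetric.
 */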
    859
    860/*
    861 * If the weight-counter tree passed as input contains no counter for
    862 * the weight of the input queue, then add that counter; otherwise just
    863 * increment the existing counter.
    864 *
    865 * Note that weight-counter trees contain few nodes in mostly symmetric
    866 * scenarios. For example, if all queues have the same weight, then the
    867 * weight-counter tree for the queues may contain at most one node.
    868 * This holds even if low_latency is on, because weight-raised queues
    869 * are not inserted in the tree.
    870 * In most scenarios, the rate at which nodes are created/destroyed
    871 * should be low too.
    872 */
    873void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
    874			  struct rb_root_cached *root)
    875{
    876	struct bfq_entity *entity = &bfqq->entity;
    877	struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
    878	bool leftmost = true;
    879
    880	/*
    881	 * Do not insert if the queue is already associated with a
    882	 * counter, which happens if:
    883	 *   1) a request arrival has caused the queue to become both
    884	 *      non-weight-raised, and hence change its weight, and
    885	 *      backlogged; in this respect, each of the two events
    886	 *      causes an invocation of this function,
    887	 *   2) this is the invocation of this function caused by the
    888	 *      second event. This second invocation is actually useless,
    889	 *      and we handle this fact by exiting immediately. More
    890	 *      efficient or clearer solutions might possibly be adopted.
    891	 */
    892	if (bfqq->weight_counter)
    893		return;
    894
    895	while (*new) {
    896		struct bfq_weight_counter *__counter = container_of(*new,
    897						struct bfq_weight_counter,
    898						weights_node);
    899		parent = *new;
    900
    901		if (entity->weight == __counter->weight) {
    902			bfqq->weight_counter = __counter;
    903			goto inc_counter;
    904		}
    905		if (entity->weight < __counter->weight)
    906			new = &((*new)->rb_left);
    907		else {
    908			new = &((*new)->rb_right);
    909			leftmost = false;
    910		}
    911	}
    912
    913	bfqq->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
    914				       GFP_ATOMIC);
    915
    916	/*
    917	 * In the unlucky event of an allocation failure, we just
	 * exit. This will cause the weight of the queue to not be
	 * considered in bfq_asymmetric_scenario, which, in its turn,
	 * causes the scenario to be deemed wrongly symmetric in case
	 * bfqq's weight would have been the only weight making the
	 * scenario asymmetric. On the bright side, however, no imbalance
	 * will occur when bfqq becomes inactive again (the
	 * invocation of this function is triggered by an activation
	 * of the queue). In fact, bfq_weights_tree_remove does nothing
    926	 * if !bfqq->weight_counter.
    927	 */
    928	if (unlikely(!bfqq->weight_counter))
    929		return;
    930
    931	bfqq->weight_counter->weight = entity->weight;
    932	rb_link_node(&bfqq->weight_counter->weights_node, parent, new);
    933	rb_insert_color_cached(&bfqq->weight_counter->weights_node, root,
    934				leftmost);
    935
    936inc_counter:
    937	bfqq->weight_counter->num_active++;
    938	bfqq->ref++;
    939}
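
/*
 * For example, if five busy queues all have weight 100, the tree
 * holds a single counter with weight == 100 and num_active == 5;
 * a sixth queue with weight 200 adds a second node, which is exactly
 * what makes bfq_asymmetric_scenario() report varied queue weights.
 */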
    940
    941/*
    942 * Decrement the weight counter associated with the queue, and, if the
    943 * counter reaches 0, remove the counter from the tree.
    944 * See the comments to the function bfq_weights_tree_add() for considerations
    945 * about overhead.
    946 */
    947void __bfq_weights_tree_remove(struct bfq_data *bfqd,
    948			       struct bfq_queue *bfqq,
    949			       struct rb_root_cached *root)
    950{
    951	if (!bfqq->weight_counter)
    952		return;
    953
    954	bfqq->weight_counter->num_active--;
    955	if (bfqq->weight_counter->num_active > 0)
    956		goto reset_entity_pointer;
    957
    958	rb_erase_cached(&bfqq->weight_counter->weights_node, root);
    959	kfree(bfqq->weight_counter);
    960
    961reset_entity_pointer:
    962	bfqq->weight_counter = NULL;
    963	bfq_put_queue(bfqq);
    964}
    965
    966/*
    967 * Invoke __bfq_weights_tree_remove on bfqq and decrement the number
    968 * of active groups for each queue's inactive parent entity.
    969 */
    970void bfq_weights_tree_remove(struct bfq_data *bfqd,
    971			     struct bfq_queue *bfqq)
    972{
    973	struct bfq_entity *entity = bfqq->entity.parent;
    974
    975	for_each_entity(entity) {
    976		struct bfq_sched_data *sd = entity->my_sched_data;
    977
    978		if (sd->next_in_service || sd->in_service_entity) {
    979			/*
    980			 * entity is still active, because either
    981			 * next_in_service or in_service_entity is not
    982			 * NULL (see the comments on the definition of
    983			 * next_in_service for details on why
    984			 * in_service_entity must be checked too).
    985			 *
    986			 * As a consequence, its parent entities are
    987			 * active as well, and thus this loop must
    988			 * stop here.
    989			 */
    990			break;
    991		}
    992
    993		/*
    994		 * The decrement of num_groups_with_pending_reqs is
    995		 * not performed immediately upon the deactivation of
    996		 * entity, but it is delayed to when it also happens
    997		 * that the first leaf descendant bfqq of entity gets
    998		 * all its pending requests completed. The following
    999		 * instructions perform this delayed decrement, if
   1000		 * needed. See the comments on
   1001		 * num_groups_with_pending_reqs for details.
   1002		 */
   1003		if (entity->in_groups_with_pending_reqs) {
   1004			entity->in_groups_with_pending_reqs = false;
   1005			bfqd->num_groups_with_pending_reqs--;
   1006		}
   1007	}
   1008
   1009	/*
	 * The next function is invoked last, because it causes bfqq to be
   1011	 * freed if the following holds: bfqq is not in service and
   1012	 * has no dispatched request. DO NOT use bfqq after the next
   1013	 * function invocation.
   1014	 */
   1015	__bfq_weights_tree_remove(bfqd, bfqq,
   1016				  &bfqd->queue_weights_tree);
   1017}
   1018
   1019/*
   1020 * Return expired entry, or NULL to just start from scratch in rbtree.
   1021 */
   1022static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
   1023				      struct request *last)
   1024{
   1025	struct request *rq;
   1026
   1027	if (bfq_bfqq_fifo_expire(bfqq))
   1028		return NULL;
   1029
   1030	bfq_mark_bfqq_fifo_expire(bfqq);
   1031
   1032	rq = rq_entry_fifo(bfqq->fifo.next);
   1033
   1034	if (rq == last || ktime_get_ns() < rq->fifo_time)
   1035		return NULL;
   1036
   1037	bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
   1038	return rq;
   1039}
   1040
   1041static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
   1042					struct bfq_queue *bfqq,
   1043					struct request *last)
   1044{
   1045	struct rb_node *rbnext = rb_next(&last->rb_node);
   1046	struct rb_node *rbprev = rb_prev(&last->rb_node);
   1047	struct request *next, *prev = NULL;
   1048
   1049	/* Follow expired path, else get first next available. */
   1050	next = bfq_check_fifo(bfqq, last);
   1051	if (next)
   1052		return next;
   1053
   1054	if (rbprev)
   1055		prev = rb_entry_rq(rbprev);
   1056
   1057	if (rbnext)
   1058		next = rb_entry_rq(rbnext);
   1059	else {
   1060		rbnext = rb_first(&bfqq->sort_list);
   1061		if (rbnext && rbnext != &last->rb_node)
   1062			next = rb_entry_rq(rbnext);
   1063	}
   1064
   1065	return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
   1066}
   1067
   1068/* see the definition of bfq_async_charge_factor for details */
   1069static unsigned long bfq_serv_to_charge(struct request *rq,
   1070					struct bfq_queue *bfqq)
   1071{
   1072	if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1 ||
   1073	    bfq_asymmetric_scenario(bfqq->bfqd, bfqq))
   1074		return blk_rq_sectors(rq);
   1075
   1076	return blk_rq_sectors(rq) * bfq_async_charge_factor;
   1077}
   1078
   1079/**
   1080 * bfq_updated_next_req - update the queue after a new next_rq selection.
   1081 * @bfqd: the device data the queue belongs to.
   1082 * @bfqq: the queue to update.
   1083 *
   1084 * If the first request of a queue changes we make sure that the queue
   1085 * has enough budget to serve at least its first request (if the
   1086 * request has grown).  We do this because if the queue has not enough
   1087 * budget for its first request, it has to go through two dispatch
   1088 * rounds to actually get it dispatched.
   1089 */
   1090static void bfq_updated_next_req(struct bfq_data *bfqd,
   1091				 struct bfq_queue *bfqq)
   1092{
   1093	struct bfq_entity *entity = &bfqq->entity;
   1094	struct request *next_rq = bfqq->next_rq;
   1095	unsigned long new_budget;
   1096
   1097	if (!next_rq)
   1098		return;
   1099
   1100	if (bfqq == bfqd->in_service_queue)
   1101		/*
   1102		 * In order not to break guarantees, budgets cannot be
   1103		 * changed after an entity has been selected.
   1104		 */
   1105		return;
   1106
   1107	new_budget = max_t(unsigned long,
   1108			   max_t(unsigned long, bfqq->max_budget,
   1109				 bfq_serv_to_charge(next_rq, bfqq)),
   1110			   entity->service);
   1111	if (entity->budget != new_budget) {
   1112		entity->budget = new_budget;
   1113		bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
   1114					 new_budget);
   1115		bfq_requeue_bfqq(bfqd, bfqq, false);
   1116	}
   1117}
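
/*
 * Worked example: if bfqq->max_budget is 2048 sectors, the new first
 * request would be charged 3072 sectors, and 512 sectors of service
 * have already been received, then the budget above becomes
 * max(max(2048, 3072), 512) = 3072, so that the request can be
 * dispatched in a single round.
 */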
   1118
   1119static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
   1120{
   1121	u64 dur;
   1122
   1123	if (bfqd->bfq_wr_max_time > 0)
   1124		return bfqd->bfq_wr_max_time;
   1125
   1126	dur = bfqd->rate_dur_prod;
   1127	do_div(dur, bfqd->peak_rate);
   1128
   1129	/*
   1130	 * Limit duration between 3 and 25 seconds. The upper limit
   1131	 * has been conservatively set after the following worst case:
   1132	 * on a QEMU/KVM virtual machine
   1133	 * - running in a slow PC
   1134	 * - with a virtual disk stacked on a slow low-end 5400rpm HDD
   1135	 * - serving a heavy I/O workload, such as the sequential reading
   1136	 *   of several files
   1137	 * mplayer took 23 seconds to start, if constantly weight-raised.
   1138	 *
   1139	 * As for higher values than that accommodating the above bad
   1140	 * scenario, tests show that higher values would often yield
   1141	 * the opposite of the desired result, i.e., would worsen
   1142	 * responsiveness by allowing non-interactive applications to
   1143	 * preserve weight raising for too long.
   1144	 *
   1145	 * On the other end, lower values than 3 seconds make it
   1146	 * difficult for most interactive tasks to complete their jobs
   1147	 * before weight-raising finishes.
   1148	 */
   1149	return clamp_val(dur, msecs_to_jiffies(3000), msecs_to_jiffies(25000));
   1150}
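
/*
 * For instance, if rate_dur_prod / peak_rate evaluates to 1.5
 * seconds, the clamp above raises the duration to 3 seconds; on a
 * device so slow that the division yields 40 seconds, the duration
 * is capped at 25 seconds.
 */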
   1151
   1152/* switch back from soft real-time to interactive weight raising */
   1153static void switch_back_to_interactive_wr(struct bfq_queue *bfqq,
   1154					  struct bfq_data *bfqd)
   1155{
   1156	bfqq->wr_coeff = bfqd->bfq_wr_coeff;
   1157	bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
   1158	bfqq->last_wr_start_finish = bfqq->wr_start_at_switch_to_srt;
   1159}
   1160
   1161static void
   1162bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
   1163		      struct bfq_io_cq *bic, bool bfq_already_existing)
   1164{
   1165	unsigned int old_wr_coeff = 1;
   1166	bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);
   1167
   1168	if (bic->saved_has_short_ttime)
   1169		bfq_mark_bfqq_has_short_ttime(bfqq);
   1170	else
   1171		bfq_clear_bfqq_has_short_ttime(bfqq);
   1172
   1173	if (bic->saved_IO_bound)
   1174		bfq_mark_bfqq_IO_bound(bfqq);
   1175	else
   1176		bfq_clear_bfqq_IO_bound(bfqq);
   1177
   1178	bfqq->last_serv_time_ns = bic->saved_last_serv_time_ns;
   1179	bfqq->inject_limit = bic->saved_inject_limit;
   1180	bfqq->decrease_time_jif = bic->saved_decrease_time_jif;
   1181
   1182	bfqq->entity.new_weight = bic->saved_weight;
   1183	bfqq->ttime = bic->saved_ttime;
   1184	bfqq->io_start_time = bic->saved_io_start_time;
   1185	bfqq->tot_idle_time = bic->saved_tot_idle_time;
   1186	/*
   1187	 * Restore weight coefficient only if low_latency is on
   1188	 */
   1189	if (bfqd->low_latency) {
   1190		old_wr_coeff = bfqq->wr_coeff;
   1191		bfqq->wr_coeff = bic->saved_wr_coeff;
   1192	}
   1193	bfqq->service_from_wr = bic->saved_service_from_wr;
   1194	bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
   1195	bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish;
   1196	bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time;
   1197
   1198	if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
   1199	    time_is_before_jiffies(bfqq->last_wr_start_finish +
   1200				   bfqq->wr_cur_max_time))) {
   1201		if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
   1202		    !bfq_bfqq_in_large_burst(bfqq) &&
   1203		    time_is_after_eq_jiffies(bfqq->wr_start_at_switch_to_srt +
   1204					     bfq_wr_duration(bfqd))) {
   1205			switch_back_to_interactive_wr(bfqq, bfqd);
   1206		} else {
   1207			bfqq->wr_coeff = 1;
   1208			bfq_log_bfqq(bfqq->bfqd, bfqq,
   1209				     "resume state: switching off wr");
   1210		}
   1211	}
   1212
   1213	/* make sure weight will be updated, however we got here */
   1214	bfqq->entity.prio_changed = 1;
   1215
   1216	if (likely(!busy))
   1217		return;
   1218
   1219	if (old_wr_coeff == 1 && bfqq->wr_coeff > 1)
   1220		bfqd->wr_busy_queues++;
   1221	else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1)
   1222		bfqd->wr_busy_queues--;
   1223}
   1224
   1225static int bfqq_process_refs(struct bfq_queue *bfqq)
   1226{
   1227	return bfqq->ref - bfqq->entity.allocated -
   1228		bfqq->entity.on_st_or_in_serv -
   1229		(bfqq->weight_counter != NULL) - bfqq->stable_ref;
   1230}
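
/*
 * Example: a queue with ref == 5, two allocated requests, an entity
 * on a service tree (on_st_or_in_serv == 1), a weight counter and no
 * stable refs has 5 - 2 - 1 - 1 - 0 = 1 process reference left.
 */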
   1231
   1232/* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */
   1233static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
   1234{
   1235	struct bfq_queue *item;
   1236	struct hlist_node *n;
   1237
   1238	hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
   1239		hlist_del_init(&item->burst_list_node);
   1240
   1241	/*
   1242	 * Start the creation of a new burst list only if there is no
   1243	 * active queue. See comments on the conditional invocation of
   1244	 * bfq_handle_burst().
   1245	 */
   1246	if (bfq_tot_busy_queues(bfqd) == 0) {
   1247		hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
   1248		bfqd->burst_size = 1;
   1249	} else
   1250		bfqd->burst_size = 0;
   1251
   1252	bfqd->burst_parent_entity = bfqq->entity.parent;
   1253}
   1254
   1255/* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
   1256static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
   1257{
   1258	/* Increment burst size to take into account also bfqq */
   1259	bfqd->burst_size++;
   1260
   1261	if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) {
   1262		struct bfq_queue *pos, *bfqq_item;
   1263		struct hlist_node *n;
   1264
   1265		/*
   1266		 * Enough queues have been activated shortly after each
   1267		 * other to consider this burst as large.
   1268		 */
   1269		bfqd->large_burst = true;
   1270
   1271		/*
   1272		 * We can now mark all queues in the burst list as
   1273		 * belonging to a large burst.
   1274		 */
   1275		hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
   1276				     burst_list_node)
   1277			bfq_mark_bfqq_in_large_burst(bfqq_item);
   1278		bfq_mark_bfqq_in_large_burst(bfqq);
   1279
   1280		/*
   1281		 * From now on, and until the current burst finishes, any
   1282		 * new queue being activated shortly after the last queue
   1283		 * was inserted in the burst can be immediately marked as
   1284		 * belonging to a large burst. So the burst list is not
   1285		 * needed any more. Remove it.
   1286		 */
   1287		hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,
   1288					  burst_list_node)
   1289			hlist_del_init(&pos->burst_list_node);
   1290	} else /*
   1291		* Burst not yet large: add bfqq to the burst list. Do
   1292		* not increment the ref counter for bfqq, because bfqq
   1293		* is removed from the burst list before freeing bfqq
   1294		* in put_queue.
   1295		*/
   1296		hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
   1297}
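
/*
 * Sketch of the resulting dynamics, with a hypothetical threshold of
 * 8: the first seven queues created in quick succession are simply
 * linked into burst_list; the creation of the eighth flips
 * large_burst to true, marks all eight queues as in_large_burst and
 * empties the list, so that later closely-following queues are
 * marked directly, with no further list maintenance.
 */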
   1298
   1299/*
   1300 * If many queues belonging to the same group happen to be created
   1301 * shortly after each other, then the processes associated with these
   1302 * queues have typically a common goal. In particular, bursts of queue
   1303 * creations are usually caused by services or applications that spawn
   1304 * many parallel threads/processes. Examples are systemd during boot,
   1305 * or git grep. To help these processes get their job done as soon as
   1306 * possible, it is usually better to not grant either weight-raising
   1307 * or device idling to their queues, unless these queues must be
   1308 * protected from the I/O flowing through other active queues.
   1309 *
   1310 * In this comment we describe, firstly, the reasons why this fact
   1311 * holds, and, secondly, the next function, which implements the main
   1312 * steps needed to properly mark these queues so that they can then be
   1313 * treated in a different way.
   1314 *
   1315 * The above services or applications benefit mostly from a high
   1316 * throughput: the quicker the requests of the activated queues are
   1317 * cumulatively served, the sooner the target job of these queues gets
   1318 * completed. As a consequence, weight-raising any of these queues,
   1319 * which also implies idling the device for it, is almost always
   1320 * counterproductive, unless there are other active queues to isolate
 * these new queues from. If there are no other active queues, then
   1322 * weight-raising these new queues just lowers throughput in most
   1323 * cases.
   1324 *
   1325 * On the other hand, a burst of queue creations may be caused also by
   1326 * the start of an application that does not consist of a lot of
   1327 * parallel I/O-bound threads. In fact, with a complex application,
 * several short processes may need to be executed to start up the
 * application. In this respect, to start an application as quickly as
   1330 * possible, the best thing to do is in any case to privilege the I/O
   1331 * related to the application with respect to all other
 * I/O. Therefore, the best strategy for starting, as quickly as possible,
   1333 * an application that causes a burst of queue creations is to
   1334 * weight-raise all the queues created during the burst. This is the
   1335 * exact opposite of the best strategy for the other type of bursts.
   1336 *
   1337 * In the end, to take the best action for each of the two cases, the
   1338 * two types of bursts need to be distinguished. Fortunately, this
   1339 * seems relatively easy, by looking at the sizes of the bursts. In
   1340 * particular, we found a threshold such that only bursts with a
   1341 * larger size than that threshold are apparently caused by
   1342 * services or commands such as systemd or git grep. For brevity,
   1343 * hereafter we call just 'large' these bursts. BFQ *does not*
   1344 * weight-raise queues whose creation occurs in a large burst. In
   1345 * addition, for each of these queues BFQ performs or does not perform
   1346 * idling depending on which choice boosts the throughput more. The
   1347 * exact choice depends on the device and request pattern at
   1348 * hand.
   1349 *
   1350 * Unfortunately, false positives may occur while an interactive task
   1351 * is starting (e.g., an application is being started). The
   1352 * consequence is that the queues associated with the task do not
   1353 * enjoy weight raising as expected. Fortunately these false positives
   1354 * are very rare. They typically occur if some service happens to
   1355 * start doing I/O exactly when the interactive task starts.
   1356 *
   1357 * Turning back to the next function, it is invoked only if there are
   1358 * no active queues (apart from active queues that would belong to the
    1359 * the same possible burst that bfqq would belong to), and it implements all
   1360 * the steps needed to detect the occurrence of a large burst and to
   1361 * properly mark all the queues belonging to it (so that they can then
   1362 * be treated in a different way). This goal is achieved by
   1363 * maintaining a "burst list" that holds, temporarily, the queues that
   1364 * belong to the burst in progress. The list is then used to mark
   1365 * these queues as belonging to a large burst if the burst does become
   1366 * large. The main steps are the following.
   1367 *
   1368 * . when the very first queue is created, the queue is inserted into the
   1369 *   list (as it could be the first queue in a possible burst)
   1370 *
   1371 * . if the current burst has not yet become large, and a queue Q that does
   1372 *   not yet belong to the burst is activated shortly after the last time
   1373 *   at which a new queue entered the burst list, then the function appends
   1374 *   Q to the burst list
   1375 *
   1376 * . if, as a consequence of the previous step, the burst size reaches
   1377 *   the large-burst threshold, then
   1378 *
   1379 *     . all the queues in the burst list are marked as belonging to a
   1380 *       large burst
   1381 *
   1382 *     . the burst list is deleted; in fact, the burst list already served
   1383 *       its purpose (keeping temporarily track of the queues in a burst,
   1384 *       so as to be able to mark them as belonging to a large burst in the
   1385 *       previous sub-step), and now is not needed any more
   1386 *
   1387 *     . the device enters a large-burst mode
   1388 *
   1389 * . if a queue Q that does not belong to the burst is created while
   1390 *   the device is in large-burst mode and shortly after the last time
   1391 *   at which a queue either entered the burst list or was marked as
   1392 *   belonging to the current large burst, then Q is immediately marked
   1393 *   as belonging to a large burst.
   1394 *
   1395 * . if a queue Q that does not belong to the burst is created a while
    1396 *   later (i.e., not shortly after) the last time at which a queue
   1397 *   either entered the burst list or was marked as belonging to the
   1398 *   current large burst, then the current burst is deemed as finished and:
   1399 *
   1400 *        . the large-burst mode is reset if set
   1401 *
   1402 *        . the burst list is emptied
   1403 *
   1404 *        . Q is inserted in the burst list, as Q may be the first queue
   1405 *          in a possible new burst (then the burst list contains just Q
   1406 *          after this step).
   1407 */
   1408static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
   1409{
   1410	/*
   1411	 * If bfqq is already in the burst list or is part of a large
   1412	 * burst, or finally has just been split, then there is
   1413	 * nothing else to do.
   1414	 */
   1415	if (!hlist_unhashed(&bfqq->burst_list_node) ||
   1416	    bfq_bfqq_in_large_burst(bfqq) ||
   1417	    time_is_after_eq_jiffies(bfqq->split_time +
   1418				     msecs_to_jiffies(10)))
   1419		return;
   1420
   1421	/*
   1422	 * If bfqq's creation happens late enough, or bfqq belongs to
   1423	 * a different group than the burst group, then the current
   1424	 * burst is finished, and related data structures must be
   1425	 * reset.
   1426	 *
   1427	 * In this respect, consider the special case where bfqq is
   1428	 * the very first queue created after BFQ is selected for this
   1429	 * device. In this case, last_ins_in_burst and
   1430	 * burst_parent_entity are not yet significant when we get
   1431	 * here. But it is easy to verify that, whether or not the
   1432	 * following condition is true, bfqq will end up being
   1433	 * inserted into the burst list. In particular the list will
   1434	 * happen to contain only bfqq. And this is exactly what has
   1435	 * to happen, as bfqq may be the first queue of the first
   1436	 * burst.
   1437	 */
   1438	if (time_is_before_jiffies(bfqd->last_ins_in_burst +
   1439	    bfqd->bfq_burst_interval) ||
   1440	    bfqq->entity.parent != bfqd->burst_parent_entity) {
   1441		bfqd->large_burst = false;
   1442		bfq_reset_burst_list(bfqd, bfqq);
   1443		goto end;
   1444	}
   1445
   1446	/*
   1447	 * If we get here, then bfqq is being activated shortly after the
   1448	 * last queue. So, if the current burst is also large, we can mark
   1449	 * bfqq as belonging to this large burst immediately.
   1450	 */
   1451	if (bfqd->large_burst) {
   1452		bfq_mark_bfqq_in_large_burst(bfqq);
   1453		goto end;
   1454	}
   1455
   1456	/*
   1457	 * If we get here, then a large-burst state has not yet been
   1458	 * reached, but bfqq is being activated shortly after the last
   1459	 * queue. Then we add bfqq to the burst.
   1460	 */
   1461	bfq_add_to_burst(bfqd, bfqq);
   1462end:
   1463	/*
   1464	 * At this point, bfqq either has been added to the current
   1465	 * burst or has caused the current burst to terminate and a
   1466	 * possible new burst to start. In particular, in the second
   1467	 * case, bfqq has become the first queue in the possible new
   1468	 * burst.  In both cases last_ins_in_burst needs to be moved
   1469	 * forward.
   1470	 */
   1471	bfqd->last_ins_in_burst = jiffies;
   1472}
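
/*
 * A minimal user-space sketch of the burst-handling scheme
 * implemented above, kept out of the build (hence the #if 0). An
 * array stands in for the burst list, plain integers stand in for
 * jiffies, and BURST_THRESHOLD/BURST_INTERVAL are invented stand-ins
 * for the corresponding bfq_data fields.
 */
#if 0 /* illustrative sketch only */
#include <stdio.h>
#include <stdbool.h>

#define BURST_THRESHOLD	11	/* hypothetical large-burst size */
#define BURST_INTERVAL	2	/* "shortly after" window, in ticks */

static int burst_list[64], burst_size;
static bool large_burst;
static long last_ins;

static void queue_created(int id, long now)
{
	int i;

	if (now - last_ins > BURST_INTERVAL) {
		/* too late: current burst finished, id may start a new one */
		large_burst = false;
		burst_size = 0;
		burst_list[burst_size++] = id;
	} else if (large_burst) {
		/* created shortly after, in large-burst mode: mark at once */
		printf("queue %d marked as in large burst\n", id);
	} else {
		burst_list[burst_size++] = id;
		if (burst_size >= BURST_THRESHOLD) {
			/* burst became large: mark all, drop the list */
			large_burst = true;
			for (i = 0; i < burst_size; i++)
				printf("queue %d marked as in large burst\n",
				       burst_list[i]);
			burst_size = 0;
		}
	}
	last_ins = now;
}

int main(void)
{
	int i;

	for (i = 0; i < 13; i++)	/* dense creations: burst turns large */
		queue_created(i, i);
	queue_created(99, 100);		/* much later: a new burst starts */
	return 0;
}
#endif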
   1473
   1474static int bfq_bfqq_budget_left(struct bfq_queue *bfqq)
   1475{
   1476	struct bfq_entity *entity = &bfqq->entity;
   1477
   1478	return entity->budget - entity->service;
   1479}
   1480
   1481/*
   1482 * If enough samples have been computed, return the current max budget
   1483 * stored in bfqd, which is dynamically updated according to the
   1484 * estimated disk peak rate; otherwise return the default max budget
   1485 */
   1486static int bfq_max_budget(struct bfq_data *bfqd)
   1487{
   1488	if (bfqd->budgets_assigned < bfq_stats_min_budgets)
   1489		return bfq_default_max_budget;
   1490	else
   1491		return bfqd->bfq_max_budget;
   1492}
   1493
   1494/*
   1495 * Return min budget, which is a fraction of the current or default
   1496 * max budget (trying with 1/32)
   1497 */
   1498static int bfq_min_budget(struct bfq_data *bfqd)
   1499{
   1500	if (bfqd->budgets_assigned < bfq_stats_min_budgets)
   1501		return bfq_default_max_budget / 32;
   1502	else
   1503		return bfqd->bfq_max_budget / 32;
   1504}
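
/*
 * Worked example (user-space sketch, excluded from the build) of the
 * two helpers above: until enough budgets have been assigned, both
 * bounds come from the default max budget; afterwards they follow
 * the rate-based estimate, with the min budget pinned at 1/32 of the
 * max. All numbers are invented for illustration.
 */
#if 0 /* illustrative sketch only */
#include <stdio.h>

int main(void)
{
	int default_max = 16384;	/* hypothetical default, in sectors */
	int estimated_max = 8192;	/* hypothetical rate-based estimate */
	int min_budgets = 194;		/* hypothetical sample threshold */
	int assigned;

	for (assigned = 0; assigned <= min_budgets; assigned += min_budgets) {
		int max = assigned < min_budgets ? default_max : estimated_max;

		printf("assigned %3d: max %5d, min %3d sectors\n",
		       assigned, max, max / 32);
	}
	return 0;
}
#endif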
   1505
   1506/*
   1507 * The next function, invoked after the input queue bfqq switches from
   1508 * idle to busy, updates the budget of bfqq. The function also tells
   1509 * whether the in-service queue should be expired, by returning
   1510 * true. The purpose of expiring the in-service queue is to give bfqq
   1511 * the chance to possibly preempt the in-service queue, and the reason
   1512 * for preempting the in-service queue is to achieve one of the two
   1513 * goals below.
   1514 *
   1515 * 1. Guarantee to bfqq its reserved bandwidth even if bfqq has
   1516 * expired because it has remained idle. In particular, bfqq may have
   1517 * expired for one of the following two reasons:
   1518 *
   1519 * - BFQQE_NO_MORE_REQUESTS bfqq did not enjoy any device idling
   1520 *   and did not make it to issue a new request before its last
   1521 *   request was served;
   1522 *
   1523 * - BFQQE_TOO_IDLE bfqq did enjoy device idling, but did not issue
   1524 *   a new request before the expiration of the idling-time.
   1525 *
   1526 * Even if bfqq has expired for one of the above reasons, the process
   1527 * associated with the queue may be however issuing requests greedily,
   1528 * and thus be sensitive to the bandwidth it receives (bfqq may have
   1529 * remained idle for other reasons: CPU high load, bfqq not enjoying
   1530 * idling, I/O throttling somewhere in the path from the process to
   1531 * the I/O scheduler, ...). But if, after every expiration for one of
   1532 * the above two reasons, bfqq has to wait for the service of at least
   1533 * one full budget of another queue before being served again, then
   1534 * bfqq is likely to get a much lower bandwidth or resource time than
   1535 * its reserved ones. To address this issue, two countermeasures need
   1536 * to be taken.
   1537 *
   1538 * First, the budget and the timestamps of bfqq need to be updated in
   1539 * a special way on bfqq reactivation: they need to be updated as if
   1540 * bfqq did not remain idle and did not expire. In fact, if they are
   1541 * computed as if bfqq expired and remained idle until reactivation,
   1542 * then the process associated with bfqq is treated as if, instead of
   1543 * being greedy, it stopped issuing requests when bfqq remained idle,
   1544 * and restarts issuing requests only on this reactivation. In other
   1545 * words, the scheduler does not help the process recover the "service
   1546 * hole" between bfqq expiration and reactivation. As a consequence,
   1547 * the process receives a lower bandwidth than its reserved one. In
   1548 * contrast, to recover this hole, the budget must be updated as if
   1549 * bfqq was not expired at all before this reactivation, i.e., it must
   1550 * be set to the value of the remaining budget when bfqq was
   1551 * expired. Along the same line, timestamps need to be assigned the
   1552 * value they had the last time bfqq was selected for service, i.e.,
   1553 * before last expiration. Thus timestamps need to be back-shifted
   1554 * with respect to their normal computation (see [1] for more details
   1555 * on this tricky aspect).
   1556 *
   1557 * Secondly, to allow the process to recover the hole, the in-service
   1558 * queue must be expired too, to give bfqq the chance to preempt it
   1559 * immediately. In fact, if bfqq has to wait for a full budget of the
   1560 * in-service queue to be completed, then it may become impossible to
   1561 * let the process recover the hole, even if the back-shifted
   1562 * timestamps of bfqq are lower than those of the in-service queue. If
   1563 * this happens for most or all of the holes, then the process may not
   1564 * receive its reserved bandwidth. In this respect, it is worth noting
    1565 * that, since the service of outstanding requests is not preemptible,
    1566 * a small fraction of the holes may however be unrecoverable, thereby
    1567 * causing a small loss of bandwidth.
   1568 *
   1569 * The last important point is detecting whether bfqq does need this
   1570 * bandwidth recovery. In this respect, the next function deems the
   1571 * process associated with bfqq greedy, and thus allows it to recover
   1572 * the hole, if: 1) the process is waiting for the arrival of a new
   1573 * request (which implies that bfqq expired for one of the above two
   1574 * reasons), and 2) such a request has arrived soon. The first
   1575 * condition is controlled through the flag non_blocking_wait_rq,
   1576 * while the second through the flag arrived_in_time. If both
   1577 * conditions hold, then the function computes the budget in the
   1578 * above-described special way, and signals that the in-service queue
   1579 * should be expired. Timestamp back-shifting is done later in
   1580 * __bfq_activate_entity.
   1581 *
   1582 * 2. Reduce latency. Even if timestamps are not backshifted to let
   1583 * the process associated with bfqq recover a service hole, bfqq may
   1584 * however happen to have, after being (re)activated, a lower finish
    1585 * timestamp than the in-service queue. That is, the next budget of
   1586 * bfqq may have to be completed before the one of the in-service
   1587 * queue. If this is the case, then preempting the in-service queue
   1588 * allows this goal to be achieved, apart from the unpreemptible,
   1589 * outstanding requests mentioned above.
   1590 *
   1591 * Unfortunately, regardless of which of the above two goals one wants
   1592 * to achieve, service trees need first to be updated to know whether
   1593 * the in-service queue must be preempted. To have service trees
   1594 * correctly updated, the in-service queue must be expired and
   1595 * rescheduled, and bfqq must be scheduled too. This is one of the
   1596 * most costly operations (in future versions, the scheduling
   1597 * mechanism may be re-designed in such a way to make it possible to
   1598 * know whether preemption is needed without needing to update service
   1599 * trees). In addition, queue preemptions almost always cause random
   1600 * I/O, which may in turn cause loss of throughput. Finally, there may
   1601 * even be no in-service queue when the next function is invoked (so,
   1602 * no queue to compare timestamps with). Because of these facts, the
   1603 * next function adopts the following simple scheme to avoid costly
   1604 * operations, too frequent preemptions and too many dependencies on
   1605 * the state of the scheduler: it requests the expiration of the
   1606 * in-service queue (unconditionally) only for queues that need to
   1607 * recover a hole. Then it delegates to other parts of the code the
   1608 * responsibility of handling the above case 2.
   1609 */
   1610static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
   1611						struct bfq_queue *bfqq,
   1612						bool arrived_in_time)
   1613{
   1614	struct bfq_entity *entity = &bfqq->entity;
   1615
   1616	/*
   1617	 * In the next compound condition, we check also whether there
   1618	 * is some budget left, because otherwise there is no point in
   1619	 * trying to go on serving bfqq with this same budget: bfqq
   1620	 * would be expired immediately after being selected for
   1621	 * service. This would only cause useless overhead.
   1622	 */
   1623	if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time &&
   1624	    bfq_bfqq_budget_left(bfqq) > 0) {
   1625		/*
   1626		 * We do not clear the flag non_blocking_wait_rq here, as
   1627		 * the latter is used in bfq_activate_bfqq to signal
   1628		 * that timestamps need to be back-shifted (and is
   1629		 * cleared right after).
   1630		 */
   1631
   1632		/*
    1633		 * In the next assignment we rely on the fact that
    1634		 * entity->service and entity->budget are not updated
   1635		 * on expiration if bfqq is empty (see
   1636		 * __bfq_bfqq_recalc_budget). Thus both quantities
   1637		 * remain unchanged after such an expiration, and the
   1638		 * following statement therefore assigns to
   1639		 * entity->budget the remaining budget on such an
   1640		 * expiration.
   1641		 */
   1642		entity->budget = min_t(unsigned long,
   1643				       bfq_bfqq_budget_left(bfqq),
   1644				       bfqq->max_budget);
   1645
   1646		/*
   1647		 * At this point, we have used entity->service to get
   1648		 * the budget left (needed for updating
   1649		 * entity->budget). Thus we finally can, and have to,
   1650		 * reset entity->service. The latter must be reset
   1651		 * because bfqq would otherwise be charged again for
   1652		 * the service it has received during its previous
   1653		 * service slot(s).
   1654		 */
   1655		entity->service = 0;
   1656
   1657		return true;
   1658	}
   1659
   1660	/*
   1661	 * We can finally complete expiration, by setting service to 0.
   1662	 */
   1663	entity->service = 0;
   1664	entity->budget = max_t(unsigned long, bfqq->max_budget,
   1665			       bfq_serv_to_charge(bfqq->next_rq, bfqq));
   1666	bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
   1667	return false;
   1668}
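
/*
 * Worked example (user-space sketch, excluded from the build) of the
 * two budget assignments made above. A queue with max_budget 1024
 * had used 300 sectors of an 800-sector budget when it expired while
 * empty: if its next request arrives in time, the remaining 500
 * sectors are preserved (service-hole recovery); otherwise the
 * expiration completes and a fresh budget is assigned. All numbers
 * are invented.
 */
#if 0 /* illustrative sketch only */
#include <stdio.h>

struct entity { int budget; int service; };

static int min_int(int a, int b) { return a < b ? a : b; }
static int max_int(int a, int b) { return a > b ? a : b; }

int main(void)
{
	struct entity e = { .budget = 800, .service = 300 };
	int max_budget = 1024, next_rq_charge = 64;

	/* greedy process, request arrived in time: keep the leftover */
	e.budget = min_int(e.budget - e.service, max_budget);
	e.service = 0;
	printf("recovered budget: %d sectors\n", e.budget);	/* 500 */

	/* otherwise: complete the expiration and refill the budget */
	e.budget = max_int(max_budget, next_rq_charge);
	e.service = 0;
	printf("fresh budget: %d sectors\n", e.budget);		/* 1024 */
	return 0;
}
#endif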
   1669
   1670/*
   1671 * Return the farthest past time instant according to jiffies
   1672 * macros.
   1673 */
   1674static unsigned long bfq_smallest_from_now(void)
   1675{
   1676	return jiffies - MAX_JIFFY_OFFSET;
   1677}
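
/*
 * Why the expression above works (user-space sketch, excluded from
 * the build): "minus infinity" is meaningful only with respect to
 * the wraparound-safe time_before() comparison, which reduces to a
 * signed subtraction; a plain unsigned compare would often give the
 * wrong answer. The two macros below are simplified copies of the
 * kernel ones.
 */
#if 0 /* illustrative sketch only */
#include <stdio.h>
#include <limits.h>

#define MAX_JIFFY_OFFSET ((LONG_MAX >> 1) - 1)
#define time_before(a, b) ((long)((a) - (b)) < 0)

int main(void)
{
	unsigned long jiffies = 42;	/* a counter that recently wrapped */
	unsigned long smallest = jiffies - MAX_JIFFY_OFFSET;

	/* prints 1: smallest is in the far past even across wraparound */
	printf("smallest < now: %d\n", time_before(smallest, jiffies));
	return 0;
}
#endif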
   1678
   1679static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
   1680					     struct bfq_queue *bfqq,
   1681					     unsigned int old_wr_coeff,
   1682					     bool wr_or_deserves_wr,
   1683					     bool interactive,
   1684					     bool in_burst,
   1685					     bool soft_rt)
   1686{
   1687	if (old_wr_coeff == 1 && wr_or_deserves_wr) {
   1688		/* start a weight-raising period */
   1689		if (interactive) {
   1690			bfqq->service_from_wr = 0;
   1691			bfqq->wr_coeff = bfqd->bfq_wr_coeff;
   1692			bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
   1693		} else {
   1694			/*
   1695			 * No interactive weight raising in progress
   1696			 * here: assign minus infinity to
   1697			 * wr_start_at_switch_to_srt, to make sure
   1698			 * that, at the end of the soft-real-time
   1699			 * weight raising periods that is starting
   1700			 * now, no interactive weight-raising period
   1701			 * may be wrongly considered as still in
   1702			 * progress (and thus actually started by
   1703			 * mistake).
   1704			 */
   1705			bfqq->wr_start_at_switch_to_srt =
   1706				bfq_smallest_from_now();
   1707			bfqq->wr_coeff = bfqd->bfq_wr_coeff *
   1708				BFQ_SOFTRT_WEIGHT_FACTOR;
   1709			bfqq->wr_cur_max_time =
   1710				bfqd->bfq_wr_rt_max_time;
   1711		}
   1712
   1713		/*
   1714		 * If needed, further reduce budget to make sure it is
   1715		 * close to bfqq's backlog, so as to reduce the
   1716		 * scheduling-error component due to a too large
   1717		 * budget. Do not care about throughput consequences,
   1718		 * but only about latency. Finally, do not assign a
   1719		 * too small budget either, to avoid increasing
   1720		 * latency by causing too frequent expirations.
   1721		 */
   1722		bfqq->entity.budget = min_t(unsigned long,
   1723					    bfqq->entity.budget,
   1724					    2 * bfq_min_budget(bfqd));
   1725	} else if (old_wr_coeff > 1) {
   1726		if (interactive) { /* update wr coeff and duration */
   1727			bfqq->wr_coeff = bfqd->bfq_wr_coeff;
   1728			bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
   1729		} else if (in_burst)
   1730			bfqq->wr_coeff = 1;
   1731		else if (soft_rt) {
   1732			/*
   1733			 * The application is now or still meeting the
   1734			 * requirements for being deemed soft rt.  We
   1735			 * can then correctly and safely (re)charge
   1736			 * the weight-raising duration for the
   1737			 * application with the weight-raising
   1738			 * duration for soft rt applications.
   1739			 *
   1740			 * In particular, doing this recharge now, i.e.,
   1741			 * before the weight-raising period for the
   1742			 * application finishes, reduces the probability
   1743			 * of the following negative scenario:
   1744			 * 1) the weight of a soft rt application is
   1745			 *    raised at startup (as for any newly
   1746			 *    created application),
   1747			 * 2) since the application is not interactive,
   1748			 *    at a certain time weight-raising is
   1749			 *    stopped for the application,
   1750			 * 3) at that time the application happens to
   1751			 *    still have pending requests, and hence
   1752			 *    is destined to not have a chance to be
   1753			 *    deemed soft rt before these requests are
   1754			 *    completed (see the comments to the
   1755			 *    function bfq_bfqq_softrt_next_start()
   1756			 *    for details on soft rt detection),
   1757			 * 4) these pending requests experience a high
   1758			 *    latency because the application is not
   1759			 *    weight-raised while they are pending.
   1760			 */
   1761			if (bfqq->wr_cur_max_time !=
   1762				bfqd->bfq_wr_rt_max_time) {
   1763				bfqq->wr_start_at_switch_to_srt =
   1764					bfqq->last_wr_start_finish;
   1765
   1766				bfqq->wr_cur_max_time =
   1767					bfqd->bfq_wr_rt_max_time;
   1768				bfqq->wr_coeff = bfqd->bfq_wr_coeff *
   1769					BFQ_SOFTRT_WEIGHT_FACTOR;
   1770			}
   1771			bfqq->last_wr_start_finish = jiffies;
   1772		}
   1773	}
   1774}
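
/*
 * User-space sketch (excluded from the build) of just the
 * coefficient/duration choice made above when a weight-raising
 * period starts. The constants are invented stand-ins for
 * bfqd->bfq_wr_coeff, BFQ_SOFTRT_WEIGHT_FACTOR and the
 * bfq_wr_*max_time fields.
 */
#if 0 /* illustrative sketch only */
#include <stdio.h>
#include <stdbool.h>

#define WR_COEFF		30	/* hypothetical bfq_wr_coeff */
#define SOFTRT_WEIGHT_FACTOR	100	/* hypothetical soft-rt factor */
#define WR_MAX_TIME_MS		8000	/* hypothetical interactive duration */
#define WR_RT_MAX_TIME_MS	300	/* hypothetical soft-rt duration */

static void start_wr(bool interactive)
{
	unsigned int coeff, max_time_ms;

	if (interactive) {
		coeff = WR_COEFF;
		max_time_ms = WR_MAX_TIME_MS;
	} else {		/* soft real-time */
		coeff = WR_COEFF * SOFTRT_WEIGHT_FACTOR;
		max_time_ms = WR_RT_MAX_TIME_MS;
	}
	printf("%s: weight x%u for at most %u ms\n",
	       interactive ? "interactive" : "soft-rt", coeff, max_time_ms);
}

int main(void)
{
	start_wr(true);
	start_wr(false);
	return 0;
}
#endif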
   1775
   1776static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd,
   1777					struct bfq_queue *bfqq)
   1778{
   1779	return bfqq->dispatched == 0 &&
   1780		time_is_before_jiffies(
   1781			bfqq->budget_timeout +
   1782			bfqd->bfq_wr_min_idle_time);
   1783}
   1784
   1785
   1786/*
   1787 * Return true if bfqq is in a higher priority class, or has a higher
   1788 * weight than the in-service queue.
   1789 */
   1790static bool bfq_bfqq_higher_class_or_weight(struct bfq_queue *bfqq,
   1791					    struct bfq_queue *in_serv_bfqq)
   1792{
   1793	int bfqq_weight, in_serv_weight;
   1794
   1795	if (bfqq->ioprio_class < in_serv_bfqq->ioprio_class)
   1796		return true;
   1797
   1798	if (in_serv_bfqq->entity.parent == bfqq->entity.parent) {
   1799		bfqq_weight = bfqq->entity.weight;
   1800		in_serv_weight = in_serv_bfqq->entity.weight;
   1801	} else {
   1802		if (bfqq->entity.parent)
   1803			bfqq_weight = bfqq->entity.parent->weight;
   1804		else
   1805			bfqq_weight = bfqq->entity.weight;
   1806		if (in_serv_bfqq->entity.parent)
   1807			in_serv_weight = in_serv_bfqq->entity.parent->weight;
   1808		else
   1809			in_serv_weight = in_serv_bfqq->entity.weight;
   1810	}
   1811
   1812	return bfqq_weight > in_serv_weight;
   1813}
   1814
   1815static bool bfq_better_to_idle(struct bfq_queue *bfqq);
   1816
   1817static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
   1818					     struct bfq_queue *bfqq,
   1819					     int old_wr_coeff,
   1820					     struct request *rq,
   1821					     bool *interactive)
   1822{
    1823	bool soft_rt, in_burst, wr_or_deserves_wr,
   1824		bfqq_wants_to_preempt,
   1825		idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq),
   1826		/*
   1827		 * See the comments on
   1828		 * bfq_bfqq_update_budg_for_activation for
   1829		 * details on the usage of the next variable.
   1830		 */
    1831		arrived_in_time = ktime_get_ns() <=
   1832			bfqq->ttime.last_end_request +
   1833			bfqd->bfq_slice_idle * 3;
   1834
   1835
   1836	/*
   1837	 * bfqq deserves to be weight-raised if:
   1838	 * - it is sync,
   1839	 * - it does not belong to a large burst,
   1840	 * - it has been idle for enough time or is soft real-time,
   1841	 * - is linked to a bfq_io_cq (it is not shared in any sense),
   1842	 * - has a default weight (otherwise we assume the user wanted
   1843	 *   to control its weight explicitly)
   1844	 */
   1845	in_burst = bfq_bfqq_in_large_burst(bfqq);
   1846	soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
   1847		!BFQQ_TOTALLY_SEEKY(bfqq) &&
   1848		!in_burst &&
   1849		time_is_before_jiffies(bfqq->soft_rt_next_start) &&
   1850		bfqq->dispatched == 0 &&
   1851		bfqq->entity.new_weight == 40;
   1852	*interactive = !in_burst && idle_for_long_time &&
   1853		bfqq->entity.new_weight == 40;
   1854	/*
   1855	 * Merged bfq_queues are kept out of weight-raising
   1856	 * (low-latency) mechanisms. The reason is that these queues
   1857	 * are usually created for non-interactive and
   1858	 * non-soft-real-time tasks. Yet this is not the case for
   1859	 * stably-merged queues. These queues are merged just because
   1860	 * they are created shortly after each other. So they may
   1861	 * easily serve the I/O of an interactive or soft-real time
   1862	 * application, if the application happens to spawn multiple
    1863	 * processes. So let stably-merged queues enjoy weight
    1864	 * raising too.
   1865	 */
   1866	wr_or_deserves_wr = bfqd->low_latency &&
   1867		(bfqq->wr_coeff > 1 ||
   1868		 (bfq_bfqq_sync(bfqq) &&
   1869		  (bfqq->bic || RQ_BIC(rq)->stably_merged) &&
   1870		   (*interactive || soft_rt)));
   1871
   1872	/*
   1873	 * Using the last flag, update budget and check whether bfqq
   1874	 * may want to preempt the in-service queue.
   1875	 */
   1876	bfqq_wants_to_preempt =
   1877		bfq_bfqq_update_budg_for_activation(bfqd, bfqq,
   1878						    arrived_in_time);
   1879
   1880	/*
   1881	 * If bfqq happened to be activated in a burst, but has been
   1882	 * idle for much more than an interactive queue, then we
   1883	 * assume that, in the overall I/O initiated in the burst, the
   1884	 * I/O associated with bfqq is finished. So bfqq does not need
   1885	 * to be treated as a queue belonging to a burst
   1886	 * anymore. Accordingly, we reset bfqq's in_large_burst flag
   1887	 * if set, and remove bfqq from the burst list if it's
   1888	 * there. We do not decrement burst_size, because the fact
   1889	 * that bfqq does not need to belong to the burst list any
   1890	 * more does not invalidate the fact that bfqq was created in
   1891	 * a burst.
   1892	 */
   1893	if (likely(!bfq_bfqq_just_created(bfqq)) &&
   1894	    idle_for_long_time &&
   1895	    time_is_before_jiffies(
   1896		    bfqq->budget_timeout +
   1897		    msecs_to_jiffies(10000))) {
   1898		hlist_del_init(&bfqq->burst_list_node);
   1899		bfq_clear_bfqq_in_large_burst(bfqq);
   1900	}
   1901
   1902	bfq_clear_bfqq_just_created(bfqq);
   1903
   1904	if (bfqd->low_latency) {
   1905		if (unlikely(time_is_after_jiffies(bfqq->split_time)))
   1906			/* wraparound */
   1907			bfqq->split_time =
   1908				jiffies - bfqd->bfq_wr_min_idle_time - 1;
   1909
   1910		if (time_is_before_jiffies(bfqq->split_time +
   1911					   bfqd->bfq_wr_min_idle_time)) {
   1912			bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq,
   1913							 old_wr_coeff,
   1914							 wr_or_deserves_wr,
   1915							 *interactive,
   1916							 in_burst,
   1917							 soft_rt);
   1918
   1919			if (old_wr_coeff != bfqq->wr_coeff)
   1920				bfqq->entity.prio_changed = 1;
   1921		}
   1922	}
   1923
   1924	bfqq->last_idle_bklogged = jiffies;
   1925	bfqq->service_from_backlogged = 0;
   1926	bfq_clear_bfqq_softrt_update(bfqq);
   1927
   1928	bfq_add_bfqq_busy(bfqd, bfqq);
   1929
   1930	/*
   1931	 * Expire in-service queue if preemption may be needed for
   1932	 * guarantees or throughput. As for guarantees, we care
   1933	 * explicitly about two cases. The first is that bfqq has to
   1934	 * recover a service hole, as explained in the comments on
   1935	 * bfq_bfqq_update_budg_for_activation(), i.e., that
   1936	 * bfqq_wants_to_preempt is true. However, if bfqq does not
   1937	 * carry time-critical I/O, then bfqq's bandwidth is less
   1938	 * important than that of queues that carry time-critical I/O.
   1939	 * So, as a further constraint, we consider this case only if
   1940	 * bfqq is at least as weight-raised, i.e., at least as time
   1941	 * critical, as the in-service queue.
   1942	 *
   1943	 * The second case is that bfqq is in a higher priority class,
   1944	 * or has a higher weight than the in-service queue. If this
   1945	 * condition does not hold, we don't care because, even if
   1946	 * bfqq does not start to be served immediately, the resulting
   1947	 * delay for bfqq's I/O is however lower or much lower than
   1948	 * the ideal completion time to be guaranteed to bfqq's I/O.
   1949	 *
   1950	 * In both cases, preemption is needed only if, according to
   1951	 * the timestamps of both bfqq and of the in-service queue,
   1952	 * bfqq actually is the next queue to serve. So, to reduce
   1953	 * useless preemptions, the return value of
   1954	 * next_queue_may_preempt() is considered in the next compound
   1955	 * condition too. Yet next_queue_may_preempt() just checks a
   1956	 * simple, necessary condition for bfqq to be the next queue
   1957	 * to serve. In fact, to evaluate a sufficient condition, the
   1958	 * timestamps of the in-service queue would need to be
   1959	 * updated, and this operation is quite costly (see the
   1960	 * comments on bfq_bfqq_update_budg_for_activation()).
   1961	 *
   1962	 * As for throughput, we ask bfq_better_to_idle() whether we
   1963	 * still need to plug I/O dispatching. If bfq_better_to_idle()
   1964	 * says no, then plugging is not needed any longer, either to
    1965	 * boost throughput or to preserve service guarantees. Then
   1966	 * the best option is to stop plugging I/O, as not doing so
   1967	 * would certainly lower throughput. We may end up in this
   1968	 * case if: (1) upon a dispatch attempt, we detected that it
   1969	 * was better to plug I/O dispatch, and to wait for a new
   1970	 * request to arrive for the currently in-service queue, but
   1971	 * (2) this switch of bfqq to busy changes the scenario.
   1972	 */
   1973	if (bfqd->in_service_queue &&
   1974	    ((bfqq_wants_to_preempt &&
   1975	      bfqq->wr_coeff >= bfqd->in_service_queue->wr_coeff) ||
   1976	     bfq_bfqq_higher_class_or_weight(bfqq, bfqd->in_service_queue) ||
   1977	     !bfq_better_to_idle(bfqd->in_service_queue)) &&
   1978	    next_queue_may_preempt(bfqd))
   1979		bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
   1980				false, BFQQE_PREEMPTED);
   1981}
   1982
   1983static void bfq_reset_inject_limit(struct bfq_data *bfqd,
   1984				   struct bfq_queue *bfqq)
   1985{
   1986	/* invalidate baseline total service time */
   1987	bfqq->last_serv_time_ns = 0;
   1988
   1989	/*
   1990	 * Reset pointer in case we are waiting for
   1991	 * some request completion.
   1992	 */
   1993	bfqd->waited_rq = NULL;
   1994
   1995	/*
   1996	 * If bfqq has a short think time, then start by setting the
   1997	 * inject limit to 0 prudentially, because the service time of
   1998	 * an injected I/O request may be higher than the think time
   1999	 * of bfqq, and therefore, if one request was injected when
    2000	 * of bfqq, and therefore, if a request is injected while
    2001	 * bfqq remains empty, this injected request might delay the
   2002	 * case bfqq can actually tolerate some injection, then the
   2003	 * adaptive update will however raise the limit soon. This
   2004	 * lucky circumstance holds exactly because bfqq has a short
   2005	 * think time, and thus, after remaining empty, is likely to
   2006	 * get new I/O enqueued---and then completed---before being
   2007	 * expired. This is the very pattern that gives the
   2008	 * limit-update algorithm the chance to measure the effect of
   2009	 * injection on request service times, and then to update the
   2010	 * limit accordingly.
   2011	 *
   2012	 * However, in the following special case, the inject limit is
   2013	 * left to 1 even if the think time is short: bfqq's I/O is
   2014	 * synchronized with that of some other queue, i.e., bfqq may
   2015	 * receive new I/O only after the I/O of the other queue is
   2016	 * completed. Keeping the inject limit to 1 allows the
   2017	 * blocking I/O to be served while bfqq is in service. And
   2018	 * this is very convenient both for bfqq and for overall
   2019	 * throughput, as explained in detail in the comments in
   2020	 * bfq_update_has_short_ttime().
   2021	 *
   2022	 * On the opposite end, if bfqq has a long think time, then
    2023	 * start directly with a limit of 1, because:
   2024	 * a) on the bright side, keeping at most one request in
   2025	 * service in the drive is unlikely to cause any harm to the
   2026	 * latency of bfqq's requests, as the service time of a single
   2027	 * request is likely to be lower than the think time of bfqq;
   2028	 * b) on the downside, after becoming empty, bfqq is likely to
   2029	 * expire before getting its next request. With this request
   2030	 * arrival pattern, it is very hard to sample total service
   2031	 * times and update the inject limit accordingly (see comments
   2032	 * on bfq_update_inject_limit()). So the limit is likely to be
   2033	 * never, or at least seldom, updated.  As a consequence, by
    2034	 * setting the limit to 1, we make sure that some injection can
    2035	 * occur with bfqq. On the downside, this proactive step
   2036	 * further reduces chances to actually compute the baseline
   2037	 * total service time. Thus it reduces chances to execute the
   2038	 * limit-update algorithm and possibly raise the limit to more
   2039	 * than 1.
   2040	 */
   2041	if (bfq_bfqq_has_short_ttime(bfqq))
   2042		bfqq->inject_limit = 0;
   2043	else
   2044		bfqq->inject_limit = 1;
   2045
   2046	bfqq->decrease_time_jif = jiffies;
   2047}
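
/*
 * User-space sketch (excluded from the build) of the policy above: a
 * queue with a short think time starts with inject limit 0 and
 * relies on the adaptive update to raise it, the others start
 * directly with 1. The adaptive step shown is a deliberately crude
 * stand-in (the 10% slack is invented) for the real algorithm in
 * bfq_update_inject_limit().
 */
#if 0 /* illustrative sketch only */
#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	bool short_ttime = true;
	unsigned int limit = short_ttime ? 0 : 1;
	long long baseline_ns = 500000;		/* invented sample */
	long long with_injection_ns = 520000;	/* invented sample */

	/* injection barely degraded the service time: raise the limit */
	if (with_injection_ns < baseline_ns + baseline_ns / 10)
		limit++;
	printf("inject limit after one sample: %u\n", limit);
	return 0;
}
#endif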
   2048
   2049static void bfq_update_io_intensity(struct bfq_queue *bfqq, u64 now_ns)
   2050{
   2051	u64 tot_io_time = now_ns - bfqq->io_start_time;
   2052
   2053	if (RB_EMPTY_ROOT(&bfqq->sort_list) && bfqq->dispatched == 0)
   2054		bfqq->tot_idle_time +=
   2055			now_ns - bfqq->ttime.last_end_request;
   2056
   2057	if (unlikely(bfq_bfqq_just_created(bfqq)))
   2058		return;
   2059
   2060	/*
   2061	 * Must be busy for at least about 80% of the time to be
   2062	 * considered I/O bound.
   2063	 */
   2064	if (bfqq->tot_idle_time * 5 > tot_io_time)
   2065		bfq_clear_bfqq_IO_bound(bfqq);
   2066	else
   2067		bfq_mark_bfqq_IO_bound(bfqq);
   2068
   2069	/*
   2070	 * Keep an observation window of at most 200 ms in the past
   2071	 * from now.
   2072	 */
   2073	if (tot_io_time > 200 * NSEC_PER_MSEC) {
   2074		bfqq->io_start_time = now_ns - (tot_io_time>>1);
   2075		bfqq->tot_idle_time >>= 1;
   2076	}
   2077}
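
/*
 * Worked example (user-space sketch, excluded from the build) of the
 * I/O-bound test above: "tot_idle_time * 5 > tot_io_time" is simply
 * "idle for more than 20% of the observation window". The halving
 * step is equivalent to moving io_start_time forward as done in the
 * function. Numbers are invented.
 */
#if 0 /* illustrative sketch only */
#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	long long tot_io_time = 250000000LL;	/* 250 ms observed */
	long long tot_idle_time = 20000000LL;	/* 20 ms of them idle */
	bool io_bound = !(tot_idle_time * 5 > tot_io_time);

	printf("idle %lld%% -> %sI/O bound\n",
	       tot_idle_time * 100 / tot_io_time, io_bound ? "" : "not ");

	/* window maintenance: keep at most ~200 ms of history */
	if (tot_io_time > 200000000LL) {
		tot_io_time >>= 1;
		tot_idle_time >>= 1;
	}
	printf("window now %lld ms\n", tot_io_time / 1000000);
	return 0;
}
#endif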
   2078
   2079/*
   2080 * Detect whether bfqq's I/O seems synchronized with that of some
   2081 * other queue, i.e., whether bfqq, after remaining empty, happens to
   2082 * receive new I/O only right after some I/O request of the other
   2083 * queue has been completed. We call waker queue the other queue, and
   2084 * we assume, for simplicity, that bfqq may have at most one waker
   2085 * queue.
   2086 *
   2087 * A remarkable throughput boost can be reached by unconditionally
   2088 * injecting the I/O of the waker queue, every time a new
   2089 * bfq_dispatch_request happens to be invoked while I/O is being
   2090 * plugged for bfqq.  In addition to boosting throughput, this
   2091 * unblocks bfqq's I/O, thereby improving bandwidth and latency for
   2092 * bfqq. Note that these same results may be achieved with the general
   2093 * injection mechanism, but less effectively. For details on this
   2094 * aspect, see the comments on the choice of the queue for injection
   2095 * in bfq_select_queue().
   2096 *
   2097 * Turning back to the detection of a waker queue, a queue Q is deemed as a
   2098 * waker queue for bfqq if, for three consecutive times, bfqq happens to become
   2099 * non empty right after a request of Q has been completed within given
    2100 * non-empty right after a request of Q has been completed within a given
   2101 * if it still has some in-flight I/O. In fact, in this case bfqq is actually
   2102 * still being served by the drive, and may receive new I/O on the completion
   2103 * of some of the in-flight requests. In particular, on the first time, Q is
   2104 * tentatively set as a candidate waker queue, while on the third consecutive
   2105 * time that Q is detected, the field waker_bfqq is set to Q, to confirm that Q
   2106 * is a waker queue for bfqq. These detection steps are performed only if bfqq
   2107 * has a long think time, so as to make it more likely that bfqq's I/O is
   2108 * actually being blocked by a synchronization. This last filter, plus the
   2109 * above three-times requirement and time limit for detection, make false
   2110 * positives less likely.
   2111 *
   2112 * NOTE
   2113 *
   2114 * The sooner a waker queue is detected, the sooner throughput can be
   2115 * boosted by injecting I/O from the waker queue. Fortunately,
   2116 * detection is likely to be actually fast, for the following
   2117 * reasons. While blocked by synchronization, bfqq has a long think
   2118 * time. This implies that bfqq's inject limit is at least equal to 1
   2119 * (see the comments in bfq_update_inject_limit()). So, thanks to
   2120 * injection, the waker queue is likely to be served during the very
   2121 * first I/O-plugging time interval for bfqq. This triggers the first
   2122 * step of the detection mechanism. Thanks again to injection, the
   2123 * candidate waker queue is then likely to be confirmed no later than
   2124 * during the next I/O-plugging interval for bfqq.
   2125 *
   2126 * ISSUE
   2127 *
   2128 * On queue merging all waker information is lost.
   2129 */
   2130static void bfq_check_waker(struct bfq_data *bfqd, struct bfq_queue *bfqq,
   2131			    u64 now_ns)
   2132{
   2133	char waker_name[MAX_BFQQ_NAME_LENGTH];
   2134
   2135	if (!bfqd->last_completed_rq_bfqq ||
   2136	    bfqd->last_completed_rq_bfqq == bfqq ||
   2137	    bfq_bfqq_has_short_ttime(bfqq) ||
   2138	    now_ns - bfqd->last_completion >= 4 * NSEC_PER_MSEC)
   2139		return;
   2140
   2141	/*
   2142	 * We reset waker detection logic also if too much time has passed
    2143	 * since the first detection. If wakeups are rare, pointless idling
    2144	 * doesn't hurt throughput that much. The condition below makes sure
    2145	 * we do not uselessly idle, blocking the waker, in over 1/64 of cases.
   2146	 */
   2147	if (bfqd->last_completed_rq_bfqq !=
   2148	    bfqq->tentative_waker_bfqq ||
   2149	    now_ns > bfqq->waker_detection_started +
   2150					128 * (u64)bfqd->bfq_slice_idle) {
   2151		/*
   2152		 * First synchronization detected with a
   2153		 * candidate waker queue, or with a different
   2154		 * candidate waker queue from the current one.
   2155		 */
   2156		bfqq->tentative_waker_bfqq =
   2157			bfqd->last_completed_rq_bfqq;
   2158		bfqq->num_waker_detections = 1;
   2159		bfqq->waker_detection_started = now_ns;
   2160		bfq_bfqq_name(bfqq->tentative_waker_bfqq, waker_name,
   2161			      MAX_BFQQ_NAME_LENGTH);
   2162		bfq_log_bfqq(bfqd, bfqq, "set tentative waker %s", waker_name);
   2163	} else /* Same tentative waker queue detected again */
   2164		bfqq->num_waker_detections++;
   2165
   2166	if (bfqq->num_waker_detections == 3) {
   2167		bfqq->waker_bfqq = bfqd->last_completed_rq_bfqq;
   2168		bfqq->tentative_waker_bfqq = NULL;
   2169		bfq_bfqq_name(bfqq->waker_bfqq, waker_name,
   2170			      MAX_BFQQ_NAME_LENGTH);
   2171		bfq_log_bfqq(bfqd, bfqq, "set waker %s", waker_name);
   2172
   2173		/*
   2174		 * If the waker queue disappears, then
   2175		 * bfqq->waker_bfqq must be reset. To
   2176		 * this goal, we maintain in each
   2177		 * waker queue a list, woken_list, of
   2178		 * all the queues that reference the
   2179		 * waker queue through their
   2180		 * waker_bfqq pointer. When the waker
   2181		 * queue exits, the waker_bfqq pointer
   2182		 * of all the queues in the woken_list
   2183		 * is reset.
   2184		 *
   2185		 * In addition, if bfqq is already in
   2186		 * the woken_list of a waker queue,
   2187		 * then, before being inserted into
   2188		 * the woken_list of a new waker
   2189		 * queue, bfqq must be removed from
   2190		 * the woken_list of the old waker
   2191		 * queue.
   2192		 */
   2193		if (!hlist_unhashed(&bfqq->woken_list_node))
   2194			hlist_del_init(&bfqq->woken_list_node);
   2195		hlist_add_head(&bfqq->woken_list_node,
   2196			       &bfqd->last_completed_rq_bfqq->woken_list);
   2197	}
   2198}
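
/*
 * User-space sketch (excluded from the build) of the detection steps
 * above: a queue becomes the confirmed waker after being observed as
 * the last completer three consecutive times, and the tentative
 * state resets when a different candidate shows up or detection
 * takes too long. The 4 ms closeness filter is omitted, and
 * DETECTION_WINDOW is an invented stand-in for
 * 128 * bfqd->bfq_slice_idle.
 */
#if 0 /* illustrative sketch only */
#include <stdio.h>

#define DETECTION_WINDOW 128	/* hypothetical, in ticks */

static int tentative = -1, confirmed = -1, detections;
static long detection_started;

static void observe(int last_completed, long now)
{
	if (last_completed != tentative ||
	    now > detection_started + DETECTION_WINDOW) {
		tentative = last_completed;	/* new candidate waker */
		detections = 1;
		detection_started = now;
	} else if (++detections == 3) {
		confirmed = tentative;		/* third hit: confirm */
		tentative = -1;
		printf("queue %d confirmed as waker at t=%ld\n",
		       confirmed, now);
	}
}

int main(void)
{
	observe(7, 0);	/* tentative */
	observe(7, 10);	/* second consecutive detection */
	observe(7, 20);	/* third: queue 7 becomes the waker */
	return 0;
}
#endif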
   2199
   2200static void bfq_add_request(struct request *rq)
   2201{
   2202	struct bfq_queue *bfqq = RQ_BFQQ(rq);
   2203	struct bfq_data *bfqd = bfqq->bfqd;
   2204	struct request *next_rq, *prev;
   2205	unsigned int old_wr_coeff = bfqq->wr_coeff;
   2206	bool interactive = false;
   2207	u64 now_ns = ktime_get_ns();
   2208
   2209	bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
   2210	bfqq->queued[rq_is_sync(rq)]++;
   2211	/*
   2212	 * Updating of 'bfqd->queued' is protected by 'bfqd->lock', however, it
   2213	 * may be read without holding the lock in bfq_has_work().
   2214	 */
   2215	WRITE_ONCE(bfqd->queued, bfqd->queued + 1);
   2216
   2217	if (bfq_bfqq_sync(bfqq) && RQ_BIC(rq)->requests <= 1) {
   2218		bfq_check_waker(bfqd, bfqq, now_ns);
   2219
   2220		/*
   2221		 * Periodically reset inject limit, to make sure that
   2222		 * the latter eventually drops in case workload
   2223		 * changes, see step (3) in the comments on
   2224		 * bfq_update_inject_limit().
   2225		 */
   2226		if (time_is_before_eq_jiffies(bfqq->decrease_time_jif +
   2227					     msecs_to_jiffies(1000)))
   2228			bfq_reset_inject_limit(bfqd, bfqq);
   2229
   2230		/*
   2231		 * The following conditions must hold to setup a new
   2232		 * sampling of total service time, and then a new
   2233		 * update of the inject limit:
   2234		 * - bfqq is in service, because the total service
   2235		 *   time is evaluated only for the I/O requests of
   2236		 *   the queues in service;
   2237		 * - this is the right occasion to compute or to
   2238		 *   lower the baseline total service time, because
   2239		 *   there are actually no requests in the drive,
   2240		 *   or
   2241		 *   the baseline total service time is available, and
   2242		 *   this is the right occasion to compute the other
   2243		 *   quantity needed to update the inject limit, i.e.,
   2244		 *   the total service time caused by the amount of
   2245		 *   injection allowed by the current value of the
   2246		 *   limit. It is the right occasion because injection
   2247		 *   has actually been performed during the service
   2248		 *   hole, and there are still in-flight requests,
   2249		 *   which are very likely to be exactly the injected
   2250		 *   requests, or part of them;
   2251		 * - the minimum interval for sampling the total
   2252		 *   service time and updating the inject limit has
   2253		 *   elapsed.
   2254		 */
   2255		if (bfqq == bfqd->in_service_queue &&
   2256		    (bfqd->rq_in_driver == 0 ||
   2257		     (bfqq->last_serv_time_ns > 0 &&
   2258		      bfqd->rqs_injected && bfqd->rq_in_driver > 0)) &&
   2259		    time_is_before_eq_jiffies(bfqq->decrease_time_jif +
   2260					      msecs_to_jiffies(10))) {
   2261			bfqd->last_empty_occupied_ns = ktime_get_ns();
   2262			/*
   2263			 * Start the state machine for measuring the
   2264			 * total service time of rq: setting
   2265			 * wait_dispatch will cause bfqd->waited_rq to
   2266			 * be set when rq will be dispatched.
   2267			 */
   2268			bfqd->wait_dispatch = true;
   2269			/*
   2270			 * If there is no I/O in service in the drive,
   2271			 * then possible injection occurred before the
   2272			 * arrival of rq will not affect the total
   2273			 * service time of rq. So the injection limit
   2274			 * must not be updated as a function of such
   2275			 * total service time, unless new injection
   2276			 * occurs before rq is completed. To have the
   2277			 * injection limit updated only in the latter
   2278			 * case, reset rqs_injected here (rqs_injected
   2279			 * will be set in case injection is performed
   2280			 * on bfqq before rq is completed).
   2281			 */
   2282			if (bfqd->rq_in_driver == 0)
   2283				bfqd->rqs_injected = false;
   2284		}
   2285	}
   2286
   2287	if (bfq_bfqq_sync(bfqq))
   2288		bfq_update_io_intensity(bfqq, now_ns);
   2289
   2290	elv_rb_add(&bfqq->sort_list, rq);
   2291
   2292	/*
   2293	 * Check if this request is a better next-serve candidate.
   2294	 */
   2295	prev = bfqq->next_rq;
   2296	next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
   2297	bfqq->next_rq = next_rq;
   2298
   2299	/*
   2300	 * Adjust priority tree position, if next_rq changes.
   2301	 * See comments on bfq_pos_tree_add_move() for the unlikely().
   2302	 */
   2303	if (unlikely(!bfqd->nonrot_with_queueing && prev != bfqq->next_rq))
   2304		bfq_pos_tree_add_move(bfqd, bfqq);
   2305
   2306	if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */
   2307		bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff,
   2308						 rq, &interactive);
   2309	else {
   2310		if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
   2311		    time_is_before_jiffies(
   2312				bfqq->last_wr_start_finish +
   2313				bfqd->bfq_wr_min_inter_arr_async)) {
   2314			bfqq->wr_coeff = bfqd->bfq_wr_coeff;
   2315			bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
   2316
   2317			bfqd->wr_busy_queues++;
   2318			bfqq->entity.prio_changed = 1;
   2319		}
   2320		if (prev != bfqq->next_rq)
   2321			bfq_updated_next_req(bfqd, bfqq);
   2322	}
   2323
   2324	/*
   2325	 * Assign jiffies to last_wr_start_finish in the following
   2326	 * cases:
   2327	 *
   2328	 * . if bfqq is not going to be weight-raised, because, for
   2329	 *   non weight-raised queues, last_wr_start_finish stores the
   2330	 *   arrival time of the last request; as of now, this piece
   2331	 *   of information is used only for deciding whether to
   2332	 *   weight-raise async queues
   2333	 *
   2334	 * . if bfqq is not weight-raised, because, if bfqq is now
   2335	 *   switching to weight-raised, then last_wr_start_finish
   2336	 *   stores the time when weight-raising starts
   2337	 *
   2338	 * . if bfqq is interactive, because, regardless of whether
   2339	 *   bfqq is currently weight-raised, the weight-raising
   2340	 *   period must start or restart (this case is considered
   2341	 *   separately because it is not detected by the above
   2342	 *   conditions, if bfqq is already weight-raised)
   2343	 *
   2344	 * last_wr_start_finish has to be updated also if bfqq is soft
   2345	 * real-time, because the weight-raising period is constantly
   2346	 * restarted on idle-to-busy transitions for these queues, but
   2347	 * this is already done in bfq_bfqq_handle_idle_busy_switch if
   2348	 * needed.
   2349	 */
   2350	if (bfqd->low_latency &&
   2351		(old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive))
   2352		bfqq->last_wr_start_finish = jiffies;
   2353}
   2354
   2355static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
   2356					  struct bio *bio,
   2357					  struct request_queue *q)
   2358{
   2359	struct bfq_queue *bfqq = bfqd->bio_bfqq;
   2360
   2361
   2362	if (bfqq)
   2363		return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
   2364
   2365	return NULL;
   2366}
   2367
   2368static sector_t get_sdist(sector_t last_pos, struct request *rq)
   2369{
   2370	if (last_pos)
   2371		return abs(blk_rq_pos(rq) - last_pos);
   2372
   2373	return 0;
   2374}
   2375
   2376#if 0 /* Still not clear if we can do without next two functions */
   2377static void bfq_activate_request(struct request_queue *q, struct request *rq)
   2378{
   2379	struct bfq_data *bfqd = q->elevator->elevator_data;
   2380
   2381	bfqd->rq_in_driver++;
   2382}
   2383
   2384static void bfq_deactivate_request(struct request_queue *q, struct request *rq)
   2385{
   2386	struct bfq_data *bfqd = q->elevator->elevator_data;
   2387
   2388	bfqd->rq_in_driver--;
   2389}
   2390#endif
   2391
   2392static void bfq_remove_request(struct request_queue *q,
   2393			       struct request *rq)
   2394{
   2395	struct bfq_queue *bfqq = RQ_BFQQ(rq);
   2396	struct bfq_data *bfqd = bfqq->bfqd;
   2397	const int sync = rq_is_sync(rq);
   2398
   2399	if (bfqq->next_rq == rq) {
   2400		bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
   2401		bfq_updated_next_req(bfqd, bfqq);
   2402	}
   2403
   2404	if (rq->queuelist.prev != &rq->queuelist)
   2405		list_del_init(&rq->queuelist);
   2406	bfqq->queued[sync]--;
   2407	/*
   2408	 * Updating of 'bfqd->queued' is protected by 'bfqd->lock', however, it
   2409	 * may be read without holding the lock in bfq_has_work().
   2410	 */
   2411	WRITE_ONCE(bfqd->queued, bfqd->queued - 1);
   2412	elv_rb_del(&bfqq->sort_list, rq);
   2413
   2414	elv_rqhash_del(q, rq);
   2415	if (q->last_merge == rq)
   2416		q->last_merge = NULL;
   2417
   2418	if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
   2419		bfqq->next_rq = NULL;
   2420
   2421		if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) {
   2422			bfq_del_bfqq_busy(bfqd, bfqq, false);
   2423			/*
   2424			 * bfqq emptied. In normal operation, when
   2425			 * bfqq is empty, bfqq->entity.service and
   2426			 * bfqq->entity.budget must contain,
   2427			 * respectively, the service received and the
   2428			 * budget used last time bfqq emptied. These
   2429			 * facts do not hold in this case, as at least
   2430			 * this last removal occurred while bfqq is
   2431			 * not in service. To avoid inconsistencies,
   2432			 * reset both bfqq->entity.service and
   2433			 * bfqq->entity.budget, if bfqq has still a
   2434			 * process that may issue I/O requests to it.
   2435			 */
   2436			bfqq->entity.budget = bfqq->entity.service = 0;
   2437		}
   2438
   2439		/*
   2440		 * Remove queue from request-position tree as it is empty.
   2441		 */
   2442		if (bfqq->pos_root) {
   2443			rb_erase(&bfqq->pos_node, bfqq->pos_root);
   2444			bfqq->pos_root = NULL;
   2445		}
   2446	} else {
   2447		/* see comments on bfq_pos_tree_add_move() for the unlikely() */
   2448		if (unlikely(!bfqd->nonrot_with_queueing))
   2449			bfq_pos_tree_add_move(bfqd, bfqq);
   2450	}
   2451
   2452	if (rq->cmd_flags & REQ_META)
   2453		bfqq->meta_pending--;
   2454
   2455}
   2456
   2457static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
   2458		unsigned int nr_segs)
   2459{
   2460	struct bfq_data *bfqd = q->elevator->elevator_data;
   2461	struct request *free = NULL;
   2462	/*
   2463	 * bfq_bic_lookup grabs the queue_lock: invoke it now and
   2464	 * store its return value for later use, to avoid nesting
   2465	 * queue_lock inside the bfqd->lock. We assume that the bic
   2466	 * returned by bfq_bic_lookup does not go away before
   2467	 * bfqd->lock is taken.
   2468	 */
   2469	struct bfq_io_cq *bic = bfq_bic_lookup(q);
   2470	bool ret;
   2471
   2472	spin_lock_irq(&bfqd->lock);
   2473
   2474	if (bic) {
   2475		/*
   2476		 * Make sure cgroup info is uptodate for current process before
   2477		 * considering the merge.
   2478		 */
   2479		bfq_bic_update_cgroup(bic, bio);
   2480
   2481		bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf));
   2482	} else {
   2483		bfqd->bio_bfqq = NULL;
   2484	}
   2485	bfqd->bio_bic = bic;
   2486
   2487	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
   2488
   2489	spin_unlock_irq(&bfqd->lock);
   2490	if (free)
   2491		blk_mq_free_request(free);
   2492
   2493	return ret;
   2494}
   2495
   2496static int bfq_request_merge(struct request_queue *q, struct request **req,
   2497			     struct bio *bio)
   2498{
   2499	struct bfq_data *bfqd = q->elevator->elevator_data;
   2500	struct request *__rq;
   2501
   2502	__rq = bfq_find_rq_fmerge(bfqd, bio, q);
   2503	if (__rq && elv_bio_merge_ok(__rq, bio)) {
   2504		*req = __rq;
   2505
   2506		if (blk_discard_mergable(__rq))
   2507			return ELEVATOR_DISCARD_MERGE;
   2508		return ELEVATOR_FRONT_MERGE;
   2509	}
   2510
   2511	return ELEVATOR_NO_MERGE;
   2512}
   2513
   2514static void bfq_request_merged(struct request_queue *q, struct request *req,
   2515			       enum elv_merge type)
   2516{
   2517	if (type == ELEVATOR_FRONT_MERGE &&
   2518	    rb_prev(&req->rb_node) &&
   2519	    blk_rq_pos(req) <
   2520	    blk_rq_pos(container_of(rb_prev(&req->rb_node),
   2521				    struct request, rb_node))) {
   2522		struct bfq_queue *bfqq = RQ_BFQQ(req);
   2523		struct bfq_data *bfqd;
   2524		struct request *prev, *next_rq;
   2525
   2526		if (!bfqq)
   2527			return;
   2528
   2529		bfqd = bfqq->bfqd;
   2530
   2531		/* Reposition request in its sort_list */
   2532		elv_rb_del(&bfqq->sort_list, req);
   2533		elv_rb_add(&bfqq->sort_list, req);
   2534
   2535		/* Choose next request to be served for bfqq */
   2536		prev = bfqq->next_rq;
   2537		next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
   2538					 bfqd->last_position);
   2539		bfqq->next_rq = next_rq;
   2540		/*
   2541		 * If next_rq changes, update both the queue's budget to
   2542		 * fit the new request and the queue's position in its
   2543		 * rq_pos_tree.
   2544		 */
   2545		if (prev != bfqq->next_rq) {
   2546			bfq_updated_next_req(bfqd, bfqq);
   2547			/*
   2548			 * See comments on bfq_pos_tree_add_move() for
   2549			 * the unlikely().
   2550			 */
   2551			if (unlikely(!bfqd->nonrot_with_queueing))
   2552				bfq_pos_tree_add_move(bfqd, bfqq);
   2553		}
   2554	}
   2555}
   2556
   2557/*
   2558 * This function is called to notify the scheduler that the requests
   2559 * rq and 'next' have been merged, with 'next' going away.  BFQ
   2560 * exploits this hook to address the following issue: if 'next' has a
    2561 * fifo_time lower than rq's, then the fifo_time of rq must be set to
   2562 * the value of 'next', to not forget the greater age of 'next'.
   2563 *
    2564 * NOTE: in this function we assume that rq is in a bfq_queue, based on
    2565 * the fact that rq is picked from the hash table q->elevator->hash, which,
   2566 * in its turn, is filled only with I/O requests present in
   2567 * bfq_queues, while BFQ is in use for the request queue q. In fact,
   2568 * the function that fills this hash table (elv_rqhash_add) is called
   2569 * only by bfq_insert_request.
   2570 */
   2571static void bfq_requests_merged(struct request_queue *q, struct request *rq,
   2572				struct request *next)
   2573{
   2574	struct bfq_queue *bfqq = RQ_BFQQ(rq),
   2575		*next_bfqq = RQ_BFQQ(next);
   2576
   2577	if (!bfqq)
   2578		goto remove;
   2579
   2580	/*
   2581	 * If next and rq belong to the same bfq_queue and next is older
   2582	 * than rq, then reposition rq in the fifo (by substituting next
   2583	 * with rq). Otherwise, if next and rq belong to different
   2584	 * bfq_queues, never reposition rq: in fact, we would have to
   2585	 * reposition it with respect to next's position in its own fifo,
   2586	 * which would most certainly be too expensive with respect to
   2587	 * the benefits.
   2588	 */
   2589	if (bfqq == next_bfqq &&
   2590	    !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
   2591	    next->fifo_time < rq->fifo_time) {
   2592		list_del_init(&rq->queuelist);
   2593		list_replace_init(&next->queuelist, &rq->queuelist);
   2594		rq->fifo_time = next->fifo_time;
   2595	}
   2596
   2597	if (bfqq->next_rq == next)
   2598		bfqq->next_rq = rq;
   2599
   2600	bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
   2601remove:
   2602	/* Merged request may be in the IO scheduler. Remove it. */
   2603	if (!RB_EMPTY_NODE(&next->rb_node)) {
   2604		bfq_remove_request(next->q, next);
   2605		if (next_bfqq)
   2606			bfqg_stats_update_io_remove(bfqq_group(next_bfqq),
   2607						    next->cmd_flags);
   2608	}
   2609}
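
/*
 * User-space sketch (excluded from the build) of the fifo_time
 * fix-up above: when rq absorbs the older request 'next' of the same
 * queue, rq inherits next's earlier fifo_time, so the merged request
 * keeps next's position in the FIFO-expiration order.
 */
#if 0 /* illustrative sketch only */
#include <stdio.h>

struct req { long fifo_time; };

int main(void)
{
	struct req rq = { .fifo_time = 1000 };
	struct req next = { .fifo_time = 400 };	/* older request */

	if (next.fifo_time < rq.fifo_time)
		rq.fifo_time = next.fifo_time;
	printf("merged request expires as if queued at t=%ld\n",
	       rq.fifo_time);
	return 0;
}
#endif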
   2610
   2611/* Must be called with bfqq != NULL */
   2612static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
   2613{
   2614	/*
   2615	 * If bfqq has been enjoying interactive weight-raising, then
   2616	 * reset soft_rt_next_start. We do it for the following
   2617	 * reason. bfqq may have been conveying the I/O needed to load
   2618	 * a soft real-time application. Such an application actually
   2619	 * exhibits a soft real-time I/O pattern after it finishes
   2620	 * loading, and finally starts doing its job. But, if bfqq has
   2621	 * been receiving a lot of bandwidth so far (likely to happen
   2622	 * on a fast device), then soft_rt_next_start now contains a
    2623	 * high value. So, without this reset, bfqq would be
   2624	 * prevented from being possibly considered as soft_rt for a
   2625	 * very long time.
   2626	 */
   2627
   2628	if (bfqq->wr_cur_max_time !=
   2629	    bfqq->bfqd->bfq_wr_rt_max_time)
   2630		bfqq->soft_rt_next_start = jiffies;
   2631
   2632	if (bfq_bfqq_busy(bfqq))
   2633		bfqq->bfqd->wr_busy_queues--;
   2634	bfqq->wr_coeff = 1;
   2635	bfqq->wr_cur_max_time = 0;
   2636	bfqq->last_wr_start_finish = jiffies;
   2637	/*
   2638	 * Trigger a weight change on the next invocation of
   2639	 * __bfq_entity_update_weight_prio.
   2640	 */
   2641	bfqq->entity.prio_changed = 1;
   2642}
   2643
   2644void bfq_end_wr_async_queues(struct bfq_data *bfqd,
   2645			     struct bfq_group *bfqg)
   2646{
   2647	int i, j;
   2648
   2649	for (i = 0; i < 2; i++)
   2650		for (j = 0; j < IOPRIO_NR_LEVELS; j++)
   2651			if (bfqg->async_bfqq[i][j])
   2652				bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]);
   2653	if (bfqg->async_idle_bfqq)
   2654		bfq_bfqq_end_wr(bfqg->async_idle_bfqq);
   2655}
   2656
   2657static void bfq_end_wr(struct bfq_data *bfqd)
   2658{
   2659	struct bfq_queue *bfqq;
   2660
   2661	spin_lock_irq(&bfqd->lock);
   2662
   2663	list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
   2664		bfq_bfqq_end_wr(bfqq);
   2665	list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
   2666		bfq_bfqq_end_wr(bfqq);
   2667	bfq_end_wr_async(bfqd);
   2668
   2669	spin_unlock_irq(&bfqd->lock);
   2670}
   2671
   2672static sector_t bfq_io_struct_pos(void *io_struct, bool request)
   2673{
   2674	if (request)
   2675		return blk_rq_pos(io_struct);
   2676	else
   2677		return ((struct bio *)io_struct)->bi_iter.bi_sector;
   2678}
   2679
   2680static int bfq_rq_close_to_sector(void *io_struct, bool request,
   2681				  sector_t sector)
   2682{
   2683	return abs(bfq_io_struct_pos(io_struct, request) - sector) <=
   2684	       BFQQ_CLOSE_THR;
   2685}
   2686
   2687static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd,
   2688					 struct bfq_queue *bfqq,
   2689					 sector_t sector)
   2690{
   2691	struct rb_root *root = &bfqq_group(bfqq)->rq_pos_tree;
   2692	struct rb_node *parent, *node;
   2693	struct bfq_queue *__bfqq;
   2694
   2695	if (RB_EMPTY_ROOT(root))
   2696		return NULL;
   2697
   2698	/*
   2699	 * First, if we find a request starting at the end of the last
   2700	 * request, choose it.
   2701	 */
   2702	__bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
   2703	if (__bfqq)
   2704		return __bfqq;
   2705
   2706	/*
   2707	 * If the exact sector wasn't found, the parent of the NULL leaf
   2708	 * will contain the closest sector (rq_pos_tree sorted by
   2709	 * next_request position).
   2710	 */
   2711	__bfqq = rb_entry(parent, struct bfq_queue, pos_node);
   2712	if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
   2713		return __bfqq;
   2714
   2715	if (blk_rq_pos(__bfqq->next_rq) < sector)
   2716		node = rb_next(&__bfqq->pos_node);
   2717	else
   2718		node = rb_prev(&__bfqq->pos_node);
   2719	if (!node)
   2720		return NULL;
   2721
   2722	__bfqq = rb_entry(node, struct bfq_queue, pos_node);
   2723	if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
   2724		return __bfqq;
   2725
   2726	return NULL;
   2727}
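
       /*
        * Example of the lookup above (sector numbers made up): assume the
        * rq_pos_tree holds queues whose next_rq start at sectors 100, 200
        * and 300, and that the lookup sector is 205. No queue starts
        * exactly at 205, so the descent ends at a NULL leaf; suppose its
        * parent is the queue at 300. If 300 is not within BFQQ_CLOSE_THR
        * of 205, then, since 300 > 205, the only remaining candidate is
        * the predecessor rb_prev(300) = 200, which is checked for
        * closeness in its turn.
        */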
   2728
   2729static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd,
   2730						   struct bfq_queue *cur_bfqq,
   2731						   sector_t sector)
   2732{
   2733	struct bfq_queue *bfqq;
   2734
   2735	/*
   2736	 * We should notice whether some of the queues are cooperating,
   2737	 * e.g., working closely on the same area of the device. In
   2738	 * that case, we can group them together and: 1) don't waste
   2739	 * time idling, and 2) serve the union of their requests in
   2740	 * the best possible order for throughput.
   2741	 */
   2742	bfqq = bfqq_find_close(bfqd, cur_bfqq, sector);
   2743	if (!bfqq || bfqq == cur_bfqq)
   2744		return NULL;
   2745
   2746	return bfqq;
   2747}
   2748
   2749static struct bfq_queue *
   2750bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
   2751{
   2752	int process_refs, new_process_refs;
   2753	struct bfq_queue *__bfqq;
   2754
   2755	/*
   2756	 * If there are no process references on the new_bfqq, then it is
   2757	 * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
   2758	 * may have dropped their last reference (not just their last process
   2759	 * reference).
   2760	 */
   2761	if (!bfqq_process_refs(new_bfqq))
   2762		return NULL;
   2763
   2764	/* Avoid a circular list and skip interim queue merges. */
   2765	while ((__bfqq = new_bfqq->new_bfqq)) {
   2766		if (__bfqq == bfqq)
   2767			return NULL;
   2768		new_bfqq = __bfqq;
   2769	}
   2770
   2771	process_refs = bfqq_process_refs(bfqq);
   2772	new_process_refs = bfqq_process_refs(new_bfqq);
   2773	/*
   2774	 * If the process for the bfqq has gone away, there is no
   2775	 * sense in merging the queues.
   2776	 */
   2777	if (process_refs == 0 || new_process_refs == 0)
   2778		return NULL;
   2779
   2780	/*
   2781	 * Make sure merged queues belong to the same parent. Parents could
   2782	 * have changed since the time we decided the two queues are suitable
   2783	 * for merging.
   2784	 */
   2785	if (new_bfqq->entity.parent != bfqq->entity.parent)
   2786		return NULL;
   2787
   2788	bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
   2789		new_bfqq->pid);
   2790
   2791	/*
   2792	 * Merging is just a redirection: the requests of the process
   2793	 * owning one of the two queues are redirected to the other queue.
   2794	 * The latter queue, in its turn, is set as shared if this is the
   2795	 * first time that the requests of some process are redirected to
   2796	 * it.
   2797	 *
   2798	 * We redirect bfqq to new_bfqq and not the opposite, because
   2799	 * we are in the context of the process owning bfqq, thus we
   2800	 * have the io_cq of this process. So we can immediately
   2801	 * configure this io_cq to redirect the requests of the
   2802	 * process to new_bfqq. In contrast, the io_cq of new_bfqq is
   2803	 * not available any more (new_bfqq->bic == NULL).
   2804	 *
   2805	 * Anyway, even in case new_bfqq coincides with the in-service
   2806	 * queue, redirecting requests to the in-service queue is the
   2807	 * best option, as we feed the in-service queue with new
   2808	 * requests close to the last request served and, by doing so,
   2809	 * are likely to increase the throughput.
   2810	 */
   2811	bfqq->new_bfqq = new_bfqq;
   2812	/*
   2813	 * The above assignment schedules the following redirections:
   2814	 * each time some I/O for bfqq arrives, the process that
   2815	 * generated that I/O is disassociated from bfqq and
   2816	 * associated with new_bfqq. Here we increase new_bfqq->ref
   2817	 * in advance, adding the number of processes that are
   2818	 * expected to be associated with new_bfqq as they happen to
   2819	 * issue I/O.
   2820	 */
   2821	new_bfqq->ref += process_refs;
   2822	return new_bfqq;
   2823}
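
       /*
        * Reference-accounting sketch for bfq_setup_merge() (numbers made
        * up): if three processes share bfqq, then
        * bfqq_process_refs(bfqq) == 3 and new_bfqq->ref is incremented by
        * 3 in one shot. Later, as each of the three processes issues its
        * next I/O and is redirected to new_bfqq (see bfq_merge_bfqqs()),
        * it drops its reference on bfqq, and one of the three references
        * taken in advance on new_bfqq becomes backed by an actual
        * association.
        */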
   2824
   2825static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
   2826					struct bfq_queue *new_bfqq)
   2827{
   2828	if (bfq_too_late_for_merging(new_bfqq))
   2829		return false;
   2830
   2831	if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) ||
   2832	    (bfqq->ioprio_class != new_bfqq->ioprio_class))
   2833		return false;
   2834
   2835	/*
   2836	 * If either of the queues has already been detected as seeky,
   2837	 * then merging it with the other queue is unlikely to lead to
   2838	 * sequential I/O.
   2839	 */
   2840	if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq))
   2841		return false;
   2842
   2843	/*
   2844	 * Interleaved I/O is known to be done by (some) applications
   2845	 * only for reads, so it does not make sense to merge async
   2846	 * queues.
   2847	 */
   2848	if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq))
   2849		return false;
   2850
   2851	return true;
   2852}
   2853
   2854static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
   2855					     struct bfq_queue *bfqq);
   2856
   2857/*
   2858 * Attempt to schedule a merge of bfqq with the currently in-service
   2859 * queue or with a close queue among the scheduled queues.  Return
   2860 * NULL if no merge was scheduled, a pointer to the shared bfq_queue
   2861 * structure otherwise.
   2862 *
   2863 * The OOM queue is not allowed to participate in cooperation: in fact, since
   2864 * the requests temporarily redirected to the OOM queue could be redirected
   2865 * again to dedicated queues at any time, the state needed to correctly
   2866 * handle merging with the OOM queue would be quite complex and expensive
   2867 * to maintain. Besides, in a condition as critical as out of memory,
   2868 * the benefits of queue merging may be of little relevance, or even negligible.
   2869 *
   2870 * WARNING: queue merging may impair fairness among non-weight raised
   2871 * queues, for at least two reasons: 1) the original weight of a
   2872 * merged queue may change during the merged state, 2) even being the
   2873 * weight the same, a merged queue may be bloated with many more
   2874 * requests than the ones produced by its originally-associated
   2875 * process.
   2876 */
   2877static struct bfq_queue *
   2878bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
   2879		     void *io_struct, bool request, struct bfq_io_cq *bic)
   2880{
   2881	struct bfq_queue *in_service_bfqq, *new_bfqq;
   2882
   2883	/* if a merge has already been setup, then proceed with that first */
   2884	if (bfqq->new_bfqq)
   2885		return bfqq->new_bfqq;
   2886
   2887	/*
   2888	 * Check delayed stable merge for rotational or non-queueing
   2889	 * devs. For this branch to be executed, bfqq must not be
   2890	 * currently merged with some other queue (i.e., bfqq->bic
   2891	 * must be non null). If we considered also merged queues,
   2892	 * then we should also check whether bfqq has already been
   2893	 * merged with bic->stable_merge_bfqq. But this would be
   2894	 * costly and complicated.
   2895	 */
   2896	if (unlikely(!bfqd->nonrot_with_queueing)) {
   2897		/*
   2898		 * Make sure also that bfqq is sync, because
   2899		 * bic->stable_merge_bfqq may point to some queue (for
   2900		 * stable merging) even if bic is associated with a
   2901		 * sync queue while this bfqq is async.
   2902		 */
   2903		if (bfq_bfqq_sync(bfqq) && bic->stable_merge_bfqq &&
   2904		    !bfq_bfqq_just_created(bfqq) &&
   2905		    time_is_before_jiffies(bfqq->split_time +
   2906					  msecs_to_jiffies(bfq_late_stable_merging)) &&
   2907		    time_is_before_jiffies(bfqq->creation_time +
   2908					   msecs_to_jiffies(bfq_late_stable_merging))) {
   2909			struct bfq_queue *stable_merge_bfqq =
   2910				bic->stable_merge_bfqq;
   2911			int proc_ref = min(bfqq_process_refs(bfqq),
   2912					   bfqq_process_refs(stable_merge_bfqq));
   2913
   2914			/* deschedule stable merge, because done or aborted here */
   2915			bfq_put_stable_ref(stable_merge_bfqq);
   2916
   2917			bic->stable_merge_bfqq = NULL;
   2918
   2919			if (!idling_boosts_thr_without_issues(bfqd, bfqq) &&
   2920			    proc_ref > 0) {
   2921				/* next function will take at least one ref */
   2922				struct bfq_queue *new_bfqq =
   2923					bfq_setup_merge(bfqq, stable_merge_bfqq);
   2924
   2925				if (new_bfqq) {
   2926					bic->stably_merged = true;
   2927					if (new_bfqq->bic)
   2928						new_bfqq->bic->stably_merged =
   2929									true;
   2930				}
   2931				return new_bfqq;
   2932			} else
   2933				return NULL;
   2934		}
   2935	}
   2936
   2937	/*
   2938	 * Do not perform queue merging if the device is non
   2939	 * rotational and performs internal queueing. In fact, such a
   2940	 * device reaches a high speed through internal parallelism
   2941	 * and pipelining. This means that, to reach a high
   2942	 * throughput, it must have many requests enqueued at the same
   2943	 * time. But, in this configuration, the internal scheduling
   2944	 * algorithm of the device does exactly the job of queue
   2945	 * merging: it reorders requests so as to obtain as much as
   2946	 * possible a sequential I/O pattern. As a consequence, with
   2947	 * the workload generated by processes doing interleaved I/O,
   2948	 * the throughput reached by the device is likely to be the
   2949	 * same, with and without queue merging.
   2950	 *
   2951	 * Disabling merging also provides a remarkable benefit in
   2952	 * terms of throughput. Merging tends to make many workloads
   2953	 * artificially more uneven, because of shared queues
   2954	 * remaining non empty for incomparably more time than
   2955	 * non-merged queues. This may accentuate workload
   2956	 * asymmetries. For example, if one of the queues in a set of
   2957	 * merged queues has a higher weight than a normal queue, then
   2958	 * the shared queue may inherit such a high weight and, by
   2959	 * staying almost always active, may force BFQ to perform I/O
   2960	 * plugging most of the time. This evidently makes it harder
   2961	 * for BFQ to let the device reach a high throughput.
   2962	 *
   2963	 * Finally, the likely() macro below is used not because one
   2964	 * of the two branches is more likely than the other, but to
   2965	 * have the code path after the following if() executed as
   2966	 * fast as possible for the case of a non rotational device
   2967	 * with queueing. We want it because this is the fastest kind
   2968	 * of device. On the opposite end, the likely() may lengthen
   2969	 * the execution time of BFQ for the case of slower devices
   2970	 * (rotational or at least without queueing). But in this case
   2971	 * the execution time of BFQ matters very little, if not at
   2972	 * all.
   2973	 */
   2974	if (likely(bfqd->nonrot_with_queueing))
   2975		return NULL;
   2976
   2977	/*
   2978	 * Prevent bfqq from being merged if it has been created too
   2979	 * long ago. The idea is that true cooperating processes, and
   2980	 * thus their associated bfq_queues, are supposed to be
   2981	 * created shortly after each other. This is the case, e.g.,
   2982	 * for KVM/QEMU and dump I/O threads. Based on this
   2983	 * assumption, the following filtering greatly reduces the
   2984	 * probability that two non-cooperating processes, which just
   2985	 * happen to do close I/O for some short time interval, have
   2986	 * their queues merged by mistake.
   2987	 */
   2988	if (bfq_too_late_for_merging(bfqq))
   2989		return NULL;
   2990
   2991	if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
   2992		return NULL;
   2993
   2994	/* If there is only one backlogged queue, don't search. */
   2995	if (bfq_tot_busy_queues(bfqd) == 1)
   2996		return NULL;
   2997
   2998	in_service_bfqq = bfqd->in_service_queue;
   2999
   3000	if (in_service_bfqq && in_service_bfqq != bfqq &&
   3001	    likely(in_service_bfqq != &bfqd->oom_bfqq) &&
   3002	    bfq_rq_close_to_sector(io_struct, request,
   3003				   bfqd->in_serv_last_pos) &&
   3004	    bfqq->entity.parent == in_service_bfqq->entity.parent &&
   3005	    bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
   3006		new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
   3007		if (new_bfqq)
   3008			return new_bfqq;
   3009	}
   3010	/*
   3011	 * Check whether there is a cooperator among currently scheduled
   3012	 * queues. The only thing we need is that the bio/request is not
   3013	 * NULL, as we need it to establish whether a cooperator exists.
   3014	 */
   3015	new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
   3016			bfq_io_struct_pos(io_struct, request));
   3017
   3018	if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq) &&
   3019	    bfq_may_be_close_cooperator(bfqq, new_bfqq))
   3020		return bfq_setup_merge(bfqq, new_bfqq);
   3021
   3022	return NULL;
   3023}
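
       /*
        * Decision order in bfq_setup_cooperator(), summarized: (1) honor a
        * merge that is already scheduled; (2) on rotational or
        * non-queueing devices, possibly perform a delayed stable merge;
        * (3) bail out on non-rotational devices with queueing, on queues
        * created too long ago, on the OOM queue, and when only one queue
        * is backlogged; (4) try the in-service queue; (5) finally, look
        * for a close queue in the rq_pos_tree.
        */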
   3024
   3025static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
   3026{
   3027	struct bfq_io_cq *bic = bfqq->bic;
   3028
   3029	/*
   3030	 * If !bfqq->bic, the queue is already shared or its requests
   3031	 * have already been redirected to a shared queue; both idle window
   3032	 * and weight raising state have already been saved. Do nothing.
   3033	 */
   3034	if (!bic)
   3035		return;
   3036
   3037	bic->saved_last_serv_time_ns = bfqq->last_serv_time_ns;
   3038	bic->saved_inject_limit = bfqq->inject_limit;
   3039	bic->saved_decrease_time_jif = bfqq->decrease_time_jif;
   3040
   3041	bic->saved_weight = bfqq->entity.orig_weight;
   3042	bic->saved_ttime = bfqq->ttime;
   3043	bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq);
   3044	bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
   3045	bic->saved_io_start_time = bfqq->io_start_time;
   3046	bic->saved_tot_idle_time = bfqq->tot_idle_time;
   3047	bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
   3048	bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
   3049	if (unlikely(bfq_bfqq_just_created(bfqq) &&
   3050		     !bfq_bfqq_in_large_burst(bfqq) &&
   3051		     bfqq->bfqd->low_latency)) {
   3052		/*
   3053		 * bfqq being merged right after being created: bfqq
   3054		 * would have deserved interactive weight raising, but
   3055		 * did not make it to be set in a weight-raised state,
   3056		 * because of this early merge.	Store directly the
   3057		 * weight-raising state that would have been assigned
   3058		 * to bfqq, so as to prevent bfqq from unjustly failing
   3059		 * to enjoy weight raising if split soon.
   3060		 */
   3061		bic->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff;
   3062		bic->saved_wr_start_at_switch_to_srt = bfq_smallest_from_now();
   3063		bic->saved_wr_cur_max_time = bfq_wr_duration(bfqq->bfqd);
   3064		bic->saved_last_wr_start_finish = jiffies;
   3065	} else {
   3066		bic->saved_wr_coeff = bfqq->wr_coeff;
   3067		bic->saved_wr_start_at_switch_to_srt =
   3068			bfqq->wr_start_at_switch_to_srt;
   3069		bic->saved_service_from_wr = bfqq->service_from_wr;
   3070		bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
   3071		bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
   3072	}
   3073}
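
       /*
        * The bic-level copies saved above are presumably what allows a
        * queue that is later split from a shared queue to resume from
        * where it left off: on a split, the saved fields are restored
        * into the fresh bfq_queue, so that, e.g., an interrupted
        * weight-raising period is resumed rather than restarted.
        */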
   3074
   3075
   3076static void
   3077bfq_reassign_last_bfqq(struct bfq_queue *cur_bfqq, struct bfq_queue *new_bfqq)
   3078{
   3079	if (cur_bfqq->entity.parent &&
   3080	    cur_bfqq->entity.parent->last_bfqq_created == cur_bfqq)
   3081		cur_bfqq->entity.parent->last_bfqq_created = new_bfqq;
   3082	else if (cur_bfqq->bfqd && cur_bfqq->bfqd->last_bfqq_created == cur_bfqq)
   3083		cur_bfqq->bfqd->last_bfqq_created = new_bfqq;
   3084}
   3085
   3086void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
   3087{
   3088	/*
   3089	 * To prevent bfqq's service guarantees from being violated,
   3090	 * bfqq may be left busy, i.e., queued for service, even if
   3091	 * empty (see comments in __bfq_bfqq_expire() for
   3092	 * details). But, if no process will send requests to bfqq any
   3093	 * longer, then there is no point in keeping bfqq queued for
   3094	 * service. In addition, keeping bfqq queued for service, but
   3095	 * with no process ref any longer, may have caused bfqq to be
   3096	 * freed when dequeued from service. But this is assumed to
   3097	 * never happen.
   3098	 */
   3099	if (bfq_bfqq_busy(bfqq) && RB_EMPTY_ROOT(&bfqq->sort_list) &&
   3100	    bfqq != bfqd->in_service_queue)
   3101		bfq_del_bfqq_busy(bfqd, bfqq, false);
   3102
   3103	bfq_reassign_last_bfqq(bfqq, NULL);
   3104
   3105	bfq_put_queue(bfqq);
   3106}
   3107
   3108static void
   3109bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
   3110		struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
   3111{
   3112	bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
   3113		(unsigned long)new_bfqq->pid);
   3114	/* Save weight raising and idle window of the merged queues */
   3115	bfq_bfqq_save_state(bfqq);
   3116	bfq_bfqq_save_state(new_bfqq);
   3117	if (bfq_bfqq_IO_bound(bfqq))
   3118		bfq_mark_bfqq_IO_bound(new_bfqq);
   3119	bfq_clear_bfqq_IO_bound(bfqq);
   3120
   3121	/*
   3122	 * The processes associated with bfqq are cooperators of the
   3123	 * processes associated with new_bfqq. So, if bfqq has a
   3124	 * waker, then assume that all these processes will be happy
   3125	 * to let bfqq's waker freely inject I/O when they have no
   3126	 * I/O.
   3127	 */
   3128	if (bfqq->waker_bfqq && !new_bfqq->waker_bfqq &&
   3129	    bfqq->waker_bfqq != new_bfqq) {
   3130		new_bfqq->waker_bfqq = bfqq->waker_bfqq;
   3131		new_bfqq->tentative_waker_bfqq = NULL;
   3132
   3133		/*
   3134		 * If the waker queue disappears, then
   3135		 * new_bfqq->waker_bfqq must be reset. So insert
   3136		 * new_bfqq into the woken_list of the waker. See
   3137		 * bfq_check_waker for details.
   3138		 */
   3139		hlist_add_head(&new_bfqq->woken_list_node,
   3140			       &new_bfqq->waker_bfqq->woken_list);
   3141
   3142	}
   3143
   3144	/*
   3145	 * If bfqq is weight-raised, then let new_bfqq inherit
   3146	 * weight-raising. To reduce false positives, neglect the case
   3147	 * where bfqq has just been created, but has not yet made it
   3148	 * to be weight-raised (which may happen because EQM may merge
   3149	 * bfqq even before bfq_add_request is executed for the first
   3150	 * time for bfqq). Handling this case would however be very
   3151	 * easy, thanks to the flag just_created.
   3152	 */
   3153	if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) {
   3154		new_bfqq->wr_coeff = bfqq->wr_coeff;
   3155		new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time;
   3156		new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish;
   3157		new_bfqq->wr_start_at_switch_to_srt =
   3158			bfqq->wr_start_at_switch_to_srt;
   3159		if (bfq_bfqq_busy(new_bfqq))
   3160			bfqd->wr_busy_queues++;
   3161		new_bfqq->entity.prio_changed = 1;
   3162	}
   3163
   3164	if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */
   3165		bfqq->wr_coeff = 1;
   3166		bfqq->entity.prio_changed = 1;
   3167		if (bfq_bfqq_busy(bfqq))
   3168			bfqd->wr_busy_queues--;
   3169	}
   3170
   3171	bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d",
   3172		     bfqd->wr_busy_queues);
   3173
   3174	/*
   3175	 * Merge queues (that is, let bic redirect its requests to new_bfqq)
   3176	 */
   3177	bic_set_bfqq(bic, new_bfqq, 1);
   3178	bfq_mark_bfqq_coop(new_bfqq);
   3179	/*
   3180	 * new_bfqq now belongs to at least two bics (it is a shared queue):
   3181	 * set new_bfqq->bic to NULL. bfqq either:
   3182	 * - does not belong to any bic any more, and hence bfqq->bic must
   3183	 *   be set to NULL, or
   3184	 * - is a queue whose owning bics have already been redirected to a
   3185	 *   different queue, hence the queue is destined to not belong to
   3186	 *   any bic soon and bfqq->bic is already NULL (therefore the next
   3187	 *   assignment causes no harm).
   3188	 */
   3189	new_bfqq->bic = NULL;
   3190	/*
   3191	 * If the queue is shared, the pid is the pid of one of the associated
   3192	 * processes. Which pid depends on the exact sequence of merge events
   3193	 * the queue underwent. So printing such a pid is useless and confusing
   3194	 * because it reports a random pid between those of the associated
   3195	 * processes.
   3196	 * We mark such a queue with a pid -1, and then print SHARED instead of
   3197	 * a pid in logging messages.
   3198	 */
   3199	new_bfqq->pid = -1;
   3200	bfqq->bic = NULL;
   3201
   3202	bfq_reassign_last_bfqq(bfqq, new_bfqq);
   3203
   3204	bfq_release_process_ref(bfqd, bfqq);
   3205}
   3206
   3207static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
   3208				struct bio *bio)
   3209{
   3210	struct bfq_data *bfqd = q->elevator->elevator_data;
   3211	bool is_sync = op_is_sync(bio->bi_opf);
   3212	struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq;
   3213
   3214	/*
   3215	 * Disallow merge of a sync bio into an async request.
   3216	 */
   3217	if (is_sync && !rq_is_sync(rq))
   3218		return false;
   3219
   3220	/*
   3221	 * Lookup the bfqq that this bio will be queued with. Allow
   3222	 * merge only if rq is queued there.
   3223	 */
   3224	if (!bfqq)
   3225		return false;
   3226
   3227	/*
   3228	 * We take advantage of this function to perform an early merge
   3229	 * of the queues of possible cooperating processes.
   3230	 */
   3231	new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false, bfqd->bio_bic);
   3232	if (new_bfqq) {
   3233		/*
   3234		 * bic still points to bfqq, which means it has not yet been
   3235		 * redirected to some other bfq_queue, and a queue
   3236		 * merge between bfqq and new_bfqq can be safely
   3237		 * fulfilled, i.e., bic can be redirected to new_bfqq
   3238		 * and bfqq can be put.
   3239		 */
   3240		bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
   3241				new_bfqq);
   3242		/*
   3243		 * If we get here, bio will be queued into new_bfqq,
   3244		 * so use new_bfqq to decide whether bio and rq can be
   3245		 * merged.
   3246		 */
   3247		bfqq = new_bfqq;
   3248
   3249		/*
   3250		 * Change also bfqd->bio_bfqq, as
   3251		 * bfqd->bio_bic now points to new_bfqq, and
   3252		 * this function may be invoked again (and then may
   3253		 * use again bfqd->bio_bfqq).
   3254		 */
   3255		bfqd->bio_bfqq = bfqq;
   3256	}
   3257
   3258	return bfqq == RQ_BFQQ(rq);
   3259}
   3260
   3261/*
   3262 * Set the maximum time for the in-service queue to consume its
   3263 * budget. This prevents seeky processes from lowering the throughput.
   3264 * In practice, a time-slice service scheme is used with seeky
   3265 * processes.
   3266 */
   3267static void bfq_set_budget_timeout(struct bfq_data *bfqd,
   3268				   struct bfq_queue *bfqq)
   3269{
   3270	unsigned int timeout_coeff;
   3271
   3272	if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
   3273		timeout_coeff = 1;
   3274	else
   3275		timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
   3276
   3277	bfqd->last_budget_start = ktime_get();
   3278
   3279	bfqq->budget_timeout = jiffies +
   3280		bfqd->bfq_timeout * timeout_coeff;
   3281}
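
       /*
        * Example of the timeout scaling above (coefficient made up): a
        * non-weight-raised queue has entity.weight == entity.orig_weight,
        * so timeout_coeff = 1 and the queue gets one bfq_timeout. If
        * weight raising has multiplied the weight by, say, 30, then
        * timeout_coeff = 30 and the budget timeout grows accordingly.
        * Soft real-time queues are deliberately kept at timeout_coeff = 1
        * by the first branch above.
        */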
   3282
   3283static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
   3284				       struct bfq_queue *bfqq)
   3285{
   3286	if (bfqq) {
   3287		bfq_clear_bfqq_fifo_expire(bfqq);
   3288
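       		/*
       		 * Moving average that converges towards 256 as budgets
       		 * keep being assigned; presumably used elsewhere as an
       		 * indication that enough budget samples have been
       		 * collected.
       		 */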
   3289		bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8;
   3290
   3291		if (time_is_before_jiffies(bfqq->last_wr_start_finish) &&
   3292		    bfqq->wr_coeff > 1 &&
   3293		    bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
   3294		    time_is_before_jiffies(bfqq->budget_timeout)) {
   3295			/*
   3296			 * For soft real-time queues, move the start
   3297			 * of the weight-raising period forward by the
   3298			 * time the queue has not received any
   3299			 * service. Otherwise, a relatively long
   3300			 * service delay is likely to cause the
   3301			 * weight-raising period of the queue to end,
   3302			 * because of the short duration of the
   3303			 * weight-raising period of a soft real-time
   3304			 * queue.  It is worth noting that this move
   3305			 * is not so dangerous for the other queues,
   3306			 * because soft real-time queues are not
   3307			 * greedy.
   3308			 *
   3309			 * To not add a further variable, we use the
   3310			 * overloaded field budget_timeout to
   3311			 * determine for how long the queue has not
   3312			 * received service, i.e., how much time has
   3313			 * elapsed since the queue expired. However,
   3314			 * this is a little imprecise, because
   3315			 * budget_timeout is set to jiffies only if bfqq
   3316			 * does not merely expire, but also remains with no
   3317			 * request pending.
   3318			 */
   3319			if (time_after(bfqq->budget_timeout,
   3320				       bfqq->last_wr_start_finish))
   3321				bfqq->last_wr_start_finish +=
   3322					jiffies - bfqq->budget_timeout;
   3323			else
   3324				bfqq->last_wr_start_finish = jiffies;
   3325		}
   3326
   3327		bfq_set_budget_timeout(bfqd, bfqq);
   3328		bfq_log_bfqq(bfqd, bfqq,
   3329			     "set_in_service_queue, cur-budget = %d",
   3330			     bfqq->entity.budget);
   3331	}
   3332
   3333	bfqd->in_service_queue = bfqq;
   3334	bfqd->in_serv_last_pos = 0;
   3335}
   3336
   3337/*
   3338 * Get and set a new queue for service.
   3339 */
   3340static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
   3341{
   3342	struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
   3343
   3344	__bfq_set_in_service_queue(bfqd, bfqq);
   3345	return bfqq;
   3346}
   3347
   3348static void bfq_arm_slice_timer(struct bfq_data *bfqd)
   3349{
   3350	struct bfq_queue *bfqq = bfqd->in_service_queue;
   3351	u32 sl;
   3352
   3353	bfq_mark_bfqq_wait_request(bfqq);
   3354
   3355	/*
   3356	 * We don't want to idle for seeks, but we do want to allow
   3357	 * fair distribution of slice time for a process doing back-to-back
   3358	 * seeks. So allow a little bit of time for it to submit a new rq.
   3359	 */
   3360	sl = bfqd->bfq_slice_idle;
   3361	/*
   3362	 * Unless the queue is being weight-raised or the scenario is
   3363	 * asymmetric, grant only minimum idle time if the queue
   3364	 * is seeky. A long idling is preserved for a weight-raised
   3365	 * queue, or, more in general, in an asymmetric scenario,
   3366	 * because a long idling is needed for guaranteeing to a queue
   3367	 * its reserved share of the throughput (in particular, it is
   3368	 * needed if the queue has a higher weight than some other
   3369	 * queue).
   3370	 */
   3371	if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
   3372	    !bfq_asymmetric_scenario(bfqd, bfqq))
   3373		sl = min_t(u64, sl, BFQ_MIN_TT);
   3374	else if (bfqq->wr_coeff > 1)
   3375		sl = max_t(u32, sl, 20ULL * NSEC_PER_MSEC);
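
       	/*
       	 * For illustration, with a typical slice_idle of 8 ms: a seeky,
       	 * non-weight-raised queue in a symmetric scenario idles for at
       	 * most BFQ_MIN_TT, a weight-raised queue for at least 20 ms, and
       	 * any other queue for the plain 8 ms.
       	 */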
   3376
   3377	bfqd->last_idling_start = ktime_get();
   3378	bfqd->last_idling_start_jiffies = jiffies;
   3379
   3380	hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
   3381		      HRTIMER_MODE_REL);
   3382	bfqg_stats_set_start_idle_time(bfqq_group(bfqq));
   3383}
   3384
   3385/*
   3386 * In autotuning mode, max_budget is dynamically recomputed as the
   3387 * amount of sectors transferred in timeout at the estimated peak
   3388 * rate. This enables BFQ to utilize a full timeslice with a full
   3389 * budget, even if the in-service queue is served at peak rate. And
   3390 * this maximises throughput with sequential workloads.
   3391 */
   3392static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd)
   3393{
   3394	return (u64)bfqd->peak_rate * USEC_PER_MSEC *
   3395		jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT;
   3396}
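
       /*
        * Back-of-the-envelope example (figures made up): peak_rate encodes
        * sectors/usec left-shifted by BFQ_RATE_SHIFT, so the expression
        * above computes
        *
        *   rate [sectors/usec] * 1000 [usec/msec] * timeout [msec],
        *
        * i.e., the number of sectors transferable at peak rate within one
        * timeout. For a device doing about 1 sector/usec (roughly 512 MB/s
        * with 512-byte sectors) and a timeout of 125 ms, the resulting max
        * budget is about 125000 sectors (~64 MB).
        */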
   3397
   3398/*
   3399 * Update parameters related to throughput and responsiveness, as a
   3400 * function of the estimated peak rate. See comments on
   3401 * bfq_calc_max_budget(), and on the ref_wr_duration array.
   3402 */
   3403static void update_thr_responsiveness_params(struct bfq_data *bfqd)
   3404{
   3405	if (bfqd->bfq_user_max_budget == 0) {
   3406		bfqd->bfq_max_budget =
   3407			bfq_calc_max_budget(bfqd);
   3408		bfq_log(bfqd, "new max_budget = %d", bfqd->bfq_max_budget);
   3409	}
   3410}
   3411
   3412static void bfq_reset_rate_computation(struct bfq_data *bfqd,
   3413				       struct request *rq)
   3414{
   3415	if (rq != NULL) { /* new rq dispatch now, reset accordingly */
   3416		bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns();
   3417		bfqd->peak_rate_samples = 1;
   3418		bfqd->sequential_samples = 0;
   3419		bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size =
   3420			blk_rq_sectors(rq);
   3421	} else /* no new rq dispatched, just reset the number of samples */
   3422		bfqd->peak_rate_samples = 0; /* full re-init on next disp. */
   3423
   3424	bfq_log(bfqd,
   3425		"reset_rate_computation at end, sample %u/%u tot_sects %llu",
   3426		bfqd->peak_rate_samples, bfqd->sequential_samples,
   3427		bfqd->tot_sectors_dispatched);
   3428}
   3429
   3430static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
   3431{
   3432	u32 rate, weight, divisor;
   3433
   3434	/*
   3435	 * For the convergence property to hold (see comments on
   3436	 * bfq_update_peak_rate()) and for the assessment to be
   3437	 * reliable, a minimum number of samples must be present, and
   3438	 * a minimum amount of time must have elapsed. If not so, do
   3439	 * not compute new rate. Just reset parameters, to get ready
   3440	 * for a new evaluation attempt.
   3441	 */
   3442	if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES ||
   3443	    bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL)
   3444		goto reset_computation;
   3445
   3446	/*
   3447	 * If a new request completion has occurred after last
   3448	 * dispatch, then, to approximate the rate at which requests
   3449	 * have been served by the device, it is more precise to
   3450	 * extend the observation interval to the last completion.
   3451	 */
   3452	bfqd->delta_from_first =
   3453		max_t(u64, bfqd->delta_from_first,
   3454		      bfqd->last_completion - bfqd->first_dispatch);
   3455
   3456	/*
   3457	 * Rate computed in sects/usec, and not sects/nsec, to
   3458	 * avoid precision issues.
   3459	 */
   3460	rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT,
   3461			div_u64(bfqd->delta_from_first, NSEC_PER_USEC));
   3462
   3463	/*
   3464	 * Peak rate not updated if:
   3465	 * - the percentage of sequential dispatches is below 3/4 of the
   3466	 *   total, and rate is below the current estimated peak rate
   3467	 * - rate is unreasonably high (> 20M sectors/sec)
   3468	 */
   3469	if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 &&
   3470	     rate <= bfqd->peak_rate) ||
   3471		rate > 20<<BFQ_RATE_SHIFT)
   3472		goto reset_computation;
   3473
   3474	/*
   3475	 * We have to update the peak rate, at last! To this purpose,
   3476	 * we use a low-pass filter. We compute the smoothing constant
   3477	 * of the filter as a function of the 'weight' of the new
   3478	 * measured rate.
   3479	 *
   3480	 * As can be seen in the next formulas, we define this weight as a
   3481	 * quantity proportional to how sequential the workload is,
   3482	 * and to how long the observation time interval is.
   3483	 *
   3484	 * The weight runs from 0 to 8. The maximum value of the
   3485	 * weight, 8, yields the minimum value for the smoothing
   3486	 * constant. At this minimum value for the smoothing constant,
   3487	 * the measured rate contributes for half of the next value of
   3488	 * the estimated peak rate.
   3489	 *
   3490	 * So, the first step is to compute the weight as a function
   3491	 * of how sequential the workload is. Note that the weight
   3492	 * cannot reach 9, because bfqd->sequential_samples cannot
   3493	 * become equal to bfqd->peak_rate_samples, which, in its
   3494	 * turn, holds true because bfqd->sequential_samples is not
   3495	 * incremented for the first sample.
   3496	 */
   3497	weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples;
   3498
   3499	/*
   3500	 * Second step: further refine the weight as a function of the
   3501	 * duration of the observation interval.
   3502	 */
   3503	weight = min_t(u32, 8,
   3504		       div_u64(weight * bfqd->delta_from_first,
   3505			       BFQ_RATE_REF_INTERVAL));
   3506
   3507	/*
   3508	 * Divisor ranging from 10, for minimum weight, to 2, for
   3509	 * maximum weight.
   3510	 */
   3511	divisor = 10 - weight;
   3512
   3513	/*
   3514	 * Finally, update peak rate:
   3515	 *
   3516	 * peak_rate = peak_rate * (divisor-1) / divisor  +  rate / divisor
   3517	 */
   3518	bfqd->peak_rate *= divisor-1;
   3519	bfqd->peak_rate /= divisor;
   3520	rate /= divisor; /* smoothing constant alpha = 1/divisor */
   3521
   3522	bfqd->peak_rate += rate;
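
       	/*
       	 * Numeric sketch (sample counts made up): with 60 sequential
       	 * samples out of 80, weight = (9 * 60) / 80 = 6; if the
       	 * observation interval has just reached BFQ_RATE_REF_INTERVAL,
       	 * the second step leaves weight at 6, so divisor = 4 and the
       	 * update above amounts to
       	 *
       	 *   peak_rate = old peak_rate * 3/4 + measured rate * 1/4
       	 */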
   3523
   3524	/*
   3525	 * For a very slow device, bfqd->peak_rate can reach 0 (see
   3526	 * the minimum representable values reported in the comments
   3527	 * on BFQ_RATE_SHIFT). Push to 1 if this happens, to avoid
   3528	 * divisions by zero where bfqd->peak_rate is used as a
   3529	 * divisor.
   3530	 */
   3531	bfqd->peak_rate = max_t(u32, 1, bfqd->peak_rate);
   3532
   3533	update_thr_responsiveness_params(bfqd);
   3534
   3535reset_computation:
   3536	bfq_reset_rate_computation(bfqd, rq);
   3537}
   3538
   3539/*
   3540 * Update the read/write peak rate (the main quantity used for
   3541 * auto-tuning, see update_thr_responsiveness_params()).
   3542 *
   3543 * It is not trivial to estimate the peak rate (correctly): because of
   3544 * the presence of sw and hw queues between the scheduler and the
   3545 * device components that finally serve I/O requests, it is hard to
   3546 * say exactly when a given dispatched request is served inside the
   3547 * device, and for how long. As a consequence, it is hard to know
   3548 * precisely at what rate a given set of requests is actually served
   3549 * by the device.
   3550 *
   3551 * On the opposite end, the dispatch time of any request is trivially
   3552 * available, and, from this piece of information, the "dispatch rate"
   3553 * of requests can be immediately computed. So, the idea in the next
   3554 * function is to use what is known, namely request dispatch times
   3555 * (plus, when useful, request completion times), to estimate what is
   3556 * unknown, namely in-device request service rate.
   3557 *
   3558 * The main issue is that, because of the above facts, the rate at
   3559 * which a certain set of requests is dispatched over a certain time
   3560 * interval can vary greatly with respect to the rate at which the
   3561 * same requests are then served. But, since the size of any
   3562 * intermediate queue is limited, and the service scheme is lossless
   3563 * (no request is silently dropped), the following obvious convergence
   3564 * property holds: the number of requests dispatched MUST become
   3565 * closer and closer to the number of requests completed as the
   3566 * observation interval grows. This is the key property used in
   3567 * the next function to estimate the peak service rate as a function
   3568 * of the observed dispatch rate. The function assumes to be invoked
   3569 * on every request dispatch.
   3570 */
   3571static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
   3572{
   3573	u64 now_ns = ktime_get_ns();
   3574
   3575	if (bfqd->peak_rate_samples == 0) { /* first dispatch */
   3576		bfq_log(bfqd, "update_peak_rate: goto reset, samples %d",
   3577			bfqd->peak_rate_samples);
   3578		bfq_reset_rate_computation(bfqd, rq);
   3579		goto update_last_values; /* will add one sample */
   3580	}
   3581
   3582	/*
   3583	 * Device idle for very long: the observation interval lasting
   3584	 * up to this dispatch cannot be a valid observation interval
   3585	 * for computing a new peak rate (similarly to the late-
   3586	 * completion event in bfq_completed_request()). Go to
   3587	 * update_rate_and_reset to have the following three steps
   3588	 * taken:
   3589	 * - close the observation interval at the last (previous)
   3590	 *   request dispatch or completion
   3591	 * - compute rate, if possible, for that observation interval
   3592	 * - start a new observation interval with this dispatch
   3593	 */
   3594	if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC &&
   3595	    bfqd->rq_in_driver == 0)
   3596		goto update_rate_and_reset;
   3597
   3598	/* Update sampling information */
   3599	bfqd->peak_rate_samples++;
   3600
   3601	if ((bfqd->rq_in_driver > 0 ||
   3602		now_ns - bfqd->last_completion < BFQ_MIN_TT)
   3603	    && !BFQ_RQ_SEEKY(bfqd, bfqd->last_position, rq))
   3604		bfqd->sequential_samples++;
   3605
   3606	bfqd->tot_sectors_dispatched += blk_rq_sectors(rq);
   3607
   3608	/* Reset max observed rq size every 32 dispatches */
   3609	if (likely(bfqd->peak_rate_samples % 32))
   3610		bfqd->last_rq_max_size =
   3611			max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size);
   3612	else
   3613		bfqd->last_rq_max_size = blk_rq_sectors(rq);
   3614
   3615	bfqd->delta_from_first = now_ns - bfqd->first_dispatch;
   3616
   3617	/* Target observation interval not yet reached, go on sampling */
   3618	if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL)
   3619		goto update_last_values;
   3620
   3621update_rate_and_reset:
   3622	bfq_update_rate_reset(bfqd, rq);
   3623update_last_values:
   3624	bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
   3625	if (RQ_BFQQ(rq) == bfqd->in_service_queue)
   3626		bfqd->in_serv_last_pos = bfqd->last_position;
   3627	bfqd->last_dispatch = now_ns;
   3628}
   3629
   3630/*
   3631 * Remove request from internal lists.
   3632 */
   3633static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
   3634{
   3635	struct bfq_queue *bfqq = RQ_BFQQ(rq);
   3636
   3637	/*
   3638	 * For consistency, the next instruction should have been
   3639	 * executed after removing the request from the queue and
   3640	 * dispatching it.  We execute instead this instruction before
   3641	 * dispatching it.  We instead execute this instruction before
   3642	 * inconsistency), for efficiency.  In fact, should this
   3643	 * dispatch occur for a non in-service bfqq, this anticipated
   3644	 * increment prevents two counters related to bfqq->dispatched
   3645	 * from being, first, uselessly decremented, and then
   3646	 * incremented again when the (new) value of bfqq->dispatched
   3647	 * happens to be taken into account.
   3648	 */
   3649	bfqq->dispatched++;
   3650	bfq_update_peak_rate(q->elevator->elevator_data, rq);
   3651
   3652	bfq_remove_request(q, rq);
   3653}
   3654
   3655/*
   3656 * There is a case where idling does not have to be performed for
   3657 * throughput concerns, but to preserve the throughput share of
   3658 * the process associated with bfqq.
   3659 *
   3660 * To introduce this case, we can note that allowing the drive
   3661 * to enqueue more than one request at a time, and hence
   3662 * delegating de facto final scheduling decisions to the
   3663 * drive's internal scheduler, entails loss of control on the
   3664 * actual request service order. In particular, the critical
   3665 * situation is when requests from different processes happen
   3666 * to be present, at the same time, in the internal queue(s)
   3667 * of the drive. In such a situation, the drive, by deciding
   3668 * the service order of the internally-queued requests, does
   3669 * determine also the actual throughput distribution among
   3670 * these processes. But the drive typically has no notion or
   3671 * concern about per-process throughput distribution, and
   3672 * makes its decisions only on a per-request basis. Therefore,
   3673 * the service distribution enforced by the drive's internal
   3674 * scheduler is likely to coincide with the desired throughput
   3675 * distribution only in a completely symmetric, or favorably
   3676 * skewed scenario where:
   3677 * (i-a) each of these processes must get the same throughput as
   3678 *	 the others,
   3679 * (i-b) in case (i-a) does not hold, it holds that the process
   3680 *       associated with bfqq must receive a lower or equal
   3681 *	 throughput than any of the other processes;
   3682 * (ii)  the I/O of each process has the same properties, in
   3683 *       terms of locality (sequential or random), direction
   3684 *       (reads or writes), request sizes, greediness
   3685 *       (from I/O-bound to sporadic), and so on;
   3686 *
   3687 * In fact, in such a scenario, the drive tends to treat the requests
   3688 * of each process in about the same way as the requests of the
   3689 * others, and thus to provide each of these processes with about the
   3690 * same throughput.  This is exactly the desired throughput
   3691 * distribution if (i-a) holds, or, if (i-b) holds instead, this is an
   3692 * even more convenient distribution for (the process associated with)
   3693 * bfqq.
   3694 *
   3695 * In contrast, in any asymmetric or unfavorable scenario, device
   3696 * idling (I/O-dispatch plugging) is certainly needed to guarantee
   3697 * that bfqq receives its assigned fraction of the device throughput
   3698 * (see [1] for details).
   3699 *
   3700 * The problem is that idling may significantly reduce throughput with
   3701 * certain combinations of types of I/O and devices. An important
   3702 * example is sync random I/O on flash storage with command
   3703 * queueing. So, unless bfqq falls in cases where idling also boosts
   3704 * throughput, it is important to check conditions (i-a), (i-b) and
   3705 * (ii) accurately, so as to avoid idling when not strictly needed for
   3706 * service guarantees.
   3707 *
   3708 * Unfortunately, it is extremely difficult to thoroughly check
   3709 * condition (ii). And, in case there are active groups, it becomes
   3710 * very difficult to check conditions (i-a) and (i-b) too.  In fact,
   3711 * if there are active groups, then, for conditions (i-a) or (i-b) to
   3712 * become false 'indirectly', it is enough that an active group
   3713 * contains more active processes or sub-groups than some other active
   3714 * group. More precisely, for conditions (i-a) or (i-b) to become
   3715 * false because of such a group, it is not even necessary that the
   3716 * group is (still) active: it is sufficient that, even if the group
   3717 * has become inactive, some of its descendant processes still have
   3718 * some request already dispatched but still waiting for
   3719 * completion. In fact, requests have still to be guaranteed their
   3720 * share of the throughput even after being dispatched. In this
   3721 * respect, it is easy to show that, if a group frequently becomes
   3722 * inactive while still having in-flight requests, and if, when this
   3723 * happens, the group is not considered in the calculation of whether
   3724 * the scenario is asymmetric, then the group may fail to be
   3725 * guaranteed its fair share of the throughput (basically because
   3726 * idling may not be performed for the descendant processes of the
   3727 * group, but it had to be).  We address this issue with the following
   3728 * bi-modal behavior, implemented in the function
   3729 * bfq_asymmetric_scenario().
   3730 *
   3731 * If there are groups with requests waiting for completion
   3732 * (as commented above, some of these groups may even be
   3733 * already inactive), then the scenario is tagged as
   3734 * asymmetric, conservatively, without checking any of the
   3735 * conditions (i-a), (i-b) or (ii). So the device is idled for bfqq.
   3736 * This behavior matches also the fact that groups are created
   3737 * exactly if controlling I/O is a primary concern (to
   3738 * preserve bandwidth and latency guarantees).
   3739 *
   3740 * On the opposite end, if there are no groups with requests waiting
   3741 * for completion, then only conditions (i-a) and (i-b) are actually
   3742 * controlled, i.e., provided that conditions (i-a) or (i-b) holds,
   3743 * idling is not performed, regardless of whether condition (ii)
   3744 * holds.  In other words, only if conditions (i-a) and (i-b) do not
   3745 * hold, then idling is allowed, and the device tends to be prevented
   3746 * from queueing many requests, possibly of several processes. Since
   3747 * there are no groups with requests waiting for completion, then, to
   3748 * control conditions (i-a) and (i-b) it is enough to check just
   3749 * whether all the queues with requests waiting for completion also
   3750 * have the same weight.
   3751 *
   3752 * Not checking condition (ii) evidently exposes bfqq to the
   3753 * risk of getting less throughput than its fair share.
   3754 * However, for queues with the same weight, a further
   3755 * mechanism, preemption, mitigates or even eliminates this
   3756 * problem. And it does so without consequences on overall
   3757 * throughput. This mechanism and its benefits are explained
   3758 * in the next three paragraphs.
   3759 *
   3760 * Even if a queue, say Q, is expired when it remains idle, Q
   3761 * can still preempt the new in-service queue if the next
   3762 * request of Q arrives soon (see the comments on
   3763 * bfq_bfqq_update_budg_for_activation). If all queues and
   3764 * groups have the same weight, this form of preemption,
   3765 * combined with the hole-recovery heuristic described in the
   3766 * comments on function bfq_bfqq_update_budg_for_activation,
   3767 * are enough to preserve a correct bandwidth distribution in
   3768 * the mid term, even without idling. In fact, even if not
   3769 * idling allows the internal queues of the device to contain
   3770 * many requests, and thus to reorder requests, we can rather
   3771 * safely assume that the internal scheduler still preserves a
   3772 * minimum of mid-term fairness.
   3773 *
   3774 * More precisely, this preemption-based, idleless approach
   3775 * provides fairness in terms of IOPS, and not sectors per
   3776 * second. This can be seen with a simple example. Suppose
   3777 * that there are two queues with the same weight, but that
   3778 * the first queue receives requests of 8 sectors, while the
   3779 * second queue receives requests of 1024 sectors. In
   3780 * addition, suppose that each of the two queues contains at
   3781 * most one request at a time, which implies that each queue
   3782 * always remains idle after it is served. Finally, after
   3783 * remaining idle, each queue receives very quickly a new
   3784 * request. It follows that the two queues are served
   3785 * alternatively, preempting each other if needed. This
   3786 * implies that, although both queues have the same weight,
   3787 * the queue with large requests receives a service that is
   3788 * 1024/8 times as high as the service received by the other
   3789 * queue.
   3790 *
   3791 * The motivation for using preemption instead of idling (for
   3792 * queues with the same weight) is that, by not idling,
   3793 * service guarantees are preserved (completely or at least in
   3794 * part) without minimally sacrificing throughput. And, if
   3795 * there is no active group, then the primary expectation for
   3796 * this device is probably a high throughput.
   3797 *
   3798 * We are now left only with explaining the two sub-conditions in the
   3799 * additional compound condition that is checked below for deciding
   3800 * whether the scenario is asymmetric. To explain the first
   3801 * sub-condition, we need to add that the function
   3802 * bfq_asymmetric_scenario checks the weights of only
   3803 * non-weight-raised queues, for efficiency reasons (see comments on
   3804 * bfq_weights_tree_add()). Then the fact that bfqq is weight-raised
   3805 * is checked explicitly here. More precisely, the compound condition
   3806 * below takes into account also the fact that, even if bfqq is being
   3807 * weight-raised, the scenario is still symmetric if all queues with
   3808 * requests waiting for completion happen to be
   3809 * weight-raised. Actually, we should be even more precise here, and
   3810 * differentiate between interactive weight raising and soft real-time
   3811 * weight raising.
   3812 *
   3813 * The second sub-condition checked in the compound condition is
   3814 * whether there is a fair amount of already in-flight I/O not
   3815 * belonging to bfqq. If so, I/O dispatching is to be plugged, for the
   3816 * following reason. The drive may decide to serve in-flight
   3817 * non-bfqq's I/O requests before bfqq's ones, thereby delaying the
   3818 * arrival of new I/O requests for bfqq (recall that bfqq is sync). If
   3819 * I/O-dispatching is not plugged, then, while bfqq remains empty, a
   3820 * basically uncontrolled amount of I/O from other queues may be
   3821 * dispatched too, possibly causing the service of bfqq's I/O to be
   3822 * delayed even longer in the drive. This problem gets more and more
   3823 * serious as the speed and the queue depth of the drive grow,
   3824 * because, as these two quantities grow, the probability to find no
   3825 * queue busy but many requests in flight grows too. By contrast,
   3826 * plugging I/O dispatching minimizes the delay induced by already
   3827 * in-flight I/O, and enables bfqq to recover the bandwidth it may
   3828 * lose because of this delay.
   3829 *
   3830 * As a side note, it is worth considering that the above
   3831 * device-idling countermeasures may however fail in the following
   3832 * unlucky scenario: if I/O-dispatch plugging is (correctly) disabled
   3833 * in a time period during which all symmetry sub-conditions hold, and
   3834 * therefore the device is allowed to enqueue many requests, but at
   3835 * some later point in time some sub-condition ceases to hold, then it
   3836 * may become impossible to make requests be served in the desired
   3837 * order until all the requests already queued in the device have been
   3838 * served. The last sub-condition commented above somewhat mitigates
   3839 * this problem for weight-raised queues.
   3840 *
   3841 * However, as an additional mitigation for this problem, we preserve
   3842 * plugging for a special symmetric case that may suddenly turn into
   3843 * asymmetric: the case where only bfqq is busy. In this case, not
   3844 * expiring bfqq does not cause any harm to any other queues in terms
   3845 * of service guarantees. In contrast, it avoids the following unlucky
   3846 * sequence of events: (1) bfqq is expired, (2) a new queue with a
   3847 * lower weight than bfqq becomes busy (or more queues), (3) the new
   3848 * queue is served until a new request arrives for bfqq, (4) when bfqq
   3849 * is finally served, there are so many requests of the new queue in
   3850 * the drive that the pending requests for bfqq take a lot of time to
   3851 * be served. In particular, event (2) may cause even already
   3852 * dispatched requests of bfqq to be delayed, inside the drive. So, to
   3853 * avoid this series of events, the scenario is preventively declared
   3854 * as asymmetric also if bfqq is the only busy queue.
   3855 */
   3856static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd,
   3857						 struct bfq_queue *bfqq)
   3858{
   3859	int tot_busy_queues = bfq_tot_busy_queues(bfqd);
   3860
   3861	/* No point in idling for bfqq if it won't get requests any longer */
   3862	if (unlikely(!bfqq_process_refs(bfqq)))
   3863		return false;
   3864
   3865	return (bfqq->wr_coeff > 1 &&
   3866		(bfqd->wr_busy_queues <
   3867		 tot_busy_queues ||
   3868		 bfqd->rq_in_driver >=
   3869		 bfqq->dispatched + 4)) ||
   3870		bfq_asymmetric_scenario(bfqd, bfqq) ||
   3871		tot_busy_queues == 1;
   3872}
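
       /*
        * Reading of the compound condition above, for convenience: idling
        * is deemed needed for service guarantees if
        * (a) bfqq is weight-raised, and either some busy queue is not
        *     weight-raised or the drive holds at least four more requests
        *     than bfqq has dispatched; or
        * (b) the scenario is asymmetric according to
        *     bfq_asymmetric_scenario(); or
        * (c) bfqq is the only busy queue.
        */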
   3873
   3874static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq,
   3875			      enum bfqq_expiration reason)
   3876{
   3877	/*
   3878	 * If this bfqq is shared between multiple processes, check
   3879	 * to make sure that those processes are still issuing I/Os
   3880	 * within the mean seek distance. If not, it may be time to
   3881	 * break the queues apart again.
   3882	 */
   3883	if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
   3884		bfq_mark_bfqq_split_coop(bfqq);
   3885
   3886	/*
   3887	 * Consider queues with a higher finish virtual time than
   3888	 * bfqq. If idling_needed_for_service_guarantees(bfqq) returns
   3889	 * true, then bfqq's bandwidth would be violated if an
   3890	 * uncontrolled amount of I/O from these queues were
   3891	 * dispatched while bfqq is waiting for its new I/O to
   3892	 * arrive. This is exactly what may happen if this is a forced
   3893	 * expiration caused by a preemption attempt, and if bfqq is
   3894	 * not re-scheduled. To prevent this from happening, re-queue
   3895	 * bfqq if it needs I/O-dispatch plugging, even if it is
   3896	 * empty. By doing so, bfqq is guaranteed to be served before the
   3897	 * above queues (provided that bfqq is of course eligible).
   3898	 */
   3899	if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
   3900	    !(reason == BFQQE_PREEMPTED &&
   3901	      idling_needed_for_service_guarantees(bfqd, bfqq))) {
   3902		if (bfqq->dispatched == 0)
   3903			/*
   3904			 * Overloading budget_timeout field to store
   3905			 * the time at which the queue remains with no
   3906			 * backlog and no outstanding request; used by
   3907			 * the weight-raising mechanism.
   3908			 */
   3909			bfqq->budget_timeout = jiffies;
   3910
   3911		bfq_del_bfqq_busy(bfqd, bfqq, true);
   3912	} else {
   3913		bfq_requeue_bfqq(bfqd, bfqq, true);
   3914		/*
   3915		 * Resort priority tree of potential close cooperators.
   3916		 * See comments on bfq_pos_tree_add_move() for the unlikely().
   3917		 */
   3918		if (unlikely(!bfqd->nonrot_with_queueing &&
   3919			     !RB_EMPTY_ROOT(&bfqq->sort_list)))
   3920			bfq_pos_tree_add_move(bfqd, bfqq);
   3921	}
   3922
   3923	/*
   3924	 * All in-service entities must have been properly deactivated
   3925	 * or requeued before executing the next function, which
   3926	 * resets all in-service entities as no more in service. This
   3927	 * may cause bfqq to be freed. If this happens, the next
   3928	 * function returns true.
   3929	 */
   3930	return __bfq_bfqd_reset_in_service(bfqd);
   3931}
   3932
   3933/**
   3934 * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
   3935 * @bfqd: device data.
   3936 * @bfqq: queue to update.
   3937 * @reason: reason for expiration.
   3938 *
   3939 * Handle the feedback on @bfqq budget at queue expiration.
   3940 * See the body for detailed comments.
   3941 */
   3942static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
   3943				     struct bfq_queue *bfqq,
   3944				     enum bfqq_expiration reason)
   3945{
   3946	struct request *next_rq;
   3947	int budget, min_budget;
   3948
   3949	min_budget = bfq_min_budget(bfqd);
   3950
   3951	if (bfqq->wr_coeff == 1)
   3952		budget = bfqq->max_budget;
   3953	else /*
   3954	      * Use a constant, low budget for weight-raised queues,
   3955	      * to help achieve a low latency. Keep it slightly higher
   3956	      * than the minimum possible budget, to cause a little
   3957	      * bit fewer expirations.
   3958	      */
   3959		budget = 2 * min_budget;
   3960
   3961	bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d",
   3962		bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
   3963	bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d",
   3964		budget, bfq_min_budget(bfqd));
   3965	bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
   3966		bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
   3967
   3968	if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) {
   3969		switch (reason) {
   3970		/*
   3971		 * Caveat: in all the following cases we trade latency
   3972		 * for throughput.
   3973		 */
   3974		case BFQQE_TOO_IDLE:
   3975			/*
   3976			 * This is the only case where we may reduce
   3977			 * the budget: if there is no request of the
   3978			 * process still waiting for completion, then
   3979			 * we assume (tentatively) that the timer has
   3980			 * expired because the batch of requests of
   3981			 * the process could have been served with a
   3982			 * smaller budget.  Hence, betting that
   3983			 * smaller budget.  Hence, betting that the
   3984			 * becomes backlogged again, we reduce its
   3985			 * next budget.  As long as we guess right,
   3986			 * this budget cut reduces the latency
   3987			 * experienced by the process.
   3988			 *
   3989			 * However, if there are still outstanding
   3990			 * requests, then the process may have not yet
   3991			 * issued its next request just because it is
   3992			 * still waiting for the completion of some of
   3993			 * the still outstanding ones.  So in this
   3994			 * subcase we do not reduce its budget, on the
   3995			 * contrary we increase it to possibly boost
   3996			 * the throughput, as discussed in the
   3997			 * comments to the BUDGET_TIMEOUT case.
   3998			 */
   3999			if (bfqq->dispatched > 0) /* still outstanding reqs */
   4000				budget = min(budget * 2, bfqd->bfq_max_budget);
   4001			else {
   4002				if (budget > 5 * min_budget)
   4003					budget -= 4 * min_budget;
   4004				else
   4005					budget = min_budget;
   4006			}
   4007			break;
   4008		case BFQQE_BUDGET_TIMEOUT:
   4009			/*
   4010			 * We double the budget here because it gives
   4011			 * the chance to boost the throughput if this
   4012			 * is not a seeky process (and has bumped into
   4013			 * this timeout because of, e.g., ZBR).
   4014			 */
   4015			budget = min(budget * 2, bfqd->bfq_max_budget);
   4016			break;
   4017		case BFQQE_BUDGET_EXHAUSTED:
   4018			/*
   4019			 * The process still has backlog, and did not
   4020			 * let either the budget timeout or the disk
   4021			 * idling timeout expire. Hence it is not
   4022			 * seeky, has a short thinktime and may be
   4023			 * happy with a higher budget too. So
   4024			 * definitely increase the budget of this good
   4025			 * candidate to boost the disk throughput.
   4026			 */
   4027			budget = min(budget * 4, bfqd->bfq_max_budget);
   4028			break;
   4029		case BFQQE_NO_MORE_REQUESTS:
   4030			/*
   4031			 * For queues that expire for this reason, it
   4032			 * is particularly important to keep the
   4033			 * budget close to the actual service they
   4034			 * need. Doing so reduces the timestamp
   4035			 * misalignment problem described in the
   4036			 * comments in the body of
   4037			 * __bfq_activate_entity. In fact, suppose
   4038			 * that a queue systematically expires for
   4039			 * BFQQE_NO_MORE_REQUESTS and presents a
   4040			 * new request in time to enjoy timestamp
   4041			 * back-shifting. The larger the budget of the
   4042			 * queue is with respect to the service the
   4043			 * queue actually requests in each service
   4044			 * slot, the more times the queue can be
   4045			 * reactivated with the same virtual finish
   4046			 * time. It follows that, even if this finish
   4047			 * time is pushed to the system virtual time
   4048			 * to reduce the consequent timestamp
   4049			 * misalignment, the queue unjustly enjoys for
   4050			 * many re-activations a lower finish time
   4051			 * than all newly activated queues.
   4052			 *
   4053			 * The service needed by bfqq is measured
   4054			 * quite precisely by bfqq->entity.service.
   4055			 * Since bfqq does not enjoy device idling,
   4056			 * bfqq->entity.service is equal to the number
   4057			 * of sectors that the process associated with
   4058			 * bfqq requested to read/write before waiting
   4059			 * for request completions, or blocking for
   4060			 * other reasons.
   4061			 */
   4062			budget = max_t(int, bfqq->entity.service, min_budget);
   4063			break;
   4064		default:
   4065			return;
   4066		}
   4067	} else if (!bfq_bfqq_sync(bfqq)) {
   4068		/*
   4069		 * Async queues get always the maximum possible
   4070		 * budget, as for them we do not care about latency
   4071		 * (in addition, their ability to dispatch is limited
   4072		 * by the charging factor).
   4073		 */
   4074		budget = bfqd->bfq_max_budget;
   4075	}
   4076
   4077	bfqq->max_budget = budget;
   4078
   4079	if (bfqd->budgets_assigned >= bfq_stats_min_budgets &&
   4080	    !bfqd->bfq_user_max_budget)
   4081		bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget);
   4082
   4083	/*
   4084	 * If there is still backlog, then assign a new budget, making
   4085	 * sure that it is large enough for the next request.  Since
   4086	 * the finish time of bfqq must be kept in sync with the
   4087	 * budget, be sure to call __bfq_bfqq_expire() *after* this
   4088	 * update.
   4089	 *
   4090	 * If there is no backlog, then no need to update the budget;
   4091	 * it will be updated on the arrival of a new request.
   4092	 */
   4093	next_rq = bfqq->next_rq;
   4094	if (next_rq)
   4095		bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
   4096					    bfq_serv_to_charge(next_rq, bfqq));
   4097
   4098	bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d",
   4099			next_rq ? blk_rq_sectors(next_rq) : 0,
   4100			bfqq->entity.budget);
   4101}
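       /*
        * Budget feedback at a glance, with illustrative values:
        * assume min_budget == 32 sectors, bfq_max_budget == 16384 and
        * a starting max_budget == 512. Then:
        * - BFQQE_TOO_IDLE, no outstanding requests: since
        *   512 > 5 * 32, the budget drops to 512 - 4 * 32 = 384;
        * - BFQQE_BUDGET_TIMEOUT: the budget doubles to 1024;
        * - BFQQE_BUDGET_EXHAUSTED: the budget quadruples to 2048;
        * - BFQQE_NO_MORE_REQUESTS, entity.service == 200: the budget
        *   becomes max(200, 32) = 200.
        * Every increase is capped by bfqd->bfq_max_budget.
        */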
   4102
   4103/*
   4104 * Return true if the process associated with bfqq is "slow". The slow
   4105 * flag is used, in addition to the budget timeout, to reduce the
   4106 * amount of service provided to seeky processes, and thus reduce
   4107 * their chances to lower the throughput. More details in the comments
   4108 * on the function bfq_bfqq_expire().
   4109 *
   4110 * An important observation is in order: as discussed in the comments
   4111 * on the function bfq_update_peak_rate(), with devices with internal
   4112 * queues, it is hard, if possible at all, to know when and for how long
   4113 * an I/O request is processed by the device (apart from the trivial
   4114 * I/O pattern where a new request is dispatched only after the
   4115 * previous one has been completed). This makes it hard to evaluate
   4116 * the real rate at which the I/O requests of each bfq_queue are
   4117 * served.  In fact, for an I/O scheduler like BFQ, serving a
   4118 * bfq_queue means just dispatching its requests during its service
   4119 * slot (i.e., until the budget of the queue is exhausted, or the
   4120 * queue remains idle, or, finally, a timeout fires). But, during the
   4121 * service slot of a bfq_queue, around 100 ms at most, the device may
   4122 * even still be processing requests of bfq_queues served in previous
   4123 * service slots. On the opposite end, the requests of the in-service
   4124 * bfq_queue may be completed after the service slot of the queue
   4125 * finishes.
   4126 *
   4127 * Anyway, unless more sophisticated solutions are used
   4128 * (where possible), the sum of the sizes of the requests dispatched
   4129 * during the service slot of a bfq_queue is probably the only
   4130 * approximation available for the service received by the bfq_queue
   4131 * during its service slot. And this sum is the quantity used in this
   4132 * function to evaluate the I/O speed of a process.
   4133 */
   4134static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
   4135				 bool compensate, enum bfqq_expiration reason,
   4136				 unsigned long *delta_ms)
   4137{
   4138	ktime_t delta_ktime;
   4139	u32 delta_usecs;
   4140	bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekyness */
   4141
   4142	if (!bfq_bfqq_sync(bfqq))
   4143		return false;
   4144
   4145	if (compensate)
   4146		delta_ktime = bfqd->last_idling_start;
   4147	else
   4148		delta_ktime = ktime_get();
   4149	delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start);
   4150	delta_usecs = ktime_to_us(delta_ktime);
   4151
   4152	/* don't use too short time intervals */
   4153	if (delta_usecs < 1000) {
   4154		if (blk_queue_nonrot(bfqd->queue))
   4155			 /*
   4156			  * give same worst-case guarantees as idling
   4157			  * for seeky
   4158			  */
   4159			*delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC;
   4160		else /* charge at least one seek */
   4161			*delta_ms = bfq_slice_idle / NSEC_PER_MSEC;
   4162
   4163		return slow;
   4164	}
   4165
   4166	*delta_ms = delta_usecs / USEC_PER_MSEC;
   4167
   4168	/*
   4169	 * Use only long (> 20ms) intervals to filter out excessive
   4170	 * spikes in service rate estimation.
   4171	 */
   4172	if (delta_usecs > 20000) {
   4173		/*
   4174		 * Caveat for rotational devices: processes doing I/O
   4175		 * in the slower disk zones tend to be slow(er) even
   4176		 * if not seeky. In this respect, the estimated peak
   4177		 * rate is likely to be an average over the disk
   4178		 * surface. Accordingly, to not be too harsh with
   4179		 * unlucky processes, a process is deemed slow only if
   4180		 * its rate has been lower than half of the estimated
   4181		 * peak rate.
   4182		 */
   4183		slow = bfqq->entity.service < bfqd->bfq_max_budget / 2;
   4184	}
   4185
   4186	bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow);
   4187
   4188	return slow;
   4189}
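       /*
        * Numerical example with illustrative values: let the measured
        * interval be delta_usecs == 50000 (50 ms) and let
        * bfqd->bfq_max_budget == 16384 sectors. Since 50000 > 20000,
        * the rate check applies: bfqq is deemed slow only if it
        * received fewer than 16384 / 2 == 8192 sectors of service.
        * For intervals shorter than 1000 us the sample is discarded
        * and seekiness alone decides the outcome.
        */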
   4190
   4191/*
   4192 * To be deemed as soft real-time, an application must meet two
   4193 * requirements. First, the application must not require an average
   4194 * bandwidth higher than the approximate bandwidth required to play back or
   4195 * record a compressed high-definition video.
   4196 * The next function is invoked on the completion of the last request of a
   4197 * batch, to compute the next-start time instant, soft_rt_next_start, such
   4198 * that, if the next request of the application does not arrive before
   4199 * soft_rt_next_start, then the above requirement on the bandwidth is met.
   4200 *
   4201 * The second requirement is that the request pattern of the application is
   4202 * isochronous, i.e., that, after issuing a request or a batch of requests,
   4203 * the application stops issuing new requests until all its pending requests
   4204 * have been completed. After that, the application may issue a new batch,
   4205 * and so on.
   4206 * For this reason the next function is invoked to compute
   4207 * soft_rt_next_start only for applications that meet this requirement,
   4208 * whereas soft_rt_next_start is set to infinity for applications that do
   4209 * not.
   4210 *
   4211 * Unfortunately, even a greedy (i.e., I/O-bound) application may
   4212 * happen to meet, occasionally or systematically, both the above
   4213 * bandwidth and isochrony requirements. This may happen at least in
   4214 * the following circumstances. First, if the CPU load is high. The
   4215 * application may stop issuing requests while the CPUs are busy
   4216 * serving other processes, then restart, then stop again for a while,
   4217 * and so on. The other circumstances are related to the storage
   4218 * device: the storage device is highly loaded or reaches a low-enough
   4219 * throughput with the I/O of the application (e.g., because the I/O
   4220 * is random and/or the device is slow). In all these cases, the
   4221 * I/O of the application may be simply slowed down enough to meet
   4222 * the bandwidth and isochrony requirements. To reduce the probability
   4223 * that greedy applications are deemed as soft real-time in these
   4224 * corner cases, a further rule is used in the computation of
   4225 * soft_rt_next_start: the return value of this function is forced to
   4226 * be higher than the maximum between the following two quantities.
   4227 *
   4228 * (a) Current time plus: (1) the maximum time for which the arrival
   4229 *     of a request is waited for when a sync queue becomes idle,
   4230 *     namely bfqd->bfq_slice_idle, and (2) a few extra jiffies. We
   4231 *     postpone for a moment the reason for adding a few extra
   4232 *     jiffies; we get back to it after next item (b).  Lower-bounding
   4233 *     the return value of this function with the current time plus
   4234 *     bfqd->bfq_slice_idle tends to filter out greedy applications,
   4235 *     because the latter issue their next request as soon as possible
   4236 *     after the last one has been completed. In contrast, a soft
   4237 *     real-time application spends some time processing data, after a
   4238 *     batch of its requests has been completed.
   4239 *
   4240 * (b) Current value of bfqq->soft_rt_next_start. As pointed out
   4241 *     above, greedy applications may happen to meet both the
   4242 *     bandwidth and isochrony requirements under heavy CPU or
   4243 *     storage-device load. In more detail, in these scenarios, these
   4244 *     applications happen, only for limited time periods, to do I/O
   4245 *     slowly enough to meet all the requirements described so far,
   4246 *     including the filtering in above item (a). These slow-speed
   4247 *     time intervals are usually interspersed between other time
   4248 *     intervals during which these applications do I/O at a very high
   4249 *     speed. Fortunately, exactly because of the high speed of the
   4250 *     I/O in the high-speed intervals, the values returned by this
   4251 *     function happen to be so high, near the end of any such
   4252 *     high-speed interval, as to be likely to fall *after* the end of
   4253 *     the low-speed time interval that follows. These high values are
   4254 *     stored in bfqq->soft_rt_next_start after each invocation of
   4255 *     this function. As a consequence, if the last value of
   4256 *     bfqq->soft_rt_next_start is constantly used to lower-bound the
   4257 *     next value that this function may return, then, from the very
   4258 *     beginning of a low-speed interval, bfqq->soft_rt_next_start is
   4259 *     likely to be constantly kept so high that any I/O request
   4260 *     issued during the low-speed interval is considered as arriving
   4261 *     too soon for the application to be deemed as soft
   4262 *     real-time. Then, in the high-speed interval that follows, the
   4263 *     application will not be deemed as soft real-time, just because
   4264 *     it will do I/O at a high speed. And so on.
   4265 *
   4266 * Getting back to the filtering in item (a), in the following two
   4267 * cases this filtering might be easily passed by a greedy
   4268 * application, if the reference quantity was just
   4269 * bfqd->bfq_slice_idle:
   4270 * 1) HZ is so low that the duration of a jiffy is comparable to or
   4271 *    higher than bfqd->bfq_slice_idle. This happens, e.g., on slow
   4272 *    devices with HZ=100. The time granularity may be so coarse
   4273 *    that the approximation, in jiffies, of bfqd->bfq_slice_idle
   4274 *    is rather lower than the exact value.
   4275 * 2) jiffies, instead of increasing at a constant rate, may stop increasing
   4276 *    for a while, then suddenly 'jump' by several units to recover the lost
   4277 *    increments. This seems to happen, e.g., inside virtual machines.
   4278 * To address this issue, in the filtering in (a) we do not use as a
   4279 * reference time interval just bfqd->bfq_slice_idle, but
   4280 * bfqd->bfq_slice_idle plus a few jiffies. In particular, we add the
   4281 * minimum number of jiffies for which the filter seems to be quite
   4282 * precise also in embedded systems and KVM/QEMU virtual machines.
   4283 */
   4284static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
   4285						struct bfq_queue *bfqq)
   4286{
   4287	return max3(bfqq->soft_rt_next_start,
   4288		    bfqq->last_idle_bklogged +
   4289		    HZ * bfqq->service_from_backlogged /
   4290		    bfqd->bfq_wr_max_softrt_rate,
   4291		    jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
   4292}
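       /*
        * Worked instance of the max3() above, with illustrative
        * values: HZ == 250, bfq_wr_max_softrt_rate == 7000 sectors/s,
        * service_from_backlogged == 2048 sectors, last_idle_bklogged
        * == jiffies - 10 and bfq_slice_idle == 8 ms (2 jiffies). The
        * bandwidth bound is (jiffies - 10) + 250 * 2048 / 7000 =
        * jiffies + 63; the greed filter is jiffies + 2 + 4 = jiffies
        * + 6. If the stored soft_rt_next_start is lower than both,
        * the next request must arrive at least 63 jiffies from now
        * for bfqq to be deemed soft real-time.
        */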
   4293
   4294/**
   4295 * bfq_bfqq_expire - expire a queue.
   4296 * @bfqd: device owning the queue.
   4297 * @bfqq: the queue to expire.
   4298 * @compensate: if true, compensate for the time spent idling.
   4299 * @reason: the reason causing the expiration.
   4300 *
   4301 * If the process associated with bfqq does slow I/O (e.g., because it
   4302 * issues random requests), we charge bfqq with the time it has been
   4303 * in service instead of the service it has received (see
   4304 * bfq_bfqq_charge_time for details on how this goal is achieved). As
   4305 * a consequence, bfqq will typically get higher timestamps upon
   4306 * reactivation, and hence it will be rescheduled as if it had
   4307 * received more service than what it has actually received. In the
   4308 * end, bfqq receives less service in proportion to how slowly its
   4309 * associated process consumes its budgets (and hence how seriously it
   4310 * tends to lower the throughput). In addition, this time-charging
   4311 * strategy guarantees time fairness among slow processes. In
   4312 * contrast, if the process associated with bfqq is not slow, we
   4313 * charge bfqq exactly with the service it has received.
   4314 *
   4315 * Charging time to the first type of queues and the exact service to
   4316 * the other has the effect of using the WF2Q+ policy to schedule the
   4317 * former on a timeslice basis, without violating service domain
   4318 * guarantees among the latter.
   4319 */
   4320void bfq_bfqq_expire(struct bfq_data *bfqd,
   4321		     struct bfq_queue *bfqq,
   4322		     bool compensate,
   4323		     enum bfqq_expiration reason)
   4324{
   4325	bool slow;
   4326	unsigned long delta = 0;
   4327	struct bfq_entity *entity = &bfqq->entity;
   4328
   4329	/*
   4330	 * Check whether the process is slow (see bfq_bfqq_is_slow).
   4331	 */
   4332	slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta);
   4333
   4334	/*
   4335	 * As above explained, charge slow (typically seeky) and
   4336	 * timed-out queues with the time and not the service
   4337	 * received, to favor sequential workloads.
   4338	 *
   4339	 * Processes doing I/O in the slower disk zones will tend to
   4340	 * be slow(er) even if not seeky. Therefore, since the
   4341	 * estimated peak rate is actually an average over the disk
   4342	 * surface, these processes may timeout just for bad luck. To
   4343	 * avoid punishing them, do not charge time to processes that
   4344	 * succeeded in consuming at least 2/3 of their budget. This
   4345	 * allows BFQ to preserve enough elasticity to still perform
   4346	 * bandwidth, and not time, distribution with little unlucky
   4347	 * or quasi-sequential processes.
   4348	 */
   4349	if (bfqq->wr_coeff == 1 &&
   4350	    (slow ||
   4351	     (reason == BFQQE_BUDGET_TIMEOUT &&
   4352	      bfq_bfqq_budget_left(bfqq) >=  entity->budget / 3)))
   4353		bfq_bfqq_charge_time(bfqd, bfqq, delta);
   4354
   4355	if (bfqd->low_latency && bfqq->wr_coeff == 1)
   4356		bfqq->last_wr_start_finish = jiffies;
   4357
   4358	if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 &&
   4359	    RB_EMPTY_ROOT(&bfqq->sort_list)) {
   4360		/*
   4361		 * If we get here, and there are no outstanding
   4362		 * requests, then the request pattern is isochronous
   4363		 * (see the comments on the function
   4364		 * bfq_bfqq_softrt_next_start()). Therefore we can
   4365		 * compute soft_rt_next_start.
   4366		 *
   4367		 * If, instead, the queue still has outstanding
   4368		 * requests, then we have to wait for the completion
   4369		 * of all the outstanding requests to discover whether
   4370		 * the request pattern is actually isochronous.
   4371		 */
   4372		if (bfqq->dispatched == 0)
   4373			bfqq->soft_rt_next_start =
   4374				bfq_bfqq_softrt_next_start(bfqd, bfqq);
   4375		else if (bfqq->dispatched > 0) {
   4376			/*
   4377			 * Schedule an update of soft_rt_next_start to when
   4378			 * the task may be discovered to be isochronous.
   4379			 */
   4380			bfq_mark_bfqq_softrt_update(bfqq);
   4381		}
   4382	}
   4383
   4384	bfq_log_bfqq(bfqd, bfqq,
   4385		"expire (%d, slow %d, num_disp %d, short_ttime %d)", reason,
   4386		slow, bfqq->dispatched, bfq_bfqq_has_short_ttime(bfqq));
   4387
   4388	/*
   4389	 * bfqq expired, so no total service time needs to be computed
   4390	 * any longer: reset state machine for measuring total service
   4391	 * times.
   4392	 */
   4393	bfqd->rqs_injected = bfqd->wait_dispatch = false;
   4394	bfqd->waited_rq = NULL;
   4395
   4396	/*
   4397	 * Increase, decrease or leave budget unchanged according to
   4398	 * reason.
   4399	 */
   4400	__bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
   4401	if (__bfq_bfqq_expire(bfqd, bfqq, reason))
   4402		/* bfqq is gone, no more actions on it */
   4403		return;
   4404
   4405	/* mark bfqq as waiting a request only if a bic still points to it */
   4406	if (!bfq_bfqq_busy(bfqq) &&
   4407	    reason != BFQQE_BUDGET_TIMEOUT &&
   4408	    reason != BFQQE_BUDGET_EXHAUSTED) {
   4409		bfq_mark_bfqq_non_blocking_wait_rq(bfqq);
   4410		/*
   4411		 * Not setting service to 0, because, if the next rq
   4412		 * arrives in time, the queue will go on receiving
   4413		 * service with this same budget (as if it never expired)
   4414		 */
   4415	} else
   4416		entity->service = 0;
   4417
   4418	/*
   4419	 * Reset the received-service counter for every parent entity.
   4420	 * Differently from what happens with bfqq->entity.service,
   4421	 * the resetting of this counter never needs to be postponed
   4422	 * for parent entities. In fact, in case bfqq may have a
   4423	 * chance to go on being served using the last, partially
   4424	 * consumed budget, bfqq->entity.service needs to be kept,
   4425	 * because if bfqq then actually goes on being served using
   4426	 * the same budget, the last value of bfqq->entity.service is
   4427	 * needed to properly decrement bfqq->entity.budget by the
   4428	 * portion already consumed. In contrast, it is not necessary
   4429	 * to keep entity->service for parent entities too, because
   4430	 * the bubble up of the new value of bfqq->entity.budget will
   4431	 * make sure that the budgets of parent entities are correct,
   4432	 * even in case bfqq and thus parent entities go on receiving
   4433	 * service with the same budget.
   4434	 */
   4435	entity = entity->parent;
   4436	for_each_entity(entity)
   4437		entity->service = 0;
   4438}
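       /*
        * Example of the time-charging path above, with illustrative
        * values: a non-weight-raised seeky queue with a 1000-sector
        * budget that consumed only 100 sectors in its service slot
        * still has 900 >= 1000 / 3 sectors left, so it is charged,
        * via bfq_bfqq_charge_time(), as if it had consumed the
        * service a sequential queue would have received in the same
        * time. Its finish timestamp grows accordingly and the queue
        * is scheduled less often, which preserves time fairness
        * among slow queues.
        */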
   4439
   4440/*
   4441 * Budget timeout is not implemented through a dedicated timer, but
   4442 * just checked on request arrivals and completions, as well as on
   4443 * idle timer expirations.
   4444 */
   4445static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
   4446{
   4447	return time_is_before_eq_jiffies(bfqq->budget_timeout);
   4448}
   4449
   4450/*
   4451 * If we expire a queue that is actively waiting (i.e., with the
   4452 * device idled) for the arrival of a new request, then we may incur
   4453 * the timestamp misalignment problem described in the body of the
   4454 * function __bfq_activate_entity. Hence we return true only if this
   4455 * condition does not hold, or if the queue is slow enough to deserve
   4456 * only to be kicked off for preserving a high throughput.
   4457 */
   4458static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
   4459{
   4460	bfq_log_bfqq(bfqq->bfqd, bfqq,
   4461		"may_budget_timeout: wait_request %d left %d timeout %d",
   4462		bfq_bfqq_wait_request(bfqq),
   4463			bfq_bfqq_budget_left(bfqq) >=  bfqq->entity.budget / 3,
   4464		bfq_bfqq_budget_timeout(bfqq));
   4465
   4466	return (!bfq_bfqq_wait_request(bfqq) ||
   4467		bfq_bfqq_budget_left(bfqq) >=  bfqq->entity.budget / 3)
   4468		&&
   4469		bfq_bfqq_budget_timeout(bfqq);
   4470}
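       /*
        * Example with illustrative numbers: a queue with
        * entity.budget == 900, budget_left == 400 (>= 900 / 3) and an
        * elapsed budget_timeout is expired even while waiting for a
        * new request; with budget_left == 200 (< 300) and the
        * wait-request flag set, it is kept in service to avoid the
        * timestamp misalignment described above.
        */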
   4471
   4472static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
   4473					     struct bfq_queue *bfqq)
   4474{
   4475	bool rot_without_queueing =
   4476		!blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag,
   4477		bfqq_sequential_and_IO_bound,
   4478		idling_boosts_thr;
   4479
   4480	/* No point in idling for bfqq if it won't get requests any longer */
   4481	if (unlikely(!bfqq_process_refs(bfqq)))
   4482		return false;
   4483
   4484	bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) &&
   4485		bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq);
   4486
   4487	/*
   4488	 * The next variable takes into account the cases where idling
   4489	 * boosts the throughput.
   4490	 *
   4491	 * The value of the variable is computed considering, first, that
   4492	 * idling is virtually always beneficial for the throughput if:
   4493	 * (a) the device is not NCQ-capable and rotational, or
   4494	 * (b) regardless of the presence of NCQ, the device is rotational and
   4495	 *     the request pattern for bfqq is I/O-bound and sequential, or
   4496	 * (c) regardless of whether it is rotational, the device is
   4497	 *     not NCQ-capable and the request pattern for bfqq is
   4498	 *     I/O-bound and sequential.
   4499	 *
   4500	 * Secondly, and in contrast to the above item (b), idling an
   4501	 * NCQ-capable flash-based device would not boost the
   4502	 * throughput even with sequential I/O; rather it would lower
   4503	 * the throughput in proportion to how fast the device
   4504	 * is. Accordingly, the next variable is true if any of the
   4505	 * above conditions (a), (b) or (c) is true, and, in
   4506	 * particular, happens to be false if bfqd is an NCQ-capable
   4507	 * flash-based device.
   4508	 */
   4509	idling_boosts_thr = rot_without_queueing ||
   4510		((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) &&
   4511		 bfqq_sequential_and_IO_bound);
   4512
   4513	/*
   4514	 * The return value of this function is equal to that of
   4515	 * idling_boosts_thr, unless a special case holds. In this
   4516	 * special case, described below, idling may cause problems to
   4517	 * weight-raised queues.
   4518	 *
   4519	 * When the request pool is saturated (e.g., in the presence
   4520	 * of write hogs), if the processes associated with
   4521	 * non-weight-raised queues ask for requests at a lower rate,
   4522	 * then processes associated with weight-raised queues have a
   4523	 * higher probability to get a request from the pool
   4524	 * immediately (or at least soon) when they need one. Thus
   4525	 * they have a higher probability to actually get a fraction
   4526	 * of the device throughput proportional to their high
   4527	 * weight. This is especially true with NCQ-capable drives,
   4528	 * which enqueue several requests in advance, and further
   4529	 * reorder internally-queued requests.
   4530	 *
   4531	 * For this reason, we force the return value to false if
   4532	 * there are weight-raised busy queues. In this case, and if
   4533	 * bfqq is not weight-raised, this guarantees that the device
   4534	 * is not idled for bfqq (if, instead, bfqq is weight-raised,
   4535	 * then idling will be guaranteed by another variable, see
   4536	 * below). Combined with the timestamping rules of BFQ (see
   4537	 * [1] for details), this behavior causes bfqq, and hence any
   4538	 * sync non-weight-raised queue, to get a lower number of
   4539	 * requests served, and thus to ask for a lower number of
   4540	 * requests from the request pool, before the busy
   4541	 * weight-raised queues get served again. This often mitigates
   4542	 * starvation problems in the presence of heavy write
   4543	 * workloads and NCQ, thereby guaranteeing a higher
   4544	 * application and system responsiveness in these hostile
   4545	 * scenarios.
   4546	 */
   4547	return idling_boosts_thr &&
   4548		bfqd->wr_busy_queues == 0;
   4549}
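       /*
        * Compact, informal summary of cases (a), (b) and (c) above,
        * with rot == !blk_queue_nonrot() and seq == sequential and
        * I/O-bound:
        *
        *   rot  hw_tag  seq    idling_boosts_thr
        *    1     0      *            1   (case a)
        *    1     1      1            1   (case b)
        *    0     0      1            1   (case c)
        *    0     1      *            0   (NCQ flash: keep queue full)
        *
        * Even when the table yields 1, the function still returns
        * false if some weight-raised queue is busy.
        */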
   4550
   4551/*
   4552 * For a queue that becomes empty, device idling is allowed only if
   4553 * this function returns true for that queue. As a consequence, since
   4554 * device idling plays a critical role for both throughput boosting
   4555 * and service guarantees, the return value of this function plays a
   4556 * critical role as well.
   4557 *
   4558 * In a nutshell, this function returns true only if idling is
   4559 * beneficial for throughput or, even if detrimental for throughput,
   4560 * idling is however necessary to preserve service guarantees (low
   4561 * latency, desired throughput distribution, ...). In particular, on
   4562 * NCQ-capable devices, this function tries to return false, so as to
   4563 * help keep the drives' internal queues full, whenever this helps the
   4564 * device boost the throughput without causing any service-guarantee
   4565 * issue.
   4566 *
   4567 * Most of the issues taken into account to get the return value of
   4568 * this function are not trivial. We discuss these issues in the two
   4569 * functions providing the main pieces of information needed by this
   4570 * function.
   4571 */
   4572static bool bfq_better_to_idle(struct bfq_queue *bfqq)
   4573{
   4574	struct bfq_data *bfqd = bfqq->bfqd;
   4575	bool idling_boosts_thr_with_no_issue, idling_needed_for_service_guar;
   4576
   4577	/* No point in idling for bfqq if it won't get requests any longer */
   4578	if (unlikely(!bfqq_process_refs(bfqq)))
   4579		return false;
   4580
   4581	if (unlikely(bfqd->strict_guarantees))
   4582		return true;
   4583
   4584	/*
   4585	 * Idling is performed only if slice_idle > 0. In addition, we
   4586	 * do not idle if
   4587	 * (a) bfqq is async
   4588	 * (b) bfqq is in the idle io prio class: in this case we do
   4589	 * not idle because we want to minimize the bandwidth that
   4590	 * queues in this class can steal from higher-priority queues
   4591	 */
   4592	if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) ||
   4593	   bfq_class_idle(bfqq))
   4594		return false;
   4595
   4596	idling_boosts_thr_with_no_issue =
   4597		idling_boosts_thr_without_issues(bfqd, bfqq);
   4598
   4599	idling_needed_for_service_guar =
   4600		idling_needed_for_service_guarantees(bfqd, bfqq);
   4601
   4602	/*
   4603	 * We have now the two components we need to compute the
   4604	 * return value of the function, which is true only if idling
   4605	 * either boosts the throughput (without issues), or is
   4606	 * necessary to preserve service guarantees.
   4607	 */
   4608	return idling_boosts_thr_with_no_issue ||
   4609		idling_needed_for_service_guar;
   4610}
   4611
   4612/*
   4613 * If the in-service queue is empty but the function bfq_better_to_idle
   4614 * returns true, then:
   4615 * 1) the queue must remain in service and cannot be expired, and
   4616 * 2) the device must be idled to wait for the possible arrival of a new
   4617 *    request for the queue.
   4618 * See the comments on the function bfq_better_to_idle for the reasons
   4619 * why performing device idling is the best choice to boost the throughput
   4620 * and preserve service guarantees when bfq_better_to_idle itself
   4621 * returns true.
   4622 */
   4623static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
   4624{
   4625	return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_better_to_idle(bfqq);
   4626}
   4627
   4628/*
   4629 * This function chooses the queue from which to pick the next extra
   4630 * I/O request to inject, if it finds a compatible queue. See the
   4631 * comments on bfq_update_inject_limit() for details on the injection
   4632 * mechanism, and for the definitions of the quantities mentioned
   4633 * below.
   4634 */
   4635static struct bfq_queue *
   4636bfq_choose_bfqq_for_injection(struct bfq_data *bfqd)
   4637{
   4638	struct bfq_queue *bfqq, *in_serv_bfqq = bfqd->in_service_queue;
   4639	unsigned int limit = in_serv_bfqq->inject_limit;
   4640	/*
   4641	 * If
   4642	 * - bfqq is not weight-raised and therefore does not carry
   4643	 *   time-critical I/O,
   4644	 * or
   4645	 * - regardless of whether bfqq is weight-raised, bfqq has
   4646	 *   however a long think time, during which it can absorb the
   4647	 *   effect of an appropriate number of extra I/O requests
   4648	 *   from other queues (see bfq_update_inject_limit for
   4649	 *   details on the computation of this number);
   4650	 * then injection can be performed without restrictions.
   4651	 */
   4652	bool in_serv_always_inject = in_serv_bfqq->wr_coeff == 1 ||
   4653		!bfq_bfqq_has_short_ttime(in_serv_bfqq);
   4654
   4655	/*
   4656	 * If
   4657	 * - the baseline total service time could not be sampled yet,
   4658	 *   so the inject limit happens to be still 0, and
   4659	 * - a lot of time has elapsed since the plugging of I/O
   4660	 *   dispatching started, so drive speed is being wasted
   4661	 *   significantly;
   4662	 * then temporarily raise inject limit to one request.
   4663	 */
   4664	if (limit == 0 && in_serv_bfqq->last_serv_time_ns == 0 &&
   4665	    bfq_bfqq_wait_request(in_serv_bfqq) &&
   4666	    time_is_before_eq_jiffies(bfqd->last_idling_start_jiffies +
   4667				      bfqd->bfq_slice_idle)
   4668		)
   4669		limit = 1;
   4670
   4671	if (bfqd->rq_in_driver >= limit)
   4672		return NULL;
   4673
   4674	/*
   4675	 * Linear search of the source queue for injection; but, with
   4676	 * a high probability, very few steps are needed to find a
   4677	 * candidate queue, i.e., a queue with enough budget left for
   4678	 * its next request. In fact:
   4679	 * - BFQ dynamically updates the budget of every queue so as
   4680	 *   to accommodate the expected backlog of the queue;
   4681	 * - if a queue gets all its requests dispatched as injected
   4682	 *   service, then the queue is removed from the active list
   4683	 *   (and re-added only if it gets new requests, but then it
   4684	 *   is assigned again enough budget for its new backlog).
   4685	 */
   4686	list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
   4687		if (!RB_EMPTY_ROOT(&bfqq->sort_list) &&
   4688		    (in_serv_always_inject || bfqq->wr_coeff > 1) &&
   4689		    bfq_serv_to_charge(bfqq->next_rq, bfqq) <=
   4690		    bfq_bfqq_budget_left(bfqq)) {
   4691			/*
   4692			 * Allow for only one large in-flight request
   4693			 * on non-rotational devices, for the
   4694			 * following reason. On non-rotational drives,
   4695			 * large requests take much longer than
   4696			 * smaller requests to be served. In addition,
   4697			 * the drive prefers to serve large requests
   4698			 * w.r.t. small ones, if it can choose. So,
   4699			 * having more than one large request queued
   4700			 * in the drive may easily make the next first
   4701			 * request of the in-service queue wait so
   4702			 * long as to break bfqq's service guarantees. On
   4703			 * the bright side, large requests let the
   4704			 * drive reach a very high throughput, even if
   4705			 * there is only one in-flight large request
   4706			 * at a time.
   4707			 */
   4708			if (blk_queue_nonrot(bfqd->queue) &&
   4709			    blk_rq_sectors(bfqq->next_rq) >=
   4710			    BFQQ_SECT_THR_NONROT)
   4711				limit = min_t(unsigned int, 1, limit);
   4712			else
   4713				limit = in_serv_bfqq->inject_limit;
   4714
   4715			if (bfqd->rq_in_driver < limit) {
   4716				bfqd->rqs_injected = true;
   4717				return bfqq;
   4718			}
   4719		}
   4720
   4721	return NULL;
   4722}
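       /*
        * Injection example with illustrative values: suppose
        * in_serv_bfqq->inject_limit == 2 and bfqd->rq_in_driver == 1.
        * The active list is scanned for a queue whose next request
        * fits in its remaining budget. On a non-rotational drive, if
        * the candidate's next request spans at least
        * BFQQ_SECT_THR_NONROT sectors, the limit is clamped to 1 and,
        * with one request already in the drive, no injection happens;
        * otherwise the candidate is returned and rqs_injected is set.
        */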
   4723
   4724/*
   4725 * Select a queue for service.  If we have a current queue in service,
   4726 * check whether to continue servicing it, or retrieve and set a new one.
   4727 */
   4728static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
   4729{
   4730	struct bfq_queue *bfqq;
   4731	struct request *next_rq;
   4732	enum bfqq_expiration reason = BFQQE_BUDGET_TIMEOUT;
   4733
   4734	bfqq = bfqd->in_service_queue;
   4735	if (!bfqq)
   4736		goto new_queue;
   4737
   4738	bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
   4739
   4740	/*
   4741	 * Do not expire bfqq for budget timeout if bfqq may be about
   4742	 * to enjoy device idling. The reason why, in this case, we
   4743	 * prevent bfqq from expiring is the same as in the comments
   4744	 * on the case where bfq_bfqq_must_idle() returns true, in
   4745	 * bfq_completed_request().
   4746	 */
   4747	if (bfq_may_expire_for_budg_timeout(bfqq) &&
   4748	    !bfq_bfqq_must_idle(bfqq))
   4749		goto expire;
   4750
   4751check_queue:
   4752	/*
   4753	 * This loop is rarely executed more than once. Even when it
   4754	 * happens, it is much more convenient to re-execute this loop
   4755	 * than to return NULL and trigger a new dispatch to get a
   4756	 * request served.
   4757	 */
   4758	next_rq = bfqq->next_rq;
   4759	/*
   4760	 * If bfqq has requests queued and it has enough budget left to
   4761	 * serve them, keep the queue, otherwise expire it.
   4762	 */
   4763	if (next_rq) {
   4764		if (bfq_serv_to_charge(next_rq, bfqq) >
   4765			bfq_bfqq_budget_left(bfqq)) {
   4766			/*
   4767			 * Expire the queue for budget exhaustion,
   4768			 * which makes sure that the next budget is
   4769			 * enough to serve the next request, even if
   4770			 * it comes from the fifo expired path.
   4771			 */
   4772			reason = BFQQE_BUDGET_EXHAUSTED;
   4773			goto expire;
   4774		} else {
   4775			/*
   4776			 * The idle timer may be pending because we may
   4777			 * not disable disk idling even when a new request
   4778			 * arrives.
   4779			 */
   4780			if (bfq_bfqq_wait_request(bfqq)) {
   4781				/*
   4782				 * If we get here: 1) at least a new request
   4783				 * has arrived but we have not disabled the
   4784				 * timer because the request was too small,
   4785				 * 2) then the block layer has unplugged
   4786				 * the device, causing the dispatch to be
   4787				 * invoked.
   4788				 *
   4789				 * Since the device is unplugged, now the
   4790				 * requests are probably large enough to
   4791				 * provide a reasonable throughput.
   4792				 * So we disable idling.
   4793				 */
   4794				bfq_clear_bfqq_wait_request(bfqq);
   4795				hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
   4796			}
   4797			goto keep_queue;
   4798		}
   4799	}
   4800
   4801	/*
   4802	 * No requests pending. However, if the in-service queue is idling
   4803	 * for a new request, or has requests waiting for a completion and
   4804	 * may idle after their completion, then keep it anyway.
   4805	 *
   4806	 * Yet, inject service from other queues if it boosts
   4807	 * throughput and is possible.
   4808	 */
   4809	if (bfq_bfqq_wait_request(bfqq) ||
   4810	    (bfqq->dispatched != 0 && bfq_better_to_idle(bfqq))) {
   4811		struct bfq_queue *async_bfqq =
   4812			bfqq->bic && bfqq->bic->bfqq[0] &&
   4813			bfq_bfqq_busy(bfqq->bic->bfqq[0]) &&
   4814			bfqq->bic->bfqq[0]->next_rq ?
   4815			bfqq->bic->bfqq[0] : NULL;
   4816		struct bfq_queue *blocked_bfqq =
   4817			!hlist_empty(&bfqq->woken_list) ?
   4818			container_of(bfqq->woken_list.first,
   4819				     struct bfq_queue,
   4820				     woken_list_node)
   4821			: NULL;
   4822
   4823		/*
   4824		 * The next four mutually-exclusive ifs decide
   4825		 * whether to try injection, and choose the queue to
   4826		 * pick an I/O request from.
   4827		 *
   4828		 * The first if checks whether the process associated
   4829		 * with bfqq has also async I/O pending. If so, it
   4830		 * injects such I/O unconditionally. Injecting async
   4831		 * I/O from the same process can cause no harm to the
   4832		 * process. On the contrary, it can only increase
   4833		 * bandwidth and reduce latency for the process.
   4834		 *
   4835		 * The second if checks whether there happens to be a
   4836		 * non-empty waker queue for bfqq, i.e., a queue whose
   4837		 * I/O needs to be completed for bfqq to receive new
   4838		 * I/O. This happens, e.g., if bfqq is associated with
   4839		 * a process that does some sync. A sync generates
   4840		 * extra blocking I/O, which must be completed before
   4841		 * the process associated with bfqq can go on with its
   4842		 * I/O. If the I/O of the waker queue is not served,
   4843		 * then bfqq remains empty, and no I/O is dispatched,
   4844		 * until the idle timeout fires for bfqq. This is
   4845		 * likely to result in lower bandwidth and higher
   4846		 * latencies for bfqq, and in a severe loss of total
   4847		 * throughput. The best action to take is therefore to
   4848		 * serve the waker queue as soon as possible. So do it
   4849		 * (without relying on the third alternative below for
   4850		 * eventually serving waker_bfqq's I/O; see the last
   4851		 * paragraph for further details). This systematic
   4852		 * injection of I/O from the waker queue does not
   4853		 * cause any delay to bfqq's I/O. On the contrary,
   4854		 * next bfqq's I/O is brought forward dramatically,
   4855		 * for it is not blocked for milliseconds.
   4856		 *
   4857		 * The third if checks whether there is a queue woken
   4858		 * by bfqq, and currently with pending I/O. Such a
   4859		 * woken queue does not steal bandwidth from bfqq,
   4860		 * because it remains soon without I/O if bfqq is not
   4861		 * served. So there is virtually no risk of loss of
   4862		 * bandwidth for bfqq if this woken queue has I/O
   4863		 * dispatched while bfqq is waiting for new I/O.
   4864		 *
   4865		 * The fourth if checks whether bfqq is a queue for
   4866		 * which it is better to avoid injection. It is so if
   4867		 * bfqq delivers more throughput when served without
   4868		 * any further I/O from other queues in the middle, or
   4869		 * if the service times of bfqq's I/O requests both
   4870		 * matter more than overall throughput and may be
   4871		 * easily inflated by injection (this happens if bfqq
   4872		 * has a short think time). If none of these
   4873		 * conditions holds, then a candidate queue for
   4874		 * injection is looked for through
   4875		 * bfq_choose_bfqq_for_injection(). Note that the
   4876		 * latter may return NULL (for example if the inject
   4877		 * limit for bfqq is currently 0).
   4878		 *
   4879		 * NOTE: motivation for the second alternative
   4880		 *
   4881		 * Thanks to the way the inject limit is updated in
   4882		 * bfq_update_has_short_ttime(), it is rather likely
   4883		 * that, if I/O is being plugged for bfqq and the
   4884		 * waker queue has pending I/O requests that are
   4885		 * blocking bfqq's I/O, then the fourth alternative
   4886		 * above lets the waker queue get served before the
   4887		 * I/O-plugging timeout fires. So one may deem the
   4888		 * second alternative superfluous. It is not, because
   4889		 * the fourth alternative may be way less effective in
   4890		 * case of a synchronization. For two main
   4891		 * reasons. First, throughput may be low because the
   4892		 * inject limit may be too low to guarantee the same
   4893		 * amount of injected I/O, from the waker queue or
   4894		 * other queues, that the second alternative
   4895		 * guarantees (the second alternative unconditionally
   4896		 * injects a pending I/O request of the waker queue
   4897		 * for each bfq_dispatch_request()). Second, with the
   4898		 * fourth alternative, the duration of the plugging,
   4899		 * i.e., the time before bfqq finally receives new I/O,
   4900		 * may not be minimized, because the waker queue may
   4901		 * happen to be served only after other queues.
   4902		 */
   4903		if (async_bfqq &&
   4904		    icq_to_bic(async_bfqq->next_rq->elv.icq) == bfqq->bic &&
   4905		    bfq_serv_to_charge(async_bfqq->next_rq, async_bfqq) <=
   4906		    bfq_bfqq_budget_left(async_bfqq))
   4907			bfqq = bfqq->bic->bfqq[0];
   4908		else if (bfqq->waker_bfqq &&
   4909			   bfq_bfqq_busy(bfqq->waker_bfqq) &&
   4910			   bfqq->waker_bfqq->next_rq &&
   4911			   bfq_serv_to_charge(bfqq->waker_bfqq->next_rq,
   4912					      bfqq->waker_bfqq) <=
   4913			   bfq_bfqq_budget_left(bfqq->waker_bfqq)
   4914			)
   4915			bfqq = bfqq->waker_bfqq;
   4916		else if (blocked_bfqq &&
   4917			   bfq_bfqq_busy(blocked_bfqq) &&
   4918			   blocked_bfqq->next_rq &&
   4919			   bfq_serv_to_charge(blocked_bfqq->next_rq,
   4920					      blocked_bfqq) <=
   4921			   bfq_bfqq_budget_left(blocked_bfqq)
   4922			)
   4923			bfqq = blocked_bfqq;
   4924		else if (!idling_boosts_thr_without_issues(bfqd, bfqq) &&
   4925			 (bfqq->wr_coeff == 1 || bfqd->wr_busy_queues > 1 ||
   4926			  !bfq_bfqq_has_short_ttime(bfqq)))
   4927			bfqq = bfq_choose_bfqq_for_injection(bfqd);
   4928		else
   4929			bfqq = NULL;
   4930
   4931		goto keep_queue;
   4932	}
   4933
   4934	reason = BFQQE_NO_MORE_REQUESTS;
   4935expire:
   4936	bfq_bfqq_expire(bfqd, bfqq, false, reason);
   4937new_queue:
   4938	bfqq = bfq_set_in_service_queue(bfqd);
   4939	if (bfqq) {
   4940		bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue");
   4941		goto check_queue;
   4942	}
   4943keep_queue:
   4944	if (bfqq)
   4945		bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue");
   4946	else
   4947		bfq_log(bfqd, "select_queue: no queue returned");
   4948
   4949	return bfqq;
   4950}
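       /*
        * Informal sketch of the control flow above:
        *
        *   no in-service queue ------------------------> new_queue
        *   budget timeout and idling not required ------> expire
        *   check_queue: next_rq fits budget ------------> keep_queue
        *                next_rq exceeds budget ---------> expire
        *   empty, but idling or injection worthwhile ---> keep_queue
        *   otherwise: expire (NO_MORE_REQUESTS) --------> new_queue
        */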
   4951
   4952static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
   4953{
   4954	struct bfq_entity *entity = &bfqq->entity;
   4955
   4956	if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
   4957		bfq_log_bfqq(bfqd, bfqq,
   4958			"raising period dur %u/%u msec, old coeff %u, w %d(%d)",
   4959			jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
   4960			jiffies_to_msecs(bfqq->wr_cur_max_time),
   4961			bfqq->wr_coeff,
   4962			bfqq->entity.weight, bfqq->entity.orig_weight);
   4963
   4964		if (entity->prio_changed)
   4965			bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
   4966
   4967		/*
   4968		 * If the queue was activated in a burst, or too much
   4969		 * time has elapsed from the beginning of this
   4970		 * weight-raising period, then end weight raising.
   4971		 */
   4972		if (bfq_bfqq_in_large_burst(bfqq))
   4973			bfq_bfqq_end_wr(bfqq);
   4974		else if (time_is_before_jiffies(bfqq->last_wr_start_finish +
   4975						bfqq->wr_cur_max_time)) {
   4976			if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time ||
   4977			time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt +
   4978					       bfq_wr_duration(bfqd))) {
   4979				/*
   4980				 * Either in interactive weight
   4981				 * raising, or in soft_rt weight
   4982				 * raising with the
   4983				 * interactive-weight-raising period
   4984				 * elapsed (so no switch back to
   4985				 * interactive weight raising).
   4986				 */
   4987				bfq_bfqq_end_wr(bfqq);
   4988			} else { /*
   4989				  * soft_rt finishing while still in
   4990				  * interactive period, switch back to
   4991				  * interactive weight raising
   4992				  */
   4993				switch_back_to_interactive_wr(bfqq, bfqd);
   4994				bfqq->entity.prio_changed = 1;
   4995			}
   4996		}
   4997		if (bfqq->wr_coeff > 1 &&
   4998		    bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time &&
   4999		    bfqq->service_from_wr > max_service_from_wr) {
   5000			/* see comments on max_service_from_wr */
   5001			bfq_bfqq_end_wr(bfqq);
   5002		}
   5003	}
   5004	/*
   5005	 * To improve latency (for this or other queues), immediately
   5006	 * update weight both if it must be raised and if it must be
   5007	 * lowered. Since entity may be on some active tree here, and
   5008	 * might have a pending change of its ioprio class, invoke the
   5009	 * next function with the last parameter unset (see the
   5010	 * comments on the function).
   5011	 */
   5012	if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1))
   5013		__bfq_entity_update_weight_prio(bfq_entity_service_tree(entity),
   5014						entity, false);
   5015}
   5016
   5017/*
   5018 * Dispatch next request from bfqq.
   5019 */
   5020static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
   5021						 struct bfq_queue *bfqq)
   5022{
   5023	struct request *rq = bfqq->next_rq;
   5024	unsigned long service_to_charge;
   5025
   5026	service_to_charge = bfq_serv_to_charge(rq, bfqq);
   5027
   5028	bfq_bfqq_served(bfqq, service_to_charge);
   5029
   5030	if (bfqq == bfqd->in_service_queue && bfqd->wait_dispatch) {
   5031		bfqd->wait_dispatch = false;
   5032		bfqd->waited_rq = rq;
   5033	}
   5034
   5035	bfq_dispatch_remove(bfqd->queue, rq);
   5036
   5037	if (bfqq != bfqd->in_service_queue)
   5038		goto return_rq;
   5039
   5040	/*
   5041	 * If weight raising has to terminate for bfqq, then next
   5042	 * function causes an immediate update of bfqq's weight,
   5043	 * without waiting for next activation. As a consequence, on
   5044	 * expiration, bfqq will be timestamped as if it had never been
   5045	 * weight-raised during this service slot, even if it has
   5046	 * received part or even most of the service as a
   5047	 * weight-raised queue. This inflates bfqq's timestamps, which
   5048	 * is beneficial, as bfqq is then more willing to leave the
   5049	 * device immediately to possible other weight-raised queues.
   5050	 */
   5051	bfq_update_wr_data(bfqd, bfqq);
   5052
   5053	/*
   5054	 * Expire bfqq, pretending that its budget expired, if bfqq
   5055	 * belongs to CLASS_IDLE and other queues are waiting for
   5056	 * service.
   5057	 */
   5058	if (!(bfq_tot_busy_queues(bfqd) > 1 && bfq_class_idle(bfqq)))
   5059		goto return_rq;
   5060
   5061	bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED);
   5062
   5063return_rq:
   5064	return rq;
   5065}
   5066
   5067static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
   5068{
   5069	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
   5070
   5071	/*
   5072	 * Avoiding lock: a race on bfqd->queued should cause at
   5073	 * most a call to dispatch for nothing
   5074	 */
   5075	return !list_empty_careful(&bfqd->dispatch) ||
   5076		READ_ONCE(bfqd->queued);
   5077}
   5078
   5079static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
   5080{
   5081	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
   5082	struct request *rq = NULL;
   5083	struct bfq_queue *bfqq = NULL;
   5084
   5085	if (!list_empty(&bfqd->dispatch)) {
   5086		rq = list_first_entry(&bfqd->dispatch, struct request,
   5087				      queuelist);
   5088		list_del_init(&rq->queuelist);
   5089
   5090		bfqq = RQ_BFQQ(rq);
   5091
   5092		if (bfqq) {
   5093			/*
   5094			 * Increment counters here, because this
   5095			 * dispatch does not follow the standard
   5096			 * dispatch flow (where counters are
   5097			 * incremented)
   5098			 */
   5099			bfqq->dispatched++;
   5100
   5101			goto inc_in_driver_start_rq;
   5102		}
   5103
   5104		/*
   5105		 * We exploit the bfq_finish_requeue_request hook to
   5106		 * decrement rq_in_driver, but
   5107		 * bfq_finish_requeue_request will not be invoked on
   5108		 * this request. So, to avoid unbalance, just start
   5109		 * this request, without incrementing rq_in_driver. As
   5110		 * a negative consequence, rq_in_driver is deceptively
   5111		 * lower than it should be while this request is in
   5112		 * service. This may cause bfq_schedule_dispatch to be
   5113		 * invoked uselessly.
   5114		 *
   5115		 * As for implementing an exact solution, the
   5116		 * bfq_finish_requeue_request hook, if defined, is
   5117		 * probably invoked also on this request. So, by
   5118		 * exploiting this hook, we could 1) increment
   5119		 * rq_in_driver here, and 2) decrement it in
   5120		 * bfq_finish_requeue_request. Such a solution would
   5121		 * let the value of the counter be always accurate,
   5122		 * but it would entail using an extra interface
   5123		 * function. This cost seems higher than the benefit,
   5124		 * since the frequency of non-elevator-private
   5125		 * requests is very low.
   5126		 */
   5127		goto start_rq;
   5128	}
   5129
   5130	bfq_log(bfqd, "dispatch requests: %d busy queues",
   5131		bfq_tot_busy_queues(bfqd));
   5132
   5133	if (bfq_tot_busy_queues(bfqd) == 0)
   5134		goto exit;
   5135
   5136	/*
   5137	 * Force device to serve one request at a time if
   5138	 * strict_guarantees is true. Forcing this service scheme is
   5139	 * currently the ONLY way to guarantee that the request
   5140	 * service order enforced by the scheduler is respected by a
   5141	 * queueing device. Otherwise the device is free even to make
   5142	 * some unlucky request wait for as long as the device
   5143	 * wishes.
   5144	 *
   5145	 * Of course, serving one request at a time may cause loss of
   5146	 * throughput.
   5147	 */
   5148	if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0)
   5149		goto exit;
   5150
   5151	bfqq = bfq_select_queue(bfqd);
   5152	if (!bfqq)
   5153		goto exit;
   5154
   5155	rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq);
   5156
   5157	if (rq) {
   5158inc_in_driver_start_rq:
   5159		bfqd->rq_in_driver++;
   5160start_rq:
   5161		rq->rq_flags |= RQF_STARTED;
   5162	}
   5163exit:
   5164	return rq;
   5165}
   5166
   5167#ifdef CONFIG_BFQ_CGROUP_DEBUG
   5168static void bfq_update_dispatch_stats(struct request_queue *q,
   5169				      struct request *rq,
   5170				      struct bfq_queue *in_serv_queue,
   5171				      bool idle_timer_disabled)
   5172{
   5173	struct bfq_queue *bfqq = rq ? RQ_BFQQ(rq) : NULL;
   5174
   5175	if (!idle_timer_disabled && !bfqq)
   5176		return;
   5177
   5178	/*
   5179	 * rq and bfqq are guaranteed to exist until this function
   5180	 * ends, for the following reasons. First, rq can be
   5181	 * dispatched to the device, and then can be completed and
   5182	 * freed, only after this function ends. Second, rq cannot be
   5183	 * merged (and thus freed because of a merge) any longer,
   5184	 * because it has already started. Thus rq cannot be freed
   5185	 * before this function ends, and, since rq has a reference to
   5186	 * bfqq, the same guarantee holds for bfqq too.
   5187	 *
   5188	 * In addition, the following queue lock guarantees that
   5189	 * bfqq_group(bfqq) exists as well.
   5190	 */
   5191	spin_lock_irq(&q->queue_lock);
   5192	if (idle_timer_disabled)
   5193		/*
   5194		 * Since the idle timer has been disabled,
   5195		 * in_serv_queue contained some request when
   5196		 * __bfq_dispatch_request was invoked above, which
   5197		 * implies that rq was picked exactly from
   5198		 * in_serv_queue. Thus in_serv_queue == bfqq, and is
   5199		 * therefore guaranteed to exist because of the above
   5200		 * arguments.
   5201		 */
   5202		bfqg_stats_update_idle_time(bfqq_group(in_serv_queue));
   5203	if (bfqq) {
   5204		struct bfq_group *bfqg = bfqq_group(bfqq);
   5205
   5206		bfqg_stats_update_avg_queue_size(bfqg);
   5207		bfqg_stats_set_start_empty_time(bfqg);
   5208		bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
   5209	}
   5210	spin_unlock_irq(&q->queue_lock);
   5211}
   5212#else
   5213static inline void bfq_update_dispatch_stats(struct request_queue *q,
   5214					     struct request *rq,
   5215					     struct bfq_queue *in_serv_queue,
   5216					     bool idle_timer_disabled) {}
   5217#endif /* CONFIG_BFQ_CGROUP_DEBUG */
   5218
   5219static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
   5220{
   5221	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
   5222	struct request *rq;
   5223	struct bfq_queue *in_serv_queue;
   5224	bool waiting_rq, idle_timer_disabled = false;
   5225
   5226	spin_lock_irq(&bfqd->lock);
   5227
   5228	in_serv_queue = bfqd->in_service_queue;
   5229	waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
   5230
   5231	rq = __bfq_dispatch_request(hctx);
   5232	if (in_serv_queue == bfqd->in_service_queue) {
   5233		idle_timer_disabled =
   5234			waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
   5235	}
   5236
   5237	spin_unlock_irq(&bfqd->lock);
   5238	bfq_update_dispatch_stats(hctx->queue, rq,
   5239			idle_timer_disabled ? in_serv_queue : NULL,
   5240				idle_timer_disabled);
   5241
   5242	return rq;
   5243}
   5244
   5245/*
   5246 * Task holds one reference to the queue, dropped when task exits.  Each rq
   5247 * in-flight on this queue also holds a reference, dropped when rq is freed.
   5248 *
   5249 * Scheduler lock must be held here. Recall not to use bfqq after calling
   5250 * this function on it.
   5251 */
   5252void bfq_put_queue(struct bfq_queue *bfqq)
   5253{
   5254	struct bfq_queue *item;
   5255	struct hlist_node *n;
   5256	struct bfq_group *bfqg = bfqq_group(bfqq);
   5257
   5258	if (bfqq->bfqd)
   5259		bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d",
   5260			     bfqq, bfqq->ref);
   5261
   5262	bfqq->ref--;
   5263	if (bfqq->ref)
   5264		return;
   5265
   5266	if (!hlist_unhashed(&bfqq->burst_list_node)) {
   5267		hlist_del_init(&bfqq->burst_list_node);
   5268		/*
   5269		 * Decrement also burst size after the removal, if the
   5270		 * process associated with bfqq is exiting, and thus
   5271		 * does not contribute to the burst any longer. This
   5272		 * decrement helps filter out false positives of large
   5273		 * bursts, when some short-lived process (often due to
   5274		 * the execution of commands by some service) happens
   5275		 * to start and exit while a complex application is
   5276		 * starting, and thus spawning several processes that
   5277		 * do I/O (and that *must not* be treated as a large
   5278		 * burst, see comments on bfq_handle_burst).
   5279		 *
   5280		 * In particular, the decrement is performed only if:
   5281		 * 1) bfqq is not a merged queue, because, if it is,
   5282		 * then this free of bfqq is not triggered by the exit
   5283		 * of the process bfqq is associated with, but exactly
   5284		 * by the fact that bfqq has just been merged.
   5285		 * 2) burst_size is greater than 0, to handle
   5286		 * unbalanced decrements. Unbalanced decrements may
    5287		 * happen in the following case: bfqq is inserted into
    5288		 * the current burst list--without incrementing
    5289		 * burst_size--because of a split, but the current
   5290		 * burst list is not the burst list bfqq belonged to
   5291		 * (see comments on the case of a split in
   5292		 * bfq_set_request).
   5293		 */
   5294		if (bfqq->bic && bfqq->bfqd->burst_size > 0)
   5295			bfqq->bfqd->burst_size--;
   5296	}
   5297
   5298	/*
   5299	 * bfqq does not exist any longer, so it cannot be woken by
   5300	 * any other queue, and cannot wake any other queue. Then bfqq
   5301	 * must be removed from the woken list of its possible waker
   5302	 * queue, and all queues in the woken list of bfqq must stop
   5303	 * having a waker queue. Strictly speaking, these updates
   5304	 * should be performed when bfqq remains with no I/O source
   5305	 * attached to it, which happens before bfqq gets freed. In
   5306	 * particular, this happens when the last process associated
   5307	 * with bfqq exits or gets associated with a different
   5308	 * queue. However, both events lead to bfqq being freed soon,
   5309	 * and dangling references would come out only after bfqq gets
   5310	 * freed. So these updates are done here, as a simple and safe
   5311	 * way to handle all cases.
   5312	 */
   5313	/* remove bfqq from woken list */
   5314	if (!hlist_unhashed(&bfqq->woken_list_node))
   5315		hlist_del_init(&bfqq->woken_list_node);
   5316
   5317	/* reset waker for all queues in woken list */
   5318	hlist_for_each_entry_safe(item, n, &bfqq->woken_list,
   5319				  woken_list_node) {
   5320		item->waker_bfqq = NULL;
   5321		hlist_del_init(&item->woken_list_node);
   5322	}
   5323
   5324	if (bfqq->bfqd && bfqq->bfqd->last_completed_rq_bfqq == bfqq)
   5325		bfqq->bfqd->last_completed_rq_bfqq = NULL;
   5326
   5327	kmem_cache_free(bfq_pool, bfqq);
   5328	bfqg_and_blkg_put(bfqg);
   5329}
   5330
   5331static void bfq_put_stable_ref(struct bfq_queue *bfqq)
   5332{
   5333	bfqq->stable_ref--;
   5334	bfq_put_queue(bfqq);
   5335}
   5336
   5337void bfq_put_cooperator(struct bfq_queue *bfqq)
   5338{
   5339	struct bfq_queue *__bfqq, *next;
   5340
   5341	/*
   5342	 * If this queue was scheduled to merge with another queue, be
   5343	 * sure to drop the reference taken on that queue (and others in
   5344	 * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
   5345	 */
   5346	__bfqq = bfqq->new_bfqq;
   5347	while (__bfqq) {
   5348		if (__bfqq == bfqq)
   5349			break;
   5350		next = __bfqq->new_bfqq;
   5351		bfq_put_queue(__bfqq);
   5352		__bfqq = next;
   5353	}
   5354}
   5355
   5356static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
   5357{
   5358	if (bfqq == bfqd->in_service_queue) {
   5359		__bfq_bfqq_expire(bfqd, bfqq, BFQQE_BUDGET_TIMEOUT);
   5360		bfq_schedule_dispatch(bfqd);
   5361	}
   5362
   5363	bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
   5364
   5365	bfq_put_cooperator(bfqq);
   5366
   5367	bfq_release_process_ref(bfqd, bfqq);
   5368}
   5369
   5370static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
   5371{
   5372	struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
   5373	struct bfq_data *bfqd;
   5374
   5375	if (bfqq)
   5376		bfqd = bfqq->bfqd; /* NULL if scheduler already exited */
   5377
   5378	if (bfqq && bfqd) {
   5379		unsigned long flags;
   5380
   5381		spin_lock_irqsave(&bfqd->lock, flags);
   5382		bfqq->bic = NULL;
   5383		bfq_exit_bfqq(bfqd, bfqq);
   5384		bic_set_bfqq(bic, NULL, is_sync);
   5385		spin_unlock_irqrestore(&bfqd->lock, flags);
   5386	}
   5387}
   5388
   5389static void bfq_exit_icq(struct io_cq *icq)
   5390{
   5391	struct bfq_io_cq *bic = icq_to_bic(icq);
   5392
   5393	if (bic->stable_merge_bfqq) {
   5394		struct bfq_data *bfqd = bic->stable_merge_bfqq->bfqd;
   5395
   5396		/*
   5397		 * bfqd is NULL if scheduler already exited, and in
   5398		 * that case this is the last time bfqq is accessed.
   5399		 */
   5400		if (bfqd) {
   5401			unsigned long flags;
   5402
   5403			spin_lock_irqsave(&bfqd->lock, flags);
   5404			bfq_put_stable_ref(bic->stable_merge_bfqq);
   5405			spin_unlock_irqrestore(&bfqd->lock, flags);
   5406		} else {
   5407			bfq_put_stable_ref(bic->stable_merge_bfqq);
   5408		}
   5409	}
   5410
   5411	bfq_exit_icq_bfqq(bic, true);
   5412	bfq_exit_icq_bfqq(bic, false);
   5413}
   5414
   5415/*
   5416 * Update the entity prio values; note that the new values will not
   5417 * be used until the next (re)activation.
   5418 */
   5419static void
   5420bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
   5421{
   5422	struct task_struct *tsk = current;
   5423	int ioprio_class;
   5424	struct bfq_data *bfqd = bfqq->bfqd;
   5425
   5426	if (!bfqd)
   5427		return;
   5428
   5429	ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
   5430	switch (ioprio_class) {
   5431	default:
   5432		pr_err("bdi %s: bfq: bad prio class %d\n",
   5433			bdi_dev_name(bfqq->bfqd->queue->disk->bdi),
   5434			ioprio_class);
   5435		fallthrough;
   5436	case IOPRIO_CLASS_NONE:
   5437		/*
   5438		 * No prio set, inherit CPU scheduling settings.
   5439		 */
   5440		bfqq->new_ioprio = task_nice_ioprio(tsk);
   5441		bfqq->new_ioprio_class = task_nice_ioclass(tsk);
   5442		break;
   5443	case IOPRIO_CLASS_RT:
   5444		bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
   5445		bfqq->new_ioprio_class = IOPRIO_CLASS_RT;
   5446		break;
   5447	case IOPRIO_CLASS_BE:
   5448		bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
   5449		bfqq->new_ioprio_class = IOPRIO_CLASS_BE;
   5450		break;
   5451	case IOPRIO_CLASS_IDLE:
   5452		bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE;
   5453		bfqq->new_ioprio = 7;
   5454		break;
   5455	}
   5456
   5457	if (bfqq->new_ioprio >= IOPRIO_NR_LEVELS) {
   5458		pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
   5459			bfqq->new_ioprio);
   5460		bfqq->new_ioprio = IOPRIO_NR_LEVELS - 1;
   5461	}
   5462
   5463	bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
   5464	bfq_log_bfqq(bfqd, bfqq, "new_ioprio %d new_weight %d",
   5465		     bfqq->new_ioprio, bfqq->entity.new_weight);
   5466	bfqq->entity.prio_changed = 1;
   5467}
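
/*
 * For concreteness (the linear mapping below is quoted from the
 * in-tree helper and stated here as an assumption):
 * bfq_ioprio_to_weight() computes
 *
 *   weight = (IOPRIO_NR_LEVELS - ioprio) * 10
 *
 * so level 0 gets weight 80 and level 7 gets weight 10. An ioprio
 * change therefore rescales the queue's share of the throughput at
 * its next (re)activation, as noted in the comment above.
 */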
   5468
   5469static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
   5470				       struct bio *bio, bool is_sync,
   5471				       struct bfq_io_cq *bic,
   5472				       bool respawn);
   5473
   5474static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
   5475{
   5476	struct bfq_data *bfqd = bic_to_bfqd(bic);
   5477	struct bfq_queue *bfqq;
   5478	int ioprio = bic->icq.ioc->ioprio;
   5479
   5480	/*
    5481	 * This condition may trigger on a newly created bic; be sure to
   5482	 * drop the lock before returning.
   5483	 */
   5484	if (unlikely(!bfqd) || likely(bic->ioprio == ioprio))
   5485		return;
   5486
   5487	bic->ioprio = ioprio;
   5488
   5489	bfqq = bic_to_bfqq(bic, false);
   5490	if (bfqq) {
   5491		bfq_release_process_ref(bfqd, bfqq);
   5492		bfqq = bfq_get_queue(bfqd, bio, false, bic, true);
   5493		bic_set_bfqq(bic, bfqq, false);
   5494	}
   5495
   5496	bfqq = bic_to_bfqq(bic, true);
   5497	if (bfqq)
   5498		bfq_set_next_ioprio_data(bfqq, bic);
   5499}
   5500
   5501static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
   5502			  struct bfq_io_cq *bic, pid_t pid, int is_sync)
   5503{
   5504	u64 now_ns = ktime_get_ns();
   5505
   5506	RB_CLEAR_NODE(&bfqq->entity.rb_node);
   5507	INIT_LIST_HEAD(&bfqq->fifo);
   5508	INIT_HLIST_NODE(&bfqq->burst_list_node);
   5509	INIT_HLIST_NODE(&bfqq->woken_list_node);
   5510	INIT_HLIST_HEAD(&bfqq->woken_list);
   5511
   5512	bfqq->ref = 0;
   5513	bfqq->bfqd = bfqd;
   5514
   5515	if (bic)
   5516		bfq_set_next_ioprio_data(bfqq, bic);
   5517
   5518	if (is_sync) {
   5519		/*
   5520		 * No need to mark as has_short_ttime if in
   5521		 * idle_class, because no device idling is performed
   5522		 * for queues in idle class
   5523		 */
   5524		if (!bfq_class_idle(bfqq))
   5525			/* tentatively mark as has_short_ttime */
   5526			bfq_mark_bfqq_has_short_ttime(bfqq);
   5527		bfq_mark_bfqq_sync(bfqq);
   5528		bfq_mark_bfqq_just_created(bfqq);
   5529	} else
   5530		bfq_clear_bfqq_sync(bfqq);
   5531
   5532	/* set end request to minus infinity from now */
   5533	bfqq->ttime.last_end_request = now_ns + 1;
   5534
   5535	bfqq->creation_time = jiffies;
   5536
   5537	bfqq->io_start_time = now_ns;
   5538
   5539	bfq_mark_bfqq_IO_bound(bfqq);
   5540
   5541	bfqq->pid = pid;
   5542
    5543	/* Tentative initial value to trade off between throughput and latency */
   5544	bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
   5545	bfqq->budget_timeout = bfq_smallest_from_now();
   5546
   5547	bfqq->wr_coeff = 1;
   5548	bfqq->last_wr_start_finish = jiffies;
   5549	bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now();
   5550	bfqq->split_time = bfq_smallest_from_now();
   5551
   5552	/*
   5553	 * To not forget the possibly high bandwidth consumed by a
   5554	 * process/queue in the recent past,
   5555	 * bfq_bfqq_softrt_next_start() returns a value at least equal
   5556	 * to the current value of bfqq->soft_rt_next_start (see
   5557	 * comments on bfq_bfqq_softrt_next_start).  Set
   5558	 * soft_rt_next_start to now, to mean that bfqq has consumed
   5559	 * no bandwidth so far.
   5560	 */
   5561	bfqq->soft_rt_next_start = jiffies;
   5562
   5563	/* first request is almost certainly seeky */
   5564	bfqq->seek_history = 1;
   5565}
   5566
   5567static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
   5568					       struct bfq_group *bfqg,
   5569					       int ioprio_class, int ioprio)
   5570{
   5571	switch (ioprio_class) {
   5572	case IOPRIO_CLASS_RT:
   5573		return &bfqg->async_bfqq[0][ioprio];
   5574	case IOPRIO_CLASS_NONE:
   5575		ioprio = IOPRIO_BE_NORM;
   5576		fallthrough;
   5577	case IOPRIO_CLASS_BE:
   5578		return &bfqg->async_bfqq[1][ioprio];
   5579	case IOPRIO_CLASS_IDLE:
   5580		return &bfqg->async_idle_bfqq;
   5581	default:
   5582		return NULL;
   5583	}
   5584}
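
/*
 * An illustrative mapping example (IOPRIO_BE_NORM is 4 in the
 * in-tree headers; stated here for concreteness):
 * - IOPRIO_CLASS_RT, level 2  -> &bfqg->async_bfqq[0][2]
 * - IOPRIO_CLASS_NONE         -> falls through as BE at level 4,
 *                                i.e. &bfqg->async_bfqq[1][4]
 * - IOPRIO_CLASS_IDLE         -> the single &bfqg->async_idle_bfqq
 * Async queues are thus shared per (group, class, level) rather
 * than per process, which is why bfq_get_queue() below reuses them.
 */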
   5585
   5586static struct bfq_queue *
   5587bfq_do_early_stable_merge(struct bfq_data *bfqd, struct bfq_queue *bfqq,
   5588			  struct bfq_io_cq *bic,
   5589			  struct bfq_queue *last_bfqq_created)
   5590{
   5591	struct bfq_queue *new_bfqq =
   5592		bfq_setup_merge(bfqq, last_bfqq_created);
   5593
   5594	if (!new_bfqq)
   5595		return bfqq;
   5596
   5597	if (new_bfqq->bic)
   5598		new_bfqq->bic->stably_merged = true;
   5599	bic->stably_merged = true;
   5600
   5601	/*
   5602	 * Reusing merge functions. This implies that
   5603	 * bfqq->bic must be set too, for
   5604	 * bfq_merge_bfqqs to correctly save bfqq's
   5605	 * state before killing it.
   5606	 */
   5607	bfqq->bic = bic;
   5608	bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq);
   5609
   5610	return new_bfqq;
   5611}
   5612
   5613/*
   5614 * Many throughput-sensitive workloads are made of several parallel
   5615 * I/O flows, with all flows generated by the same application, or
   5616 * more generically by the same task (e.g., system boot). The most
   5617 * counterproductive action with these workloads is plugging I/O
   5618 * dispatch when one of the bfq_queues associated with these flows
   5619 * remains temporarily empty.
   5620 *
   5621 * To avoid this plugging, BFQ has been using a burst-handling
   5622 * mechanism for years now. This mechanism has proven effective for
   5623 * throughput, and not detrimental for service guarantees. The
   5624 * following function pushes this mechanism a little bit further,
   5625 * basing on the following two facts.
   5626 *
    5627 * First, all the I/O flows of the same application or task
   5628 * contribute to the execution/completion of that common application
   5629 * or task. So the performance figures that matter are total
   5630 * throughput of the flows and task-wide I/O latency.  In particular,
   5631 * these flows do not need to be protected from each other, in terms
   5632 * of individual bandwidth or latency.
   5633 *
   5634 * Second, the above fact holds regardless of the number of flows.
   5635 *
    5636 * Putting these two facts together, this commit stably merges the
    5637 * bfq_queues associated with these I/O flows, i.e., with the
    5638 * processes that generate these I/O flows, regardless of how many
    5639 * processes are involved.
   5640 *
   5641 * To decide whether a set of bfq_queues is actually associated with
   5642 * the I/O flows of a common application or task, and to merge these
   5643 * queues stably, this function operates as follows: given a bfq_queue,
   5644 * say Q2, currently being created, and the last bfq_queue, say Q1,
   5645 * created before Q2, Q2 is merged stably with Q1 if
   5646 * - very little time has elapsed since when Q1 was created
   5647 * - Q2 has the same ioprio as Q1
   5648 * - Q2 belongs to the same group as Q1
   5649 *
   5650 * Merging bfq_queues also reduces scheduling overhead. A fio test
   5651 * with ten random readers on /dev/nullb shows a throughput boost of
    5652 * 40%, on a quad-core machine. Since BFQ's execution time amounts to ~50% of
   5653 * the total per-request processing time, the above throughput boost
   5654 * implies that BFQ's overhead is reduced by more than 50%.
   5655 *
   5656 * This new mechanism most certainly obsoletes the current
   5657 * burst-handling heuristics. We keep those heuristics for the moment.
   5658 */
   5659static struct bfq_queue *bfq_do_or_sched_stable_merge(struct bfq_data *bfqd,
   5660						      struct bfq_queue *bfqq,
   5661						      struct bfq_io_cq *bic)
   5662{
   5663	struct bfq_queue **source_bfqq = bfqq->entity.parent ?
   5664		&bfqq->entity.parent->last_bfqq_created :
   5665		&bfqd->last_bfqq_created;
   5666
   5667	struct bfq_queue *last_bfqq_created = *source_bfqq;
   5668
   5669	/*
   5670	 * If last_bfqq_created has not been set yet, then init it. If
   5671	 * it has been set already, but too long ago, then move it
   5672	 * forward to bfqq. Finally, move also if bfqq belongs to a
   5673	 * different group than last_bfqq_created, or if bfqq has a
   5674	 * different ioprio or ioprio_class. If none of these
   5675	 * conditions holds true, then try an early stable merge or
   5676	 * schedule a delayed stable merge.
   5677	 *
   5678	 * A delayed merge is scheduled (instead of performing an
   5679	 * early merge), in case bfqq might soon prove to be more
   5680	 * throughput-beneficial if not merged. Currently this is
    5681	 * possible only if the drive is rotational, with no queueing. For
   5682	 * such a drive, not merging bfqq is better for throughput if
   5683	 * bfqq happens to contain sequential I/O. So, we wait a
   5684	 * little bit for enough I/O to flow through bfqq. After that,
   5685	 * if such an I/O is sequential, then the merge is
   5686	 * canceled. Otherwise the merge is finally performed.
   5687	 */
   5688	if (!last_bfqq_created ||
   5689	    time_before(last_bfqq_created->creation_time +
   5690			msecs_to_jiffies(bfq_activation_stable_merging),
   5691			bfqq->creation_time) ||
   5692		bfqq->entity.parent != last_bfqq_created->entity.parent ||
   5693		bfqq->ioprio != last_bfqq_created->ioprio ||
   5694		bfqq->ioprio_class != last_bfqq_created->ioprio_class)
   5695		*source_bfqq = bfqq;
   5696	else if (time_after_eq(last_bfqq_created->creation_time +
   5697				 bfqd->bfq_burst_interval,
   5698				 bfqq->creation_time)) {
   5699		if (likely(bfqd->nonrot_with_queueing))
   5700			/*
   5701			 * With this type of drive, leaving
   5702			 * bfqq alone may provide no
   5703			 * throughput benefits compared with
   5704			 * merging bfqq. So merge bfqq now.
   5705			 */
   5706			bfqq = bfq_do_early_stable_merge(bfqd, bfqq,
   5707							 bic,
   5708							 last_bfqq_created);
   5709		else { /* schedule tentative stable merge */
   5710			/*
   5711			 * get reference on last_bfqq_created,
   5712			 * to prevent it from being freed,
   5713			 * until we decide whether to merge
   5714			 */
   5715			last_bfqq_created->ref++;
   5716			/*
   5717			 * need to keep track of stable refs, to
   5718			 * compute process refs correctly
   5719			 */
   5720			last_bfqq_created->stable_ref++;
   5721			/*
   5722			 * Record the bfqq to merge to.
   5723			 */
   5724			bic->stable_merge_bfqq = last_bfqq_created;
   5725		}
   5726	}
   5727
   5728	return bfqq;
   5729}
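
/*
 * A minimal sketch of the wraparound-safe comparison used above
 * (illustrative only; the kernel's own time_before()/time_after_eq()
 * helpers are the real interface): the difference between two
 * jiffies values is evaluated as a signed number, so the test stays
 * correct across a counter wrap.
 */
#if 0
#define sketch_time_before(a, b)	((long)((a) - (b)) < 0)
/*
 * The first branch above then reads: "Q1's creation time plus the
 * stable-merging activation window lies before Q2's creation time",
 * i.e., the window has expired and *source_bfqq simply moves forward
 * to the new queue.
 */
#endif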
   5730
   5731
   5732static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
   5733				       struct bio *bio, bool is_sync,
   5734				       struct bfq_io_cq *bic,
   5735				       bool respawn)
   5736{
   5737	const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
   5738	const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
   5739	struct bfq_queue **async_bfqq = NULL;
   5740	struct bfq_queue *bfqq;
   5741	struct bfq_group *bfqg;
   5742
   5743	bfqg = bfq_bio_bfqg(bfqd, bio);
   5744	if (!is_sync) {
   5745		async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
   5746						  ioprio);
   5747		bfqq = *async_bfqq;
   5748		if (bfqq)
   5749			goto out;
   5750	}
   5751
   5752	bfqq = kmem_cache_alloc_node(bfq_pool,
   5753				     GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
   5754				     bfqd->queue->node);
   5755
   5756	if (bfqq) {
   5757		bfq_init_bfqq(bfqd, bfqq, bic, current->pid,
   5758			      is_sync);
   5759		bfq_init_entity(&bfqq->entity, bfqg);
   5760		bfq_log_bfqq(bfqd, bfqq, "allocated");
   5761	} else {
   5762		bfqq = &bfqd->oom_bfqq;
   5763		bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
   5764		goto out;
   5765	}
   5766
   5767	/*
   5768	 * Pin the queue now that it's allocated, scheduler exit will
   5769	 * prune it.
   5770	 */
   5771	if (async_bfqq) {
   5772		bfqq->ref++; /*
   5773			      * Extra group reference, w.r.t. sync
   5774			      * queue. This extra reference is removed
   5775			      * only if bfqq->bfqg disappears, to
   5776			      * guarantee that this queue is not freed
   5777			      * until its group goes away.
   5778			      */
   5779		bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
   5780			     bfqq, bfqq->ref);
   5781		*async_bfqq = bfqq;
   5782	}
   5783
   5784out:
   5785	bfqq->ref++; /* get a process reference to this queue */
   5786
   5787	if (bfqq != &bfqd->oom_bfqq && is_sync && !respawn)
   5788		bfqq = bfq_do_or_sched_stable_merge(bfqd, bfqq, bic);
   5789	return bfqq;
   5790}
   5791
   5792static void bfq_update_io_thinktime(struct bfq_data *bfqd,
   5793				    struct bfq_queue *bfqq)
   5794{
   5795	struct bfq_ttime *ttime = &bfqq->ttime;
   5796	u64 elapsed;
   5797
   5798	/*
   5799	 * We are really interested in how long it takes for the queue to
   5800	 * become busy when there is no outstanding IO for this queue. So
   5801	 * ignore cases when the bfq queue has already IO queued.
   5802	 */
   5803	if (bfqq->dispatched || bfq_bfqq_busy(bfqq))
   5804		return;
   5805	elapsed = ktime_get_ns() - bfqq->ttime.last_end_request;
   5806	elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle);
   5807
   5808	ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
   5809	ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed,  8);
   5810	ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
   5811				     ttime->ttime_samples);
   5812}
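
/*
 * A worked example of the fixed-point average above (illustrative
 * only). Both ttime_samples and ttime_total follow the recurrence
 * x = (7*x + 256*s)/8, with s == 1 for the sample counter and
 * s == elapsed for the total, so both converge to 256*s:
 *
 *   samples: 0 -> 32 -> 60 -> 84 -> ... -> 256
 *
 * and ttime_mean = (ttime_total + 128) / ttime_samples then rounds
 * to the steady-state think time, each new sample weighing 1/8
 * against 7/8 of history.
 */
#if 0
/* Minimal model of the recurrence, outside any kernel context. */
static unsigned long long ewma_step(unsigned long long x,
				    unsigned long long sample)
{
	return (7 * x + 256 * sample) / 8;	/* 7/8 history, 1/8 new */
}
#endif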
   5813
   5814static void
   5815bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
   5816		       struct request *rq)
   5817{
   5818	bfqq->seek_history <<= 1;
   5819	bfqq->seek_history |= BFQ_RQ_SEEKY(bfqd, bfqq->last_request_pos, rq);
   5820
   5821	if (bfqq->wr_coeff > 1 &&
   5822	    bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
   5823	    BFQQ_TOTALLY_SEEKY(bfqq)) {
   5824		if (time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt +
   5825					   bfq_wr_duration(bfqd))) {
   5826			/*
   5827			 * In soft_rt weight raising with the
   5828			 * interactive-weight-raising period
   5829			 * elapsed (so no switch back to
   5830			 * interactive weight raising).
   5831			 */
   5832			bfq_bfqq_end_wr(bfqq);
   5833		} else { /*
   5834			  * stopping soft_rt weight raising
   5835			  * while still in interactive period,
   5836			  * switch back to interactive weight
   5837			  * raising
   5838			  */
   5839			switch_back_to_interactive_wr(bfqq, bfqd);
   5840			bfqq->entity.prio_changed = 1;
   5841		}
   5842	}
   5843}
   5844
   5845static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
   5846				       struct bfq_queue *bfqq,
   5847				       struct bfq_io_cq *bic)
   5848{
   5849	bool has_short_ttime = true, state_changed;
   5850
   5851	/*
   5852	 * No need to update has_short_ttime if bfqq is async or in
   5853	 * idle io prio class, or if bfq_slice_idle is zero, because
   5854	 * no device idling is performed for bfqq in this case.
   5855	 */
   5856	if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq) ||
   5857	    bfqd->bfq_slice_idle == 0)
   5858		return;
   5859
   5860	/* Idle window just restored, statistics are meaningless. */
   5861	if (time_is_after_eq_jiffies(bfqq->split_time +
   5862				     bfqd->bfq_wr_min_idle_time))
   5863		return;
   5864
   5865	/* Think time is infinite if no process is linked to
   5866	 * bfqq. Otherwise check average think time to decide whether
   5867	 * to mark as has_short_ttime. To this goal, compare average
   5868	 * think time with half the I/O-plugging timeout.
   5869	 */
   5870	if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
   5871	    (bfq_sample_valid(bfqq->ttime.ttime_samples) &&
   5872	     bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle>>1))
   5873		has_short_ttime = false;
   5874
   5875	state_changed = has_short_ttime != bfq_bfqq_has_short_ttime(bfqq);
   5876
   5877	if (has_short_ttime)
   5878		bfq_mark_bfqq_has_short_ttime(bfqq);
   5879	else
   5880		bfq_clear_bfqq_has_short_ttime(bfqq);
   5881
   5882	/*
   5883	 * Until the base value for the total service time gets
   5884	 * finally computed for bfqq, the inject limit does depend on
   5885	 * the think-time state (short|long). In particular, the limit
   5886	 * is 0 or 1 if the think time is deemed, respectively, as
   5887	 * short or long (details in the comments in
   5888	 * bfq_update_inject_limit()). Accordingly, the next
   5889	 * instructions reset the inject limit if the think-time state
   5890	 * has changed and the above base value is still to be
   5891	 * computed.
   5892	 *
   5893	 * However, the reset is performed only if more than 100 ms
   5894	 * have elapsed since the last update of the inject limit, or
   5895	 * (inclusive) if the change is from short to long think
   5896	 * time. The reason for this waiting is as follows.
   5897	 *
   5898	 * bfqq may have a long think time because of a
   5899	 * synchronization with some other queue, i.e., because the
   5900	 * I/O of some other queue may need to be completed for bfqq
   5901	 * to receive new I/O. Details in the comments on the choice
   5902	 * of the queue for injection in bfq_select_queue().
   5903	 *
   5904	 * As stressed in those comments, if such a synchronization is
   5905	 * actually in place, then, without injection on bfqq, the
    5906	 * blocking I/O cannot happen to be served while bfqq is in
   5907	 * service. As a consequence, if bfqq is granted
   5908	 * I/O-dispatch-plugging, then bfqq remains empty, and no I/O
   5909	 * is dispatched, until the idle timeout fires. This is likely
   5910	 * to result in lower bandwidth and higher latencies for bfqq,
   5911	 * and in a severe loss of total throughput.
   5912	 *
   5913	 * On the opposite end, a non-zero inject limit may allow the
   5914	 * I/O that blocks bfqq to be executed soon, and therefore
   5915	 * bfqq to receive new I/O soon.
   5916	 *
   5917	 * But, if the blocking gets actually eliminated, then the
   5918	 * next think-time sample for bfqq may be very low. This in
   5919	 * turn may cause bfqq's think time to be deemed
   5920	 * short. Without the 100 ms barrier, this new state change
   5921	 * would cause the body of the next if to be executed
    5922		 * immediately. But this would set the inject limit
    5923		 * to 0. Without injection, the blocking I/O would cause the
   5924	 * think time of bfqq to become long again, and therefore the
   5925	 * inject limit to be raised again, and so on. The only effect
   5926	 * of such a steady oscillation between the two think-time
   5927	 * states would be to prevent effective injection on bfqq.
   5928	 *
   5929	 * In contrast, if the inject limit is not reset during such a
   5930	 * long time interval as 100 ms, then the number of short
   5931	 * think time samples can grow significantly before the reset
   5932	 * is performed. As a consequence, the think time state can
   5933	 * become stable before the reset. Therefore there will be no
   5934	 * state change when the 100 ms elapse, and no reset of the
   5935	 * inject limit. The inject limit remains steadily equal to 1
   5936	 * both during and after the 100 ms. So injection can be
   5937	 * performed at all times, and throughput gets boosted.
   5938	 *
   5939	 * An inject limit equal to 1 is however in conflict, in
   5940	 * general, with the fact that the think time of bfqq is
   5941	 * short, because injection may be likely to delay bfqq's I/O
   5942	 * (as explained in the comments in
   5943	 * bfq_update_inject_limit()). But this does not happen in
   5944	 * this special case, because bfqq's low think time is due to
   5945	 * an effective handling of a synchronization, through
   5946	 * injection. In this special case, bfqq's I/O does not get
   5947	 * delayed by injection; on the contrary, bfqq's I/O is
   5948	 * brought forward, because it is not blocked for
   5949	 * milliseconds.
   5950	 *
   5951	 * In addition, serving the blocking I/O much sooner, and much
   5952	 * more frequently than once per I/O-plugging timeout, makes
   5953	 * it much quicker to detect a waker queue (the concept of
   5954	 * waker queue is defined in the comments in
   5955	 * bfq_add_request()). This makes it possible to start sooner
   5956	 * to boost throughput more effectively, by injecting the I/O
   5957	 * of the waker queue unconditionally on every
   5958	 * bfq_dispatch_request().
   5959	 *
   5960	 * One last, important benefit of not resetting the inject
   5961	 * limit before 100 ms is that, during this time interval, the
   5962	 * base value for the total service time is likely to get
   5963	 * finally computed for bfqq, freeing the inject limit from
   5964	 * its relation with the think time.
   5965	 */
   5966	if (state_changed && bfqq->last_serv_time_ns == 0 &&
   5967	    (time_is_before_eq_jiffies(bfqq->decrease_time_jif +
   5968				      msecs_to_jiffies(100)) ||
   5969	     !has_short_ttime))
   5970		bfq_reset_inject_limit(bfqd, bfqq);
   5971}
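
/*
 * A numeric illustration of the threshold above, assuming the
 * default slice_idle of 8 ms: think time is deemed short only while
 * the mean computed in bfq_update_io_thinktime() stays at or below
 * 4 ms (half the I/O-plugging timeout). A single long gap does not
 * flip the state by itself, because the mean moves by only 1/8 of
 * each new sample.
 */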
   5972
   5973/*
   5974 * Called when a new fs request (rq) is added to bfqq.  Check if there's
   5975 * something we should do about it.
   5976 */
   5977static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
   5978			    struct request *rq)
   5979{
   5980	if (rq->cmd_flags & REQ_META)
   5981		bfqq->meta_pending++;
   5982
   5983	bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
   5984
   5985	if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
   5986		bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
   5987				 blk_rq_sectors(rq) < 32;
   5988		bool budget_timeout = bfq_bfqq_budget_timeout(bfqq);
   5989
   5990		/*
   5991		 * There is just this request queued: if
   5992		 * - the request is small, and
   5993		 * - we are idling to boost throughput, and
   5994		 * - the queue is not to be expired,
   5995		 * then just exit.
   5996		 *
   5997		 * In this way, if the device is being idled to wait
   5998		 * for a new request from the in-service queue, we
   5999		 * avoid unplugging the device and committing the
   6000		 * device to serve just a small request. In contrast
   6001		 * we wait for the block layer to decide when to
   6002		 * unplug the device: hopefully, new requests will be
   6003		 * merged to this one quickly, then the device will be
   6004		 * unplugged and larger requests will be dispatched.
   6005		 */
   6006		if (small_req && idling_boosts_thr_without_issues(bfqd, bfqq) &&
   6007		    !budget_timeout)
   6008			return;
   6009
   6010		/*
   6011		 * A large enough request arrived, or idling is being
   6012		 * performed to preserve service guarantees, or
   6013		 * finally the queue is to be expired: in all these
   6014		 * cases disk idling is to be stopped, so clear
   6015		 * wait_request flag and reset timer.
   6016		 */
   6017		bfq_clear_bfqq_wait_request(bfqq);
   6018		hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
   6019
   6020		/*
   6021		 * The queue is not empty, because a new request just
   6022		 * arrived. Hence we can safely expire the queue, in
   6023		 * case of budget timeout, without risking that the
   6024		 * timestamps of the queue are not updated correctly.
   6025		 * See [1] for more details.
   6026		 */
   6027		if (budget_timeout)
   6028			bfq_bfqq_expire(bfqd, bfqq, false,
   6029					BFQQE_BUDGET_TIMEOUT);
   6030	}
   6031}
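
/*
 * Note on the "small" threshold above: blk_rq_sectors() counts
 * 512-byte sectors, so the requests treated as small are those
 * below 32 sectors, i.e. below 16 KiB, for which keeping the device
 * idling is preferable to committing it to such a small dispatch.
 */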
   6032
   6033static void bfqq_request_allocated(struct bfq_queue *bfqq)
   6034{
   6035	struct bfq_entity *entity = &bfqq->entity;
   6036
   6037	for_each_entity(entity)
   6038		entity->allocated++;
   6039}
   6040
   6041static void bfqq_request_freed(struct bfq_queue *bfqq)
   6042{
   6043	struct bfq_entity *entity = &bfqq->entity;
   6044
   6045	for_each_entity(entity)
   6046		entity->allocated--;
   6047}
   6048
   6049/* returns true if it causes the idle timer to be disabled */
   6050static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
   6051{
   6052	struct bfq_queue *bfqq = RQ_BFQQ(rq),
   6053		*new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true,
   6054						 RQ_BIC(rq));
   6055	bool waiting, idle_timer_disabled = false;
   6056
   6057	if (new_bfqq) {
   6058		/*
   6059		 * Release the request's reference to the old bfqq
   6060		 * and make sure one is taken to the shared queue.
   6061		 */
   6062		bfqq_request_allocated(new_bfqq);
   6063		bfqq_request_freed(bfqq);
   6064		new_bfqq->ref++;
   6065		/*
   6066		 * If the bic associated with the process
   6067		 * issuing this request still points to bfqq
   6068		 * (and thus has not been already redirected
   6069		 * to new_bfqq or even some other bfq_queue),
   6070		 * then complete the merge and redirect it to
   6071		 * new_bfqq.
   6072		 */
   6073		if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
   6074			bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
   6075					bfqq, new_bfqq);
   6076
   6077		bfq_clear_bfqq_just_created(bfqq);
   6078		/*
   6079		 * rq is about to be enqueued into new_bfqq,
   6080		 * release rq reference on bfqq
   6081		 */
   6082		bfq_put_queue(bfqq);
   6083		rq->elv.priv[1] = new_bfqq;
   6084		bfqq = new_bfqq;
   6085	}
   6086
   6087	bfq_update_io_thinktime(bfqd, bfqq);
   6088	bfq_update_has_short_ttime(bfqd, bfqq, RQ_BIC(rq));
   6089	bfq_update_io_seektime(bfqd, bfqq, rq);
   6090
   6091	waiting = bfqq && bfq_bfqq_wait_request(bfqq);
   6092	bfq_add_request(rq);
   6093	idle_timer_disabled = waiting && !bfq_bfqq_wait_request(bfqq);
   6094
   6095	rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
   6096	list_add_tail(&rq->queuelist, &bfqq->fifo);
   6097
   6098	bfq_rq_enqueued(bfqd, bfqq, rq);
   6099
   6100	return idle_timer_disabled;
   6101}
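
/*
 * For reference: rq->elv.priv[1] is the per-request bfq_queue
 * pointer that RQ_BFQQ() reads back. This is why the merge path
 * above both rewrites priv[1] and moves the allocated-request count
 * from the old queue to new_bfqq before rq is enqueued there.
 */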
   6102
   6103#ifdef CONFIG_BFQ_CGROUP_DEBUG
   6104static void bfq_update_insert_stats(struct request_queue *q,
   6105				    struct bfq_queue *bfqq,
   6106				    bool idle_timer_disabled,
   6107				    unsigned int cmd_flags)
   6108{
   6109	if (!bfqq)
   6110		return;
   6111
   6112	/*
   6113	 * bfqq still exists, because it can disappear only after
   6114	 * either it is merged with another queue, or the process it
   6115	 * is associated with exits. But both actions must be taken by
   6116	 * the same process currently executing this flow of
   6117	 * instructions.
   6118	 *
   6119	 * In addition, the following queue lock guarantees that
   6120	 * bfqq_group(bfqq) exists as well.
   6121	 */
   6122	spin_lock_irq(&q->queue_lock);
   6123	bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
   6124	if (idle_timer_disabled)
   6125		bfqg_stats_update_idle_time(bfqq_group(bfqq));
   6126	spin_unlock_irq(&q->queue_lock);
   6127}
   6128#else
   6129static inline void bfq_update_insert_stats(struct request_queue *q,
   6130					   struct bfq_queue *bfqq,
   6131					   bool idle_timer_disabled,
   6132					   unsigned int cmd_flags) {}
   6133#endif /* CONFIG_BFQ_CGROUP_DEBUG */
   6134
   6135static struct bfq_queue *bfq_init_rq(struct request *rq);
   6136
   6137static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
   6138			       bool at_head)
   6139{
   6140	struct request_queue *q = hctx->queue;
   6141	struct bfq_data *bfqd = q->elevator->elevator_data;
   6142	struct bfq_queue *bfqq;
   6143	bool idle_timer_disabled = false;
   6144	unsigned int cmd_flags;
   6145	LIST_HEAD(free);
   6146
   6147#ifdef CONFIG_BFQ_GROUP_IOSCHED
   6148	if (!cgroup_subsys_on_dfl(io_cgrp_subsys) && rq->bio)
   6149		bfqg_stats_update_legacy_io(q, rq);
   6150#endif
   6151	spin_lock_irq(&bfqd->lock);
   6152	bfqq = bfq_init_rq(rq);
   6153	if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
   6154		spin_unlock_irq(&bfqd->lock);
   6155		blk_mq_free_requests(&free);
   6156		return;
   6157	}
   6158
   6159	trace_block_rq_insert(rq);
   6160
   6161	if (!bfqq || at_head) {
   6162		if (at_head)
   6163			list_add(&rq->queuelist, &bfqd->dispatch);
   6164		else
   6165			list_add_tail(&rq->queuelist, &bfqd->dispatch);
   6166	} else {
   6167		idle_timer_disabled = __bfq_insert_request(bfqd, rq);
   6168		/*
   6169		 * Update bfqq, because, if a queue merge has occurred
   6170		 * in __bfq_insert_request, then rq has been
   6171		 * redirected into a new queue.
   6172		 */
   6173		bfqq = RQ_BFQQ(rq);
   6174
   6175		if (rq_mergeable(rq)) {
   6176			elv_rqhash_add(q, rq);
   6177			if (!q->last_merge)
   6178				q->last_merge = rq;
   6179		}
   6180	}
   6181
   6182	/*
   6183	 * Cache cmd_flags before releasing scheduler lock, because rq
   6184	 * may disappear afterwards (for example, because of a request
   6185	 * merge).
   6186	 */
   6187	cmd_flags = rq->cmd_flags;
   6188	spin_unlock_irq(&bfqd->lock);
   6189
   6190	bfq_update_insert_stats(q, bfqq, idle_timer_disabled,
   6191				cmd_flags);
   6192}
   6193
   6194static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
   6195				struct list_head *list, bool at_head)
   6196{
   6197	while (!list_empty(list)) {
   6198		struct request *rq;
   6199
   6200		rq = list_first_entry(list, struct request, queuelist);
   6201		list_del_init(&rq->queuelist);
   6202		bfq_insert_request(hctx, rq, at_head);
   6203	}
   6204}
   6205
   6206static void bfq_update_hw_tag(struct bfq_data *bfqd)
   6207{
   6208	struct bfq_queue *bfqq = bfqd->in_service_queue;
   6209
   6210	bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver,
   6211				       bfqd->rq_in_driver);
   6212
   6213	if (bfqd->hw_tag == 1)
   6214		return;
   6215
   6216	/*
   6217	 * This sample is valid if the number of outstanding requests
   6218	 * is large enough to allow a queueing behavior.  Note that the
   6219	 * sum is not exact, as it's not taking into account deactivated
   6220	 * requests.
   6221	 */
   6222	if (bfqd->rq_in_driver + bfqd->queued <= BFQ_HW_QUEUE_THRESHOLD)
   6223		return;
   6224
   6225	/*
    6226	 * If the active queue doesn't have enough requests and can idle,
    6227	 * bfq might not dispatch enough requests to the hardware. Don't
    6228	 * zero hw_tag in this case.
   6229	 */
   6230	if (bfqq && bfq_bfqq_has_short_ttime(bfqq) &&
   6231	    bfqq->dispatched + bfqq->queued[0] + bfqq->queued[1] <
   6232	    BFQ_HW_QUEUE_THRESHOLD &&
   6233	    bfqd->rq_in_driver < BFQ_HW_QUEUE_THRESHOLD)
   6234		return;
   6235
   6236	if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
   6237		return;
   6238
   6239	bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
   6240	bfqd->max_rq_in_driver = 0;
   6241	bfqd->hw_tag_samples = 0;
   6242
   6243	bfqd->nonrot_with_queueing =
   6244		blk_queue_nonrot(bfqd->queue) && bfqd->hw_tag;
   6245}
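
/*
 * In other words, with the in-tree values BFQ_HW_QUEUE_THRESHOLD == 4
 * and BFQ_HW_QUEUE_SAMPLES == 32 (quoted here for illustration):
 * hw_tag is re-evaluated only after 32 valid samples, and is set iff
 * rq_in_driver exceeded 4 requests at some point in that window,
 * i.e. iff the device demonstrably queues requests internally.
 */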
   6246
   6247static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
   6248{
   6249	u64 now_ns;
   6250	u32 delta_us;
   6251
   6252	bfq_update_hw_tag(bfqd);
   6253
   6254	bfqd->rq_in_driver--;
   6255	bfqq->dispatched--;
   6256
   6257	if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
   6258		/*
   6259		 * Set budget_timeout (which we overload to store the
   6260		 * time at which the queue remains with no backlog and
   6261		 * no outstanding request; used by the weight-raising
   6262		 * mechanism).
   6263		 */
   6264		bfqq->budget_timeout = jiffies;
   6265
   6266		bfq_weights_tree_remove(bfqd, bfqq);
   6267	}
   6268
   6269	now_ns = ktime_get_ns();
   6270
   6271	bfqq->ttime.last_end_request = now_ns;
   6272
   6273	/*
   6274	 * Using us instead of ns, to get a reasonable precision in
   6275	 * computing rate in next check.
   6276	 */
   6277	delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC);
   6278
   6279	/*
   6280	 * If the request took rather long to complete, and, according
   6281	 * to the maximum request size recorded, this completion latency
   6282	 * implies that the request was certainly served at a very low
   6283	 * rate (less than 1M sectors/sec), then the whole observation
   6284	 * interval that lasts up to this time instant cannot be a
   6285	 * valid time interval for computing a new peak rate.  Invoke
   6286	 * bfq_update_rate_reset to have the following three steps
   6287	 * taken:
   6288	 * - close the observation interval at the last (previous)
   6289	 *   request dispatch or completion
   6290	 * - compute rate, if possible, for that observation interval
   6291	 * - reset to zero samples, which will trigger a proper
   6292	 *   re-initialization of the observation interval on next
   6293	 *   dispatch
   6294	 */
   6295	if (delta_us > BFQ_MIN_TT/NSEC_PER_USEC &&
   6296	   (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us <
   6297			1UL<<(BFQ_RATE_SHIFT - 10))
   6298		bfq_update_rate_reset(bfqd, NULL);
   6299	bfqd->last_completion = now_ns;
   6300	/*
   6301	 * Shared queues are likely to receive I/O at a high
   6302	 * rate. This may deceptively let them be considered as wakers
   6303	 * of other queues. But a false waker will unjustly steal
   6304	 * bandwidth to its supposedly woken queue. So considering
   6305	 * also shared queues in the waking mechanism may cause more
   6306	 * control troubles than throughput benefits. Then reset
   6307	 * last_completed_rq_bfqq if bfqq is a shared queue.
   6308	 */
   6309	if (!bfq_bfqq_coop(bfqq))
   6310		bfqd->last_completed_rq_bfqq = bfqq;
   6311	else
   6312		bfqd->last_completed_rq_bfqq = NULL;
   6313
   6314	/*
   6315	 * If we are waiting to discover whether the request pattern
   6316	 * of the task associated with the queue is actually
   6317	 * isochronous, and both requisites for this condition to hold
   6318	 * are now satisfied, then compute soft_rt_next_start (see the
   6319	 * comments on the function bfq_bfqq_softrt_next_start()). We
   6320	 * do not compute soft_rt_next_start if bfqq is in interactive
   6321	 * weight raising (see the comments in bfq_bfqq_expire() for
   6322	 * an explanation). We schedule this delayed update when bfqq
   6323	 * expires, if it still has in-flight requests.
   6324	 */
   6325	if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
   6326	    RB_EMPTY_ROOT(&bfqq->sort_list) &&
   6327	    bfqq->wr_coeff != bfqd->bfq_wr_coeff)
   6328		bfqq->soft_rt_next_start =
   6329			bfq_bfqq_softrt_next_start(bfqd, bfqq);
   6330
   6331	/*
   6332	 * If this is the in-service queue, check if it needs to be expired,
   6333	 * or if we want to idle in case it has no pending requests.
   6334	 */
   6335	if (bfqd->in_service_queue == bfqq) {
   6336		if (bfq_bfqq_must_idle(bfqq)) {
   6337			if (bfqq->dispatched == 0)
   6338				bfq_arm_slice_timer(bfqd);
   6339			/*
   6340			 * If we get here, we do not expire bfqq, even
   6341			 * if bfqq was in budget timeout or had no
   6342			 * more requests (as controlled in the next
   6343			 * conditional instructions). The reason for
   6344			 * not expiring bfqq is as follows.
   6345			 *
   6346			 * Here bfqq->dispatched > 0 holds, but
   6347			 * bfq_bfqq_must_idle() returned true. This
   6348			 * implies that, even if no request arrives
   6349			 * for bfqq before bfqq->dispatched reaches 0,
   6350			 * bfqq will, however, not be expired on the
    6351			 * completion event that causes bfqq->dispatched
   6352			 * to reach zero. In contrast, on this event,
   6353			 * bfqq will start enjoying device idling
   6354			 * (I/O-dispatch plugging).
   6355			 *
   6356			 * But, if we expired bfqq here, bfqq would
   6357			 * not have the chance to enjoy device idling
   6358			 * when bfqq->dispatched finally reaches
   6359			 * zero. This would expose bfqq to violation
   6360			 * of its reserved service guarantees.
   6361			 */
   6362			return;
   6363		} else if (bfq_may_expire_for_budg_timeout(bfqq))
   6364			bfq_bfqq_expire(bfqd, bfqq, false,
   6365					BFQQE_BUDGET_TIMEOUT);
   6366		else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
   6367			 (bfqq->dispatched == 0 ||
   6368			  !bfq_better_to_idle(bfqq)))
   6369			bfq_bfqq_expire(bfqd, bfqq, false,
   6370					BFQQE_NO_MORE_REQUESTS);
   6371	}
   6372
   6373	if (!bfqd->rq_in_driver)
   6374		bfq_schedule_dispatch(bfqd);
   6375}
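
/*
 * A worked form of the low-rate check above: dividing both sides of
 *
 *   (last_rq_max_size << BFQ_RATE_SHIFT) / delta_us <
 *					1 << (BFQ_RATE_SHIFT - 10)
 *
 * by 2^BFQ_RATE_SHIFT gives last_rq_max_size / delta_us < 2^-10
 * sectors/us, i.e. a rate below 10^6/1024 ~= 977,000 sectors/sec,
 * the "less than 1M sectors/sec" bound mentioned above (roughly
 * 500 MB/s with 512-byte sectors).
 */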
   6376
   6377/*
   6378 * The processes associated with bfqq may happen to generate their
   6379 * cumulative I/O at a lower rate than the rate at which the device
   6380 * could serve the same I/O. This is rather probable, e.g., if only
   6381 * one process is associated with bfqq and the device is an SSD. It
   6382 * results in bfqq becoming often empty while in service. In this
   6383 * respect, if BFQ is allowed to switch to another queue when bfqq
   6384 * remains empty, then the device goes on being fed with I/O requests,
   6385 * and the throughput is not affected. In contrast, if BFQ is not
   6386 * allowed to switch to another queue---because bfqq is sync and
   6387 * I/O-dispatch needs to be plugged while bfqq is temporarily
   6388 * empty---then, during the service of bfqq, there will be frequent
   6389 * "service holes", i.e., time intervals during which bfqq gets empty
   6390 * and the device can only consume the I/O already queued in its
   6391 * hardware queues. During service holes, the device may even get to
   6392 * remaining idle. In the end, during the service of bfqq, the device
   6393 * is driven at a lower speed than the one it can reach with the kind
   6394 * of I/O flowing through bfqq.
   6395 *
   6396 * To counter this loss of throughput, BFQ implements a "request
   6397 * injection mechanism", which tries to fill the above service holes
   6398 * with I/O requests taken from other queues. The hard part in this
   6399 * mechanism is finding the right amount of I/O to inject, so as to
   6400 * both boost throughput and not break bfqq's bandwidth and latency
   6401 * guarantees. In this respect, the mechanism maintains a per-queue
   6402 * inject limit, computed as below. While bfqq is empty, the injection
   6403 * mechanism dispatches extra I/O requests only until the total number
   6404 * of I/O requests in flight---i.e., already dispatched but not yet
   6405 * completed---remains lower than this limit.
   6406 *
   6407 * A first definition comes in handy to introduce the algorithm by
   6408 * which the inject limit is computed.  We define as first request for
   6409 * bfqq, an I/O request for bfqq that arrives while bfqq is in
   6410 * service, and causes bfqq to switch from empty to non-empty. The
   6411 * algorithm updates the limit as a function of the effect of
   6412 * injection on the service times of only the first requests of
   6413 * bfqq. The reason for this restriction is that these are the
   6414 * requests whose service time is affected most, because they are the
   6415 * first to arrive after injection possibly occurred.
   6416 *
   6417 * To evaluate the effect of injection, the algorithm measures the
   6418 * "total service time" of first requests. We define as total service
   6419 * time of an I/O request, the time that elapses since when the
   6420 * request is enqueued into bfqq, to when it is completed. This
   6421 * quantity allows the whole effect of injection to be measured. It is
   6422 * easy to see why. Suppose that some requests of other queues are
   6423 * actually injected while bfqq is empty, and that a new request R
   6424 * then arrives for bfqq. If the device does start to serve all or
   6425 * part of the injected requests during the service hole, then,
   6426 * because of this extra service, it may delay the next invocation of
   6427 * the dispatch hook of BFQ. Then, even after R gets eventually
   6428 * dispatched, the device may delay the actual service of R if it is
   6429 * still busy serving the extra requests, or if it decides to serve,
   6430 * before R, some extra request still present in its queues. As a
   6431 * conclusion, the cumulative extra delay caused by injection can be
   6432 * easily evaluated by just comparing the total service time of first
   6433 * requests with and without injection.
   6434 *
   6435 * The limit-update algorithm works as follows. On the arrival of a
   6436 * first request of bfqq, the algorithm measures the total time of the
   6437 * request only if one of the three cases below holds, and, for each
   6438 * case, it updates the limit as described below:
   6439 *
   6440 * (1) If there is no in-flight request. This gives a baseline for the
   6441 *     total service time of the requests of bfqq. If the baseline has
   6442 *     not been computed yet, then, after computing it, the limit is
   6443 *     set to 1, to start boosting throughput, and to prepare the
   6444 *     ground for the next case. If the baseline has already been
    6445 *     computed, then it is updated, in case it turns out to be lower
   6446 *     than the previous value.
   6447 *
   6448 * (2) If the limit is higher than 0 and there are in-flight
   6449 *     requests. By comparing the total service time in this case with
    6450 *     the above baseline, it is possible to know to what extent the
   6451 *     current value of the limit is inflating the total service
   6452 *     time. If the inflation is below a certain threshold, then bfqq
   6453 *     is assumed to be suffering from no perceivable loss of its
   6454 *     service guarantees, and the limit is even tentatively
   6455 *     increased. If the inflation is above the threshold, then the
   6456 *     limit is decreased. Due to the lack of any hysteresis, this
   6457 *     logic makes the limit oscillate even in steady workload
   6458 *     conditions. Yet we opted for it, because it is fast in reaching
   6459 *     the best value for the limit, as a function of the current I/O
   6460 *     workload. To reduce oscillations, this step is disabled for a
   6461 *     short time interval after the limit happens to be decreased.
   6462 *
   6463 * (3) Periodically, after resetting the limit, to make sure that the
   6464 *     limit eventually drops in case the workload changes. This is
   6465 *     needed because, after the limit has gone safely up for a
   6466 *     certain workload, it is impossible to guess whether the
   6467 *     baseline total service time may have changed, without measuring
   6468 *     it again without injection. A more effective version of this
   6469 *     step might be to just sample the baseline, by interrupting
   6470 *     injection only once, and then to reset/lower the limit only if
   6471 *     the total service time with the current limit does happen to be
   6472 *     too large.
   6473 *
   6474 * More details on each step are provided in the comments on the
   6475 * pieces of code that implement these steps: the branch handling the
    6476 * transition from empty to non-empty in bfq_add_request(), the branch
   6477 * handling injection in bfq_select_queue(), and the function
   6478 * bfq_choose_bfqq_for_injection(). These comments also explain some
   6479 * exceptions, made by the injection mechanism in some special cases.
   6480 */
   6481static void bfq_update_inject_limit(struct bfq_data *bfqd,
   6482				    struct bfq_queue *bfqq)
   6483{
   6484	u64 tot_time_ns = ktime_get_ns() - bfqd->last_empty_occupied_ns;
   6485	unsigned int old_limit = bfqq->inject_limit;
   6486
   6487	if (bfqq->last_serv_time_ns > 0 && bfqd->rqs_injected) {
   6488		u64 threshold = (bfqq->last_serv_time_ns * 3)>>1;
   6489
   6490		if (tot_time_ns >= threshold && old_limit > 0) {
   6491			bfqq->inject_limit--;
   6492			bfqq->decrease_time_jif = jiffies;
   6493		} else if (tot_time_ns < threshold &&
   6494			   old_limit <= bfqd->max_rq_in_driver)
   6495			bfqq->inject_limit++;
   6496	}
   6497
   6498	/*
   6499	 * Either we still have to compute the base value for the
   6500	 * total service time, and there seem to be the right
   6501	 * conditions to do it, or we can lower the last base value
   6502	 * computed.
   6503	 *
   6504	 * NOTE: (bfqd->rq_in_driver == 1) means that there is no I/O
   6505	 * request in flight, because this function is in the code
   6506	 * path that handles the completion of a request of bfqq, and,
   6507	 * in particular, this function is executed before
   6508	 * bfqd->rq_in_driver is decremented in such a code path.
   6509	 */
   6510	if ((bfqq->last_serv_time_ns == 0 && bfqd->rq_in_driver == 1) ||
   6511	    tot_time_ns < bfqq->last_serv_time_ns) {
   6512		if (bfqq->last_serv_time_ns == 0) {
   6513			/*
   6514			 * Now we certainly have a base value: make sure we
   6515			 * start trying injection.
   6516			 */
   6517			bfqq->inject_limit = max_t(unsigned int, 1, old_limit);
   6518		}
   6519		bfqq->last_serv_time_ns = tot_time_ns;
   6520	} else if (!bfqd->rqs_injected && bfqd->rq_in_driver == 1)
   6521		/*
   6522		 * No I/O injected and no request still in service in
   6523		 * the drive: these are the exact conditions for
   6524		 * computing the base value of the total service time
   6525		 * for bfqq. So let's update this value, because it is
   6526		 * rather variable. For example, it varies if the size
   6527		 * or the spatial locality of the I/O requests in bfqq
   6528		 * change.
   6529		 */
   6530		bfqq->last_serv_time_ns = tot_time_ns;
   6531
   6532
   6533	/* update complete, not waiting for any request completion any longer */
   6534	bfqd->waited_rq = NULL;
   6535	bfqd->rqs_injected = false;
   6536}
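
/*
 * A small numeric example of the update rule above (illustrative
 * only): with a baseline last_serv_time_ns of 1 ms, the threshold is
 * 1.5 ms. A first request completing in 1.2 ms while injection is
 * active leaves room to raise the limit (inflation below 50%); one
 * completing in 1.6 ms lowers the limit and, via decrease_time_jif,
 * starts the short interval during which further updates are held
 * off.
 */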
   6537
   6538/*
   6539 * Handle either a requeue or a finish for rq. The things to do are
   6540 * the same in both cases: all references to rq are to be dropped. In
   6541 * particular, rq is considered completed from the point of view of
   6542 * the scheduler.
   6543 */
   6544static void bfq_finish_requeue_request(struct request *rq)
   6545{
   6546	struct bfq_queue *bfqq = RQ_BFQQ(rq);
   6547	struct bfq_data *bfqd;
   6548	unsigned long flags;
   6549
   6550	/*
   6551	 * rq either is not associated with any icq, or is an already
   6552	 * requeued request that has not (yet) been re-inserted into
   6553	 * a bfq_queue.
   6554	 */
   6555	if (!rq->elv.icq || !bfqq)
   6556		return;
   6557
   6558	bfqd = bfqq->bfqd;
   6559
   6560	if (rq->rq_flags & RQF_STARTED)
   6561		bfqg_stats_update_completion(bfqq_group(bfqq),
   6562					     rq->start_time_ns,
   6563					     rq->io_start_time_ns,
   6564					     rq->cmd_flags);
   6565
   6566	spin_lock_irqsave(&bfqd->lock, flags);
   6567	if (likely(rq->rq_flags & RQF_STARTED)) {
   6568		if (rq == bfqd->waited_rq)
   6569			bfq_update_inject_limit(bfqd, bfqq);
   6570
   6571		bfq_completed_request(bfqq, bfqd);
   6572	}
   6573	bfqq_request_freed(bfqq);
   6574	bfq_put_queue(bfqq);
   6575	RQ_BIC(rq)->requests--;
   6576	spin_unlock_irqrestore(&bfqd->lock, flags);
   6577
   6578	/*
   6579	 * Reset private fields. In case of a requeue, this allows
   6580	 * this function to correctly do nothing if it is spuriously
   6581	 * invoked again on this same request (see the check at the
   6582	 * beginning of the function). Probably, a better general
   6583	 * design would be to prevent blk-mq from invoking the requeue
   6584	 * or finish hooks of an elevator, for a request that is not
   6585	 * referred by that elevator.
    6586	 * referred to by that elevator.
   6587	 * Resetting the following fields would break the
   6588	 * request-insertion logic if rq is re-inserted into a bfq
   6589	 * internal queue, without a re-preparation. Here we assume
   6590	 * that re-insertions of requeued requests, without
   6591	 * re-preparation, can happen only for pass_through or at_head
   6592	 * requests (which are not re-inserted into bfq internal
   6593	 * queues).
   6594	 */
   6595	rq->elv.priv[0] = NULL;
   6596	rq->elv.priv[1] = NULL;
   6597}
   6598
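        /*
         * Complete the request as bfq_finish_requeue_request() does and,
         * in addition, drop the io_context reference taken by
         * ioc_find_get_icq() in bfq_prepare_request().
         */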
   6599static void bfq_finish_request(struct request *rq)
   6600{
   6601	bfq_finish_requeue_request(rq);
   6602
   6603	if (rq->elv.icq) {
   6604		put_io_context(rq->elv.icq->ioc);
   6605		rq->elv.icq = NULL;
   6606	}
   6607}
   6608
   6609/*
   6610 * Removes the association between the current task and bfqq, assuming
   6611 * that bic points to the bfq iocontext of the task.
   6612 * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
   6613 * was the last process referring to that bfqq.
   6614 */
   6615static struct bfq_queue *
   6616bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
   6617{
   6618	bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
   6619
   6620	if (bfqq_process_refs(bfqq) == 1) {
   6621		bfqq->pid = current->pid;
   6622		bfq_clear_bfqq_coop(bfqq);
   6623		bfq_clear_bfqq_split_coop(bfqq);
   6624		return bfqq;
   6625	}
   6626
   6627	bic_set_bfqq(bic, NULL, 1);
   6628
   6629	bfq_put_cooperator(bfqq);
   6630
   6631	bfq_release_process_ref(bfqq->bfqd, bfqq);
   6632	return NULL;
   6633}
   6634
   6635static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
   6636						   struct bfq_io_cq *bic,
   6637						   struct bio *bio,
   6638						   bool split, bool is_sync,
   6639						   bool *new_queue)
   6640{
   6641	struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
   6642
   6643	if (likely(bfqq && bfqq != &bfqd->oom_bfqq))
   6644		return bfqq;
   6645
   6646	if (new_queue)
   6647		*new_queue = true;
   6648
   6649	if (bfqq)
   6650		bfq_put_queue(bfqq);
   6651	bfqq = bfq_get_queue(bfqd, bio, is_sync, bic, split);
   6652
   6653	bic_set_bfqq(bic, bfqq, is_sync);
   6654	if (split && is_sync) {
   6655		if ((bic->was_in_burst_list && bfqd->large_burst) ||
   6656		    bic->saved_in_large_burst)
   6657			bfq_mark_bfqq_in_large_burst(bfqq);
   6658		else {
   6659			bfq_clear_bfqq_in_large_burst(bfqq);
   6660			if (bic->was_in_burst_list)
    6661				/*
    6662				 * If bfqq was in the current burst list
    6663				 * before being merged, then we have to
    6664				 * add it back. And we do not need to
    6665				 * increase burst_size, as we did not
    6666				 * decrement burst_size when we removed
    6667				 * bfqq from the burst list as a
    6668				 * consequence of a merge (see comments
    6669				 * in bfq_put_queue). In this respect, it
    6670				 * would be rather costly to know whether
    6671				 * the current burst list is still the
    6672				 * same burst list from which bfqq was
    6673				 * removed on the merge. To avoid this
    6674				 * cost, if bfqq was in a burst list,
    6675				 * then we add bfqq to the current burst
    6676				 * list without any further check. This
    6677				 * can cause inappropriate insertions,
    6678				 * but rarely enough to not harm the
    6679				 * detection of large bursts significantly.
    6680				 */
   6689				hlist_add_head(&bfqq->burst_list_node,
   6690					       &bfqd->burst_list);
   6691		}
   6692		bfqq->split_time = jiffies;
   6693	}
   6694
   6695	return bfqq;
   6696}
   6697
   6698/*
   6699 * Only reset private fields. The actual request preparation will be
   6700 * performed by bfq_init_rq, when rq is either inserted or merged. See
   6701 * comments on bfq_init_rq for the reason behind this delayed
   6702 * preparation.
   6703 */
   6704static void bfq_prepare_request(struct request *rq)
   6705{
   6706	rq->elv.icq = ioc_find_get_icq(rq->q);
   6707
   6708	/*
   6709	 * Regardless of whether we have an icq attached, we have to
   6710	 * clear the scheduler pointers, as they might point to
   6711	 * previously allocated bic/bfqq structs.
   6712	 */
   6713	rq->elv.priv[0] = rq->elv.priv[1] = NULL;
   6714}
   6715
   6716/*
   6717 * If needed, init rq, allocate bfq data structures associated with
   6718 * rq, and increment reference counters in the destination bfq_queue
    6719 * for rq. Return the destination bfq_queue for rq, or NULL if rq is
   6720 * not associated with any bfq_queue.
   6721 *
   6722 * This function is invoked by the functions that perform rq insertion
   6723 * or merging. One may have expected the above preparation operations
   6724 * to be performed in bfq_prepare_request, and not delayed to when rq
   6725 * is inserted or merged. The rationale behind this delayed
   6726 * preparation is that, after the prepare_request hook is invoked for
   6727 * rq, rq may still be transformed into a request with no icq, i.e., a
   6728 * request not associated with any queue. No bfq hook is invoked to
   6729 * signal this transformation. As a consequence, should these
   6730 * preparation operations be performed when the prepare_request hook
   6731 * is invoked, and should rq be transformed one moment later, bfq
   6732 * would end up in an inconsistent state, because it would have
   6733 * incremented some queue counters for an rq destined to
   6734 * transformation, without any chance to correctly lower these
    6735 * counters back. In contrast, no transformation can happen any
    6736 * longer after rq has been inserted or merged. So, it is safe to execute
   6737 * these preparation operations when rq is finally inserted or merged.
   6738 */
   6739static struct bfq_queue *bfq_init_rq(struct request *rq)
   6740{
   6741	struct request_queue *q = rq->q;
   6742	struct bio *bio = rq->bio;
   6743	struct bfq_data *bfqd = q->elevator->elevator_data;
   6744	struct bfq_io_cq *bic;
   6745	const int is_sync = rq_is_sync(rq);
   6746	struct bfq_queue *bfqq;
   6747	bool new_queue = false;
   6748	bool bfqq_already_existing = false, split = false;
   6749
   6750	if (unlikely(!rq->elv.icq))
   6751		return NULL;
   6752
   6753	/*
    6754	 * elv.priv[1] is set only if everything is set for this rq.
    6755	 * This holds true because this function is invoked only for
    6756	 * insertion or merging, and, after such events, a request
    6757	 * cannot be manipulated any longer before being removed from
    6758	 * bfq.
   6759	 */
   6760	if (rq->elv.priv[1])
   6761		return rq->elv.priv[1];
   6762
   6763	bic = icq_to_bic(rq->elv.icq);
   6764
   6765	bfq_check_ioprio_change(bic, bio);
   6766
   6767	bfq_bic_update_cgroup(bic, bio);
   6768
   6769	bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
   6770					 &new_queue);
   6771
   6772	if (likely(!new_queue)) {
   6773		/* If the queue was seeky for too long, break it apart. */
   6774		if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq) &&
   6775			!bic->stably_merged) {
   6776			struct bfq_queue *old_bfqq = bfqq;
   6777
   6778			/* Update bic before losing reference to bfqq */
   6779			if (bfq_bfqq_in_large_burst(bfqq))
   6780				bic->saved_in_large_burst = true;
   6781
   6782			bfqq = bfq_split_bfqq(bic, bfqq);
   6783			split = true;
   6784
   6785			if (!bfqq) {
   6786				bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
   6787								 true, is_sync,
   6788								 NULL);
   6789				bfqq->waker_bfqq = old_bfqq->waker_bfqq;
   6790				bfqq->tentative_waker_bfqq = NULL;
   6791
   6792				/*
    6793				 * If the waker queue disappears, then
    6794				 * bfqq->waker_bfqq must be reset. So
    6795				 * insert bfqq into the woken_list of
    6796				 * the waker. See bfq_check_waker for
    6797				 * details.
   6798				 */
   6799				if (bfqq->waker_bfqq)
   6800					hlist_add_head(&bfqq->woken_list_node,
   6801						       &bfqq->waker_bfqq->woken_list);
   6802			} else
   6803				bfqq_already_existing = true;
   6804		}
   6805	}
   6806
   6807	bfqq_request_allocated(bfqq);
   6808	bfqq->ref++;
   6809	bic->requests++;
   6810	bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d",
   6811		     rq, bfqq, bfqq->ref);
   6812
   6813	rq->elv.priv[0] = bic;
   6814	rq->elv.priv[1] = bfqq;
   6815
   6816	/*
   6817	 * If a bfq_queue has only one process reference, it is owned
    6818	 * by only this bic: we can then set bfqq->bic = bic. In
    6819	 * addition, if the queue has also just been split, we have to
   6820	 * resume its state.
   6821	 */
   6822	if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
   6823		bfqq->bic = bic;
   6824		if (split) {
   6825			/*
   6826			 * The queue has just been split from a shared
   6827			 * queue: restore the idle window and the
   6828			 * possible weight raising period.
   6829			 */
   6830			bfq_bfqq_resume_state(bfqq, bfqd, bic,
   6831					      bfqq_already_existing);
   6832		}
   6833	}
   6834
   6835	/*
   6836	 * Consider bfqq as possibly belonging to a burst of newly
   6837	 * created queues only if:
   6838	 * 1) A burst is actually happening (bfqd->burst_size > 0)
   6839	 * or
   6840	 * 2) There is no other active queue. In fact, if, in
   6841	 *    contrast, there are active queues not belonging to the
   6842	 *    possible burst bfqq may belong to, then there is no gain
   6843	 *    in considering bfqq as belonging to a burst, and
   6844	 *    therefore in not weight-raising bfqq. See comments on
   6845	 *    bfq_handle_burst().
   6846	 *
    6847	 * This filtering also helps eliminate false positives,
   6848	 * occurring when bfqq does not belong to an actual large
   6849	 * burst, but some background task (e.g., a service) happens
   6850	 * to trigger the creation of new queues very close to when
   6851	 * bfqq and its possible companion queues are created. See
   6852	 * comments on bfq_handle_burst() for further details also on
   6853	 * this issue.
   6854	 */
   6855	if (unlikely(bfq_bfqq_just_created(bfqq) &&
   6856		     (bfqd->burst_size > 0 ||
   6857		      bfq_tot_busy_queues(bfqd) == 0)))
   6858		bfq_handle_burst(bfqd, bfqq);
   6859
   6860	return bfqq;
   6861}
   6862
   6863static void
   6864bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq)
   6865{
   6866	enum bfqq_expiration reason;
   6867	unsigned long flags;
   6868
   6869	spin_lock_irqsave(&bfqd->lock, flags);
   6870
    6871	/*
    6872	 * bfqq may be subject to a race: check first whether bfqq is
    6873	 * still in service before acting on it. If bfqq is not in
    6874	 * service, it has already been expired through
    6875	 * __bfq_bfqq_expire(), and its wait_request flag has been
    6876	 * cleared in __bfq_bfqd_reset_in_service().
    6877	 */
   6878	if (bfqq != bfqd->in_service_queue) {
   6879		spin_unlock_irqrestore(&bfqd->lock, flags);
   6880		return;
   6881	}
   6882
   6883	bfq_clear_bfqq_wait_request(bfqq);
   6884
   6885	if (bfq_bfqq_budget_timeout(bfqq))
   6886		/*
   6887		 * Also here the queue can be safely expired
   6888		 * for budget timeout without wasting
   6889		 * guarantees
   6890		 */
   6891		reason = BFQQE_BUDGET_TIMEOUT;
   6892	else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
   6893		/*
   6894		 * The queue may not be empty upon timer expiration,
   6895		 * because we may not disable the timer when the
   6896		 * first request of the in-service queue arrives
   6897		 * during disk idling.
   6898		 */
   6899		reason = BFQQE_TOO_IDLE;
   6900	else
   6901		goto schedule_dispatch;
   6902
   6903	bfq_bfqq_expire(bfqd, bfqq, true, reason);
   6904
   6905schedule_dispatch:
   6906	bfq_schedule_dispatch(bfqd);
   6907	spin_unlock_irqrestore(&bfqd->lock, flags);
   6908}
   6909
   6910/*
   6911 * Handler of the expiration of the timer running if the in-service queue
   6912 * is idling inside its time slice.
   6913 */
   6914static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
   6915{
   6916	struct bfq_data *bfqd = container_of(timer, struct bfq_data,
   6917					     idle_slice_timer);
   6918	struct bfq_queue *bfqq = bfqd->in_service_queue;
   6919
   6920	/*
   6921	 * Theoretical race here: the in-service queue can be NULL or
   6922	 * different from the queue that was idling if a new request
   6923	 * arrives for the current queue and there is a full dispatch
   6924	 * cycle that changes the in-service queue.  This can hardly
   6925	 * happen, but in the worst case we just expire a queue too
   6926	 * early.
   6927	 */
   6928	if (bfqq)
   6929		bfq_idle_slice_timer_body(bfqd, bfqq);
   6930
   6931	return HRTIMER_NORESTART;
   6932}
   6933
   6934static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
   6935				 struct bfq_queue **bfqq_ptr)
   6936{
   6937	struct bfq_queue *bfqq = *bfqq_ptr;
   6938
   6939	bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
   6940	if (bfqq) {
   6941		bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
   6942
   6943		bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
   6944			     bfqq, bfqq->ref);
   6945		bfq_put_queue(bfqq);
   6946		*bfqq_ptr = NULL;
   6947	}
   6948}
   6949
   6950/*
   6951 * Release all the bfqg references to its async queues.  If we are
    6952 * deallocating the group, these queues may still contain requests, so
   6953 * we reparent them to the root cgroup (i.e., the only one that will
   6954 * exist for sure until all the requests on a device are gone).
   6955 */
   6956void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
   6957{
   6958	int i, j;
   6959
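        	/*
        	 * async_bfqq[0][] holds the per-level RT async queues and
        	 * async_bfqq[1][] the BE ones; the idle-class queue is
        	 * async_idle_bfqq, put separately below.
        	 */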
   6960	for (i = 0; i < 2; i++)
   6961		for (j = 0; j < IOPRIO_NR_LEVELS; j++)
   6962			__bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
   6963
   6964	__bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
   6965}
   6966
   6967/*
   6968 * See the comments on bfq_limit_depth for the purpose of
    6969 * the depths set in this function.
   6970 */
   6971static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
   6972{
   6973	unsigned int depth = 1U << bt->sb.shift;
   6974
   6975	bfqd->full_depth_shift = bt->sb.shift;
   6976	/*
   6977	 * In-word depths if no bfq_queue is being weight-raised:
   6978	 * leaving 25% of tags only for sync reads.
   6979	 *
   6980	 * In next formulas, right-shift the value
   6981	 * (1U<<bt->sb.shift), instead of computing directly
   6982	 * (1U<<(bt->sb.shift - something)), to be robust against
   6983	 * any possible value of bt->sb.shift, without having to
   6984	 * limit 'something'.
   6985	 */
   6986	/* no more than 50% of tags for async I/O */
   6987	bfqd->word_depths[0][0] = max(depth >> 1, 1U);
   6988	/*
   6989	 * no more than 75% of tags for sync writes (25% extra tags
   6990	 * w.r.t. async I/O, to prevent async I/O from starving sync
   6991	 * writes)
   6992	 */
   6993	bfqd->word_depths[0][1] = max((depth * 3) >> 2, 1U);
   6994
   6995	/*
   6996	 * In-word depths in case some bfq_queue is being weight-
   6997	 * raised: leaving ~63% of tags for sync reads. This is the
   6998	 * highest percentage for which, in our tests, application
   6999	 * start-up times didn't suffer from any regression due to tag
   7000	 * shortage.
   7001	 */
   7002	/* no more than ~18% of tags for async I/O */
   7003	bfqd->word_depths[1][0] = max((depth * 3) >> 4, 1U);
   7004	/* no more than ~37% of tags for sync writes (~20% extra tags) */
   7005	bfqd->word_depths[1][1] = max((depth * 6) >> 4, 1U);
   7006}
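
        /*
         * For example, with bt->sb.shift == 6, i.e., depth == 64, the
         * formulas above yield word_depths[0][0] = 32 (50%),
         * word_depths[0][1] = 48 (75%), word_depths[1][0] = 12 (~18%)
         * and word_depths[1][1] = 24 (~37%), matching the percentages
         * in the comments.
         */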
   7007
   7008static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx)
   7009{
   7010	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
   7011	struct blk_mq_tags *tags = hctx->sched_tags;
   7012
   7013	bfq_update_depths(bfqd, &tags->bitmap_tags);
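        	/*
        	 * Inform the sbitmap code of the minimum shallow depth that
        	 * may be used (1, i.e., any), so that its wake batches stay
        	 * consistent with shallow allocations.
        	 */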
   7014	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, 1);
   7015}
   7016
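        /* Invoked when a new hctx is set up: just (re)compute the depths. */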
   7017static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
   7018{
   7019	bfq_depth_updated(hctx);
   7020	return 0;
   7021}
   7022
   7023static void bfq_exit_queue(struct elevator_queue *e)
   7024{
   7025	struct bfq_data *bfqd = e->elevator_data;
   7026	struct bfq_queue *bfqq, *n;
   7027
   7028	hrtimer_cancel(&bfqd->idle_slice_timer);
   7029
   7030	spin_lock_irq(&bfqd->lock);
   7031	list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
   7032		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
   7033	spin_unlock_irq(&bfqd->lock);
   7034
   7035	hrtimer_cancel(&bfqd->idle_slice_timer);
   7036
   7037	/* release oom-queue reference to root group */
   7038	bfqg_and_blkg_put(bfqd->root_group);
   7039
   7040#ifdef CONFIG_BFQ_GROUP_IOSCHED
   7041	blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
   7042#else
   7043	spin_lock_irq(&bfqd->lock);
   7044	bfq_put_async_queues(bfqd, bfqd->root_group);
   7045	kfree(bfqd->root_group);
   7046	spin_unlock_irq(&bfqd->lock);
   7047#endif
   7048
   7049	blk_stat_disable_accounting(bfqd->queue);
   7050	wbt_enable_default(bfqd->queue);
   7051
   7052	kfree(bfqd);
   7053}
   7054
   7055static void bfq_init_root_group(struct bfq_group *root_group,
   7056				struct bfq_data *bfqd)
   7057{
   7058	int i;
   7059
   7060#ifdef CONFIG_BFQ_GROUP_IOSCHED
   7061	root_group->entity.parent = NULL;
   7062	root_group->my_entity = NULL;
   7063	root_group->bfqd = bfqd;
   7064#endif
   7065	root_group->rq_pos_tree = RB_ROOT;
   7066	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
   7067		root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
   7068	root_group->sched_data.bfq_class_idle_last_service = jiffies;
   7069}
   7070
   7071static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
   7072{
   7073	struct bfq_data *bfqd;
   7074	struct elevator_queue *eq;
   7075
   7076	eq = elevator_alloc(q, e);
   7077	if (!eq)
   7078		return -ENOMEM;
   7079
   7080	bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
   7081	if (!bfqd) {
   7082		kobject_put(&eq->kobj);
   7083		return -ENOMEM;
   7084	}
   7085	eq->elevator_data = bfqd;
   7086
   7087	spin_lock_irq(&q->queue_lock);
   7088	q->elevator = eq;
   7089	spin_unlock_irq(&q->queue_lock);
   7090
   7091	/*
    7092	 * Our fallback bfqq if bfq_get_queue() runs into OOM issues.
   7093	 * Grab a permanent reference to it, so that the normal code flow
   7094	 * will not attempt to free it.
   7095	 */
   7096	bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0);
   7097	bfqd->oom_bfqq.ref++;
   7098	bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO;
   7099	bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE;
   7100	bfqd->oom_bfqq.entity.new_weight =
   7101		bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio);
   7102
    7103	/* oom_bfqq does not participate in bursts */
   7104	bfq_clear_bfqq_just_created(&bfqd->oom_bfqq);
   7105
   7106	/*
   7107	 * Trigger weight initialization, according to ioprio, at the
   7108	 * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio
   7109	 * class won't be changed any more.
   7110	 */
   7111	bfqd->oom_bfqq.entity.prio_changed = 1;
   7112
   7113	bfqd->queue = q;
   7114
   7115	INIT_LIST_HEAD(&bfqd->dispatch);
   7116
   7117	hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC,
   7118		     HRTIMER_MODE_REL);
   7119	bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
   7120
   7121	bfqd->queue_weights_tree = RB_ROOT_CACHED;
   7122	bfqd->num_groups_with_pending_reqs = 0;
   7123
   7124	INIT_LIST_HEAD(&bfqd->active_list);
   7125	INIT_LIST_HEAD(&bfqd->idle_list);
   7126	INIT_HLIST_HEAD(&bfqd->burst_list);
   7127
   7128	bfqd->hw_tag = -1;
   7129	bfqd->nonrot_with_queueing = blk_queue_nonrot(bfqd->queue);
   7130
   7131	bfqd->bfq_max_budget = bfq_default_max_budget;
   7132
   7133	bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
   7134	bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
   7135	bfqd->bfq_back_max = bfq_back_max;
   7136	bfqd->bfq_back_penalty = bfq_back_penalty;
   7137	bfqd->bfq_slice_idle = bfq_slice_idle;
   7138	bfqd->bfq_timeout = bfq_timeout;
   7139
   7140	bfqd->bfq_large_burst_thresh = 8;
   7141	bfqd->bfq_burst_interval = msecs_to_jiffies(180);
   7142
   7143	bfqd->low_latency = true;
   7144
   7145	/*
   7146	 * Trade-off between responsiveness and fairness.
   7147	 */
   7148	bfqd->bfq_wr_coeff = 30;
   7149	bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
   7150	bfqd->bfq_wr_max_time = 0;
   7151	bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
   7152	bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
   7153	bfqd->bfq_wr_max_softrt_rate = 7000; /*
   7154					      * Approximate rate required
    7155					      * to play back or record a
   7156					      * high-definition compressed
   7157					      * video.
   7158					      */
   7159	bfqd->wr_busy_queues = 0;
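
        	/*
        	 * With bfq_wr_coeff == 30 (set above), a weight-raised queue
        	 * is scheduled as if its weight were 30 times its nominal
        	 * weight, for the duration of its weight-raising period.
        	 */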
   7160
   7161	/*
   7162	 * Begin by assuming, optimistically, that the device peak
   7163	 * rate is equal to 2/3 of the highest reference rate.
   7164	 */
   7165	bfqd->rate_dur_prod = ref_rate[blk_queue_nonrot(bfqd->queue)] *
   7166		ref_wr_duration[blk_queue_nonrot(bfqd->queue)];
   7167	bfqd->peak_rate = ref_rate[blk_queue_nonrot(bfqd->queue)] * 2 / 3;
   7168
   7169	spin_lock_init(&bfqd->lock);
   7170
   7171	/*
   7172	 * The invocation of the next bfq_create_group_hierarchy
   7173	 * function is the head of a chain of function calls
   7174	 * (bfq_create_group_hierarchy->blkcg_activate_policy->
   7175	 * blk_mq_freeze_queue) that may lead to the invocation of the
   7176	 * has_work hook function. For this reason,
   7177	 * bfq_create_group_hierarchy is invoked only after all
   7178	 * scheduler data has been initialized, apart from the fields
   7179	 * that can be initialized only after invoking
   7180	 * bfq_create_group_hierarchy. This, in particular, enables
   7181	 * has_work to correctly return false. Of course, to avoid
   7182	 * other inconsistencies, the blk-mq stack must then refrain
   7183	 * from invoking further scheduler hooks before this init
   7184	 * function is finished.
   7185	 */
   7186	bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
   7187	if (!bfqd->root_group)
   7188		goto out_free;
   7189	bfq_init_root_group(bfqd->root_group, bfqd);
   7190	bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
   7191
    7192	/* We dispatch from the request queue as a whole, not per hw queue */
   7193	blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
   7194
   7195	wbt_disable_default(q);
   7196	blk_stat_enable_accounting(q);
   7197
   7198	return 0;
   7199
   7200out_free:
   7201	kfree(bfqd);
   7202	kobject_put(&eq->kobj);
   7203	return -ENOMEM;
   7204}
   7205
   7206static void bfq_slab_kill(void)
   7207{
   7208	kmem_cache_destroy(bfq_pool);
   7209}
   7210
   7211static int __init bfq_slab_setup(void)
   7212{
   7213	bfq_pool = KMEM_CACHE(bfq_queue, 0);
   7214	if (!bfq_pool)
   7215		return -ENOMEM;
   7216	return 0;
   7217}
   7218
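        /*
         * sysfs plumbing for the tunables in bfq_attrs below: each
         * attribute is shown and stored as a single decimal value per
         * file, through the following helpers and macros.
         */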
   7219static ssize_t bfq_var_show(unsigned int var, char *page)
   7220{
   7221	return sprintf(page, "%u\n", var);
   7222}
   7223
   7224static int bfq_var_store(unsigned long *var, const char *page)
   7225{
   7226	unsigned long new_val;
   7227	int ret = kstrtoul(page, 10, &new_val);
   7228
   7229	if (ret)
   7230		return ret;
   7231	*var = new_val;
   7232	return 0;
   7233}
   7234
   7235#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
   7236static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
   7237{									\
   7238	struct bfq_data *bfqd = e->elevator_data;			\
   7239	u64 __data = __VAR;						\
   7240	if (__CONV == 1)						\
   7241		__data = jiffies_to_msecs(__data);			\
   7242	else if (__CONV == 2)						\
   7243		__data = div_u64(__data, NSEC_PER_MSEC);		\
   7244	return bfq_var_show(__data, (page));				\
   7245}
   7246SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2);
   7247SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2);
   7248SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
   7249SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
   7250SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2);
   7251SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
   7252SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1);
   7253SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0);
   7254SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
   7255#undef SHOW_FUNCTION
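
        /*
         * For example, SHOW_FUNCTION(bfq_slice_idle_show,
         * bfqd->bfq_slice_idle, 2) expands to a show method that reports
         * bfq_slice_idle converted from nanoseconds to milliseconds
         * (__CONV == 2); __CONV == 1 converts jiffies to milliseconds,
         * and __CONV == 0 reports the raw value.
         */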
   7256
   7257#define USEC_SHOW_FUNCTION(__FUNC, __VAR)				\
   7258static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
   7259{									\
   7260	struct bfq_data *bfqd = e->elevator_data;			\
   7261	u64 __data = __VAR;						\
   7262	__data = div_u64(__data, NSEC_PER_USEC);			\
   7263	return bfq_var_show(__data, (page));				\
   7264}
   7265USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle);
   7266#undef USEC_SHOW_FUNCTION
   7267
   7268#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
   7269static ssize_t								\
   7270__FUNC(struct elevator_queue *e, const char *page, size_t count)	\
   7271{									\
   7272	struct bfq_data *bfqd = e->elevator_data;			\
   7273	unsigned long __data, __min = (MIN), __max = (MAX);		\
   7274	int ret;							\
   7275									\
   7276	ret = bfq_var_store(&__data, (page));				\
   7277	if (ret)							\
   7278		return ret;						\
   7279	if (__data < __min)						\
   7280		__data = __min;						\
   7281	else if (__data > __max)					\
   7282		__data = __max;						\
   7283	if (__CONV == 1)						\
   7284		*(__PTR) = msecs_to_jiffies(__data);			\
   7285	else if (__CONV == 2)						\
   7286		*(__PTR) = (u64)__data * NSEC_PER_MSEC;			\
   7287	else								\
   7288		*(__PTR) = __data;					\
   7289	return count;							\
   7290}
   7291STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
   7292		INT_MAX, 2);
   7293STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
   7294		INT_MAX, 2);
   7295STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
   7296STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
   7297		INT_MAX, 0);
   7298STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2);
   7299#undef STORE_FUNCTION
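
        /*
         * Correspondingly, bfq_slice_idle_store accepts milliseconds,
         * clamps the input to [0, INT_MAX] and stores nanoseconds
         * (__CONV == 2): writing "8" sets bfq_slice_idle to 8000000 ns.
         */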
   7300
   7301#define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)			\
   7302static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\
   7303{									\
   7304	struct bfq_data *bfqd = e->elevator_data;			\
   7305	unsigned long __data, __min = (MIN), __max = (MAX);		\
   7306	int ret;							\
   7307									\
   7308	ret = bfq_var_store(&__data, (page));				\
   7309	if (ret)							\
   7310		return ret;						\
   7311	if (__data < __min)						\
   7312		__data = __min;						\
   7313	else if (__data > __max)					\
   7314		__data = __max;						\
   7315	*(__PTR) = (u64)__data * NSEC_PER_USEC;				\
   7316	return count;							\
   7317}
   7318USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0,
   7319		    UINT_MAX);
   7320#undef USEC_STORE_FUNCTION
   7321
   7322static ssize_t bfq_max_budget_store(struct elevator_queue *e,
   7323				    const char *page, size_t count)
   7324{
   7325	struct bfq_data *bfqd = e->elevator_data;
   7326	unsigned long __data;
   7327	int ret;
   7328
   7329	ret = bfq_var_store(&__data, (page));
   7330	if (ret)
   7331		return ret;
   7332
   7333	if (__data == 0)
   7334		bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
   7335	else {
   7336		if (__data > INT_MAX)
   7337			__data = INT_MAX;
   7338		bfqd->bfq_max_budget = __data;
   7339	}
   7340
   7341	bfqd->bfq_user_max_budget = __data;
   7342
   7343	return count;
   7344}
   7345
   7346/*
    7347 * The name is kept for compatibility with cfq parameters, but
    7348 * this timeout is used for both sync and async requests.
   7349 */
   7350static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
   7351				      const char *page, size_t count)
   7352{
   7353	struct bfq_data *bfqd = e->elevator_data;
   7354	unsigned long __data;
   7355	int ret;
   7356
   7357	ret = bfq_var_store(&__data, (page));
   7358	if (ret)
   7359		return ret;
   7360
   7361	if (__data < 1)
   7362		__data = 1;
   7363	else if (__data > INT_MAX)
   7364		__data = INT_MAX;
   7365
   7366	bfqd->bfq_timeout = msecs_to_jiffies(__data);
   7367	if (bfqd->bfq_user_max_budget == 0)
   7368		bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
   7369
   7370	return count;
   7371}
   7372
   7373static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e,
   7374				     const char *page, size_t count)
   7375{
   7376	struct bfq_data *bfqd = e->elevator_data;
   7377	unsigned long __data;
   7378	int ret;
   7379
   7380	ret = bfq_var_store(&__data, (page));
   7381	if (ret)
   7382		return ret;
   7383
   7384	if (__data > 1)
   7385		__data = 1;
    7386	if (!bfqd->strict_guarantees && __data == 1 &&
    7387	    bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC)
   7388		bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC;
   7389
   7390	bfqd->strict_guarantees = __data;
   7391
   7392	return count;
   7393}
   7394
   7395static ssize_t bfq_low_latency_store(struct elevator_queue *e,
   7396				     const char *page, size_t count)
   7397{
   7398	struct bfq_data *bfqd = e->elevator_data;
   7399	unsigned long __data;
   7400	int ret;
   7401
   7402	ret = bfq_var_store(&__data, (page));
   7403	if (ret)
   7404		return ret;
   7405
   7406	if (__data > 1)
   7407		__data = 1;
   7408	if (__data == 0 && bfqd->low_latency != 0)
   7409		bfq_end_wr(bfqd);
   7410	bfqd->low_latency = __data;
   7411
   7412	return count;
   7413}
   7414
   7415#define BFQ_ATTR(name) \
   7416	__ATTR(name, 0644, bfq_##name##_show, bfq_##name##_store)
   7417
   7418static struct elv_fs_entry bfq_attrs[] = {
   7419	BFQ_ATTR(fifo_expire_sync),
   7420	BFQ_ATTR(fifo_expire_async),
   7421	BFQ_ATTR(back_seek_max),
   7422	BFQ_ATTR(back_seek_penalty),
   7423	BFQ_ATTR(slice_idle),
   7424	BFQ_ATTR(slice_idle_us),
   7425	BFQ_ATTR(max_budget),
   7426	BFQ_ATTR(timeout_sync),
   7427	BFQ_ATTR(strict_guarantees),
   7428	BFQ_ATTR(low_latency),
   7429	__ATTR_NULL
   7430};
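
        /*
         * These attributes appear under /sys/block/<disk>/queue/iosched/
         * while bfq is the active scheduler. Illustrative shell session
         * (the device name sda is only an example):
         *
         *   echo bfq > /sys/block/sda/queue/scheduler
         *   cat /sys/block/sda/queue/iosched/low_latency
         *   echo 0 > /sys/block/sda/queue/iosched/max_budget
         *
         * Writing 0 to max_budget makes bfq auto-compute the maximum
         * budget (see bfq_max_budget_store above).
         */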
   7431
   7432static struct elevator_type iosched_bfq_mq = {
   7433	.ops = {
   7434		.limit_depth		= bfq_limit_depth,
   7435		.prepare_request	= bfq_prepare_request,
   7436		.requeue_request        = bfq_finish_requeue_request,
   7437		.finish_request		= bfq_finish_request,
   7438		.exit_icq		= bfq_exit_icq,
   7439		.insert_requests	= bfq_insert_requests,
   7440		.dispatch_request	= bfq_dispatch_request,
   7441		.next_request		= elv_rb_latter_request,
   7442		.former_request		= elv_rb_former_request,
   7443		.allow_merge		= bfq_allow_bio_merge,
   7444		.bio_merge		= bfq_bio_merge,
   7445		.request_merge		= bfq_request_merge,
   7446		.requests_merged	= bfq_requests_merged,
   7447		.request_merged		= bfq_request_merged,
   7448		.has_work		= bfq_has_work,
   7449		.depth_updated		= bfq_depth_updated,
   7450		.init_hctx		= bfq_init_hctx,
   7451		.init_sched		= bfq_init_queue,
   7452		.exit_sched		= bfq_exit_queue,
   7453	},
   7454
   7455	.icq_size =		sizeof(struct bfq_io_cq),
   7456	.icq_align =		__alignof__(struct bfq_io_cq),
   7457	.elevator_attrs =	bfq_attrs,
   7458	.elevator_name =	"bfq",
   7459	.elevator_owner =	THIS_MODULE,
   7460};
   7461MODULE_ALIAS("bfq-iosched");
   7462
   7463static int __init bfq_init(void)
   7464{
   7465	int ret;
   7466
   7467#ifdef CONFIG_BFQ_GROUP_IOSCHED
   7468	ret = blkcg_policy_register(&blkcg_policy_bfq);
   7469	if (ret)
   7470		return ret;
   7471#endif
   7472
   7473	ret = -ENOMEM;
   7474	if (bfq_slab_setup())
   7475		goto err_pol_unreg;
   7476
   7477	/*
   7478	 * Times to load large popular applications for the typical
   7479	 * systems installed on the reference devices (see the
   7480	 * comments before the definition of the next
   7481	 * array). Actually, we use slightly lower values, as the
   7482	 * estimated peak rate tends to be smaller than the actual
   7483	 * peak rate.  The reason for this last fact is that estimates
   7484	 * are computed over much shorter time intervals than the long
   7485	 * intervals typically used for benchmarking. Why? First, to
   7486	 * adapt more quickly to variations. Second, because an I/O
   7487	 * scheduler cannot rely on a peak-rate-evaluation workload to
   7488	 * be run for a long time.
   7489	 */
   7490	ref_wr_duration[0] = msecs_to_jiffies(7000); /* actually 8 sec */
   7491	ref_wr_duration[1] = msecs_to_jiffies(2500); /* actually 3 sec */
   7492
   7493	ret = elv_register(&iosched_bfq_mq);
   7494	if (ret)
   7495		goto slab_kill;
   7496
   7497	return 0;
   7498
   7499slab_kill:
   7500	bfq_slab_kill();
   7501err_pol_unreg:
   7502#ifdef CONFIG_BFQ_GROUP_IOSCHED
   7503	blkcg_policy_unregister(&blkcg_policy_bfq);
   7504#endif
   7505	return ret;
   7506}
   7507
   7508static void __exit bfq_exit(void)
   7509{
   7510	elv_unregister(&iosched_bfq_mq);
   7511#ifdef CONFIG_BFQ_GROUP_IOSCHED
   7512	blkcg_policy_unregister(&blkcg_policy_bfq);
   7513#endif
   7514	bfq_slab_kill();
   7515}
   7516
   7517module_init(bfq_init);
   7518module_exit(bfq_exit);
   7519
   7520MODULE_AUTHOR("Paolo Valente");
   7521MODULE_LICENSE("GPL");
   7522MODULE_DESCRIPTION("MQ Budget Fair Queueing I/O Scheduler");