cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

blk-throttle.c (65962B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Interface for controlling IO bandwidth on a request queue
      4 *
      5 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
      6 */
      7
      8#include <linux/module.h>
      9#include <linux/slab.h>
     10#include <linux/blkdev.h>
     11#include <linux/bio.h>
     12#include <linux/blktrace_api.h>
     13#include "blk.h"
     14#include "blk-cgroup-rwstat.h"
     15#include "blk-stat.h"
     16#include "blk-throttle.h"
     17
     18/* Max dispatch from a group in 1 round */
     19#define THROTL_GRP_QUANTUM 8
     20
     21/* Total max dispatch from all groups in one round */
     22#define THROTL_QUANTUM 32
     23
      24/* Throttling is performed over a time slice and after that the slice is renewed */
     25#define DFL_THROTL_SLICE_HD (HZ / 10)
     26#define DFL_THROTL_SLICE_SSD (HZ / 50)
     27#define MAX_THROTL_SLICE (HZ)
     28#define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
     29#define MIN_THROTL_BPS (320 * 1024)
     30#define MIN_THROTL_IOPS (10)
     31#define DFL_LATENCY_TARGET (-1L)
     32#define DFL_IDLE_THRESHOLD (0)
     33#define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
     34#define LATENCY_FILTERED_SSD (0)
     35/*
      36 * For HD, very small latency comes from sequential IO. Such IO doesn't help
      37 * determine whether a cgroup's IO is impacted by others, hence we ignore it.
     38 */
     39#define LATENCY_FILTERED_HD (1000L) /* 1ms */
     40
     41/* A workqueue to queue throttle related work */
     42static struct workqueue_struct *kthrotld_workqueue;
     43
     44#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
     45
      46/* We measure latency for request sizes from <= 4k to >= 1M */
     47#define LATENCY_BUCKET_SIZE 9
     48
     49struct latency_bucket {
     50	unsigned long total_latency; /* ns / 1024 */
     51	int samples;
     52};
     53
     54struct avg_latency_bucket {
     55	unsigned long latency; /* ns / 1024 */
     56	bool valid;
     57};
     58
     59struct throtl_data
     60{
     61	/* service tree for active throtl groups */
     62	struct throtl_service_queue service_queue;
     63
     64	struct request_queue *queue;
     65
     66	/* Total Number of queued bios on READ and WRITE lists */
     67	unsigned int nr_queued[2];
     68
     69	unsigned int throtl_slice;
     70
     71	/* Work for dispatching throttled bios */
     72	struct work_struct dispatch_work;
     73	unsigned int limit_index;
     74	bool limit_valid[LIMIT_CNT];
     75
     76	unsigned long low_upgrade_time;
     77	unsigned long low_downgrade_time;
     78
     79	unsigned int scale;
     80
     81	struct latency_bucket tmp_buckets[2][LATENCY_BUCKET_SIZE];
     82	struct avg_latency_bucket avg_buckets[2][LATENCY_BUCKET_SIZE];
     83	struct latency_bucket __percpu *latency_buckets[2];
     84	unsigned long last_calculate_time;
     85	unsigned long filtered_latency;
     86
     87	bool track_bio_latency;
     88};
     89
     90static void throtl_pending_timer_fn(struct timer_list *t);
     91
     92static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
     93{
     94	return pd_to_blkg(&tg->pd);
     95}
     96
     97/**
      98 * sq_to_tg - return the throtl_grp the specified service queue belongs to
     99 * @sq: the throtl_service_queue of interest
    100 *
    101 * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
    102 * embedded in throtl_data, %NULL is returned.
    103 */
    104static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
    105{
    106	if (sq && sq->parent_sq)
    107		return container_of(sq, struct throtl_grp, service_queue);
    108	else
    109		return NULL;
    110}
    111
    112/**
    113 * sq_to_td - return throtl_data the specified service queue belongs to
    114 * @sq: the throtl_service_queue of interest
    115 *
    116 * A service_queue can be embedded in either a throtl_grp or throtl_data.
    117 * Determine the associated throtl_data accordingly and return it.
    118 */
    119static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
    120{
    121	struct throtl_grp *tg = sq_to_tg(sq);
    122
    123	if (tg)
    124		return tg->td;
    125	else
    126		return container_of(sq, struct throtl_data, service_queue);
    127}
    128
    129/*
     130 * A cgroup's limit in LIMIT_MAX is scaled if a low limit is set. This scaling
     131 * makes IO dispatch smoother.
     132 * Scale up: scale up linearly according to the time elapsed since upgrade. For
     133 *           every throtl_slice, the limit scales up by 1/2 of the .low limit
     134 *           until it hits the .max limit
     135 * Scale down: scale down exponentially if a cgroup doesn't hit its .low limit
    136 */
    137static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
    138{
    139	/* arbitrary value to avoid too big scale */
    140	if (td->scale < 4096 && time_after_eq(jiffies,
    141	    td->low_upgrade_time + td->scale * td->throtl_slice))
    142		td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;
    143
    144	return low + (low >> 1) * td->scale;
    145}
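
The scale-up arithmetic above can be checked with concrete numbers. Below is a minimal userspace sketch (not kernel code); the tick rate, slice length and the 1 MiB/8 MiB limits are assumptions picked for illustration, and the cap at the .max limit mirrors the min() applied in tg_bps_limit() further down.

#include <stdint.h>
#include <stdio.h>

#define EX_HZ 100                     /* assumed tick rate */
#define EX_THROTL_SLICE (EX_HZ / 10)  /* DFL_THROTL_SLICE_HD */

/* mirrors throtl_adjusted_limit(): limit = low + (low / 2) * scale */
static uint64_t adjusted_limit(uint64_t low, unsigned long scale)
{
	return low + (low >> 1) * scale;
}

int main(void)
{
	uint64_t low = 1024 * 1024;      /* 1 MiB/s .low limit (assumed) */
	uint64_t max = 8 * 1024 * 1024;  /* 8 MiB/s .max limit (assumed) */
	unsigned long scale;

	/* scale grows by one for every throtl_slice elapsed since the upgrade */
	for (scale = 0; scale <= 14; scale++) {
		uint64_t lim = adjusted_limit(low, scale);

		if (lim > max)  /* tg_bps_limit() caps the result at .max */
			lim = max;
		printf("scale=%2lu (~%lu jiffies after upgrade): %llu B/s\n",
		       scale, scale * (unsigned long)EX_THROTL_SLICE,
		       (unsigned long long)lim);
	}
	return 0;
}

At scale 14 the 1 MiB .low limit has grown to the 8 MiB .max limit, so from then on the group simply runs at .max.
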
    146
    147static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
    148{
    149	struct blkcg_gq *blkg = tg_to_blkg(tg);
    150	struct throtl_data *td;
    151	uint64_t ret;
    152
    153	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
    154		return U64_MAX;
    155
    156	td = tg->td;
    157	ret = tg->bps[rw][td->limit_index];
    158	if (ret == 0 && td->limit_index == LIMIT_LOW) {
    159		/* intermediate node or iops isn't 0 */
    160		if (!list_empty(&blkg->blkcg->css.children) ||
    161		    tg->iops[rw][td->limit_index])
    162			return U64_MAX;
    163		else
    164			return MIN_THROTL_BPS;
    165	}
    166
    167	if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
    168	    tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
    169		uint64_t adjusted;
    170
    171		adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
    172		ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
    173	}
    174	return ret;
    175}
    176
    177static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
    178{
    179	struct blkcg_gq *blkg = tg_to_blkg(tg);
    180	struct throtl_data *td;
    181	unsigned int ret;
    182
    183	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
    184		return UINT_MAX;
    185
    186	td = tg->td;
    187	ret = tg->iops[rw][td->limit_index];
    188	if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
    189		/* intermediate node or bps isn't 0 */
    190		if (!list_empty(&blkg->blkcg->css.children) ||
    191		    tg->bps[rw][td->limit_index])
    192			return UINT_MAX;
    193		else
    194			return MIN_THROTL_IOPS;
    195	}
    196
    197	if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
    198	    tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
    199		uint64_t adjusted;
    200
    201		adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
    202		if (adjusted > UINT_MAX)
    203			adjusted = UINT_MAX;
    204		ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
    205	}
    206	return ret;
    207}
    208
    209#define request_bucket_index(sectors) \
    210	clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1)
    211
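
The bucket mapping above can be worked through with concrete sizes: a request's sector count is reduced to a ceiling log2 and shifted by 3, so that <= 4 KiB (8 sectors) lands in bucket 0 and >= 1 MiB (2048 sectors) in bucket 8. A minimal userspace sketch, with ceil_log2() standing in for the kernel's order_base_2() and the clamping done by hand:

#include <stdio.h>

#define LATENCY_BUCKET_SIZE 9

/* stand-in for order_base_2(): smallest order with (1 << order) >= v */
static int ceil_log2(unsigned long v)
{
	int order = 0;

	while ((1UL << order) < v)
		order++;
	return order;
}

static int bucket_index(unsigned long sectors)
{
	int idx = ceil_log2(sectors) - 3;

	if (idx < 0)
		idx = 0;
	if (idx > LATENCY_BUCKET_SIZE - 1)
		idx = LATENCY_BUCKET_SIZE - 1;
	return idx;
}

int main(void)
{
	/* 4 KiB = 8 sectors, 64 KiB = 128 sectors, 1 MiB = 2048 sectors */
	printf("4k -> bucket %d, 64k -> bucket %d, 1M -> bucket %d\n",
	       bucket_index(8), bucket_index(128), bucket_index(2048));
	return 0;
}
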
    212/**
    213 * throtl_log - log debug message via blktrace
    214 * @sq: the service_queue being reported
    215 * @fmt: printf format string
    216 * @args: printf args
    217 *
    218 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
    219 * throtl_grp; otherwise, just "throtl".
    220 */
    221#define throtl_log(sq, fmt, args...)	do {				\
    222	struct throtl_grp *__tg = sq_to_tg((sq));			\
    223	struct throtl_data *__td = sq_to_td((sq));			\
    224									\
    225	(void)__td;							\
    226	if (likely(!blk_trace_note_message_enabled(__td->queue)))	\
    227		break;							\
    228	if ((__tg)) {							\
    229		blk_add_cgroup_trace_msg(__td->queue,			\
    230			&tg_to_blkg(__tg)->blkcg->css, "throtl " fmt, ##args);\
    231	} else {							\
    232		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
    233	}								\
    234} while (0)
    235
    236static inline unsigned int throtl_bio_data_size(struct bio *bio)
    237{
    238	/* assume it's one sector */
    239	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
    240		return 512;
    241	return bio->bi_iter.bi_size;
    242}
    243
    244static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
    245{
    246	INIT_LIST_HEAD(&qn->node);
    247	bio_list_init(&qn->bios);
    248	qn->tg = tg;
    249}
    250
    251/**
    252 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
    253 * @bio: bio being added
    254 * @qn: qnode to add bio to
    255 * @queued: the service_queue->queued[] list @qn belongs to
    256 *
     257 * Add @bio to @qn and put @qn on @queued if it's not already on it.
    258 * @qn->tg's reference count is bumped when @qn is activated.  See the
    259 * comment on top of throtl_qnode definition for details.
    260 */
    261static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
    262				 struct list_head *queued)
    263{
    264	bio_list_add(&qn->bios, bio);
    265	if (list_empty(&qn->node)) {
    266		list_add_tail(&qn->node, queued);
    267		blkg_get(tg_to_blkg(qn->tg));
    268	}
    269}
    270
    271/**
    272 * throtl_peek_queued - peek the first bio on a qnode list
    273 * @queued: the qnode list to peek
    274 */
    275static struct bio *throtl_peek_queued(struct list_head *queued)
    276{
    277	struct throtl_qnode *qn;
    278	struct bio *bio;
    279
    280	if (list_empty(queued))
    281		return NULL;
    282
    283	qn = list_first_entry(queued, struct throtl_qnode, node);
    284	bio = bio_list_peek(&qn->bios);
    285	WARN_ON_ONCE(!bio);
    286	return bio;
    287}
    288
    289/**
     290 * throtl_pop_queued - pop the first bio from a qnode list
    291 * @queued: the qnode list to pop a bio from
    292 * @tg_to_put: optional out argument for throtl_grp to put
    293 *
    294 * Pop the first bio from the qnode list @queued.  After popping, the first
    295 * qnode is removed from @queued if empty or moved to the end of @queued so
    296 * that the popping order is round-robin.
    297 *
    298 * When the first qnode is removed, its associated throtl_grp should be put
    299 * too.  If @tg_to_put is NULL, this function automatically puts it;
    300 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
    301 * responsible for putting it.
    302 */
    303static struct bio *throtl_pop_queued(struct list_head *queued,
    304				     struct throtl_grp **tg_to_put)
    305{
    306	struct throtl_qnode *qn;
    307	struct bio *bio;
    308
    309	if (list_empty(queued))
    310		return NULL;
    311
    312	qn = list_first_entry(queued, struct throtl_qnode, node);
    313	bio = bio_list_pop(&qn->bios);
    314	WARN_ON_ONCE(!bio);
    315
    316	if (bio_list_empty(&qn->bios)) {
    317		list_del_init(&qn->node);
    318		if (tg_to_put)
    319			*tg_to_put = qn->tg;
    320		else
    321			blkg_put(tg_to_blkg(qn->tg));
    322	} else {
    323		list_move_tail(&qn->node, queued);
    324	}
    325
    326	return bio;
    327}
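
The round-robin behaviour described above (a popped qnode is rotated to the tail if it still holds bios, or dropped once empty) can be illustrated with a small userspace sketch. The fixed-size arrays and the two example queues are assumptions made purely for illustration:

#include <stdio.h>

/* a hypothetical qnode: a label plus a few queued "bios" (just strings) */
struct qnode {
	const char *name;
	const char *bios[4];
	int head, tail;
};

/* pop the first bio of the first qnode, then rotate or drop that qnode */
static const char *pop_queued(struct qnode **list, int *nr)
{
	struct qnode *qn = list[0];
	const char *bio = qn->bios[qn->head++];
	int i;

	for (i = 0; i < *nr - 1; i++)
		list[i] = list[i + 1];
	if (qn->head == qn->tail)
		(*nr)--;              /* qnode is empty: drop it */
	else
		list[*nr - 1] = qn;   /* still has bios: move to the tail */
	return bio;
}

int main(void)
{
	struct qnode a = { "A", { "a1", "a2", "a3" }, 0, 3 };
	struct qnode b = { "B", { "b1" }, 0, 1 };
	struct qnode *list[2] = { &a, &b };
	int nr = 2;

	/* prints "a1 b1 a2 a3": the two queues are served round-robin */
	while (nr)
		printf("%s ", pop_queued(list, &nr));
	printf("\n");
	return 0;
}
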
    328
    329/* init a service_queue, assumes the caller zeroed it */
    330static void throtl_service_queue_init(struct throtl_service_queue *sq)
    331{
    332	INIT_LIST_HEAD(&sq->queued[0]);
    333	INIT_LIST_HEAD(&sq->queued[1]);
    334	sq->pending_tree = RB_ROOT_CACHED;
    335	timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
    336}
    337
    338static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp,
    339						struct request_queue *q,
    340						struct blkcg *blkcg)
    341{
    342	struct throtl_grp *tg;
    343	int rw;
    344
    345	tg = kzalloc_node(sizeof(*tg), gfp, q->node);
    346	if (!tg)
    347		return NULL;
    348
    349	if (blkg_rwstat_init(&tg->stat_bytes, gfp))
    350		goto err_free_tg;
    351
    352	if (blkg_rwstat_init(&tg->stat_ios, gfp))
    353		goto err_exit_stat_bytes;
    354
    355	throtl_service_queue_init(&tg->service_queue);
    356
    357	for (rw = READ; rw <= WRITE; rw++) {
    358		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
    359		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
    360	}
    361
    362	RB_CLEAR_NODE(&tg->rb_node);
    363	tg->bps[READ][LIMIT_MAX] = U64_MAX;
    364	tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
    365	tg->iops[READ][LIMIT_MAX] = UINT_MAX;
    366	tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
    367	tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
    368	tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
    369	tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
    370	tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
    371	/* LIMIT_LOW will have default value 0 */
    372
    373	tg->latency_target = DFL_LATENCY_TARGET;
    374	tg->latency_target_conf = DFL_LATENCY_TARGET;
    375	tg->idletime_threshold = DFL_IDLE_THRESHOLD;
    376	tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;
    377
    378	return &tg->pd;
    379
    380err_exit_stat_bytes:
    381	blkg_rwstat_exit(&tg->stat_bytes);
    382err_free_tg:
    383	kfree(tg);
    384	return NULL;
    385}
    386
    387static void throtl_pd_init(struct blkg_policy_data *pd)
    388{
    389	struct throtl_grp *tg = pd_to_tg(pd);
    390	struct blkcg_gq *blkg = tg_to_blkg(tg);
    391	struct throtl_data *td = blkg->q->td;
    392	struct throtl_service_queue *sq = &tg->service_queue;
    393
    394	/*
    395	 * If on the default hierarchy, we switch to properly hierarchical
    396	 * behavior where limits on a given throtl_grp are applied to the
    397	 * whole subtree rather than just the group itself.  e.g. If 16M
    398	 * read_bps limit is set on the root group, the whole system can't
    399	 * exceed 16M for the device.
    400	 *
    401	 * If not on the default hierarchy, the broken flat hierarchy
    402	 * behavior is retained where all throtl_grps are treated as if
    403	 * they're all separate root groups right below throtl_data.
    404	 * Limits of a group don't interact with limits of other groups
    405	 * regardless of the position of the group in the hierarchy.
    406	 */
    407	sq->parent_sq = &td->service_queue;
    408	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
    409		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
    410	tg->td = td;
    411}
    412
    413/*
    414 * Set has_rules[] if @tg or any of its parents have limits configured.
    415 * This doesn't require walking up to the top of the hierarchy as the
    416 * parent's has_rules[] is guaranteed to be correct.
    417 */
    418static void tg_update_has_rules(struct throtl_grp *tg)
    419{
    420	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
    421	struct throtl_data *td = tg->td;
    422	int rw;
    423	int has_iops_limit = 0;
    424
    425	for (rw = READ; rw <= WRITE; rw++) {
    426		unsigned int iops_limit = tg_iops_limit(tg, rw);
    427
    428		tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
    429			(td->limit_valid[td->limit_index] &&
    430			 (tg_bps_limit(tg, rw) != U64_MAX ||
    431			  iops_limit != UINT_MAX));
    432
    433		if (iops_limit != UINT_MAX)
    434			has_iops_limit = 1;
    435	}
    436
    437	if (has_iops_limit)
    438		tg->flags |= THROTL_TG_HAS_IOPS_LIMIT;
    439	else
    440		tg->flags &= ~THROTL_TG_HAS_IOPS_LIMIT;
    441}
    442
    443static void throtl_pd_online(struct blkg_policy_data *pd)
    444{
    445	struct throtl_grp *tg = pd_to_tg(pd);
    446	/*
     447	 * We don't want new groups to escape the limits of their ancestors.
    448	 * Update has_rules[] after a new group is brought online.
    449	 */
    450	tg_update_has_rules(tg);
    451}
    452
    453#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
    454static void blk_throtl_update_limit_valid(struct throtl_data *td)
    455{
    456	struct cgroup_subsys_state *pos_css;
    457	struct blkcg_gq *blkg;
    458	bool low_valid = false;
    459
    460	rcu_read_lock();
    461	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
    462		struct throtl_grp *tg = blkg_to_tg(blkg);
    463
    464		if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
    465		    tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) {
    466			low_valid = true;
    467			break;
    468		}
    469	}
    470	rcu_read_unlock();
    471
    472	td->limit_valid[LIMIT_LOW] = low_valid;
    473}
    474#else
    475static inline void blk_throtl_update_limit_valid(struct throtl_data *td)
    476{
    477}
    478#endif
    479
    480static void throtl_upgrade_state(struct throtl_data *td);
    481static void throtl_pd_offline(struct blkg_policy_data *pd)
    482{
    483	struct throtl_grp *tg = pd_to_tg(pd);
    484
    485	tg->bps[READ][LIMIT_LOW] = 0;
    486	tg->bps[WRITE][LIMIT_LOW] = 0;
    487	tg->iops[READ][LIMIT_LOW] = 0;
    488	tg->iops[WRITE][LIMIT_LOW] = 0;
    489
    490	blk_throtl_update_limit_valid(tg->td);
    491
    492	if (!tg->td->limit_valid[tg->td->limit_index])
    493		throtl_upgrade_state(tg->td);
    494}
    495
    496static void throtl_pd_free(struct blkg_policy_data *pd)
    497{
    498	struct throtl_grp *tg = pd_to_tg(pd);
    499
    500	del_timer_sync(&tg->service_queue.pending_timer);
    501	blkg_rwstat_exit(&tg->stat_bytes);
    502	blkg_rwstat_exit(&tg->stat_ios);
    503	kfree(tg);
    504}
    505
    506static struct throtl_grp *
    507throtl_rb_first(struct throtl_service_queue *parent_sq)
    508{
    509	struct rb_node *n;
    510
    511	n = rb_first_cached(&parent_sq->pending_tree);
    512	WARN_ON_ONCE(!n);
    513	if (!n)
    514		return NULL;
    515	return rb_entry_tg(n);
    516}
    517
    518static void throtl_rb_erase(struct rb_node *n,
    519			    struct throtl_service_queue *parent_sq)
    520{
    521	rb_erase_cached(n, &parent_sq->pending_tree);
    522	RB_CLEAR_NODE(n);
    523	--parent_sq->nr_pending;
    524}
    525
    526static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
    527{
    528	struct throtl_grp *tg;
    529
    530	tg = throtl_rb_first(parent_sq);
    531	if (!tg)
    532		return;
    533
    534	parent_sq->first_pending_disptime = tg->disptime;
    535}
    536
    537static void tg_service_queue_add(struct throtl_grp *tg)
    538{
    539	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
    540	struct rb_node **node = &parent_sq->pending_tree.rb_root.rb_node;
    541	struct rb_node *parent = NULL;
    542	struct throtl_grp *__tg;
    543	unsigned long key = tg->disptime;
    544	bool leftmost = true;
    545
    546	while (*node != NULL) {
    547		parent = *node;
    548		__tg = rb_entry_tg(parent);
    549
    550		if (time_before(key, __tg->disptime))
    551			node = &parent->rb_left;
    552		else {
    553			node = &parent->rb_right;
    554			leftmost = false;
    555		}
    556	}
    557
    558	rb_link_node(&tg->rb_node, parent, node);
    559	rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree,
    560			       leftmost);
    561}
    562
    563static void throtl_enqueue_tg(struct throtl_grp *tg)
    564{
    565	if (!(tg->flags & THROTL_TG_PENDING)) {
    566		tg_service_queue_add(tg);
    567		tg->flags |= THROTL_TG_PENDING;
    568		tg->service_queue.parent_sq->nr_pending++;
    569	}
    570}
    571
    572static void throtl_dequeue_tg(struct throtl_grp *tg)
    573{
    574	if (tg->flags & THROTL_TG_PENDING) {
    575		throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
    576		tg->flags &= ~THROTL_TG_PENDING;
    577	}
    578}
    579
    580/* Call with queue lock held */
    581static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
    582					  unsigned long expires)
    583{
    584	unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
    585
    586	/*
    587	 * Since we are adjusting the throttle limit dynamically, the sleep
     588	 * time calculated according to the previous limit might be invalid. It's
     589	 * possible that the cgroup's sleep time is very long while no other
     590	 * cgroup has IO running to notice the limit change. Make sure the cgroup
     591	 * doesn't sleep too long so that the notification isn't missed.
    592	 */
    593	if (time_after(expires, max_expire))
    594		expires = max_expire;
    595	mod_timer(&sq->pending_timer, expires);
    596	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
    597		   expires - jiffies, jiffies);
    598}
    599
    600/**
    601 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
    602 * @sq: the service_queue to schedule dispatch for
    603 * @force: force scheduling
    604 *
    605 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
    606 * dispatch time of the first pending child.  Returns %true if either timer
    607 * is armed or there's no pending child left.  %false if the current
    608 * dispatch window is still open and the caller should continue
    609 * dispatching.
    610 *
    611 * If @force is %true, the dispatch timer is always scheduled and this
    612 * function is guaranteed to return %true.  This is to be used when the
    613 * caller can't dispatch itself and needs to invoke pending_timer
     614 * unconditionally.  Note that forced scheduling is likely to induce a short
    615 * delay before dispatch starts even if @sq->first_pending_disptime is not
    616 * in the future and thus shouldn't be used in hot paths.
    617 */
    618static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
    619					  bool force)
    620{
    621	/* any pending children left? */
    622	if (!sq->nr_pending)
    623		return true;
    624
    625	update_min_dispatch_time(sq);
    626
    627	/* is the next dispatch time in the future? */
    628	if (force || time_after(sq->first_pending_disptime, jiffies)) {
    629		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
    630		return true;
    631	}
    632
    633	/* tell the caller to continue dispatching */
    634	return false;
    635}
    636
    637static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
    638		bool rw, unsigned long start)
    639{
    640	tg->bytes_disp[rw] = 0;
    641	tg->io_disp[rw] = 0;
    642
    643	/*
     644	 * The previous slice has expired. We must have trimmed it after the
     645	 * last bio dispatch. That means that since the start of the last slice,
     646	 * we never used that bandwidth. Do try to make use of that bandwidth
     647	 * while giving credit.
    648	 */
    649	if (time_after_eq(start, tg->slice_start[rw]))
    650		tg->slice_start[rw] = start;
    651
    652	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
    653	throtl_log(&tg->service_queue,
    654		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
    655		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
    656		   tg->slice_end[rw], jiffies);
    657}
    658
    659static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
    660{
    661	tg->bytes_disp[rw] = 0;
    662	tg->io_disp[rw] = 0;
    663	tg->slice_start[rw] = jiffies;
    664	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
    665
    666	throtl_log(&tg->service_queue,
    667		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
    668		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
    669		   tg->slice_end[rw], jiffies);
    670}
    671
    672static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
    673					unsigned long jiffy_end)
    674{
    675	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
    676}
    677
    678static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
    679				       unsigned long jiffy_end)
    680{
    681	throtl_set_slice_end(tg, rw, jiffy_end);
    682	throtl_log(&tg->service_queue,
    683		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
    684		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
    685		   tg->slice_end[rw], jiffies);
    686}
    687
     689/* Determine whether the previously allocated or extended slice is complete */
    689static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
    690{
    691	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
    692		return false;
    693
    694	return true;
    695}
    696
    697/* Trim the used slices and adjust slice start accordingly */
    698static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
    699{
    700	unsigned long nr_slices, time_elapsed, io_trim;
    701	u64 bytes_trim, tmp;
    702
    703	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
    704
    705	/*
     706	 * If bps is unlimited (-1), then the time slice doesn't get
     707	 * renewed. Don't try to trim the slice if the slice is used up. A new
     708	 * slice will start when appropriate.
    709	 */
    710	if (throtl_slice_used(tg, rw))
    711		return;
    712
    713	/*
     714	 * A bio has been dispatched, so also adjust slice_end. It might happen
     715	 * that the cgroup limit was initially very low, resulting in a high
     716	 * slice_end, but the limit was later bumped up and the bio was dispatched
     717	 * sooner; then we need to reduce slice_end. A bogus high slice_end
     718	 * is bad because it does not allow a new slice to start.
    719	 */
    720
    721	throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
    722
    723	time_elapsed = jiffies - tg->slice_start[rw];
    724
    725	nr_slices = time_elapsed / tg->td->throtl_slice;
    726
    727	if (!nr_slices)
    728		return;
    729	tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
    730	do_div(tmp, HZ);
    731	bytes_trim = tmp;
    732
    733	io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
    734		HZ;
    735
    736	if (!bytes_trim && !io_trim)
    737		return;
    738
    739	if (tg->bytes_disp[rw] >= bytes_trim)
    740		tg->bytes_disp[rw] -= bytes_trim;
    741	else
    742		tg->bytes_disp[rw] = 0;
    743
    744	if (tg->io_disp[rw] >= io_trim)
    745		tg->io_disp[rw] -= io_trim;
    746	else
    747		tg->io_disp[rw] = 0;
    748
    749	tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;
    750
    751	throtl_log(&tg->service_queue,
    752		   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
    753		   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
    754		   tg->slice_start[rw], tg->slice_end[rw], jiffies);
    755}
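
As a worked example of the trim arithmetic (a userspace sketch with assumed values, not kernel code): with HZ = 100, a throtl_slice of HZ/10 and a 1 MiB/s bps limit, two fully elapsed slices correspond to 1048576 * 10 * 2 / 100 = 209715 bytes of budget that can be trimmed from bytes_disp.

#include <stdint.h>
#include <stdio.h>

#define EX_HZ 100
#define EX_THROTL_SLICE (EX_HZ / 10)

int main(void)
{
	uint64_t bps_limit = 1024 * 1024;  /* 1 MiB/s, assumed */
	uint64_t bytes_disp = 500000;      /* bytes dispatched so far, assumed */
	unsigned long nr_slices = 2;       /* fully elapsed slices */
	uint64_t bytes_trim;

	/* budget earned over the elapsed slices, as in throtl_trim_slice() */
	bytes_trim = bps_limit * EX_THROTL_SLICE * nr_slices / EX_HZ;

	if (bytes_disp >= bytes_trim)
		bytes_disp -= bytes_trim;
	else
		bytes_disp = 0;

	printf("bytes_trim=%llu remaining bytes_disp=%llu\n",
	       (unsigned long long)bytes_trim, (unsigned long long)bytes_disp);
	return 0;
}
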
    756
    757static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
    758				  u32 iops_limit, unsigned long *wait)
    759{
    760	bool rw = bio_data_dir(bio);
    761	unsigned int io_allowed;
    762	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
    763	u64 tmp;
    764
    765	if (iops_limit == UINT_MAX) {
    766		if (wait)
    767			*wait = 0;
    768		return true;
    769	}
    770
    771	jiffy_elapsed = jiffies - tg->slice_start[rw];
    772
    773	/* Round up to the next throttle slice, wait time must be nonzero */
    774	jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
    775
    776	/*
     777	 * jiffy_elapsed_rnd should not be a big value: the minimum iops can be
     778	 * 1, so at most the elapsed jiffies should be equivalent to 1 second,
     779	 * since we will allow a dispatch after 1 second and after that the
     780	 * slice should have been trimmed.
    781	 */
    782
    783	tmp = (u64)iops_limit * jiffy_elapsed_rnd;
    784	do_div(tmp, HZ);
    785
    786	if (tmp > UINT_MAX)
    787		io_allowed = UINT_MAX;
    788	else
    789		io_allowed = tmp;
    790
    791	if (tg->io_disp[rw] + 1 <= io_allowed) {
    792		if (wait)
    793			*wait = 0;
    794		return true;
    795	}
    796
    797	/* Calc approx time to dispatch */
    798	jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;
    799
    800	if (wait)
    801		*wait = jiffy_wait;
    802	return false;
    803}
    804
    805static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
    806				 u64 bps_limit, unsigned long *wait)
    807{
    808	bool rw = bio_data_dir(bio);
    809	u64 bytes_allowed, extra_bytes, tmp;
    810	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
    811	unsigned int bio_size = throtl_bio_data_size(bio);
    812
    813	/* no need to throttle if this bio's bytes have been accounted */
    814	if (bps_limit == U64_MAX || bio_flagged(bio, BIO_THROTTLED)) {
    815		if (wait)
    816			*wait = 0;
    817		return true;
    818	}
    819
    820	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
    821
    822	/* Slice has just started. Consider one slice interval */
    823	if (!jiffy_elapsed)
    824		jiffy_elapsed_rnd = tg->td->throtl_slice;
    825
    826	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
    827
    828	tmp = bps_limit * jiffy_elapsed_rnd;
    829	do_div(tmp, HZ);
    830	bytes_allowed = tmp;
    831
    832	if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
    833		if (wait)
    834			*wait = 0;
    835		return true;
    836	}
    837
    838	/* Calc approx time to dispatch */
    839	extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
    840	jiffy_wait = div64_u64(extra_bytes * HZ, bps_limit);
    841
    842	if (!jiffy_wait)
    843		jiffy_wait = 1;
    844
    845	/*
     846	 * This wait time does not take into consideration the rounding
     847	 * up we did. Add that time as well.
    848	 */
    849	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
    850	if (wait)
    851		*wait = jiffy_wait;
    852	return false;
    853}
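
The wait-time arithmetic above can be checked with concrete numbers. The sketch below is userspace-only and all values (HZ, slice length, limit, dispatched bytes, bio size, elapsed jiffies) are assumptions for illustration: the bytes allowed so far come from the rounded-up elapsed time, the excess is converted back into jiffies, and the rounding is added back at the end.

#include <stdint.h>
#include <stdio.h>

#define EX_HZ 100
#define EX_THROTL_SLICE (EX_HZ / 10)

/* round v up to the next multiple of m, like the kernel's roundup() */
static unsigned long round_up_to(unsigned long v, unsigned long m)
{
	return ((v + m - 1) / m) * m;
}

int main(void)
{
	uint64_t bps_limit = 1024 * 1024;   /* 1 MiB/s limit, assumed */
	uint64_t bytes_disp = 300 * 1024;   /* already dispatched this slice */
	uint64_t bio_size = 256 * 1024;     /* size of the queued bio */
	unsigned long jiffy_elapsed = 3;    /* jiffies since slice_start */
	unsigned long jiffy_elapsed_rnd, jiffy_wait;
	uint64_t bytes_allowed, extra_bytes;

	jiffy_elapsed_rnd = round_up_to(jiffy_elapsed ? jiffy_elapsed :
					EX_THROTL_SLICE, EX_THROTL_SLICE);
	bytes_allowed = bps_limit * jiffy_elapsed_rnd / EX_HZ;

	if (bytes_disp + bio_size <= bytes_allowed) {
		printf("dispatch now\n");
		return 0;
	}

	extra_bytes = bytes_disp + bio_size - bytes_allowed;
	jiffy_wait = extra_bytes * EX_HZ / bps_limit;
	if (!jiffy_wait)
		jiffy_wait = 1;
	/* add back the rounding that was applied to the elapsed time */
	jiffy_wait += jiffy_elapsed_rnd - jiffy_elapsed;

	printf("wait roughly %lu jiffies\n", jiffy_wait);
	return 0;
}
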
    854
    855/*
     856 * Returns whether a bio can be dispatched. Also returns the approximate number
     857 * of jiffies to wait before this bio is within the IO rate and can be dispatched
    858 */
    859static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
    860			    unsigned long *wait)
    861{
    862	bool rw = bio_data_dir(bio);
    863	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
    864	u64 bps_limit = tg_bps_limit(tg, rw);
    865	u32 iops_limit = tg_iops_limit(tg, rw);
    866
    867	/*
     868	 * Currently the whole state machine of the group depends on the first
     869	 * bio queued in the group's bio list. So one should not be calling
     870	 * this function with a different bio if there are other bios
     871	 * queued.
    872	 */
    873	BUG_ON(tg->service_queue.nr_queued[rw] &&
    874	       bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
    875
    876	/* If tg->bps = -1, then BW is unlimited */
    877	if ((bps_limit == U64_MAX && iops_limit == UINT_MAX) ||
    878	    tg->flags & THROTL_TG_CANCELING) {
    879		if (wait)
    880			*wait = 0;
    881		return true;
    882	}
    883
    884	/*
     885	 * If the previous slice expired, start a new one; otherwise renew/extend
     886	 * the existing slice to make sure it is at least one throtl_slice interval
     887	 * long from now. A new slice is started only for an empty throttle group.
     888	 * If there is a queued bio, that means there should be an active
     889	 * slice and it should be extended instead.
    890	 */
    891	if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
    892		throtl_start_new_slice(tg, rw);
    893	else {
    894		if (time_before(tg->slice_end[rw],
    895		    jiffies + tg->td->throtl_slice))
    896			throtl_extend_slice(tg, rw,
    897				jiffies + tg->td->throtl_slice);
    898	}
    899
    900	if (tg_with_in_bps_limit(tg, bio, bps_limit, &bps_wait) &&
    901	    tg_with_in_iops_limit(tg, bio, iops_limit, &iops_wait)) {
    902		if (wait)
    903			*wait = 0;
    904		return true;
    905	}
    906
    907	max_wait = max(bps_wait, iops_wait);
    908
    909	if (wait)
    910		*wait = max_wait;
    911
    912	if (time_before(tg->slice_end[rw], jiffies + max_wait))
    913		throtl_extend_slice(tg, rw, jiffies + max_wait);
    914
    915	return false;
    916}
    917
    918static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
    919{
    920	bool rw = bio_data_dir(bio);
    921	unsigned int bio_size = throtl_bio_data_size(bio);
    922
    923	/* Charge the bio to the group */
    924	if (!bio_flagged(bio, BIO_THROTTLED)) {
    925		tg->bytes_disp[rw] += bio_size;
    926		tg->last_bytes_disp[rw] += bio_size;
    927	}
    928
    929	tg->io_disp[rw]++;
    930	tg->last_io_disp[rw]++;
    931
    932	/*
     933	 * BIO_THROTTLED is used to prevent the same bio from being throttled
     934	 * more than once, as a throttled bio will go through blk-throtl a
     935	 * second time when it eventually gets issued.  Set it when a bio
     936	 * is being charged to a tg.
    937	 */
    938	if (!bio_flagged(bio, BIO_THROTTLED))
    939		bio_set_flag(bio, BIO_THROTTLED);
    940}
    941
    942/**
    943 * throtl_add_bio_tg - add a bio to the specified throtl_grp
    944 * @bio: bio to add
    945 * @qn: qnode to use
    946 * @tg: the target throtl_grp
    947 *
    948 * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
    949 * tg->qnode_on_self[] is used.
    950 */
    951static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
    952			      struct throtl_grp *tg)
    953{
    954	struct throtl_service_queue *sq = &tg->service_queue;
    955	bool rw = bio_data_dir(bio);
    956
    957	if (!qn)
    958		qn = &tg->qnode_on_self[rw];
    959
    960	/*
    961	 * If @tg doesn't currently have any bios queued in the same
    962	 * direction, queueing @bio can change when @tg should be
    963	 * dispatched.  Mark that @tg was empty.  This is automatically
    964	 * cleared on the next tg_update_disptime().
    965	 */
    966	if (!sq->nr_queued[rw])
    967		tg->flags |= THROTL_TG_WAS_EMPTY;
    968
    969	throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
    970
    971	sq->nr_queued[rw]++;
    972	throtl_enqueue_tg(tg);
    973}
    974
    975static void tg_update_disptime(struct throtl_grp *tg)
    976{
    977	struct throtl_service_queue *sq = &tg->service_queue;
    978	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
    979	struct bio *bio;
    980
    981	bio = throtl_peek_queued(&sq->queued[READ]);
    982	if (bio)
    983		tg_may_dispatch(tg, bio, &read_wait);
    984
    985	bio = throtl_peek_queued(&sq->queued[WRITE]);
    986	if (bio)
    987		tg_may_dispatch(tg, bio, &write_wait);
    988
    989	min_wait = min(read_wait, write_wait);
    990	disptime = jiffies + min_wait;
    991
    992	/* Update dispatch time */
    993	throtl_dequeue_tg(tg);
    994	tg->disptime = disptime;
    995	throtl_enqueue_tg(tg);
    996
    997	/* see throtl_add_bio_tg() */
    998	tg->flags &= ~THROTL_TG_WAS_EMPTY;
    999}
   1000
   1001static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
   1002					struct throtl_grp *parent_tg, bool rw)
   1003{
   1004	if (throtl_slice_used(parent_tg, rw)) {
   1005		throtl_start_new_slice_with_credit(parent_tg, rw,
   1006				child_tg->slice_start[rw]);
   1007	}
   1008
   1009}
   1010
   1011static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
   1012{
   1013	struct throtl_service_queue *sq = &tg->service_queue;
   1014	struct throtl_service_queue *parent_sq = sq->parent_sq;
   1015	struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
   1016	struct throtl_grp *tg_to_put = NULL;
   1017	struct bio *bio;
   1018
   1019	/*
   1020	 * @bio is being transferred from @tg to @parent_sq.  Popping a bio
   1021	 * from @tg may put its reference and @parent_sq might end up
   1022	 * getting released prematurely.  Remember the tg to put and put it
   1023	 * after @bio is transferred to @parent_sq.
   1024	 */
   1025	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
   1026	sq->nr_queued[rw]--;
   1027
   1028	throtl_charge_bio(tg, bio);
   1029
   1030	/*
   1031	 * If our parent is another tg, we just need to transfer @bio to
   1032	 * the parent using throtl_add_bio_tg().  If our parent is
   1033	 * @td->service_queue, @bio is ready to be issued.  Put it on its
   1034	 * bio_lists[] and decrease total number queued.  The caller is
   1035	 * responsible for issuing these bios.
   1036	 */
   1037	if (parent_tg) {
   1038		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
   1039		start_parent_slice_with_credit(tg, parent_tg, rw);
   1040	} else {
   1041		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
   1042				     &parent_sq->queued[rw]);
   1043		BUG_ON(tg->td->nr_queued[rw] <= 0);
   1044		tg->td->nr_queued[rw]--;
   1045	}
   1046
   1047	throtl_trim_slice(tg, rw);
   1048
   1049	if (tg_to_put)
   1050		blkg_put(tg_to_blkg(tg_to_put));
   1051}
   1052
   1053static int throtl_dispatch_tg(struct throtl_grp *tg)
   1054{
   1055	struct throtl_service_queue *sq = &tg->service_queue;
   1056	unsigned int nr_reads = 0, nr_writes = 0;
   1057	unsigned int max_nr_reads = THROTL_GRP_QUANTUM * 3 / 4;
   1058	unsigned int max_nr_writes = THROTL_GRP_QUANTUM - max_nr_reads;
   1059	struct bio *bio;
   1060
   1061	/* Try to dispatch 75% READS and 25% WRITES */
   1062
   1063	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
   1064	       tg_may_dispatch(tg, bio, NULL)) {
   1065
   1066		tg_dispatch_one_bio(tg, bio_data_dir(bio));
   1067		nr_reads++;
   1068
   1069		if (nr_reads >= max_nr_reads)
   1070			break;
   1071	}
   1072
   1073	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
   1074	       tg_may_dispatch(tg, bio, NULL)) {
   1075
   1076		tg_dispatch_one_bio(tg, bio_data_dir(bio));
   1077		nr_writes++;
   1078
   1079		if (nr_writes >= max_nr_writes)
   1080			break;
   1081	}
   1082
   1083	return nr_reads + nr_writes;
   1084}
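
With THROTL_GRP_QUANTUM defined as 8 above, the 75%/25% split works out to at most 6 reads and 2 writes dispatched per group in one round. A trivial sketch of that arithmetic:

#include <stdio.h>

#define THROTL_GRP_QUANTUM 8

int main(void)
{
	unsigned int max_nr_reads = THROTL_GRP_QUANTUM * 3 / 4;          /* 6 */
	unsigned int max_nr_writes = THROTL_GRP_QUANTUM - max_nr_reads;  /* 2 */

	printf("per round: up to %u reads and %u writes per group\n",
	       max_nr_reads, max_nr_writes);
	return 0;
}
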
   1085
   1086static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
   1087{
   1088	unsigned int nr_disp = 0;
   1089
   1090	while (1) {
   1091		struct throtl_grp *tg;
   1092		struct throtl_service_queue *sq;
   1093
   1094		if (!parent_sq->nr_pending)
   1095			break;
   1096
   1097		tg = throtl_rb_first(parent_sq);
   1098		if (!tg)
   1099			break;
   1100
   1101		if (time_before(jiffies, tg->disptime))
   1102			break;
   1103
   1104		throtl_dequeue_tg(tg);
   1105
   1106		nr_disp += throtl_dispatch_tg(tg);
   1107
   1108		sq = &tg->service_queue;
   1109		if (sq->nr_queued[0] || sq->nr_queued[1])
   1110			tg_update_disptime(tg);
   1111
   1112		if (nr_disp >= THROTL_QUANTUM)
   1113			break;
   1114	}
   1115
   1116	return nr_disp;
   1117}
   1118
   1119static bool throtl_can_upgrade(struct throtl_data *td,
   1120	struct throtl_grp *this_tg);
   1121/**
   1122 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
   1123 * @t: the pending_timer member of the throtl_service_queue being serviced
   1124 *
    1125 * This timer is armed when a child throtl_grp with active bios becomes
    1126 * pending and queued on the service_queue's pending_tree, and expires when
   1127 * the first child throtl_grp should be dispatched.  This function
    1128 * dispatches bios from the child throtl_grps to the parent
   1129 * service_queue.
   1130 *
   1131 * If the parent's parent is another throtl_grp, dispatching is propagated
   1132 * by either arming its pending_timer or repeating dispatch directly.  If
   1133 * the top-level service_tree is reached, throtl_data->dispatch_work is
    1134 * kicked so that the ready bios are issued.
   1135 */
   1136static void throtl_pending_timer_fn(struct timer_list *t)
   1137{
   1138	struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
   1139	struct throtl_grp *tg = sq_to_tg(sq);
   1140	struct throtl_data *td = sq_to_td(sq);
   1141	struct throtl_service_queue *parent_sq;
   1142	struct request_queue *q;
   1143	bool dispatched;
   1144	int ret;
   1145
   1146	/* throtl_data may be gone, so figure out request queue by blkg */
   1147	if (tg)
   1148		q = tg->pd.blkg->q;
   1149	else
   1150		q = td->queue;
   1151
   1152	spin_lock_irq(&q->queue_lock);
   1153
   1154	if (!q->root_blkg)
   1155		goto out_unlock;
   1156
   1157	if (throtl_can_upgrade(td, NULL))
   1158		throtl_upgrade_state(td);
   1159
   1160again:
   1161	parent_sq = sq->parent_sq;
   1162	dispatched = false;
   1163
   1164	while (true) {
   1165		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
   1166			   sq->nr_queued[READ] + sq->nr_queued[WRITE],
   1167			   sq->nr_queued[READ], sq->nr_queued[WRITE]);
   1168
   1169		ret = throtl_select_dispatch(sq);
   1170		if (ret) {
   1171			throtl_log(sq, "bios disp=%u", ret);
   1172			dispatched = true;
   1173		}
   1174
   1175		if (throtl_schedule_next_dispatch(sq, false))
   1176			break;
   1177
    1178		/* this dispatch window is still open, relax and repeat */
   1179		spin_unlock_irq(&q->queue_lock);
   1180		cpu_relax();
   1181		spin_lock_irq(&q->queue_lock);
   1182	}
   1183
   1184	if (!dispatched)
   1185		goto out_unlock;
   1186
   1187	if (parent_sq) {
    1188		/* @parent_sq is another throtl_grp, propagate dispatch */
   1189		if (tg->flags & THROTL_TG_WAS_EMPTY) {
   1190			tg_update_disptime(tg);
   1191			if (!throtl_schedule_next_dispatch(parent_sq, false)) {
   1192				/* window is already open, repeat dispatching */
   1193				sq = parent_sq;
   1194				tg = sq_to_tg(sq);
   1195				goto again;
   1196			}
   1197		}
   1198	} else {
   1199		/* reached the top-level, queue issuing */
   1200		queue_work(kthrotld_workqueue, &td->dispatch_work);
   1201	}
   1202out_unlock:
   1203	spin_unlock_irq(&q->queue_lock);
   1204}
   1205
   1206/**
   1207 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
   1208 * @work: work item being executed
   1209 *
   1210 * This function is queued for execution when bios reach the bio_lists[]
   1211 * of throtl_data->service_queue.  Those bios are ready and issued by this
   1212 * function.
   1213 */
   1214static void blk_throtl_dispatch_work_fn(struct work_struct *work)
   1215{
   1216	struct throtl_data *td = container_of(work, struct throtl_data,
   1217					      dispatch_work);
   1218	struct throtl_service_queue *td_sq = &td->service_queue;
   1219	struct request_queue *q = td->queue;
   1220	struct bio_list bio_list_on_stack;
   1221	struct bio *bio;
   1222	struct blk_plug plug;
   1223	int rw;
   1224
   1225	bio_list_init(&bio_list_on_stack);
   1226
   1227	spin_lock_irq(&q->queue_lock);
   1228	for (rw = READ; rw <= WRITE; rw++)
   1229		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
   1230			bio_list_add(&bio_list_on_stack, bio);
   1231	spin_unlock_irq(&q->queue_lock);
   1232
   1233	if (!bio_list_empty(&bio_list_on_stack)) {
   1234		blk_start_plug(&plug);
   1235		while ((bio = bio_list_pop(&bio_list_on_stack)))
   1236			submit_bio_noacct_nocheck(bio);
   1237		blk_finish_plug(&plug);
   1238	}
   1239}
   1240
   1241static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
   1242			      int off)
   1243{
   1244	struct throtl_grp *tg = pd_to_tg(pd);
   1245	u64 v = *(u64 *)((void *)tg + off);
   1246
   1247	if (v == U64_MAX)
   1248		return 0;
   1249	return __blkg_prfill_u64(sf, pd, v);
   1250}
   1251
   1252static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
   1253			       int off)
   1254{
   1255	struct throtl_grp *tg = pd_to_tg(pd);
   1256	unsigned int v = *(unsigned int *)((void *)tg + off);
   1257
   1258	if (v == UINT_MAX)
   1259		return 0;
   1260	return __blkg_prfill_u64(sf, pd, v);
   1261}
   1262
   1263static int tg_print_conf_u64(struct seq_file *sf, void *v)
   1264{
   1265	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
   1266			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
   1267	return 0;
   1268}
   1269
   1270static int tg_print_conf_uint(struct seq_file *sf, void *v)
   1271{
   1272	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
   1273			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
   1274	return 0;
   1275}
   1276
   1277static void tg_conf_updated(struct throtl_grp *tg, bool global)
   1278{
   1279	struct throtl_service_queue *sq = &tg->service_queue;
   1280	struct cgroup_subsys_state *pos_css;
   1281	struct blkcg_gq *blkg;
   1282
   1283	throtl_log(&tg->service_queue,
   1284		   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
   1285		   tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
   1286		   tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
   1287
   1288	/*
   1289	 * Update has_rules[] flags for the updated tg's subtree.  A tg is
   1290	 * considered to have rules if either the tg itself or any of its
   1291	 * ancestors has rules.  This identifies groups without any
   1292	 * restrictions in the whole hierarchy and allows them to bypass
   1293	 * blk-throttle.
   1294	 */
   1295	blkg_for_each_descendant_pre(blkg, pos_css,
   1296			global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
   1297		struct throtl_grp *this_tg = blkg_to_tg(blkg);
   1298		struct throtl_grp *parent_tg;
   1299
   1300		tg_update_has_rules(this_tg);
   1301		/* ignore root/second level */
   1302		if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
   1303		    !blkg->parent->parent)
   1304			continue;
   1305		parent_tg = blkg_to_tg(blkg->parent);
   1306		/*
    1307		 * make sure all children have a lower idle time threshold and
    1308		 * a higher latency target
   1309		 */
   1310		this_tg->idletime_threshold = min(this_tg->idletime_threshold,
   1311				parent_tg->idletime_threshold);
   1312		this_tg->latency_target = max(this_tg->latency_target,
   1313				parent_tg->latency_target);
   1314	}
   1315
   1316	/*
   1317	 * We're already holding queue_lock and know @tg is valid.  Let's
   1318	 * apply the new config directly.
   1319	 *
    1320	 * Restart the slices for both READ and WRITE. It might happen
    1321	 * that a group's limits are dropped suddenly and we don't want to
    1322	 * account recently dispatched IO against the new low rate.
   1323	 */
   1324	throtl_start_new_slice(tg, READ);
   1325	throtl_start_new_slice(tg, WRITE);
   1326
   1327	if (tg->flags & THROTL_TG_PENDING) {
   1328		tg_update_disptime(tg);
   1329		throtl_schedule_next_dispatch(sq->parent_sq, true);
   1330	}
   1331}
   1332
   1333static ssize_t tg_set_conf(struct kernfs_open_file *of,
   1334			   char *buf, size_t nbytes, loff_t off, bool is_u64)
   1335{
   1336	struct blkcg *blkcg = css_to_blkcg(of_css(of));
   1337	struct blkg_conf_ctx ctx;
   1338	struct throtl_grp *tg;
   1339	int ret;
   1340	u64 v;
   1341
   1342	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
   1343	if (ret)
   1344		return ret;
   1345
   1346	ret = -EINVAL;
   1347	if (sscanf(ctx.body, "%llu", &v) != 1)
   1348		goto out_finish;
   1349	if (!v)
   1350		v = U64_MAX;
   1351
   1352	tg = blkg_to_tg(ctx.blkg);
   1353
   1354	if (is_u64)
   1355		*(u64 *)((void *)tg + of_cft(of)->private) = v;
   1356	else
   1357		*(unsigned int *)((void *)tg + of_cft(of)->private) = v;
   1358
   1359	tg_conf_updated(tg, false);
   1360	ret = 0;
   1361out_finish:
   1362	blkg_conf_finish(&ctx);
   1363	return ret ?: nbytes;
   1364}
   1365
   1366static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
   1367			       char *buf, size_t nbytes, loff_t off)
   1368{
   1369	return tg_set_conf(of, buf, nbytes, off, true);
   1370}
   1371
   1372static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
   1373				char *buf, size_t nbytes, loff_t off)
   1374{
   1375	return tg_set_conf(of, buf, nbytes, off, false);
   1376}
   1377
   1378static int tg_print_rwstat(struct seq_file *sf, void *v)
   1379{
   1380	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
   1381			  blkg_prfill_rwstat, &blkcg_policy_throtl,
   1382			  seq_cft(sf)->private, true);
   1383	return 0;
   1384}
   1385
   1386static u64 tg_prfill_rwstat_recursive(struct seq_file *sf,
   1387				      struct blkg_policy_data *pd, int off)
   1388{
   1389	struct blkg_rwstat_sample sum;
   1390
   1391	blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_throtl, off,
   1392				  &sum);
   1393	return __blkg_prfill_rwstat(sf, pd, &sum);
   1394}
   1395
   1396static int tg_print_rwstat_recursive(struct seq_file *sf, void *v)
   1397{
   1398	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
   1399			  tg_prfill_rwstat_recursive, &blkcg_policy_throtl,
   1400			  seq_cft(sf)->private, true);
   1401	return 0;
   1402}
   1403
   1404static struct cftype throtl_legacy_files[] = {
   1405	{
   1406		.name = "throttle.read_bps_device",
   1407		.private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
   1408		.seq_show = tg_print_conf_u64,
   1409		.write = tg_set_conf_u64,
   1410	},
   1411	{
   1412		.name = "throttle.write_bps_device",
   1413		.private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
   1414		.seq_show = tg_print_conf_u64,
   1415		.write = tg_set_conf_u64,
   1416	},
   1417	{
   1418		.name = "throttle.read_iops_device",
   1419		.private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
   1420		.seq_show = tg_print_conf_uint,
   1421		.write = tg_set_conf_uint,
   1422	},
   1423	{
   1424		.name = "throttle.write_iops_device",
   1425		.private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
   1426		.seq_show = tg_print_conf_uint,
   1427		.write = tg_set_conf_uint,
   1428	},
   1429	{
   1430		.name = "throttle.io_service_bytes",
   1431		.private = offsetof(struct throtl_grp, stat_bytes),
   1432		.seq_show = tg_print_rwstat,
   1433	},
   1434	{
   1435		.name = "throttle.io_service_bytes_recursive",
   1436		.private = offsetof(struct throtl_grp, stat_bytes),
   1437		.seq_show = tg_print_rwstat_recursive,
   1438	},
   1439	{
   1440		.name = "throttle.io_serviced",
   1441		.private = offsetof(struct throtl_grp, stat_ios),
   1442		.seq_show = tg_print_rwstat,
   1443	},
   1444	{
   1445		.name = "throttle.io_serviced_recursive",
   1446		.private = offsetof(struct throtl_grp, stat_ios),
   1447		.seq_show = tg_print_rwstat_recursive,
   1448	},
   1449	{ }	/* terminate */
   1450};
   1451
   1452static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
   1453			 int off)
   1454{
   1455	struct throtl_grp *tg = pd_to_tg(pd);
   1456	const char *dname = blkg_dev_name(pd->blkg);
   1457	char bufs[4][21] = { "max", "max", "max", "max" };
   1458	u64 bps_dft;
   1459	unsigned int iops_dft;
   1460	char idle_time[26] = "";
   1461	char latency_time[26] = "";
   1462
   1463	if (!dname)
   1464		return 0;
   1465
   1466	if (off == LIMIT_LOW) {
   1467		bps_dft = 0;
   1468		iops_dft = 0;
   1469	} else {
   1470		bps_dft = U64_MAX;
   1471		iops_dft = UINT_MAX;
   1472	}
   1473
   1474	if (tg->bps_conf[READ][off] == bps_dft &&
   1475	    tg->bps_conf[WRITE][off] == bps_dft &&
   1476	    tg->iops_conf[READ][off] == iops_dft &&
   1477	    tg->iops_conf[WRITE][off] == iops_dft &&
   1478	    (off != LIMIT_LOW ||
   1479	     (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
   1480	      tg->latency_target_conf == DFL_LATENCY_TARGET)))
   1481		return 0;
   1482
   1483	if (tg->bps_conf[READ][off] != U64_MAX)
   1484		snprintf(bufs[0], sizeof(bufs[0]), "%llu",
   1485			tg->bps_conf[READ][off]);
   1486	if (tg->bps_conf[WRITE][off] != U64_MAX)
   1487		snprintf(bufs[1], sizeof(bufs[1]), "%llu",
   1488			tg->bps_conf[WRITE][off]);
   1489	if (tg->iops_conf[READ][off] != UINT_MAX)
   1490		snprintf(bufs[2], sizeof(bufs[2]), "%u",
   1491			tg->iops_conf[READ][off]);
   1492	if (tg->iops_conf[WRITE][off] != UINT_MAX)
   1493		snprintf(bufs[3], sizeof(bufs[3]), "%u",
   1494			tg->iops_conf[WRITE][off]);
   1495	if (off == LIMIT_LOW) {
   1496		if (tg->idletime_threshold_conf == ULONG_MAX)
   1497			strcpy(idle_time, " idle=max");
   1498		else
   1499			snprintf(idle_time, sizeof(idle_time), " idle=%lu",
   1500				tg->idletime_threshold_conf);
   1501
   1502		if (tg->latency_target_conf == ULONG_MAX)
   1503			strcpy(latency_time, " latency=max");
   1504		else
   1505			snprintf(latency_time, sizeof(latency_time),
   1506				" latency=%lu", tg->latency_target_conf);
   1507	}
   1508
   1509	seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
   1510		   dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time,
   1511		   latency_time);
   1512	return 0;
   1513}
   1514
   1515static int tg_print_limit(struct seq_file *sf, void *v)
   1516{
   1517	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
   1518			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
   1519	return 0;
   1520}
   1521
   1522static ssize_t tg_set_limit(struct kernfs_open_file *of,
   1523			  char *buf, size_t nbytes, loff_t off)
   1524{
   1525	struct blkcg *blkcg = css_to_blkcg(of_css(of));
   1526	struct blkg_conf_ctx ctx;
   1527	struct throtl_grp *tg;
   1528	u64 v[4];
   1529	unsigned long idle_time;
   1530	unsigned long latency_time;
   1531	int ret;
   1532	int index = of_cft(of)->private;
   1533
   1534	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
   1535	if (ret)
   1536		return ret;
   1537
   1538	tg = blkg_to_tg(ctx.blkg);
   1539
   1540	v[0] = tg->bps_conf[READ][index];
   1541	v[1] = tg->bps_conf[WRITE][index];
   1542	v[2] = tg->iops_conf[READ][index];
   1543	v[3] = tg->iops_conf[WRITE][index];
   1544
   1545	idle_time = tg->idletime_threshold_conf;
   1546	latency_time = tg->latency_target_conf;
   1547	while (true) {
   1548		char tok[27];	/* wiops=18446744073709551616 */
   1549		char *p;
   1550		u64 val = U64_MAX;
   1551		int len;
   1552
   1553		if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
   1554			break;
   1555		if (tok[0] == '\0')
   1556			break;
   1557		ctx.body += len;
   1558
   1559		ret = -EINVAL;
   1560		p = tok;
   1561		strsep(&p, "=");
   1562		if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
   1563			goto out_finish;
   1564
   1565		ret = -ERANGE;
   1566		if (!val)
   1567			goto out_finish;
   1568
   1569		ret = -EINVAL;
   1570		if (!strcmp(tok, "rbps") && val > 1)
   1571			v[0] = val;
   1572		else if (!strcmp(tok, "wbps") && val > 1)
   1573			v[1] = val;
   1574		else if (!strcmp(tok, "riops") && val > 1)
   1575			v[2] = min_t(u64, val, UINT_MAX);
   1576		else if (!strcmp(tok, "wiops") && val > 1)
   1577			v[3] = min_t(u64, val, UINT_MAX);
   1578		else if (off == LIMIT_LOW && !strcmp(tok, "idle"))
   1579			idle_time = val;
   1580		else if (off == LIMIT_LOW && !strcmp(tok, "latency"))
   1581			latency_time = val;
   1582		else
   1583			goto out_finish;
   1584	}
   1585
   1586	tg->bps_conf[READ][index] = v[0];
   1587	tg->bps_conf[WRITE][index] = v[1];
   1588	tg->iops_conf[READ][index] = v[2];
   1589	tg->iops_conf[WRITE][index] = v[3];
   1590
   1591	if (index == LIMIT_MAX) {
   1592		tg->bps[READ][index] = v[0];
   1593		tg->bps[WRITE][index] = v[1];
   1594		tg->iops[READ][index] = v[2];
   1595		tg->iops[WRITE][index] = v[3];
   1596	}
   1597	tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
   1598		tg->bps_conf[READ][LIMIT_MAX]);
   1599	tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
   1600		tg->bps_conf[WRITE][LIMIT_MAX]);
   1601	tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
   1602		tg->iops_conf[READ][LIMIT_MAX]);
   1603	tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
   1604		tg->iops_conf[WRITE][LIMIT_MAX]);
   1605	tg->idletime_threshold_conf = idle_time;
   1606	tg->latency_target_conf = latency_time;
   1607
    1608	/* force the user to configure all settings for the low limit */
   1609	if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
   1610	      tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
   1611	    tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
   1612	    tg->latency_target_conf == DFL_LATENCY_TARGET) {
   1613		tg->bps[READ][LIMIT_LOW] = 0;
   1614		tg->bps[WRITE][LIMIT_LOW] = 0;
   1615		tg->iops[READ][LIMIT_LOW] = 0;
   1616		tg->iops[WRITE][LIMIT_LOW] = 0;
   1617		tg->idletime_threshold = DFL_IDLE_THRESHOLD;
   1618		tg->latency_target = DFL_LATENCY_TARGET;
   1619	} else if (index == LIMIT_LOW) {
   1620		tg->idletime_threshold = tg->idletime_threshold_conf;
   1621		tg->latency_target = tg->latency_target_conf;
   1622	}
   1623
   1624	blk_throtl_update_limit_valid(tg->td);
   1625	if (tg->td->limit_valid[LIMIT_LOW]) {
   1626		if (index == LIMIT_LOW)
   1627			tg->td->limit_index = LIMIT_LOW;
   1628	} else
   1629		tg->td->limit_index = LIMIT_MAX;
   1630	tg_conf_updated(tg, index == LIMIT_LOW &&
   1631		tg->td->limit_valid[LIMIT_LOW]);
   1632	ret = 0;
   1633out_finish:
   1634	blkg_conf_finish(&ctx);
   1635	return ret ?: nbytes;
   1636}
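
/*
 * Interface illustration (not part of the original source): tg_set_limit()
 * above parses space-separated "key=value" tokens that follow the "MAJ:MIN"
 * prefix consumed by blkg_conf_prep().  Assuming the standard cgroup v2
 * files served by throtl_files[] below, a configuration could look like:
 *
 *	echo "8:16 rbps=2097152 wiops=120" > io.max
 *	echo "8:16 rbps=1048576 idle=1000 latency=100" > io.low
 *
 * "max" clears a limit, bps/iops values of 0 or 1 are rejected, iops values
 * are clamped to UINT_MAX, and the idle and latency keys are intended only
 * for the low limit, matching the checks above.
 */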
   1637
   1638static struct cftype throtl_files[] = {
   1639#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
   1640	{
   1641		.name = "low",
   1642		.flags = CFTYPE_NOT_ON_ROOT,
   1643		.seq_show = tg_print_limit,
   1644		.write = tg_set_limit,
   1645		.private = LIMIT_LOW,
   1646	},
   1647#endif
   1648	{
   1649		.name = "max",
   1650		.flags = CFTYPE_NOT_ON_ROOT,
   1651		.seq_show = tg_print_limit,
   1652		.write = tg_set_limit,
   1653		.private = LIMIT_MAX,
   1654	},
   1655	{ }	/* terminate */
   1656};
   1657
   1658static void throtl_shutdown_wq(struct request_queue *q)
   1659{
   1660	struct throtl_data *td = q->td;
   1661
   1662	cancel_work_sync(&td->dispatch_work);
   1663}
   1664
   1665struct blkcg_policy blkcg_policy_throtl = {
   1666	.dfl_cftypes		= throtl_files,
   1667	.legacy_cftypes		= throtl_legacy_files,
   1668
   1669	.pd_alloc_fn		= throtl_pd_alloc,
   1670	.pd_init_fn		= throtl_pd_init,
   1671	.pd_online_fn		= throtl_pd_online,
   1672	.pd_offline_fn		= throtl_pd_offline,
   1673	.pd_free_fn		= throtl_pd_free,
   1674};
   1675
   1676static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
   1677{
   1678	unsigned long rtime = jiffies, wtime = jiffies;
   1679
   1680	if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
   1681		rtime = tg->last_low_overflow_time[READ];
   1682	if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
   1683		wtime = tg->last_low_overflow_time[WRITE];
   1684	return min(rtime, wtime);
   1685}
   1686
   1687/* tg should not be an intermediate node */
   1688static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
   1689{
   1690	struct throtl_service_queue *parent_sq;
   1691	struct throtl_grp *parent = tg;
   1692	unsigned long ret = __tg_last_low_overflow_time(tg);
   1693
   1694	while (true) {
   1695		parent_sq = parent->service_queue.parent_sq;
   1696		parent = sq_to_tg(parent_sq);
   1697		if (!parent)
   1698			break;
   1699
    1700		/*
    1701		 * If the parent doesn't have a low limit, it always reaches the
    1702		 * low limit, so its overflow time is useless for its children.
    1703		 */
   1704		if (!parent->bps[READ][LIMIT_LOW] &&
   1705		    !parent->iops[READ][LIMIT_LOW] &&
   1706		    !parent->bps[WRITE][LIMIT_LOW] &&
   1707		    !parent->iops[WRITE][LIMIT_LOW])
   1708			continue;
   1709		if (time_after(__tg_last_low_overflow_time(parent), ret))
   1710			ret = __tg_last_low_overflow_time(parent);
   1711	}
   1712	return ret;
   1713}
   1714
   1715static bool throtl_tg_is_idle(struct throtl_grp *tg)
   1716{
    1717	/*
    1718	 * cgroup is idle if:
    1719	 * - a single idle period is too long: longer than a fixed value (in case
    1720	 *   the user configures too big a threshold) or 4 times the idletime threshold
    1721	 * - average think time is more than the threshold
    1722	 * - IO latency is largely below the threshold
    1723	 */
   1724	unsigned long time;
   1725	bool ret;
   1726
   1727	time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
   1728	ret = tg->latency_target == DFL_LATENCY_TARGET ||
   1729	      tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
   1730	      (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
   1731	      tg->avg_idletime > tg->idletime_threshold ||
   1732	      (tg->latency_target && tg->bio_cnt &&
   1733		tg->bad_bio_cnt * 5 < tg->bio_cnt);
   1734	throtl_log(&tg->service_queue,
   1735		"avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
   1736		tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
   1737		tg->bio_cnt, ret, tg->td->scale);
   1738	return ret;
   1739}
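
/*
 * Worked example for the check above (illustrative numbers only): with
 * idletime_threshold = 1000 (us), time = min(MAX_IDLE_TIME, 4 * 1000) = 4000,
 * so the group counts as idle if no IO has completed for more than 4 ms, if
 * its average think time exceeds 1 ms, or if fewer than a fifth of its bios
 * missed the latency target (bad_bio_cnt * 5 < bio_cnt).
 */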
   1740
   1741static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
   1742{
   1743	struct throtl_service_queue *sq = &tg->service_queue;
   1744	bool read_limit, write_limit;
   1745
    1746	/*
    1747	 * if the cgroup reaches its low limit (a low limit of 0 is always
    1748	 * considered reached), it's ok to upgrade to the next limit
    1749	 */
   1750	read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW];
   1751	write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW];
   1752	if (!read_limit && !write_limit)
   1753		return true;
   1754	if (read_limit && sq->nr_queued[READ] &&
   1755	    (!write_limit || sq->nr_queued[WRITE]))
   1756		return true;
   1757	if (write_limit && sq->nr_queued[WRITE] &&
   1758	    (!read_limit || sq->nr_queued[READ]))
   1759		return true;
   1760
   1761	if (time_after_eq(jiffies,
   1762		tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
   1763	    throtl_tg_is_idle(tg))
   1764		return true;
   1765	return false;
   1766}
   1767
   1768static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
   1769{
   1770	while (true) {
   1771		if (throtl_tg_can_upgrade(tg))
   1772			return true;
   1773		tg = sq_to_tg(tg->service_queue.parent_sq);
   1774		if (!tg || !tg_to_blkg(tg)->parent)
   1775			return false;
   1776	}
   1777	return false;
   1778}
   1779
   1780void blk_throtl_cancel_bios(struct request_queue *q)
   1781{
   1782	struct cgroup_subsys_state *pos_css;
   1783	struct blkcg_gq *blkg;
   1784
   1785	spin_lock_irq(&q->queue_lock);
    1786	/*
    1787	 * queue_lock is held, so the RCU lock is technically not needed
    1788	 * here. However, it is still taken to emphasize that the following
    1789	 * path needs RCU protection and to prevent a lockdep warning.
    1790	 */
   1791	rcu_read_lock();
   1792	blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) {
   1793		struct throtl_grp *tg = blkg_to_tg(blkg);
   1794		struct throtl_service_queue *sq = &tg->service_queue;
   1795
   1796		/*
   1797		 * Set the flag to make sure throtl_pending_timer_fn() won't
   1798		 * stop until all throttled bios are dispatched.
   1799		 */
   1800		blkg_to_tg(blkg)->flags |= THROTL_TG_CANCELING;
   1801		/*
   1802		 * Update disptime after setting the above flag to make sure
   1803		 * throtl_select_dispatch() won't exit without dispatching.
   1804		 */
   1805		tg_update_disptime(tg);
   1806
   1807		throtl_schedule_pending_timer(sq, jiffies + 1);
   1808	}
   1809	rcu_read_unlock();
   1810	spin_unlock_irq(&q->queue_lock);
   1811}
   1812
   1813static bool throtl_can_upgrade(struct throtl_data *td,
   1814	struct throtl_grp *this_tg)
   1815{
   1816	struct cgroup_subsys_state *pos_css;
   1817	struct blkcg_gq *blkg;
   1818
   1819	if (td->limit_index != LIMIT_LOW)
   1820		return false;
   1821
   1822	if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice))
   1823		return false;
   1824
   1825	rcu_read_lock();
   1826	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
   1827		struct throtl_grp *tg = blkg_to_tg(blkg);
   1828
   1829		if (tg == this_tg)
   1830			continue;
   1831		if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
   1832			continue;
   1833		if (!throtl_hierarchy_can_upgrade(tg)) {
   1834			rcu_read_unlock();
   1835			return false;
   1836		}
   1837	}
   1838	rcu_read_unlock();
   1839	return true;
   1840}
   1841
   1842static void throtl_upgrade_check(struct throtl_grp *tg)
   1843{
   1844	unsigned long now = jiffies;
   1845
   1846	if (tg->td->limit_index != LIMIT_LOW)
   1847		return;
   1848
   1849	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
   1850		return;
   1851
   1852	tg->last_check_time = now;
   1853
   1854	if (!time_after_eq(now,
   1855	     __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
   1856		return;
   1857
   1858	if (throtl_can_upgrade(tg->td, NULL))
   1859		throtl_upgrade_state(tg->td);
   1860}
   1861
   1862static void throtl_upgrade_state(struct throtl_data *td)
   1863{
   1864	struct cgroup_subsys_state *pos_css;
   1865	struct blkcg_gq *blkg;
   1866
   1867	throtl_log(&td->service_queue, "upgrade to max");
   1868	td->limit_index = LIMIT_MAX;
   1869	td->low_upgrade_time = jiffies;
   1870	td->scale = 0;
   1871	rcu_read_lock();
   1872	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
   1873		struct throtl_grp *tg = blkg_to_tg(blkg);
   1874		struct throtl_service_queue *sq = &tg->service_queue;
   1875
   1876		tg->disptime = jiffies - 1;
   1877		throtl_select_dispatch(sq);
   1878		throtl_schedule_next_dispatch(sq, true);
   1879	}
   1880	rcu_read_unlock();
   1881	throtl_select_dispatch(&td->service_queue);
   1882	throtl_schedule_next_dispatch(&td->service_queue, true);
   1883	queue_work(kthrotld_workqueue, &td->dispatch_work);
   1884}
   1885
   1886static void throtl_downgrade_state(struct throtl_data *td)
   1887{
   1888	td->scale /= 2;
   1889
   1890	throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
   1891	if (td->scale) {
   1892		td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
   1893		return;
   1894	}
   1895
   1896	td->limit_index = LIMIT_LOW;
   1897	td->low_downgrade_time = jiffies;
   1898}
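
/*
 * Back-off example (illustrative): if td->scale was 4, repeated downgrade
 * attempts shrink it to 2, then 1, then 0.  While scale stays non-zero,
 * low_upgrade_time is only pulled back by scale * throtl_slice, which halves
 * the extra headroom granted elsewhere in this file while running at
 * LIMIT_MAX; only once scale reaches 0 does the queue actually drop back to
 * LIMIT_LOW and record low_downgrade_time.
 */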
   1899
   1900static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
   1901{
   1902	struct throtl_data *td = tg->td;
   1903	unsigned long now = jiffies;
   1904
   1905	/*
   1906	 * If cgroup is below low limit, consider downgrade and throttle other
   1907	 * cgroups
   1908	 */
   1909	if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) &&
   1910	    time_after_eq(now, tg_last_low_overflow_time(tg) +
   1911					td->throtl_slice) &&
   1912	    (!throtl_tg_is_idle(tg) ||
   1913	     !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
   1914		return true;
   1915	return false;
   1916}
   1917
   1918static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
   1919{
   1920	while (true) {
   1921		if (!throtl_tg_can_downgrade(tg))
   1922			return false;
   1923		tg = sq_to_tg(tg->service_queue.parent_sq);
   1924		if (!tg || !tg_to_blkg(tg)->parent)
   1925			break;
   1926	}
   1927	return true;
   1928}
   1929
   1930static void throtl_downgrade_check(struct throtl_grp *tg)
   1931{
   1932	uint64_t bps;
   1933	unsigned int iops;
   1934	unsigned long elapsed_time;
   1935	unsigned long now = jiffies;
   1936
   1937	if (tg->td->limit_index != LIMIT_MAX ||
   1938	    !tg->td->limit_valid[LIMIT_LOW])
   1939		return;
   1940	if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
   1941		return;
   1942	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
   1943		return;
   1944
   1945	elapsed_time = now - tg->last_check_time;
   1946	tg->last_check_time = now;
   1947
   1948	if (time_before(now, tg_last_low_overflow_time(tg) +
   1949			tg->td->throtl_slice))
   1950		return;
   1951
   1952	if (tg->bps[READ][LIMIT_LOW]) {
   1953		bps = tg->last_bytes_disp[READ] * HZ;
   1954		do_div(bps, elapsed_time);
   1955		if (bps >= tg->bps[READ][LIMIT_LOW])
   1956			tg->last_low_overflow_time[READ] = now;
   1957	}
   1958
   1959	if (tg->bps[WRITE][LIMIT_LOW]) {
   1960		bps = tg->last_bytes_disp[WRITE] * HZ;
   1961		do_div(bps, elapsed_time);
   1962		if (bps >= tg->bps[WRITE][LIMIT_LOW])
   1963			tg->last_low_overflow_time[WRITE] = now;
   1964	}
   1965
   1966	if (tg->iops[READ][LIMIT_LOW]) {
   1967		iops = tg->last_io_disp[READ] * HZ / elapsed_time;
   1968		if (iops >= tg->iops[READ][LIMIT_LOW])
   1969			tg->last_low_overflow_time[READ] = now;
   1970	}
   1971
   1972	if (tg->iops[WRITE][LIMIT_LOW]) {
   1973		iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
   1974		if (iops >= tg->iops[WRITE][LIMIT_LOW])
   1975			tg->last_low_overflow_time[WRITE] = now;
   1976	}
   1977
   1978	/*
   1979	 * If cgroup is below low limit, consider downgrade and throttle other
   1980	 * cgroups
   1981	 */
   1982	if (throtl_hierarchy_can_downgrade(tg))
   1983		throtl_downgrade_state(tg->td);
   1984
   1985	tg->last_bytes_disp[READ] = 0;
   1986	tg->last_bytes_disp[WRITE] = 0;
   1987	tg->last_io_disp[READ] = 0;
   1988	tg->last_io_disp[WRITE] = 0;
   1989}
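
/*
 * Worked example of the rate check above (illustrative, assuming HZ=1000):
 *
 *	last_bytes_disp[READ] = 1048576 (1 MiB over the last interval)
 *	elapsed_time          = 100 jiffies (100 ms)
 *	bps = 1048576 * 1000 / 100 = 10485760 bytes/s (~10 MiB/s)
 *
 * With bps[READ][LIMIT_LOW] = 2 MiB/s this exceeds the low limit, so
 * last_low_overflow_time[READ] is refreshed and a downgrade is postponed for
 * at least another throtl_slice.
 */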
   1990
   1991static void blk_throtl_update_idletime(struct throtl_grp *tg)
   1992{
   1993	unsigned long now;
   1994	unsigned long last_finish_time = tg->last_finish_time;
   1995
   1996	if (last_finish_time == 0)
   1997		return;
   1998
   1999	now = ktime_get_ns() >> 10;
   2000	if (now <= last_finish_time ||
   2001	    last_finish_time == tg->checked_last_finish_time)
   2002		return;
   2003
   2004	tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
   2005	tg->checked_last_finish_time = last_finish_time;
   2006}
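
/*
 * Note on the update above (illustrative numbers): avg_idletime is a 7/8
 * exponentially weighted moving average of the think time in roughly
 * microseconds (ns >> 10).  For example:
 *
 *	avg_idletime = 800, now - last_finish_time = 1600
 *	new avg_idletime = (800 * 7 + 1600) >> 3 = 900
 */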
   2007
   2008#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
   2009static void throtl_update_latency_buckets(struct throtl_data *td)
   2010{
   2011	struct avg_latency_bucket avg_latency[2][LATENCY_BUCKET_SIZE];
   2012	int i, cpu, rw;
   2013	unsigned long last_latency[2] = { 0 };
   2014	unsigned long latency[2];
   2015
   2016	if (!blk_queue_nonrot(td->queue) || !td->limit_valid[LIMIT_LOW])
   2017		return;
   2018	if (time_before(jiffies, td->last_calculate_time + HZ))
   2019		return;
   2020	td->last_calculate_time = jiffies;
   2021
   2022	memset(avg_latency, 0, sizeof(avg_latency));
   2023	for (rw = READ; rw <= WRITE; rw++) {
   2024		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
   2025			struct latency_bucket *tmp = &td->tmp_buckets[rw][i];
   2026
   2027			for_each_possible_cpu(cpu) {
   2028				struct latency_bucket *bucket;
   2029
   2030				/* this isn't race free, but ok in practice */
   2031				bucket = per_cpu_ptr(td->latency_buckets[rw],
   2032					cpu);
   2033				tmp->total_latency += bucket[i].total_latency;
   2034				tmp->samples += bucket[i].samples;
   2035				bucket[i].total_latency = 0;
   2036				bucket[i].samples = 0;
   2037			}
   2038
   2039			if (tmp->samples >= 32) {
   2040				int samples = tmp->samples;
   2041
   2042				latency[rw] = tmp->total_latency;
   2043
   2044				tmp->total_latency = 0;
   2045				tmp->samples = 0;
   2046				latency[rw] /= samples;
   2047				if (latency[rw] == 0)
   2048					continue;
   2049				avg_latency[rw][i].latency = latency[rw];
   2050			}
   2051		}
   2052	}
   2053
   2054	for (rw = READ; rw <= WRITE; rw++) {
   2055		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
   2056			if (!avg_latency[rw][i].latency) {
   2057				if (td->avg_buckets[rw][i].latency < last_latency[rw])
   2058					td->avg_buckets[rw][i].latency =
   2059						last_latency[rw];
   2060				continue;
   2061			}
   2062
   2063			if (!td->avg_buckets[rw][i].valid)
   2064				latency[rw] = avg_latency[rw][i].latency;
   2065			else
   2066				latency[rw] = (td->avg_buckets[rw][i].latency * 7 +
   2067					avg_latency[rw][i].latency) >> 3;
   2068
   2069			td->avg_buckets[rw][i].latency = max(latency[rw],
   2070				last_latency[rw]);
   2071			td->avg_buckets[rw][i].valid = true;
   2072			last_latency[rw] = td->avg_buckets[rw][i].latency;
   2073		}
   2074	}
   2075
   2076	for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
   2077		throtl_log(&td->service_queue,
   2078			"Latency bucket %d: read latency=%ld, read valid=%d, "
   2079			"write latency=%ld, write valid=%d", i,
   2080			td->avg_buckets[READ][i].latency,
   2081			td->avg_buckets[READ][i].valid,
   2082			td->avg_buckets[WRITE][i].latency,
   2083			td->avg_buckets[WRITE][i].valid);
   2084}
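
/*
 * Summary of the flow above (illustrative): per-cpu samples are folded into
 * tmp_buckets, and once a bucket has collected at least 32 samples its mean
 * feeds avg_buckets through a 7/8 EWMA, i.e. (old * 7 + new) >> 3.
 * last_latency[] then keeps the per-size averages non-decreasing, so a
 * bucket for larger requests never advertises a lower expected latency than
 * a bucket for smaller ones.
 */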
   2085#else
   2086static inline void throtl_update_latency_buckets(struct throtl_data *td)
   2087{
   2088}
   2089#endif
   2090
   2091bool __blk_throtl_bio(struct bio *bio)
   2092{
   2093	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
   2094	struct blkcg_gq *blkg = bio->bi_blkg;
   2095	struct throtl_qnode *qn = NULL;
   2096	struct throtl_grp *tg = blkg_to_tg(blkg);
   2097	struct throtl_service_queue *sq;
   2098	bool rw = bio_data_dir(bio);
   2099	bool throttled = false;
   2100	struct throtl_data *td = tg->td;
   2101
   2102	rcu_read_lock();
   2103
   2104	if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
   2105		blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
   2106				bio->bi_iter.bi_size);
   2107		blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
   2108	}
   2109
   2110	spin_lock_irq(&q->queue_lock);
   2111
   2112	throtl_update_latency_buckets(td);
   2113
   2114	blk_throtl_update_idletime(tg);
   2115
   2116	sq = &tg->service_queue;
   2117
   2118again:
   2119	while (true) {
   2120		if (tg->last_low_overflow_time[rw] == 0)
   2121			tg->last_low_overflow_time[rw] = jiffies;
   2122		throtl_downgrade_check(tg);
   2123		throtl_upgrade_check(tg);
   2124		/* throtl is FIFO - if bios are already queued, should queue */
   2125		if (sq->nr_queued[rw])
   2126			break;
   2127
   2128		/* if above limits, break to queue */
   2129		if (!tg_may_dispatch(tg, bio, NULL)) {
   2130			tg->last_low_overflow_time[rw] = jiffies;
   2131			if (throtl_can_upgrade(td, tg)) {
   2132				throtl_upgrade_state(td);
   2133				goto again;
   2134			}
   2135			break;
   2136		}
   2137
   2138		/* within limits, let's charge and dispatch directly */
   2139		throtl_charge_bio(tg, bio);
   2140
    2141		/*
    2142		 * We need to trim the slice even when bios are not being
    2143		 * queued, otherwise it might happen that a bio is not queued
    2144		 * for a long time and the slice keeps on extending while trim
    2145		 * is not called for a long time. Then, if limits are reduced
    2146		 * suddenly, we take into account all the IO dispatched so far
    2147		 * at the new low rate and newly queued IO gets a really long
    2148		 * dispatch time.
    2149		 *
    2150		 * So keep on trimming the slice even if the bio is not queued.
    2151		 */
   2152		throtl_trim_slice(tg, rw);
   2153
   2154		/*
   2155		 * @bio passed through this layer without being throttled.
   2156		 * Climb up the ladder.  If we're already at the top, it
   2157		 * can be executed directly.
   2158		 */
   2159		qn = &tg->qnode_on_parent[rw];
   2160		sq = sq->parent_sq;
   2161		tg = sq_to_tg(sq);
   2162		if (!tg)
   2163			goto out_unlock;
   2164	}
   2165
   2166	/* out-of-limit, queue to @tg */
   2167	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
   2168		   rw == READ ? 'R' : 'W',
   2169		   tg->bytes_disp[rw], bio->bi_iter.bi_size,
   2170		   tg_bps_limit(tg, rw),
   2171		   tg->io_disp[rw], tg_iops_limit(tg, rw),
   2172		   sq->nr_queued[READ], sq->nr_queued[WRITE]);
   2173
   2174	tg->last_low_overflow_time[rw] = jiffies;
   2175
   2176	td->nr_queued[rw]++;
   2177	throtl_add_bio_tg(bio, qn, tg);
   2178	throttled = true;
   2179
   2180	/*
   2181	 * Update @tg's dispatch time and force schedule dispatch if @tg
   2182	 * was empty before @bio.  The forced scheduling isn't likely to
   2183	 * cause undue delay as @bio is likely to be dispatched directly if
   2184	 * its @tg's disptime is not in the future.
   2185	 */
   2186	if (tg->flags & THROTL_TG_WAS_EMPTY) {
   2187		tg_update_disptime(tg);
   2188		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
   2189	}
   2190
   2191out_unlock:
   2192	bio_set_flag(bio, BIO_THROTTLED);
   2193
   2194#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
   2195	if (throttled || !td->track_bio_latency)
   2196		bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY;
   2197#endif
   2198	spin_unlock_irq(&q->queue_lock);
   2199
   2200	rcu_read_unlock();
   2201	return throttled;
   2202}
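
/*
 * Ladder example (illustrative): for a hierarchy root <- parent <- child, a
 * bio issued in child that fits within child's limits is charged to child,
 * qn is pointed at child->qnode_on_parent and the loop repeats for parent.
 * If parent is over its limit the bio is queued on parent's service queue
 * instead, so it only finishes the climb once every level up to the root has
 * budget for it.
 */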
   2203
   2204#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
   2205static void throtl_track_latency(struct throtl_data *td, sector_t size,
   2206	int op, unsigned long time)
   2207{
   2208	struct latency_bucket *latency;
   2209	int index;
   2210
   2211	if (!td || td->limit_index != LIMIT_LOW ||
   2212	    !(op == REQ_OP_READ || op == REQ_OP_WRITE) ||
   2213	    !blk_queue_nonrot(td->queue))
   2214		return;
   2215
   2216	index = request_bucket_index(size);
   2217
   2218	latency = get_cpu_ptr(td->latency_buckets[op]);
   2219	latency[index].total_latency += time;
   2220	latency[index].samples++;
   2221	put_cpu_ptr(td->latency_buckets[op]);
   2222}
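
/*
 * Bucket mapping sketch (an assumption based on the "<= 4k to >= 1M" comment
 * near the top of this file): request_bucket_index() spreads the nine
 * buckets one per power of two of request size, so a 4k request would land
 * in bucket 0, a 64k request in bucket 4, and anything of 1M or more in
 * bucket 8.
 */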
   2223
   2224void blk_throtl_stat_add(struct request *rq, u64 time_ns)
   2225{
   2226	struct request_queue *q = rq->q;
   2227	struct throtl_data *td = q->td;
   2228
   2229	throtl_track_latency(td, blk_rq_stats_sectors(rq), req_op(rq),
   2230			     time_ns >> 10);
   2231}
   2232
   2233void blk_throtl_bio_endio(struct bio *bio)
   2234{
   2235	struct blkcg_gq *blkg;
   2236	struct throtl_grp *tg;
   2237	u64 finish_time_ns;
   2238	unsigned long finish_time;
   2239	unsigned long start_time;
   2240	unsigned long lat;
   2241	int rw = bio_data_dir(bio);
   2242
   2243	blkg = bio->bi_blkg;
   2244	if (!blkg)
   2245		return;
   2246	tg = blkg_to_tg(blkg);
   2247	if (!tg->td->limit_valid[LIMIT_LOW])
   2248		return;
   2249
   2250	finish_time_ns = ktime_get_ns();
   2251	tg->last_finish_time = finish_time_ns >> 10;
   2252
   2253	start_time = bio_issue_time(&bio->bi_issue) >> 10;
   2254	finish_time = __bio_issue_time(finish_time_ns) >> 10;
   2255	if (!start_time || finish_time <= start_time)
   2256		return;
   2257
   2258	lat = finish_time - start_time;
    2259	/* this is only for bio based drivers */
   2260	if (!(bio->bi_issue.value & BIO_ISSUE_THROTL_SKIP_LATENCY))
   2261		throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue),
   2262				     bio_op(bio), lat);
   2263
   2264	if (tg->latency_target && lat >= tg->td->filtered_latency) {
   2265		int bucket;
   2266		unsigned int threshold;
   2267
   2268		bucket = request_bucket_index(bio_issue_size(&bio->bi_issue));
   2269		threshold = tg->td->avg_buckets[rw][bucket].latency +
   2270			tg->latency_target;
   2271		if (lat > threshold)
   2272			tg->bad_bio_cnt++;
    2273		/*
    2274		 * Not race free, so the count could be wrong, which means
    2275		 * cgroups may end up being throttled inaccurately
    2276		 */
   2277		tg->bio_cnt++;
   2278	}
   2279
   2280	if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
   2281		tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
   2282		tg->bio_cnt /= 2;
   2283		tg->bad_bio_cnt /= 2;
   2284	}
   2285}
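
/*
 * Worked example (illustrative): with latency_target = 100 (us) and an
 * avg_buckets latency of 900 for the bio's size bucket, a bio slower than
 * 1000 us is counted in bad_bio_cnt.  Both counters are halved roughly once
 * per throtl_slice (or when bio_cnt exceeds 1024), so the bad/total ratio
 * consumed by throtl_tg_is_idle() reflects recent behaviour only.
 */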
   2286#endif
   2287
   2288int blk_throtl_init(struct request_queue *q)
   2289{
   2290	struct throtl_data *td;
   2291	int ret;
   2292
   2293	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
   2294	if (!td)
   2295		return -ENOMEM;
   2296	td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) *
   2297		LATENCY_BUCKET_SIZE, __alignof__(u64));
   2298	if (!td->latency_buckets[READ]) {
   2299		kfree(td);
   2300		return -ENOMEM;
   2301	}
   2302	td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) *
   2303		LATENCY_BUCKET_SIZE, __alignof__(u64));
   2304	if (!td->latency_buckets[WRITE]) {
   2305		free_percpu(td->latency_buckets[READ]);
   2306		kfree(td);
   2307		return -ENOMEM;
   2308	}
   2309
   2310	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
   2311	throtl_service_queue_init(&td->service_queue);
   2312
   2313	q->td = td;
   2314	td->queue = q;
   2315
   2316	td->limit_valid[LIMIT_MAX] = true;
   2317	td->limit_index = LIMIT_MAX;
   2318	td->low_upgrade_time = jiffies;
   2319	td->low_downgrade_time = jiffies;
   2320
   2321	/* activate policy */
   2322	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
   2323	if (ret) {
   2324		free_percpu(td->latency_buckets[READ]);
   2325		free_percpu(td->latency_buckets[WRITE]);
   2326		kfree(td);
   2327	}
   2328	return ret;
   2329}
   2330
   2331void blk_throtl_exit(struct request_queue *q)
   2332{
   2333	BUG_ON(!q->td);
   2334	del_timer_sync(&q->td->service_queue.pending_timer);
   2335	throtl_shutdown_wq(q);
   2336	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
   2337	free_percpu(q->td->latency_buckets[READ]);
   2338	free_percpu(q->td->latency_buckets[WRITE]);
   2339	kfree(q->td);
   2340}
   2341
   2342void blk_throtl_register_queue(struct request_queue *q)
   2343{
   2344	struct throtl_data *td;
   2345	int i;
   2346
   2347	td = q->td;
   2348	BUG_ON(!td);
   2349
   2350	if (blk_queue_nonrot(q)) {
   2351		td->throtl_slice = DFL_THROTL_SLICE_SSD;
   2352		td->filtered_latency = LATENCY_FILTERED_SSD;
   2353	} else {
   2354		td->throtl_slice = DFL_THROTL_SLICE_HD;
   2355		td->filtered_latency = LATENCY_FILTERED_HD;
   2356		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
   2357			td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY;
   2358			td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY;
   2359		}
   2360	}
   2361#ifndef CONFIG_BLK_DEV_THROTTLING_LOW
   2362	/* if no low limit, use previous default */
   2363	td->throtl_slice = DFL_THROTL_SLICE_HD;
   2364#endif
   2365
   2366	td->track_bio_latency = !queue_is_mq(q);
   2367	if (!td->track_bio_latency)
   2368		blk_stat_enable_accounting(q);
   2369}
   2370
   2371#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
   2372ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
   2373{
   2374	if (!q->td)
   2375		return -EINVAL;
   2376	return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
   2377}
   2378
   2379ssize_t blk_throtl_sample_time_store(struct request_queue *q,
   2380	const char *page, size_t count)
   2381{
   2382	unsigned long v;
   2383	unsigned long t;
   2384
   2385	if (!q->td)
   2386		return -EINVAL;
   2387	if (kstrtoul(page, 10, &v))
   2388		return -EINVAL;
   2389	t = msecs_to_jiffies(v);
   2390	if (t == 0 || t > MAX_THROTL_SLICE)
   2391		return -EINVAL;
   2392	q->td->throtl_slice = t;
   2393	return count;
   2394}
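
/*
 * Usage sketch (assuming these helpers back the usual queue sysfs attribute,
 * typically "throttle_sample_time"):
 *
 *	cat /sys/block/sda/queue/throttle_sample_time    # current value, ms
 *	echo 50 > /sys/block/sda/queue/throttle_sample_time
 *
 * Values that convert to 0 jiffies or exceed MAX_THROTL_SLICE are rejected
 * with -EINVAL, matching the store handler above.
 */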
   2395#endif
   2396
   2397static int __init throtl_init(void)
   2398{
   2399	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
   2400	if (!kthrotld_workqueue)
   2401		panic("Failed to create kthrotld\n");
   2402
   2403	return blkcg_policy_register(&blkcg_policy_throtl);
   2404}
   2405
   2406module_init(throtl_init);