cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

blk-mq-tag.c (18853B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include <linux/delay.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"

/*
 * Recalculate the wakeup batch when the tag map is shared between hctxs.
 */
static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
		unsigned int users)
{
	if (!users)
		return;

	sbitmap_queue_recalculate_wake_batch(&tags->bitmap_tags,
			users);
	sbitmap_queue_recalculate_wake_batch(&tags->breserved_tags,
			users);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag, so that even
 * if the first allocation attempt fails, the other shared-tag users will
 * already have reserved budget for this queue.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	unsigned int users;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) ||
		    test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) {
			return true;
		}
	} else {
		if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) ||
		    test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) {
			return true;
		}
	}

	users = atomic_inc_return(&hctx->tags->active_queues);

	blk_mq_update_wake_batch(hctx->tags, users);

	return true;
}

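/*
 * Aside (illustrative sketch, hypothetical helper name): the test_bit()
 * followed by test_and_set_bit() pairing above is a common kernel idiom.
 * The plain read handles the common already-active case without a locked
 * read-modify-write, so repeated submitters don't keep bouncing the
 * cacheline; only the first 0 -> 1 transition pays for the atomic.
 */
#if 0	/* compiled out, for illustration only */
static bool mark_active_once(unsigned long *addr, int bit)
{
	/* Cheap shared read first: skips the atomic when already set. */
	if (test_bit(bit, addr))
		return false;
	/* Atomic RMW only when we may be the one flipping 0 -> 1. */
	return !test_and_set_bit(bit, addr);
}
#endif
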
/*
 * Wake up all waiters potentially sleeping on tags
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;
	unsigned int users;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
					&q->queue_flags))
			return;
	} else {
		if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return;
	}

	users = atomic_dec_return(&tags->active_queues);

	blk_mq_update_wake_batch(tags, users);

	blk_mq_tag_wakeup_all(tags, false);
}

static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
			    struct sbitmap_queue *bt)
{
	if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) &&
			!hctx_may_queue(data->hctx, bt))
		return BLK_MQ_NO_TAG;

	if (data->shallow_depth)
		return sbitmap_queue_get_shallow(bt, data->shallow_depth);
	else
		return __sbitmap_queue_get(bt);
}

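/*
 * For reference, a simplified sketch of the fair-share test that
 * hctx_may_queue() (defined in blk-mq.h) performs for __blk_mq_get_tag()
 * above: each active queue on a shared tag map is limited to roughly its
 * proportional slice of the depth.  The rounding and the minimum of four
 * tags below are approximations, not the authoritative implementation.
 */
#if 0	/* compiled out, for illustration only */
static bool hctx_may_queue_sketch(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int users, depth;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/* Proportional share of the bitmap depth, but always allow a few tags. */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return __blk_mq_active_requests(hctx) < depth;
}
#endif
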
unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
			      unsigned int *offset)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt = &tags->bitmap_tags;
	unsigned long ret;

	if (data->shallow_depth || data->flags & BLK_MQ_REQ_RESERVED ||
	    data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		return 0;
	ret = __sbitmap_queue_get_batch(bt, nr_tags, offset);
	*offset += tags->nr_reserved_tags;
	return ret;
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt;
	struct sbq_wait_state *ws;
	DEFINE_SBQ_WAIT(wait);
	unsigned int tag_offset;
	int tag;

	if (data->flags & BLK_MQ_REQ_RESERVED) {
		if (unlikely(!tags->nr_reserved_tags)) {
			WARN_ON_ONCE(1);
			return BLK_MQ_NO_TAG;
		}
		bt = &tags->breserved_tags;
		tag_offset = 0;
	} else {
		bt = &tags->bitmap_tags;
		tag_offset = tags->nr_reserved_tags;
	}

	tag = __blk_mq_get_tag(data, bt);
	if (tag != BLK_MQ_NO_TAG)
		goto found_tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return BLK_MQ_NO_TAG;

	ws = bt_wait_ptr(bt, data->hctx);
	do {
		struct sbitmap_queue *bt_prev;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete.
		 */
		blk_mq_run_hw_queue(data->hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);

		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		bt_prev = bt;
		io_schedule();

		sbitmap_finish_wait(bt, ws, &wait);

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
						data->ctx);
		tags = blk_mq_tags_from_data(data);
		if (data->flags & BLK_MQ_REQ_RESERVED)
			bt = &tags->breserved_tags;
		else
			bt = &tags->bitmap_tags;

		/*
		 * If the destination hw queue has changed, issue a fake
		 * wake-up on the previous queue to compensate for the
		 * missed wake-up, so other allocations on the previous
		 * queue won't be starved.
		 */
		if (bt != bt_prev)
			sbitmap_queue_wake_up(bt_prev);

		ws = bt_wait_ptr(bt, data->hctx);
	} while (1);

	sbitmap_finish_wait(bt, ws, &wait);

found_tag:
	/*
	 * Give up this allocation if the hctx is inactive.  The caller will
	 * retry on an active hctx.
	 */
	if (unlikely(test_bit(BLK_MQ_S_INACTIVE, &data->hctx->state))) {
		blk_mq_put_tag(tags, data->ctx, tag + tag_offset);
		return BLK_MQ_NO_TAG;
	}
	return tag + tag_offset;
}

void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		    unsigned int tag)
{
	if (!blk_mq_tag_is_reserved(tags, tag)) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
	}
}

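/*
 * Tag numbering convention used by blk_mq_get_tag()/blk_mq_put_tag() above:
 * reserved tags occupy [0, nr_reserved_tags) and index breserved_tags
 * directly, while regular tags occupy [nr_reserved_tags, nr_tags) and index
 * bitmap_tags after subtracting the reserved count.  The helper used above
 * is declared in blk-mq-tag.h and boils down to the sketch below.
 */
#if 0	/* compiled out, for illustration only */
static bool blk_mq_tag_is_reserved_sketch(struct blk_mq_tags *tags,
					  unsigned int tag)
{
	return tag < tags->nr_reserved_tags;
}
#endif
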
void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags)
{
	sbitmap_queue_clear_batch(&tags->bitmap_tags, tags->nr_reserved_tags,
					tag_array, nr_tags);
}

struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	busy_tag_iter_fn *fn;
	void *data;
	bool reserved;
};

static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
		unsigned int bitnr)
{
	struct request *rq;
	unsigned long flags;

	spin_lock_irqsave(&tags->lock, flags);
	rq = tags->rqs[bitnr];
	if (!rq || rq->tag != bitnr || !req_ref_inc_not_zero(rq))
		rq = NULL;
	spin_unlock_irqrestore(&tags->lock, flags);
	return rq;
}

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct request_queue *q = iter_data->q;
	struct blk_mq_tag_set *set = q->tag_set;
	bool reserved = iter_data->reserved;
	struct blk_mq_tags *tags;
	struct request *rq;
	bool ret = true;

	if (blk_mq_is_shared_tags(set->flags))
		tags = set->shared_tags;
	else
		tags = hctx->tags;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;
	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (rq->q == q && (!hctx || rq->mq_hctx == hctx))
		ret = iter_data->fn(rq, iter_data->data, reserved);
	blk_mq_put_rq_ref(rq);
	return ret;
}

/**
 * bt_for_each - iterate over the requests associated with a hardware queue
 * @hctx:	Hardware queue to examine.
 * @q:		Request queue to examine.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each request
 *		associated with @hctx that has been assigned a driver tag.
 *		@fn will be called as follows: @fn(rq, @data, @reserved)
 *		where rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @reserved:	Indicates whether @bt is the breserved_tags member or the
 *		bitmap_tags member of struct blk_mq_tags.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct request_queue *q,
			struct sbitmap_queue *bt, busy_tag_iter_fn *fn,
			void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
		.q = q,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	unsigned int flags;
};

#define BT_TAG_ITER_RESERVED		(1 << 0)
#define BT_TAG_ITER_STARTED		(1 << 1)
#define BT_TAG_ITER_STATIC_RQS		(1 << 2)

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	bool reserved = iter_data->flags & BT_TAG_ITER_RESERVED;
	struct request *rq;
	bool ret = true;
	bool iter_static_rqs = !!(iter_data->flags & BT_TAG_ITER_STATIC_RQS);

	if (!reserved)
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (iter_static_rqs)
		rq = tags->static_rqs[bitnr];
	else
		rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
	    blk_mq_request_started(rq))
		ret = iter_data->fn(rq, iter_data->data, reserved);
	if (!iter_static_rqs)
		blk_mq_put_rq_ref(rq);
	return ret;
}

/**
 * bt_tags_for_each - iterate over the requests in a tag map
 * @tags:	Tag map to iterate over.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @data,
 *		@reserved) where rq is a pointer to a request. Return true
 *		to continue iterating tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @flags:	BT_TAG_ITER_*
 */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, unsigned int flags)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.flags = flags,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv, unsigned int flags)
{
	WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);

	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv,
				 flags | BT_TAG_ITER_RESERVED);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, flags);
}

/**
 * blk_mq_all_tag_iter - iterate over all requests in a tag map
 * @tags:	Tag map to iterate over.
 * @fn:		Pointer to the function that will be called for each
 *		request. @fn will be called as follows: @fn(rq, @priv,
 *		reserved) where rq is a pointer to a request. 'reserved'
 *		indicates whether or not @rq is a reserved request. Return
 *		true to continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * Caller has to pass the tag map from which requests are allocated.
 */
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv)
{
	__blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS);
}

/**
 * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
 * @tagset:	Tag set to iterate over.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @priv,
 *		reserved) where rq is a pointer to a request. 'reserved'
 *		indicates whether or not @rq is a reserved request. Return
 *		true to continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * We grab one request reference before calling @fn and release it after
 * @fn returns.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	unsigned int flags = tagset->flags;
	int i, nr_tags;

	nr_tags = blk_mq_is_shared_tags(flags) ? 1 : tagset->nr_hw_queues;

	for (i = 0; i < nr_tags; i++) {
		if (tagset->tags && tagset->tags[i])
			__blk_mq_all_tag_iter(tagset->tags[i], fn, priv,
					      BT_TAG_ITER_STARTED);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);

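/*
 * Hypothetical usage sketch for blk_mq_tagset_busy_iter(): a driver can walk
 * every started request in its tag set, e.g. to count in-flight commands.
 * The callback matches busy_tag_iter_fn; count_inflight()/my_count_inflight()
 * are made-up names.
 */
#if 0	/* compiled out, for illustration only */
static bool count_inflight(struct request *rq, void *priv, bool reserved)
{
	unsigned int *inflight = priv;

	(*inflight)++;
	return true;	/* keep iterating */
}

static unsigned int my_count_inflight(struct blk_mq_tag_set *set)
{
	unsigned int inflight = 0;

	blk_mq_tagset_busy_iter(set, count_inflight, &inflight);
	return inflight;
}
#endif
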
static bool blk_mq_tagset_count_completed_rqs(struct request *rq,
		void *data, bool reserved)
{
	unsigned *count = data;

	if (blk_mq_request_completed(rq))
		(*count)++;
	return true;
}

/**
 * blk_mq_tagset_wait_completed_request - Wait until all scheduled request
 * completions have finished.
 * @tagset:	Tag set to drain completed requests from
 *
 * Note: This function has to be run after all IO queues have been shut down
 */
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
{
	while (true) {
		unsigned count = 0;

		blk_mq_tagset_busy_iter(tagset,
				blk_mq_tagset_count_completed_rqs, &count);
		if (!count)
			break;
		msleep(5);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);

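/*
 * Hypothetical teardown sketch: a driver typically cancels or completes all
 * outstanding requests first and only then waits for the scheduled
 * completions to finish with blk_mq_tagset_wait_completed_request().
 * my_cancel_rq()/my_teardown_queues() are made-up names.
 */
#if 0	/* compiled out, for illustration only */
static bool my_cancel_rq(struct request *rq, void *priv, bool reserved)
{
	blk_mq_complete_request(rq);	/* or the driver's own error path */
	return true;
}

static void my_teardown_queues(struct blk_mq_tag_set *set)
{
	blk_mq_tagset_busy_iter(set, my_cancel_rq, NULL);
	blk_mq_tagset_wait_completed_request(set);
}
#endif
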
/**
 * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
 * @q:		Request queue to examine.
 * @fn:		Pointer to the function that will be called for each request
 *		on @q. @fn will be called as follows: @fn(rq, @priv,
 *		reserved) where rq is a pointer to a request. 'reserved'
 *		indicates whether or not @rq is a reserved request.
 * @priv:	Will be passed as second argument to @fn.
 *
 * Note: if @q->tag_set is shared with other request queues then @fn will be
 * called for all requests on all queues that share that tag set and not only
 * for requests associated with @q.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
		void *priv)
{
	/*
	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and hctx_table
	 * while the queue is frozen. So we can use q_usage_counter to avoid
	 * racing with it.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		struct blk_mq_tags *tags = q->tag_set->shared_tags;
		struct sbitmap_queue *bresv = &tags->breserved_tags;
		struct sbitmap_queue *btags = &tags->bitmap_tags;

		if (tags->nr_reserved_tags)
			bt_for_each(NULL, q, bresv, fn, priv, true);
		bt_for_each(NULL, q, btags, fn, priv, false);
	} else {
		struct blk_mq_hw_ctx *hctx;
		unsigned long i;

		queue_for_each_hw_ctx(q, hctx, i) {
			struct blk_mq_tags *tags = hctx->tags;
			struct sbitmap_queue *bresv = &tags->breserved_tags;
			struct sbitmap_queue *btags = &tags->bitmap_tags;

			/*
			 * If no software queues are currently mapped to this
			 * hardware queue, there's nothing to check
			 */
			if (!blk_mq_hw_queue_mapped(hctx))
				continue;

			if (tags->nr_reserved_tags)
				bt_for_each(hctx, q, bresv, fn, priv, true);
			bt_for_each(hctx, q, btags, fn, priv, false);
		}
	}
	blk_queue_exit(q);
}

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
			struct sbitmap_queue *breserved_tags,
			unsigned int queue_depth, unsigned int reserved,
			int node, int alloc_policy)
{
	unsigned int depth = queue_depth - reserved;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(bitmap_tags, depth, round_robin, node))
		return -ENOMEM;
	if (bt_alloc(breserved_tags, reserved, round_robin, node))
		goto free_bitmap_tags;

	return 0;

free_bitmap_tags:
	sbitmap_queue_free(bitmap_tags);
	return -ENOMEM;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;
	spin_lock_init(&tags->lock);

	if (blk_mq_init_bitmaps(&tags->bitmap_tags, &tags->breserved_tags,
				total_tags, reserved_tags, node,
				alloc_policy) < 0) {
		kfree(tags);
		return NULL;
	}
	return tags;
}

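/*
 * Allocation/teardown pairing sketch for blk_mq_init_tags() above and
 * blk_mq_free_tags() below; the depths are arbitrary, and BLK_TAG_ALLOC_FIFO
 * is assumed to be the non-round-robin policy declared alongside
 * BLK_TAG_ALLOC_RR in blk-mq-tag.h.
 */
#if 0	/* compiled out, for illustration only */
static struct blk_mq_tags *example_alloc_tags(int node)
{
	/* 256 tags total, the first one reserved, FIFO allocation policy. */
	return blk_mq_init_tags(256, 1, node, BLK_TAG_ALLOC_FIFO);
}

static void example_release_tags(struct blk_mq_tags *tags)
{
	blk_mq_free_tags(tags);
}
#endif
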
void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
	kfree(tags);
}

int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
			    bool can_grow)
{
	struct blk_mq_tags *tags = *tagsptr;

	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	/*
	 * If we are allowed to grow beyond the original size, allocate
	 * a new set of tags before freeing the old one.
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		struct blk_mq_tags *new;

		if (!can_grow)
			return -EINVAL;

		/*
		 * We need some sort of upper limit, set it high enough that
		 * no valid use cases should require more.
		 */
		if (tdepth > MAX_SCHED_RQ)
			return -EINVAL;

		/*
		 * Only the sbitmap needs resizing since we allocated the max
		 * initially.
		 */
		if (blk_mq_is_shared_tags(set->flags))
			return 0;

		new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth);
		if (!new)
			return -ENOMEM;

		blk_mq_free_map_and_rqs(set, *tagsptr, hctx->queue_num);
		*tagsptr = new;
	} else {
		/*
		 * Don't need (or can't) update reserved tags here, they
		 * remain static and should never need resizing.
		 */
		sbitmap_queue_resize(&tags->bitmap_tags,
				tdepth - tags->nr_reserved_tags);
	}

	return 0;
}

void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size)
{
	struct blk_mq_tags *tags = set->shared_tags;

	sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags);
}

void blk_mq_tag_update_sched_shared_tags(struct request_queue *q)
{
	sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags,
			     q->nr_requests - q->tag_set->reserved_tags);
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function, which returns a tag with the
 * hardware context index in the upper bits and the per-hardware-queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
    678u32 blk_mq_unique_tag(struct request *rq)
    679{
    680	return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
    681		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
    682}
    683EXPORT_SYMBOL(blk_mq_unique_tag);