cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sbitmap.c


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>

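/*
 * Allocate the per-cpu allocation hints. Unless the bitmap is used in
 * round-robin mode, seed each CPU's hint with a random offset so that
 * CPUs start their searches in different words.
 */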
static int init_alloc_hint(struct sbitmap *sb, gfp_t flags)
{
	unsigned depth = sb->depth;

	sb->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sb->alloc_hint)
		return -ENOMEM;

	if (depth && !sb->round_robin) {
		int i;

		for_each_possible_cpu(i)
			*per_cpu_ptr(sb->alloc_hint, i) = prandom_u32() % depth;
	}
	return 0;
}

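/*
 * Fetch this CPU's allocation hint, re-randomizing it if it no longer
 * fits within the current depth (e.g. after a resize).
 */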
static inline unsigned update_alloc_hint_before_get(struct sbitmap *sb,
						    unsigned int depth)
{
	unsigned hint;

	hint = this_cpu_read(*sb->alloc_hint);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sb->alloc_hint, hint);
	}

	return hint;
}

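/*
 * Store an updated hint for this CPU based on the result of an allocation
 * attempt: clear it if the map was full, otherwise advance it past the bit
 * we just grabbed (always advancing in round-robin mode).
 */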
static inline void update_alloc_hint_after_get(struct sbitmap *sb,
					       unsigned int depth,
					       unsigned int hint,
					       unsigned int nr)
{
	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sb->alloc_hint, 0);
	} else if (nr == hint || unlikely(sb->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sb->alloc_hint, hint);
	}
}

/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
{
	unsigned long mask;

	if (!READ_ONCE(map->cleared))
		return false;

	/*
	 * First get a stable cleared mask, setting the old mask to 0.
	 */
	mask = xchg(&map->cleared, 0);

	/*
	 * Now clear the masked bits in our free word
	 */
	atomic_long_andnot(mask, (atomic_long_t *)&map->word);
	BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word));
	return true;
}

int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node, bool round_robin,
		      bool alloc_hint)
{
	unsigned int bits_per_word;

	if (shift < 0)
		shift = sbitmap_calculate_shift(depth);

	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
	sb->round_robin = round_robin;

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	if (alloc_hint) {
		if (init_alloc_hint(sb, flags))
			return -ENOMEM;
	} else {
		sb->alloc_hint = NULL;
	}

	sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
	if (!sb->map) {
		free_percpu(sb->alloc_hint);
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);

void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++)
		sbitmap_deferred_clear(&sb->map[i]);

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
}
EXPORT_SYMBOL_GPL(sbitmap_resize);

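/*
 * Find a zero bit in one word and claim it with test_and_set_bit_lock().
 * If @wrap is true and the search started from a non-zero hint, wrap around
 * to bit 0 instead of giving up. Returns the claimed bit number, or -1 if
 * no free bit was found.
 */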
static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
			      unsigned int hint, bool wrap)
{
	int nr;

	/* don't wrap if starting from 0 */
	wrap = wrap && hint;

	while (1) {
		nr = find_next_zero_bit(word, depth, hint);
		if (unlikely(nr >= depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (hint && wrap) {
				hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit_lock(nr, word))
			break;

		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
	}

	return nr;
}

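/*
 * Try to allocate a free bit from the word at @index, applying any deferred
 * clears and retrying if that frees up bits.
 */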
static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
				     unsigned int alloc_hint)
{
	struct sbitmap_word *map = &sb->map[index];
	int nr;

	do {
		nr = __sbitmap_get_word(&map->word, __map_depth(sb, index),
					alloc_hint, !sb->round_robin);
		if (nr != -1)
			break;
		if (!sbitmap_deferred_clear(map))
			break;
	} while (1);

	return nr;
}

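/*
 * Scan the words of the bitmap starting at the word indicated by
 * @alloc_hint, wrapping around until a free bit is found or every word has
 * been tried.
 */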
static int __sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	/*
	 * Unless we're doing round robin tag allocation, just use the
	 * alloc_hint to find the right word index. No point in looping
	 * twice in find_next_zero_bit() for that case.
	 */
	if (sb->round_robin)
		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
	else
		alloc_hint = 0;

	for (i = 0; i < sb->map_nr; i++) {
		nr = sbitmap_find_bit_in_index(sb, index, alloc_hint);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}

int sbitmap_get(struct sbitmap *sb)
{
	int nr;
	unsigned int hint, depth;

	if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
		return -1;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);
	nr = __sbitmap_get(sb, hint);
	update_alloc_hint_after_get(sb, depth, hint, nr);

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);

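/*
 * Like __sbitmap_get(), but only considers the first @shallow_depth bits of
 * each word, limiting how many bits per word can be allocated.
 */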
static int __sbitmap_get_shallow(struct sbitmap *sb,
				 unsigned int alloc_hint,
				 unsigned long shallow_depth)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	for (i = 0; i < sb->map_nr; i++) {
again:
		nr = __sbitmap_get_word(&sb->map[index].word,
					min_t(unsigned int,
					      __map_depth(sb, index),
					      shallow_depth),
					SB_NR_TO_BIT(sb, alloc_hint), true);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		if (sbitmap_deferred_clear(&sb->map[index]))
			goto again;

		/* Jump to next index. */
		index++;
		alloc_hint = index << sb->shift;

		if (index >= sb->map_nr) {
			index = 0;
			alloc_hint = 0;
		}
	}

	return nr;
}

int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth)
{
	int nr;
	unsigned int hint, depth;

	if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
		return -1;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);
	nr = __sbitmap_get_shallow(sb, hint, shallow_depth);
	update_alloc_hint_after_get(sb, depth, hint, nr);

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);

bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word & ~sb->map[i].cleared)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

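/*
 * Count either the set bits (@set == true) or the deferred-cleared bits
 * (@set == false) across all words of the bitmap.
 */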
static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];
		unsigned int word_depth = __map_depth(sb, i);

		if (set)
			weight += bitmap_weight(&word->word, word_depth);
		else
			weight += bitmap_weight(&word->cleared, word_depth);
	}
	return weight;
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, false);
}

unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, true) - sbitmap_cleared(sb);
}
EXPORT_SYMBOL_GPL(sbitmap_weight);

void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb));
	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);

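/*
 * Print one byte of the bitmap dump, starting a new "offset:" line every 16
 * bytes and grouping the output two bytes at a time.
 */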
static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	if ((offset & 0xf) == 0) {
		if (offset != 0)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if ((offset & 0x1) == 0)
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}

void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned long cleared = READ_ONCE(sb->map[i].cleared);
		unsigned int word_bits = __map_depth(sb, i);

		word &= ~cleared;

		while (word_bits > 0) {
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}
	if (byte_bits) {
		emit_byte(m, offset, byte);
		offset++;
	}
	if (offset)
		seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);

static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap,
	 * potentially limited by a shallow depth, is enough to wake up all of
	 * the queues.
	 *
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);

	return wake_batch;
}

int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node,
				round_robin, true);
	if (ret)
		return ret;

	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		init_waitqueue_head(&sbq->ws[i].wait);
		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);

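/*
 * Install a new wake batch size and reset every wait queue's wait count to
 * 1, so the next bit cleared against each queue triggers a wakeup.
 */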
static inline void __sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int wake_batch)
{
	int i;

	if (sbq->wake_batch != wake_batch) {
		WRITE_ONCE(sbq->wake_batch, wake_batch);
		/*
		 * Pairs with the memory barrier in sbitmap_queue_wake_up()
		 * to ensure that the batch size is updated before the wait
		 * counts.
		 */
		smp_mb();
		for (i = 0; i < SBQ_WAIT_QUEUES; i++)
			atomic_set(&sbq->ws[i].wait_cnt, 1);
	}
}

static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch;

	wake_batch = sbq_calc_wake_batch(sbq, depth);
	__sbitmap_queue_update_wake_batch(sbq, wake_batch);
}

void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int users)
{
	unsigned int wake_batch;
	unsigned int min_batch;
	unsigned int depth = (sbq->sb.depth + users - 1) / users;

	min_batch = sbq->sb.depth >= (4 * SBQ_WAIT_QUEUES) ? 4 : 1;

	wake_batch = clamp_val(depth / SBQ_WAIT_QUEUES,
			min_batch, SBQ_WAKE_BATCH);
	__sbitmap_queue_update_wake_batch(sbq, wake_batch);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_recalculate_wake_batch);

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbitmap_queue_update_wake_batch(sbq, depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	return sbitmap_get(&sbq->sb);
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);

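/*
 * Try to grab up to @nr_tags consecutive free bits from a single word with
 * one atomic cmpxchg. On success, *offset is set to the number of the first
 * bit and the return value is the mask of acquired bits shifted down to bit
 * 0; on failure, 0 is returned.
 */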
unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
					unsigned int *offset)
{
	struct sbitmap *sb = &sbq->sb;
	unsigned int hint, depth;
	unsigned long index, nr;
	int i;

	if (unlikely(sb->round_robin))
		return 0;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);

	index = SB_NR_TO_INDEX(sb, hint);

	for (i = 0; i < sb->map_nr; i++) {
		struct sbitmap_word *map = &sb->map[index];
		unsigned long get_mask;
		unsigned int map_depth = __map_depth(sb, index);

		sbitmap_deferred_clear(map);
		if (map->word == (1UL << (map_depth - 1)) - 1)
			goto next;

		nr = find_first_zero_bit(&map->word, map_depth);
		if (nr + nr_tags <= map_depth) {
			atomic_long_t *ptr = (atomic_long_t *) &map->word;
			int map_tags = min_t(int, nr_tags, map_depth);
			unsigned long val, ret;

			get_mask = ((1UL << map_tags) - 1) << nr;
			do {
				val = READ_ONCE(map->word);
				if ((val & ~get_mask) != val)
					goto next;
				ret = atomic_long_cmpxchg(ptr, val, get_mask | val);
			} while (ret != val);
			get_mask = (get_mask & ~ret) >> nr;
			if (get_mask) {
				*offset = nr + (index << sb->shift);
				update_alloc_hint_after_get(sb, depth, hint,
							*offset + map_tags - 1);
				return get_mask;
			}
		}
next:
		/* Jump to next index. */
		if (++index >= sb->map_nr)
			index = 0;
	}

	return 0;
}

int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
			      unsigned int shallow_depth)
{
	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

	return sbitmap_get_shallow(&sbq->sb, shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_get_shallow);

void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth)
{
	sbq->min_shallow_depth = min_shallow_depth;
	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);

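/*
 * Pick the next wait queue that has active waiters, starting from
 * wake_index, or return NULL if nobody is waiting.
 */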
static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	if (!atomic_read(&sbq->ws_active))
		return NULL;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait)) {
			if (wake_index != atomic_read(&sbq->wake_index))
				atomic_set(&sbq->wake_index, wake_index);
			return ws;
		}

		wake_index = sbq_index_inc(wake_index);
	}

	return NULL;
}

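/*
 * Charge one freed bit against the current wait queue's wait count and,
 * once a full batch has been freed, wake up a batch of waiters. Returns
 * true if the caller lost the reset race and should retry on another wait
 * queue.
 */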
static bool __sbq_wake_up(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws;
	unsigned int wake_batch;
	int wait_cnt;

	ws = sbq_wake_ptr(sbq);
	if (!ws)
		return false;

	wait_cnt = atomic_dec_return(&ws->wait_cnt);
	if (wait_cnt <= 0) {
		int ret;

		wake_batch = READ_ONCE(sbq->wake_batch);

		/*
		 * Pairs with the memory barrier in sbitmap_queue_resize() to
		 * ensure that we see the batch size update before the wait
		 * count is reset.
		 */
		smp_mb__before_atomic();

		/*
		 * For concurrent callers of this, the one that failed the
		 * atomic_cmpxchg() race should call this function again
		 * to wake up a new batch on a different 'ws'.
		 */
		ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch);
		if (ret == wait_cnt) {
			sbq_index_atomic_inc(&sbq->wake_index);
			wake_up_nr(&ws->wait, wake_batch);
			return false;
		}

		return true;
	}

	return false;
}

void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
{
	while (__sbq_wake_up(sbq))
		;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);

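/*
 * Remember the freed tag as @cpu's next allocation hint, unless the bitmap
 * is in round-robin mode or the tag is out of range.
 */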
static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag)
{
	if (likely(!sb->round_robin && tag < sb->depth))
		data_race(*per_cpu_ptr(sb->alloc_hint, cpu) = tag);
}

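/*
 * Clear a batch of tags, coalescing consecutive tags that fall in the same
 * word into a single atomic andnot, then wake up waiters and update the
 * local CPU's allocation hint.
 */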
void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
				int *tags, int nr_tags)
{
	struct sbitmap *sb = &sbq->sb;
	unsigned long *addr = NULL;
	unsigned long mask = 0;
	int i;

	smp_mb__before_atomic();
	for (i = 0; i < nr_tags; i++) {
		const int tag = tags[i] - offset;
		unsigned long *this_addr;

		/* since we're clearing a batch, skip the deferred map */
		this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word;
		if (!addr) {
			addr = this_addr;
		} else if (addr != this_addr) {
			atomic_long_andnot(mask, (atomic_long_t *) addr);
			mask = 0;
			addr = this_addr;
		}
		mask |= (1UL << SB_NR_TO_BIT(sb, tag));
	}

	if (mask)
		atomic_long_andnot(mask, (atomic_long_t *) addr);

	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);
	sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
					tags[nr_tags - 1] - offset);
}

void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	/*
	 * Once the clear bit is set, the bit may be allocated out.
	 *
	 * Orders READ/WRITE on the associated instance (such as request
	 * of blk_mq) by this bit for avoiding race with re-allocation,
	 * and its pair is the memory barrier implied in __sbitmap_get_word.
	 *
	 * One invariant is that the clear bit has to be zero when the bit
	 * is in use.
	 */
	smp_mb__before_atomic();
	sbitmap_deferred_clear_bit(&sbq->sb, nr);

	/*
	 * Pairs with the memory barrier in set_current_state() to ensure the
	 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
	 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
	 * waiter. See the comment on waitqueue_active().
	 */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);
	sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);

void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Pairs with the memory barrier in set_current_state() like in
	 * sbitmap_queue_wake_up().
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);

void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
	bool first;
	int i;

	sbitmap_show(&sbq->sb, m);

	seq_puts(m, "alloc_hint={");
	first = true;
	for_each_possible_cpu(i) {
		if (!first)
			seq_puts(m, ", ");
		first = false;
		seq_printf(m, "%u", *per_cpu_ptr(sbq->sb.alloc_hint, i));
	}
	seq_puts(m, "}\n");

	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
	seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));

	seq_puts(m, "ws={\n");
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[i];

		seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
			   atomic_read(&ws->wait_cnt),
			   waitqueue_active(&ws->wait) ? "active" : "inactive");
	}
	seq_puts(m, "}\n");

	seq_printf(m, "round_robin=%d\n", sbq->sb.round_robin);
	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);

void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait)
{
	if (!sbq_wait->sbq) {
		sbq_wait->sbq = sbq;
		atomic_inc(&sbq->ws_active);
		add_wait_queue(&ws->wait, &sbq_wait->wait);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);

void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
{
	list_del_init(&sbq_wait->wait.entry);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq_wait->sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);

void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state)
{
	if (!sbq_wait->sbq) {
		atomic_inc(&sbq->ws_active);
		sbq_wait->sbq = sbq;
	}
	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);

void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait)
{
	finish_wait(&ws->wait, &sbq_wait->wait);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);