cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

idr.c (17952B)


// SPDX-License-Identifier: GPL-2.0-only
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>

/**
 * idr_alloc_u32() - Allocate an ID.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @nextid: Pointer to an ID.
 * @max: The maximum ID to allocate (inclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @nextid and @max.
 * Note that @max is inclusive whereas the @end parameter to idr_alloc()
 * is exclusive.  The new ID is assigned to @nextid before the pointer
 * is inserted into the IDR, so if @nextid points into the object pointed
 * to by @ptr, a concurrent lookup will not find an uninitialised ID.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible.  Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: 0 if an ID was allocated, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found.  If an error occurred,
 * @nextid is unchanged.
 */
int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
			unsigned long max, gfp_t gfp)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	unsigned int base = idr->idr_base;
	unsigned int id = *nextid;

	if (WARN_ON_ONCE(!(idr->idr_rt.xa_flags & ROOT_IS_IDR)))
		idr->idr_rt.xa_flags |= IDR_RT_MARKER;

	id = (id < base) ? 0 : id - base;
	radix_tree_iter_init(&iter, id);
	slot = idr_get_free(&idr->idr_rt, &iter, gfp, max - base);
	if (IS_ERR(slot))
		return PTR_ERR(slot);

	*nextid = iter.index + base;
	/* there is a memory barrier inside radix_tree_iter_replace() */
	radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
	radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);

	return 0;
}
EXPORT_SYMBOL_GPL(idr_alloc_u32);
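
/*
 * Editor's note: a minimal usage sketch for idr_alloc_u32(), not part of
 * the original file; the names below are hypothetical.  It shows the
 * pattern described above: @nextid points into the object itself, so a
 * concurrent RCU lookup never observes an uninitialised ID.  GFP_NOWAIT
 * is used because the allocation happens under a spinlock.
 */
struct example_obj {
	u32 id;		/* written by idr_alloc_u32() before insertion */
};

static DEFINE_IDR(example_u32_idr);
static DEFINE_SPINLOCK(example_u32_lock);

static int example_u32_insert(struct example_obj *obj)
{
	int ret;

	spin_lock(&example_u32_lock);
	ret = idr_alloc_u32(&example_u32_idr, obj, &obj->id, UINT_MAX,
				GFP_NOWAIT);
	spin_unlock(&example_u32_lock);

	return ret;	/* 0, -ENOMEM or -ENOSPC */
}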

/**
 * idr_alloc() - Allocate an ID.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @start: The minimum ID (inclusive).
 * @end: The maximum ID (exclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @start and @end.  If
 * @end is <= 0, it is treated as one larger than %INT_MAX.  This allows
 * callers to use @start + N as @end as long as N is within integer range.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible.  Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found.
 */
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
	u32 id = start;
	int ret;

	if (WARN_ON_ONCE(start < 0))
		return -EINVAL;

	ret = idr_alloc_u32(idr, ptr, &id, end > 0 ? end - 1 : INT_MAX, gfp);
	if (ret)
		return ret;

	return id;
}
EXPORT_SYMBOL_GPL(idr_alloc);
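
/*
 * Editor's note: a hedged usage sketch for idr_alloc(), not part of the
 * original file; names are hypothetical.  This is the classic
 * idr_preload() pattern: preallocate with GFP_KERNEL outside the lock,
 * then allocate with GFP_NOWAIT while holding it.
 */
static DEFINE_IDR(example_idr);
static DEFINE_SPINLOCK(example_lock);

static int example_alloc_id(void *object)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock(&example_lock);
	/* end == 0 is treated as one larger than INT_MAX: no upper bound */
	id = idr_alloc(&example_idr, object, 1, 0, GFP_NOWAIT);
	spin_unlock(&example_lock);
	idr_preload_end();

	return id;	/* >= 1 on success, -ENOMEM/-ENOSPC on failure */
}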

/**
 * idr_alloc_cyclic() - Allocate an ID cyclically.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @start: The minimum ID (inclusive).
 * @end: The maximum ID (exclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @start and @end.  If
 * @end is <= 0, it is treated as one larger than %INT_MAX.  This allows
 * callers to use @start + N as @end as long as N is within integer range.
 * The search for an unused ID will start at the last ID allocated and will
 * wrap around to @start if no free IDs are found before reaching @end.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible.  Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found.
 */
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
	u32 id = idr->idr_next;
	int err, max = end > 0 ? end - 1 : INT_MAX;

	if ((int)id < start)
		id = start;

	err = idr_alloc_u32(idr, ptr, &id, max, gfp);
	if ((err == -ENOSPC) && (id > start)) {
		id = start;
		err = idr_alloc_u32(idr, ptr, &id, max, gfp);
	}
	if (err)
		return err;

	idr->idr_next = id + 1;
	return id;
}
EXPORT_SYMBOL(idr_alloc_cyclic);
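
/*
 * Editor's note: hypothetical sketch for idr_alloc_cyclic(), not part of
 * the original file.  Cyclic allocation suits IDs that should not be
 * reused immediately, so that a stale ID held elsewhere is unlikely to
 * match a newly created object.
 */
static int example_cyclic_insert(struct idr *idr, spinlock_t *lock, void *obj)
{
	int id;

	spin_lock(lock);
	/* the search begins after the last allocated ID and wraps to 1 */
	id = idr_alloc_cyclic(idr, obj, 1, 0, GFP_NOWAIT);
	spin_unlock(lock);

	return id;
}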

/**
 * idr_remove() - Remove an ID from the IDR.
 * @idr: IDR handle.
 * @id: Pointer ID.
 *
 * Removes this ID from the IDR.  If the ID was not previously in the IDR,
 * this function returns %NULL.
 *
 * Since this function modifies the IDR, the caller should provide their
 * own locking to ensure that concurrent modification of the same IDR is
 * not possible.
 *
 * Return: The pointer formerly associated with this ID.
 */
void *idr_remove(struct idr *idr, unsigned long id)
{
	return radix_tree_delete_item(&idr->idr_rt, id - idr->idr_base, NULL);
}
EXPORT_SYMBOL_GPL(idr_remove);

/**
 * idr_find() - Return pointer for given ID.
 * @idr: IDR handle.
 * @id: Pointer ID.
 *
 * Looks up the pointer associated with this ID.  A %NULL pointer may
 * indicate that @id is not allocated or that the %NULL pointer was
 * associated with this ID.
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 *
 * Return: The pointer associated with this ID.
 */
void *idr_find(const struct idr *idr, unsigned long id)
{
	return radix_tree_lookup(&idr->idr_rt, id - idr->idr_base);
}
EXPORT_SYMBOL_GPL(idr_find);
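
/*
 * Editor's note: hypothetical lookup/remove sketch, not part of the
 * original file.  Lookups may run under rcu_read_lock() as noted above,
 * provided the stored objects are freed only after an RCU grace period;
 * removal still requires the caller's writer-side lock.
 */
static void *example_lookup(struct idr *idr, unsigned long id)
{
	void *obj;

	rcu_read_lock();
	obj = idr_find(idr, id);
	/* @obj must not be used after rcu_read_unlock() unless refcounted */
	rcu_read_unlock();

	return obj;
}

static void *example_remove(struct idr *idr, spinlock_t *lock,
				unsigned long id)
{
	void *obj;

	spin_lock(lock);
	obj = idr_remove(idr, id);	/* NULL if @id was not allocated */
	spin_unlock(lock);

	return obj;
}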

/**
 * idr_for_each() - Iterate through all stored pointers.
 * @idr: IDR handle.
 * @fn: Function to be called for each pointer.
 * @data: Data passed to callback function.
 *
 * The callback function will be called for each entry in @idr, passing
 * the ID, the entry and @data.
 *
 * If @fn returns anything other than %0, the iteration stops and that
 * value is returned from this function.
 *
 * idr_for_each() can be called concurrently with idr_alloc() and
 * idr_remove() if protected by RCU.  Newly added entries may not be
 * seen and deleted entries may be seen, but adding and removing entries
 * will not cause other entries to be skipped, nor spurious ones to be seen.
 */
int idr_for_each(const struct idr *idr,
		int (*fn)(int id, void *p, void *data), void *data)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	int base = idr->idr_base;

	radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
		int ret;
		unsigned long id = iter.index + base;

		if (WARN_ON_ONCE(id > INT_MAX))
			break;
		ret = fn(id, rcu_dereference_raw(*slot), data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(idr_for_each);
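
/*
 * Editor's note: hypothetical callback sketch for idr_for_each(), not
 * part of the original file.  The walk aborts as soon as the callback
 * returns non-zero, and that value is propagated back to the caller.
 */
static int example_count_cb(int id, void *p, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;	/* non-zero would stop the iteration */
}

static unsigned int example_count(struct idr *idr)
{
	unsigned int count = 0;

	idr_for_each(idr, example_count_cb, &count);
	return count;
}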

/**
 * idr_get_next_ul() - Find next populated entry.
 * @idr: IDR handle.
 * @nextid: Pointer to an ID.
 *
 * Returns the next populated entry in the tree with an ID greater than
 * or equal to the value pointed to by @nextid.  On exit, @nextid is updated
 * to the ID of the found value.  To use in a loop, the value pointed to by
 * nextid must be incremented by the user.
 */
void *idr_get_next_ul(struct idr *idr, unsigned long *nextid)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	void *entry = NULL;
	unsigned long base = idr->idr_base;
	unsigned long id = *nextid;

	id = (id < base) ? 0 : id - base;
	radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, id) {
		entry = rcu_dereference_raw(*slot);
		if (!entry)
			continue;
		if (!xa_is_internal(entry))
			break;
		if (slot != &idr->idr_rt.xa_head && !xa_is_retry(entry))
			break;
		slot = radix_tree_iter_retry(&iter);
	}
	if (!slot)
		return NULL;

	*nextid = iter.index + base;
	return entry;
}
EXPORT_SYMBOL(idr_get_next_ul);
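
/*
 * Editor's note: hypothetical sketch of the loop pattern described above;
 * the caller advances *nextid between calls.  The idr_for_each_entry_ul()
 * macro in <linux/idr.h> wraps this same loop.
 */
static void example_walk(struct idr *idr)
{
	unsigned long id = 0;
	void *entry;

	while ((entry = idr_get_next_ul(idr, &id)) != NULL) {
		/* ... use @entry, stored at ID @id ... */
		id++;	/* step past the entry just returned */
	}
}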

/**
 * idr_get_next() - Find next populated entry.
 * @idr: IDR handle.
 * @nextid: Pointer to an ID.
 *
 * Returns the next populated entry in the tree with an ID greater than
 * or equal to the value pointed to by @nextid.  On exit, @nextid is updated
 * to the ID of the found value.  To use in a loop, the value pointed to by
 * nextid must be incremented by the user.
 */
void *idr_get_next(struct idr *idr, int *nextid)
{
	unsigned long id = *nextid;
	void *entry = idr_get_next_ul(idr, &id);

	if (WARN_ON_ONCE(id > INT_MAX))
		return NULL;
	*nextid = id;
	return entry;
}
EXPORT_SYMBOL(idr_get_next);

/**
 * idr_replace() - replace pointer for given ID.
 * @idr: IDR handle.
 * @ptr: New pointer to associate with the ID.
 * @id: ID to change.
 *
 * Replace the pointer registered with an ID and return the old value.
 * This function can be called under the RCU read lock concurrently with
 * idr_alloc() and idr_remove() (as long as the ID being removed is not
 * the one being replaced!).
 *
 * Returns: the old value on success.  %-ENOENT indicates that @id was not
 * found.  %-EINVAL indicates that @ptr was not valid.
 */
void *idr_replace(struct idr *idr, void *ptr, unsigned long id)
{
	struct radix_tree_node *node;
	void __rcu **slot = NULL;
	void *entry;

	id -= idr->idr_base;

	entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
	if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
		return ERR_PTR(-ENOENT);

	__radix_tree_replace(&idr->idr_rt, node, slot, ptr);

	return entry;
}
EXPORT_SYMBOL(idr_replace);
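
/*
 * Editor's note: hypothetical sketch for idr_replace(), not part of the
 * original file.  Errors come back as error pointers, so the result must
 * be checked with IS_ERR() rather than compared against NULL.
 */
static void *example_replace(struct idr *idr, void *new, unsigned long id)
{
	void *old;

	old = idr_replace(idr, new, id);
	if (IS_ERR(old))
		return NULL;	/* -ENOENT: @id not found; -EINVAL: bad @new */

	return old;	/* the caller owns the old pointer again */
}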

/**
 * DOC: IDA description
 *
 * The IDA is an ID allocator which does not provide the ability to
 * associate an ID with a pointer.  As such, it only needs to store one
 * bit per ID, and so is more space efficient than an IDR.  To use an IDA,
 * define it using DEFINE_IDA() (or embed a &struct ida in a data structure,
 * then initialise it using ida_init()).  To allocate a new ID, call
 * ida_alloc(), ida_alloc_min(), ida_alloc_max() or ida_alloc_range().
 * To free an ID, call ida_free().
 *
 * ida_destroy() can be used to dispose of an IDA without needing to
 * free the individual IDs in it.  You can use ida_is_empty() to find
 * out whether the IDA has any IDs currently allocated.
 *
 * The IDA handles its own locking.  It is safe to call any of the IDA
 * functions without synchronisation in your code.
 *
 * IDs are currently limited to the range [0-INT_MAX].  If this is an awkward
 * limitation, it should be quite straightforward to raise the maximum.
 */
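
/*
 * Editor's note: a minimal IDA usage sketch, not part of the original
 * file; names are hypothetical.  No external locking is needed, since
 * the IDA serialises internally as the description above states.
 */
static DEFINE_IDA(example_ida);

static int example_get_minor(void)
{
	/* any free ID in [0, INT_MAX] */
	return ida_alloc(&example_ida, GFP_KERNEL);
}

static void example_put_minor(int minor)
{
	ida_free(&example_ida, minor);
}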

/*
 * Developer's notes:
 *
 * The IDA uses the functionality provided by the XArray to store bitmaps in
 * each entry.  The XA_FREE_MARK is only cleared when all bits in the bitmap
 * have been set.
 *
 * I considered telling the XArray that each slot is an order-10 node
 * and indexing by bit number, but the XArray can't allow a single multi-index
 * entry in the head, which would significantly increase memory consumption
 * for the IDA.  So instead we divide the index by the number of bits in the
 * leaf bitmap before doing a radix tree lookup.
 *
 * As an optimisation, if there are only a few low bits set in any given
 * leaf, instead of allocating a 128-byte bitmap, we store the bits
 * as a value entry.  Value entries never have the XA_FREE_MARK cleared
 * because we can always convert them into a bitmap entry.
 *
 * It would be possible to optimise further; once we've run out of a
 * single 128-byte bitmap, we currently switch to a 576-byte node, put
 * the 128-byte bitmap in the first entry and then start allocating extra
 * 128-byte entries.  We could instead use the 512 bytes of the node's
 * data as a bitmap before moving to that scheme.  I do not believe this
 * is a worthwhile optimisation; Rasmus Villemoes surveyed the current
 * users of the IDA and almost none of them use more than 1024 entries.
 * Those that do use more than the 8192 IDs that the 512 bytes would
 * provide.
 *
 * The IDA always uses a lock to alloc/free.  If we add a 'test_bit'
 * equivalent, it will still need locking.  Going to RCU lookup would require
 * using RCU to free bitmaps, and that's not trivial without embedding an
 * RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte
 * bitmap, which is excessive.
 */
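
/*
 * Editor's note: worked arithmetic for the index split described above,
 * added for illustration.  An ID maps to an XArray index and a bit offset
 * within the leaf bitmap:
 *
 *	index = id / IDA_BITMAP_BITS;
 *	bit   = id % IDA_BITMAP_BITS;
 *
 * A 128-byte bitmap holds 1024 bits, so ID 3000 lands at index 2, bit 952.
 * Leaves whose set bits all fall below BITS_PER_XA_VALUE can be packed
 * directly into the slot as a value entry instead of a bitmap.
 */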

/**
 * ida_alloc_range() - Allocate an unused ID.
 * @ida: IDA handle.
 * @min: Lowest ID to allocate.
 * @max: Highest ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between @min and @max, inclusive.  The allocated ID will
 * not exceed %INT_MAX, even if @max is larger.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max,
			gfp_t gfp)
{
	XA_STATE(xas, &ida->xa, min / IDA_BITMAP_BITS);
	unsigned bit = min % IDA_BITMAP_BITS;
	unsigned long flags;
	struct ida_bitmap *bitmap, *alloc = NULL;

	if ((int)min < 0)
		return -ENOSPC;

	if ((int)max < 0)
		max = INT_MAX;

retry:
	xas_lock_irqsave(&xas, flags);
next:
	bitmap = xas_find_marked(&xas, max / IDA_BITMAP_BITS, XA_FREE_MARK);
	if (xas.xa_index > min / IDA_BITMAP_BITS)
		bit = 0;
	if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
		goto nospc;

	if (xa_is_value(bitmap)) {
		unsigned long tmp = xa_to_value(bitmap);

		if (bit < BITS_PER_XA_VALUE) {
			bit = find_next_zero_bit(&tmp, BITS_PER_XA_VALUE, bit);
			if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
				goto nospc;
			if (bit < BITS_PER_XA_VALUE) {
				tmp |= 1UL << bit;
				xas_store(&xas, xa_mk_value(tmp));
				goto out;
			}
		}
		bitmap = alloc;
		if (!bitmap)
			bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
		if (!bitmap)
			goto alloc;
		bitmap->bitmap[0] = tmp;
		xas_store(&xas, bitmap);
		if (xas_error(&xas)) {
			bitmap->bitmap[0] = 0;
			goto out;
		}
	}

	if (bitmap) {
		bit = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, bit);
		if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
			goto nospc;
		if (bit == IDA_BITMAP_BITS)
			goto next;

		__set_bit(bit, bitmap->bitmap);
		if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
			xas_clear_mark(&xas, XA_FREE_MARK);
	} else {
		if (bit < BITS_PER_XA_VALUE) {
			bitmap = xa_mk_value(1UL << bit);
		} else {
			bitmap = alloc;
			if (!bitmap)
				bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
			if (!bitmap)
				goto alloc;
			__set_bit(bit, bitmap->bitmap);
		}
		xas_store(&xas, bitmap);
	}
out:
	xas_unlock_irqrestore(&xas, flags);
	if (xas_nomem(&xas, gfp)) {
		xas.xa_index = min / IDA_BITMAP_BITS;
		bit = min % IDA_BITMAP_BITS;
		goto retry;
	}
	if (bitmap != alloc)
		kfree(alloc);
	if (xas_error(&xas))
		return xas_error(&xas);
	return xas.xa_index * IDA_BITMAP_BITS + bit;
alloc:
	xas_unlock_irqrestore(&xas, flags);
	alloc = kzalloc(sizeof(*bitmap), gfp);
	if (!alloc)
		return -ENOMEM;
	xas_set(&xas, min / IDA_BITMAP_BITS);
	bit = min % IDA_BITMAP_BITS;
	goto retry;
nospc:
	xas_unlock_irqrestore(&xas, flags);
	kfree(alloc);
	return -ENOSPC;
}
EXPORT_SYMBOL(ida_alloc_range);
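
/*
 * Editor's note: hypothetical sketch for ida_alloc_range(), not part of
 * the original file.  The ida_alloc(), ida_alloc_min() and ida_alloc_max()
 * helpers in <linux/idr.h> are thin wrappers around this function.
 */
static int example_alloc_port(struct ida *ida)
{
	/* allocate an unused "port" number in [1024, 65535], inclusive */
	return ida_alloc_range(ida, 1024, 65535, GFP_KERNEL);
}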

/**
 * ida_free() - Release an allocated ID.
 * @ida: IDA handle.
 * @id: Previously allocated ID.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 */
void ida_free(struct ida *ida, unsigned int id)
{
	XA_STATE(xas, &ida->xa, id / IDA_BITMAP_BITS);
	unsigned bit = id % IDA_BITMAP_BITS;
	struct ida_bitmap *bitmap;
	unsigned long flags;

	if ((int)id < 0)
		return;

	xas_lock_irqsave(&xas, flags);
	bitmap = xas_load(&xas);

	if (xa_is_value(bitmap)) {
		unsigned long v = xa_to_value(bitmap);
		if (bit >= BITS_PER_XA_VALUE)
			goto err;
		if (!(v & (1UL << bit)))
			goto err;
		v &= ~(1UL << bit);
		if (!v)
			goto delete;
		xas_store(&xas, xa_mk_value(v));
	} else {
		if (!test_bit(bit, bitmap->bitmap))
			goto err;
		__clear_bit(bit, bitmap->bitmap);
		xas_set_mark(&xas, XA_FREE_MARK);
		if (bitmap_empty(bitmap->bitmap, IDA_BITMAP_BITS)) {
			kfree(bitmap);
delete:
			xas_store(&xas, NULL);
		}
	}
	xas_unlock_irqrestore(&xas, flags);
	return;
 err:
	xas_unlock_irqrestore(&xas, flags);
	WARN(1, "ida_free called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_free);

/**
 * ida_destroy() - Free all IDs.
 * @ida: IDA handle.
 *
 * Calling this function frees all IDs and releases all resources used
 * by an IDA.  When this call returns, the IDA is empty and can be reused
 * or freed.  If the IDA is already empty, there is no need to call this
 * function.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 */
void ida_destroy(struct ida *ida)
{
	XA_STATE(xas, &ida->xa, 0);
	struct ida_bitmap *bitmap;
	unsigned long flags;

	xas_lock_irqsave(&xas, flags);
	xas_for_each(&xas, bitmap, ULONG_MAX) {
		if (!xa_is_value(bitmap))
			kfree(bitmap);
		xas_store(&xas, NULL);
	}
	xas_unlock_irqrestore(&xas, flags);
}
EXPORT_SYMBOL(ida_destroy);
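
/*
 * Editor's note: hypothetical teardown sketch, not part of the original
 * file.  ida_destroy() drops every outstanding ID at once, so it suits
 * module exit paths where all users of the IDs are already gone;
 * ida_is_empty() can sanity-check that first.
 */
static void example_teardown(struct ida *ida)
{
	WARN_ON(!ida_is_empty(ida));	/* optional: IDs still allocated? */
	ida_destroy(ida);
}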

#ifndef __KERNEL__
extern void xa_dump_index(unsigned long index, unsigned int shift);
#define IDA_CHUNK_SHIFT		ilog2(IDA_BITMAP_BITS)

static void ida_dump_entry(void *entry, unsigned long index)
{
	unsigned long i;

	if (!entry)
		return;

	if (xa_is_node(entry)) {
		struct xa_node *node = xa_to_node(entry);
		unsigned int shift = node->shift + IDA_CHUNK_SHIFT +
			XA_CHUNK_SHIFT;

		xa_dump_index(index * IDA_BITMAP_BITS, shift);
		xa_dump_node(node);
		for (i = 0; i < XA_CHUNK_SIZE; i++)
			ida_dump_entry(node->slots[i],
					index | (i << node->shift));
	} else if (xa_is_value(entry)) {
		xa_dump_index(index * IDA_BITMAP_BITS, ilog2(BITS_PER_LONG));
		pr_cont("value: data %lx [%px]\n", xa_to_value(entry), entry);
	} else {
		struct ida_bitmap *bitmap = entry;

		xa_dump_index(index * IDA_BITMAP_BITS, IDA_CHUNK_SHIFT);
		pr_cont("bitmap: %p data", bitmap);
		for (i = 0; i < IDA_BITMAP_LONGS; i++)
			pr_cont(" %lx", bitmap->bitmap[i]);
		pr_cont("\n");
	}
}

static void ida_dump(struct ida *ida)
{
	struct xarray *xa = &ida->xa;
	pr_debug("ida: %p node %p free %d\n", ida, xa->xa_head,
				xa->xa_flags >> ROOT_TAG_SHIFT);
	ida_dump_entry(xa->xa_head, 0);
}
#endif