cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

slab_common.c (33892B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Slab allocator functions that are independent of the allocator strategy
      4 *
      5 * (C) 2012 Christoph Lameter <cl@linux.com>
      6 */
      7#include <linux/slab.h>
      8
      9#include <linux/mm.h>
     10#include <linux/poison.h>
     11#include <linux/interrupt.h>
     12#include <linux/memory.h>
     13#include <linux/cache.h>
     14#include <linux/compiler.h>
     15#include <linux/kfence.h>
     16#include <linux/module.h>
     17#include <linux/cpu.h>
     18#include <linux/uaccess.h>
     19#include <linux/seq_file.h>
     20#include <linux/proc_fs.h>
     21#include <linux/debugfs.h>
     22#include <linux/kasan.h>
     23#include <asm/cacheflush.h>
     24#include <asm/tlbflush.h>
     25#include <asm/page.h>
     26#include <linux/memcontrol.h>
     27#include <linux/stackdepot.h>
     28
     29#define CREATE_TRACE_POINTS
     30#include <trace/events/kmem.h>
     31
     32#include "internal.h"
     33
     34#include "slab.h"
     35
     36enum slab_state slab_state;
     37LIST_HEAD(slab_caches);
     38DEFINE_MUTEX(slab_mutex);
     39struct kmem_cache *kmem_cache;
     40
     41static LIST_HEAD(slab_caches_to_rcu_destroy);
     42static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
     43static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
     44		    slab_caches_to_rcu_destroy_workfn);
     45
     46/*
     47 * Set of flags that will prevent slab merging
     48 */
     49#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
     50		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
     51		SLAB_FAILSLAB | kasan_never_merge())
     52
     53#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
     54			 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
     55
     56/*
     57 * Merge control. If this is set then no merging of slab caches will occur.
     58 */
     59static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
     60
     61static int __init setup_slab_nomerge(char *str)
     62{
     63	slab_nomerge = true;
     64	return 1;
     65}
     66
     67static int __init setup_slab_merge(char *str)
     68{
     69	slab_nomerge = false;
     70	return 1;
     71}
     72
     73#ifdef CONFIG_SLUB
     74__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
     75__setup_param("slub_merge", slub_merge, setup_slab_merge, 0);
     76#endif
     77
     78__setup("slab_nomerge", setup_slab_nomerge);
     79__setup("slab_merge", setup_slab_merge);
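
/*
 * Editor's note (not part of the original file): "slab_nomerge" and
 * "slab_merge" (plus the "slub_"-prefixed aliases under CONFIG_SLUB) are
 * kernel command line parameters.  Booting with e.g. slab_nomerge keeps
 * every cache separate, which can help when debugging use-after-free bugs
 * or when attributing /proc/slabinfo usage to individual caches.
 */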
     80
     81/*
     82 * Determine the size of a slab object
     83 */
     84unsigned int kmem_cache_size(struct kmem_cache *s)
     85{
     86	return s->object_size;
     87}
     88EXPORT_SYMBOL(kmem_cache_size);
     89
     90#ifdef CONFIG_DEBUG_VM
     91static int kmem_cache_sanity_check(const char *name, unsigned int size)
     92{
     93	if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
     94		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
     95		return -EINVAL;
     96	}
     97
     98	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
     99	return 0;
    100}
    101#else
    102static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
    103{
    104	return 0;
    105}
    106#endif
    107
    108void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
    109{
    110	size_t i;
    111
    112	for (i = 0; i < nr; i++) {
    113		if (s)
    114			kmem_cache_free(s, p[i]);
    115		else
    116			kfree(p[i]);
    117	}
    118}
    119
    120int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
    121								void **p)
    122{
    123	size_t i;
    124
    125	for (i = 0; i < nr; i++) {
    126		void *x = p[i] = kmem_cache_alloc(s, flags);
    127		if (!x) {
    128			__kmem_cache_free_bulk(s, i, p);
    129			return 0;
    130		}
    131	}
    132	return i;
    133}
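
/*
 * Editor's sketch (not part of the original file): how a caller might use
 * the bulk API whose generic fallbacks are implemented above.  "my_cache"
 * is a hypothetical, already-created cache; kmem_cache_alloc_bulk() and
 * kmem_cache_free_bulk() are the public entry points from <linux/slab.h>.
 */
static __maybe_unused int example_bulk_use(struct kmem_cache *my_cache)
{
	void *objs[16];
	int got;

	/* Either all requested objects are allocated, or 0 is returned. */
	got = kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs);
	if (!got)
		return -ENOMEM;

	/* ... use the objects ... */

	kmem_cache_free_bulk(my_cache, got, objs);
	return 0;
}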
    134
    135/*
    136 * Figure out what the alignment of the objects will be given a set of
     137 * flags, a user-specified alignment and the size of the objects.
    138 */
    139static unsigned int calculate_alignment(slab_flags_t flags,
    140		unsigned int align, unsigned int size)
    141{
    142	/*
    143	 * If the user wants hardware cache aligned objects then follow that
    144	 * suggestion if the object is sufficiently large.
    145	 *
    146	 * The hardware cache alignment cannot override the specified
     147	 * alignment though. If that is greater, then use it.
    148	 */
    149	if (flags & SLAB_HWCACHE_ALIGN) {
    150		unsigned int ralign;
    151
    152		ralign = cache_line_size();
    153		while (size <= ralign / 2)
    154			ralign /= 2;
    155		align = max(align, ralign);
    156	}
    157
    158	align = max(align, arch_slab_minalign());
    159
    160	return ALIGN(align, sizeof(void *));
    161}
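
/*
 * Editor's note (not part of the original file): a worked example of the
 * calculation above, assuming cache_line_size() == 64, 8-byte pointers and
 * arch_slab_minalign() <= 32.  For a 24-byte object created with
 * SLAB_HWCACHE_ALIGN and align == 0:
 *
 *   ralign = 64;  24 <= 32 -> ralign = 32;  24 <= 16 is false -> stop
 *   align  = max(0, 32) = 32
 *   return ALIGN(max(32, arch_slab_minalign()), sizeof(void *)) = 32
 *
 * Small objects thus get the largest power-of-two fraction of a cache line
 * that still holds them, rather than a full 64-byte alignment.
 */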
    162
    163/*
    164 * Find a mergeable slab cache
    165 */
    166int slab_unmergeable(struct kmem_cache *s)
    167{
    168	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
    169		return 1;
    170
    171	if (s->ctor)
    172		return 1;
    173
    174	if (s->usersize)
    175		return 1;
    176
    177	/*
    178	 * We may have set a slab to be unmergeable during bootstrap.
    179	 */
    180	if (s->refcount < 0)
    181		return 1;
    182
    183	return 0;
    184}
    185
    186struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
    187		slab_flags_t flags, const char *name, void (*ctor)(void *))
    188{
    189	struct kmem_cache *s;
    190
    191	if (slab_nomerge)
    192		return NULL;
    193
    194	if (ctor)
    195		return NULL;
    196
    197	size = ALIGN(size, sizeof(void *));
    198	align = calculate_alignment(flags, align, size);
    199	size = ALIGN(size, align);
    200	flags = kmem_cache_flags(size, flags, name);
    201
    202	if (flags & SLAB_NEVER_MERGE)
    203		return NULL;
    204
    205	list_for_each_entry_reverse(s, &slab_caches, list) {
    206		if (slab_unmergeable(s))
    207			continue;
    208
    209		if (size > s->size)
    210			continue;
    211
    212		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
    213			continue;
    214		/*
    215		 * Check if alignment is compatible.
    216		 * Courtesy of Adrian Drzewiecki
    217		 */
    218		if ((s->size & ~(align - 1)) != s->size)
    219			continue;
    220
    221		if (s->size - size >= sizeof(void *))
    222			continue;
    223
    224		if (IS_ENABLED(CONFIG_SLAB) && align &&
    225			(align > s->align || s->align % align))
    226			continue;
    227
    228		return s;
    229	}
    230	return NULL;
    231}
    232
    233static struct kmem_cache *create_cache(const char *name,
    234		unsigned int object_size, unsigned int align,
    235		slab_flags_t flags, unsigned int useroffset,
    236		unsigned int usersize, void (*ctor)(void *),
    237		struct kmem_cache *root_cache)
    238{
    239	struct kmem_cache *s;
    240	int err;
    241
    242	if (WARN_ON(useroffset + usersize > object_size))
    243		useroffset = usersize = 0;
    244
    245	err = -ENOMEM;
    246	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
    247	if (!s)
    248		goto out;
    249
    250	s->name = name;
    251	s->size = s->object_size = object_size;
    252	s->align = align;
    253	s->ctor = ctor;
    254	s->useroffset = useroffset;
    255	s->usersize = usersize;
    256
    257	err = __kmem_cache_create(s, flags);
    258	if (err)
    259		goto out_free_cache;
    260
    261	s->refcount = 1;
    262	list_add(&s->list, &slab_caches);
    263out:
    264	if (err)
    265		return ERR_PTR(err);
    266	return s;
    267
    268out_free_cache:
    269	kmem_cache_free(kmem_cache, s);
    270	goto out;
    271}
    272
    273/**
    274 * kmem_cache_create_usercopy - Create a cache with a region suitable
    275 * for copying to userspace
    276 * @name: A string which is used in /proc/slabinfo to identify this cache.
    277 * @size: The size of objects to be created in this cache.
    278 * @align: The required alignment for the objects.
    279 * @flags: SLAB flags
    280 * @useroffset: Usercopy region offset
    281 * @usersize: Usercopy region size
    282 * @ctor: A constructor for the objects.
    283 *
     284 * Cannot be called within an interrupt, but can be interrupted.
    285 * The @ctor is run when new pages are allocated by the cache.
    286 *
    287 * The flags are
    288 *
    289 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
    290 * to catch references to uninitialised memory.
    291 *
    292 * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
    293 * for buffer overruns.
    294 *
    295 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
    296 * cacheline.  This can be beneficial if you're counting cycles as closely
    297 * as davem.
    298 *
    299 * Return: a pointer to the cache on success, NULL on failure.
    300 */
    301struct kmem_cache *
    302kmem_cache_create_usercopy(const char *name,
    303		  unsigned int size, unsigned int align,
    304		  slab_flags_t flags,
    305		  unsigned int useroffset, unsigned int usersize,
    306		  void (*ctor)(void *))
    307{
    308	struct kmem_cache *s = NULL;
    309	const char *cache_name;
    310	int err;
    311
    312#ifdef CONFIG_SLUB_DEBUG
    313	/*
    314	 * If no slub_debug was enabled globally, the static key is not yet
    315	 * enabled by setup_slub_debug(). Enable it if the cache is being
    316	 * created with any of the debugging flags passed explicitly.
    317	 * It's also possible that this is the first cache created with
    318	 * SLAB_STORE_USER and we should init stack_depot for it.
    319	 */
    320	if (flags & SLAB_DEBUG_FLAGS)
    321		static_branch_enable(&slub_debug_enabled);
    322	if (flags & SLAB_STORE_USER)
    323		stack_depot_init();
    324#endif
    325
    326	mutex_lock(&slab_mutex);
    327
    328	err = kmem_cache_sanity_check(name, size);
    329	if (err) {
    330		goto out_unlock;
    331	}
    332
    333	/* Refuse requests with allocator specific flags */
    334	if (flags & ~SLAB_FLAGS_PERMITTED) {
    335		err = -EINVAL;
    336		goto out_unlock;
    337	}
    338
    339	/*
     340	 * Some allocators will constrain the set of valid flags to a subset
    341	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
    342	 * case, and we'll just provide them with a sanitized version of the
    343	 * passed flags.
    344	 */
    345	flags &= CACHE_CREATE_MASK;
    346
     347	/* Fail closed on bad usersize or useroffset values. */
    348	if (WARN_ON(!usersize && useroffset) ||
    349	    WARN_ON(size < usersize || size - usersize < useroffset))
    350		usersize = useroffset = 0;
    351
    352	if (!usersize)
    353		s = __kmem_cache_alias(name, size, align, flags, ctor);
    354	if (s)
    355		goto out_unlock;
    356
    357	cache_name = kstrdup_const(name, GFP_KERNEL);
    358	if (!cache_name) {
    359		err = -ENOMEM;
    360		goto out_unlock;
    361	}
    362
    363	s = create_cache(cache_name, size,
    364			 calculate_alignment(flags, align, size),
    365			 flags, useroffset, usersize, ctor, NULL);
    366	if (IS_ERR(s)) {
    367		err = PTR_ERR(s);
    368		kfree_const(cache_name);
    369	}
    370
    371out_unlock:
    372	mutex_unlock(&slab_mutex);
    373
    374	if (err) {
    375		if (flags & SLAB_PANIC)
    376			panic("%s: Failed to create slab '%s'. Error %d\n",
    377				__func__, name, err);
    378		else {
    379			pr_warn("%s(%s) failed with error %d\n",
    380				__func__, name, err);
    381			dump_stack();
    382		}
    383		return NULL;
    384	}
    385	return s;
    386}
    387EXPORT_SYMBOL(kmem_cache_create_usercopy);
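
/*
 * Editor's sketch (not part of the original file): creating a cache with a
 * usercopy whitelist so that only one field may be copied to/from user
 * space under hardened usercopy.  "struct sample" and "sample_cachep" are
 * hypothetical names; the call itself is the API documented above.
 */
struct sample {
	unsigned long refcnt;	/* never exposed to user space */
	char data[64];		/* the only usercopy-able region */
};

static struct kmem_cache *sample_cachep;

static __maybe_unused int example_create_sample_cache(void)
{
	sample_cachep = kmem_cache_create_usercopy("sample",
					sizeof(struct sample), 0,
					SLAB_HWCACHE_ALIGN,
					offsetof(struct sample, data),
					sizeof_field(struct sample, data),
					NULL);
	return sample_cachep ? 0 : -ENOMEM;
}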
    388
    389/**
    390 * kmem_cache_create - Create a cache.
    391 * @name: A string which is used in /proc/slabinfo to identify this cache.
    392 * @size: The size of objects to be created in this cache.
    393 * @align: The required alignment for the objects.
    394 * @flags: SLAB flags
    395 * @ctor: A constructor for the objects.
    396 *
     397 * Cannot be called within an interrupt, but can be interrupted.
    398 * The @ctor is run when new pages are allocated by the cache.
    399 *
    400 * The flags are
    401 *
    402 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
    403 * to catch references to uninitialised memory.
    404 *
    405 * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
    406 * for buffer overruns.
    407 *
    408 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
    409 * cacheline.  This can be beneficial if you're counting cycles as closely
    410 * as davem.
    411 *
    412 * Return: a pointer to the cache on success, NULL on failure.
    413 */
    414struct kmem_cache *
    415kmem_cache_create(const char *name, unsigned int size, unsigned int align,
    416		slab_flags_t flags, void (*ctor)(void *))
    417{
    418	return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
    419					  ctor);
    420}
    421EXPORT_SYMBOL(kmem_cache_create);
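
/*
 * Editor's sketch (not part of the original file): the typical life cycle
 * of a cache created with the API above.  "struct widget" and
 * "widget_cachep" are hypothetical names.
 */
struct widget {
	int id;
	char name[32];
};

static struct kmem_cache *widget_cachep;

static __maybe_unused int example_widget_init(void)
{
	widget_cachep = kmem_cache_create("widget", sizeof(struct widget),
					  0, SLAB_HWCACHE_ALIGN, NULL);
	return widget_cachep ? 0 : -ENOMEM;
}

static __maybe_unused void example_widget_use(void)
{
	struct widget *w = kmem_cache_alloc(widget_cachep, GFP_KERNEL);

	if (!w)
		return;
	w->id = 42;
	kmem_cache_free(widget_cachep, w);
}

static __maybe_unused void example_widget_exit(void)
{
	/* All objects must have been freed before the cache is destroyed. */
	kmem_cache_destroy(widget_cachep);
}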
    422
    423static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
    424{
    425	LIST_HEAD(to_destroy);
    426	struct kmem_cache *s, *s2;
    427
    428	/*
    429	 * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
    430	 * @slab_caches_to_rcu_destroy list.  The slab pages are freed
     431	 * through RCU and the associated kmem_cache is dereferenced
    432	 * while freeing the pages, so the kmem_caches should be freed only
    433	 * after the pending RCU operations are finished.  As rcu_barrier()
    434	 * is a pretty slow operation, we batch all pending destructions
    435	 * asynchronously.
    436	 */
    437	mutex_lock(&slab_mutex);
    438	list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
    439	mutex_unlock(&slab_mutex);
    440
    441	if (list_empty(&to_destroy))
    442		return;
    443
    444	rcu_barrier();
    445
    446	list_for_each_entry_safe(s, s2, &to_destroy, list) {
    447		debugfs_slab_release(s);
    448		kfence_shutdown_cache(s);
    449#ifdef SLAB_SUPPORTS_SYSFS
    450		sysfs_slab_release(s);
    451#else
    452		slab_kmem_cache_release(s);
    453#endif
    454	}
    455}
    456
    457static int shutdown_cache(struct kmem_cache *s)
    458{
    459	/* free asan quarantined objects */
    460	kasan_cache_shutdown(s);
    461
    462	if (__kmem_cache_shutdown(s) != 0)
    463		return -EBUSY;
    464
    465	list_del(&s->list);
    466
    467	if (s->flags & SLAB_TYPESAFE_BY_RCU) {
    468#ifdef SLAB_SUPPORTS_SYSFS
    469		sysfs_slab_unlink(s);
    470#endif
    471		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
    472		schedule_work(&slab_caches_to_rcu_destroy_work);
    473	} else {
    474		kfence_shutdown_cache(s);
    475		debugfs_slab_release(s);
    476#ifdef SLAB_SUPPORTS_SYSFS
    477		sysfs_slab_unlink(s);
    478		sysfs_slab_release(s);
    479#else
    480		slab_kmem_cache_release(s);
    481#endif
    482	}
    483
    484	return 0;
    485}
    486
    487void slab_kmem_cache_release(struct kmem_cache *s)
    488{
    489	__kmem_cache_release(s);
    490	kfree_const(s->name);
    491	kmem_cache_free(kmem_cache, s);
    492}
    493
    494void kmem_cache_destroy(struct kmem_cache *s)
    495{
    496	if (unlikely(!s) || !kasan_check_byte(s))
    497		return;
    498
    499	cpus_read_lock();
    500	mutex_lock(&slab_mutex);
    501
    502	s->refcount--;
    503	if (s->refcount)
    504		goto out_unlock;
    505
    506	WARN(shutdown_cache(s),
    507	     "%s %s: Slab cache still has objects when called from %pS",
    508	     __func__, s->name, (void *)_RET_IP_);
    509out_unlock:
    510	mutex_unlock(&slab_mutex);
    511	cpus_read_unlock();
    512}
    513EXPORT_SYMBOL(kmem_cache_destroy);
    514
    515/**
    516 * kmem_cache_shrink - Shrink a cache.
    517 * @cachep: The cache to shrink.
    518 *
    519 * Releases as many slabs as possible for a cache.
    520 * To help debugging, a zero exit status indicates all slabs were released.
    521 *
    522 * Return: %0 if all slabs were released, non-zero otherwise
    523 */
    524int kmem_cache_shrink(struct kmem_cache *cachep)
    525{
    526	int ret;
    527
    528
    529	kasan_cache_shrink(cachep);
    530	ret = __kmem_cache_shrink(cachep);
    531
    532	return ret;
    533}
    534EXPORT_SYMBOL(kmem_cache_shrink);
    535
    536bool slab_is_available(void)
    537{
    538	return slab_state >= UP;
    539}
    540
    541#ifdef CONFIG_PRINTK
    542/**
    543 * kmem_valid_obj - does the pointer reference a valid slab object?
    544 * @object: pointer to query.
    545 *
    546 * Return: %true if the pointer is to a not-yet-freed object from
    547 * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
    548 * is to an already-freed object, and %false otherwise.
    549 */
    550bool kmem_valid_obj(void *object)
    551{
    552	struct folio *folio;
    553
    554	/* Some arches consider ZERO_SIZE_PTR to be a valid address. */
    555	if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
    556		return false;
    557	folio = virt_to_folio(object);
    558	return folio_test_slab(folio);
    559}
    560EXPORT_SYMBOL_GPL(kmem_valid_obj);
    561
    562static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
    563{
    564	if (__kfence_obj_info(kpp, object, slab))
    565		return;
    566	__kmem_obj_info(kpp, object, slab);
    567}
    568
    569/**
    570 * kmem_dump_obj - Print available slab provenance information
    571 * @object: slab object for which to find provenance information.
    572 *
     573 * This function uses pr_cont(), so the caller is expected to have
    574 * printed out whatever preamble is appropriate.  The provenance information
    575 * depends on the type of object and on how much debugging is enabled.
    576 * For a slab-cache object, the fact that it is a slab object is printed,
    577 * and, if available, the slab name, return address, and stack trace from
    578 * the allocation and last free path of that object.
    579 *
    580 * This function will splat if passed a pointer to a non-slab object.
    581 * If you are not sure what type of object you have, you should instead
    582 * use mem_dump_obj().
    583 */
    584void kmem_dump_obj(void *object)
    585{
    586	char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc";
    587	int i;
    588	struct slab *slab;
    589	unsigned long ptroffset;
    590	struct kmem_obj_info kp = { };
    591
    592	if (WARN_ON_ONCE(!virt_addr_valid(object)))
    593		return;
    594	slab = virt_to_slab(object);
    595	if (WARN_ON_ONCE(!slab)) {
    596		pr_cont(" non-slab memory.\n");
    597		return;
    598	}
    599	kmem_obj_info(&kp, object, slab);
    600	if (kp.kp_slab_cache)
    601		pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
    602	else
    603		pr_cont(" slab%s", cp);
    604	if (is_kfence_address(object))
    605		pr_cont(" (kfence)");
    606	if (kp.kp_objp)
    607		pr_cont(" start %px", kp.kp_objp);
    608	if (kp.kp_data_offset)
    609		pr_cont(" data offset %lu", kp.kp_data_offset);
    610	if (kp.kp_objp) {
    611		ptroffset = ((char *)object - (char *)kp.kp_objp) - kp.kp_data_offset;
    612		pr_cont(" pointer offset %lu", ptroffset);
    613	}
    614	if (kp.kp_slab_cache && kp.kp_slab_cache->usersize)
    615		pr_cont(" size %u", kp.kp_slab_cache->usersize);
    616	if (kp.kp_ret)
    617		pr_cont(" allocated at %pS\n", kp.kp_ret);
    618	else
    619		pr_cont("\n");
    620	for (i = 0; i < ARRAY_SIZE(kp.kp_stack); i++) {
    621		if (!kp.kp_stack[i])
    622			break;
    623		pr_info("    %pS\n", kp.kp_stack[i]);
    624	}
    625
    626	if (kp.kp_free_stack[0])
    627		pr_cont(" Free path:\n");
    628
    629	for (i = 0; i < ARRAY_SIZE(kp.kp_free_stack); i++) {
    630		if (!kp.kp_free_stack[i])
    631			break;
    632		pr_info("    %pS\n", kp.kp_free_stack[i]);
    633	}
    634
    635}
    636EXPORT_SYMBOL_GPL(kmem_dump_obj);
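
/*
 * Editor's sketch (not part of the original file): since kmem_dump_obj()
 * prints with pr_cont(), the caller supplies its own preamble.  "ptr" is a
 * hypothetical pointer; callers unsure of the pointer type should use
 * mem_dump_obj() instead, as noted above.
 */
static __maybe_unused void example_dump(void *ptr)
{
	if (kmem_valid_obj(ptr)) {
		pr_info("suspect object:");
		kmem_dump_obj(ptr);	/* continues the line started above */
	}
}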
    637#endif
    638
    639#ifndef CONFIG_SLOB
    640/* Create a cache during boot when no slab services are available yet */
    641void __init create_boot_cache(struct kmem_cache *s, const char *name,
    642		unsigned int size, slab_flags_t flags,
    643		unsigned int useroffset, unsigned int usersize)
    644{
    645	int err;
    646	unsigned int align = ARCH_KMALLOC_MINALIGN;
    647
    648	s->name = name;
    649	s->size = s->object_size = size;
    650
    651	/*
    652	 * For power of two sizes, guarantee natural alignment for kmalloc
    653	 * caches, regardless of SL*B debugging options.
    654	 */
    655	if (is_power_of_2(size))
    656		align = max(align, size);
    657	s->align = calculate_alignment(flags, align, size);
    658
    659	s->useroffset = useroffset;
    660	s->usersize = usersize;
    661
    662	err = __kmem_cache_create(s, flags);
    663
    664	if (err)
    665		panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
    666					name, size, err);
    667
    668	s->refcount = -1;	/* Exempt from merging for now */
    669}
    670
    671struct kmem_cache *__init create_kmalloc_cache(const char *name,
    672		unsigned int size, slab_flags_t flags,
    673		unsigned int useroffset, unsigned int usersize)
    674{
    675	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
    676
    677	if (!s)
    678		panic("Out of memory when creating slab %s\n", name);
    679
    680	create_boot_cache(s, name, size, flags, useroffset, usersize);
    681	kasan_cache_create_kmalloc(s);
    682	list_add(&s->list, &slab_caches);
    683	s->refcount = 1;
    684	return s;
    685}
    686
    687struct kmem_cache *
    688kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init =
    689{ /* initialization for https://bugs.llvm.org/show_bug.cgi?id=42570 */ };
    690EXPORT_SYMBOL(kmalloc_caches);
    691
    692/*
     693 * Conversion table for small slab sizes / 8 to the index in the
    694 * kmalloc array. This is necessary for slabs < 192 since we have non power
    695 * of two cache sizes there. The size of larger slabs can be determined using
    696 * fls.
    697 */
    698static u8 size_index[24] __ro_after_init = {
    699	3,	/* 8 */
    700	4,	/* 16 */
    701	5,	/* 24 */
    702	5,	/* 32 */
    703	6,	/* 40 */
    704	6,	/* 48 */
    705	6,	/* 56 */
    706	6,	/* 64 */
    707	1,	/* 72 */
    708	1,	/* 80 */
    709	1,	/* 88 */
    710	1,	/* 96 */
    711	7,	/* 104 */
    712	7,	/* 112 */
    713	7,	/* 120 */
    714	7,	/* 128 */
    715	2,	/* 136 */
    716	2,	/* 144 */
    717	2,	/* 152 */
    718	2,	/* 160 */
    719	2,	/* 168 */
    720	2,	/* 176 */
    721	2,	/* 184 */
    722	2	/* 192 */
    723};
    724
    725static inline unsigned int size_index_elem(unsigned int bytes)
    726{
    727	return (bytes - 1) / 8;
    728}
    729
    730/*
    731 * Find the kmem_cache structure that serves a given size of
    732 * allocation
    733 */
    734struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
    735{
    736	unsigned int index;
    737
    738	if (size <= 192) {
    739		if (!size)
    740			return ZERO_SIZE_PTR;
    741
    742		index = size_index[size_index_elem(size)];
    743	} else {
    744		if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE))
    745			return NULL;
    746		index = fls(size - 1);
    747	}
    748
    749	return kmalloc_caches[kmalloc_type(flags)][index];
    750}
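
/*
 * Editor's note (not part of the original file): two worked look-ups
 * through the function above, assuming the default size_index table (i.e.
 * it has not been patched for a large KMALLOC_MIN_SIZE):
 *
 *   kmalloc(100, GFP_KERNEL): 100 <= 192, size_index_elem(100) = 12,
 *     size_index[12] = 7  ->  kmalloc_caches[type][7], the 128-byte cache.
 *
 *   kmalloc(1000, GFP_KERNEL): 1000 > 192, fls(999) = 10
 *     ->  kmalloc_caches[type][10], the 1024-byte cache.
 */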
    751
    752#ifdef CONFIG_ZONE_DMA
    753#define KMALLOC_DMA_NAME(sz)	.name[KMALLOC_DMA] = "dma-kmalloc-" #sz,
    754#else
    755#define KMALLOC_DMA_NAME(sz)
    756#endif
    757
    758#ifdef CONFIG_MEMCG_KMEM
    759#define KMALLOC_CGROUP_NAME(sz)	.name[KMALLOC_CGROUP] = "kmalloc-cg-" #sz,
    760#else
    761#define KMALLOC_CGROUP_NAME(sz)
    762#endif
    763
    764#define INIT_KMALLOC_INFO(__size, __short_size)			\
    765{								\
    766	.name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,	\
    767	.name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size,	\
    768	KMALLOC_CGROUP_NAME(__short_size)			\
    769	KMALLOC_DMA_NAME(__short_size)				\
    770	.size = __size,						\
    771}
    772
    773/*
    774 * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
    775 * kmalloc_index() supports up to 2^25=32MB, so the final entry of the table is
    776 * kmalloc-32M.
    777 */
    778const struct kmalloc_info_struct kmalloc_info[] __initconst = {
    779	INIT_KMALLOC_INFO(0, 0),
    780	INIT_KMALLOC_INFO(96, 96),
    781	INIT_KMALLOC_INFO(192, 192),
    782	INIT_KMALLOC_INFO(8, 8),
    783	INIT_KMALLOC_INFO(16, 16),
    784	INIT_KMALLOC_INFO(32, 32),
    785	INIT_KMALLOC_INFO(64, 64),
    786	INIT_KMALLOC_INFO(128, 128),
    787	INIT_KMALLOC_INFO(256, 256),
    788	INIT_KMALLOC_INFO(512, 512),
    789	INIT_KMALLOC_INFO(1024, 1k),
    790	INIT_KMALLOC_INFO(2048, 2k),
    791	INIT_KMALLOC_INFO(4096, 4k),
    792	INIT_KMALLOC_INFO(8192, 8k),
    793	INIT_KMALLOC_INFO(16384, 16k),
    794	INIT_KMALLOC_INFO(32768, 32k),
    795	INIT_KMALLOC_INFO(65536, 64k),
    796	INIT_KMALLOC_INFO(131072, 128k),
    797	INIT_KMALLOC_INFO(262144, 256k),
    798	INIT_KMALLOC_INFO(524288, 512k),
    799	INIT_KMALLOC_INFO(1048576, 1M),
    800	INIT_KMALLOC_INFO(2097152, 2M),
    801	INIT_KMALLOC_INFO(4194304, 4M),
    802	INIT_KMALLOC_INFO(8388608, 8M),
    803	INIT_KMALLOC_INFO(16777216, 16M),
    804	INIT_KMALLOC_INFO(33554432, 32M)
    805};
    806
    807/*
    808 * Patch up the size_index table if we have strange large alignment
    809 * requirements for the kmalloc array. This is only the case for
    810 * MIPS it seems. The standard arches will not generate any code here.
    811 *
    812 * Largest permitted alignment is 256 bytes due to the way we
    813 * handle the index determination for the smaller caches.
    814 *
    815 * Make sure that nothing crazy happens if someone starts tinkering
     816 * around with ARCH_KMALLOC_MINALIGN.
    817 */
    818void __init setup_kmalloc_cache_index_table(void)
    819{
    820	unsigned int i;
    821
    822	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
    823		!is_power_of_2(KMALLOC_MIN_SIZE));
    824
    825	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
    826		unsigned int elem = size_index_elem(i);
    827
    828		if (elem >= ARRAY_SIZE(size_index))
    829			break;
    830		size_index[elem] = KMALLOC_SHIFT_LOW;
    831	}
    832
    833	if (KMALLOC_MIN_SIZE >= 64) {
    834		/*
     835		 * The 96-byte cache is not used if the alignment
     836		 * is 64 bytes.
    837		 */
    838		for (i = 64 + 8; i <= 96; i += 8)
    839			size_index[size_index_elem(i)] = 7;
    840
    841	}
    842
    843	if (KMALLOC_MIN_SIZE >= 128) {
    844		/*
     845		 * The 192-byte cache is not used if the alignment
     846		 * is 128 bytes. Redirect kmalloc to use the 256-byte cache
    847		 * instead.
    848		 */
    849		for (i = 128 + 8; i <= 192; i += 8)
    850			size_index[size_index_elem(i)] = 8;
    851	}
    852}
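
/*
 * Editor's note (not part of the original file): a worked example of the
 * patching above.  If KMALLOC_MIN_SIZE == 64 (so KMALLOC_SHIFT_LOW == 6),
 * requests of 8..56 bytes are redirected to the 64-byte cache (index 6),
 * and 72..96 bytes to the 128-byte cache (index 7), because the 96-byte
 * cache cannot be used with a 64-byte minimum alignment.
 */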
    853
    854static void __init
    855new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
    856{
    857	if (type == KMALLOC_RECLAIM) {
    858		flags |= SLAB_RECLAIM_ACCOUNT;
    859	} else if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_CGROUP)) {
    860		if (mem_cgroup_kmem_disabled()) {
    861			kmalloc_caches[type][idx] = kmalloc_caches[KMALLOC_NORMAL][idx];
    862			return;
    863		}
    864		flags |= SLAB_ACCOUNT;
    865	} else if (IS_ENABLED(CONFIG_ZONE_DMA) && (type == KMALLOC_DMA)) {
    866		flags |= SLAB_CACHE_DMA;
    867	}
    868
    869	kmalloc_caches[type][idx] = create_kmalloc_cache(
    870					kmalloc_info[idx].name[type],
    871					kmalloc_info[idx].size, flags, 0,
    872					kmalloc_info[idx].size);
    873
    874	/*
    875	 * If CONFIG_MEMCG_KMEM is enabled, disable cache merging for
    876	 * KMALLOC_NORMAL caches.
    877	 */
    878	if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_NORMAL))
    879		kmalloc_caches[type][idx]->refcount = -1;
    880}
    881
    882/*
    883 * Create the kmalloc array. Some of the regular kmalloc arrays
    884 * may already have been created because they were needed to
    885 * enable allocations for slab creation.
    886 */
    887void __init create_kmalloc_caches(slab_flags_t flags)
    888{
    889	int i;
    890	enum kmalloc_cache_type type;
    891
    892	/*
     893	 * Including KMALLOC_CGROUP if CONFIG_MEMCG_KMEM is defined
    894	 */
    895	for (type = KMALLOC_NORMAL; type < NR_KMALLOC_TYPES; type++) {
    896		for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
    897			if (!kmalloc_caches[type][i])
    898				new_kmalloc_cache(i, type, flags);
    899
    900			/*
     901			 * Caches that are not a power-of-two size have to be
     902			 * created immediately after the earlier power-of-two
     903			 * caches.
    904			 */
    905			if (KMALLOC_MIN_SIZE <= 32 && i == 6 &&
    906					!kmalloc_caches[type][1])
    907				new_kmalloc_cache(1, type, flags);
    908			if (KMALLOC_MIN_SIZE <= 64 && i == 7 &&
    909					!kmalloc_caches[type][2])
    910				new_kmalloc_cache(2, type, flags);
    911		}
    912	}
    913
    914	/* Kmalloc array is now usable */
    915	slab_state = UP;
    916}
    917#endif /* !CONFIG_SLOB */
    918
    919gfp_t kmalloc_fix_flags(gfp_t flags)
    920{
    921	gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
    922
    923	flags &= ~GFP_SLAB_BUG_MASK;
    924	pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
    925			invalid_mask, &invalid_mask, flags, &flags);
    926	dump_stack();
    927
    928	return flags;
    929}
    930
    931/*
    932 * To avoid unnecessary overhead, we pass through large allocation requests
    933 * directly to the page allocator. We use __GFP_COMP, because we will need to
    934 * know the allocation order to free the pages properly in kfree.
    935 */
    936void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
    937{
    938	void *ret = NULL;
    939	struct page *page;
    940
    941	if (unlikely(flags & GFP_SLAB_BUG_MASK))
    942		flags = kmalloc_fix_flags(flags);
    943
    944	flags |= __GFP_COMP;
    945	page = alloc_pages(flags, order);
    946	if (likely(page)) {
    947		ret = page_address(page);
    948		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
    949				      PAGE_SIZE << order);
    950	}
    951	ret = kasan_kmalloc_large(ret, size, flags);
    952	/* As ret might get tagged, call kmemleak hook after KASAN. */
    953	kmemleak_alloc(ret, size, 1, flags);
    954	return ret;
    955}
    956EXPORT_SYMBOL(kmalloc_order);
    957
    958#ifdef CONFIG_TRACING
    959void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
    960{
    961	void *ret = kmalloc_order(size, flags, order);
    962	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
    963	return ret;
    964}
    965EXPORT_SYMBOL(kmalloc_order_trace);
    966#endif
    967
    968#ifdef CONFIG_SLAB_FREELIST_RANDOM
    969/* Randomize a generic freelist */
    970static void freelist_randomize(struct rnd_state *state, unsigned int *list,
    971			       unsigned int count)
    972{
    973	unsigned int rand;
    974	unsigned int i;
    975
    976	for (i = 0; i < count; i++)
    977		list[i] = i;
    978
    979	/* Fisher-Yates shuffle */
    980	for (i = count - 1; i > 0; i--) {
    981		rand = prandom_u32_state(state);
    982		rand %= (i + 1);
    983		swap(list[i], list[rand]);
    984	}
    985}
    986
    987/* Create a random sequence per cache */
    988int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
    989				    gfp_t gfp)
    990{
    991	struct rnd_state state;
    992
    993	if (count < 2 || cachep->random_seq)
    994		return 0;
    995
    996	cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
    997	if (!cachep->random_seq)
    998		return -ENOMEM;
    999
   1000	/* Get best entropy at this stage of boot */
   1001	prandom_seed_state(&state, get_random_long());
   1002
   1003	freelist_randomize(&state, cachep->random_seq, count);
   1004	return 0;
   1005}
   1006
   1007/* Destroy the per-cache random freelist sequence */
   1008void cache_random_seq_destroy(struct kmem_cache *cachep)
   1009{
   1010	kfree(cachep->random_seq);
   1011	cachep->random_seq = NULL;
   1012}
   1013#endif /* CONFIG_SLAB_FREELIST_RANDOM */
   1014
   1015#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
   1016#ifdef CONFIG_SLAB
   1017#define SLABINFO_RIGHTS (0600)
   1018#else
   1019#define SLABINFO_RIGHTS (0400)
   1020#endif
   1021
   1022static void print_slabinfo_header(struct seq_file *m)
   1023{
   1024	/*
   1025	 * Output format version, so at least we can change it
   1026	 * without _too_ many complaints.
   1027	 */
   1028#ifdef CONFIG_DEBUG_SLAB
   1029	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
   1030#else
   1031	seq_puts(m, "slabinfo - version: 2.1\n");
   1032#endif
   1033	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
   1034	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
   1035	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
   1036#ifdef CONFIG_DEBUG_SLAB
   1037	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
   1038	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
   1039#endif
   1040	seq_putc(m, '\n');
   1041}
   1042
   1043static void *slab_start(struct seq_file *m, loff_t *pos)
   1044{
   1045	mutex_lock(&slab_mutex);
   1046	return seq_list_start(&slab_caches, *pos);
   1047}
   1048
   1049static void *slab_next(struct seq_file *m, void *p, loff_t *pos)
   1050{
   1051	return seq_list_next(p, &slab_caches, pos);
   1052}
   1053
   1054static void slab_stop(struct seq_file *m, void *p)
   1055{
   1056	mutex_unlock(&slab_mutex);
   1057}
   1058
   1059static void cache_show(struct kmem_cache *s, struct seq_file *m)
   1060{
   1061	struct slabinfo sinfo;
   1062
   1063	memset(&sinfo, 0, sizeof(sinfo));
   1064	get_slabinfo(s, &sinfo);
   1065
   1066	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
   1067		   s->name, sinfo.active_objs, sinfo.num_objs, s->size,
   1068		   sinfo.objects_per_slab, (1 << sinfo.cache_order));
   1069
   1070	seq_printf(m, " : tunables %4u %4u %4u",
   1071		   sinfo.limit, sinfo.batchcount, sinfo.shared);
   1072	seq_printf(m, " : slabdata %6lu %6lu %6lu",
   1073		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
   1074	slabinfo_show_stats(m, s);
   1075	seq_putc(m, '\n');
   1076}
   1077
   1078static int slab_show(struct seq_file *m, void *p)
   1079{
   1080	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
   1081
   1082	if (p == slab_caches.next)
   1083		print_slabinfo_header(m);
   1084	cache_show(s, m);
   1085	return 0;
   1086}
   1087
   1088void dump_unreclaimable_slab(void)
   1089{
   1090	struct kmem_cache *s;
   1091	struct slabinfo sinfo;
   1092
   1093	/*
    1094	 * Acquiring slab_mutex here is risky since we don't want to
    1095	 * sleep in the OOM path. But without holding the mutex, the
    1096	 * list traversal risks a crash.
    1097	 * Use mutex_trylock to protect the traversal and dump nothing
    1098	 * if the mutex cannot be acquired.
   1099	 */
   1100	if (!mutex_trylock(&slab_mutex)) {
   1101		pr_warn("excessive unreclaimable slab but cannot dump stats\n");
   1102		return;
   1103	}
   1104
   1105	pr_info("Unreclaimable slab info:\n");
   1106	pr_info("Name                      Used          Total\n");
   1107
   1108	list_for_each_entry(s, &slab_caches, list) {
   1109		if (s->flags & SLAB_RECLAIM_ACCOUNT)
   1110			continue;
   1111
   1112		get_slabinfo(s, &sinfo);
   1113
   1114		if (sinfo.num_objs > 0)
   1115			pr_info("%-17s %10luKB %10luKB\n", s->name,
   1116				(sinfo.active_objs * s->size) / 1024,
   1117				(sinfo.num_objs * s->size) / 1024);
   1118	}
   1119	mutex_unlock(&slab_mutex);
   1120}
   1121
   1122/*
   1123 * slabinfo_op - iterator that generates /proc/slabinfo
   1124 *
   1125 * Output layout:
   1126 * cache-name
   1127 * num-active-objs
   1128 * total-objs
   1129 * object size
   1130 * num-active-slabs
   1131 * total-slabs
   1132 * num-pages-per-slab
   1133 * + further values on SMP and with statistics enabled
   1134 */
   1135static const struct seq_operations slabinfo_op = {
   1136	.start = slab_start,
   1137	.next = slab_next,
   1138	.stop = slab_stop,
   1139	.show = slab_show,
   1140};
   1141
   1142static int slabinfo_open(struct inode *inode, struct file *file)
   1143{
   1144	return seq_open(file, &slabinfo_op);
   1145}
   1146
   1147static const struct proc_ops slabinfo_proc_ops = {
   1148	.proc_flags	= PROC_ENTRY_PERMANENT,
   1149	.proc_open	= slabinfo_open,
   1150	.proc_read	= seq_read,
   1151	.proc_write	= slabinfo_write,
   1152	.proc_lseek	= seq_lseek,
   1153	.proc_release	= seq_release,
   1154};
   1155
   1156static int __init slab_proc_init(void)
   1157{
   1158	proc_create("slabinfo", SLABINFO_RIGHTS, NULL, &slabinfo_proc_ops);
   1159	return 0;
   1160}
   1161module_init(slab_proc_init);
   1162
   1163#endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */
   1164
   1165static __always_inline void *__do_krealloc(const void *p, size_t new_size,
   1166					   gfp_t flags)
   1167{
   1168	void *ret;
   1169	size_t ks;
   1170
   1171	/* Don't use instrumented ksize to allow precise KASAN poisoning. */
   1172	if (likely(!ZERO_OR_NULL_PTR(p))) {
   1173		if (!kasan_check_byte(p))
   1174			return NULL;
   1175		ks = kfence_ksize(p) ?: __ksize(p);
   1176	} else
   1177		ks = 0;
   1178
   1179	/* If the object still fits, repoison it precisely. */
   1180	if (ks >= new_size) {
   1181		p = kasan_krealloc((void *)p, new_size, flags);
   1182		return (void *)p;
   1183	}
   1184
   1185	ret = kmalloc_track_caller(new_size, flags);
   1186	if (ret && p) {
   1187		/* Disable KASAN checks as the object's redzone is accessed. */
   1188		kasan_disable_current();
   1189		memcpy(ret, kasan_reset_tag(p), ks);
   1190		kasan_enable_current();
   1191	}
   1192
   1193	return ret;
   1194}
   1195
   1196/**
   1197 * krealloc - reallocate memory. The contents will remain unchanged.
   1198 * @p: object to reallocate memory for.
   1199 * @new_size: how many bytes of memory are required.
   1200 * @flags: the type of memory to allocate.
   1201 *
   1202 * The contents of the object pointed to are preserved up to the
   1203 * lesser of the new and old sizes (__GFP_ZERO flag is effectively ignored).
   1204 * If @p is %NULL, krealloc() behaves exactly like kmalloc().  If @new_size
   1205 * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
   1206 *
   1207 * Return: pointer to the allocated memory or %NULL in case of error
   1208 */
   1209void *krealloc(const void *p, size_t new_size, gfp_t flags)
   1210{
   1211	void *ret;
   1212
   1213	if (unlikely(!new_size)) {
   1214		kfree(p);
   1215		return ZERO_SIZE_PTR;
   1216	}
   1217
   1218	ret = __do_krealloc(p, new_size, flags);
   1219	if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
   1220		kfree(p);
   1221
   1222	return ret;
   1223}
   1224EXPORT_SYMBOL(krealloc);
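
/*
 * Editor's sketch (not part of the original file): the usual krealloc()
 * call pattern.  Keeping the result in a separate pointer avoids leaking
 * the original buffer when the reallocation fails, since on failure the
 * old allocation is left untouched.  "buf"/"new_size" are hypothetical.
 */
static __maybe_unused void *example_grow(void *buf, size_t new_size)
{
	void *new_buf;

	new_buf = krealloc(buf, new_size, GFP_KERNEL);
	if (!new_buf) {
		kfree(buf);	/* give up and release the old buffer */
		return NULL;
	}
	return new_buf;
}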
   1225
   1226/**
   1227 * kfree_sensitive - Clear sensitive information in memory before freeing
   1228 * @p: object to free memory of
   1229 *
    1230 * The memory of the object @p points to is zeroed before being freed.
   1231 * If @p is %NULL, kfree_sensitive() does nothing.
   1232 *
   1233 * Note: this function zeroes the whole allocated buffer which can be a good
   1234 * deal bigger than the requested buffer size passed to kmalloc(). So be
   1235 * careful when using this function in performance sensitive code.
   1236 */
   1237void kfree_sensitive(const void *p)
   1238{
   1239	size_t ks;
   1240	void *mem = (void *)p;
   1241
   1242	ks = ksize(mem);
   1243	if (ks)
   1244		memzero_explicit(mem, ks);
   1245	kfree(mem);
   1246}
   1247EXPORT_SYMBOL(kfree_sensitive);
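
/*
 * Editor's sketch (not part of the original file): a typical use of
 * kfree_sensitive() for key material.  "src"/"len" are hypothetical;
 * kmemdup() is the real helper from <linux/string.h>.
 */
static __maybe_unused int example_with_key(const u8 *src, size_t len)
{
	u8 *key = kmemdup(src, len, GFP_KERNEL);

	if (!key)
		return -ENOMEM;
	/* ... use the key ... */
	kfree_sensitive(key);	/* zeroes the whole allocation, then frees it */
	return 0;
}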
   1248
   1249/**
   1250 * ksize - get the actual amount of memory allocated for a given object
   1251 * @objp: Pointer to the object
   1252 *
   1253 * kmalloc may internally round up allocations and return more memory
   1254 * than requested. ksize() can be used to determine the actual amount of
   1255 * memory allocated. The caller may use this additional memory, even though
   1256 * a smaller amount of memory was initially specified with the kmalloc call.
   1257 * The caller must guarantee that objp points to a valid object previously
   1258 * allocated with either kmalloc() or kmem_cache_alloc(). The object
   1259 * must not be freed during the duration of the call.
   1260 *
   1261 * Return: size of the actual memory used by @objp in bytes
   1262 */
   1263size_t ksize(const void *objp)
   1264{
   1265	size_t size;
   1266
   1267	/*
   1268	 * We need to first check that the pointer to the object is valid, and
   1269	 * only then unpoison the memory. The report printed from ksize() is
    1270	 * more useful than when it's printed later, when the behaviour could
   1271	 * be undefined due to a potential use-after-free or double-free.
   1272	 *
   1273	 * We use kasan_check_byte(), which is supported for the hardware
   1274	 * tag-based KASAN mode, unlike kasan_check_read/write().
   1275	 *
   1276	 * If the pointed to memory is invalid, we return 0 to avoid users of
   1277	 * ksize() writing to and potentially corrupting the memory region.
   1278	 *
   1279	 * We want to perform the check before __ksize(), to avoid potentially
   1280	 * crashing in __ksize() due to accessing invalid metadata.
   1281	 */
   1282	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
   1283		return 0;
   1284
   1285	size = kfence_ksize(objp) ?: __ksize(objp);
   1286	/*
    1287	 * We assume that ksize callers could use the whole allocated area,
   1288	 * so we need to unpoison this area.
   1289	 */
   1290	kasan_unpoison_range(objp, size);
   1291	return size;
   1292}
   1293EXPORT_SYMBOL(ksize);
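
/*
 * Editor's sketch (not part of the original file): using the slack that
 * kmalloc() may hand out.  With the default caches a 100-byte request is
 * served from kmalloc-128, so ksize() typically reports 128 here.
 */
static __maybe_unused void example_use_slack(void)
{
	char *buf = kmalloc(100, GFP_KERNEL);
	size_t real;

	if (!buf)
		return;
	real = ksize(buf);	/* >= 100; the caller may use all of it */
	memset(buf, 0, real);
	kfree(buf);
}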
   1294
   1295/* Tracepoints definitions. */
   1296EXPORT_TRACEPOINT_SYMBOL(kmalloc);
   1297EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
   1298EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
   1299EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
   1300EXPORT_TRACEPOINT_SYMBOL(kfree);
   1301EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
   1302
   1303int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
   1304{
   1305	if (__should_failslab(s, gfpflags))
   1306		return -ENOMEM;
   1307	return 0;
   1308}
   1309ALLOW_ERROR_INJECTION(should_failslab, ERRNO);