cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

slub_def.h (6057B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kfence.h>
#include <linux/kobject.h>
#include <linux/reciprocal_div.h>
#include <linux/local_lock.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS };
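/*
 * Illustrative sketch, not part of the original header: with
 * CONFIG_SLUB_STATS enabled, mm/slub.c bumps one of these per-cpu counters
 * on each event via its stat() helper, roughly:
 *
 *	static inline void stat(const struct kmem_cache *s, enum stat_item si)
 *	{
 *	#ifdef CONFIG_SLUB_STATS
 *		raw_cpu_inc(s->cpu_slab->stat[si]);	// racy but cheap; fine for stats
 *	#endif
 *	}
 *
 * The counters are then exposed per cache through sysfs when CONFIG_SYSFS
 * is also enabled.
 */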

/*
 * When changing the layout, make sure freelist and tid are still compatible
 * with this_cpu_cmpxchg_double() alignment requirements.
 */
struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct slab *slab;	/* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct slab *partial;	/* Partially allocated frozen slabs */
#endif
	local_lock_t lock;	/* Protects the fields above */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
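/*
 * Illustrative sketch, not part of the original header: the layout constraint
 * above exists because the lockless allocation fast path in mm/slub.c updates
 * freelist and tid as a single unit, approximately:
 *
 *	object = c->freelist;
 *	tid = c->tid;
 *	if (!this_cpu_cmpxchg_double(s->cpu_slab->freelist, s->cpu_slab->tid,
 *				     object, tid,
 *				     get_freepointer(s, object), next_tid(tid)))
 *		goto redo;	// lost a race with another CPU or an interrupt
 *
 * get_freepointer() and next_tid() are internal to mm/slub.c.
 */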

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)		((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

#define slub_percpu_partial_read_once(c)     READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif // CONFIG_SLUB_CPU_PARTIAL
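/*
 * Illustrative sketch, not part of the original header: mm/slub.c uses these
 * macros to pop the head of the per-cpu partial list, roughly:
 *
 *	struct slab *slab = slub_percpu_partial(c);	// current head
 *	if (slab)
 *		slub_set_percpu_partial(c, slab);	// advance head to slab->next
 *
 * The !CONFIG_SLUB_CPU_PARTIAL variants let the same call sites compile away.
 */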

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned int x;
};
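/*
 * Illustrative sketch, not part of the original header: mm/slub.c packs the
 * page order into the high bits of x and the object count into the low 16
 * bits.  The names below are hypothetical; the in-tree accessors are
 * oo_order() and oo_objects().
 */
#define EXAMPLE_OO_SHIFT	16
#define EXAMPLE_OO_MASK		((1 << EXAMPLE_OO_SHIFT) - 1)

static inline unsigned int example_oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> EXAMPLE_OO_SHIFT;		/* page order of one slab */
}

static inline unsigned int example_oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & EXAMPLE_OO_MASK;		/* objects that fit in such a slab */
}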

/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
	unsigned int size;	/* The size of an object including metadata */
	unsigned int object_size;/* The size of an object without metadata */
	struct reciprocal_value reciprocal_size;
	unsigned int offset;	/* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
	/* Number of per cpu partial slabs to keep around */
	unsigned int cpu_partial_slabs;
#endif
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	unsigned int inuse;		/* Offset to metadata */
	unsigned int align;		/* Alignment */
	unsigned int red_left_pad;	/* Left redzone padding size */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN
	struct kasan_cache kasan_info;
#endif

	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */

	struct kmem_cache_node *node[MAX_NUMNODES];
};
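/*
 * Illustrative sketch, not part of the original header: the free pointer of
 * a free object is stored s->offset bytes into the object.  The helper name
 * below is hypothetical; mm/slub.c's get_freepointer() does the equivalent,
 * except that with CONFIG_SLAB_FREELIST_HARDENED the stored value is also
 * XORed with s->random and the swabbed pointer location (see freelist_ptr()
 * there).
 */
static inline void *example_get_freepointer(const struct kmem_cache *s, void *object)
{
	/* unhardened case only: read the next-free pointer at the offset */
	return *(void **)((char *)object + s->offset);
}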

#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *);
void sysfs_slab_release(struct kmem_cache *);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s)
{
}
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
#endif

void *fixup_red_left(struct kmem_cache *s, void *p);

static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
				void *x) {
	void *object = x - (x - slab_address(slab)) % cache->size;
	void *last_object = slab_address(slab) +
		(slab->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}
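/*
 * Illustrative worked example, not part of the original header: for a cache
 * with size == 64 on a slab starting at base, a pointer x == base + 100 is
 * rounded down to object == base + 64 (the start of the object containing x),
 * pointers past the last object are clamped to last_object, and
 * fixup_red_left() then skips past the left red zone when the cache is built
 * with SLAB_RED_ZONE debugging (red_left_pad above).
 */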

/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
					  void *addr, void *obj)
{
	return reciprocal_divide(kasan_reset_tag(obj) - addr,
				 cache->reciprocal_size);
}

static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	if (is_kfence_address(obj))
		return 0;
	return __obj_to_index(cache, slab_address(slab), obj);
}
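/*
 * Illustrative sketch, not part of the original header: reciprocal_divide()
 * with cache->reciprocal_size (precomputed from cache->size) replaces a
 * runtime division by the object size.  The hypothetical helper below
 * computes the same index the slow way for ordinary (non-KFENCE) objects,
 * e.g. an object starting 192 bytes into a slab of 64-byte objects has
 * index 3.
 */
static inline unsigned int example_obj_to_index_slow(const struct kmem_cache *cache,
						     const struct slab *slab, void *obj)
{
	return ((char *)kasan_reset_tag(obj) -
		(char *)slab_address(slab)) / cache->size;
}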

static inline int objs_per_slab(const struct kmem_cache *cache,
				     const struct slab *slab)
{
	return slab->objects;
}
#endif /* _LINUX_SLUB_DEF_H */