cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

slab_def.h (3092B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLAB_DEF_H
#define	_LINUX_SLAB_DEF_H

#include <linux/kfence.h>
#include <linux/reciprocal_div.h>

/*
 * Definitions unique to the original Linux SLAB allocator.
 */

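/*
 * Per-cache descriptor: one kmem_cache exists for every cache created
 * via kmem_cache_create(). The numbered comments below group the fields
 * roughly by access pattern, from per-allocation hot paths down to
 * cold creation/removal state and statistics.
 */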
struct kmem_cache {
	struct array_cache __percpu *cpu_cache;

/* 1) Cache tunables. Protected by slab_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int size;
	struct reciprocal_value reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */

	slab_flags_t flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t allocflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *freelist_cache;
	unsigned int freelist_size;

	/* constructor func */
	void (*ctor)(void *obj);

/* 4) cache creation/removal */
	const char *name;
	struct list_head list;
	int refcount;
	int object_size;
	int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. 'size' contains the total
	 * object size including these internal fields, while 'obj_offset'
	 * and 'object_size' contain the offset to the user object and its
	 * size.
	 */
	int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */

#ifdef CONFIG_KASAN
	struct kasan_cache kasan_info;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */

	struct kmem_cache_node *node[MAX_NUMNODES];
};

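/*
 * Round an arbitrary pointer x inside a slab down to the start of the
 * object containing it (slab->s_mem is the address of the first object).
 * Pointers past the final object are clamped to the last object's start.
 */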
static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
				void *x)
{
	void *object = x - (x - slab->s_mem) % cache->size;
	void *last_object = slab->s_mem + (cache->num - 1) * cache->size;

	if (unlikely(object > last_object))
		return last_object;
	else
		return object;
}

/*
 * We want to avoid an expensive divide: (offset / cache->size)
 *   Using the fact that size is a constant for a particular cache,
 *   we can replace (offset / cache->size) by
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
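/*
 * Illustrative example (values hypothetical, not from this header): with
 * cache->size == 256 and reciprocal_buffer_size == reciprocal_value(256),
 * an object at offset 0x300 from slab->s_mem gives
 * reciprocal_divide(0x300, ...) == 3, the same result as 0x300 / 256 but
 * computed with only a multiply and shifts.
 */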
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	u32 offset = (obj - slab->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}

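/*
 * Objects backed by a given slab: KFENCE-managed slabs always hold a
 * single object; regular slabs hold the cache-wide cache->num.
 */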
static inline int objs_per_slab(const struct kmem_cache *cache,
				     const struct slab *slab)
{
	if (is_kfence_address(slab_address(slab)))
		return 1;
	return cache->num;
}

#endif	/* _LINUX_SLAB_DEF_H */
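
A minimal userspace sketch (not part of the header) of the arithmetic that
nearest_obj() and obj_to_index() perform; struct slab and reciprocal_div
are kernel-internal, so plain pointers and a plain divide stand in for
them here, and all names and values are illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	char slab_mem[8 * 256];		/* stand-in for slab->s_mem */
	unsigned int size = 256;	/* cache->size */
	unsigned int num = 8;		/* cache->num */

	char *x = slab_mem + 0x345;	/* arbitrary pointer into the slab */

	/* nearest_obj(): round down to the containing object's start */
	char *object = x - (x - slab_mem) % size;
	char *last_object = slab_mem + (num - 1) * size;
	if (object > last_object)
		object = last_object;

	/* obj_to_index(): a plain divide here; the kernel substitutes
	 * reciprocal_divide() because size is constant per cache */
	unsigned int index = (unsigned int)(object - slab_mem) / size;

	printf("object offset 0x%tx -> index %u\n", object - slab_mem, index);
	return 0;	/* prints: object offset 0x300 -> index 3 */
}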