cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mempolicy.h (7530B)


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_lock.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted.  A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage.  The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode;	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	nodemask_t nodes;	/* interleave/bind/prefer */
	int home_node;		/* Home node to use for MPOL_BIND and MPOL_PREFERRED_MANY */

	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};
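
/*
 * Illustrative sketch, not part of the original header: the refcounting
 * lifecycle described above. mpol_dup() hands the caller the new object's
 * initial reference, which must eventually be dropped with mpol_put();
 * the policy is freed once the count reaches zero:
 *
 *	struct mempolicy *new = mpol_dup(old);
 *
 *	if (IS_ERR(new))
 *		return PTR_ERR(new);
 *	... use new for placement decisions ...
 *	mpol_put(new);
 */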

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}
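
/*
 * Illustrative sketch, modeled on the VMA-policy lookup path rather than
 * taken from this header: lookups that may return a shared policy take an
 * extra reference only in that case, so callers pair them with
 * mpol_cond_put() instead of an unconditional mpol_put():
 *
 *	struct mempolicy *pol = __get_vma_policy(vma, addr);
 *
 *	if (!pol)
 *		pol = get_task_policy(current);
 *	... pick a node based on pol ...
 *	mpol_cond_put(pol);
 */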

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}

#define vma_policy(vma) ((vma)->vm_policy)

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return true;
	return __mpol_equal(a, b);
}

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	rwlock_t lock;
};

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);
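
/*
 * Illustrative sketch, modeled on the shmem usage of this API: since the
 * tree is indexed in pages, a faulting address is converted to a page
 * offset within the shared object before the lookup. Shared lookups
 * return a referenced policy, later dropped with mpol_cond_put():
 *
 *	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 *	struct mempolicy *pol = mpol_shared_policy_lookup(sp, idx);
 */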

struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
		unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);

extern int huge_node(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
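
/*
 * Illustrative sketch, modeled on hugetlb's use of huge_node(): the
 * function resolves the policy covering a hugepage fault into a preferred
 * node id plus an optional nodemask, and passes the policy back through
 * *mpol for a later mpol_cond_put():
 *
 *	struct mempolicy *mpol;
 *	nodemask_t *nodemask;
 *	int nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
 *
 *	... allocate on nid, constrained by nodemask ...
 *	mpol_cond_put(mpol);
 */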
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_in_oom_domain(struct task_struct *tsk,
				const nodemask_t *mask);
extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy);

static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
{
	struct mempolicy *mpol = get_task_policy(current);

	return policy_nodemask(gfp, mpol);
}

extern unsigned int mempolicy_slab_node(void);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags);
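
/*
 * Illustrative sketch, modeled on the migrate_pages() syscall path: move
 * an mm's pages that currently reside in 'from' over to 'to'; MPOL_MF_MOVE
 * restricts the move to pages mapped only by this mm:
 *
 *	nodemask_t from = nodemask_of_node(0);
 *	nodemask_t to = nodemask_of_node(1);
 *	int err = do_migrate_pages(mm, &from, &to, MPOL_MF_MOVE);
 */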

#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif

extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
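
/*
 * Illustrative sketch (assumed usage): mpol_parse_str() parses the tmpfs
 * mount-option notation, e.g. "interleave:0-3" or "bind:1,3", destructively
 * (it writes into the string) and returns 0 on success; mpol_to_str()
 * formats a policy back into the same notation:
 *
 *	char opt[] = "interleave:0-3";
 *	char buf[64];
 *	struct mempolicy *mpol;
 *
 *	if (!mpol_parse_str(opt, &mpol))
 *		mpol_to_str(buf, sizeof(buf), mpol);
 */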

/* Check if a vma is migratable */
extern bool vma_migratable(struct vm_area_struct *vma);

extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
extern void mpol_put_task_policy(struct task_struct *);

static inline bool mpol_is_preferred_many(struct mempolicy *pol)
{
	return (pol->mode == MPOL_PREFERRED_MANY);
}

#else

struct mempolicy {};

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

struct shared_policy {};

static inline void mpol_shared_policy_init(struct shared_policy *sp,
						struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL

static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	return 0;
}

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
				const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline int huge_node(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return 0;
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
	return false;
}

static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
				   const nodemask_t *to, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
{
	return 1;	/* error */
}
#endif

static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
				 unsigned long address)
{
	return -1; /* no node preference */
}

static inline void mpol_put_task_policy(struct task_struct *task)
{
}

static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
{
	return NULL;
}

static inline bool mpol_is_preferred_many(struct mempolicy *pol)
{
	return false;
}

#endif /* CONFIG_NUMA */
#endif