cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

mmu.h (7720B)


      1/* SPDX-License-Identifier: GPL-2.0 */
      2#ifndef _ASM_POWERPC_BOOK3S_64_MMU_H_
      3#define _ASM_POWERPC_BOOK3S_64_MMU_H_
      4
      5#include <asm/page.h>
      6
      7#ifndef __ASSEMBLY__
      8/*
      9 * Page size definition
     10 *
     11 *    shift : is the "PAGE_SHIFT" value for that page size
     12 *    sllp  : is a bit mask with the value of SLB L || LP to be or'ed
     13 *            directly to a slbmte "vsid" value
     14 *    penc  : is the HPTE encoding mask for the "LP" field:
     15 *
     16 */
     17struct mmu_psize_def {
     18	unsigned int	shift;	/* number of bits */
     19	int		penc[MMU_PAGE_COUNT];	/* HPTE encoding */
     20	unsigned int	tlbiel;	/* tlbiel supported for that page size */
     21	unsigned long	avpnm;	/* bits to mask out in AVPN in the HPTE */
     22	unsigned long   h_rpt_pgsize; /* H_RPT_INVALIDATE page size encoding */
     23	union {
     24		unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
     25		unsigned long ap;	/* Ap encoding used by PowerISA 3.0 */
     26	};
     27};
     28extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
     29#endif /* __ASSEMBLY__ */
     30
     31/* 64-bit classic hash table MMU */
     32#include <asm/book3s/64/mmu-hash.h>
     33
     34#ifndef __ASSEMBLY__
     35/*
     36 * ISA 3.0 partition and process table entry format
     37 */
     38struct prtb_entry {
     39	__be64 prtb0;
     40	__be64 prtb1;
     41};
     42extern struct prtb_entry *process_tb;
     43
/* One 16-byte partition table entry (two big-endian doublewords). */
struct patb_entry {
	__be64 patb0;
	__be64 patb1;
};
extern struct patb_entry *partition_tb;
     49
/* Bits in patb0 field */
#define PATB_HR		(1UL << 63)	/* host radix; PATB_GR below must match */
#define RPDB_MASK	0x0fffffffffffff00UL	/* root page dir. base address */
/*
 * NOTE(review): despite the _SHIFT name this expands to a value
 * (1 << 8), not a shift count — matches upstream; confirm before
 * using it as a shift amount.
 */
#define RPDB_SHIFT	(1UL << 8)
#define RTS1_SHIFT	61		/* top 2 bits of radix tree size */
#define RTS1_MASK	(3UL << RTS1_SHIFT)
#define RTS2_SHIFT	5		/* bottom 3 bits of radix tree size */
#define RTS2_MASK	(7UL << RTS2_SHIFT)
#define RPDS_MASK	0x1f		/* root page dir. size field */

/* Bits in patb1 field */
#define PATB_GR		(1UL << 63)	/* guest uses radix; must match HR */
#define PRTS_MASK	0x1f		/* process table size field */
#define PRTB_MASK	0x0ffffffffffff000UL	/* process table base address */
     64
/* Number of supported LPID bits */
extern unsigned int mmu_lpid_bits;

/* Number of supported PID bits */
extern unsigned int mmu_pid_bits;

/* Base PID to allocate from */
extern unsigned int mmu_base_pid;

/*
 * memory block size used with radix translation.
 */
extern unsigned long __ro_after_init radix_mem_block_size;

/* Process table size: one 16-byte prtb_entry per PID, hence the "+ 4" */
#define PRTB_SIZE_SHIFT	(mmu_pid_bits + 4)
#define PRTB_ENTRIES	(1ul << mmu_pid_bits)

/* Partition table size: one 16-byte patb_entry per LPID */
#define PATB_SIZE_SHIFT	(mmu_lpid_bits + 4)
#define PATB_ENTRIES	(1ul << mmu_lpid_bits)
     84
/* Context id; used as the PIDR content for radix (see mm_context_t) */
typedef unsigned long mm_context_id_t;
struct spinlock;	/* forward declaration; full definition not needed here */

/* Maximum possible number of NPUs in a system. */
#define NV_MAX_NPUS 8
     90
/* Per-mm MMU context for Book3S-64 (hash and radix). */
typedef struct {
	union {
		/*
		 * We use id as the PIDR content for radix. On hash we can use
		 * more than one id. The extended ids are used when we start
		 * having address above 512TB. We allocate one extended id
		 * for each 512TB. The new id is then used with the 49 bit
		 * EA to build a new VA. We always use ESID_BITS_1T_MASK bits
		 * from EA and new context ids to build the new VAs.
		 */
		mm_context_id_t id;
#ifdef CONFIG_PPC_64S_HASH_MMU
		mm_context_id_t extended_id[TASK_SIZE_USER64/TASK_CONTEXT_SIZE];
#endif
	};

	/*
	 * Number of CPUs set in the mm_cpumask; kept in sync with the
	 * mask (arch_clear_mm_cpumask_cpu() decrements it when clearing
	 * a CPU from the mask).
	 */
	atomic_t active_cpus;

	/* Number of users of the external (Nest) MMU */
	atomic_t copros;

	/* Number of user space windows opened in process mm_context */
	atomic_t vas_windows;

#ifdef CONFIG_PPC_64S_HASH_MMU
	/* Hash-MMU-specific state (slices, psizes, SLB limit, ...) */
	struct hash_mm_context *hash_context;
#endif

	void __user *vdso;	/* user-space address of the vDSO mapping */
	/*
	 * pagetable fragment support
	 */
	void *pte_frag;
	void *pmd_frag;
#ifdef CONFIG_SPAPR_TCE_IOMMU
	struct list_head iommu_group_mem_list;
#endif

#ifdef CONFIG_PPC_MEM_KEYS
	/*
	 * Each bit represents one protection key.
	 * bit set   -> key allocated
	 * bit unset -> key available for allocation
	 */
	u32 pkey_allocation_map;
	s16 execute_only_pkey; /* key holding execute-only protection */
#endif
} mm_context_t;
    140
    141#ifdef CONFIG_PPC_64S_HASH_MMU
    142static inline u16 mm_ctx_user_psize(mm_context_t *ctx)
    143{
    144	return ctx->hash_context->user_psize;
    145}
    146
    147static inline void mm_ctx_set_user_psize(mm_context_t *ctx, u16 user_psize)
    148{
    149	ctx->hash_context->user_psize = user_psize;
    150}
    151
    152static inline unsigned char *mm_ctx_low_slices(mm_context_t *ctx)
    153{
    154	return ctx->hash_context->low_slices_psize;
    155}
    156
    157static inline unsigned char *mm_ctx_high_slices(mm_context_t *ctx)
    158{
    159	return ctx->hash_context->high_slices_psize;
    160}
    161
    162static inline unsigned long mm_ctx_slb_addr_limit(mm_context_t *ctx)
    163{
    164	return ctx->hash_context->slb_addr_limit;
    165}
    166
    167static inline void mm_ctx_set_slb_addr_limit(mm_context_t *ctx, unsigned long limit)
    168{
    169	ctx->hash_context->slb_addr_limit = limit;
    170}
    171
    172static inline struct slice_mask *slice_mask_for_size(mm_context_t *ctx, int psize)
    173{
    174#ifdef CONFIG_PPC_64K_PAGES
    175	if (psize == MMU_PAGE_64K)
    176		return &ctx->hash_context->mask_64k;
    177#endif
    178#ifdef CONFIG_HUGETLB_PAGE
    179	if (psize == MMU_PAGE_16M)
    180		return &ctx->hash_context->mask_16m;
    181	if (psize == MMU_PAGE_16G)
    182		return &ctx->hash_context->mask_16g;
    183#endif
    184	BUG_ON(psize != MMU_PAGE_4K);
    185
    186	return &ctx->hash_context->mask_4k;
    187}
    188
#ifdef CONFIG_PPC_SUBPAGE_PROT
/* Return this context's subpage-protection table (hash MMU only). */
static inline struct subpage_prot_table *mm_ctx_subpage_prot(mm_context_t *ctx)
{
	return ctx->hash_context->spt;
}
#endif
    195
    196/*
    197 * The current system page and segment sizes
    198 */
    199extern int mmu_virtual_psize;
    200extern int mmu_vmalloc_psize;
    201extern int mmu_io_psize;
    202#else /* CONFIG_PPC_64S_HASH_MMU */
    203#ifdef CONFIG_PPC_64K_PAGES
    204#define mmu_virtual_psize MMU_PAGE_64K
    205#else
    206#define mmu_virtual_psize MMU_PAGE_4K
    207#endif
    208#endif
    209extern int mmu_linear_psize;
    210extern int mmu_vmemmap_psize;
    211
/* MMU initialization */
void mmu_early_init_devtree(void);
void hash__early_init_devtree(void);
void radix__early_init_devtree(void);
#ifdef CONFIG_PPC_PKEY
void pkey_early_init_devtree(void);
#else
/* No memory-protection-key support configured: nothing to probe */
static inline void pkey_early_init_devtree(void) {}
#endif
    221
    222extern void hash__early_init_mmu(void);
    223extern void radix__early_init_mmu(void);
    224static inline void __init early_init_mmu(void)
    225{
    226	if (radix_enabled())
    227		return radix__early_init_mmu();
    228	return hash__early_init_mmu();
    229}
    230extern void hash__early_init_mmu_secondary(void);
    231extern void radix__early_init_mmu_secondary(void);
    232static inline void early_init_mmu_secondary(void)
    233{
    234	if (radix_enabled())
    235		return radix__early_init_mmu_secondary();
    236	return hash__early_init_mmu_secondary();
    237}
    238
    239extern void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
    240					 phys_addr_t first_memblock_size);
    241static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
    242					      phys_addr_t first_memblock_size)
    243{
    244	/*
    245	 * Hash has more strict restrictions. At this point we don't
    246	 * know which translations we will pick. Hence go with hash
    247	 * restrictions.
    248	 */
    249	if (!early_radix_enabled())
    250		hash__setup_initial_memory_limit(first_memblock_base,
    251						 first_memblock_size);
    252}
    253
#ifdef CONFIG_PPC_PSERIES
void __init radix_init_pseries(void);
#else
/* Not a pseries (PAPR) kernel: nothing to set up */
static inline void radix_init_pseries(void) { }
#endif
    259
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Remove @cpu from @mm's cpumask (used for CPU hot-unplug), keeping
 * the context's active_cpus count in sync with the mask.
 */
#define arch_clear_mm_cpumask_cpu(cpu, mm)				\
	do {								\
		if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {		\
			atomic_dec(&(mm)->context.active_cpus);		\
			cpumask_clear_cpu(cpu, mm_cpumask(mm));		\
		}							\
	} while (0)

void cleanup_cpu_mmu_context(void);
#endif
    271
    272#ifdef CONFIG_PPC_64S_HASH_MMU
    273static inline int get_user_context(mm_context_t *ctx, unsigned long ea)
    274{
    275	int index = ea >> MAX_EA_BITS_PER_CONTEXT;
    276
    277	if (likely(index < ARRAY_SIZE(ctx->extended_id)))
    278		return ctx->extended_id[index];
    279
    280	/* should never happen */
    281	WARN_ON(1);
    282	return 0;
    283}
    284
    285static inline unsigned long get_user_vsid(mm_context_t *ctx,
    286					  unsigned long ea, int ssize)
    287{
    288	unsigned long context = get_user_context(ctx, ea);
    289
    290	return get_vsid(context, ea, ssize);
    291}
    292#endif
    293
    294#endif /* __ASSEMBLY__ */
    295#endif /* _ASM_POWERPC_BOOK3S_64_MMU_H_ */