cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mmu-hash.h (6269B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_
#define _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_

/*
 * 32-bit hash table MMU support
 */

/*
 * BATs
 */

/* Block size masks */
#define BL_128K	0x000
#define BL_256K 0x001
#define BL_512K 0x003
#define BL_1M   0x007
#define BL_2M   0x00F
#define BL_4M   0x01F
#define BL_8M   0x03F
#define BL_16M  0x07F
#define BL_32M  0x0FF
#define BL_64M  0x1FF
#define BL_128M 0x3FF
#define BL_256M 0x7FF

/* BAT Access Protection */
#define BPP_XX	0x00		/* No access */
#define BPP_RX	0x01		/* Read only */
#define BPP_RW	0x02		/* Read/write */

#ifndef __ASSEMBLY__
/* Contort a phys_addr_t into the right format/bits for a BAT */
#ifdef CONFIG_PHYS_64BIT
#define BAT_PHYS_ADDR(x) ((u32)((x & 0x00000000fffe0000ULL) | \
				((x & 0x0000000e00000000ULL) >> 24) | \
				((x & 0x0000000100000000ULL) >> 30)))
#define PHYS_BAT_ADDR(x) (((u64)(x) & 0x00000000fffe0000ULL) | \
			  (((u64)(x) << 24) & 0x0000000e00000000ULL) | \
			  (((u64)(x) << 30) & 0x0000000100000000ULL))
#else
#define BAT_PHYS_ADDR(x) (x)
#define PHYS_BAT_ADDR(x) ((x) & 0xfffe0000)
#endif
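
/*
 * Worked example (illustrative, not part of the original header): with
 * CONFIG_PHYS_64BIT, a 36-bit physical address such as 0x9ABCD0000ULL
 * folds into a 32-bit BAT value as follows: bits 17-31 stay in place
 * (0xABCD0000 & 0xfffe0000 = 0xABCC0000), address bits 33-35
 * (0x8_00000000 here) shift down 24 places into the XPN field (0x800),
 * and bit 32 (0x1_00000000) shifts down 30 places into the X bit (0x4),
 * so BAT_PHYS_ADDR(0x9ABCD0000ULL) == 0xABCC0804. PHYS_BAT_ADDR()
 * undoes this folding.
 */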

struct ppc_bat {
	u32 batu;
	u32 batl;
};
#endif /* !__ASSEMBLY__ */

/*
 * Hash table
 */

/* Values for PP (assumes Ks=0, Kp=1) */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX 1	/* Supervisor read/write, User read */
#define PP_RWRW 2	/* Supervisor read/write, User read/write */
#define PP_RXRX 3	/* Supervisor read,       User read */

/* Values for Segment Registers */
#define SR_NX	0x10000000	/* No Execute */
#define SR_KP	0x20000000	/* User key */
#define SR_KS	0x40000000	/* Supervisor key */
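
/*
 * How the keys and PP combine (illustrative note): the MMU uses Ks as
 * the access key in supervisor mode and Kp in user mode. With Ks=0 and
 * Kp=1, supervisor accesses take key 0 (PP=0..2 read/write, PP=3 read
 * only) while user accesses take key 1 (PP=0 no access, PP=1/3 read
 * only, PP=2 read/write), which yields exactly the PP_* pairs above.
 */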

#ifdef __ASSEMBLY__

#include <asm/asm-offsets.h>

.macro uus_addi sr reg1 reg2 imm
	.if NUM_USER_SEGMENTS > \sr
	addi	\reg1,\reg2,\imm
	.endif
.endm

.macro uus_mtsr sr reg1
	.if NUM_USER_SEGMENTS > \sr
	mtsr	\sr, \reg1
	.endif
.endm

/*
 * This isync() shouldn't be necessary as the kernel is not expected to run
 * any instruction in userspace soon after the update of segments, and the
 * 'rfi' instruction is used to return to userspace; but hash based cores
 * (at least G3) seem to exhibit random behaviour when the 'isync' is not
 * there. 603 cores don't have this behaviour, so skip the 'isync' there as
 * it saves several CPU cycles.
 */
.macro uus_isync
#ifdef CONFIG_PPC_BOOK3S_604
BEGIN_MMU_FTR_SECTION
	isync
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#endif
.endm

.macro update_user_segments_by_4 tmp1 tmp2 tmp3 tmp4
	uus_addi	1, \tmp2, \tmp1, 0x111
	uus_addi	2, \tmp3, \tmp1, 0x222
	uus_addi	3, \tmp4, \tmp1, 0x333

	uus_mtsr	0, \tmp1
	uus_mtsr	1, \tmp2
	uus_mtsr	2, \tmp3
	uus_mtsr	3, \tmp4

	uus_addi	4, \tmp1, \tmp1, 0x444
	uus_addi	5, \tmp2, \tmp2, 0x444
	uus_addi	6, \tmp3, \tmp3, 0x444
	uus_addi	7, \tmp4, \tmp4, 0x444

	uus_mtsr	4, \tmp1
	uus_mtsr	5, \tmp2
	uus_mtsr	6, \tmp3
	uus_mtsr	7, \tmp4

	uus_addi	8, \tmp1, \tmp1, 0x444
	uus_addi	9, \tmp2, \tmp2, 0x444
	uus_addi	10, \tmp3, \tmp3, 0x444
	uus_addi	11, \tmp4, \tmp4, 0x444

	uus_mtsr	8, \tmp1
	uus_mtsr	9, \tmp2
	uus_mtsr	10, \tmp3
	uus_mtsr	11, \tmp4

	uus_addi	12, \tmp1, \tmp1, 0x444
	uus_addi	13, \tmp2, \tmp2, 0x444
	uus_addi	14, \tmp3, \tmp3, 0x444
	uus_addi	15, \tmp4, \tmp4, 0x444

	uus_mtsr	12, \tmp1
	uus_mtsr	13, \tmp2
	uus_mtsr	14, \tmp3
	uus_mtsr	15, \tmp4

	uus_isync
.endm
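
/*
 * Note (illustrative): the 0x111/0x222/0x333 and 0x444 increments above
 * step the VSID between consecutive segment registers. CTX_TO_VSID()
 * below skews each segment's VSID by id * 0x111, and since four
 * registers are loaded per round, each register's value advances by
 * 4 * 0x111 = 0x444 between rounds.
 */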

#else

/*
 * This macro defines the mapping from contexts to VSIDs (virtual
 * segment IDs).  We use a skew on both the context and the high 4 bits
 * of the 32-bit virtual address (the "effective segment ID") in order
 * to spread out the entries in the MMU hash table.  Note, if this
 * function is changed then hash functions will have to be
 * changed to correspond.
 */
#define CTX_TO_VSID(c, id)	((((c) * (897 * 16)) + ((id) * 0x111)) & 0xffffff)
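
/*
 * Worked example (illustrative): for context 1, segment 0 maps to
 * VSID (1 * 897 * 16) & 0xffffff = 14352 = 0x3810, and segment 1 to
 * 0x3810 + 0x111 = 0x3921. Consecutive segments of one context thus
 * differ by 0x111, matching the per-segment stride used by
 * update_user_segments() and update_user_segments_by_4.
 */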

/*
 * Hardware Page Table Entry
 * Note that the xpn and x bitfields are used only by processors that
 * support extended addressing; otherwise, those bits are reserved.
 */
struct hash_pte {
	unsigned long v:1;	/* Entry is valid */
	unsigned long vsid:24;	/* Virtual segment identifier */
	unsigned long h:1;	/* Hash algorithm indicator */
	unsigned long api:6;	/* Abbreviated page index */
	unsigned long rpn:20;	/* Real (physical) page number */
	unsigned long xpn:3;	/* Real page number bits 0-2, optional */
	unsigned long r:1;	/* Referenced */
	unsigned long c:1;	/* Changed */
	unsigned long w:1;	/* Write-thru cache mode */
	unsigned long i:1;	/* Cache inhibited */
	unsigned long m:1;	/* Memory coherence */
	unsigned long g:1;	/* Guarded */
	unsigned long x:1;	/* Real page number bit 3, optional */
	unsigned long pp:2;	/* Page protection */
};
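
/*
 * Layout check (illustrative): with a 32-bit unsigned long, the
 * bitfields pack into the two words of a hardware PTE: word 0 is
 * v + vsid + h + api = 1 + 24 + 1 + 6 = 32 bits, and word 1 is
 * rpn + xpn + r + c + w/i/m/g + x + pp =
 * 20 + 3 + 1 + 1 + 4 + 1 + 2 = 32 bits.
 */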

typedef struct {
	unsigned long id;
	unsigned long sr0;
	void __user *vdso;
} mm_context_t;

#ifdef CONFIG_PPC_KUEP
#define INIT_MM_CONTEXT(mm) .context.sr0 = SR_NX
#endif

void update_bats(void);
static inline void cleanup_cpu_mmu_context(void) { }

/* patch sites */
extern s32 patch__hash_page_A0, patch__hash_page_A1, patch__hash_page_A2;
extern s32 patch__hash_page_B, patch__hash_page_C;
extern s32 patch__flush_hash_A0, patch__flush_hash_A1, patch__flush_hash_A2;
extern s32 patch__flush_hash_B;

#include <asm/reg.h>
#include <asm/task_size_32.h>

static __always_inline void update_user_segment(u32 n, u32 val)
{
	if (n << 28 < TASK_SIZE)
		mtsr(val + n * 0x111, n << 28);
}

static __always_inline void update_user_segments(u32 val)
{
	val &= 0xf0ffffff;

	update_user_segment(0, val);
	update_user_segment(1, val);
	update_user_segment(2, val);
	update_user_segment(3, val);
	update_user_segment(4, val);
	update_user_segment(5, val);
	update_user_segment(6, val);
	update_user_segment(7, val);
	update_user_segment(8, val);
	update_user_segment(9, val);
	update_user_segment(10, val);
	update_user_segment(11, val);
	update_user_segment(12, val);
	update_user_segment(13, val);
	update_user_segment(14, val);
	update_user_segment(15, val);
}
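
/*
 * Note (illustrative): update_user_segments() masks val with 0xf0ffffff
 * to keep the T/Ks/Kp/N flag bits and the 24-bit VSID while clearing
 * the reserved bits in between, then writes each user segment register
 * with the per-segment 0x111 VSID skew. Segment n covers addresses
 * starting at n << 28, so the TASK_SIZE guard in update_user_segment()
 * leaves kernel segments untouched.
 */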

int __init find_free_bat(void);
unsigned int bat_block_size(unsigned long base, unsigned long top);
#endif /* !__ASSEMBLY__ */

/* We happily ignore the smaller BATs on 601; we don't actually use
 * those definitions on hash32 at the moment anyway.
 */
#define mmu_virtual_psize	MMU_PAGE_4K
#define mmu_linear_psize	MMU_PAGE_256M

#endif /* _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_ */