cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

arch/powerpc/include/asm/book3s/64/hash.h (7808B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HASH_H
#define _ASM_POWERPC_BOOK3S_64_HASH_H
#ifdef __KERNEL__

#include <asm/asm-const.h>

/*
 * Common bits between 4K and 64K pages in a linux-style PTE.
 * Additional bits may be defined in pgtable-hash64-*.h
 */
#define H_PTE_NONE_MASK		_PAGE_HPTEFLAGS

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/hash-64k.h>
#else
#include <asm/book3s/64/hash-4k.h>
#endif

#define H_PTRS_PER_PTE		(1 << H_PTE_INDEX_SIZE)
#define H_PTRS_PER_PMD		(1 << H_PMD_INDEX_SIZE)
#define H_PTRS_PER_PUD		(1 << H_PUD_INDEX_SIZE)

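/*
 * Illustrative values (the index sizes live in hash-4k.h / hash-64k.h
 * and may differ between kernel versions): with 4K pages
 * H_PTE_INDEX_SIZE is 9, so H_PTRS_PER_PTE == 1 << 9 == 512 entries
 * per PTE page; with 64K pages it is 8, giving 256 entries.
 */
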
/* Bits to set in a PMD/PUD/PGD entry valid bit */
#define HASH_PMD_VAL_BITS		(0x8000000000000000UL)
#define HASH_PUD_VAL_BITS		(0x8000000000000000UL)
#define HASH_PGD_VAL_BITS		(0x8000000000000000UL)

/*
 * Size of EA range mapped by our pagetables.
 */
#define H_PGTABLE_EADDR_SIZE	(H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + \
				 H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
#define H_PGTABLE_RANGE		(ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)
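
/*
 * Worked example (illustrative; the exact index sizes come from
 * hash-4k.h / hash-64k.h): with 4K pages, 9 + 7 + 9 + 9 + PAGE_SHIFT(12)
 * = 46 bits of effective address, so H_PGTABLE_RANGE == 1UL << 46 ==
 * 64TB mapped by the page tables per context.
 */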
/*
 * Top 2 bits are ignored in page table walk.
 */
#define EA_MASK			(~(0xcUL << 60))
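
/*
 * Example (illustrative): EA_MASK == 0x3fffffffffffffffUL, so
 * 0xc000000012345678UL & EA_MASK == 0x0000000012345678UL; only the low
 * 62 bits of an effective address take part in the table walk.
 */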

/*
 * We store the slot details in the second half of the page table.
 * Increase the PUD-level table so that hugetlb PTEs can be stored
 * at the PUD level.
 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES)
#define H_PUD_CACHE_INDEX	(H_PUD_INDEX_SIZE + 1)
#else
#define H_PUD_CACHE_INDEX	(H_PUD_INDEX_SIZE)
#endif
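
/*
 * A sketch of what the extra bit buys (an inference from the comment
 * above, not a definitive layout): the PUD-level allocation doubles in
 * size (index + 1 means twice the entries), and the second half holds
 * the hash slot details for hugetlb PTEs stored at PUD level,
 * conceptually:
 *
 *	slot info for entry i  ~  table[i + (1 << H_PUD_INDEX_SIZE)]
 */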

/*
 * +------------------------------+
 * |                              |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel virtual map end (0xc00e000000000000)
 * |                              |
 * |                              |
 * |      512TB/16TB of vmemmap   |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel vmemmap start
 * |                              |
 * |      512TB/16TB of IO map    |
 * |                              |
 * +------------------------------+  Kernel IO map start
 * |                              |
 * |      512TB/16TB of vmap      |
 * |                              |
 * +------------------------------+  Kernel virt start (0xc008000000000000)
 * |                              |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel linear (0xc.....)
 */

#define H_VMALLOC_START		H_KERN_VIRT_START
#define H_VMALLOC_SIZE		H_KERN_MAP_SIZE
#define H_VMALLOC_END		(H_VMALLOC_START + H_VMALLOC_SIZE)

#define H_KERN_IO_START		H_VMALLOC_END
#define H_KERN_IO_SIZE		H_KERN_MAP_SIZE
#define H_KERN_IO_END		(H_KERN_IO_START + H_KERN_IO_SIZE)

#define H_VMEMMAP_START		H_KERN_IO_END
#define H_VMEMMAP_SIZE		H_KERN_MAP_SIZE
#define H_VMEMMAP_END		(H_VMEMMAP_START + H_VMEMMAP_SIZE)

#define NON_LINEAR_REGION_ID(ea)	((((unsigned long)ea - H_KERN_VIRT_START) >> REGION_SHIFT) + 2)

/*
 * Region IDs
 */
#define USER_REGION_ID		0
#define LINEAR_MAP_REGION_ID	1
#define VMALLOC_REGION_ID	NON_LINEAR_REGION_ID(H_VMALLOC_START)
#define IO_REGION_ID		NON_LINEAR_REGION_ID(H_KERN_IO_START)
#define VMEMMAP_REGION_ID	NON_LINEAR_REGION_ID(H_VMEMMAP_START)
#define INVALID_REGION_ID	(VMEMMAP_REGION_ID + 1)
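
/*
 * Worked example: each non-linear region above spans H_KERN_MAP_SIZE
 * bytes, and provided H_KERN_MAP_SIZE == 1UL << REGION_SHIFT (which is
 * how the hash-4k.h / hash-64k.h headers arrange things), the IDs come
 * out consecutive:
 *
 *	NON_LINEAR_REGION_ID(H_VMALLOC_START) == (0 >> REGION_SHIFT) + 2 == 2
 *	NON_LINEAR_REGION_ID(H_KERN_IO_START) == 3
 *	NON_LINEAR_REGION_ID(H_VMEMMAP_START) == 4
 *
 * get_region_id() below asserts VMALLOC_REGION_ID == 2 with a
 * BUILD_BUG_ON.
 */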

/*
 * Defines the address of the vmemmap area, in its own region on
 * hash MMU CPUs.
 */

/* PTEIDX nibble */
#define _PTEIDX_SECONDARY	0x8
#define _PTEIDX_GROUP_IX	0x7

#define H_PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define H_PUD_BAD_BITS		(PMD_TABLE_SIZE-1)

#ifndef __ASSEMBLY__
static inline int get_region_id(unsigned long ea)
{
	int region_id;
	int id = (ea >> 60UL);

	/* Top nibble 0x0: user space. */
	if (id == 0)
		return USER_REGION_ID;

	/* Anything other than 0x0 or the kernel's 0xc nibble is bogus. */
	if (id != (PAGE_OFFSET >> 60))
		return INVALID_REGION_ID;

	/* Below the kernel virtual map: the linear (direct) mapping. */
	if (ea < H_KERN_VIRT_START)
		return LINEAR_MAP_REGION_ID;

	BUILD_BUG_ON(NON_LINEAR_REGION_ID(H_VMALLOC_START) != 2);

	region_id = NON_LINEAR_REGION_ID(ea);
	return region_id;
}
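
/*
 * Example (illustrative, using the addresses from the diagram above):
 * get_region_id(0xc008000000001000UL) sees top nibble 0xc and an address
 * just past H_KERN_VIRT_START (0xc008000000000000), so it returns
 * NON_LINEAR_REGION_ID(ea) == 2, i.e. VMALLOC_REGION_ID.
 */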

#define	hash__pmd_bad(pmd)		(pmd_val(pmd) & H_PMD_BAD_BITS)
#define	hash__pud_bad(pud)		(pud_val(pud) & H_PUD_BAD_BITS)
static inline int hash__p4d_bad(p4d_t p4d)
{
	return (p4d_val(p4d) == 0);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
extern void hash__mark_rodata_ro(void);
extern void hash__mark_initmem_nx(void);
#endif

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);
unsigned long htab_convert_pte_flags(unsigned long pteflags, unsigned long flags);

/* Atomic PTE updates */
static inline unsigned long hash__pte_update(struct mm_struct *mm,
					 unsigned long addr,
					 pte_t *ptep, unsigned long clr,
					 unsigned long set,
					 int huge)
{
	__be64 old_be, tmp_be;
	unsigned long old;

	/*
	 * Loop with ldarx/stdcx. until we observe a PTE that is not
	 * H_PAGE_BUSY, then atomically clear the 'clr' bits and set
	 * the 'set' bits.
	 */
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	and.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	or	%1,%1,%7\n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old_be), "=&r" (tmp_be), "=m" (*ptep)
	: "r" (ptep), "r" (cpu_to_be64(clr)), "m" (*ptep),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );
	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

	old = be64_to_cpu(old_be);
	if (old & H_PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);

	return old;
}
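
/*
 * Typical use (a sketch mirroring how write-protect style callers use
 * this): clear _PAGE_WRITE on a normal (non-huge) mapping and inspect
 * the previous value:
 *
 *	old = hash__pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
 *
 * By the time this returns, any needed hash table invalidation has been
 * queued via hpte_need_flush().
 */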

/*
 * Set the dirty and/or accessed bits atomically in a linux PTE.
 * This function doesn't need to flush the hash entry.
 */
static inline void hash__ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	__be64 old, tmp, val, mask;

	mask = cpu_to_be64(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_READ | _PAGE_WRITE |
			   _PAGE_EXEC | _PAGE_SOFT_DIRTY);

	val = pte_raw(entry) & mask;

	/* Wait for H_PAGE_BUSY to clear, then OR in the new flag bits. */
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		or	%0,%3,%0\n\
		stdcx.	%0,0,%4\n\
		bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (val), "r" (ptep), "m" (*ptep), "r" (cpu_to_be64(H_PAGE_BUSY))
	:"cc");
}
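
/*
 * Note: because the update above only ever sets bits (dirty, accessed,
 * permission-granting), a stale hash entry can simply fault again and
 * be refilled, which is why no flush is required (an inference from the
 * comment above this function).
 */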

static inline int hash__pte_same(pte_t pte_a, pte_t pte_b)
{
	return (((pte_raw(pte_a) ^ pte_raw(pte_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
}

static inline int hash__pte_none(pte_t pte)
{
	return (pte_val(pte) & ~H_PTE_NONE_MASK) == 0;
}

unsigned long pte_get_hash_gslot(unsigned long vpn, unsigned long shift,
		int ssize, real_pte_t rpte, unsigned int subpg_index);

/*
 * This low-level function performs the actual PTE insertion. Setting
 * the PTE depends on the MMU type and other factors; it's a horrible
 * mess that I'm not going to try to clean up now, but I'm keeping it
 * in one place rather than spread around.
 */
static inline void hash__set_pte_at(struct mm_struct *mm, unsigned long addr,
				  pte_t *ptep, pte_t pte, int percpu)
{
	/*
	 * Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
	*ptep = pte;
}
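
/*
 * Background note (an assumption about the wider hash MMU design, not
 * stated in this file): storing the Linux PTE is all that happens here;
 * the hardware hash page table entry (HPTE) is only inserted later,
 * when an access faults and the hash fault path fills it in.
 */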

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
				   pmd_t *pmdp, unsigned long old_pmd);
#else
static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
					  unsigned long addr, pmd_t *pmdp,
					  unsigned long old_pmd)
{
	WARN(1, "%s called with THP disabled\n", __func__);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot);
extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
					      unsigned long page_size,
					      unsigned long phys);
extern void hash__vmemmap_remove_mapping(unsigned long start,
				     unsigned long page_size);

int hash__create_section_mapping(unsigned long start, unsigned long end,
				 int nid, pgprot_t prot);
int hash__remove_section_mapping(unsigned long start, unsigned long end);

void hash__kernel_map_pages(struct page *page, int numpages, int enable);

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */