cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

pgtable.h (12651B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_PGTABLE_H
#define _ALPHA_PGTABLE_H

#include <asm-generic/pgtable-nopud.h>

/*
 * This file contains the functions and defines necessary to modify and use
 * the Alpha page table tree.
 *
 * This hopefully works with any standard Alpha page-size, as defined
 * in <asm/page.h> (currently 8192).
 */
#include <linux/mmzone.h>

#include <asm/page.h>
#include <asm/processor.h>	/* For TASK_SIZE */
#include <asm/machvec.h>
#include <asm/setup.h>

struct mm_struct;
struct vm_area_struct;

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level:  the Alpha is three-level, with
 * all levels having a one-page page table.
 */
#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PGD	(1UL << (PAGE_SHIFT-3))
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/* Number of pointers that fit on a page:  this will go away. */
#define PTRS_PER_PAGE	(1UL << (PAGE_SHIFT-3))
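
/*
 * Worked example (assuming the default PAGE_SHIFT of 13, i.e. 8KB pages):
 *
 *	PTRS_PER_PTE/PMD/PGD = 1 << 10 = 1024 entries per one-page table
 *	PMD_SHIFT   = 13 + 10 = 23	(each PMD entry maps 8MB)
 *	PGDIR_SHIFT = 13 + 20 = 33	(each PGD entry maps 8GB)
 *
 * Three 10-bit levels on top of the 13-bit page offset thus cover a
 * 43-bit virtual address range: 1024 * 8GB = 8TB.
 */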

#ifdef CONFIG_ALPHA_LARGE_VMALLOC
#define VMALLOC_START		0xfffffe0000000000
#else
#define VMALLOC_START		(-2*PGDIR_SIZE)
#endif
#define VMALLOC_END		(-PGDIR_SIZE)

/*
 * OSF/1 PAL-code-imposed page table bits
 */
#define _PAGE_VALID	0x0001
#define _PAGE_FOR	0x0002	/* used for page protection (fault on read) */
#define _PAGE_FOW	0x0004	/* used for page protection (fault on write) */
#define _PAGE_FOE	0x0008	/* used for page protection (fault on exec) */
#define _PAGE_ASM	0x0010
#define _PAGE_KRE	0x0100	/* xxx - see below on the "accessed" bit */
#define _PAGE_URE	0x0200	/* xxx */
#define _PAGE_KWE	0x1000	/* used to do the dirty bit in software */
#define _PAGE_UWE	0x2000	/* used to do the dirty bit in software */

/* .. and these are ours ... */
#define _PAGE_DIRTY	0x20000
#define _PAGE_ACCESSED	0x40000

/*
 * NOTE! The "accessed" bit isn't necessarily exact:  it can be kept exactly
 * by software (use the KRE/URE/KWE/UWE bits appropriately), but I'll fake it.
 * Under Linux/AXP, the "accessed" bit just means "read", and I'll just use
 * the KRE/URE bits to watch for it. That way we don't need to overload the
 * KWE/UWE bits with both handling dirty and accessed.
 *
 * Note that the kernel uses the accessed bit just to check whether to page
 * out a page or not, so it doesn't have to be exact anyway.
 */

#define __DIRTY_BITS	(_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE)
#define __ACCESS_BITS	(_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE)

#define _PFN_MASK	0xFFFFFFFF00000000UL

#define _PAGE_TABLE	(_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS)
#define _PAGE_CHG_MASK	(_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS)
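
/*
 * For reference, the composite masks above work out to:
 *
 *	__DIRTY_BITS  = 0x20000 | 0x1000 | 0x2000 = 0x23000
 *	__ACCESS_BITS = 0x40000 | 0x100 | 0x200   = 0x40300
 *	_PAGE_TABLE   = 0x1 | 0x23000 | 0x40300   = 0x63301
 *
 * so _PAGE_CHG_MASK makes pte_modify() below preserve exactly the PFN
 * (the high 32 bits) plus the dirty and accessed state.
 */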

/*
 * All the normal masks have the "page accessed" bits on, as any time they are used,
 * the page is accessed. They are cleared only by the page-out routines
 */
#define PAGE_NONE	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE)
#define PAGE_SHARED	__pgprot(_PAGE_VALID | __ACCESS_BITS)
#define PAGE_COPY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_READONLY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_KERNEL	__pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))

#define _PAGE_P(x) _PAGE_NORMAL((x) | (((x) & _PAGE_FOW)?0:_PAGE_FOW))
#define _PAGE_S(x) _PAGE_NORMAL(x)

/*
 * The hardware can handle write-only mappings, but as the Alpha
 * architecture does byte-wide writes with a read-modify-write
 * sequence, it's not practical to have write-without-read privs.
 * Thus the "-w- -> rw-" and "-wx -> rwx" mapping here (and in
 * arch/alpha/mm/fault.c)
 */
	/* xwr */
#define __P000	_PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __P001	_PAGE_P(_PAGE_FOE | _PAGE_FOW)
#define __P010	_PAGE_P(_PAGE_FOE)
#define __P011	_PAGE_P(_PAGE_FOE)
#define __P100	_PAGE_P(_PAGE_FOW | _PAGE_FOR)
#define __P101	_PAGE_P(_PAGE_FOW)
#define __P110	_PAGE_P(0)
#define __P111	_PAGE_P(0)

#define __S000	_PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __S001	_PAGE_S(_PAGE_FOE | _PAGE_FOW)
#define __S010	_PAGE_S(_PAGE_FOE)
#define __S011	_PAGE_S(_PAGE_FOE)
#define __S100	_PAGE_S(_PAGE_FOW | _PAGE_FOR)
#define __S101	_PAGE_S(_PAGE_FOW)
#define __S110	_PAGE_S(0)
#define __S111	_PAGE_S(0)
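
/*
 * Example of the private/shared split: __P010 ("-w-", private) is
 * _PAGE_P(_PAGE_FOE), and since FOW is not already set, _PAGE_P forces
 * it on, so the first write faults and the copy-on-write logic in
 * arch/alpha/mm/fault.c runs.  __S010 ("-w-", shared) stays
 * _PAGE_NORMAL(_PAGE_FOE): neither FOR nor FOW is set, so it behaves
 * as "rw-" exactly as described above (hence __S010 == __S011).
 */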

/*
 * pgprot_noncached() is only for infiniband pci support, and a real
 * implementation for RAM would be more complicated.
 */
#define pgprot_noncached(prot)	(prot)

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero:  used
 * for zero-mapped memory areas etc..
 */
extern pte_t __bad_page(void);
extern pmd_t * __bad_pagetable(void);

extern unsigned long __zero_page(void);

#define BAD_PAGETABLE	__bad_pagetable()
#define BAD_PAGE	__bad_page()
#define ZERO_PAGE(vaddr)	(virt_to_page(ZERO_PGE))

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR			(8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK			(~(sizeof(void*)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
#define SIZEOF_PTR_LOG2			3

/* to find an entry in a page-table */
#define PAGE_PTR(address)		\
  ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

/*
 * On certain platforms whose physical address space can overlap KSEG,
 * namely EV6 and above, we must re-twiddle the physaddr to restore the
 * correct high-order bits.
 *
 * This is extremely confusing until you realize that this is actually
 * just working around a userspace bug.  The X server was intending to
 * provide the physical address but instead provided the KSEG address.
 * Or tried to, except it's not representable.
 *
 * On Tsunami there's nothing meaningful at 0x40000000000, so this is
 * a safe thing to do.  Come the first core logic that does put something
 * in this area -- memory or whathaveyou -- then this hack will have
 * to go away.  So be prepared!
 */

#if defined(CONFIG_ALPHA_GENERIC) && defined(USE_48_BIT_KSEG)
#error "EV6-only feature in a generic kernel"
#endif
#if defined(CONFIG_ALPHA_GENERIC) || \
    (defined(CONFIG_ALPHA_EV6) && !defined(USE_48_BIT_KSEG))
#define KSEG_PFN	(0xc0000000000UL >> PAGE_SHIFT)
#define PHYS_TWIDDLE(pfn) \
  ((((pfn) & KSEG_PFN) == (0x40000000000UL >> PAGE_SHIFT)) \
  ? ((pfn) ^= KSEG_PFN) : (pfn))
#else
#define PHYS_TWIDDLE(pfn) (pfn)
#endif
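
/*
 * Concretely, with 8KB pages KSEG_PFN selects physical-address bits
 * 43:42 of the pfn.  Only a pfn whose top bits are 01 (the
 * 0x40000000000 alias) is rewritten, by flipping those bits to 10;
 * everything else passes through untouched:
 *
 *	PHYS_TWIDDLE(0x40000000000UL >> PAGE_SHIFT)
 *		== 0x80000000000UL >> PAGE_SHIFT
 *	PHYS_TWIDDLE(0x1234) == 0x1234
 */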

/*
 * Conversion functions:  convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define page_to_pa(page)	(page_to_pfn(page) << PAGE_SHIFT)
#define pte_pfn(pte)	(pte_val(pte) >> 32)

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))
#define mk_pte(page, pgprot)						\
({									\
	pte_t pte;							\
									\
	pte_val(pte) = (page_to_pfn(page) << 32) | pgprot_val(pgprot);	\
	pte;								\
})

extern inline pte_t pfn_pte(unsigned long physpfn, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpfn) << 32) | pgprot_val(pgprot); return pte; }
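
/*
 * PTE layout illustration: the pfn sits in the high 32 bits (_PFN_MASK),
 * the protection and software bits in the low 32.  For a hypothetical
 * pfn of 0x1234 mapped with PAGE_KERNEL (0x0001 | 0x0010 | 0x0100 |
 * 0x1000 = 0x1111):
 *
 *	pte_val = (0x1234UL << 32) | 0x1111 = 0x0000123400001111
 *	pte_pfn(pte) == 0x1234
 */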

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{ pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline void pud_set(pud_t * pudp, pmd_t * pmdp)
{ pud_val(*pudp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }


extern inline unsigned long
pmd_page_vaddr(pmd_t pmd)
{
	return ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)) + PAGE_OFFSET;
}
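
/*
 * pmd_set() and pmd_page_vaddr() are inverses: pmd_set() turns the
 * kernel virtual address of a pte page into a physical address
 * (subtracting PAGE_OFFSET) and shifts the resulting pfn into the high
 * 32 bits in one step ("<< (32 - PAGE_SHIFT)" rather than
 * ">> PAGE_SHIFT" followed by "<< 32"); pmd_page_vaddr() undoes both.
 * Sketch:
 *
 *	pmd_set(pmdp, ptep);
 *	BUG_ON(pmd_page_vaddr(*pmdp) != (unsigned long) ptep);
 */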

#define pmd_pfn(pmd)	(pmd_val(pmd) >> 32)
#define pmd_page(pmd)	(pfn_to_page(pmd_val(pmd) >> 32))
#define pud_page(pud)	(pfn_to_page(pud_val(pud) >> 32))

extern inline pmd_t *pud_pgtable(pud_t pgd)
{
	return (pmd_t *)(PAGE_OFFSET + ((pud_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)));
}

extern inline int pte_none(pte_t pte)		{ return !pte_val(pte); }
extern inline int pte_present(pte_t pte)	{ return pte_val(pte) & _PAGE_VALID; }
extern inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = 0;
}

extern inline int pmd_none(pmd_t pmd)		{ return !pmd_val(pmd); }
extern inline int pmd_bad(pmd_t pmd)		{ return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE; }
extern inline int pmd_present(pmd_t pmd)	{ return pmd_val(pmd) & _PAGE_VALID; }
extern inline void pmd_clear(pmd_t * pmdp)	{ pmd_val(*pmdp) = 0; }

extern inline int pud_none(pud_t pud)		{ return !pud_val(pud); }
extern inline int pud_bad(pud_t pud)		{ return (pud_val(pud) & ~_PFN_MASK) != _PAGE_TABLE; }
extern inline int pud_present(pud_t pud)	{ return pud_val(pud) & _PAGE_VALID; }
extern inline void pud_clear(pud_t * pudp)	{ pud_val(*pudp) = 0; }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_write(pte_t pte)		{ return !(pte_val(pte) & _PAGE_FOW); }
extern inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }

extern inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_FOW; return pte; }
extern inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
extern inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~(__ACCESS_BITS); return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) &= ~_PAGE_FOW; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= __DIRTY_BITS; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= __ACCESS_BITS; return pte; }
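
/*
 * Put together, the software dirty/accessed emulation works like this:
 * a clean pte has KWE/UWE clear, so the first write raises a
 * fault-on-write; the fault handler applies pte_mkdirty(), setting
 * _PAGE_DIRTY along with KWE/UWE so later writes run at full speed.
 * Similarly, after pte_mkold() the KRE/URE bits are clear, so the next
 * read faults and the page is marked young again with pte_mkyoung().
 */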

/*
 * The smp_rmb() calls in the following functions are required to order the
 * load of *dir (the pointer in the top level page table) with any subsequent
 * load of the returned pmd_t *ret (ret is data dependent on *dir).
 *
 * If this ordering is not enforced, the CPU might load an older value of
 * *ret, which may be uninitialized data. See mm/memory.c:__pte_alloc for
 * more details.
 *
 * Note that we never change the mm->pgd pointer after the task is running, so
 * pgd_offset does not require such a barrier.
 */

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pud_t * dir, unsigned long address)
{
	pmd_t *ret = pud_pgtable(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
	smp_rmb(); /* see above */
	return ret;
}
#define pmd_offset pmd_offset

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
{
	pte_t *ret = (pte_t *) pmd_page_vaddr(*dir)
		+ ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
	smp_rmb(); /* see above */
	return ret;
}
#define pte_offset_kernel pte_offset_kernel
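
/*
 * A minimal sketch of a full walk with these helpers; the pgd/p4d/pud
 * steps are the generic folded ones pulled in via
 * <asm-generic/pgtable-nopud.h>:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	p4d_t *p4d = p4d_offset(pgd, addr);	// folded, same entry
 *	pud_t *pud = pud_offset(p4d, addr);	// folded, same entry
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * with the usual *_none()/*_bad() checks at each level before
 * descending, as done throughout mm/.
 */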

extern pgd_t swapper_pg_dir[1024];

/*
 * The Alpha doesn't have any external MMU info:  the kernel page
 * tables contain all the necessary information.
 */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
	unsigned long address, pte_t *ptep)
{
}

/*
 * Non-present pages:  high 24 bits are offset, next 8 bits type,
 * low 32 bits zero.
 */
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }

#define __swp_type(x)		(((x).val >> 32) & 0xff)
#define __swp_offset(x)		((x).val >> 40)
#define __swp_entry(type, off)	((swp_entry_t) { pte_val(mk_swap_pte((type), (off))) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
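
/*
 * Worked example: __swp_entry(2, 100) puts the type in bits 39:32 and
 * the offset in bits 63:40:
 *
 *	val = (2UL << 32) | (100UL << 40) = 0x0000640200000000
 *	__swp_type(x)   == (val >> 32) & 0xff == 2
 *	__swp_offset(x) == val >> 40 == 100
 *
 * The low 32 bits (including _PAGE_VALID) stay zero, so a swap entry
 * is never pte_present().
 */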

#define kern_addr_valid(addr)	(1)

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern void paging_init(void);

/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT.  */
#define HAVE_ARCH_UNMAPPED_AREA

#endif /* _ALPHA_PGTABLE_H */