cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

pgtable.h (12800B)


/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * OpenRISC implementation:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 * et al.
 */

/* or1k pgtable.h - macros and functions to manipulate page tables
 *
 * Based on:
 * include/asm-cris/pgtable.h
 */

#ifndef __ASM_OPENRISC_PGTABLE_H
#define __ASM_OPENRISC_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <asm/mmu.h>
#include <asm/fixmap.h>

/*
 * The Linux memory management assumes a three-level page table setup. On
 * or1k, we use that, but "fold" the mid level into the top-level page
 * table. Since the MMU TLB is software loaded through an interrupt, it
 * supports any page table structure, so we could have used a three-level
 * setup, but for the amounts of memory we normally use, a two-level is
 * probably more efficient.
 *
 * This file contains the functions and defines necessary to modify and use
 * the or1k page table tree.
 */

extern void paging_init(void);

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
/*
 * (pmds are folded into pgds, so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)

#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-2))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level: we use a two-level setup, so we
 * don't really have any PMD directory physically.  Pointers are 4 bytes,
 * so we can use the page size and divide it by 4 (shift by 2).
 */
#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-2))

#define PTRS_PER_PGD	(1UL << (32-PGDIR_SHIFT))
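
/*
 * Worked values (illustrative, not in the original header): assuming
 * or1k's usual 8 KiB pages (PAGE_SHIFT == 13), the macros above give
 * PGDIR_SHIFT == 24, PGDIR_SIZE == 16 MiB, PTRS_PER_PTE == 2048 and
 * PTRS_PER_PGD == 256.  A PTE table is then 2048 * 4 bytes == exactly
 * one 8 KiB page, and 256 PGD entries * 16 MiB cover the full 4 GiB of
 * 32-bit virtual address space.
 */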

/* Calculate how many PGD entries a user-level program can use.
 * The first mappable virtual address is 0.
 * (TASK_SIZE is the maximum virtual address space.)
 */

#define USER_PTRS_PER_PGD       (TASK_SIZE/PGDIR_SIZE)
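
/*
 * Illustrative arithmetic (not in the original): assuming the usual or1k
 * TASK_SIZE of 0x80000000 (2 GiB) and the 16 MiB PGDIR_SIZE worked out
 * above, USER_PTRS_PER_PGD == 0x80000000 / 0x01000000 == 128, i.e. the
 * lower half of the 256-entry PGD belongs to user space.
 */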

/*
 * Kernel's own virtual memory area.
 */

/*
 * The size and location of the vmalloc area are chosen so that modules
 * placed in this area aren't more than a 28-bit signed offset from any
 * kernel functions that they may need.  This greatly simplifies handling
 * of the relocations for l.j and l.jal instructions as we don't need to
 * introduce any trampolines for reaching "distant" code.
 *
 * 64 MB of vmalloc area is comparable to what's available on other arches.
 */

#define VMALLOC_START	(PAGE_OFFSET-0x04000000UL)
#define VMALLOC_END	(PAGE_OFFSET)
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
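
/*
 * Worked addresses (illustrative, not in the original): assuming the
 * usual or1k PAGE_OFFSET of 0xc0000000, VMALLOC_START == 0xbc000000 and
 * VMALLOC_END == 0xc0000000, i.e. the 64 MB window directly below the
 * kernel's linear mapping, as the comment above describes.
 */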

/* Define some higher level generic page attributes.
 *
 * If you change the _PAGE_CI definition, be sure to change it in
 * io.h for ioremap() too.
 */

/*
 * An OR32 PTE looks like this:
 *
 * |  31 ... 10 |  9  |  8 ... 6  |  5  |  4  |  3  |  2  |  1  |  0  |
 *  Phys pg.num    L     PP Index    D     A    WOM   WBC   CI    CC
 *
 *  L  : link
 *  PPI: Page protection index
 *  D  : Dirty
 *  A  : Accessed
 *  WOM: Weakly ordered memory
 *  WBC: Write-back cache
 *  CI : Cache inhibit
 *  CC : Cache coherent
 *
 * The protection bits below should correspond to the layout of the actual
 * PTE as per above.
 */

#define _PAGE_CC       0x001 /* software: pte contains a translation */
#define _PAGE_CI       0x002 /* cache inhibit          */
#define _PAGE_WBC      0x004 /* write back cache       */
#define _PAGE_WOM      0x008 /* weakly ordered memory  */

#define _PAGE_A        0x010 /* accessed               */
#define _PAGE_D        0x020 /* dirty                  */
#define _PAGE_URE      0x040 /* user read enable       */
#define _PAGE_UWE      0x080 /* user write enable      */

#define _PAGE_SRE      0x100 /* superuser read enable  */
#define _PAGE_SWE      0x200 /* superuser write enable */
#define _PAGE_EXEC     0x400 /* software: page is executable */
#define _PAGE_U_SHARED 0x800 /* software: page is shared in user space */
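
/*
 * Example decode (illustrative, not in the original): a PTE value of
 * 0x00a00f31 maps physical page frame 0x00a00000 with
 * _PAGE_CC | _PAGE_A | _PAGE_D | _PAGE_SRE | _PAGE_SWE | _PAGE_EXEC |
 * _PAGE_U_SHARED (0xf31), i.e. a present, accessed, dirty, executable
 * kernel read/write mapping; these are exactly the bits that PAGE_KERNEL
 * below is built from.
 */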

/* 0x001 is the cache coherency bit, which should always be set to
 *       1 - for SMP (when we support it)
 *       0 - otherwise
 *
 * We just reuse this bit in software for _PAGE_PRESENT and
 * force it to 0 when loading it into the TLB.
 */
#define _PAGE_PRESENT  _PAGE_CC
#define _PAGE_USER     _PAGE_URE
#define _PAGE_WRITE    (_PAGE_UWE | _PAGE_SWE)
#define _PAGE_DIRTY    _PAGE_D
#define _PAGE_ACCESSED _PAGE_A
#define _PAGE_NO_CACHE _PAGE_CI
#define _PAGE_SHARED   _PAGE_U_SHARED
#define _PAGE_READ     (_PAGE_URE | _PAGE_SRE)

#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_BASE     (_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_ALL      (_PAGE_PRESENT | _PAGE_ACCESSED)
#define _KERNPG_TABLE \
	(_PAGE_BASE | _PAGE_SRE | _PAGE_SWE | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE       __pgprot(_PAGE_ALL)
#define PAGE_READONLY   __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE)
#define PAGE_READONLY_X __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_EXEC)
#define PAGE_SHARED \
	__pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_UWE | _PAGE_SWE \
		 | _PAGE_SHARED)
#define PAGE_SHARED_X \
	__pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_UWE | _PAGE_SWE \
		 | _PAGE_SHARED | _PAGE_EXEC)
#define PAGE_COPY       __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE)
#define PAGE_COPY_X     __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_EXEC)

#define PAGE_KERNEL \
	__pgprot(_PAGE_ALL | _PAGE_SRE | _PAGE_SWE \
		 | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC)
#define PAGE_KERNEL_RO \
	__pgprot(_PAGE_ALL | _PAGE_SRE \
		 | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC)
#define PAGE_KERNEL_NOCACHE \
	__pgprot(_PAGE_ALL | _PAGE_SRE | _PAGE_SWE \
		 | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC | _PAGE_CI)
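
/*
 * Computed pgprot values (illustrative, straight from the defines above):
 * PAGE_NONE == 0x011, PAGE_READONLY == 0x151, PAGE_SHARED == 0xbd1,
 * PAGE_KERNEL == 0xf31 and PAGE_KERNEL_NOCACHE == 0xf33, i.e.
 * PAGE_KERNEL plus _PAGE_CI.
 */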

#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY_X
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY_X
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY_X
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED_X
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED_X

/* zero page used for uninitialized stuff */
extern unsigned long empty_zero_page[2048];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR			(8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK			(~(sizeof(void *)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
/* 64-bit machines, beware!  SRB. */
#define SIZEOF_PTR_LOG2			2

/* to find an entry in a page-table */
#define PAGE_PTR(address) \
((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

/* to set the page-dir */
#define SET_PAGE_DIR(tsk, pgdir)

#define pte_none(x)	(!pte_val(x))
#define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, xp)	do { pte_val(*(xp)) = 0; } while (0)

#define pmd_none(x)	(!pmd_val(x))
#define	pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK)) != _KERNPG_TABLE)
#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { pmd_val(*(xp)) = 0; } while (0)

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */

static inline int pte_read(pte_t pte)  { return pte_val(pte) & _PAGE_READ; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_exec(pte_t pte)  { return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE);
	return pte;
}

static inline pte_t pte_rdprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_READ);
	return pte;
}

static inline pte_t pte_exprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_EXEC);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_DIRTY);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	return pte;
}

static inline pte_t pte_mkread(pte_t pte)
{
	pte_val(pte) |= _PAGE_READ;
	return pte;
}

static inline pte_t pte_mkexec(pte_t pte)
{
	pte_val(pte) |= _PAGE_EXEC;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

/* What actually goes as arguments to the various functions is less than
 * obvious, but a rule of thumb is that struct pages are passed as
 * struct page *, real physical DRAM addresses as unsigned long, and
 * DRAM "virtual" addresses (the 0xc0xxxxxx ones) as void *.
 */

static inline pte_t __mk_pte(void *page, pgprot_t pgprot)
{
	pte_t pte;
	/* the PTE needs a physical address */
	pte_val(pte) = __pa(page) | pgprot_val(pgprot);
	return pte;
}

#define mk_pte(page, pgprot) __mk_pte(page_address(page), (pgprot))

#define mk_pte_phys(physpage, pgprot) \
({                                                                      \
	pte_t __pte;                                                    \
									\
	pte_val(__pte) = (physpage) + pgprot_val(pgprot);               \
	__pte;                                                          \
})
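
/*
 * Illustrative equivalence (not in the original header): for a
 * page-aligned kernel virtual address vaddr,
 *
 *	pte_t a = mk_pte(virt_to_page(vaddr), PAGE_KERNEL);
 *	pte_t b = mk_pte_phys(__pa(vaddr), PAGE_KERNEL);
 *
 * produce the same PTE.  Note mk_pte_phys uses '+' where __mk_pte uses
 * '|'; the two only agree because the physical address is page aligned
 * and all the protection bits live below PAGE_SHIFT.
 */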

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}
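
/*
 * Illustrative use (not in the original): downgrade an existing mapping
 * to read-only while keeping its page frame and accessed/dirty state:
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 *
 * _PAGE_CHG_MASK preserves PAGE_MASK (the frame address) plus
 * _PAGE_ACCESSED and _PAGE_DIRTY; every other bit comes from the new
 * pgprot.
 */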


/*
 * pte_val refers to a page in the 0x0xxxxxxx physical DRAM interval
 * __pte_page(pte_val) refers to the "virtual" DRAM interval
 * pte_pagenr refers to the page-number counted starting from the virtual
 * DRAM start
 */

static inline unsigned long __pte_page(pte_t pte)
{
	/* the PTE contains a physical address */
	return (unsigned long)__va(pte_val(pte) & PAGE_MASK);
}

#define pte_pagenr(pte)         ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)

/* permanent address of a page */

#define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
#define pte_page(pte)		(mem_map+pte_pagenr(pte))

/*
 * Only the PTEs themselves need to point to physical DRAM (see above);
 * the page table links are handled purely in kernel software and thus
 * don't need the __pa and __va transformations.
 */
static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	pmd_val(*pmdp) = _KERNPG_TABLE | (unsigned long) ptep;
}

#define pmd_pfn(pmd)		(pmd_val(pmd) >> PAGE_SHIFT)
#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK));
}
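
/*
 * Illustrative sketch (not in the original source) of the two-level walk
 * these helpers support; the TLB-miss handler performs the equivalent
 * lookup.  pgd_index() and pte_index() are assumed from the generic
 * headers, and since the pmd level is folded into the pgd, the pgd entry
 * itself is read as the pmd:
 *
 *	pgd_t *pgd = swapper_pg_dir + pgd_index(addr);
 *	pmd_t *pmd = (pmd_t *)pgd;
 *	pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd) + pte_index(addr);
 *
 * *pte then holds the physical frame address OR'd with the _PAGE_* bits.
 */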

#define __pmd_offset(address) \
	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

#define pte_pfn(x)		((unsigned long)(((x).pte)) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot)  __pte((((pfn) << PAGE_SHIFT)) | pgprot_val(prot))

#define pte_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pte %p(%08lx).\n", \
	       __FILE__, __LINE__, &(e), pte_val(e))
#define pgd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pgd %p(%08lx).\n", \
	       __FILE__, __LINE__, &(e), pgd_val(e))

extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* defined in head.S */

struct vm_area_struct;

static inline void update_tlb(struct vm_area_struct *vma,
	unsigned long address, pte_t *pte)
{
}

extern void update_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *pte);

static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *pte)
{
	update_tlb(vma, address, pte);
	update_cache(vma, address, pte);
}

/* __PHX__ FIXME, SWAP, this probably doesn't work */

/* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */
/* _PAGE_PRESENT is bit 0 (_PAGE_CC), so the type and offset are kept in
 * bits 5 and up, leaving the low bits clear and pte_present() false */

#define __swp_type(x)			(((x).val >> 5) & 0x7f)
#define __swp_offset(x)			((x).val >> 12)
#define __swp_entry(type, offset) \
	((swp_entry_t) { ((type) << 5) | ((offset) << 12) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
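
/*
 * Worked example (illustrative, not in the original): __swp_entry(3, 0x1234)
 * yields (3 << 5) | (0x1234 << 12) == 0x01234060; __swp_type() recovers
 * (0x01234060 >> 5) & 0x7f == 3 and __swp_offset() recovers
 * 0x01234060 >> 12 == 0x1234.  The 7-bit type field allows 128 swap
 * types, the offset gets the remaining 20 high bits, and the low 5 bits
 * stay clear so the entry never looks pte_present().
 */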

#define kern_addr_valid(addr)           (1)

typedef pte_t *pte_addr_t;

#endif /* __ASSEMBLY__ */
#endif /* __ASM_OPENRISC_PGTABLE_H */