cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

pgtable.h (14526B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * include/asm-xtensa/pgtable.h
 *
 * Copyright (C) 2001 - 2013 Tensilica Inc.
 */

#ifndef _XTENSA_PGTABLE_H
#define _XTENSA_PGTABLE_H

#include <asm/page.h>
#include <asm/kmem_layout.h>
#include <asm-generic/pgtable-nopmd.h>

/*
 * We only use two ring levels, user and kernel space.
 */

#ifdef CONFIG_MMU
#define USER_RING		1	/* user ring level */
#else
#define USER_RING		0
#endif
#define KERNEL_RING		0	/* kernel ring level */

/*
 * The Xtensa architecture port of Linux has a two-level page table system,
 * i.e. the logical three-level Linux page table layout is folded.
 * Each task has the following memory page tables:
 *
 *   PGD table (page directory), i.e. 3rd-level page table:
 *	One page (4 kB) of 1024 (PTRS_PER_PGD) pointers to PTE tables
 *	(Architectures that don't have the PMD folded point to the PMD tables)
 *
 *	The pointer to the PGD table for a given task can be retrieved from
 *	the task structure (struct task_struct*) t, e.g. current():
 *	  (t->mm ? t->mm : t->active_mm)->pgd
 *
 *   PMD tables (page middle-directory), i.e. 2nd-level page tables:
 *	Absent for the Xtensa architecture (folded, PTRS_PER_PMD == 1).
 *
 *   PTE tables (page table entry), i.e. 1st-level page tables:
 *	One page (4 kB) of 1024 (PTRS_PER_PTE) PTEs with a special PTE
 *	invalid_pte_table for absent mappings.
 *
 * The individual pages are 4 kB in size, with a special page for the
 * empty_zero_page.
 */

#define PGDIR_SHIFT	22
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level: we use two-level, so
 * we don't really have any PMD directory physically.
 */
#define PTRS_PER_PTE		1024
#define PTRS_PER_PTE_SHIFT	10
#define PTRS_PER_PGD		1024
#define PGD_ORDER		0
#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_PGD_NR	(FIRST_USER_ADDRESS >> PGDIR_SHIFT)
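
/*
 * Illustrative sketch (not part of the upstream header): with the
 * constants above, a 32-bit virtual address decomposes into a 10-bit
 * PGD index, a 10-bit PTE index and a 12-bit page offset. The helper
 * names below are hypothetical and the block is compiled out.
 */
#if 0
static inline unsigned long example_pgd_index(unsigned long vaddr)
{
	return vaddr >> PGDIR_SHIFT;			/* bits 31..22 */
}

static inline unsigned long example_pte_index(unsigned long vaddr)
{
	return (vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);	/* bits 21..12 */
}
#endif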

#ifdef CONFIG_MMU
/*
 * Virtual memory area. We keep a distance to other memory regions to be
 * on the safe side. We also use this area for cache aliasing.
 */
#define VMALLOC_START		(XCHAL_KSEG_CACHED_VADDR - 0x10000000)
#define VMALLOC_END		(VMALLOC_START + 0x07FEFFFF)
#define TLBTEMP_BASE_1		(VMALLOC_START + 0x08000000)
#define TLBTEMP_BASE_2		(TLBTEMP_BASE_1 + DCACHE_WAY_SIZE)
#if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE
#define TLBTEMP_SIZE		(2 * DCACHE_WAY_SIZE)
#else
#define TLBTEMP_SIZE		ICACHE_WAY_SIZE
#endif

#else

#define VMALLOC_START		__XTENSA_UL_CONST(0)
#define VMALLOC_END		__XTENSA_UL_CONST(0xffffffff)

#endif

/*
 * For the Xtensa architecture, the PTE layout is as follows:
 *
 *		31------12  11  10-9   8-6  5-4  3-2  1-0
 *		+-----------------------------------------+
 *		|           |   Software   |   HARDWARE   |
 *		|    PPN    |          ADW | RI |Attribute|
 *		+-----------------------------------------+
 *   pte_none	|             MBZ          | 01 | 11 | 00 |
 *		+-----------------------------------------+
 *   present	|    PPN    | 0 | 00 | ADW | RI | CA | wx |
 *		+- - - - - - - - - - - - - - - - - - - - -+
 *   (PAGE_NONE)|    PPN    | 0 | 00 | ADW | 01 | 11 | 11 |
 *		+-----------------------------------------+
 *   swap	|     index     |   type   | 01 | 11 | 00 |
 *		+-----------------------------------------+
 *
 * For T1050 hardware and earlier, the layout differs for present and (PAGE_NONE):
 *		+-----------------------------------------+
 *   present	|    PPN    | 0 | 00 | ADW | RI | CA | w1 |
 *		+-----------------------------------------+
 *   (PAGE_NONE)|    PPN    | 0 | 00 | ADW | 01 | 01 | 00 |
 *		+-----------------------------------------+
 *
 *  Legend:
 *   PPN        Physical Page Number
 *   ADW	software: accessed (young) / dirty / writable
 *   RI         ring (0=privileged, 1=user, 2 and 3 are unused)
 *   CA		cache attribute: 00 bypass, 01 writeback, 10 writethrough
 *		(11 is invalid and used to mark pages that are not present)
 *   w		page is writable (hw)
 *   x		page is executable (hw)
 *   index      swap offset / PAGE_SIZE (bit 11-31: 21 bits -> 8 GB)
 *		(note that the index is always non-zero)
 *   type       swap type (5 bits -> 32 types)
 *
 *  Notes:
 *   - (PROT_NONE) is a special case of 'present' but causes an exception for
 *     any access (read, write, and execute).
 *   - 'multihit-exception' has the highest priority of all MMU exceptions,
 *     so the ring must be set to 'RING_USER' even for 'non-present' pages.
 *   - on older hardware, the executable flag was not supported and was
 *     used as a 'valid' flag instead, so it needs to always be set.
 *   - we need to keep track of certain flags in software (dirty and young);
 *     to do this, we use write exceptions and have a separate software w-flag.
 *   - attribute value 1101 (and 1111 on T1050 and earlier) is reserved
 */

#define _PAGE_ATTRIB_MASK	0xf

#define _PAGE_HW_EXEC		(1<<0)	/* hardware: page is executable */
#define _PAGE_HW_WRITE		(1<<1)	/* hardware: page is writable */

#define _PAGE_CA_BYPASS		(0<<2)	/* bypass, non-speculative */
#define _PAGE_CA_WB		(1<<2)	/* write-back */
#define _PAGE_CA_WT		(2<<2)	/* write-through */
#define _PAGE_CA_MASK		(3<<2)
#define _PAGE_CA_INVALID	(3<<2)

/* We use invalid attribute values to distinguish special pte entries */
#if XCHAL_HW_VERSION_MAJOR < 2000
#define _PAGE_HW_VALID		0x01	/* older HW needed this bit set */
#define _PAGE_NONE		0x04
#else
#define _PAGE_HW_VALID		0x00
#define _PAGE_NONE		0x0f
#endif

#define _PAGE_USER		(1<<4)	/* user access (ring=1) */

/* Software */
#define _PAGE_WRITABLE_BIT	6
#define _PAGE_WRITABLE		(1<<6)	/* software: page writable */
#define _PAGE_DIRTY		(1<<7)	/* software: page dirty */
#define _PAGE_ACCESSED		(1<<8)	/* software: page accessed (read) */
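
/*
 * Worked example (illustrative, not part of the upstream header),
 * assuming XCHAL_HW_VERSION_MAJOR >= 2000 so that _PAGE_HW_VALID is 0:
 * a present, accessed, user-accessible, software-writable write-back
 * page at physical page frame 0x12345 that has not yet taken a write
 * fault (so _PAGE_HW_WRITE and _PAGE_DIRTY are still clear) is encoded
 * as
 *
 *   (0x12345 << 12) | _PAGE_ACCESSED | _PAGE_WRITABLE | _PAGE_USER
 *		     | _PAGE_CA_WB
 *   = 0x12345000 | 0x100 | 0x40 | 0x10 | 0x04 = 0x12345154
 *
 * i.e. PPN in bits 31..12, software bits in 8..6, ring in 5..4 and the
 * cache attribute / hw bits in 3..0, matching the layout diagram above.
 */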

#ifdef CONFIG_MMU

#define _PAGE_CHG_MASK	   (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_PRESENT	   (_PAGE_HW_VALID | _PAGE_CA_WB | _PAGE_ACCESSED)

#define PAGE_NONE	   __pgprot(_PAGE_NONE | _PAGE_USER)
#define PAGE_COPY	   __pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_COPY_EXEC	   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
#define PAGE_READONLY	   __pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
#define PAGE_SHARED	   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE)
#define PAGE_SHARED_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC)
#define PAGE_KERNEL	   __pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE)
#define PAGE_KERNEL_RO	   __pgprot(_PAGE_PRESENT)
#define PAGE_KERNEL_EXEC   __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC)

#if (DCACHE_WAY_SIZE > PAGE_SIZE)
# define _PAGE_DIRECTORY   (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_BYPASS)
#else
# define _PAGE_DIRECTORY   (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_WB)
#endif

#else /* no mmu */

# define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
# define PAGE_NONE       __pgprot(0)
# define PAGE_SHARED     __pgprot(0)
# define PAGE_COPY       __pgprot(0)
# define PAGE_READONLY   __pgprot(0)
# define PAGE_KERNEL     __pgprot(0)

#endif

/*
 * On certain configurations of Xtensa MMUs (e.g. the initial Linux config),
 * the MMU can't do page protection for execute, and considers that the same as
 * read.  Also, write permissions may imply read permissions.
 * What follows is the closest we can get by reasonable means.
 * See linux/mm/mmap.c for the protection_map[] array that uses these definitions.
 */
#define __P000	PAGE_NONE		/* private --- */
#define __P001	PAGE_READONLY		/* private --r */
#define __P010	PAGE_COPY		/* private -w- */
#define __P011	PAGE_COPY		/* private -wr */
#define __P100	PAGE_READONLY_EXEC	/* private x-- */
#define __P101	PAGE_READONLY_EXEC	/* private x-r */
#define __P110	PAGE_COPY_EXEC		/* private xw- */
#define __P111	PAGE_COPY_EXEC		/* private xwr */

#define __S000	PAGE_NONE		/* shared  --- */
#define __S001	PAGE_READONLY		/* shared  --r */
#define __S010	PAGE_SHARED		/* shared  -w- */
#define __S011	PAGE_SHARED		/* shared  -wr */
#define __S100	PAGE_READONLY_EXEC	/* shared  x-- */
#define __S101	PAGE_READONLY_EXEC	/* shared  x-r */
#define __S110	PAGE_SHARED_EXEC	/* shared  xw- */
#define __S111	PAGE_SHARED_EXEC	/* shared  xwr */

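/*
 * Illustrative example (not part of the upstream header): a private
 * PROT_READ|PROT_WRITE mapping indexes this table as __P011, i.e.
 * PAGE_COPY, which leaves both _PAGE_WRITABLE and _PAGE_HW_WRITE clear;
 * the first store therefore faults and is resolved via copy-on-write
 * and the software dirty/young tracking described above.
 */
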
#ifndef __ASSEMBLY__

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd entry %08lx.\n", __FILE__, __LINE__, pgd_val(e))

extern unsigned long empty_zero_page[1024];

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#ifdef CONFIG_MMU
extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];
extern void paging_init(void);
#else
# define swapper_pg_dir NULL
static inline void paging_init(void) { }
#endif

/*
 * The pmd contains the kernel virtual address of the pte page.
 */
#define pmd_page_vaddr(pmd) ((unsigned long)(pmd_val(pmd) & PAGE_MASK))
#define pmd_pfn(pmd) (__pa(pmd_val(pmd)) >> PAGE_SHIFT)
#define pmd_page(pmd) virt_to_page(pmd_val(pmd))

/*
 * pte status.
 */
# define pte_none(pte)	 (pte_val(pte) == (_PAGE_CA_INVALID | _PAGE_USER))
#if XCHAL_HW_VERSION_MAJOR < 2000
# define pte_present(pte) ((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID)
#else
# define pte_present(pte)						\
	(((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID)		\
	 || ((pte_val(pte) & _PAGE_ATTRIB_MASK) == _PAGE_NONE))
#endif
#define pte_clear(mm,addr,ptep)						\
	do { update_pte(ptep, __pte(_PAGE_CA_INVALID | _PAGE_USER)); } while (0)

#define pmd_none(pmd)	 (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK)
#define pmd_bad(pmd)	 (pmd_val(pmd) & ~PAGE_MASK)
#define pmd_clear(pmdp)	 do { set_pmd(pmdp, __pmd(0)); } while (0)

static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITABLE; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
	{ pte_val(pte) &= ~(_PAGE_WRITABLE | _PAGE_HW_WRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte)
	{ pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HW_WRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte)
	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)
	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)
	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)
	{ pte_val(pte) |= _PAGE_WRITABLE; return pte; }

#define pgprot_noncached(prot) \
		((__pgprot((pgprot_val(prot) & ~_PAGE_CA_MASK) | \
			   _PAGE_CA_BYPASS)))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pte_same(a,b)		(pte_val(a) == pte_val(b))
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
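
/*
 * Minimal usage sketch (illustrative, not part of the upstream header):
 * a PTE built with mk_pte() carries the page frame number in its upper
 * bits, so pte_pfn() recovers it unchanged. The helper name is
 * hypothetical and the block is compiled out.
 */
#if 0
static inline int example_pte_roundtrip_ok(struct page *page)
{
	pte_t pte = mk_pte(page, PAGE_KERNEL);	/* PPN | protection bits */

	return pte_pfn(pte) == page_to_pfn(page);
}
#endif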

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void update_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
	__asm__ __volatile__ ("dhwb %0, 0" :: "a" (ptep));
#endif
}

struct mm_struct;

static inline void
set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval)
{
	update_pte(ptep, pteval);
}

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	update_pte(ptep, pteval);
}

static inline void
set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	*pmdp = pmdval;
}
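
/*
 * Illustrative sketch (not part of the upstream header), consistent with
 * the pmd accessors above: a pmd entry holds nothing but the kernel
 * virtual address of the pte page, so it is populated by storing that
 * address directly and read back via pmd_page_vaddr(); any low bits set
 * would make pmd_bad() true. The helper name is hypothetical and the
 * block is compiled out.
 */
#if 0
static inline void example_populate_pmd(pmd_t *pmdp, pte_t *pte_table)
{
	set_pmd(pmdp, __pmd((unsigned long)pte_table));
}
#endif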

struct vm_area_struct;

static inline int
ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
			  pte_t *ptep)
{
	pte_t pte = *ptep;
	if (!pte_young(pte))
		return 0;
	update_pte(ptep, pte_mkold(pte));
	return 1;
}

static inline pte_t
ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;
	pte_clear(mm, addr, ptep);
	return pte;
}

static inline void
ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;
	update_pte(ptep, pte_wrprotect(pte));
}

/*
 * Encode and decode a swap and file entry.
 */
#define SWP_TYPE_BITS		5
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

#define __swp_type(entry)	(((entry).val >> 6) & 0x1f)
#define __swp_offset(entry)	((entry).val >> 11)
#define __swp_entry(type,offs)	\
	((swp_entry_t){((type) << 6) | ((offs) << 11) | \
	 _PAGE_CA_INVALID | _PAGE_USER})
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
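
/*
 * Illustrative round trip (not part of the upstream header): as long as
 * the type fits in 5 bits and the offset in the 21-bit index field, the
 * encode/decode macros above are inverses of each other. The helper name
 * is hypothetical and the block is compiled out.
 */
#if 0
static inline int example_swp_roundtrip_ok(unsigned int type,
					   unsigned long offset)
{
	swp_entry_t entry = __swp_entry(type, offset);	/* type: bits 10..6, offset: bits 31..11 */

	return __swp_type(entry) == type && __swp_offset(entry) == offset;
}
#endif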

#endif /*  !defined (__ASSEMBLY__) */


#ifdef __ASSEMBLY__

/* Assembly macro _PGD_INDEX is the same as C pgd_index(unsigned long),
 *                _PGD_OFFSET as C pgd_offset(struct mm_struct*, unsigned long),
 *                _PMD_OFFSET as C pmd_offset(pgd_t*, unsigned long)
 *                _PTE_OFFSET as C pte_offset(pmd_t*, unsigned long)
 *
 * Note: We require an additional temporary register which can be the same as
 *       the register that holds the address.
 *
 * ((pte_t*) ((unsigned long)(pmd_val(*pmd) & PAGE_MASK)) + pte_index(addr))
 *
 */
#define _PGD_INDEX(rt,rs)	extui	rt, rs, PGDIR_SHIFT, 32-PGDIR_SHIFT
#define _PTE_INDEX(rt,rs)	extui	rt, rs, PAGE_SHIFT, PTRS_PER_PTE_SHIFT

#define _PGD_OFFSET(mm,adr,tmp)		l32i	mm, mm, MM_PGD;		\
					_PGD_INDEX(tmp, adr);		\
					addx4	mm, tmp, mm

#define _PTE_OFFSET(pmd,adr,tmp)	_PTE_INDEX(tmp, adr);		\
					srli	pmd, pmd, PAGE_SHIFT;	\
					slli	pmd, pmd, PAGE_SHIFT;	\
					addx4	pmd, tmp, pmd

#else

#define kern_addr_valid(addr)	(1)

extern  void update_mmu_cache(struct vm_area_struct * vma,
			      unsigned long address, pte_t *ptep);

typedef pte_t *pte_addr_t;

void update_mmu_tlb(struct vm_area_struct *vma,
		    unsigned long address, pte_t *ptep);
#define __HAVE_ARCH_UPDATE_MMU_TLB

#endif /* !defined (__ASSEMBLY__) */

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME
/* We provide our own get_unmapped_area to cope with
 * SHM area cache aliasing for userland.
 */
#define HAVE_ARCH_UNMAPPED_AREA

#endif /* _XTENSA_PGTABLE_H */