cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

pgtable.h (10464B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/pgtable.h
 *
 *  Copyright (C) 1995-2002 Russell King
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <linux/const.h>
#include <asm/proc-fns.h>

#ifndef CONFIG_MMU

#include <asm-generic/pgtable-nopud.h>
#include <asm/pgtable-nommu.h>

#else

#include <asm-generic/pgtable-nopud.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>


#include <asm/tlbflush.h>

#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET		(8*1024*1024)
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END		0xff800000UL
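
The VMALLOC_START expression rounds (high_memory + VMALLOC_OFFSET) down to
an 8MB boundary, so the area always starts 8MB-aligned and strictly above
high_memory. A toy worked example, with an assumed value for high_memory:

/* Assume high_memory == 0xc0000000 (hypothetical):
 *   (0xc0000000 + 0x00800000) & ~0x007fffff == 0xc0800000
 * VMALLOC_START lands 8MB-aligned; the hole above high_memory is a full
 * 8MB here, and somewhat less when high_memory is not 8MB-aligned.
 */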

#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, pte_t);
extern void __pmd_error(const char *file, int line, pmd_t);
extern void __pgd_error(const char *file, int line, pgd_t);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte)
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd)
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd)

/*
 * This is the lowest virtual address at which we can permit any user
 * space mapping.  This is particularly important for non-high vector
 * CPUs, i.e. those whose exception vectors live at virtual address 0
 * rather than 0xffff0000.
 */
#define FIRST_USER_ADDRESS	(PAGE_SIZE * 2)

/*
 * Use TASK_SIZE as the ceiling argument for free_pgtables() and
 * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd
 * page shared between user and kernel).
 */
#ifdef CONFIG_ARM_LPAE
#define USER_PGTABLES_CEILING	TASK_SIZE
#endif

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable and bufferable bits based on memory policy,
 * as well as any architecture dependent bits like global/ASID and SMP
 * shared mapping bits.
 */
#define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG

extern pgprot_t		pgprot_user;
extern pgprot_t		pgprot_kernel;

#define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))

#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE)
#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC	pgprot_kernel

#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
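
These PAGE_* values are therefore not compile-time constants: pgprot_user
and pgprot_kernel are filled in at boot (in build_mem_type_table()) with
the cacheability bits for the detected CPU, and _MOD_PROT simply ORs the
per-mapping policy bits on top. A minimal user-space sketch of that
composition, using made-up bit positions rather than the real L_PTE_*
values:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t pteval_t;		/* toy stand-in for the kernel type */

#define L_PTE_PRESENT	(1u << 0)	/* hypothetical bit positions */
#define L_PTE_YOUNG	(1u << 1)
#define L_PTE_MT_WB	(3u << 2)	/* "write-back" memory-type bits */
#define L_PTE_RDONLY	(1u << 7)
#define L_PTE_USER	(1u << 8)
#define L_PTE_XN	(1u << 9)

/* Mirrors _MOD_PROT: OR extra policy bits onto a runtime base. */
static pteval_t mod_prot(pteval_t base, pteval_t bits)
{
	return base | bits;
}

int main(void)
{
	/* The kernel computes this base per-CPU at boot; we fake it here. */
	pteval_t pgprot_user = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_MT_WB;
	pteval_t page_readonly = mod_prot(pgprot_user,
					  L_PTE_USER | L_PTE_RDONLY | L_PTE_XN);

	printf("PAGE_READONLY = %#x\n", (unsigned)page_readonly);
	return 0;
}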

#define __pgprot_modify(prot,mask,bits)		\
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_noncached(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)

#define pgprot_stronglyordered(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_device(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_SHARED | L_PTE_SHARED | L_PTE_DIRTY | L_PTE_XN)

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
#endif
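
A typical consumer of these modifiers is a driver's mmap handler, which
adjusts vm_page_prot before establishing the mapping. A sketch under
assumed names (mydev_mmap and mydev_phys_base are hypothetical):

#include <linux/fs.h>
#include <linux/mm.h>

static phys_addr_t mydev_phys_base;	/* hypothetical device base address */

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = mydev_phys_base >> PAGE_SHIFT;

	/* Map the region uncached so accesses really reach the device. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}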

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000  __PAGE_NONE
#define __P001  __PAGE_READONLY
#define __P010  __PAGE_COPY
#define __P011  __PAGE_COPY
#define __P100  __PAGE_READONLY_EXEC
#define __P101  __PAGE_READONLY_EXEC
#define __P110  __PAGE_COPY_EXEC
#define __P111  __PAGE_COPY_EXEC

#define __S000  __PAGE_NONE
#define __S001  __PAGE_READONLY
#define __S010  __PAGE_SHARED
#define __S011  __PAGE_SHARED
#define __S100  __PAGE_READONLY_EXEC
#define __S101  __PAGE_READONLY_EXEC
#define __S110  __PAGE_SHARED_EXEC
#define __S111  __PAGE_SHARED_EXEC
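
These tables are consumed via protection_map[]: VM_READ, VM_WRITE, VM_EXEC
and VM_SHARED occupy the low four bits of vm_flags, so they index a
16-entry array whose first half is __P000..__P111 (private mappings, where
write means copy-on-write) and whose second half is __S000..__S111 (shared
mappings, where a write really writes). Roughly what mm/mmap.c does with
them (a sketch, not the verbatim code):

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return protection_map[vm_flags &
			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}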

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)


extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

#define pud_page(pud)		pmd_page(__pmd(pud_val(pud)))
#define pud_write(pud)		pmd_write(__pmd(pud_val(pud)))

#define pmd_none(pmd)		(!pmd_val(pmd))

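/*
 * Note the (s32)PAGE_MASK cast below: with LPAE, pmd_val() and PHYS_MASK
 * are 64 bits wide, and sign-extending the 32-bit PAGE_MASK preserves
 * the high physical-address bits while still clearing the low in-page
 * offset bits.
 */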
static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))

#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page), prot)

#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)

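/*
 * pte_isset() special-cases bits above bit 31 (possible with LPAE's
 * 64-bit pteval_t): "(u32)(val) == (val)" is a constant-foldable check
 * that the tested bits fit in 32 bits.  If they do not, the result is
 * collapsed to 0/1 with !! so it survives truncation to a 32-bit type
 * in callers.  pte_isclear() needs no such trick since ! already
 * yields 0/1.
 */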
#define pte_isset(pte, val)	((u32)(val) == (val) ? pte_val(pte) & (val) \
						: !!(pte_val(pte) & (val)))
#define pte_isclear(pte, val)	(!(pte_val(pte) & (val)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_present(pte)	(pte_isset((pte), L_PTE_PRESENT))
#define pte_valid(pte)		(pte_isset((pte), L_PTE_VALID))
#define pte_accessible(mm, pte)	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
#define pte_write(pte)		(pte_isclear((pte), L_PTE_RDONLY))
#define pte_dirty(pte)		(pte_isset((pte), L_PTE_DIRTY))
#define pte_young(pte)		(pte_isset((pte), L_PTE_YOUNG))
#define pte_exec(pte)		(pte_isclear((pte), L_PTE_XN))

#define pte_valid_user(pte)	\
	(pte_valid(pte) && pte_isset((pte), L_PTE_USER) && pte_young(pte))

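/*
 * Note the inverted write test below: ARM has no "writable" PTE bit,
 * only L_PTE_RDONLY.  Including L_PTE_RDONLY in the mask while leaving
 * it out of 'needed' requires that bit to be clear for write access to
 * be permitted.
 */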
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	pteval_t mask = L_PTE_PRESENT | L_PTE_USER;
	pteval_t needed = mask;

	if (write)
		mask |= L_PTE_RDONLY;

	return (pte_val(pte) & mask) == needed;
}
#define pte_access_permitted pte_access_permitted

#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
}
#else
extern void __sync_icache_dcache(pte_t pteval);
#endif

void set_pte_at(struct mm_struct *mm, unsigned long addr,
		      pte_t *ptep, pte_t pteval);

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_DIRTY));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_DIRTY));
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_YOUNG));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_YOUNG));
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_XN));
}

static inline pte_t pte_mknexec(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_XN));
}
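
The mk*() helpers above compose freely, since each takes and returns a
pte_t by value. A hypothetical illustration (page, mm, addr and ptep are
assumed to exist in the surrounding code):

	pte_t pte = mk_pte(page, PAGE_SHARED);

	/* Make it writable, dirty and recently used, then install it. */
	pte = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte)));
	set_pte_at(mm, addr, ptep, pte);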

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
		L_PTE_NONE | L_PTE_VALID;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
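/*
 * Note that pte_modify() only swaps the protection-related bits (XN,
 * RDONLY, USER, NONE, VALID): L_PTE_DIRTY, L_PTE_YOUNG and the
 * memory-type bits fall outside the mask and are preserved, so a
 * protection change does not lose recorded accessed/dirty state.
 */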

/*
 * Encode and decode a swap entry.  Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset ------------------------> < type -> 0 0
 *
 * This gives us up to 31 swap files and 128GB per swap file.  Note that
 * the offset field is always non-zero.
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	__pte((swp).val | PTE_TYPE_FAULT)
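
A worked example of the encoding, with assumed values type 3 and offset
0x1234:

/* __swp_entry(3, 0x1234).val == (3 << 2) | (0x1234 << 7) == 0x00091a0c
 * __swp_type():   (0x91a0c >> 2) & 0x1f == 3
 * __swp_offset():  0x91a0c >> 7         == 0x1234
 * Bits [1:0] stay zero, which the MMU treats as a "fault" entry;
 * __swp_entry_to_pte() makes that explicit by ORing in PTE_TYPE_FAULT,
 * so a swapped-out PTE can never be mistaken for a valid mapping.
 */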

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */