cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

arch/riscv/include/asm/pgtable.h (21673B)


      1/* SPDX-License-Identifier: GPL-2.0-only */
      2/*
      3 * Copyright (C) 2012 Regents of the University of California
      4 */
      5
      6#ifndef _ASM_RISCV_PGTABLE_H
      7#define _ASM_RISCV_PGTABLE_H
      8
      9#include <linux/mmzone.h>
     10#include <linux/sizes.h>
     11
     12#include <asm/pgtable-bits.h>
     13
     14#ifndef CONFIG_MMU
     15#define KERNEL_LINK_ADDR	PAGE_OFFSET
     16#define KERN_VIRT_SIZE		(UL(-1))
     17#else
     18
     19#define ADDRESS_SPACE_END	(UL(-1))
     20
     21#ifdef CONFIG_64BIT
     22/* Leave 2GB for kernel and BPF at the end of the address space */
     23#define KERNEL_LINK_ADDR	(ADDRESS_SPACE_END - SZ_2G + 1)
     24#else
     25#define KERNEL_LINK_ADDR	PAGE_OFFSET
     26#endif
     27
     28/* Number of entries in the page global directory */
     29#define PTRS_PER_PGD    (PAGE_SIZE / sizeof(pgd_t))
     30/* Number of entries in the page table */
     31#define PTRS_PER_PTE    (PAGE_SIZE / sizeof(pte_t))
     32
     33/*
     34 * Half of the kernel address space (half of the entries of the page global
     35 * directory) is for the direct mapping.
     36 */
     37#define KERN_VIRT_SIZE          ((PTRS_PER_PGD / 2 * PGDIR_SIZE) / 2)
     38
     39#define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
     40#define VMALLOC_END      PAGE_OFFSET
     41#define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
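        /*
         * For example, with Sv39 and 4K pages (PTRS_PER_PGD == 512,
         * PGDIR_SIZE == 1GiB) this works out to KERN_VIRT_SIZE == 128GiB,
         * with the 64GiB vmalloc area sitting immediately below PAGE_OFFSET.
         */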
     42
     43#define BPF_JIT_REGION_SIZE	(SZ_128M)
     44#ifdef CONFIG_64BIT
     45#define BPF_JIT_REGION_START	(BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
     46#define BPF_JIT_REGION_END	(MODULES_END)
     47#else
     48#define BPF_JIT_REGION_START	(PAGE_OFFSET - BPF_JIT_REGION_SIZE)
     49#define BPF_JIT_REGION_END	(VMALLOC_END)
     50#endif
     51
     52/* Modules always live before the kernel */
     53#ifdef CONFIG_64BIT
     54/* This is used to define the end of the KASAN shadow region */
     55#define MODULES_LOWEST_VADDR	(KERNEL_LINK_ADDR - SZ_2G)
     56#define MODULES_VADDR		(PFN_ALIGN((unsigned long)&_end) - SZ_2G)
     57#define MODULES_END		(PFN_ALIGN((unsigned long)&_start))
     58#endif
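        /*
         * Keeping modules (and, on 64-bit, the BPF JIT region, which ends at
         * MODULES_END) inside the 2GiB window reserved above means they stay
         * within +/-2GiB of kernel text, presumably so that the PC-relative
         * (auipc-based) addressing of the medany code model can reach them.
         */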
     59
     60/*
     61 * Roughly size the vmemmap space to be large enough to fit enough
     62 * struct pages to map half the virtual address space. Then
     63 * position vmemmap directly below the VMALLOC region.
     64 */
     65#ifdef CONFIG_64BIT
     66#define VA_BITS		(pgtable_l5_enabled ? \
     67				57 : (pgtable_l4_enabled ? 48 : 39))
     68#else
     69#define VA_BITS		32
     70#endif
     71
     72#define VMEMMAP_SHIFT \
     73	(VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
     74#define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
     75#define VMEMMAP_END	VMALLOC_START
     76#define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)
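        /*
         * For example, under Sv39 with a 64-byte struct page
         * (STRUCT_PAGE_MAX_SHIFT == 6): VMEMMAP_SHIFT == 39 - 12 - 1 + 6 == 32,
         * i.e. a 4GiB vmemmap window, which is enough struct pages to describe
         * the 2^26 4K pages making up half of the virtual address space.
         */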
     77
     78/*
     79 * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
     80 * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
     81 */
     82#define vmemmap		((struct page *)VMEMMAP_START)
     83
     84#define PCI_IO_SIZE      SZ_16M
     85#define PCI_IO_END       VMEMMAP_START
     86#define PCI_IO_START     (PCI_IO_END - PCI_IO_SIZE)
     87
     88#define FIXADDR_TOP      PCI_IO_START
     89#ifdef CONFIG_64BIT
     90#define FIXADDR_SIZE     PMD_SIZE
     91#else
     92#define FIXADDR_SIZE     PGDIR_SIZE
     93#endif
     94#define FIXADDR_START    (FIXADDR_TOP - FIXADDR_SIZE)
     95
     96#endif
     97
     98#ifdef CONFIG_XIP_KERNEL
     99#define XIP_OFFSET		SZ_32M
    100#define XIP_OFFSET_MASK		(SZ_32M - 1)
    101#else
    102#define XIP_OFFSET		0
    103#endif
    104
    105#ifndef __ASSEMBLY__
    106
    107#include <asm/page.h>
    108#include <asm/tlbflush.h>
    109#include <linux/mm_types.h>
    110
    111#define __page_val_to_pfn(_val)  (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)
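        /*
         * With the usual _PAGE_PFN_SHIFT of 10 (the low ten bits hold the
         * protection flags), a leaf PTE for physical address 0x80200000
         * carries PFN 0x80200 from bit 10 upwards, i.e.
         * pte_val(pte) == (0x80200UL << 10) | <protection bits>.
         */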
    112
    113#ifdef CONFIG_64BIT
    114#include <asm/pgtable-64.h>
    115#else
    116#include <asm/pgtable-32.h>
    117#endif /* CONFIG_64BIT */
    118
    119#include <linux/page_table_check.h>
    120
    121#ifdef CONFIG_XIP_KERNEL
    122#define XIP_FIXUP(addr) ({							\
    123	uintptr_t __a = (uintptr_t)(addr);					\
    124	(__a >= CONFIG_XIP_PHYS_ADDR && \
    125	 __a < CONFIG_XIP_PHYS_ADDR + XIP_OFFSET * 2) ?	\
    126		__a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET :\
    127		__a;								\
    128	})
    129#else
    130#define XIP_FIXUP(addr)		(addr)
    131#endif /* CONFIG_XIP_KERNEL */
    132
    133struct pt_alloc_ops {
    134	pte_t *(*get_pte_virt)(phys_addr_t pa);
    135	phys_addr_t (*alloc_pte)(uintptr_t va);
    136#ifndef __PAGETABLE_PMD_FOLDED
    137	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
    138	phys_addr_t (*alloc_pmd)(uintptr_t va);
    139	pud_t *(*get_pud_virt)(phys_addr_t pa);
    140	phys_addr_t (*alloc_pud)(uintptr_t va);
    141	p4d_t *(*get_p4d_virt)(phys_addr_t pa);
    142	phys_addr_t (*alloc_p4d)(uintptr_t va);
    143#endif
    144};
    145
    146extern struct pt_alloc_ops pt_ops __initdata;
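        /*
         * pt_ops is the small allocator/translator used while the early boot
         * code (setup_vm() and friends in arch/riscv/mm/init.c) builds the
         * initial page tables; its function pointers are swapped out as boot
         * progresses and the normal memory allocators become available.
         */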
    147
    148#ifdef CONFIG_MMU
    149/* Number of PGD entries that a user-mode program can use */
    150#define USER_PTRS_PER_PGD   (TASK_SIZE / PGDIR_SIZE)
    151
    152/* Page protection bits */
    153#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)
    154
    155#define PAGE_NONE		__pgprot(_PAGE_PROT_NONE | _PAGE_READ)
    156#define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
    157#define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
    158#define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
    159#define PAGE_READ_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
    160#define PAGE_WRITE_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ |	\
    161					 _PAGE_EXEC | _PAGE_WRITE)
    162
    163#define PAGE_COPY		PAGE_READ
    164#define PAGE_COPY_EXEC		PAGE_EXEC
    165#define PAGE_COPY_READ_EXEC	PAGE_READ_EXEC
    166#define PAGE_SHARED		PAGE_WRITE
    167#define PAGE_SHARED_EXEC	PAGE_WRITE_EXEC
    168
    169#define _PAGE_KERNEL		(_PAGE_READ \
    170				| _PAGE_WRITE \
    171				| _PAGE_PRESENT \
    172				| _PAGE_ACCESSED \
    173				| _PAGE_DIRTY \
    174				| _PAGE_GLOBAL)
    175
    176#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
    177#define PAGE_KERNEL_READ	__pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
    178#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL | _PAGE_EXEC)
    179#define PAGE_KERNEL_READ_EXEC	__pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
    180					 | _PAGE_EXEC)
    181
    182#define PAGE_TABLE		__pgprot(_PAGE_TABLE)
    183
    184#define _PAGE_IOREMAP	((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_IO)
    185#define PAGE_KERNEL_IO		__pgprot(_PAGE_IOREMAP)
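        /*
         * _PAGE_IOREMAP swaps the (possibly empty) memory-type field for
         * _PAGE_IO, so ioremap()ed ranges get the non-cacheable,
         * strongly-ordered I/O attribute on hardware with Svpbmt (or the
         * T-Head equivalent); without either, the MT bits are zero and this
         * reduces to a plain kernel mapping.
         */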
    186
    187extern pgd_t swapper_pg_dir[];
    188
    189/* MAP_PRIVATE permissions: xwr (copy-on-write) */
    190#define __P000	PAGE_NONE
    191#define __P001	PAGE_READ
    192#define __P010	PAGE_COPY
    193#define __P011	PAGE_COPY
    194#define __P100	PAGE_EXEC
    195#define __P101	PAGE_READ_EXEC
    196#define __P110	PAGE_COPY_EXEC
    197#define __P111	PAGE_COPY_READ_EXEC
    198
    199/* MAP_SHARED permissions: xwr */
    200#define __S000	PAGE_NONE
    201#define __S001	PAGE_READ
    202#define __S010	PAGE_SHARED
    203#define __S011	PAGE_SHARED
    204#define __S100	PAGE_EXEC
    205#define __S101	PAGE_READ_EXEC
    206#define __S110	PAGE_SHARED_EXEC
    207#define __S111	PAGE_SHARED_EXEC
    208
    209#ifdef CONFIG_TRANSPARENT_HUGEPAGE
    210static inline int pmd_present(pmd_t pmd)
    211{
    212	/*
     213	 * Checking for _PAGE_LEAF is needed too, because when splitting
     214	 * a THP, split_huge_page() will temporarily clear the present
     215	 * bit; in that situation pmd_present() and pmd_trans_huge()
     216	 * still need to return true.
    217	 */
    218	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF));
    219}
    220#else
    221static inline int pmd_present(pmd_t pmd)
    222{
    223	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
    224}
    225#endif
    226
    227static inline int pmd_none(pmd_t pmd)
    228{
    229	return (pmd_val(pmd) == 0);
    230}
    231
    232static inline int pmd_bad(pmd_t pmd)
    233{
    234	return !pmd_present(pmd) || (pmd_val(pmd) & _PAGE_LEAF);
    235}
    236
    237#define pmd_leaf	pmd_leaf
    238static inline int pmd_leaf(pmd_t pmd)
    239{
    240	return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
    241}
    242
    243static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
    244{
    245	*pmdp = pmd;
    246}
    247
    248static inline void pmd_clear(pmd_t *pmdp)
    249{
    250	set_pmd(pmdp, __pmd(0));
    251}
    252
    253static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
    254{
    255	unsigned long prot_val = pgprot_val(prot);
    256
    257	ALT_THEAD_PMA(prot_val);
    258
    259	return __pgd((pfn << _PAGE_PFN_SHIFT) | prot_val);
    260}
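        /*
         * ALT_THEAD_PMA() is a no-op on standard cores; on T-Head cores,
         * which encode page attributes with their own pre-Svpbmt bit layout,
         * the alternatives mechanism patches it to rewrite prot_val in place.
         */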
    261
    262static inline unsigned long _pgd_pfn(pgd_t pgd)
    263{
    264	return pgd_val(pgd) >> _PAGE_PFN_SHIFT;
    265}
    266
    267static inline struct page *pmd_page(pmd_t pmd)
    268{
    269	return pfn_to_page(__page_val_to_pfn(pmd_val(pmd)));
    270}
    271
    272static inline unsigned long pmd_page_vaddr(pmd_t pmd)
    273{
    274	return (unsigned long)pfn_to_virt(__page_val_to_pfn(pmd_val(pmd)));
    275}
    276
    277static inline pte_t pmd_pte(pmd_t pmd)
    278{
    279	return __pte(pmd_val(pmd));
    280}
    281
    282static inline pte_t pud_pte(pud_t pud)
    283{
    284	return __pte(pud_val(pud));
    285}
    286
    287/* Yields the page frame number (PFN) of a page table entry */
    288static inline unsigned long pte_pfn(pte_t pte)
    289{
    290	return __page_val_to_pfn(pte_val(pte));
    291}
    292
    293#define pte_page(x)     pfn_to_page(pte_pfn(x))
    294
    295/* Constructs a page table entry */
    296static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
    297{
    298	unsigned long prot_val = pgprot_val(prot);
    299
    300	ALT_THEAD_PMA(prot_val);
    301
    302	return __pte((pfn << _PAGE_PFN_SHIFT) | prot_val);
    303}
    304
    305#define mk_pte(page, prot)       pfn_pte(page_to_pfn(page), prot)
    306
    307static inline int pte_present(pte_t pte)
    308{
    309	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
    310}
    311
    312static inline int pte_none(pte_t pte)
    313{
    314	return (pte_val(pte) == 0);
    315}
    316
    317static inline int pte_write(pte_t pte)
    318{
    319	return pte_val(pte) & _PAGE_WRITE;
    320}
    321
    322static inline int pte_exec(pte_t pte)
    323{
    324	return pte_val(pte) & _PAGE_EXEC;
    325}
    326
    327static inline int pte_user(pte_t pte)
    328{
    329	return pte_val(pte) & _PAGE_USER;
    330}
    331
    332static inline int pte_huge(pte_t pte)
    333{
    334	return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF);
    335}
    336
    337static inline int pte_dirty(pte_t pte)
    338{
    339	return pte_val(pte) & _PAGE_DIRTY;
    340}
    341
    342static inline int pte_young(pte_t pte)
    343{
    344	return pte_val(pte) & _PAGE_ACCESSED;
    345}
    346
    347static inline int pte_special(pte_t pte)
    348{
    349	return pte_val(pte) & _PAGE_SPECIAL;
    350}
    351
    352/* static inline pte_t pte_rdprotect(pte_t pte) */
    353
    354static inline pte_t pte_wrprotect(pte_t pte)
    355{
    356	return __pte(pte_val(pte) & ~(_PAGE_WRITE));
    357}
    358
    359/* static inline pte_t pte_mkread(pte_t pte) */
    360
    361static inline pte_t pte_mkwrite(pte_t pte)
    362{
    363	return __pte(pte_val(pte) | _PAGE_WRITE);
    364}
    365
    366/* static inline pte_t pte_mkexec(pte_t pte) */
    367
    368static inline pte_t pte_mkdirty(pte_t pte)
    369{
    370	return __pte(pte_val(pte) | _PAGE_DIRTY);
    371}
    372
    373static inline pte_t pte_mkclean(pte_t pte)
    374{
    375	return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
    376}
    377
    378static inline pte_t pte_mkyoung(pte_t pte)
    379{
    380	return __pte(pte_val(pte) | _PAGE_ACCESSED);
    381}
    382
    383static inline pte_t pte_mkold(pte_t pte)
    384{
    385	return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
    386}
    387
    388static inline pte_t pte_mkspecial(pte_t pte)
    389{
    390	return __pte(pte_val(pte) | _PAGE_SPECIAL);
    391}
    392
    393static inline pte_t pte_mkhuge(pte_t pte)
    394{
    395	return pte;
    396}
    397
    398#ifdef CONFIG_NUMA_BALANCING
    399/*
     400 * See the comment in include/linux/pgtable.h
    401 */
    402static inline int pte_protnone(pte_t pte)
    403{
    404	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) == _PAGE_PROT_NONE;
    405}
    406
    407static inline int pmd_protnone(pmd_t pmd)
    408{
    409	return pte_protnone(pmd_pte(pmd));
    410}
    411#endif
    412
    413/* Modify page protection bits */
    414static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
    415{
    416	unsigned long newprot_val = pgprot_val(newprot);
    417
    418	ALT_THEAD_PMA(newprot_val);
    419
    420	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | newprot_val);
    421}
    422
    423#define pgd_ERROR(e) \
    424	pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))
    425
    426
    427/* Commit new configuration to MMU hardware */
    428static inline void update_mmu_cache(struct vm_area_struct *vma,
    429	unsigned long address, pte_t *ptep)
    430{
    431	/*
    432	 * The kernel assumes that TLBs don't cache invalid entries, but
    433	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
    434	 * cache flush; it is necessary even after writing invalid entries.
    435	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
    436	 * the extra traps reduce performance.  So, eagerly SFENCE.VMA.
    437	 */
    438	local_flush_tlb_page(address);
    439}
    440
    441static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
    442		unsigned long address, pmd_t *pmdp)
    443{
    444	pte_t *ptep = (pte_t *)pmdp;
    445
    446	update_mmu_cache(vma, address, ptep);
    447}
    448
    449#define __HAVE_ARCH_PTE_SAME
    450static inline int pte_same(pte_t pte_a, pte_t pte_b)
    451{
    452	return pte_val(pte_a) == pte_val(pte_b);
    453}
    454
    455/*
    456 * Certain architectures need to do special things when PTEs within
    457 * a page table are directly modified.  Thus, the following hook is
    458 * made available.
    459 */
    460static inline void set_pte(pte_t *ptep, pte_t pteval)
    461{
    462	*ptep = pteval;
    463}
    464
    465void flush_icache_pte(pte_t pte);
    466
    467static inline void __set_pte_at(struct mm_struct *mm,
    468	unsigned long addr, pte_t *ptep, pte_t pteval)
    469{
    470	if (pte_present(pteval) && pte_exec(pteval))
    471		flush_icache_pte(pteval);
    472
    473	set_pte(ptep, pteval);
    474}
    475
    476static inline void set_pte_at(struct mm_struct *mm,
    477	unsigned long addr, pte_t *ptep, pte_t pteval)
    478{
    479	page_table_check_pte_set(mm, addr, ptep, pteval);
    480	__set_pte_at(mm, addr, ptep, pteval);
    481}
    482
    483static inline void pte_clear(struct mm_struct *mm,
    484	unsigned long addr, pte_t *ptep)
    485{
    486	__set_pte_at(mm, addr, ptep, __pte(0));
    487}
    488
    489#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
    490static inline int ptep_set_access_flags(struct vm_area_struct *vma,
    491					unsigned long address, pte_t *ptep,
    492					pte_t entry, int dirty)
    493{
    494	if (!pte_same(*ptep, entry))
    495		set_pte_at(vma->vm_mm, address, ptep, entry);
    496	/*
    497	 * update_mmu_cache will unconditionally execute, handling both
    498	 * the case that the PTE changed and the spurious fault case.
    499	 */
    500	return true;
    501}
    502
    503#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
    504static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
    505				       unsigned long address, pte_t *ptep)
    506{
    507	pte_t pte = __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));
    508
    509	page_table_check_pte_clear(mm, address, pte);
    510
    511	return pte;
    512}
    513
    514#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
    515static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
    516					    unsigned long address,
    517					    pte_t *ptep)
    518{
    519	if (!pte_young(*ptep))
    520		return 0;
    521	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
    522}
    523
    524#define __HAVE_ARCH_PTEP_SET_WRPROTECT
    525static inline void ptep_set_wrprotect(struct mm_struct *mm,
    526				      unsigned long address, pte_t *ptep)
    527{
    528	atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
    529}
    530
    531#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
    532static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
    533					 unsigned long address, pte_t *ptep)
    534{
    535	/*
    536	 * This comment is borrowed from x86, but applies equally to RISC-V:
    537	 *
    538	 * Clearing the accessed bit without a TLB flush
    539	 * doesn't cause data corruption. [ It could cause incorrect
    540	 * page aging and the (mistaken) reclaim of hot pages, but the
    541	 * chance of that should be relatively low. ]
    542	 *
    543	 * So as a performance optimization don't flush the TLB when
    544	 * clearing the accessed bit, it will eventually be flushed by
    545	 * a context switch or a VM operation anyway. [ In the rare
    546	 * event of it not getting flushed for a long time the delay
    547	 * shouldn't really matter because there's no real memory
    548	 * pressure for swapout to react to. ]
    549	 */
    550	return ptep_test_and_clear_young(vma, address, ptep);
    551}
    552
    553#define pgprot_noncached pgprot_noncached
    554static inline pgprot_t pgprot_noncached(pgprot_t _prot)
    555{
    556	unsigned long prot = pgprot_val(_prot);
    557
    558	prot &= ~_PAGE_MTMASK;
    559	prot |= _PAGE_IO;
    560
    561	return __pgprot(prot);
    562}
    563
    564#define pgprot_writecombine pgprot_writecombine
    565static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
    566{
    567	unsigned long prot = pgprot_val(_prot);
    568
    569	prot &= ~_PAGE_MTMASK;
    570	prot |= _PAGE_NOCACHE;
    571
    572	return __pgprot(prot);
    573}
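        /*
         * Both helpers clear the memory-type field first. pgprot_noncached()
         * then selects the strongly-ordered I/O type, while
         * pgprot_writecombine() selects _PAGE_NOCACHE, which under Svpbmt is
         * non-cacheable but idempotent and weakly ordered, the closest
         * RISC-V equivalent to write-combining memory.
         */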
    574
    575/*
    576 * THP functions
    577 */
    578static inline pmd_t pte_pmd(pte_t pte)
    579{
    580	return __pmd(pte_val(pte));
    581}
    582
    583static inline pmd_t pmd_mkhuge(pmd_t pmd)
    584{
    585	return pmd;
    586}
    587
    588static inline pmd_t pmd_mkinvalid(pmd_t pmd)
    589{
    590	return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE));
    591}
    592
    593#define __pmd_to_phys(pmd)  (pmd_val(pmd) >> _PAGE_PFN_SHIFT << PAGE_SHIFT)
    594
    595static inline unsigned long pmd_pfn(pmd_t pmd)
    596{
    597	return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
    598}
    599
    600#define __pud_to_phys(pud)  (pud_val(pud) >> _PAGE_PFN_SHIFT << PAGE_SHIFT)
    601
    602static inline unsigned long pud_pfn(pud_t pud)
    603{
    604	return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT);
    605}
    606
    607static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
    608{
    609	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
    610}
    611
    612#define pmd_write pmd_write
    613static inline int pmd_write(pmd_t pmd)
    614{
    615	return pte_write(pmd_pte(pmd));
    616}
    617
    618static inline int pmd_dirty(pmd_t pmd)
    619{
    620	return pte_dirty(pmd_pte(pmd));
    621}
    622
    623static inline int pmd_young(pmd_t pmd)
    624{
    625	return pte_young(pmd_pte(pmd));
    626}
    627
    628static inline int pmd_user(pmd_t pmd)
    629{
    630	return pte_user(pmd_pte(pmd));
    631}
    632
    633static inline pmd_t pmd_mkold(pmd_t pmd)
    634{
    635	return pte_pmd(pte_mkold(pmd_pte(pmd)));
    636}
    637
    638static inline pmd_t pmd_mkyoung(pmd_t pmd)
    639{
    640	return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
    641}
    642
    643static inline pmd_t pmd_mkwrite(pmd_t pmd)
    644{
    645	return pte_pmd(pte_mkwrite(pmd_pte(pmd)));
    646}
    647
    648static inline pmd_t pmd_wrprotect(pmd_t pmd)
    649{
    650	return pte_pmd(pte_wrprotect(pmd_pte(pmd)));
    651}
    652
    653static inline pmd_t pmd_mkclean(pmd_t pmd)
    654{
    655	return pte_pmd(pte_mkclean(pmd_pte(pmd)));
    656}
    657
    658static inline pmd_t pmd_mkdirty(pmd_t pmd)
    659{
    660	return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
    661}
    662
    663static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
    664				pmd_t *pmdp, pmd_t pmd)
    665{
    666	page_table_check_pmd_set(mm, addr, pmdp, pmd);
    667	return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd));
    668}
    669
    670static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
    671				pud_t *pudp, pud_t pud)
    672{
    673	page_table_check_pud_set(mm, addr, pudp, pud);
    674	return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud));
    675}
    676
    677#ifdef CONFIG_PAGE_TABLE_CHECK
    678static inline bool pte_user_accessible_page(pte_t pte)
    679{
    680	return pte_present(pte) && pte_user(pte);
    681}
    682
    683static inline bool pmd_user_accessible_page(pmd_t pmd)
    684{
    685	return pmd_leaf(pmd) && pmd_user(pmd);
    686}
    687
    688static inline bool pud_user_accessible_page(pud_t pud)
    689{
    690	return pud_leaf(pud) && pud_user(pud);
    691}
    692#endif
    693
    694#ifdef CONFIG_TRANSPARENT_HUGEPAGE
    695static inline int pmd_trans_huge(pmd_t pmd)
    696{
    697	return pmd_leaf(pmd);
    698}
    699
    700#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
    701static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
    702					unsigned long address, pmd_t *pmdp,
    703					pmd_t entry, int dirty)
    704{
    705	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
    706}
    707
    708#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
    709static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
    710					unsigned long address, pmd_t *pmdp)
    711{
    712	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
    713}
    714
    715#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
    716static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
    717					unsigned long address, pmd_t *pmdp)
    718{
    719	pmd_t pmd = __pmd(atomic_long_xchg((atomic_long_t *)pmdp, 0));
    720
    721	page_table_check_pmd_clear(mm, address, pmd);
    722
    723	return pmd;
    724}
    725
    726#define __HAVE_ARCH_PMDP_SET_WRPROTECT
    727static inline void pmdp_set_wrprotect(struct mm_struct *mm,
    728					unsigned long address, pmd_t *pmdp)
    729{
    730	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
    731}
    732
    733#define pmdp_establish pmdp_establish
    734static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
    735				unsigned long address, pmd_t *pmdp, pmd_t pmd)
    736{
    737	page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
    738	return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
    739}
    740#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
    741
    742/*
    743 * Encode and decode a swap entry
    744 *
    745 * Format of swap PTE:
    746 *	bit            0:	_PAGE_PRESENT (zero)
     747 *	bits      1 to 3:       _PAGE_LEAF (zero)
    748 *	bit            5:	_PAGE_PROT_NONE (zero)
    749 *	bits      6 to 10:	swap type
     750 *	bits 11 to XLEN-1:	swap offset
    751 */
    752#define __SWP_TYPE_SHIFT	6
    753#define __SWP_TYPE_BITS		5
    754#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
    755#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
    756
    757#define MAX_SWAPFILES_CHECK()	\
    758	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
    759
    760#define __swp_type(x)	(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
    761#define __swp_offset(x)	((x).val >> __SWP_OFFSET_SHIFT)
    762#define __swp_entry(type, offset) ((swp_entry_t) \
    763	{ ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
    764
    765#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
    766#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
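        /*
         * Worked example: __swp_entry(2, 0x1000) yields
         * (2 << 6) | (0x1000 << 11) == 0x800080, from which __swp_type()
         * recovers 2 and __swp_offset() recovers 0x1000.
         */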
    767
    768#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
    769#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
    770#define __swp_entry_to_pmd(swp) __pmd((swp).val)
    771#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
    772
    773/*
    774 * In the RV64 Linux scheme, we give the user half of the virtual-address space
    775 * and give the kernel the other (upper) half.
    776 */
    777#ifdef CONFIG_64BIT
    778#define KERN_VIRT_START	(-(BIT(VA_BITS)) + TASK_SIZE)
    779#else
    780#define KERN_VIRT_START	FIXADDR_START
    781#endif
    782
    783/*
     784 * TASK_SIZE bounds the user virtual address space.
     785 *
     786 * Task size is:
    787 * -     0x9fc00000 (~2.5GB) for RV32.
    788 * -   0x4000000000 ( 256GB) for RV64 using SV39 mmu
    789 * - 0x800000000000 ( 128TB) for RV64 using SV48 mmu
    790 *
    791 * Note that PGDIR_SIZE must evenly divide TASK_SIZE since "RISC-V
    792 * Instruction Set Manual Volume II: Privileged Architecture" states that
    793 * "load and store effective addresses, which are 64bits, must have bits
    794 * 63–48 all equal to bit 47, or else a page-fault exception will occur."
    795 */
    796#ifdef CONFIG_64BIT
    797#define TASK_SIZE_64	(PGDIR_SIZE * PTRS_PER_PGD / 2)
    798#define TASK_SIZE_MIN	(PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)
    799
    800#ifdef CONFIG_COMPAT
    801#define TASK_SIZE_32	(_AC(0x80000000, UL) - PAGE_SIZE)
    802#define TASK_SIZE	(test_thread_flag(TIF_32BIT) ? \
    803			 TASK_SIZE_32 : TASK_SIZE_64)
    804#else
    805#define TASK_SIZE	TASK_SIZE_64
    806#endif
    807
    808#else
    809#define TASK_SIZE	FIXADDR_START
    810#define TASK_SIZE_MIN	TASK_SIZE
    811#endif
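        /*
         * Sanity check against the table above: with Sv39, PGDIR_SIZE == 1GiB
         * and PTRS_PER_PGD == 512, so TASK_SIZE_64 == 256GiB == 0x4000000000;
         * with Sv48, PGDIR_SIZE == 512GiB, giving 128TiB == 0x800000000000.
         */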
    812
    813#else /* CONFIG_MMU */
    814
    815#define PAGE_SHARED		__pgprot(0)
    816#define PAGE_KERNEL		__pgprot(0)
    817#define swapper_pg_dir		NULL
    818#define TASK_SIZE		0xffffffffUL
    819#define VMALLOC_START		0
    820#define VMALLOC_END		TASK_SIZE
    821
    822#endif /* !CONFIG_MMU */
    823
    824#define kern_addr_valid(addr)   (1) /* FIXME */
    825
    826extern char _start[];
    827extern void *_dtb_early_va;
    828extern uintptr_t _dtb_early_pa;
    829#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU)
    830#define dtb_early_va	(*(void **)XIP_FIXUP(&_dtb_early_va))
    831#define dtb_early_pa	(*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa))
    832#else
    833#define dtb_early_va	_dtb_early_va
    834#define dtb_early_pa	_dtb_early_pa
    835#endif /* CONFIG_XIP_KERNEL */
    836extern u64 satp_mode;
    837extern bool pgtable_l4_enabled;
    838
    839void paging_init(void);
    840void misc_mem_init(void);
    841
    842/*
    843 * ZERO_PAGE is a global shared page that is always zero,
    844 * used for zero-mapped memory areas, etc.
    845 */
    846extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
    847#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
    848
    849#endif /* !__ASSEMBLY__ */
    850
    851#endif /* _ASM_RISCV_PGTABLE_H */