cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

pgtable.h (34879B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <linux/mem_encrypt.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))
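
/*
 * Illustrative example (not part of this header): a driver mapping MMIO
 * registers would typically combine this with a kernel protection, e.g.
 *
 *	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
 *
 * On everything newer than a 386 this ORs in the UC- cache mode, which
 * behaves as strong uncacheable unless an MTRR overrides it to WC; on a
 * 386 the protection is returned unchanged.
 */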

#ifndef __ASSEMBLY__
#include <linux/spinlock.h>
#include <asm/x86_init.h>
#include <asm/pkru.h>
#include <asm/fpu/api.h>
#include <asm/coco.h>
#include <asm-generic/pgtable_uffd.h>
#include <linux/page_table_check.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd);

void ptdump_walk_pgd_level(struct seq_file *m, struct mm_struct *mm);
void ptdump_walk_pgd_level_debugfs(struct seq_file *m, struct mm_struct *mm,
				   bool user);
void ptdump_walk_pgd_level_checkwx(void);
void ptdump_walk_user_pgd_level_checkwx(void);

/*
 * Macros to add or remove encryption attribute
 */
#define pgprot_encrypted(prot)	__pgprot(cc_mkenc(pgprot_val(prot)))
#define pgprot_decrypted(prot)	__pgprot(cc_mkdec(pgprot_val(prot)))

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx()		ptdump_walk_pgd_level_checkwx()
#define debug_checkwx_user()	ptdump_walk_user_pgd_level_checkwx()
#else
#define debug_checkwx()		do { } while (0)
#define debug_checkwx_user()	do { } while (0)
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) ((void)(vaddr),virt_to_page(empty_zero_page))
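
/*
 * For example, the first read fault on an untouched anonymous mapping
 * is satisfied by mapping ZERO_PAGE(addr) read-only; a private page is
 * only allocated later, on the first write (copy-on-write).
 */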

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

extern pmdval_t early_pmd_flags;

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT_XXL */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_P4D_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			(pgtable_l5_enabled() ? native_pgd_clear(pgd) : 0)
#endif

#ifndef set_p4d
# define set_p4d(p4dp, p4d)		native_set_p4d(p4dp, p4d)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define p4d_clear(p4d)			native_p4d_clear(p4d)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_P4D_FOLDED
#define p4d_val(x)	native_p4d_val(x)
#define __p4d(x)	native_make_p4d(x)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while(0)
#endif	/* CONFIG_PARAVIRT_XXL */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pud_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_DIRTY;
}

static inline int pud_young(pud_t pud)
{
	return pud_flags(pud) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

/* Entries that were set to PROT_NONE are inverted */

static inline u64 protnone_mask(u64 val);

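/*
 * Background (L1TF mitigation): for PROT_NONE (non-present) entries
 * protnone_mask() evaluates to all ones on affected CPUs, so the
 * "pfn ^= protnone_mask(pfn)" below undoes the PFN inversion that
 * pfn_pte() and friends applied when the entry was created.  Inverting
 * the PFN keeps the physical address stored in a non-present entry
 * pointing at non-existent memory, which defeats L1 Terminal Fault
 * speculation.
 */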
static inline unsigned long pte_pfn(pte_t pte)
{
	phys_addr_t pfn = pte_val(pte);
	pfn ^= protnone_mask(pfn);
	return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	phys_addr_t pfn = pmd_val(pmd);
	pfn ^= protnone_mask(pfn);
	return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	phys_addr_t pfn = pud_val(pud);
	pfn ^= protnone_mask(pfn);
	return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define p4d_leaf	p4d_large
static inline int p4d_large(p4d_t p4d)
{
	/* No 512 GiB pages yet */
	return 0;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

#define pmd_leaf	pmd_large
static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* NOTE: when testing for a huge page, consider pmd_devmap as well, or use pmd_large */
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_trans_huge(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}
#endif

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_devmap(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_DEVMAP);
}
#else
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
#endif

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline int pte_uffd_wp(pte_t pte)
{
	return pte_flags(pte) & _PAGE_UFFD_WP;
}

static inline pte_t pte_mkuffd_wp(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_UFFD_WP);
}

static inline pte_t pte_clear_uffd_wp(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_UFFD_WP);
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v & ~clear);
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline int pmd_uffd_wp(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_UFFD_WP;
}

static inline pmd_t pmd_mkuffd_wp(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_UFFD_WP);
}

static inline pmd_t pmd_clear_uffd_wp(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_UFFD_WP);
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v | set);
}

static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v & ~clear);
}

static inline pud_t pud_mkold(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkclean(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_DIRTY);
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mkdevmap(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DEVMAP);
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_PSE);
}

static inline pud_t pud_mkyoung(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_RW);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline int pud_soft_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mksoft_dirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_clear_soft_dirty(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}
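
/*
 * For example, on a CPU without NX, __supported_pte_mask has _PAGE_NX
 * cleared, so a present pgprot requesting _PAGE_NX comes back without
 * it.  Non-present entries (such as swap entries) reuse the ignored
 * bits for their own encoding and are passed through untouched.
 */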

static inline pgprotval_t check_pgprot(pgprot_t pgprot)
{
	pgprotval_t massaged_val = massage_pgprot(pgprot);

	/* mmdebug.h can not be included here because of dependencies */
#ifdef CONFIG_DEBUG_VM
	WARN_ONCE(pgprot_val(pgprot) != massaged_val,
		  "attempted to set unsupported pgprot: %016llx "
		  "bits: %016llx supported: %016llx\n",
		  (u64)pgprot_val(pgprot),
		  (u64)pgprot_val(pgprot) ^ massaged_val,
		  (u64)__supported_pte_mask);
#endif

	return massaged_val;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PTE_PFN_MASK;
	return __pte(pfn | check_pgprot(pgprot));
}
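
/*
 * Construction is the inverse of pte_pfn(): shift the PFN into place,
 * invert it if the protection is PROT_NONE, mask, then OR in the
 * (checked) protection bits.  mk_pte() further down is simply
 * pfn_pte(page_to_pfn(page), pgprot).
 */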

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PMD_PAGE_MASK;
	return __pmd(pfn | check_pgprot(pgprot));
}

static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PUD_PAGE_MASK;
	return __pud(pfn | check_pgprot(pgprot));
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	return pfn_pmd(pmd_pfn(pmd),
		      __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}

static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte), oldval = val;

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= check_pgprot(newprot) & ~_PAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
	return __pte(val);
}
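
/*
 * In other words: everything covered by _PAGE_CHG_MASK (the PFN plus
 * bits such as the PWT/PCD cachemode bits and accessed/dirty) is kept
 * from the old entry and everything else is taken from newprot, so
 * mprotect() can change permissions without clobbering the PFN or the
 * cachemode of an existing mapping.
 */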

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd), oldval = val;

	val &= _HPAGE_CHG_MASK;
	val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
	return __pmd(val);
}

/*
 * mprotect needs to preserve PAT and encryption bits when updating
 * vm_page_prot
 */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot) & ~_PAGE_CHG_MASK;
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))
#define p4d_pgprot(x) __pgprot(p4d_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);

#ifdef CONFIG_PAGE_TABLE_ISOLATION
pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd);

/*
 * Take a PGD location (pgdp) and a pgd value that needs to be set there.
 * Populates the user copy and returns the resulting PGD that must be set
 * in the kernel copy of the page tables.
 */
static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	if (!static_cpu_has(X86_FEATURE_PTI))
		return pgd;
	return __pti_set_user_pgtbl(pgdp, pgd);
}
#else   /* CONFIG_PAGE_TABLE_ISOLATION */
static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	return pgd;
}
#endif  /* CONFIG_PAGE_TABLE_ISOLATION */
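
/*
 * With PTI each mm has two top-level page tables: the kernel runs on
 * the full PGD while userspace runs on a shadow PGD that maps little
 * more than the entry code.  Updates to user-half PGD entries are
 * funneled through pti_set_user_pgtbl() so that both copies stay in
 * sync; without PTI (or on CPUs that do not need it) the helper is a
 * plain pass-through.
 */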

#endif	/* __ASSEMBLY__ */


#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>
#include <asm/fixmap.h>

static inline int pte_none(pte_t pte)
{
	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
			atomic_read(&mm->tlb_flush_pending))
		return true;

	return false;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	unsigned long val = native_pmd_val(pmd);
	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_pfn(pud))

#define pud_leaf	pud_large
static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
#define pud_leaf	pud_large
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3
static inline int p4d_none(p4d_t p4d)
{
	return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_flags(p4d) & _PAGE_PRESENT;
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER;

	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (p4d_flags(p4d) & ~ignore_flags) != 0;
}
#endif  /* CONFIG_PGTABLE_LEVELS > 3 */

static inline unsigned long p4d_index(unsigned long address)
{
	return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
}

#if CONFIG_PGTABLE_LEVELS > 4
static inline int pgd_present(pgd_t pgd)
{
	if (!pgtable_l5_enabled())
		return 1;
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))

/* to find an entry in a page-table-directory. */
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	if (!pgtable_l5_enabled())
		return (p4d_t *)pgd;
	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	unsigned long ignore_flags = _PAGE_USER;

	if (!pgtable_l5_enabled())
		return 0;

	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	if (!pgtable_l5_enabled())
		return 0;
	/*
	 * There is no need to do a workaround for the KNL stray
	 * A/D bit erratum here.  PGDs only point to page tables
	 * except on 32-bit non-PAE which is not supported on
	 * KNL.
	 */
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 4 */

#endif	/* __ASSEMBLY__ */

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);
extern void memblock_find_dma_reserve(void);
void __init poking_init(void);
unsigned long init_memory_mapping(unsigned long start,
				  unsigned long end, pgprot_t prot);

#ifdef CONFIG_X86_64
extern pgd_t trampoline_pgd_entry;
#endif

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
{
	pud_t res = *pudp;

	native_pud_clear(pudp);
	return res;
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	page_table_check_pte_set(mm, addr, ptep, pte);
	set_pte(ptep, pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(mm, addr, pmdp, pmd);
	set_pmd(pmdp, pmd);
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(mm, addr, pudp, pud);
	native_set_pud(pudp, pud);
}

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	page_table_check_pte_clear(mm, addr, pte);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
		page_table_check_pte_clear(mm, addr, pte);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);
extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pud_t *pudp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);


#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmdp)
{
	pmd_t pmd = native_pmdp_get_and_clear(pmdp);

	page_table_check_pmd_clear(mm, addr, pmd);

	return pmd;
}

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					unsigned long addr, pud_t *pudp)
{
	pud_t pud = native_pudp_get_and_clear(pudp);

	page_table_check_pud_clear(mm, addr, pud);

	return pud;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return pud_flags(pud) & _PAGE_RW;
}

#ifndef pmdp_establish
#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
	if (IS_ENABLED(CONFIG_SMP)) {
		return xchg(pmdp, pmd);
	} else {
		pmd_t old = *pmdp;
		WRITE_ONCE(*pmdp, pmd);
		return old;
	}
}
#endif

#define __HAVE_ARCH_PMDP_INVALIDATE_AD
extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma,
				unsigned long address, pmd_t *pmdp);

/*
 * Page table pages are page-aligned.  The lower half of the top
 * level is used for userspace and the top half for the kernel.
 *
 * Returns true for parts of the PGD that map userspace and
 * false for the parts that map the kernel.
 */
static inline bool pgdp_maps_userspace(void *__ptr)
{
	unsigned long ptr = (unsigned long)__ptr;

	return (((ptr & ~PAGE_MASK) / sizeof(pgd_t)) < PGD_KERNEL_START);
}
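
/*
 * For example, with 4-level paging and 8-byte PGD entries the kernel
 * half starts at slot 256 (PGD_KERNEL_START), so pointers whose offset
 * within the PGD page is below 0x800 map userspace.
 */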

#define pgd_leaf	pgd_large
static inline int pgd_large(pgd_t pgd) { return 0; }

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages
 * (8k-aligned and 8k in size).  The kernel one is at the beginning 4k and
 * the user one is in the last 4k.  To switch between them, you
 * just need to flip the 12th bit in their addresses.
 */
#define PTI_PGTABLE_SWITCH_BIT	PAGE_SHIFT
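
/*
 * Worked example: a kernel PGD at ...aa000 (bit 12 clear, since the
 * kernel half occupies the first 4k of the 8k allocation) has its user
 * counterpart at ...ab000; kernel_to_user_pgdp()/user_to_kernel_pgdp()
 * below just set or clear that one bit.
 */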

/*
 * This generates better code than the inline assembly in
 * __set_bit().
 */
static inline void *ptr_set_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr |= BIT(bit);
	return (void *)__ptr;
}
static inline void *ptr_clear_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr &= ~BIT(bit);
	return (void *)__ptr;
}

static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
{
	return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
{
	return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
{
	return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
{
	return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}
#endif /* CONFIG_PAGE_TABLE_ISOLATION */

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;
	/* Clone the user space pgd as well */
	memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src),
	       count * sizeof(pgd_t));
#endif
}

#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}
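
/*
 * Worked example for 64-bit, where PTE_SHIFT is ilog2(512) == 9:
 * PG_LEVEL_4K gives a shift of 3 + 1*9 == 12 (4 KiB), PG_LEVEL_2M
 * gives 3 + 2*9 == 21 (2 MiB) and PG_LEVEL_1G gives 30 (1 GiB).
 * page_level_mask() is then just the complement, e.g. ~(2 MiB - 1).
 */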

/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}
static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
		unsigned long addr, pud_t *pud)
{
}
#ifdef _PAGE_SWP_EXCLUSIVE
#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_EXCLUSIVE);
}

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_EXCLUSIVE);
}
#endif /* _PAGE_SWP_EXCLUSIVE */

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}
#endif
#endif

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline pte_t pte_swp_mkuffd_wp(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_UFFD_WP);
}

static inline int pte_swp_uffd_wp(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_UFFD_WP;
}

static inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_UFFD_WP);
}

static inline pmd_t pmd_swp_mkuffd_wp(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SWP_UFFD_WP);
}

static inline int pmd_swp_uffd_wp(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SWP_UFFD_WP;
}

static inline pmd_t pmd_swp_clear_uffd_wp(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SWP_UFFD_WP);
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/* ifdef to avoid doing 59-bit shift on 32-bit values */
	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
	return 0;
#endif
}

static inline bool __pkru_allows_pkey(u16 pkey, bool write)
{
	u32 pkru = read_pkru();

	if (!__pkru_allows_read(pkru, pkey))
		return false;
	if (write && !__pkru_allows_write(pkru, pkey))
		return false;

	return true;
}

/*
 * 'pteval' can come from a PTE, PMD or PUD.  We only check
 * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
 * same value on all 3 types.
 */
static inline bool __pte_access_permitted(unsigned long pteval, bool write)
{
	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;

	if (write)
		need_pte_bits |= _PAGE_RW;

	if ((pteval & need_pte_bits) != need_pte_bits)
		return 0;

	return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
}
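
/*
 * Net effect: a lockless lookup (e.g. get_user_pages_fast()) succeeds
 * only if the entry is present and user-accessible, is writable when a
 * write was requested, and the current PKRU value does not deny the
 * entry's protection key.
 */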

#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	return __pte_access_permitted(pte_val(pte), write);
}

#define pmd_access_permitted pmd_access_permitted
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
{
	return __pte_access_permitted(pmd_val(pmd), write);
}

#define pud_access_permitted pud_access_permitted
static inline bool pud_access_permitted(pud_t pud, bool write)
{
	return __pte_access_permitted(pud_val(pud), write);
}

#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);

static inline bool arch_has_pfn_modify_check(void)
{
	return boot_cpu_has_bug(X86_BUG_L1TF);
}

#define arch_faults_on_old_pte arch_faults_on_old_pte
static inline bool arch_faults_on_old_pte(void)
{
	return false;
}

#ifdef CONFIG_PAGE_TABLE_CHECK
static inline bool pte_user_accessible_page(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PRESENT) && (pte_val(pte) & _PAGE_USER);
}

static inline bool pmd_user_accessible_page(pmd_t pmd)
{
	return pmd_leaf(pmd) && (pmd_val(pmd) & _PAGE_PRESENT) && (pmd_val(pmd) & _PAGE_USER);
}

static inline bool pud_user_accessible_page(pud_t pud)
{
	return pud_leaf(pud) && (pud_val(pud) & _PAGE_PRESENT) && (pud_val(pud) & _PAGE_USER);
}
#endif

#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */