cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

pgtable.h (18495B)


/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/pgtable-bits.h>
#include <asm/cpu-features.h>

struct mm_struct;
struct vm_area_struct;

#define PAGE_SHARED	vm_get_page_prot(VM_READ|VM_WRITE|VM_SHARED)

#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * If _PAGE_NO_EXEC is not defined, we can't do page protection for
 * execute, and consider it to be the same as read. Also, write
 * permissions imply read permissions. This is the closest we can get
 * by reasonable means.
 */

/*
 * Dummy values to fill the table in mmap.c
 * The real values will be generated at runtime
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)

extern unsigned long _page_cachable_default;
extern void __update_cache(unsigned long address, pte_t pte);

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
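
/*
 * MIPS data caches can be virtually indexed, so a single zero page
 * would alias differently depending on the user virtual address it is
 * mapped at.  empty_zero_page is therefore a run of pages, and
 * zero_page_mask picks the one whose cache color matches vaddr; that
 * is what __HAVE_COLOR_ZERO_PAGE advertises to the generic mm code.
 */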

extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> _PFN_SHIFT;
}

#ifndef CONFIG_MIPS_HUGE_TLB_SUPPORT
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

#define htw_stop()							\
do {									\
	unsigned long __flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(__flags);				\
		if (!raw_current_cpu_data.htw_seq++) {			\
			write_c0_pwctl(read_c0_pwctl() &		\
				       ~(1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(__flags);				\
	}								\
} while(0)

#define htw_start()							\
do {									\
	unsigned long __flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(__flags);				\
		if (!--raw_current_cpu_data.htw_seq) {			\
			write_c0_pwctl(read_c0_pwctl() |		\
				       (1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(__flags);				\
	}								\
} while(0)
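
/*
 * htw_stop()/htw_start() nest: htw_seq counts outstanding stoppers, so
 * the hardware page table walker (the PWEN bit in CP0 PWCtl) is turned
 * off by the first stop and back on by the last matching start.  Code
 * that rewrites live page table entries brackets the update, as
 * pte_clear() below does:
 *
 *	htw_stop();
 *	... modify the PTE(s) ...
 *	htw_start();
 */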

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval);

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#ifdef CONFIG_XPA
# define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
#else
# define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#endif

#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)
#define pte_no_exec(pte)	((pte).pte_low & _PAGE_NO_EXEC)

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

#ifdef CONFIG_XPA
	if (pte.pte_high & _PAGE_GLOBAL) {
#else
	if (pte.pte_low & _PAGE_GLOBAL) {
#endif
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy)) {
			if (!IS_ENABLED(CONFIG_XPA))
				buddy->pte_low |= _PAGE_GLOBAL;
			buddy->pte_high |= _PAGE_GLOBAL;
		}
	}
}
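
/*
 * Each MIPS TLB entry maps an even/odd pair of virtual pages through
 * EntryLo0/EntryLo1, and the entry only behaves as global when the G
 * bit is set in both halves.  ptep_buddy() names the other PTE of the
 * pair, which is why set_pte() and pte_clear() go out of their way to
 * keep _PAGE_GLOBAL consistent across the two.
 */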

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	htw_stop();
	/* Preserve global status for the pair */
	if (IS_ENABLED(CONFIG_XPA)) {
		if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
			null.pte_high = _PAGE_GLOBAL;
	} else {
		if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
			null.pte_low = null.pte_high = _PAGE_GLOBAL;
	}

	set_pte_at(mm, addr, ptep, null);
	htw_start();
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)

/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3K_TLB)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
# if defined(CONFIG_PHYS_ADDR_T_64BIT) && !defined(CONFIG_CPU_MIPS32)
		cmpxchg64(&buddy->pte, 0, _PAGE_GLOBAL);
# else
		cmpxchg(&buddy->pte, 0, _PAGE_GLOBAL);
# endif
	}
#endif
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	htw_stop();
#if !defined(CONFIG_CPU_R3K_TLB)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
	htw_start();
}
#endif

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	if (!pte_present(pteval))
		goto cache_sync_done;

	if (pte_present(*ptep) && (pte_pfn(*ptep) == pte_pfn(pteval)))
		goto cache_sync_done;

	__update_cache(addr, pteval);
cache_sync_done:
	set_pte(ptep, pteval);
}
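
/*
 * set_pte_at() synchronizes caches for the new mapping through
 * __update_cache() before the PTE is installed, but only when the
 * entry is present and actually changes the page frame; a not-present
 * entry or a same-pfn update just stores the PTE.
 */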

/*
 * (pmds are folded into puds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
#endif

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

/*
 * We used to declare this array with size but gcc 3.3 and older are not able
 * to find that this expression is a constant, so the size is dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * Platform specific pte_special() and pte_mkspecial() definitions
 * are required only when ARCH_HAS_PTE_SPECIAL is enabled.
 */
#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_special(pte_t pte)
{
	return pte.pte_low & _PAGE_SPECIAL;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte.pte_low |= _PAGE_SPECIAL;
	return pte;
}
#else
static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}
#endif
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_WRITE;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_MODIFIED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_ACCESSED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_READ;
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (!(pte.pte_low & _PAGE_NO_READ)) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_READ;
		pte.pte_high |= _PAGE_SILENT_READ;
	}
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (!(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}
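
/*
 * MIPS has no hardware-managed accessed/dirty bits: _PAGE_ACCESSED and
 * _PAGE_MODIFIED are pure software state, while the "silent" bits map
 * onto the hardware valid and write-enable (dirty) bits of the TLB
 * entry.  A clean or old page therefore faults on first use, and the
 * fault handler marks it with pte_mkyoung()/pte_mkdirty() as above.
 */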

#define pte_sw_mkyoung	pte_mkyoung

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_val(pmd) & _PAGE_HUGE)
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline bool pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_SOFT_DIRTY);
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#endif

/*
 * Macro to mark a page protection value as "uncacheable".  Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}
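
/*
 * A driver mmap handler would typically apply this before remapping
 * device memory, e.g. (sketch):
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 */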

#define pgprot_writecombine pgprot_writecombine

static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	/* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
	prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;

	return __pgprot(prot);
}

static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
						unsigned long address)
{
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		set_pte_at(vma->vm_mm, address, ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#if defined(CONFIG_XPA)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot) & ~_PFNX_MASK;
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= _PAGE_CHG_MASK;
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot);
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot) & ~_PAGE_CHG_MASK;
	if ((pte_val(pte) & _PAGE_ACCESSED) && !(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}
#endif
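
/*
 * pte_modify() is what protection changes such as mprotect() go
 * through: it keeps the page frame and the bits in _PAGE_CHG_MASK and
 * substitutes the protection bits from the new pgprot.  The generic
 * variant also recomputes the hardware read-valid bit for readable,
 * already-accessed pages, matching pte_mkyoung() above.
 */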

extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	__update_tlb(vma, address, pte);
}

#define	__HAVE_ARCH_UPDATE_MMU_TLB
#define update_mmu_tlb	update_mmu_cache

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
	unsigned long address, pmd_t *pmdp)
{
	pte_t pte = *(pte_t *)pmdp;

	__update_tlb(vma, address, pte);
}
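
/*
 * The generic mm code calls update_mmu_cache() after a fault installs
 * or updates a PTE; on MIPS this preloads the new translation into the
 * TLB via __update_tlb() instead of waiting for a refill exception.
 */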

#define kern_addr_valid(addr)	(1)

/*
 * Allow physical addresses to be fixed up to help 36-bit peripherals.
 */
#ifdef CONFIG_MIPS_FIXUP_BIGPHYS_ADDR
phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size);
int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long vaddr,
		unsigned long pfn, unsigned long size, pgprot_t prot);
#define io_remap_pfn_range io_remap_pfn_range
#else
#define fixup_bigphys_addr(addr, size)	(addr)
#endif /* CONFIG_MIPS_FIXUP_BIGPHYS_ADDR */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine. */
#define pmdp_establish generic_pmdp_establish

#define has_transparent_hugepage has_transparent_hugepage
extern int has_transparent_hugepage(void);

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);

	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;

	if (!(pmd_val(pmd) & _PAGE_NO_READ))
		pmd_val(pmd) |= _PAGE_SILENT_READ;

	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pmd_soft_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_SOFT_DIRTY);
	return pmd;
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
		       (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

	return pmd;
}

/*
 * The generic version pmdp_huge_get_and_clear uses a version of pmd_clear() with a
 * different prototype.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef _PAGE_HUGE
#define pmd_leaf(pmd)	((pmd_val(pmd) & _PAGE_HUGE) != 0)
#define pud_leaf(pud)	((pud_val(pud) & _PAGE_HUGE) != 0)
#endif

#define gup_fast_permitted(start, end)	(!cpu_has_dc_aliases)
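
/*
 * Lockless get_user_pages_fast() is only permitted when the data cache
 * has no virtual aliases; on aliasing CPUs every call falls back to the
 * slow path, presumably so the cache maintenance done there keeps the
 * kernel and user views of the page coherent.
 */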

/*
 * We provide our own get_unmapped_area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* _ASM_PGTABLE_H */