cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

pgalloc.h (5306B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGALLOC_H
#define _ASM_X86_PGALLOC_H

#include <linux/threads.h>
#include <linux/mm.h>		/* for struct page */
#include <linux/pagemap.h>

#define __HAVE_ARCH_PTE_ALLOC_ONE
#define __HAVE_ARCH_PGD_FREE
#include <asm-generic/pgalloc.h>

static inline int  __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; }

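/*
 * The paravirt_* page-table hooks below let a hypervisor-aware kernel
 * (CONFIG_PARAVIRT_XXL, e.g. Xen PV) learn about page-table pages as they
 * are allocated and released.  On bare metal the stubs compile to nothing,
 * so the populate helpers further down cost only the set_*() write itself.
 */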
#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#define paravirt_pgd_alloc(mm)	__paravirt_pgd_alloc(mm)
static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {}
static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)	{}
static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)	{}
static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
					    unsigned long start, unsigned long count) {}
static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)	{}
static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)	{}
static inline void paravirt_release_pte(unsigned long pfn) {}
static inline void paravirt_release_pmd(unsigned long pfn) {}
static inline void paravirt_release_pud(unsigned long pfn) {}
static inline void paravirt_release_p4d(unsigned long pfn) {}
#endif

/*
 * Flags to use when allocating a user page table page.
 */
extern gfp_t __userpte_alloc_gfp;

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Instead of one PGD, we acquire two PGDs.  Being order-1, it is
 * both 8k in size and 8k-aligned.  That lets us just flip bit 12
 * in a pointer to swap between the two 4k halves.
 */
#define PGD_ALLOCATION_ORDER 1
#else
#define PGD_ALLOCATION_ORDER 0
#endif
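/*
 * Illustration (not part of the interface here) of why order-1 matters for
 * page table isolation: the kernel half and the user half of the PGD differ
 * only in bit 12 of their address, so switching between them is a single
 * bit flip, roughly:
 *
 *	pgd_t *user_pgd = (pgd_t *)((unsigned long)kernel_pgd | (1UL << 12));
 *
 * The real helpers for this live in <asm/pgtable.h> as
 * kernel_to_user_pgdp() and user_to_kernel_pgdp().
 */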

/*
 * Allocate and free page tables.
 */
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

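/*
 * __HAVE_ARCH_PTE_ALLOC_ONE above tells <asm-generic/pgalloc.h> that x86
 * supplies its own pte_alloc_one(); the out-of-line definition (in
 * arch/x86/mm/pgtable.c in mainline) allocates user PTE pages with
 * __userpte_alloc_gfp rather than the generic GFP flags.
 */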
extern pgtable_t pte_alloc_one(struct mm_struct *);

extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte);

static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
				  unsigned long address)
{
	___pte_free_tlb(tlb, pte);
}

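/*
 * The *_populate() helpers below all follow the same pattern: notify the
 * paravirt layer of the lower-level table's pfn, then write an entry that
 * combines the table's physical address with _PAGE_TABLE (present, writable,
 * user, accessed, dirty).  The *_safe() variants use the set_*_safe()
 * writers, which complain if the write would replace an already-present,
 * different entry.  pmd_populate_kernel() takes a kernel pte_t * directly,
 * while pmd_populate() takes the struct page of a user PTE page.
 */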
static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *pte)
{
	paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
	set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
}

static inline void pmd_populate_kernel_safe(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *pte)
{
	paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
	set_pmd_safe(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				struct page *pte)
{
	unsigned long pfn = page_to_pfn(pte);

	paravirt_alloc_pte(mm, pfn);
	set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
}

#if CONFIG_PGTABLE_LEVELS > 2
extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long address)
{
	___pmd_free_tlb(tlb, pmd);
}

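/*
 * With PAE the four top-level (PDPT) entries are loaded by the CPU and are
 * mostly reserved apart from the present bit, so pud_populate() needs extra
 * care (including a TLB flush) and is defined out of line, in
 * arch/x86/mm/pgtable.c in mainline.  Without PAE the plain inline version
 * below suffices.
 */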
#ifdef CONFIG_X86_PAE
extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
#else	/* !CONFIG_X86_PAE */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
	set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
}

static inline void pud_populate_safe(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
	set_pud_safe(pud, __pud(_PAGE_TABLE | __pa(pmd)));
}
#endif	/* CONFIG_X86_PAE */

#if CONFIG_PGTABLE_LEVELS > 3
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
	set_p4d(p4d, __p4d(_PAGE_TABLE | __pa(pud)));
}

static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
	set_p4d_safe(p4d, __p4d(_PAGE_TABLE | __pa(pud)));
}

extern void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);

static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long address)
{
	___pud_free_tlb(tlb, pud);
}

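/*
 * With CONFIG_PGTABLE_LEVELS > 4 the kernel is built for 5-level paging but
 * may still boot on 4-level hardware.  In that case pgtable_l5_enabled() is
 * false, the p4d level is folded into the pgd at runtime, and the pgd-level
 * populate/free helpers below become no-ops.
 */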
#if CONFIG_PGTABLE_LEVELS > 4
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	if (!pgtable_l5_enabled())
		return;
	paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
	set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
}

static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	if (!pgtable_l5_enabled())
		return;
	paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
	set_pgd_safe(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
}

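/*
 * User page tables are charged to the allocating task's memory cgroup via
 * __GFP_ACCOUNT; allocations for init_mm (kernel mappings) drop the flag,
 * since there is no sensible cgroup to charge them to.
 */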
static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	gfp_t gfp = GFP_KERNEL_ACCOUNT;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	return (p4d_t *)get_zeroed_page(gfp);
}

static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	if (!pgtable_l5_enabled())
		return;

	BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
	free_page((unsigned long)p4d);
}

extern void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d);

static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
				  unsigned long address)
{
	if (pgtable_l5_enabled())
		___p4d_free_tlb(tlb, p4d);
}

#endif	/* CONFIG_PGTABLE_LEVELS > 4 */
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#endif /* _ASM_X86_PGALLOC_H */