cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

pgalloc.h (4993B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_PGALLOC_H
#define __ASM_GENERIC_PGALLOC_H

#ifdef CONFIG_MMU

#define GFP_PGTABLE_KERNEL	(GFP_KERNEL | __GFP_ZERO)
#define GFP_PGTABLE_USER	(GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)
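
/*
 * Note (illustrative addition, not in the upstream header): __GFP_ZERO means
 * every page-table page comes back zeroed, and __GFP_ACCOUNT charges user
 * page tables to the allocating task's memory cgroup; the kernel variant
 * omits that accounting.
 */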
/**
 * __pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
 * @mm: the mm_struct of the current context
 *
 * This function is intended for architectures that need
 * anything beyond simple page allocation.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm)
{
	return (pte_t *)__get_free_page(GFP_PGTABLE_KERNEL);
}

#ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
/**
 * pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
 * @mm: the mm_struct of the current context
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	return __pte_alloc_one_kernel(mm);
}
#endif

/**
 * pte_free_kernel - free PTE-level kernel page table page
 * @mm: the mm_struct of the current context
 * @pte: pointer to the memory containing the page table
 */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}
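
/*
 * Usage sketch (illustrative addition, not in the upstream header): the
 * kernel-side helpers are paired, so a caller that cannot install the
 * table typically does something like:
 *
 *	pte_t *pte = pte_alloc_one_kernel(&init_mm);
 *
 *	if (!pte)
 *		return -ENOMEM;
 *	...fill entries with set_pte() or similar...
 *	pte_free_kernel(&init_mm, pte);
 */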

/**
 * __pte_alloc_one - allocate a page for PTE-level user page table
 * @mm: the mm_struct of the current context
 * @gfp: GFP flags to use for the allocation
 *
 * Allocates a page and runs the pgtable_pte_page_ctor().
 *
 * This function is intended for architectures that need
 * anything beyond simple page allocation or must have custom GFP flags.
 *
 * Return: `struct page` initialized as page table or %NULL on error
 */
static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
{
	struct page *pte;

	pte = alloc_page(gfp);
	if (!pte)
		return NULL;
	if (!pgtable_pte_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}

	return pte;
}

#ifndef __HAVE_ARCH_PTE_ALLOC_ONE
/**
 * pte_alloc_one - allocate a page for PTE-level user page table
 * @mm: the mm_struct of the current context
 *
 * Allocates a page and runs the pgtable_pte_page_ctor().
 *
 * Return: `struct page` initialized as page table or %NULL on error
 */
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	return __pte_alloc_one(mm, GFP_PGTABLE_USER);
}
#endif

/*
 * Should really implement gc for free page table pages. This could be
 * done with a reference count in struct page.
 */

/**
 * pte_free - free PTE-level user page table page
 * @mm: the mm_struct of the current context
 * @pte_page: the `struct page` representing the page table
 */
static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
{
	pgtable_pte_page_dtor(pte_page);
	__free_page(pte_page);
}
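
/*
 * Usage sketch (illustrative addition): pte_alloc_one() returns a pgtable_t
 * (a struct page pointer here), which is normally handed over to an
 * architecture's pmd_populate() or freed again with pte_free() on failure:
 *
 *	pgtable_t new = pte_alloc_one(mm);
 *
 *	if (!new)
 *		return -ENOMEM;
 *	pmd_populate(mm, pmd, new);
 *
 * The pgtable_pte_page_ctor()/_dtor() pair accounts the page as a page
 * table and sets up the split PTE lock when that option is enabled.
 */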


#if CONFIG_PGTABLE_LEVELS > 2

#ifndef __HAVE_ARCH_PMD_ALLOC_ONE
/**
 * pmd_alloc_one - allocate a page for PMD-level page table
 * @mm: the mm_struct of the current context
 *
 * Allocates a page and runs the pgtable_pmd_page_ctor().
 * Allocations use %GFP_PGTABLE_USER in user context and
 * %GFP_PGTABLE_KERNEL in kernel context.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	struct page *page;
	gfp_t gfp = GFP_PGTABLE_USER;

	if (mm == &init_mm)
		gfp = GFP_PGTABLE_KERNEL;
	page = alloc_pages(gfp, 0);
	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {
		__free_pages(page, 0);
		return NULL;
	}
	return (pmd_t *)page_address(page);
}
#endif

#ifndef __HAVE_ARCH_PMD_FREE
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	free_page((unsigned long)pmd);
}
#endif

#endif /* CONFIG_PGTABLE_LEVELS > 2 */
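
/*
 * Note (illustrative addition): unlike pte_alloc_one(), pmd_alloc_one()
 * returns a kernel virtual address, so its counterpart pmd_free() converts
 * back with virt_to_page() before running the dtor.  The ctor can fail when
 * the split page-table lock cannot be allocated, which is why the error
 * path frees the page directly without a dtor call.
 */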

#if CONFIG_PGTABLE_LEVELS > 3

static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	gfp_t gfp = GFP_PGTABLE_USER;

	if (mm == &init_mm)
		gfp = GFP_PGTABLE_KERNEL;
	return (pud_t *)get_zeroed_page(gfp);
}

#ifndef __HAVE_ARCH_PUD_ALLOC_ONE
/**
 * pud_alloc_one - allocate a page for PUD-level page table
 * @mm: the mm_struct of the current context
 *
 * Allocates a page using %GFP_PGTABLE_USER for user context and
 * %GFP_PGTABLE_KERNEL for kernel context.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return __pud_alloc_one(mm, addr);
}
#endif

static inline void __pud_free(struct mm_struct *mm, pud_t *pud)
{
	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
	free_page((unsigned long)pud);
}

#ifndef __HAVE_ARCH_PUD_FREE
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	__pud_free(mm, pud);
}
#endif

#endif /* CONFIG_PGTABLE_LEVELS > 3 */
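
/*
 * Note (illustrative addition): the PUD level has no ctor/dtor bookkeeping;
 * __pud_alloc_one() just hands out a zeroed page, picking GFP_PGTABLE_KERNEL
 * for &init_mm and GFP_PGTABLE_USER otherwise, and __pud_free() returns it
 * straight to the page allocator.
 */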

#ifndef __HAVE_ARCH_PGD_FREE
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}
#endif
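
/*
 * Note (illustrative addition): this header provides no generic pgd_alloc();
 * every architecture supplies its own, and those whose PGD needs more than a
 * single page or extra teardown also define __HAVE_ARCH_PGD_FREE and provide
 * a matching pgd_free().
 */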

#endif /* CONFIG_MMU */

#endif /* __ASM_GENERIC_PGALLOC_H */