cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

pgalloc.h (3732B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGALLOC_H
#define _ASM_RISCV_PGALLOC_H

#include <linux/mm.h>
#include <asm/tlb.h>

#ifdef CONFIG_MMU
#define __HAVE_ARCH_PUD_ALLOC_ONE
#define __HAVE_ARCH_PUD_FREE
#include <asm-generic/pgalloc.h>

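/*
 * The *_populate() helpers link a freshly allocated lower-level table into
 * its parent entry: the table page's PFN is shifted into the PPN field and
 * _PAGE_TABLE (the valid bit, with no R/W/X permissions) marks the entry
 * as a non-leaf pointer to the next level.
 */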
static inline void pmd_populate_kernel(struct mm_struct *mm,
	pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = virt_to_pfn(pte);

	set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

static inline void pmd_populate(struct mm_struct *mm,
	pmd_t *pmd, pgtable_t pte)
{
	unsigned long pfn = virt_to_pfn(page_address(pte));

	set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

#ifndef __PAGETABLE_PMD_FOLDED
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	unsigned long pfn = virt_to_pfn(pmd);

	set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

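/*
 * pgtable_l4_enabled and pgtable_l5_enabled are runtime flags: the paging
 * mode (Sv39/Sv48/Sv57) is chosen at boot, so the P4D and PGD levels may
 * be folded at run time, in which case the helpers below do nothing.
 */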
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	if (pgtable_l4_enabled) {
		unsigned long pfn = virt_to_pfn(pud);

		set_p4d(p4d, __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}

static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d,
				     pud_t *pud)
{
	if (pgtable_l4_enabled) {
		unsigned long pfn = virt_to_pfn(pud);

		set_p4d_safe(p4d,
			     __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	if (pgtable_l5_enabled) {
		unsigned long pfn = virt_to_pfn(p4d);

		set_pgd(pgd, __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}

static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd,
				     p4d_t *p4d)
{
	if (pgtable_l5_enabled) {
		unsigned long pfn = virt_to_pfn(p4d);

		set_pgd_safe(pgd,
			     __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}

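/*
 * Allocation is likewise gated on the runtime paging mode: with the PUD
 * (or P4D) level folded, the allocators return NULL and the free helpers
 * are no-ops, since no separate table page exists for that level.
 */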
#define pud_alloc_one pud_alloc_one
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	if (pgtable_l4_enabled)
		return __pud_alloc_one(mm, addr);

	return NULL;
}

#define pud_free pud_free
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	if (pgtable_l4_enabled)
		__pud_free(mm, pud);
}

#define __pud_free_tlb(tlb, pud, addr)  pud_free((tlb)->mm, pud)

#define p4d_alloc_one p4d_alloc_one
static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	if (pgtable_l5_enabled) {
		gfp_t gfp = GFP_PGTABLE_USER;

		if (mm == &init_mm)
			gfp = GFP_PGTABLE_KERNEL;
		return (p4d_t *)get_zeroed_page(gfp);
	}

	return NULL;
}

static inline void __p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
	free_page((unsigned long)p4d);
}

#define p4d_free p4d_free
static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	if (pgtable_l5_enabled)
		__p4d_free(mm, p4d);
}

#define __p4d_free_tlb(tlb, p4d, addr)  p4d_free((tlb)->mm, p4d)
#endif /* __PAGETABLE_PMD_FOLDED */

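/*
 * A new PGD starts with cleared user entries; the kernel half is copied
 * from init_mm so that every process shares the global kernel mappings.
 */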
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
	if (likely(pgd != NULL)) {
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		/* Copy kernel mappings */
		memcpy(pgd + USER_PTRS_PER_PGD,
			init_mm.pgd + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return pgd;
}

#ifndef __PAGETABLE_PMD_FOLDED

#define __pmd_free_tlb(tlb, pmd, addr)  pmd_free((tlb)->mm, pmd)

#endif /* __PAGETABLE_PMD_FOLDED */

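/*
 * PTE pages carry split page-table lock state set up by their constructor;
 * the destructor must run before the page is handed to the mmu_gather
 * batch, which only frees it after the TLB has been flushed.
 */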
#define __pte_free_tlb(tlb, pte, buf)   \
do {                                    \
	pgtable_pte_page_dtor(pte);     \
	tlb_remove_page((tlb), pte);    \
} while (0)
#endif /* CONFIG_MMU */

#endif /* _ASM_RISCV_PGALLOC_H */
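
For context, a minimal sketch (not part of pgalloc.h) of how generic mm code
uses these helpers when faulting in a PTE table. It mirrors the shape of
__pte_alloc() in mm/memory.c, simplified and with locking omitted; the
function name pte_alloc_sketch is hypothetical, for illustration only.

int pte_alloc_sketch(struct mm_struct *mm, pmd_t *pmd)
{
	/* pte_alloc_one() comes from asm-generic/pgalloc.h (included above) */
	pgtable_t new = pte_alloc_one(mm);

	if (!new)
		return -ENOMEM;
	if (pmd_none(*pmd))
		pmd_populate(mm, pmd, new);	/* link the new PTE table */
	else
		pte_free(mm, new);		/* another thread won the race: discard ours */
	return 0;
}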