cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

pgalloc.h (2709B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Page table support for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <asm/mem-layout.h>
#include <asm/atomic.h>

#include <asm-generic/pgalloc.h>
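
/*
 * Note: the generic header above supplies the common pte_alloc_one()/
 * pte_free() family of helpers; only the pgd and pmd operations defined
 * below are Hexagon-specific.
 */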

extern unsigned long long kmap_generation;

/*
 * Page table creation interface
 */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

	/*
	 * There may be better ways to do this, but to ensure
	 * that new address spaces always contain the kernel
	 * base mapping, and to ensure that the user area is
	 * initially marked invalid, initialize the new map
	 * with a copy of the kernel's persistent map.
	 */

	memcpy(pgd, swapper_pg_dir, PTRS_PER_PGD * sizeof(pgd_t));
	mm->context.generation = kmap_generation;

	/* Physical version is what is passed to virtual machine on switch */
	mm->context.ptbase = __pa(pgd);

	return pgd;
}
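
/*
 * For context, a condensed sketch of how the generation stamp and
 * ptbase recorded above are consumed on a context switch (modeled on
 * the Hexagon switch_mm(); simplified and not part of this header):
 * a stale generation means the incoming mm's copy of the kernel
 * segments must be refreshed from init_mm before the virtual machine
 * is pointed at the new page table.
 */
#if 0	/* illustrative sketch only */
static inline void example_switch_mm(struct mm_struct *prev,
				     struct mm_struct *next)
{
	int l1;

	if (next->context.generation < prev->context.generation) {
		for (l1 = MIN_KERNEL_SEG; l1 <= max_kernel_seg; l1++)
			next->pgd[l1] = init_mm.pgd[l1];
		next->context.generation = prev->context.generation;
	}

	__vmnewmap((void *)next->context.ptbase);	/* hand table to VM */
}
#endif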

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte)
{
	/*
	 * Conveniently, zero in 3 LSB means indirect 4K page table.
	 * Not so convenient when you're trying to vary the page size.
	 */
	set_pmd(pmd, __pmd(((unsigned long)page_to_pfn(pte) << PAGE_SHIFT) |
		HEXAGON_L1_PTE_SIZE));
}
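
/*
 * Layout sketch for the L1 entry written above (inferred from the
 * comment: a zero size code in bits [2:0] selects an indirect 4K L2
 * table, so OR-ing in HEXAGON_L1_PTE_SIZE largely documents intent):
 *
 *   31                         12 11     3 2    0
 *  +-----------------------------+--------+------+
 *  | physaddr of the 4K L2 table | unused | size |
 *  +-----------------------------+--------+------+
 */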

/*
 * Other architectures seem to have ways of making all processes
 * share the same pmds for their kernel mappings, but the v0.3
 * Hexagon VM spec has a "monolithic" L1 table for user and kernel
 * segments.  We track "generations" of the kernel map to minimize
 * overhead, and update the "slave" copies of the kernel mappings
 * as part of switch_mm.  However, we still need to update the
 * kernel map of the active thread that's calling pmd_populate_kernel...
 */
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	extern spinlock_t kmap_gen_lock;
	pmd_t *ppmd;
	int pmdindex;

	spin_lock(&kmap_gen_lock);
	kmap_generation++;
	mm->context.generation = kmap_generation;
	current->active_mm->context.generation = kmap_generation;
	spin_unlock(&kmap_gen_lock);

	set_pmd(pmd, __pmd(((unsigned long)__pa(pte)) | HEXAGON_L1_PTE_SIZE));

	/*
	 * Now the "slave" copy of the current thread.
	 * This is pointer arithmetic, not byte addresses!
	 */
	pmdindex = (pgd_t *)pmd - mm->pgd;
	ppmd = (pmd_t *)current->active_mm->pgd + pmdindex;
	set_pmd(ppmd, __pmd(((unsigned long)__pa(pte)) | HEXAGON_L1_PTE_SIZE));
	if (pmdindex > max_kernel_seg)
		max_kernel_seg = pmdindex;
}
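
/*
 * For context, a condensed sketch of the generic caller that reaches
 * pmd_populate_kernel() (modeled on __pte_alloc_kernel() in
 * mm/memory.c; locking and memory barriers omitted for brevity):
 */
#if 0	/* illustrative sketch only */
int example_pte_alloc_kernel(pmd_t *pmd)
{
	pte_t *new = pte_alloc_one_kernel(&init_mm);

	if (!new)
		return -ENOMEM;
	if (likely(pmd_none(*pmd)))	/* nobody raced to install one */
		pmd_populate_kernel(&init_mm, pmd, new);
	else
		pte_free_kernel(&init_mm, new);
	return 0;
}
#endif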

#define __pte_free_tlb(tlb, pte, addr)		\
do {						\
	pgtable_pte_page_dtor((pte));		\
	tlb_remove_page((tlb), (pte));		\
} while (0)
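
/*
 * This macro is reached from generic page-table teardown via the
 * pte_free_tlb() wrapper; a condensed sketch of that caller (modeled
 * on free_pte_range() in mm/memory.c), where the mmu_gather defers
 * the actual page free until the relevant TLB entries are flushed:
 */
#if 0	/* illustrative sketch only */
static void example_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
				   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);	/* ends up invoking __pte_free_tlb() */
	mm_dec_nr_ptes(tlb->mm);
}
#endif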

#endif	/* _ASM_PGALLOC_H */