cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mem.c (4907B)
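
This is the low-level memory setup for User-Mode Linux (UML) — in the kernel tree this file is arch/um/kernel/mem.c. It initializes the zero page, the boot-time page tables and fixmap area, and provides pgd allocation for new address spaces.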


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include <as-layout.h>
#include <init.h>
#include <kern.h>
#include <kern_util.h>
#include <mem_user.h>
#include <os.h>

/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */
unsigned long *empty_zero_page = NULL;
EXPORT_SYMBOL(empty_zero_page);

/*
 * Initialized during boot, and read-only afterwards; used as the
 * source of the kernel mappings when new page tables are initialized
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* Initialized at boot time, and read-only after that */
unsigned long long highmem;
EXPORT_SYMBOL(highmem);
int kmalloc_ok = 0;

/* Used during early boot */
static unsigned long brk_end;

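/*
 * Called when the buddy allocator is about to take over from the early
 * memblock allocator: zero the shared zero page, map and release the
 * gap between the current brk and the end of the early reserved
 * region, hand all low memory to the page allocator, and finally mark
 * kmalloc() as usable.
 */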
void __init mem_init(void)
{
	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	/*
	 * Map in the area just after the brk now that kmalloc is about
	 * to be turned on.  The trailing 1, 1, 0 arguments are the
	 * r/w/x permissions: read-write, not executable.
	 */
	brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
	map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
	memblock_free((void *)brk_end, uml_reserved - brk_end);
	uml_reserved = brk_end;

	/* this will put all low memory onto the freelists */
	memblock_free_all();
	max_low_pfn = totalram_pages();
	max_pfn = max_low_pfn;
	kmalloc_ok = 1;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static void __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
							  PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		set_pmd(pmd, __pmd(_KERNPG_TABLE +
					   (unsigned long) __pa(pte)));
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}
}

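/*
 * With CONFIG_3_LEVEL_PGTABLES, allocate a pmd page and hook it into
 * the given pud entry; with two-level page tables the pmd level is
 * folded away and there is nothing to do.
 */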
static void __init one_md_table_init(pud_t *pud)
{
#ifdef CONFIG_3_LEVEL_PGTABLES
	pmd_t *pmd_table = (pmd_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
	if (!pmd_table)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
	BUG_ON(pmd_table != pmd_offset(pud, 0));
#endif
}

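/*
 * Walk the pgd/p4d/pud/pmd hierarchy for [start, end) and allocate any
 * missing intermediate tables, so that leaf entries can later be
 * installed (e.g. by set_fixmap()) without further allocations.
 */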
static void __init fixrange_init(unsigned long start, unsigned long end,
				 pgd_t *pgd_base)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	int i, j;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		p4d = p4d_offset(pgd, vaddr);
		pud = pud_offset(p4d, vaddr);
		if (pud_none(*pud))
			one_md_table_init(pud);
		pmd = pmd_offset(pud, vaddr);
		for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
			one_page_table_init(pmd);
			vaddr += PMD_SIZE;
		}
		j = 0;
	}
}

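/*
 * If the host's vsyscall area is being reused, copy its contents into
 * freshly allocated kernel pages and point the fixmap PTEs at the
 * copy, mapped read-only, so the guest sees a private, immutable
 * snapshot of the host's vsyscall page(s).
 */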
static void __init fixaddr_user_init(void)
{
#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
	long size = FIXADDR_USER_END - FIXADDR_USER_START;
	pte_t *pte;
	phys_t p;
	unsigned long v, vaddr = FIXADDR_USER_START;

	if (!size)
		return;

	fixrange_init(FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
	v = (unsigned long) memblock_alloc_low(size, PAGE_SIZE);
	if (!v)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, size, PAGE_SIZE);

	memcpy((void *) v, (void *) FIXADDR_USER_START, size);
	p = __pa(v);
	for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
		      p += PAGE_SIZE) {
		pte = virt_to_kpte(vaddr);
		pte_set_val(*pte, p, PAGE_READONLY);
	}
#endif
}

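/*
 * Allocate the zero page, declare the zone layout, and build the
 * page-table structure backing the fixmap region; the fixmap leaf
 * entries themselves are filled in later through set_fixmap().
 */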
void __init paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
	unsigned long vaddr;

	empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
							       PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	max_zone_pfn[ZONE_NORMAL] = end_iomem >> PAGE_SHIFT;
	free_area_init(max_zone_pfn);

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);

	fixaddr_user_init();
}

/*
 * This can't do anything because nothing in the kernel image can be freed
 * since it's not in kernel physical memory.
 */

void free_initmem(void)
{
}

/* Allocate and free page tables. */

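/*
 * A new pgd starts with empty user entries; the kernel half is copied
 * from swapper_pg_dir so that every address space shares the same
 * kernel mappings.
 */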
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);

	if (pgd) {
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD,
		       swapper_pg_dir + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return pgd;
}

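/*
 * kmalloc() wrapper for UML's userspace-side code, which is built
 * against host headers and so cannot use the kernel's kmalloc()
 * prototype or GFP_* flags directly.
 */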
void *uml_kmalloc(int size, int flags)
{
	return kmalloc(size, flags);
}