cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sun3mmu.c (2732B)


// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/mm/sun3mmu.c
 *
 * Implementations of mm routines specific to the sun3 MMU.
 *
 * Moved here 8/20/1999 Sam Creasey
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/memblock.h>

#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/machdep.h>
#include <asm/io.h>

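/*
 * mmu_emu_init() is implemented by the sun3 MMU emulation code and is
 * called at the end of paging_init() once the boot tables are set up.
 */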
extern void mmu_emu_init (unsigned long bootmem_end);

const char bad_pmd_string[] = "Bad pmd in pte_alloc: %08lx\n";

extern unsigned long num_pages;

/*
 * For the sun3 we try to follow the i386 paging_init() more closely.
 * start_mem and end_mem have PAGE_OFFSET added already.
 * Now sets up tables using sun3 PTEs rather than i386 as before. --m
 */
void __init paging_init(void)
{
	pgd_t * pg_dir;
	pte_t * pg_table;
	int i;
	unsigned long address;
	unsigned long next_pgtable;
	unsigned long bootmem_end;
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
	unsigned long size;

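	/*
	 * Allocate the shared kernel zero page; memblock_alloc() returns
	 * page-aligned memory from the boot allocator, already zeroed.
	 */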
	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	address = PAGE_OFFSET;
	pg_dir = swapper_pg_dir;
	memset (swapper_pg_dir, 0, sizeof (swapper_pg_dir));
	memset (kernel_pg_dir,  0, sizeof (kernel_pg_dir));

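	/*
	 * One flat block of PTEs is enough to map every physical page;
	 * round its size up to a whole number of pages.
	 */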
	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);

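	/*
	 * bootmem_end is the first page boundary past these early
	 * allocations; it is passed to mmu_emu_init() below.
	 */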
	next_pgtable = (unsigned long)memblock_alloc(size, PAGE_SIZE);
	if (!next_pgtable)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, size, PAGE_SIZE);
	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;

	/* Map whole memory from PAGE_OFFSET (0x0E000000) */
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

	while (address < (unsigned long)high_memory) {
		pg_table = (pte_t *) __pa (next_pgtable);
		next_pgtable += PTRS_PER_PTE * sizeof (pte_t);
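		/* a pgd entry stores the physical address of its page table */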
		pgd_val(*pg_dir) = (unsigned long) pg_table;
		pg_dir++;

		/* now change pg_table to kernel virtual addresses */
		pg_table = (pte_t *) __va ((unsigned long) pg_table);
		for (i=0; i<PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
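			/* past the end of RAM: leave the tail of this table invalid */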
			if (address >= (unsigned long)high_memory)
				pte_val (pte) = 0;
			set_pte (pg_table, pte);
			address += PAGE_SIZE;
		}
	}

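	/* switch to the sun3 MMU emulation now that the tables are filled in */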
	mmu_emu_init(bootmem_end);

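	/* boot runs in kernel context: the init task has no user mm */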
	current->mm = NULL;

	/* memory sizing is a hack stolen from motorola.c..  hope it works for us */
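	/*
	 * All of RAM lands in ZONE_DMA; free_area_init() wants the
	 * highest pfn of each zone.
	 */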
	max_zone_pfn[ZONE_DMA] = ((unsigned long)high_memory) >> PAGE_SHIFT;

	/* I really wish I knew why the following change made things better...  -- Sam */
	free_area_init(max_zone_pfn);
}