cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mcfmmu.c (5956B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Based upon linux/arch/m68k/mm/sun3mmu.c
 * Based upon linux/arch/ppc/mm/mmu_context.c
 *
 * Implementations of mm routines specific to the Coldfire MMU.
 *
 * Copyright (c) 2008 Freescale Semiconductor, Inc.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/memblock.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/mcf_pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

#define KMAPAREA(x)	((x >= VMALLOC_START) && (x < KMAP_END))

mm_context_t next_mmu_context;
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
atomic_t nr_free_contexts;
struct mm_struct *context_mm[LAST_CONTEXT+1];
unsigned long num_pages;

/*
 * ColdFire paging_init derived from sun3.
 */
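/*
 * Allocate the shared zero page, build the kernel page tables that map
 * RAM at PAGE_OFFSET into swapper_pg_dir, and hand the zone limits
 * (everything below _ramend lives in ZONE_DMA) to free_area_init().
 */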
void __init paging_init(void)
{
	pgd_t *pg_dir;
	pte_t *pg_table;
	unsigned long address, size;
	unsigned long next_pgtable, bootmem_end;
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
	int i;

	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	pg_dir = swapper_pg_dir;
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
	next_pgtable = (unsigned long) memblock_alloc(size, PAGE_SIZE);
	if (!next_pgtable)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, size, PAGE_SIZE);

	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

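	/*
	 * Map RAM linearly at PAGE_OFFSET, one page-table page per PGD
	 * slot; PTEs that would fall beyond high_memory are left invalid.
	 */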
	address = PAGE_OFFSET;
	while (address < (unsigned long)high_memory) {
		pg_table = (pte_t *) next_pgtable;
		next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
		pgd_val(*pg_dir) = (unsigned long) pg_table;
		pg_dir++;

		/* now change pg_table to kernel virtual addresses */
		for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
			if (address >= (unsigned long) high_memory)
				pte_val(pte) = 0;

			set_pte(pg_table, pte);
			address += PAGE_SIZE;
		}
	}

	current->mm = NULL;
	max_zone_pfn[ZONE_DMA] = PFN_DOWN(_ramend);
	free_area_init(max_zone_pfn);
}

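/*
 * Handle a TLB miss: walk the page tables for the faulting address and,
 * if a valid mapping exists, load it into the hardware TLB through the
 * MMUTR/MMUDR/MMUOR registers.  For data misses the address is read from
 * MMUAR; for instruction misses it is derived from the faulting PC and
 * the exception's extension word.  Returns 0 on success and -1 when no
 * usable mapping exists, so the caller can fall back to the normal
 * fault handling.
 */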
int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
{
	unsigned long flags, mmuar, mmutr;
	struct mm_struct *mm;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int asid;

	local_irq_save(flags);

	mmuar = (dtlb) ? mmu_read(MMUAR) :
		regs->pc + (extension_word * sizeof(long));

	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
	if (!mm) {
		local_irq_restore(flags);
		return -1;
	}

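	/*
	 * Walk the page tables by hand; a hole at any level means there
	 * is no mapping to load and the fault must be handled elsewhere.
	 */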
	pgd = pgd_offset(mm, mmuar);
	if (pgd_none(*pgd))  {
		local_irq_restore(flags);
		return -1;
	}

	p4d = p4d_offset(pgd, mmuar);
	if (p4d_none(*p4d)) {
		local_irq_restore(flags);
		return -1;
	}

	pud = pud_offset(p4d, mmuar);
	if (pud_none(*pud)) {
		local_irq_restore(flags);
		return -1;
	}

	pmd = pmd_offset(pud, mmuar);
	if (pmd_none(*pmd)) {
		local_irq_restore(flags);
		return -1;
	}

	pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
				: pte_offset_map(pmd, mmuar);
	if (pte_none(*pte) || !pte_present(*pte)) {
		local_irq_restore(flags);
		return -1;
	}

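	/*
	 * Dirty/young tracking is done in software: mark the PTE accessed,
	 * mark it dirty on a write (refusing writes to read-only pages),
	 * and keep clean pages outside the kernel map write-protected so
	 * the next write faults again.
	 */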
	if (write) {
		if (!pte_write(*pte)) {
			local_irq_restore(flags);
			return -1;
		}
		set_pte(pte, pte_mkdirty(*pte));
	}

	set_pte(pte, pte_mkyoung(*pte));
	asid = mm->context & 0xff;
	if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
		set_pte(pte, pte_wrprotect(*pte));

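	/*
	 * Build the TLB entry: the tag word carries the virtual page, the
	 * ASID and the valid bit, the data word carries the physical page,
	 * the protection bits and the 8 KB page size.  The MMUOR write
	 * then triggers the TLB update (MMUOR_ITLB selecting the
	 * instruction TLB).
	 */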
	mmutr = (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) | MMUTR_V;
	if ((mmuar < TASK_UNMAPPED_BASE) || (mmuar >= TASK_SIZE))
		mmutr |= (pte->pte & CF_PAGE_MMUTR_MASK) >> CF_PAGE_MMUTR_SHIFT;
	mmu_write(MMUTR, mmutr);

	mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
		((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);

	if (dtlb)
		mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
	else
		mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);

	local_irq_restore(flags);
	return 0;
}

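/*
 * Register the single RAM bank with memblock, record the pfn limits for
 * the mm core, reserve the region occupied by the kernel image (from
 * _rambase up to the page-aligned _ramstart) and set up node 0.
 */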
void __init cf_bootmem_alloc(void)
{
	unsigned long memstart;

	/* _rambase and _ramend will be naturally page aligned */
	m68k_memory[0].addr = _rambase;
	m68k_memory[0].size = _ramend - _rambase;

	memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0,
			  MEMBLOCK_NONE);

	/* compute total pages in system */
	num_pages = PFN_DOWN(_ramend - _rambase);

	/* page numbers */
	memstart = PAGE_ALIGN(_ramstart);
	min_low_pfn = PFN_DOWN(_rambase);
	max_pfn = max_low_pfn = PFN_DOWN(_ramend);
	high_memory = (void *)_ramend;

	/* Reserve kernel text/data/bss */
	memblock_reserve(_rambase, memstart - _rambase);

	m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
	module_fixup(NULL, __start_fixup, __stop_fixup);

	/* setup node data */
	m68k_setup_node(0);
}

/*
 * Initialize the context management stuff.
 * The following was taken from arch/ppc/mmu_context.c
 */
void __init cf_mmu_context_init(void)
{
	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes FIRST_CONTEXT < 32.
	 */
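	/*
	 * Mark every context below FIRST_CONTEXT as permanently taken in
	 * the bitmap; FIRST_CONTEXT..LAST_CONTEXT remain allocatable.
	 */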
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_mmu_context = FIRST_CONTEXT;
	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
}

/*
 * Steal a context from a task that has one at the moment.
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :).  This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *  -- paulus
 */
void steal_context(void)
{
	struct mm_struct *mm;
	/*
	 * free up context `next_mmu_context'
	 * if we shouldn't free context 0, don't...
	 */
	if (next_mmu_context < FIRST_CONTEXT)
		next_mmu_context = FIRST_CONTEXT;
	mm = context_mm[next_mmu_context];
	flush_tlb_mm(mm);
	destroy_context(mm);
}