cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

44x.c (6338B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Modifications by Matt Porter (mporter@mvista.com) to support
 * PPC44x Book E processors.
 *
 * This file contains the routines for initializing the MMU
 * on the 4xx series of chips.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/init.h>
#include <linux/memblock.h>

#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/smp.h>

#include <mm/mmu_decl.h>

/* Used by the 44x TLB replacement exception handler.
 * It just needs to be declared someplace.
 */
unsigned int tlb_44x_index; /* = 0 */
unsigned int tlb_44x_hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS;
int icache_44x_need_flush;

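/* Map of bolted entries in the 47x UTLB, one bit per entry index (the
 * 47x UTLB has 1024 entries). Filled in by ppc47x_update_boltmap() so
 * the TLB miss code in head_44x.S can avoid evicting bolted mappings.
 */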
unsigned long tlb_47x_boltmap[1024/8];

static void __init ppc44x_update_tlb_hwater(void)
{
	/* The TLB miss handlers hard-code the watermark in a cmpli
	 * instruction to improve performance, rather than loading it
	 * from the global variable. Thus, we patch the instruction in
	 * each of the two TLB miss handlers whenever the value is
	 * updated.
	 */
	modify_instruction_site(&patch__tlb_44x_hwater_D, 0xffff, tlb_44x_hwater);
	modify_instruction_site(&patch__tlb_44x_hwater_I, 0xffff, tlb_44x_hwater);
}
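
/* Entries above tlb_44x_hwater are reserved for pinned mappings; the
 * 44x TLB miss handlers cycle tlb_44x_index through 0..hwater when
 * choosing a victim. Conceptually, each patch site holds a compare
 * against the watermark whose 16-bit immediate field gets rewritten
 * in place, e.g. (illustrative operands, not the literal handler code):
 *
 *	cmplwi	r5,0x003e	->	cmplwi	r5,<new hwater>
 */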

/*
 * "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 44x type MMU
 */
static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
{
	unsigned int entry = tlb_44x_hwater--;

	ppc44x_update_tlb_hwater();

	mtspr(SPRN_MMUCR, 0);

	__asm__ __volatile__(
		"tlbwe	%2,%3,%4\n"
		"tlbwe	%1,%3,%5\n"
		"tlbwe	%0,%3,%6\n"
	:
	: "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
	  "r" (phys),
	  "r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M),
	  "r" (entry),
	  "i" (PPC44x_TLB_PAGEID),
	  "i" (PPC44x_TLB_XLAT),
	  "i" (PPC44x_TLB_ATTRIB));
}
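
/* The three tlbwe instructions above fill the entry's three words:
 * PPC44x_TLB_PAGEID takes the EPN plus valid and 256M size bits,
 * PPC44x_TLB_XLAT takes the physical address, and PPC44x_TLB_ATTRIB
 * takes the supervisor RWX permissions and the guarded attribute.
 * Decrementing tlb_44x_hwater first (and re-patching the handlers)
 * keeps the replacement code away from the entry being bolted.
 *
 * Illustrative call, mirroring how mmu_mapin_ram() uses it (addresses
 * assumed): pinning the second 256MB of lowmem would be
 *
 *	ppc44x_pin_tlb(PAGE_OFFSET + 0x10000000, 0x10000000);
 */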

static int __init ppc47x_find_free_bolted(void)
{
	unsigned int mmube0 = mfspr(SPRN_MMUBE0);
	unsigned int mmube1 = mfspr(SPRN_MMUBE1);

	if (!(mmube0 & MMUBE0_VBE0))
		return 0;
	if (!(mmube0 & MMUBE0_VBE1))
		return 1;
	if (!(mmube0 & MMUBE0_VBE2))
		return 2;
	if (!(mmube1 & MMUBE1_VBE3))
		return 3;
	if (!(mmube1 & MMUBE1_VBE4))
		return 4;
	if (!(mmube1 & MMUBE1_VBE5))
		return 5;
	return -1;
}

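/* Record the UTLB index of every currently bolted entry in
 * tlb_47x_boltmap. MMUBE0 describes bolt slots 0-2 and MMUBE1 slots
 * 3-5; each slot pairs a valid bit with an 8-bit entry index.
 */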
static void __init ppc47x_update_boltmap(void)
{
	unsigned int mmube0 = mfspr(SPRN_MMUBE0);
	unsigned int mmube1 = mfspr(SPRN_MMUBE1);

	if (mmube0 & MMUBE0_VBE0)
		__set_bit((mmube0 >> MMUBE0_IBE0_SHIFT) & 0xff,
			  tlb_47x_boltmap);
	if (mmube0 & MMUBE0_VBE1)
		__set_bit((mmube0 >> MMUBE0_IBE1_SHIFT) & 0xff,
			  tlb_47x_boltmap);
	if (mmube0 & MMUBE0_VBE2)
		__set_bit((mmube0 >> MMUBE0_IBE2_SHIFT) & 0xff,
			  tlb_47x_boltmap);
	if (mmube1 & MMUBE1_VBE3)
		__set_bit((mmube1 >> MMUBE1_IBE3_SHIFT) & 0xff,
			  tlb_47x_boltmap);
	if (mmube1 & MMUBE1_VBE4)
		__set_bit((mmube1 >> MMUBE1_IBE4_SHIFT) & 0xff,
			  tlb_47x_boltmap);
	if (mmube1 & MMUBE1_VBE5)
		__set_bit((mmube1 >> MMUBE1_IBE5_SHIFT) & 0xff,
			  tlb_47x_boltmap);
}

/*
 * "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 47x type MMU
 */
static void __init ppc47x_pin_tlb(unsigned int virt, unsigned int phys)
{
	unsigned int rA;
	int bolted;

	/* Base rA is HW way select, way 0, bolted bit set */
	rA = 0x88000000;

	/* Look for a bolted entry slot */
	bolted = ppc47x_find_free_bolted();
	BUG_ON(bolted < 0);

	/* Insert bolted slot number */
	rA |= bolted << 24;

	pr_debug("256M TLB entry for 0x%08x->0x%08x in bolt slot %d\n",
		 virt, phys, bolted);

	mtspr(SPRN_MMUCR, 0);

	__asm__ __volatile__(
		"tlbwe	%2,%3,0\n"
		"tlbwe	%1,%3,1\n"
		"tlbwe	%0,%3,2\n"
		:
		: "r" (PPC47x_TLB2_SW | PPC47x_TLB2_SR |
		       PPC47x_TLB2_SX
#ifdef CONFIG_SMP
		       | PPC47x_TLB2_M
#endif
		       ),
		  "r" (phys),
		  "r" (virt | PPC47x_TLB0_VALID | PPC47x_TLB0_256M),
		  "r" (rA));
}
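
/* On 47x the tlbwe word selects 0/1/2 write the TLB0 (EPN, valid,
 * size), TLB1 (RPN) and TLB2 (attributes/permissions) words of the
 * entry, while rA carries the hardware way select and bolted bit
 * built above.
 *
 * Illustrative call (addresses assumed): bolting the second 256MB of
 * lowmem would be
 *
 *	ppc47x_pin_tlb(PAGE_OFFSET + 0x10000000, 0x10000000);
 */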

void __init MMU_init_hw(void)
{
	/* This is not useful on 47x but won't hurt either */
	ppc44x_update_tlb_hwater();

	flush_instruction_cache();
}

unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
	unsigned long addr;
	unsigned long memstart = memstart_addr & ~(PPC_PIN_SIZE - 1);

	/* Pin in enough TLBs to cover any lowmem not covered by the
	 * initial 256M mapping established in head_44x.S */
	for (addr = memstart + PPC_PIN_SIZE; addr < lowmem_end_addr;
	     addr += PPC_PIN_SIZE) {
		if (mmu_has_feature(MMU_FTR_TYPE_47x))
			ppc47x_pin_tlb(addr + PAGE_OFFSET, addr);
		else
			ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
	}
	if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
		ppc47x_update_boltmap();

#ifdef DEBUG
		{
			int i;

			printk(KERN_DEBUG "bolted entries: ");
			/* Bolt indices are 8 bits wide, so scan all 256 */
			for (i = 0; i < 256; i++) {
				if (test_bit(i, tlb_47x_boltmap))
					printk("%d ", i);
			}
			printk("\n");
		}
#endif /* DEBUG */
	}
	return total_lowmem;
}
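
/* Worked example with illustrative sizes: with memstart_addr = 0,
 * PPC_PIN_SIZE = 256MB and 512MB of lowmem, head_44x.S has already
 * mapped the first 256MB, so the loop above runs exactly once and
 * pins PAGE_OFFSET + 0x10000000 -> 0x10000000.
 */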

void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	u64 size;

#ifndef CONFIG_NONSTATIC_KERNEL
	/* We don't currently support the first MEMBLOCK not mapping
	 * physical address 0 on these processors.
	 */
	BUG_ON(first_memblock_base != 0);
#endif

	/* 44x has a 256M TLB entry pinned at boot */
	size = min_t(u64, first_memblock_size, PPC_PIN_SIZE);
	memblock_set_current_limit(first_memblock_base + size);
}
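
/* Example with an assumed size: if the first memblock spans 1GB, the
 * limit becomes base + 256MB, keeping early memblock allocations
 * inside the single 256MB entry pinned by head_44x.S at boot.
 */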

#ifdef CONFIG_SMP
void __init mmu_init_secondary(int cpu)
{
	unsigned long addr;
	unsigned long memstart = memstart_addr & ~(PPC_PIN_SIZE - 1);

	/* Pin in enough TLBs to cover any lowmem not covered by the
	 * initial 256M mapping established in head_44x.S
	 *
	 * WARNING: This is called with only the first 256M of the
	 * linear mapping in the TLB and we can't take faults yet
	 * so beware of what this code uses. It runs off a temporary
	 * stack. current (r2) isn't initialized, smp_processor_id()
	 * will not work, current thread info isn't accessible, ...
	 */
	for (addr = memstart + PPC_PIN_SIZE; addr < lowmem_end_addr;
	     addr += PPC_PIN_SIZE) {
		if (mmu_has_feature(MMU_FTR_TYPE_47x))
			ppc47x_pin_tlb(addr + PAGE_OFFSET, addr);
		else
			ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
	}
}
#endif /* CONFIG_SMP */