cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mmu.c (12274B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU substantially follows the
 * architecture specification.  This includes the 6xx, 7xx, 7xxx,
 * and 8260 implementations but excludes the 8xx and 4xx.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>

#include <asm/mmu.h>
#include <asm/machdep.h>
#include <asm/code-patching.h>
#include <asm/sections.h>

#include <mm/mmu_decl.h>

u8 __initdata early_hash[SZ_256K] __aligned(SZ_256K) = {0};

static struct hash_pte __initdata *Hash = (struct hash_pte *)early_hash;
static unsigned long __initdata Hash_size, Hash_mask;
static unsigned int __initdata hash_mb, hash_mb2;
unsigned long __initdata _SDR1;

struct ppc_bat BATS[8][2];	/* 8 pairs of IBAT, DBAT */

static struct batrange {	/* stores address ranges mapped by BATs */
	unsigned long start;
	unsigned long limit;
	phys_addr_t phys;
} bat_addrs[8];

#ifdef CONFIG_SMP
unsigned long mmu_hash_lock;
#endif

/*
 * Return PA for this VA if it is mapped by a BAT, or 0
 */
phys_addr_t v_block_mapped(unsigned long va)
{
	int b;
	for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
		if (va >= bat_addrs[b].start && va < bat_addrs[b].limit)
			return bat_addrs[b].phys + (va - bat_addrs[b].start);
	return 0;
}

/*
 * Return VA for a given PA or 0 if not mapped
 */
unsigned long p_block_mapped(phys_addr_t pa)
{
	int b;
	for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
		if (pa >= bat_addrs[b].phys &&
		    pa < (bat_addrs[b].limit - bat_addrs[b].start) + bat_addrs[b].phys)
			return bat_addrs[b].start + (pa - bat_addrs[b].phys);
	return 0;
}

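/*
 * Find the first unused BAT pair, or -1 if all are in use.  A pair is
 * free when the Vs/Vp valid bits of its DBAT upper word are both clear.
 */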
int __init find_free_bat(void)
{
	int b;
	int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;

	for (b = 0; b < n; b++) {
		struct ppc_bat *bat = BATS[b];

		if (!(bat[1].batu & 3))
			return b;
	}
	return -1;
}

/*
 * This function calculates the size of the larger block usable to map the
 * beginning of an area based on the start address and size of that area:
 * - max block size is 256M on 6xx.
 * - base address must be aligned to the block size. So the maximum block size
 *   is identified by the lowest bit set to 1 in the base address (for instance
 *   if base is 0x16000000, max size is 0x02000000).
 * - block size has to be a power of two. This is calculated by finding the
 *   highest bit set to 1.
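 *
 * Worked example (illustrative): for base = 0x01400000 and top = 0x01c00000,
 * ffs(base) - 1 = 22 (a 4M alignment limit) and fls(top - base) - 1 = 23
 * (an 8M size limit), so the result is min(256M, 4M, 8M) = 4M.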
 */
unsigned int bat_block_size(unsigned long base, unsigned long top)
{
	unsigned int max_size = SZ_256M;
	unsigned int base_shift = (ffs(base) - 1) & 31;
	unsigned int block_shift = (fls(top - base) - 1) & 31;

	return min3(max_size, 1U << base_shift, 1U << block_shift);
}

/*
 * Set up one of the IBAT (block address translation) register pairs.
 * The parameters are not checked; in particular size must be a power
 * of 2 between 128k and 256M.
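 * Only the IBAT half of the pair is written here.  setibat() is only
 * used for kernel text mappings, so protection is always BPP_RX; note
 * that the conditional below tests the constant _PAGE_EXEC, not flags.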
 */
static void setibat(int index, unsigned long virt, phys_addr_t phys,
		    unsigned int size, pgprot_t prot)
{
	unsigned int bl = (size >> 17) - 1;
	int wimgxpp;
	struct ppc_bat *bat = BATS[index];
	unsigned long flags = pgprot_val(prot);

	if (!cpu_has_feature(CPU_FTR_NEED_COHERENT))
		flags &= ~_PAGE_COHERENT;

	wimgxpp = (flags & _PAGE_COHERENT) | (_PAGE_EXEC ? BPP_RX : BPP_XX);
	bat[0].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
	bat[0].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
	if (flags & _PAGE_USER)
		bat[0].batu |= 1;	/* Vp = 1 */
}

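/* Invalidate one IBAT pair by zeroing both of its registers. */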
static void clearibat(int index)
{
	struct ppc_bat *bat = BATS[index];

	bat[0].batu = 0;
	bat[0].batl = 0;
}

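/*
 * Cover [base, top) with BAT mappings, largest blocks first, until we
 * run out of free BATs or the next block would be smaller than 128K.
 * Returns the address up to which RAM was actually mapped.
 */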
static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long top)
{
	int idx;

	while ((idx = find_free_bat()) != -1 && base != top) {
		unsigned int size = bat_block_size(base, top);

		if (size < 128 << 10)
			break;
		setbat(idx, PAGE_OFFSET + base, base, size, PAGE_KERNEL_X);
		base += size;
	}

	return base;
}

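/*
 * Map as much low RAM as possible with BATs.  With STRICT_KERNEL_RWX
 * enabled, the mapping is split at __init_begin so that text and data
 * can be given different protections.
 */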
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
	unsigned long done;
	unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;

	if (debug_pagealloc_enabled_or_kfence() || __map_without_bats) {
		pr_debug_once("Read-Write memory mapped without BATs\n");
		if (base >= border)
			return base;
		if (top >= border)
			top = border;
	}

	if (!strict_kernel_rwx_enabled() || base >= border || top <= border)
		return __mmu_mapin_ram(base, top);

	done = __mmu_mapin_ram(base, border);
	if (done != border)
		return done;

	return __mmu_mapin_ram(border, top);
}

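/*
 * True if @addr lies in a 256M segment that overlaps the module area;
 * those segments must stay executable, so mmu_mark_initmem_nx() skips
 * them.
 */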
static bool is_module_segment(unsigned long addr)
{
	if (!IS_ENABLED(CONFIG_MODULES))
		return false;
	if (addr < ALIGN_DOWN(MODULES_VADDR, SZ_256M))
		return false;
	if (addr > ALIGN(MODULES_END, SZ_256M) - 1)
		return false;
	return true;
}

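/*
 * Restrict execute permission: cover kernel text with IBATs, then set
 * the N (no-execute) bit in the segment registers for every segment
 * above TASK_SIZE that does not hold modules.
 */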
void mmu_mark_initmem_nx(void)
{
	int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
	int i;
	unsigned long base = (unsigned long)_stext - PAGE_OFFSET;
	unsigned long top = ALIGN((unsigned long)_etext - PAGE_OFFSET, SZ_128K);
	unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;
	unsigned long size;

	for (i = 0; i < nb - 1 && base < top;) {
		size = bat_block_size(base, top);
		setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
		base += size;
	}
	if (base < top) {
		size = bat_block_size(base, top);
		if ((top - base) > size) {
			size <<= 1;
			if (strict_kernel_rwx_enabled() && base + size > border)
				pr_warn("Some RW data is getting mapped X. "
					"Adjust CONFIG_DATA_SHIFT to avoid that.\n");
		}
		setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
		base += size;
	}
	for (; i < nb; i++)
		clearibat(i);

	update_bats();

	for (i = TASK_SIZE >> 28; i < 16; i++) {
		/* Do not set NX on VM space for modules */
		if (is_module_segment(i << 28))
			continue;

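		/* 0x10000000 is the N (no-execute) bit of the segment register */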
		mtsr(mfsr(i << 28) | 0x10000000, i << 28);
	}
}

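/*
 * Flip the DBATs that map the kernel image below __init_begin from
 * read-write (BPP_RW) to read-only (BPP_RX).
 */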
void mmu_mark_rodata_ro(void)
{
	int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
	int i;

	for (i = 0; i < nb; i++) {
		struct ppc_bat *bat = BATS[i];

		if (bat_addrs[i].start < (unsigned long)__init_begin)
			bat[1].batl = (bat[1].batl & ~BPP_RW) | BPP_RX;
	}

	update_bats();
}

/*
 * Set up one of the I/D BAT (block address translation) register pairs.
 * The parameters are not checked; in particular size must be a power
 * of 2 between 128k and 256M.
 * On 603+, only set IBAT when _PAGE_EXEC is set
 */
void __init setbat(int index, unsigned long virt, phys_addr_t phys,
		   unsigned int size, pgprot_t prot)
{
	unsigned int bl;
	int wimgxpp;
	struct ppc_bat *bat;
	unsigned long flags = pgprot_val(prot);

	if (index == -1)
		index = find_free_bat();
	if (index == -1) {
		pr_err("%s: no BAT available for mapping 0x%llx\n", __func__,
		       (unsigned long long)phys);
		return;
	}
	bat = BATS[index];

	if ((flags & _PAGE_NO_CACHE) ||
	    (cpu_has_feature(CPU_FTR_NEED_COHERENT) == 0))
		flags &= ~_PAGE_COHERENT;

	bl = (size >> 17) - 1;
	/* Do DBAT first */
	wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
			   | _PAGE_COHERENT | _PAGE_GUARDED);
	wimgxpp |= (flags & _PAGE_RW) ? BPP_RW : BPP_RX;
	bat[1].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
	bat[1].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
	if (flags & _PAGE_USER)
		bat[1].batu |= 1;	/* Vp = 1 */
	if (flags & _PAGE_GUARDED) {
		/* G bit must be zero in IBATs */
		flags &= ~_PAGE_EXEC;
	}
	if (flags & _PAGE_EXEC)
		bat[0] = bat[1];
	else
		bat[0].batu = bat[0].batl = 0;

	bat_addrs[index].start = virt;
	bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1;
	bat_addrs[index].phys = phys;
}

/*
 * Preload a translation in the hash table
 */
static void hash_preload(struct mm_struct *mm, unsigned long ea)
{
	pmd_t *pmd;

	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return;
	pmd = pmd_off(mm, ea);
	if (!pmd_none(*pmd))
		add_hash_page(mm->context.id, ea, pmd_val(*pmd));
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return;
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We have to test for regs NULL since init will get here first thing at boot */
	if (!current->thread.regs)
		return;

	/* We also avoid filling the hash if not coming from a fault */
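	/* (0x300 is a data storage fault, 0x400 an instruction storage fault) */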
	if (TRAP(current->thread.regs) != 0x300 && TRAP(current->thread.regs) != 0x400)
		return;

	hash_preload(vma->vm_mm, address);
}

/*
 * Initialize the hash table and patch the instructions in hashtable.S.
 */
void __init MMU_init_hw(void)
{
	unsigned int n_hpteg, lg_n_hpteg;

	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return;

	if (ppc_md.progress)
		ppc_md.progress("hash:enter", 0x105);

#define LG_HPTEG_SIZE	6		/* 64 bytes per HPTEG */
#define SDR1_LOW_BITS	((n_hpteg - 1) >> 10)
#define MIN_N_HPTEG	1024		/* min 64kB hash table */

	/*
	 * Allow 1 HPTE (1/8 HPTEG) for each page of memory.
	 * This is less than the recommended amount, but then
	 * Linux ain't AIX.
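	 *
	 * Example (illustrative): with 512MB of RAM and 4K pages,
	 * n_hpteg = 512M / (4K * 8) = 16384, so Hash_size comes to
	 * 16384 * 64 bytes = 1MB.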
	 */
	n_hpteg = total_memory / (PAGE_SIZE * 8);
	if (n_hpteg < MIN_N_HPTEG)
		n_hpteg = MIN_N_HPTEG;
	lg_n_hpteg = __ilog2(n_hpteg);
	if (n_hpteg & (n_hpteg - 1)) {
		++lg_n_hpteg;		/* round up if not power of 2 */
		n_hpteg = 1 << lg_n_hpteg;
	}
	Hash_size = n_hpteg << LG_HPTEG_SIZE;

	/*
	 * Find some memory for the hash table.
	 */
	if (ppc_md.progress)
		ppc_md.progress("hash:find piece", 0x322);
	Hash = memblock_alloc(Hash_size, Hash_size);
	if (!Hash)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, Hash_size, Hash_size);
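	/* SDR1 = hash table physical base | size mask for the hardware */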
	_SDR1 = __pa(Hash) | SDR1_LOW_BITS;

	pr_info("Total memory = %lldMB; using %ldkB for hash table\n",
		(unsigned long long)(total_memory >> 20), Hash_size >> 10);

	Hash_mask = n_hpteg - 1;
	hash_mb2 = hash_mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg;
	if (lg_n_hpteg > 16)
		hash_mb2 = 16 - LG_HPTEG_SIZE;
}

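/*
 * The low-level handlers in hashtable.S have the hash table address,
 * mask and shift amounts baked into their instructions; patch them in
 * now that the table has been allocated.
 */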
void __init MMU_init_hw_patch(void)
{
	unsigned int hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
	unsigned int hash = (unsigned int)Hash - PAGE_OFFSET;

	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return;

	if (ppc_md.progress)
		ppc_md.progress("hash:patch", 0x345);
	if (ppc_md.progress)
		ppc_md.progress("hash:done", 0x205);

	/* WARNING: Make sure nothing can trigger a KASAN check past this point */

	/*
	 * Patch up the instructions in hashtable.S:create_hpte
	 */
	modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
	modify_instruction_site(&patch__hash_page_A1, 0x7c0, hash_mb << 6);
	modify_instruction_site(&patch__hash_page_A2, 0x7c0, hash_mb2 << 6);
	modify_instruction_site(&patch__hash_page_B, 0xffff, hmask);
	modify_instruction_site(&patch__hash_page_C, 0xffff, hmask);

	/*
	 * Patch up the instructions in hashtable.S:flush_hash_page
	 */
	modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16);
	modify_instruction_site(&patch__flush_hash_A1, 0x7c0, hash_mb << 6);
	modify_instruction_site(&patch__flush_hash_A2, 0x7c0, hash_mb2 << 6);
	modify_instruction_site(&patch__flush_hash_B, 0xffff, hmask);
}

void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/*
	 * We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);

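	/* Early boot maps RAM with BATs, which cover at most 256M */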
	memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_256M));
}

void __init print_system_hash_info(void)
{
	pr_info("Hash_size         = 0x%lx\n", Hash_size);
	if (Hash_mask)
		pr_info("Hash_mask         = 0x%lx\n", Hash_mask);
}

void __init early_init_mmu(void)
{
}