cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

motorola.c (11845B)


// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/mm/motorola.c
 *
 * Routines specific to the Motorola MMU, originally from:
 * linux/arch/m68k/init.c
 * which are Copyright (C) 1995 Hamish Macdonald
 *
 * Moved 8/20/1999 Sam Creasey
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/gfp.h>

#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/io.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#include <asm/sections.h>

#undef DEBUG

#ifndef mm_cachebits
/*
 * Bits to add to page descriptors for "normal" caching mode.
 * For 68020/030 this is 0.
 * For 68040, this is _PAGE_CACHE040 (cacheable, copyback)
 */
unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif

/* Prior to calling these routines, the page should have been flushed
 * from both the cache and ATC, or the CPU might not notice that the
 * cache setting for the page has been changed. -jskov
 */
static inline void nocache_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (CPU_IS_040_OR_060) {
		pte_t *ptep = virt_to_kpte(addr);

		*ptep = pte_mknocache(*ptep);
	}
}

static inline void cache_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (CPU_IS_040_OR_060) {
		pte_t *ptep = virt_to_kpte(addr);

		*ptep = pte_mkcache(*ptep);
	}
}

/*
 * Motorola 680x0 user's manual recommends using uncached memory for address
 * translation tables.
 *
 * Seeing how the MMU can be external on (some of) these chips, that seems like
 * a very important recommendation to follow. Provide some helpers to combat
 * 'variation' amongst the users of this.
 */

void mmu_page_ctor(void *page)
{
	__flush_page_to_ram(page);
	flush_tlb_kernel_page(page);
	nocache_page(page);
}

void mmu_page_dtor(void *page)
{
	cache_page(page);
}
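
/*
 * Rough usage sketch (exactly the pattern get_pointer_table() and
 * free_pointer_table() below follow): prepare a freshly allocated page
 * before the MMU may walk it, and make it cacheable again before handing
 * it back to the page allocator.
 *
 *	void *page = (void *)get_zeroed_page(GFP_KERNEL);
 *
 *	mmu_page_ctor(page);
 *	... use the page for pointer/page tables ...
 *	mmu_page_dtor(page);
 *	free_page((unsigned long)page);
 */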

/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
   struct page instead of separately kmalloced struct.  Stolen from
   arch/sparc/mm/srmmu.c ... */

typedef struct list_head ptable_desc;

static struct list_head ptable_list[2] = {
	LIST_HEAD_INIT(ptable_list[0]),
	LIST_HEAD_INIT(ptable_list[1]),
};

#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
#define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index)

static const int ptable_shift[2] = {
	7+2, /* PGD, PMD */
	6+2, /* PTE */
};

#define ptable_size(type) (1U << ptable_shift[type])
#define ptable_mask(type) ((1U << (PAGE_SIZE / ptable_size(type))) - 1)
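
/*
 * With the usual 4 KiB PAGE_SIZE this works out to (a sketch; the sizes
 * follow from ptable_shift[] above):
 *
 *	type 0 (PGD/PMD): 512-byte tables,  8 per page, ptable_mask = 0x00ff
 *	type 1 (PTE):     256-byte tables, 16 per page, ptable_mask = 0xffff
 *
 * PD_MARKBITS() keeps one bit per slot in the page's struct page (its
 * index field): 1 means the slot is free, 0 means it is in use.
 */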

void __init init_pointer_table(void *table, int type)
{
	ptable_desc *dp;
	unsigned long ptable = (unsigned long)table;
	unsigned long page = ptable & PAGE_MASK;
	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));

	dp = PD_PTABLE(page);
	if (!(PD_MARKBITS(dp) & mask)) {
		PD_MARKBITS(dp) = ptable_mask(type);
		list_add(dp, &ptable_list[type]);
	}

	PD_MARKBITS(dp) &= ~mask;
	pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));

	/* unreserve the page so it's possible to free that page */
	__ClearPageReserved(PD_PAGE(dp));
	init_page_count(PD_PAGE(dp));

	return;
}
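
/*
 * init_pointer_table() is meant for tables that already exist at boot
 * (set up in head.S): it marks that one slot as used and makes the rest
 * of the page available to get_pointer_table(). A sketch of such a call,
 * assuming the TABLE_PGD type from asm/motorola_pgalloc.h:
 *
 *	init_pointer_table(kernel_pg_dir, TABLE_PGD);
 */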

void *get_pointer_table(int type)
{
	ptable_desc *dp = ptable_list[type].next;
	unsigned int mask = list_empty(&ptable_list[type]) ? 0 : PD_MARKBITS(dp);
	unsigned int tmp, off;

	/*
	 * For a pointer table for a user process address space, a
	 * table is taken from a page allocated for the purpose.  Each
	 * page can hold 8 pointer tables (16 page tables in the
	 * TABLE_PTE case).  The page is remapped in virtual address
	 * space to be noncacheable.
	 */
	if (mask == 0) {
		void *page;
		ptable_desc *new;

		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
			return NULL;

		if (type == TABLE_PTE) {
			/*
			 * m68k doesn't have SPLIT_PTE_PTLOCKS because it
			 * doesn't have SMP.
			 */
			pgtable_pte_page_ctor(virt_to_page(page));
		}

		mmu_page_ctor(page);

		new = PD_PTABLE(page);
		PD_MARKBITS(new) = ptable_mask(type) - 1;
		list_add_tail(new, dp);

		return (pmd_t *)page;
	}

	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += ptable_size(type))
		;
	PD_MARKBITS(dp) = mask & ~tmp;
	if (!PD_MARKBITS(dp)) {
		/* move to end of list */
		list_move_tail(dp, &ptable_list[type]);
	}
	return page_address(PD_PAGE(dp)) + off;
}
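
/*
 * Example (a sketch of the pattern the pgalloc wrappers are expected to
 * use; only names already used in this file are assumed):
 *
 *	pte_t *pte = get_pointer_table(TABLE_PTE);
 *	...
 *	free_pointer_table(pte, TABLE_PTE);
 *
 * The backing page is allocated on the first request only, and it is
 * freed again only once every slot in it has been returned (see
 * free_pointer_table() below).
 */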

int free_pointer_table(void *table, int type)
{
	ptable_desc *dp;
	unsigned long ptable = (unsigned long)table;
	unsigned long page = ptable & PAGE_MASK;
	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));

	dp = PD_PTABLE(page);
	if (PD_MARKBITS (dp) & mask)
		panic ("table already free!");

	PD_MARKBITS (dp) |= mask;

	if (PD_MARKBITS(dp) == ptable_mask(type)) {
		/* all tables in page are free, free page */
		list_del(dp);
		mmu_page_dtor((void *)page);
		if (type == TABLE_PTE)
			pgtable_pte_page_dtor(virt_to_page(page));
		free_page (page);
		return 1;
	} else if (ptable_list[type].next != dp) {
		/*
		 * move this descriptor to the front of the list, since
		 * it has one or more free tables.
		 */
		list_move(dp, &ptable_list[type]);
	}
	return 0;
}

/* size of memory already mapped in head.S */
extern __initdata unsigned long m68k_init_mapped_size;

extern unsigned long availmem;

static pte_t *last_pte_table __initdata = NULL;

static pte_t * __init kernel_page_table(void)
{
	pte_t *pte_table = last_pte_table;

	if (PAGE_ALIGNED(last_pte_table)) {
		pte_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
		if (!pte_table) {
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
					__func__, PAGE_SIZE, PAGE_SIZE);
		}

		clear_page(pte_table);
		mmu_page_ctor(pte_table);

		last_pte_table = pte_table;
	}

	last_pte_table += PTRS_PER_PTE;

	return pte_table;
}
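
/*
 * kernel_page_table() is a boot-time bump allocator: last_pte_table
 * starts out NULL and PAGE_ALIGNED(NULL) is true, so the first call grabs
 * and uncaches a fresh page; each call then hands out PTRS_PER_PTE
 * entries (one kernel page table), and a new page is fetched only when
 * the cursor has advanced onto the next page boundary.
 */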

static pmd_t *last_pmd_table __initdata = NULL;

static pmd_t * __init kernel_ptr_table(void)
{
	if (!last_pmd_table) {
		unsigned long pmd, last;
		int i;

		/* Find the last ptr table that was used in head.S and
		 * reuse the remaining space in that page for further
		 * ptr tables.
		 */
		last = (unsigned long)kernel_pg_dir;
		for (i = 0; i < PTRS_PER_PGD; i++) {
			pud_t *pud = (pud_t *)(&kernel_pg_dir[i]);

			if (!pud_present(*pud))
				continue;
			pmd = pgd_page_vaddr(kernel_pg_dir[i]);
			if (pmd > last)
				last = pmd;
		}

		last_pmd_table = (pmd_t *)last;
#ifdef DEBUG
		printk("kernel_ptr_init: %p\n", last_pmd_table);
#endif
	}

	last_pmd_table += PTRS_PER_PMD;
	if (PAGE_ALIGNED(last_pmd_table)) {
		last_pmd_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
		if (!last_pmd_table)
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		clear_page(last_pmd_table);
		mmu_page_ctor(last_pmd_table);
	}

	return last_pmd_table;
}
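
/*
 * kernel_ptr_table() works the same way for pointer tables, except that
 * the initial cursor is recovered by scanning kernel_pg_dir for the
 * highest pointer table installed by head.S, so the remainder of that
 * page is used up before any new page is allocated.
 */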
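/*
 * Sketch of the strategy used below: on 020/030, whole PGDIR_SIZE or
 * PMD_SIZE chunks are mapped with early-termination descriptors that
 * point straight at physical memory; on 040/060, full three-level tables
 * are built with kernel_ptr_table() and kernel_page_table(). Virtual
 * page 0 is left invalid (descriptor value 0), presumably so that NULL
 * dereferences fault.
 */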
static void __init map_node(int node)
{
	unsigned long physaddr, virtaddr, size;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	size = m68k_memory[node].size;
	physaddr = m68k_memory[node].addr;
	virtaddr = (unsigned long)phys_to_virt(physaddr);
	physaddr |= m68k_supervisor_cachemode |
		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
	if (CPU_IS_040_OR_060)
		physaddr |= _PAGE_GLOBAL040;

	while (size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PMD_SIZE-1)))
			printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
				virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		if (virtaddr && CPU_IS_020_OR_030) {
			if (!(virtaddr & (PGDIR_SIZE-1)) &&
			    size >= PGDIR_SIZE) {
#ifdef DEBUG
				printk ("[very early term]");
#endif
				pgd_val(*pgd_dir) = physaddr;
				size -= PGDIR_SIZE;
				virtaddr += PGDIR_SIZE;
				physaddr += PGDIR_SIZE;
				continue;
			}
		}
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (!pud_present(*pud_dir)) {
			pmd_dir = kernel_ptr_table();
#ifdef DEBUG
			printk ("[new pointer %p]", pmd_dir);
#endif
			pud_set(pud_dir, pmd_dir);
		} else
			pmd_dir = pmd_offset(pud_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			if (virtaddr) {
#ifdef DEBUG
				printk ("[early term]");
#endif
				pmd_val(*pmd_dir) = physaddr;
				physaddr += PMD_SIZE;
			} else {
				int i;
#ifdef DEBUG
				printk ("[zero map]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);

				pte_val(*pte_dir++) = 0;
				physaddr += PAGE_SIZE;
				for (i = 1; i < PTRS_PER_PTE; physaddr += PAGE_SIZE, i++)
					pte_val(*pte_dir++) = physaddr;
			}
			size -= PMD_SIZE;
			virtaddr += PMD_SIZE;
		} else {
			if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
				printk ("[new table]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);
			}
			pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

			if (virtaddr) {
				if (!pte_present(*pte_dir))
					pte_val(*pte_dir) = physaddr;
			} else
				pte_val(*pte_dir) = 0;
			size -= PAGE_SIZE;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
		}

	}
#ifdef DEBUG
	printk("\n");
#endif
}

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
	unsigned long min_addr, max_addr;
	unsigned long addr;
	int i;

#ifdef DEBUG
	printk ("start of paging_init (%p, %lx)\n", kernel_pg_dir, availmem);
#endif

	/* Fix the cache mode in the page descriptors for the 680[46]0.  */
	if (CPU_IS_040_OR_060) {
		int i;
#ifndef mm_cachebits
		mm_cachebits = _PAGE_CACHE040;
#endif
		for (i = 0; i < 16; i++)
			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
	}

	min_addr = m68k_memory[0].addr;
	max_addr = min_addr + m68k_memory[0].size;
	memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0,
			  MEMBLOCK_NONE);
	for (i = 1; i < m68k_num_memory;) {
		if (m68k_memory[i].addr < min_addr) {
			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
				m68k_memory[i].addr, m68k_memory[i].size);
			printk("Fix your bootloader or use a memfile to make use of this area!\n");
			m68k_num_memory--;
			memmove(m68k_memory + i, m68k_memory + i + 1,
				(m68k_num_memory - i) * sizeof(struct m68k_mem_info));
			continue;
		}
		memblock_add_node(m68k_memory[i].addr, m68k_memory[i].size, i,
				  MEMBLOCK_NONE);
		addr = m68k_memory[i].addr + m68k_memory[i].size;
		if (addr > max_addr)
			max_addr = addr;
		i++;
	}
	m68k_memoffset = min_addr - PAGE_OFFSET;
	m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6;
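
	/*
	 * Assumption behind the "- 6" above: the shift is picked so that
	 * the min_addr..max_addr span is covered by at most 2^6 = 64
	 * slots of the node lookup table that m68k_setup_node() fills in
	 * further down.
	 */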

	module_fixup(NULL, __start_fixup, __stop_fixup);
	flush_icache();

	high_memory = phys_to_virt(max_addr);

	min_low_pfn = availmem >> PAGE_SHIFT;
	max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT;

	/* Reserve kernel text/data/bss and the memory allocated in head.S */
	memblock_reserve(m68k_memory[0].addr, availmem - m68k_memory[0].addr);

	/*
	 * Map the physical memory available into the kernel virtual
	 * address space. Make sure memblock will not try to allocate
	 * pages beyond the memory we already mapped in head.S
	 */
	memblock_set_bottom_up(true);

	for (i = 0; i < m68k_num_memory; i++) {
		m68k_setup_node(i);
		map_node(i);
	}

	flush_tlb_all();

	early_memtest(min_addr, max_addr);

	/*
	 * Allocate the single zeroed page that empty_zero_page (the
	 * kernel's shared zero page) will point at.
	 */
	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers
	 */
	set_fc(USER_DATA);

#ifdef DEBUG
	printk ("before free_area_init\n");
#endif
	for (i = 0; i < m68k_num_memory; i++)
		if (node_present_pages(i))
			node_set_state(i, N_NORMAL_MEMORY);

	max_zone_pfn[ZONE_DMA] = memblock_end_of_DRAM();
	free_area_init(max_zone_pfn);
}