cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

init.c (10023B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2011  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <asm/pgalloc.h>
#include <linux/sizes.h>
#include "ioremap.h"

pgd_t swapper_pg_dir[PTRS_PER_PGD];

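/* Register the platform's main memory range with memblock. */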
void __init generic_mem_init(void)
{
	memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

void __init __weak plat_mem_setup(void)
{
	/* Nothing to see here, move along. */
}

#ifdef CONFIG_MMU
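/*
 * Walk the kernel page tables down to the PTE covering addr,
 * allocating any missing intermediate levels along the way.
 */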
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	p4d = p4d_alloc(NULL, pgd, addr);
	if (unlikely(!p4d)) {
		p4d_ERROR(*p4d);
		return NULL;
	}

	pud = pud_alloc(NULL, p4d, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return NULL;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return NULL;
	}

	return pte_offset_kernel(pmd, addr);
}

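/*
 * Install a PTE mapping addr to phys with the given protection and
 * flush the stale TLB entry. _PAGE_WIRED mappings are additionally
 * pinned into a wired TLB slot.
 */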
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

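/* Tear down a mapping installed by set_pte_phys(). */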
static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}

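/* Map and unmap fixmap slots at their fixed virtual addresses. */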
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}

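/* Populate an empty pud with a freshly allocated pmd page. */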
static pmd_t * __init one_md_table_init(pud_t *pud)
{
	if (pud_none(*pud)) {
		pmd_t *pmd;

		pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pmd)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, pmd);
		BUG_ON(pmd != pmd_offset(pud, 0));
	}

	return pmd_offset(pud, 0);
}

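/* Populate an empty pmd with a freshly allocated pte page. */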
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte;

		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, pte);
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

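/* No kmap fixups are needed here; hand the pte back unchanged. */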
static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					    unsigned long vaddr, pte_t *lastpte)
{
	return pte;
}

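/*
 * Pre-allocate the pmd and pte pages covering [start, end) so that
 * later __set_fixmap() calls only need to write PTEs.
 */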
void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pud_index(vaddr);
	k = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				pte = page_table_kmap_check(one_page_table_init(pmd),
							    pmd, vaddr, pte);
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */

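/*
 * Allocate (on NUMA) and fill in the node's pglist_data from its
 * spanned PFN range.
 */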
void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NUMA
	NODE_DATA(nid) = memblock_alloc_try_nid(
				sizeof(struct pglist_data),
				SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
				MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!NODE_DATA(nid))
		panic("Can't allocate pgdat for node %d\n", nid);
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

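/*
 * Register every memblock range as active on node 0, bring the node
 * online, run the platform setup hook, and initialize sparsemem.
 */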
static void __init do_init_bootmem(void)
{
	unsigned long start_pfn, end_pfn;
	int i;

	/* Add active regions with valid PFNs. */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL)
		__add_active_range(0, start_pfn, end_pfn);

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	plat_mem_setup();

	sparse_init();
}

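/*
 * Reserve the kernel image, the pages below CONFIG_ZERO_PAGE_OFFSET,
 * the initrd, and the crashkernel region before memblock starts
 * handing out memory.
 */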
static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;
	u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
	u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Reserve the kernel text and the bootmem bitmap. We do this
	 * in two steps (first step was init_bootmem()), because this
	 * catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations
	 */
	check_for_initrd();
	reserve_crashkernel();
}

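/*
 * Set up the kernel's memory map: register and reserve memory,
 * build the initial page tables and fixmap coverage, and size the
 * memory zones.
 */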
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;

	sh_mv.mv_mem_init();

	early_reserve_mem();

	/*
	 * Once the early reservations are out of the way, give the
	 * platforms a chance to kick out some memory.
	 */
	if (sh_mv.mv_mem_reserve)
		sh_mv.mv_mem_reserve();

	memblock_enforce_memory_limit(memory_limit);
	memblock_allow_resize();

	memblock_dump_all();

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();
	ioremap_fixed_init();

	/*
	 * We don't need to map the kernel through the TLB, as it is
	 * permanently mapped using P1. So clear the entire pgd.
	 */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/*
	 * Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value.
	 */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * pte's will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init(max_zone_pfns);
}


unsigned int mem_init_done = 0;

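/*
 * Release memblock memory to the page allocator, set up the zero
 * page, and report the virtual memory layout.
 */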
void __init mem_init(void)
{
	pg_data_t *pgdat;

	high_memory = NULL;
	for_each_online_pgdat(pgdat)
		high_memory = max_t(void *, high_memory,
				    __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));

	memblock_free_all();

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	pr_info("virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
		uncached_start, uncached_end, uncached_size >> 20,
#endif

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}

#ifdef CONFIG_MEMORY_HOTPLUG
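/* Hotplug a new memory range into ZONE_NORMAL. */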
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
		return -EINVAL;

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, start_pfn, nr_pages, params);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}

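/* Tear down a previously hotplugged memory range. */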
void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */