cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

setup.c (20432B)


/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995 Waldorf Electronics
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03  Ralf Baechle
 * Copyright (C) 1996 Stoned Elipot
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002, 2007	 Maciej W. Rozycki
 */
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/screen_info.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kexec.h>
#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/decompress/generic.h>
#include <linux/of_fdt.h>
#include <linux/dmi.h>
#include <linux/crash_dump.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/bugs.h>
#include <asm/cache.h>
#include <asm/cdmm.h>
#include <asm/cpu.h>
#include <asm/debug.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
#include <asm/prom.h>

#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
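/* 1 MiB (0x100000 bytes) of space for a device tree blob appended to the kernel image. */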
char __section(".appended_dtb") __appended_dtb[0x100000];
#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */

struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(cpu_data);

#ifdef CONFIG_VT
struct screen_info screen_info;
#endif

/*
 * Setup information
 *
 * These are initialized so they are in the .data section
 */
unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;

EXPORT_SYMBOL(mips_machtype);

static char __initdata command_line[COMMAND_LINE_SIZE];
char __initdata arcs_cmdline[COMMAND_LINE_SIZE];

#ifdef CONFIG_CMDLINE_BOOL
static const char builtin_cmdline[] __initconst = CONFIG_CMDLINE;
#else
static const char builtin_cmdline[] __initconst = "";
#endif

/*
 * mips_io_port_base is the beginning of the address space to which x86-style
 * I/O ports are mapped.
 */
unsigned long mips_io_port_base = -1;
EXPORT_SYMBOL(mips_io_port_base);

static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };
static struct resource bss_resource = { .name = "Kernel bss", };

unsigned long __kaslr_offset __ro_after_init;
EXPORT_SYMBOL(__kaslr_offset);

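/* Marker value for the address-aliasing probe in detect_memory_region(). */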
static void *detect_magic __initdata = detect_memory_region;

#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
unsigned long ARCH_PFN_OFFSET;
EXPORT_SYMBOL(ARCH_PFN_OFFSET);
#endif

void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
{
	void *dm = &detect_magic;
	phys_addr_t size;

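	/*
	 * Double the probe size until the detect_magic value reads back
	 * at dm + size, i.e. the address has aliased (wrapped) past the
	 * end of physical RAM.
	 */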
	for (size = sz_min; size < sz_max; size <<= 1) {
		if (!memcmp(dm, dm + size, sizeof(detect_magic)))
			break;
	}

	pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
		((unsigned long long) size) / SZ_1M,
		(unsigned long long) start,
		((unsigned long long) sz_min) / SZ_1M,
		((unsigned long long) sz_max) / SZ_1M);

	memblock_add(start, size);
}

/*
 * Manage initrd
 */
#ifdef CONFIG_BLK_DEV_INITRD

static int __init rd_start_early(char *p)
{
	unsigned long start = memparse(p, &p);

#ifdef CONFIG_64BIT
	/* Guess if the sign extension was forgotten by bootloader */
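	/* Casting through int sign-extends a 32-bit KSEG0/KSEG1 address to 64 bits. */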
	if (start < XKPHYS)
		start = (int)start;
#endif
	initrd_start = start;
	initrd_end += start;
	return 0;
}
early_param("rd_start", rd_start_early);

static int __init rd_size_early(char *p)
{
	initrd_end += memparse(p, &p);
	return 0;
}
early_param("rd_size", rd_size_early);

/* Returns the next free pfn after the initrd */
static unsigned long __init init_initrd(void)
{
	unsigned long end;

	/*
	 * Board specific code or command line parser should have
	 * already set up initrd_start and initrd_end. In these cases
	 * perform sanity checks and use them if all looks good.
	 */
	if (!initrd_start || initrd_end <= initrd_start)
		goto disable;

	if (initrd_start & ~PAGE_MASK) {
		pr_err("initrd start must be page aligned\n");
		goto disable;
	}
	if (initrd_start < PAGE_OFFSET) {
		pr_err("initrd start < PAGE_OFFSET\n");
		goto disable;
	}

	/*
	 * Sanitize initrd addresses. For example, firmware can't know
	 * whether it needs to pass 64-bit values when the kernel has been
	 * built as a pure 32-bit binary. We also need to switch from KSEG0
	 * to XKPHYS addresses now, so the code can safely use __pa().
	 */
	end = __pa(initrd_end);
	initrd_end = (unsigned long)__va(end);
	initrd_start = (unsigned long)__va(__pa(initrd_start));

	ROOT_DEV = Root_RAM0;
	return PFN_UP(end);
disable:
	initrd_start = 0;
	initrd_end = 0;
	return 0;
}

/*
 * In some conditions (e.g. big endian bootloader with a little endian
 * kernel), the initrd might appear byte swapped.  Try to detect this and
 * byte swap it if needed.
 */
static void __init maybe_bswap_initrd(void)
{
#if defined(CONFIG_CPU_CAVIUM_OCTEON)
	u64 buf;

	/* Check for CPIO signature */
	if (!memcmp((void *)initrd_start, "070701", 6))
		return;

	/* Check for compressed initrd */
	if (decompress_method((unsigned char *)initrd_start, 8, NULL))
		return;

	/* Try again with a byte swapped header */
	buf = swab64p((u64 *)initrd_start);
	if (!memcmp(&buf, "070701", 6) ||
	    decompress_method((unsigned char *)(&buf), 8, NULL)) {
		unsigned long i;

		pr_info("Byteswapped initrd detected\n");
		for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8)
			swab64s((u64 *)i);
	}
#endif
}

static void __init finalize_initrd(void)
{
	unsigned long size = initrd_end - initrd_start;

	if (size == 0) {
		printk(KERN_INFO "Initrd not found or empty");
		goto disable;
	}
	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		printk(KERN_ERR "Initrd extends beyond end of memory");
		goto disable;
	}

	maybe_bswap_initrd();

	memblock_reserve(__pa(initrd_start), size);
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
		initrd_start, size);
	return;
disable:
	printk(KERN_CONT " - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}

#else  /* !CONFIG_BLK_DEV_INITRD */

static unsigned long __init init_initrd(void)
{
	return 0;
}

#define finalize_initrd()	do {} while (0)

#endif

/*
 * Initialize the bootmem allocator. It also sets up initrd-related data
 * if needed.
 */
#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON64) && defined(CONFIG_NUMA))

static void __init bootmem_init(void)
{
	init_initrd();
	finalize_initrd();
}

#else  /* !CONFIG_SGI_IP27 */

static void __init bootmem_init(void)
{
	phys_addr_t ramstart, ramend;
	unsigned long start, end;
	int i;

	ramstart = memblock_start_of_DRAM();
	ramend = memblock_end_of_DRAM();

	/*
	 * Sanity check any INITRD first. We don't take it into account
	 * for bootmem setup initially, rely on the end-of-kernel-code
	 * as our memory range starting point. Once bootmem is inited we
	 * will reserve the area used for the initrd.
	 */
	init_initrd();

	/* Reserve memory occupied by kernel. */
	memblock_reserve(__pa_symbol(&_text),
			__pa_symbol(&_end) - __pa_symbol(&_text));

	/* max_low_pfn is not a number of pages but the end pfn of low mem */

#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
	ARCH_PFN_OFFSET = PFN_UP(ramstart);
#else
	/*
	 * Reserve any memory between the start of RAM and PHYS_OFFSET
	 */
	if (ramstart > PHYS_OFFSET)
		memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET);

	if (PFN_UP(ramstart) > ARCH_PFN_OFFSET) {
		pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
			(unsigned long)((PFN_UP(ramstart) - ARCH_PFN_OFFSET) * sizeof(struct page)),
			(unsigned long)(PFN_UP(ramstart) - ARCH_PFN_OFFSET));
	}
#endif

	min_low_pfn = ARCH_PFN_OFFSET;
	max_pfn = PFN_DOWN(ramend);
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
		/*
		 * Skip highmem here so we get an accurate max_low_pfn if low
		 * memory stops short of high memory.
		 * If the region overlaps HIGHMEM_START, end is clipped so
		 * max_low_pfn excludes the highmem portion.
		 */
		if (start >= PFN_DOWN(HIGHMEM_START))
			continue;
		if (end > PFN_DOWN(HIGHMEM_START))
			end = PFN_DOWN(HIGHMEM_START);
		if (end > max_low_pfn)
			max_low_pfn = end;
	}

	if (min_low_pfn >= max_low_pfn)
		panic("Incorrect memory mapping !!!");

	if (max_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
		highstart_pfn = PFN_DOWN(HIGHMEM_START);
		highend_pfn = max_pfn;
#else
		max_low_pfn = PFN_DOWN(HIGHMEM_START);
		max_pfn = max_low_pfn;
#endif
	}

	/*
	 * Reserve initrd memory if needed.
	 */
	finalize_initrd();
}

#endif	/* CONFIG_SGI_IP27 */

static int usermem __initdata;

static int __init early_parse_mem(char *p)
{
	phys_addr_t start, size;

	if (!p) {
		pr_err("mem parameter is empty, do nothing\n");
		return -EINVAL;
	}

	/*
	 * If a user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}
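	/* Format is mem=<size>[@<start>]; without '@' the region starts at 0. */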
	start = 0;
	size = memparse(p, &p);
	if (*p == '@')
		start = memparse(p + 1, &p);

	if (IS_ENABLED(CONFIG_NUMA))
		memblock_add_node(start, size, pa_to_nid(start), MEMBLOCK_NONE);
	else
		memblock_add(start, size);

	return 0;
}
early_param("mem", early_parse_mem);

static int __init early_parse_memmap(char *p)
{
	char *oldp;
	u64 start_at, mem_size;

	if (!p)
		return -EINVAL;

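	/*
	 * Supported forms: memmap=nn[KMG]@ss[KMG] adds a RAM region and
	 * memmap=nn[KMG]$ss[KMG] additionally reserves it; exactmap and
	 * the ACPI-data form nn#ss are rejected on MIPS.
	 */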
	if (!strncmp(p, "exactmap", 8)) {
		pr_err("\"memmap=exactmap\" invalid on MIPS\n");
		return 0;
	}

	oldp = p;
	mem_size = memparse(p, &p);
	if (p == oldp)
		return -EINVAL;

	if (*p == '@') {
		start_at = memparse(p+1, &p);
		memblock_add(start_at, mem_size);
	} else if (*p == '#') {
		pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n");
		return -EINVAL;
	} else if (*p == '$') {
		start_at = memparse(p+1, &p);
		memblock_add(start_at, mem_size);
		memblock_reserve(start_at, mem_size);
	} else {
		pr_err("\"memmap\" invalid format!\n");
		return -EINVAL;
	}

	if (*p == '\0') {
		usermem = 1;
		return 0;
	} else
		return -EINVAL;
}
early_param("memmap", early_parse_memmap);

static void __init mips_reserve_vmcore(void)
{
#ifdef CONFIG_PROC_VMCORE
	phys_addr_t start, end;
	u64 i;

	if (!elfcorehdr_size) {
		for_each_mem_range(i, &start, &end) {
			if (elfcorehdr_addr >= start && elfcorehdr_addr < end) {
				/*
				 * Reserve from the elf core header to the end of
				 * the memory segment, that should all be kdump
				 * reserved memory.
				 */
				elfcorehdr_size = end - elfcorehdr_addr;
				break;
			}
		}
	}

	pr_info("Reserving %ldKB of memory at %ldKB for kdump\n",
		(unsigned long)elfcorehdr_size >> 10, (unsigned long)elfcorehdr_addr >> 10);

	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
#endif
}

#ifdef CONFIG_KEXEC

/* 64M alignment for crash kernel regions */
#define CRASH_ALIGN	SZ_64M
#define CRASH_ADDR_MAX	SZ_512M

static void __init mips_parse_crashkernel(void)
{
	unsigned long long total_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	total_mem = memblock_phys_mem_size();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret != 0 || crash_size <= 0)
		return;

	if (crash_base <= 0) {
		crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
						       CRASH_ALIGN,
						       CRASH_ADDR_MAX);
		if (!crash_base) {
			pr_warn("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long start;

		start = memblock_phys_alloc_range(crash_size, 1,
						  crash_base,
						  crash_base + crash_size);
		if (start != crash_base) {
			pr_warn("Invalid memory region reserved for crash kernel\n");
			return;
		}
	}

	crashk_res.start = crash_base;
	crashk_res.end	 = crash_base + crash_size - 1;
}

static void __init request_crashkernel(struct resource *res)
{
	int ret;

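	/* crashk_res is still zero-sized if no crash kernel was reserved. */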
	if (crashk_res.start == crashk_res.end)
		return;

	ret = request_resource(res, &crashk_res);
	if (!ret)
		pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
			(unsigned long)(resource_size(&crashk_res) >> 20),
			(unsigned long)(crashk_res.start  >> 20));
}
#else /* !defined(CONFIG_KEXEC)		*/
static void __init mips_parse_crashkernel(void)
{
}

static void __init request_crashkernel(struct resource *res)
{
}
#endif /* !defined(CONFIG_KEXEC)  */

static void __init check_kernel_sections_mem(void)
{
	phys_addr_t start = __pa_symbol(&_text);
	phys_addr_t size = __pa_symbol(&_end) - start;

	if (!memblock_is_region_memory(start, size)) {
		pr_info("Kernel sections are not in the memory maps\n");
		memblock_add(start, size);
	}
}

static void __init bootcmdline_append(const char *s, size_t max)
{
	if (!s[0] || !max)
		return;

	if (boot_command_line[0])
		strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);

	strlcat(boot_command_line, s, max);
}

#ifdef CONFIG_OF_EARLY_FLATTREE

static int __init bootcmdline_scan_chosen(unsigned long node, const char *uname,
					  int depth, void *data)
{
	bool *dt_bootargs = data;
	const char *p;
	int l;

	if (depth != 1 || !data ||
	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
		return 0;

	p = of_get_flat_dt_prop(node, "bootargs", &l);
	if (p != NULL && l > 0) {
		bootcmdline_append(p, min(l, COMMAND_LINE_SIZE));
		*dt_bootargs = true;
	}

	return 1;
}

#endif /* CONFIG_OF_EARLY_FLATTREE */

static void __init bootcmdline_init(void)
{
	bool dt_bootargs = false;

	/*
	 * If CMDLINE_OVERRIDE is enabled then initializing the command line is
	 * trivial - we simply use the built-in command line unconditionally &
	 * unmodified.
	 */
	if (IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) {
		strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
		return;
	}

	/*
	 * If the user specified a built-in command line &
	 * MIPS_CMDLINE_BUILTIN_EXTEND, then the built-in command line is
	 * prepended to arguments from the bootloader or DT so we'll copy them
	 * to the start of boot_command_line here. Otherwise, empty
	 * boot_command_line to undo anything early_init_dt_scan_chosen() did.
	 */
	if (IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
		strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	else
		boot_command_line[0] = 0;

#ifdef CONFIG_OF_EARLY_FLATTREE
	/*
	 * If we're configured to take boot arguments from DT, look for those
	 * now.
	 */
	if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB) ||
	    IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND))
		of_scan_flat_dt(bootcmdline_scan_chosen, &dt_bootargs);
#endif

	/*
	 * If we didn't get any arguments from DT (regardless of whether that's
	 * because we weren't configured to look for them, or because we looked
	 * & found none) then we'll take arguments from the bootloader.
	 * plat_mem_setup() should have filled arcs_cmdline with arguments from
	 * the bootloader.
	 */
	if (IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND) || !dt_bootargs)
		bootcmdline_append(arcs_cmdline, COMMAND_LINE_SIZE);

	/*
	 * If the user specified a built-in command line & we didn't already
	 * prepend it, we append it to boot_command_line here.
	 */
	if (IS_ENABLED(CONFIG_CMDLINE_BOOL) &&
	    !IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
		bootcmdline_append(builtin_cmdline, COMMAND_LINE_SIZE);
}

/*
 * arch_mem_init - initialize memory management subsystem
 *
 *  o plat_mem_setup() detects the memory configuration and will record detected
 *    memory areas using memblock_add.
 *
 * At this stage the memory configuration of the system is known to the
 * kernel but the generic memory management system is still entirely
 * uninitialized.
 *
 *  o bootmem_init()
 *  o sparse_init()
 *  o paging_init()
 *  o dma_contiguous_reserve()
 *
 * At this stage the bootmem allocator is ready to use.
 *
 * NOTE: historically plat_mem_setup did the entire platform initialization.
 *	 This was rather impractical because it meant plat_mem_setup had to
 *	 get along without any kind of memory allocator.  To keep old code
 *	 from breaking, plat_setup was renamed to plat_mem_setup and a second
 *	 platform initialization hook for anything else was introduced.
 */
static void __init arch_mem_init(char **cmdline_p)
{
	/* call board setup routine */
	plat_mem_setup();
	memblock_set_bottom_up(true);

	bootcmdline_init();
	strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	parse_early_param();

	if (usermem)
		pr_info("User-defined physical RAM map overwrite\n");

	check_kernel_sections_mem();

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

#ifndef CONFIG_NUMA
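	/* Without NUMA, attribute all memblock regions to node 0. */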
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
#endif
	bootmem_init();

	/*
	 * Prevent memblock from allocating high memory.
	 * This cannot be done before max_low_pfn is detected, so up to
	 * this point it is only possible to reserve physical memory with
	 * memblock_reserve; memblock_alloc* can be used only after this
	 * point.
	 */
	memblock_set_current_limit(PFN_PHYS(max_low_pfn));

	mips_reserve_vmcore();

	mips_parse_crashkernel();
	device_tree_init();

	/*
	 * In order to reduce the possibility of a kernel panic when we
	 * fail to get IO TLB memory under CONFIG_SWIOTLB, it is better to
	 * allocate as little low memory as possible before
	 * plat_swiotlb_setup(), so make sparse_init() use top-down
	 * allocation.
	 */
	memblock_set_bottom_up(false);
	sparse_init();
	memblock_set_bottom_up(true);

	plat_swiotlb_setup();

	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));

	/* Reserve for hibernation. */
	memblock_reserve(__pa_symbol(&__nosave_begin),
		__pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin));

	early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
}

static void __init resource_init(void)
{
	phys_addr_t start, end;
	u64 i;

	if (UNCAC_BASE != IO_BASE)
		return;

	code_resource.start = __pa_symbol(&_text);
	code_resource.end = __pa_symbol(&_etext) - 1;
	data_resource.start = __pa_symbol(&_etext);
	data_resource.end = __pa_symbol(&_edata) - 1;
	bss_resource.start = __pa_symbol(&__bss_start);
	bss_resource.end = __pa_symbol(&__bss_stop) - 1;

	for_each_mem_range(i, &start, &end) {
		struct resource *res;

		res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
		if (!res)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(struct resource));

		res->start = start;
		/*
		 * In memblock, end points to the first byte after the
		 * range while in resources, end points to the last byte in
		 * the range.
		 */
		res->end = end - 1;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		res->name = "System RAM";

		request_resource(&iomem_resource, res);

		/*
		 * We don't know which RAM region contains kernel data,
		 * so we try it repeatedly and let the resource manager
		 * test it.
		 */
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
		request_resource(res, &bss_resource);
		request_crashkernel(res);
	}
}

#ifdef CONFIG_SMP
static void __init prefill_possible_map(void)
{
	int i, possible = num_possible_cpus();

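	/* Clamp to nr_cpu_ids, then mark every remaining CPU id impossible. */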
	if (possible > nr_cpu_ids)
		possible = nr_cpu_ids;

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++)
		set_cpu_possible(i, false);

	nr_cpu_ids = possible;
}
#else
static inline void prefill_possible_map(void) {}
#endif

void __init setup_arch(char **cmdline_p)
{
	cpu_probe();
	mips_cm_probe();
	prom_init();

	setup_early_fdc_console();
#ifdef CONFIG_EARLY_PRINTK
	setup_early_printk();
#endif
	cpu_report();
	check_bugs_early();

#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#endif
#endif

	arch_mem_init(cmdline_p);
	dmi_setup();

	resource_init();
	plat_smp_setup();
	prefill_possible_map();

	cpu_cache_init();
	paging_init();

	memblock_dump_all();
}

unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;

#ifdef CONFIG_DEBUG_FS
struct dentry *mips_debugfs_dir;
static int __init debugfs_mips(void)
{
	mips_debugfs_dir = debugfs_create_dir("mips", NULL);
	return 0;
}
arch_initcall(debugfs_mips);
#endif

#ifdef CONFIG_DMA_NONCOHERENT
static int __init setcoherentio(char *str)
{
	dma_default_coherent = true;
	pr_info("Hardware DMA cache coherency (command line)\n");
	return 0;
}
early_param("coherentio", setcoherentio);

static int __init setnocoherentio(char *str)
{
	dma_default_coherent = false;
	pr_info("Software DMA cache coherency (command line)\n");
	return 0;
}
early_param("nocoherentio", setnocoherentio);
#endif