setup.c (8313B)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *  Lennox Wu <lennox.wu@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2020 FORTH-ICS/CARV
 *  Nick Kossifidis <mick@ics.forth.gr>
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/sched/task.h>
#include <linux/smp.h>
#include <linux/efi.h>
#include <linux/crash_dump.h>

#include <asm/alternative.h>
#include <asm/cpu_ops.h>
#include <asm/early_ioremap.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/thread_info.h>
#include <asm/kasan.h>
#include <asm/efi.h>

#include "head.h"

#if defined(CONFIG_DUMMY_CONSOLE) || defined(CONFIG_EFI)
struct screen_info screen_info __section(".data") = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif

/*
 * The lucky hart to first increment this variable will boot the other cores.
 * This is used before the kernel initializes the BSS so it can't be in the
 * BSS.
 */
atomic_t hart_lottery __section(".sdata")
#ifdef CONFIG_XIP_KERNEL
= ATOMIC_INIT(0xC001BEEF)
#endif
;
unsigned long boot_cpu_hartid;
static DEFINE_PER_CPU(struct cpu, cpu_devices);

/*
 * Place kernel memory regions on the resource tree so that
 * kexec-tools can retrieve them from /proc/iomem. While there
 * also add "System RAM" regions for compatibility with other
 * archs, and the rest of the known regions for completeness.
 */
static struct resource kimage_res = { .name = "Kernel image", };
static struct resource code_res = { .name = "Kernel code", };
static struct resource data_res = { .name = "Kernel data", };
static struct resource rodata_res = { .name = "Kernel rodata", };
static struct resource bss_res = { .name = "Kernel bss", };
#ifdef CONFIG_CRASH_DUMP
static struct resource elfcorehdr_res = { .name = "ELF Core hdr", };
#endif

static int __init add_resource(struct resource *parent,
				struct resource *res)
{
	int ret = 0;

	ret = insert_resource(parent, res);
	if (ret < 0) {
		pr_err("Failed to add a %s resource at %llx\n",
			res->name, (unsigned long long) res->start);
		return ret;
	}

	return 1;
}
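
/*
 * Illustrative sketch, not part of the original sources: the nested
 * registration done by add_kernel_resources() below is what lets
 * kexec-tools (or anything else reading /proc/iomem) see the kernel
 * image as a parent region with its segments as children, roughly
 * like this (all addresses hypothetical):
 *
 *	80200000-812fffff : Kernel image
 *	  80200000-807fffff : Kernel code
 *	  80800000-809fffff : Kernel rodata
 *	  80a00000-80dfffff : Kernel data
 *	  80e00000-812fffff : Kernel bss
 */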

static int __init add_kernel_resources(void)
{
	int ret = 0;

	/*
	 * The memory region of the kernel image is continuous and
	 * was reserved on setup_bootmem, register it here as a
	 * resource, with the various segments of the image as
	 * child nodes.
	 */

	code_res.start = __pa_symbol(_text);
	code_res.end = __pa_symbol(_etext) - 1;
	code_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	rodata_res.start = __pa_symbol(__start_rodata);
	rodata_res.end = __pa_symbol(__end_rodata) - 1;
	rodata_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	data_res.start = __pa_symbol(_data);
	data_res.end = __pa_symbol(_edata) - 1;
	data_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	bss_res.start = __pa_symbol(__bss_start);
	bss_res.end = __pa_symbol(__bss_stop) - 1;
	bss_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	kimage_res.start = code_res.start;
	kimage_res.end = bss_res.end;
	kimage_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	ret = add_resource(&iomem_resource, &kimage_res);
	if (ret < 0)
		return ret;

	ret = add_resource(&kimage_res, &code_res);
	if (ret < 0)
		return ret;

	ret = add_resource(&kimage_res, &rodata_res);
	if (ret < 0)
		return ret;

	ret = add_resource(&kimage_res, &data_res);
	if (ret < 0)
		return ret;

	ret = add_resource(&kimage_res, &bss_res);

	return ret;
}
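
/*
 * Sizing sketch for init_resources() below (counts hypothetical): with,
 * say, memblock.memory.cnt == 2 and memblock.reserved.cnt == 6, nine
 * struct resource slots are pre-allocated; the extra slot covers the
 * case where the memblock_alloc() call itself grows
 * memblock.reserved.cnt. Slots are handed out from the end of the array
 * (res_idx counts down), and any slots still unused at the end are
 * returned to memblock.
 */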

static void __init init_resources(void)
{
	struct memblock_region *region = NULL;
	struct resource *res = NULL;
	struct resource *mem_res = NULL;
	size_t mem_res_sz = 0;
	int num_resources = 0, res_idx = 0;
	int ret = 0;

	/* + 1 as memblock_alloc() might increase memblock.reserved.cnt */
	num_resources = memblock.memory.cnt + memblock.reserved.cnt + 1;
	res_idx = num_resources - 1;

	mem_res_sz = num_resources * sizeof(*mem_res);
	mem_res = memblock_alloc(mem_res_sz, SMP_CACHE_BYTES);
	if (!mem_res)
		panic("%s: Failed to allocate %zu bytes\n", __func__, mem_res_sz);

	/*
	 * Start by adding the reserved regions, if they overlap
	 * with /memory regions, insert_resource later on will take
	 * care of it.
	 */
	ret = add_kernel_resources();
	if (ret < 0)
		goto error;

#ifdef CONFIG_KEXEC_CORE
	if (crashk_res.start != crashk_res.end) {
		ret = add_resource(&iomem_resource, &crashk_res);
		if (ret < 0)
			goto error;
	}
#endif

#ifdef CONFIG_CRASH_DUMP
	if (elfcorehdr_size > 0) {
		elfcorehdr_res.start = elfcorehdr_addr;
		elfcorehdr_res.end = elfcorehdr_addr + elfcorehdr_size - 1;
		elfcorehdr_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		add_resource(&iomem_resource, &elfcorehdr_res);
	}
#endif

	for_each_reserved_mem_region(region) {
		res = &mem_res[res_idx--];

		res->name = "Reserved";
		res->flags = IORESOURCE_MEM | IORESOURCE_EXCLUSIVE;
		res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1;

		/*
		 * Ignore any other reserved regions within
		 * system memory.
		 */
		if (memblock_is_memory(res->start)) {
			/* Re-use this pre-allocated resource */
			res_idx++;
			continue;
		}

		ret = add_resource(&iomem_resource, res);
		if (ret < 0)
			goto error;
	}

	/* Add /memory regions to the resource tree */
	for_each_mem_region(region) {
		res = &mem_res[res_idx--];

		if (unlikely(memblock_is_nomap(region))) {
			res->name = "Reserved";
			res->flags = IORESOURCE_MEM | IORESOURCE_EXCLUSIVE;
		} else {
			res->name = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		}

		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;

		ret = add_resource(&iomem_resource, res);
		if (ret < 0)
			goto error;
	}

	/* Clean-up any unused pre-allocated resources */
	if (res_idx >= 0)
		memblock_free(mem_res, (res_idx + 1) * sizeof(*mem_res));
	return;

 error:
	/* Better an empty resource tree than an inconsistent one */
	release_child_resources(&iomem_resource);
	memblock_free(mem_res, mem_res_sz);
}

static void __init parse_dtb(void)
{
	/* Early scan of device tree from init memory */
	if (early_init_dt_scan(dtb_early_va)) {
		const char *name = of_flat_dt_get_machine_name();

		if (name) {
			pr_info("Machine model: %s\n", name);
			dump_stack_set_arch_desc("%s (DT)", name);
		}
		return;
	}

	pr_err("No DTB passed to the kernel\n");
#ifdef CONFIG_CMDLINE_FORCE
	strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
	pr_info("Forcing kernel command line to: %s\n", boot_command_line);
#endif
}

void __init setup_arch(char **cmdline_p)
{
	parse_dtb();
	setup_initial_init_mm(_stext, _etext, _edata, _end);

	*cmdline_p = boot_command_line;

	early_ioremap_setup();
	jump_label_init();
	parse_early_param();

	efi_init();
	paging_init();
#if IS_ENABLED(CONFIG_BUILTIN_DTB)
	unflatten_and_copy_device_tree();
#else
	if (early_init_dt_verify(__va(XIP_FIXUP(dtb_early_pa))))
		unflatten_device_tree();
	else
		pr_err("No DTB found in kernel mappings\n");
#endif
	misc_mem_init();

	init_resources();
	sbi_init();

#ifdef CONFIG_KASAN
	kasan_init();
#endif

#ifdef CONFIG_SMP
	setup_smp();
#endif

	riscv_fill_hwcap();
	apply_boot_alternatives();
}

static int __init topology_init(void)
{
	int i, ret;

	for_each_possible_cpu(i) {
		struct cpu *cpu = &per_cpu(cpu_devices, i);

		cpu->hotpluggable = cpu_has_hotplug(i);
		ret = register_cpu(cpu, i);
		if (unlikely(ret))
			pr_warn("Warning: %s: register_cpu %d failed (%d)\n",
			       __func__, i, ret);
	}

	return 0;
}
subsys_initcall(topology_init);

void free_initmem(void)
{
	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
		set_kernel_memory(lm_alias(__init_begin), lm_alias(__init_end),
				  IS_ENABLED(CONFIG_64BIT) ?
				  set_memory_rw : set_memory_rw_nx);

	free_initmem_default(POISON_FREE_INITMEM);
}
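
/*
 * Note on free_initmem() above (a sketch of the reasoning, not taken
 * from this file): with STRICT_KERNEL_RWX the init sections were mapped
 * with restricted permissions, so their linear-map alias is made
 * writable again before free_initmem_default() hands the pages back to
 * the page allocator. 32-bit additionally clears execute permission via
 * set_memory_rw_nx(), presumably because there the kernel executes from
 * the linear mapping itself, while 64-bit runs from a separate kernel
 * mapping.
 */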