cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

setup.c (11487B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/cache.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/panic_notifier.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/efi.h>
#include <linux/psci.h>
#include <linux/sched/task.h>
#include <linux/mm.h>

#include <asm/acpi.h>
#include <asm/fixmap.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/elf.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/kasan.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
#include <asm/efi.h>
#include <asm/xen/hypervisor.h>
#include <asm/mmu_context.h>

static int num_standard_resources;
static struct resource *standard_resources;

phys_addr_t __fdt_pointer __initdata;

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define kernel_code mem_res[0]
#define kernel_data mem_res[1]

/*
 * The recorded values of x0 .. x3 upon kernel entry.
 */
u64 __cacheline_aligned boot_args[4];

void __init smp_setup_processor_id(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	set_cpu_logical_map(0, mpidr);

	pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
		(unsigned long)mpidr, read_cpuid_id());
}

bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpu_logical_map(cpu);
}

struct mpidr_hash mpidr_hash;
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. The resulting algorithm is a
 *			  collision-free hash carried out through shifting
 *			  and ORing.
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity, fs[4], bits[4], ls;
	u64 mask = 0;
	/*
	 * Pre-scan the list of MPIDRs and filter out bits that do
	 * not contribute to affinity levels, i.e. they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits %#llx\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 4; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB and LSB bit positions
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR_EL1 by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 32-bit value space to a
	 * compact set of values. This is equivalent to hashing
	 * the MPIDR_EL1 through shifting and ORing. It is a collision-free
	 * hash, though not minimal, since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, e.g. MPIDR_EL1[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
				  fs[3] - (bits[2] + bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.shift_aff[3],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
}
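
/*
 * Illustrative sketch, for exposition only: how the shifts computed by
 * smp_build_mpidr_hash() turn an MPIDR value into a dense index. The
 * helper name is hypothetical and nothing in this file calls it;
 * MPIDR_LEVEL_MASK/MPIDR_LEVEL_SHIFT come from <asm/cputype.h>, which is
 * already included. The cpu_resume path applies the equivalent
 * shift-and-OR in assembly.
 */
static u32 __maybe_unused mpidr_hash_index(u64 mpidr)
{
	u32 index = 0;
	int level;

	for (level = 0; level < 4; level++) {
		/* Keep only the bits that actually toggle at this level. */
		u64 aff = mpidr & mpidr_hash.mask &
			  ((u64)MPIDR_LEVEL_MASK << MPIDR_LEVEL_SHIFT(level));

		/* Pack them next to the levels accumulated so far. */
		index |= (u32)(aff >> mpidr_hash.shift_aff[level]);
	}

	/* The result lies in [0, mpidr_hash_size()). */
	return index;
}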

static void *early_fdt_ptr __initdata;

void __init *get_early_fdt_ptr(void)
{
	return early_fdt_ptr;
}

asmlinkage void __init early_fdt_map(u64 dt_phys)
{
	int fdt_size;

	early_fixmap_init();
	early_fdt_ptr = fixmap_remap_fdt(dt_phys, &fdt_size, PAGE_KERNEL);
}

static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
	int size;
	void *dt_virt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
	const char *name;

	if (dt_virt)
		memblock_reserve(dt_phys, size);

	if (!dt_virt || !early_init_dt_scan(dt_virt)) {
		pr_crit("\n"
			"Error: invalid device tree blob at physical address %pa (virtual address 0x%px)\n"
			"The dtb must be 8-byte aligned and must not exceed 2 MB in size\n"
			"\nPlease check your bootloader.",
			&dt_phys, dt_virt);

		/*
		 * Note that in this _really_ early stage we cannot even BUG()
		 * or oops, so the least terrible thing to do is cpu_relax(),
		 * or else we could end up printing uninitialized data, etc.
		 */
		while (true)
			cpu_relax();
	}

	/* Early fixups are done, map the FDT as read-only now */
	fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);

	name = of_flat_dt_get_machine_name();
	if (!name)
		return;

	pr_info("Machine model: %s\n", name);
	dump_stack_set_arch_desc("%s (DT)", name);
}
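
/*
 * Illustrative sketch, for exposition only: the placement rules quoted in
 * the error message above (8-byte alignment, at most 2 MB), written as a
 * standalone predicate. The helper name is hypothetical and unused here;
 * the real checks happen inside fixmap_remap_fdt(), which rejects blobs
 * violating these constraints.
 */
static bool __init __maybe_unused dtb_placement_ok(phys_addr_t dt_phys, int size)
{
	return IS_ALIGNED(dt_phys, 8) && size <= (2 * 1024 * 1024);
}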

static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;
	unsigned long i = 0;
	size_t res_size;

	kernel_code.start   = __pa_symbol(_stext);
	kernel_code.end     = __pa_symbol(__init_begin - 1);
	kernel_data.start   = __pa_symbol(_sdata);
	kernel_data.end     = __pa_symbol(_end - 1);
	insert_resource(&iomem_resource, &kernel_code);
	insert_resource(&iomem_resource, &kernel_data);

	num_standard_resources = memblock.memory.cnt;
	res_size = num_standard_resources * sizeof(*standard_resources);
	standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);
	if (!standard_resources)
		panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);

	for_each_mem_region(region) {
		res = &standard_resources[i++];
		if (memblock_is_nomap(region)) {
			res->name  = "reserved";
			res->flags = IORESOURCE_MEM;
			res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region));
			res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1;
		} else {
			res->name  = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
			res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		}

		insert_resource(&iomem_resource, res);
	}
}
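
/*
 * Illustrative example, for exposition only (the addresses below are made
 * up): with the resource setup above, /proc/iomem typically ends up looking
 * like this. insert_resource() nests the "Kernel code"/"Kernel data" entries
 * under the "System RAM" region that covers them, while NOMAP regions show
 * up as plain "reserved" ranges:
 *
 *   40000000-bfffffff : System RAM
 *     40210000-40f4ffff : Kernel code
 *     41200000-413dffff : Kernel data
 *   c0000000-c00fffff : reserved
 */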

static int __init reserve_memblock_reserved_regions(void)
{
	u64 i, j;

	for (i = 0; i < num_standard_resources; ++i) {
		struct resource *mem = &standard_resources[i];
		phys_addr_t r_start, r_end, mem_size = resource_size(mem);

		if (!memblock_is_region_reserved(mem->start, mem_size))
			continue;

		for_each_reserved_mem_range(j, &r_start, &r_end) {
			resource_size_t start, end;

			start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
			end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);

			if (start > mem->end || end < mem->start)
				continue;

			reserve_region_with_split(mem, start, end, "reserved");
		}
	}

	return 0;
}
arch_initcall(reserve_memblock_reserved_regions);

u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };

u64 cpu_logical_map(unsigned int cpu)
{
	return __cpu_logical_map[cpu];
}

void __init __no_sanitize_address setup_arch(char **cmdline_p)
{
	setup_initial_init_mm(_stext, _etext, _edata, _end);

	*cmdline_p = boot_command_line;

	/*
	 * If we know now that we are going to need KPTI, use non-global
	 * mappings from the start, avoiding the cost of rewriting
	 * everything later.
	 */
	arm64_use_ng_mappings = kaslr_requires_kpti();

	early_fixmap_init();
	early_ioremap_init();

	setup_machine_fdt(__fdt_pointer);

	/*
	 * Initialise the static keys early as they may be enabled by the
	 * cpufeature code and early parameters.
	 */
	jump_label_init();
	parse_early_param();

	/*
	 * Unmask asynchronous aborts and fiq after bringing up possible
	 * earlycon. (Report possible System Errors once we are able to
	 * report that they occurred.)
	 */
	local_daif_restore(DAIF_PROCCTX_NOIRQ);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to the zero page to avoid speculatively fetching new entries.
	 */
	cpu_uninstall_idmap();

	xen_early_init();
	efi_init();

	if (!efi_enabled(EFI_BOOT) && ((u64)_text % MIN_KIMG_ALIGN) != 0)
		pr_warn(FW_BUG "Kernel image misaligned at boot, please fix your bootloader!");

	arm64_memblock_init();

	paging_init();

	acpi_table_upgrade();

	/* Parse the ACPI tables for possible boot-time configuration */
	acpi_boot_table_init();

	if (acpi_disabled)
		unflatten_device_tree();

	bootmem_init();

	kasan_init();

	request_standard_resources();

	early_ioremap_reset();

	if (acpi_disabled)
		psci_dt_init();
	else
		psci_acpi_init();

	init_bootcpu_ops();
	smp_init_cpus();
	smp_build_mpidr_hash();

	/* Init percpu seeds for random tags after CPUs are set up. */
	kasan_init_sw_tags();

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Make sure init_thread_info.ttbr0 always generates translation
	 * faults in case uaccess_enable() is inadvertently called by the init
	 * thread.
	 */
	init_task.thread_info.ttbr0 = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
#endif

	if (boot_args[1] || boot_args[2] || boot_args[3]) {
		pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
			"\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
			"This indicates a broken bootloader or old kernel\n",
			boot_args[1], boot_args[2], boot_args[3]);
	}
}

static inline bool cpu_can_disable(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	const struct cpu_operations *ops = get_cpu_ops(cpu);

	if (ops && ops->cpu_can_disable)
		return ops->cpu_can_disable(cpu);
#endif
	return false;
}

static int __init topology_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
		cpu->hotpluggable = cpu_can_disable(i);
		register_cpu(cpu, i);
	}

	return 0;
}
subsys_initcall(topology_init);

static void dump_kernel_offset(void)
{
	const unsigned long offset = kaslr_offset();

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && offset > 0) {
		pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
			 offset, KIMAGE_VADDR);
		pr_emerg("PHYS_OFFSET: 0x%llx\n", PHYS_OFFSET);
	} else {
		pr_emerg("Kernel Offset: disabled\n");
	}
}
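
/*
 * Illustrative sketch, for exposition only: the offset printed by
 * dump_kernel_offset() is the amount KASLR slid the image, so subtracting
 * it from a runtime text address recovers the link-time address (useful
 * when matching a panic trace against System.map or vmlinux). The helper
 * name is hypothetical and unused here; kaslr_offset() comes from
 * <asm/memory.h>.
 */
static unsigned long __maybe_unused kaslr_unslide(unsigned long runtime_addr)
{
	return runtime_addr - kaslr_offset();
}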

static int arm64_panic_block_dump(struct notifier_block *self,
				  unsigned long v, void *p)
{
	dump_kernel_offset();
	dump_cpu_features();
	dump_mem_limit();
	return 0;
}

static struct notifier_block arm64_panic_block = {
	.notifier_call = arm64_panic_block_dump
};

static int __init register_arm64_panic_block(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &arm64_panic_block);
	return 0;
}
device_initcall(register_arm64_panic_block);