cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

startup.c (9517B)


// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/elf.h>
#include <asm/boot_data.h>
#include <asm/sections.h>
#include <asm/cpu_mf.h>
#include <asm/setup.h>
#include <asm/kasan.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/uv.h>
#include "decompressor.h"
#include "boot.h"
#include "uv.h"

unsigned long __bootdata_preserved(__kaslr_offset);
unsigned long __bootdata(__amode31_base);
unsigned long __bootdata_preserved(VMALLOC_START);
unsigned long __bootdata_preserved(VMALLOC_END);
struct page *__bootdata_preserved(vmemmap);
unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata(ident_map_size);
int __bootdata(is_full_image) = 1;
struct initrd_data __bootdata(initrd_data);

u64 __bootdata_preserved(stfle_fac_list[16]);
u64 __bootdata_preserved(alt_stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);

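/*
 * Print an error message on the early SCLP console and halt the
 * system by entering disabled wait.
 */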
void error(char *x)
{
	sclp_early_printk("\n\n");
	sclp_early_printk(x);
	sclp_early_printk("\n\n -- System halted");

	disabled_wait();
}

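/*
 * Set up the load-program-parameter (LPP) register: tag this CPU with
 * LPP_MAGIC so that hardware measurement samples taken during boot can
 * be attributed, provided facility 40 (load-program-parameter) is
 * installed.
 */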
static void setup_lpp(void)
{
	S390_lowcore.current_pid = 0;
	S390_lowcore.lpp = LPP_MAGIC;
	if (test_facility(40))
		lpp(&S390_lowcore.lpp);
}

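/*
 * With an uncompressed kernel there is no decompressor scratch area:
 * the first address that is safe to use follows directly after the
 * loaded image and its BSS.
 */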
#ifdef CONFIG_KERNEL_UNCOMPRESSED
unsigned long mem_safe_offset(void)
{
	return vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
}
#endif

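/*
 * Move the initrd up to addr if it currently starts below it, so that
 * it is not overwritten while the kernel image is decompressed and
 * relocated.
 */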
static void rescue_initrd(unsigned long addr)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
		return;
	if (!initrd_data.start || !initrd_data.size)
		return;
	if (addr <= initrd_data.start)
		return;
	memmove((void *)addr, (void *)initrd_data.start, initrd_data.size);
	initrd_data.start = addr;
}

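/*
 * Copy the decompressor's .boot.data and .boot.preserved.data sections
 * into the corresponding sections of the decompressed kernel image.
 */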
static void copy_bootdata(void)
{
	if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
		error(".boot.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
	if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
		error(".boot.preserved.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
}

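/*
 * Apply the dynamic relocations from the kernel's .rela.dyn section so
 * the image runs correctly at its (possibly KASLR-shifted) load
 * address. offset is the KASLR offset, or 0 if not randomized.
 */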
static void handle_relocs(unsigned long offset)
{
	Elf64_Rela *rela_start, *rela_end, *rela;
	int r_type, r_sym, rc;
	Elf64_Addr loc, val;
	Elf64_Sym *dynsym;

	rela_start = (Elf64_Rela *) vmlinux.rela_dyn_start;
	rela_end = (Elf64_Rela *) vmlinux.rela_dyn_end;
	dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
	for (rela = rela_start; rela < rela_end; rela++) {
		loc = rela->r_offset + offset;
		val = rela->r_addend;
		r_sym = ELF64_R_SYM(rela->r_info);
		if (r_sym) {
			if (dynsym[r_sym].st_shndx != SHN_UNDEF)
				val += dynsym[r_sym].st_value + offset;
		} else {
			/*
			 * 0 == undefined symbol table index (STN_UNDEF),
			 * used for R_390_RELATIVE, only add KASLR offset
			 */
			val += offset;
		}
		r_type = ELF64_R_TYPE(rela->r_info);
		rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0);
		if (rc)
			error("Unknown relocation type");
	}
}

/*
 * Merge information from several sources into a single ident_map_size value.
 * "ident_map_size" represents the upper limit of physical memory we may ever
 * reach. It covers not only online memory but may also include standby
 * (offline) memory. "ident_map_size" could be lower than the actual standby
 * or even online memory present, due to limiting factors. We should never go
 * above this limit. It is the size of our identity mapping.
 *
 * Consider the following factors:
 * 1. max_physmem_end - end of physical memory online or standby.
 *    Always <= end of the last online memory block (get_mem_detect_end()).
 * 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
 *    kernel is able to support.
 * 3. "mem=" kernel command line option which limits physical memory usage.
 * 4. OLDMEM_BASE which is a kdump memory limit when the kernel is executed as
 *    a crash kernel.
 * 5. "hsa" size which is a memory limit when the kernel is executed during
 *    zfcp/nvme dump.
 */
static void setup_ident_map_size(unsigned long max_physmem_end)
{
	unsigned long hsa_size;

	ident_map_size = max_physmem_end;
	if (memory_limit)
		ident_map_size = min(ident_map_size, memory_limit);
	ident_map_size = min(ident_map_size, 1UL << MAX_PHYSMEM_BITS);

#ifdef CONFIG_CRASH_DUMP
	if (oldmem_data.start) {
		kaslr_enabled = 0;
		ident_map_size = min(ident_map_size, oldmem_data.size);
	} else if (ipl_block_valid && is_ipl_block_dump()) {
		kaslr_enabled = 0;
		if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
			ident_map_size = min(ident_map_size, hsa_size);
	}
#endif
}

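/*
 * Lay out the kernel virtual address space: choose a 3- or 4-level
 * page table layout and carve the modules area, the vmalloc area and
 * the vmemmap array out of the top of the address space, working
 * downwards.
 */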
static void setup_kernel_memory_layout(void)
{
	unsigned long vmemmap_start;
	unsigned long rte_size;
	unsigned long pages;

	pages = ident_map_size / PAGE_SIZE;
	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
	vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);

	/* choose kernel address space layout: 4 or 3 levels. */
	vmemmap_start = round_up(ident_map_size, _REGION3_SIZE);
	if (IS_ENABLED(CONFIG_KASAN) ||
	    vmalloc_size > _REGION2_SIZE ||
	    vmemmap_start + vmemmap_size + vmalloc_size + MODULES_LEN >
		    _REGION2_SIZE) {
		MODULES_END = _REGION1_SIZE;
		rte_size = _REGION2_SIZE;
	} else {
		MODULES_END = _REGION2_SIZE;
		rte_size = _REGION3_SIZE;
	}
	/*
	 * Force modules and the vmalloc area below the ultravisor secure
	 * storage limit, so that any vmalloc allocation we do can be used
	 * to back secure guest storage.
	 */
	adjust_to_uv_max(&MODULES_END);
#ifdef CONFIG_KASAN
	/* force vmalloc and modules below kasan shadow */
	MODULES_END = min(MODULES_END, KASAN_SHADOW_START);
#endif
	MODULES_VADDR = MODULES_END - MODULES_LEN;
	VMALLOC_END = MODULES_VADDR;

	/* allow the vmalloc area to occupy up to about half of the remaining virtual space */
	vmalloc_size = min(vmalloc_size, round_down(VMALLOC_END / 2, _REGION3_SIZE));
	VMALLOC_START = VMALLOC_END - vmalloc_size;

	/* split remaining virtual space between 1:1 mapping & vmemmap array */
	pages = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
	pages = SECTION_ALIGN_UP(pages);
	/* keep vmemmap_start aligned to a top level region table entry */
	vmemmap_start = round_down(VMALLOC_START - pages * sizeof(struct page), rte_size);
	/* vmemmap_start is the future VMEM_MAX_PHYS, make sure it is within MAX_PHYSMEM */
	vmemmap_start = min(vmemmap_start, 1UL << MAX_PHYSMEM_BITS);
	/* make sure the identity map doesn't overlap with vmemmap */
	ident_map_size = min(ident_map_size, vmemmap_start);
	vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
	/* make sure vmemmap doesn't overlap with the vmalloc area */
	VMALLOC_START = max(vmemmap_start + vmemmap_size, VMALLOC_START);
	vmemmap = (struct page *)vmemmap_start;
}

/*
 * This function clears the BSS section of the decompressed Linux kernel
 * and NOT the decompressor's.
 */
static void clear_bss_section(void)
{
	memset((void *)vmlinux.default_lma + vmlinux.image_size, 0, vmlinux.bss_size);
}

/*
 * Set vmalloc area size to an 8th of (potential) physical memory
 * size, unless size has been set by kernel command line parameter.
 */
static void setup_vmalloc_size(void)
{
	unsigned long size;

	if (vmalloc_size_set)
		return;
	size = round_up(ident_map_size / 8, _SEGMENT_SIZE);
	vmalloc_size = max(size, vmalloc_size);
}

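/*
 * Shift all load-address dependent fields of the vmlinux info block by
 * the KASLR offset.
 */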
static void offset_vmlinux_info(unsigned long offset)
{
	vmlinux.default_lma += offset;
	*(unsigned long *)(&vmlinux.entry) += offset;
	vmlinux.bootdata_off += offset;
	vmlinux.bootdata_preserved_off += offset;
	vmlinux.rela_dyn_start += offset;
	vmlinux.rela_dyn_end += offset;
	vmlinux.dynsym_start += offset;
}

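/*
 * Reserve room after safe_addr for relocating the .amode31 section
 * (code and data that must run with 31-bit addressing).
 */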
static unsigned long reserve_amode31(unsigned long safe_addr)
{
	__amode31_base = PAGE_ALIGN(safe_addr);
	return safe_addr + vmlinux.amode31_size;
}

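/*
 * Entry point of the C part of the boot/decompressor phase. Sets up the
 * early environment and memory layout, optionally randomizes the kernel
 * base (KASLR), decompresses and relocates the kernel image, and
 * finally jumps to the decompressed kernel's entry point.
 */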
void startup_kernel(void)
{
	unsigned long random_lma;
	unsigned long safe_addr;
	void *img;

	initrd_data.start = parmarea.initrd_start;
	initrd_data.size = parmarea.initrd_size;
	oldmem_data.start = parmarea.oldmem_base;
	oldmem_data.size = parmarea.oldmem_size;

	setup_lpp();
	store_ipl_parmblock();
	safe_addr = mem_safe_offset();
	safe_addr = reserve_amode31(safe_addr);
	safe_addr = read_ipl_report(safe_addr);
	uv_query_info();
	rescue_initrd(safe_addr);
	sclp_early_read_info();
	setup_boot_command_line();
	parse_boot_command_line();
	sanitize_prot_virt_host();
	setup_ident_map_size(detect_memory());
	setup_vmalloc_size();
	setup_kernel_memory_layout();

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
		random_lma = get_random_base(safe_addr);
		if (random_lma) {
			__kaslr_offset = random_lma - vmlinux.default_lma;
			img = (void *)vmlinux.default_lma;
			offset_vmlinux_info(__kaslr_offset);
		}
	}

	if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
		img = decompress_kernel();
		memmove((void *)vmlinux.default_lma, img, vmlinux.image_size);
	} else if (__kaslr_offset)
		memcpy((void *)vmlinux.default_lma, img, vmlinux.image_size);

	clear_bss_section();
	copy_bootdata();
	if (IS_ENABLED(CONFIG_RELOCATABLE))
		handle_relocs(__kaslr_offset);

	if (__kaslr_offset) {
		/*
		 * Save KASLR offset for early dumps, before vmcore_info is set.
		 * Mark as odd to distinguish it from a real vmcore_info pointer.
		 */
		S390_lowcore.vmcore_info = __kaslr_offset | 0x1UL;
		/* Clear non-relocated kernel */
		if (IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED))
			memset(img, 0, vmlinux.image_size);
	}
	vmlinux.entry();
}