cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

machine_kexec.c (7125B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2005, 2011
 *
 * Author(s): Rolf Adelsberger,
 *	      Michael Holzheu <holzheu@linux.vnet.ibm.com>
 */

#include <linux/device.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/ftrace.h>
#include <linux/debug_locks.h>
#include <asm/cio.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/ipl.h>
#include <asm/diag.h>
#include <asm/elf.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/os_info.h>
#include <asm/set_memory.h>
#include <asm/stacktrace.h>
#include <asm/switch_to.h>
#include <asm/nmi.h>
#include <asm/sclp.h>

typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long,
				  unsigned long);

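/*
 * relocate_kernel is the position-independent routine that performs the
 * final move of the new kernel image; machine_kexec_prepare() below copies
 * it (relocate_kernel_len bytes) into the control page, and
 * __do_machine_kexec() branches to that copy.
 */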
extern const unsigned char relocate_kernel[];
extern const unsigned long long relocate_kernel_len;

#ifdef CONFIG_CRASH_DUMP

/*
 * Reset the system, copy boot CPU registers to absolute zero,
 * and jump to the kdump image
 */
static void __do_machine_kdump(void *image)
{
	int (*start_kdump)(int);
	unsigned long prefix;

	/* store_status() saved the prefix register to lowcore */
	prefix = (unsigned long) S390_lowcore.prefixreg_save_area;

	/* Now do the reset */
	s390_reset_system();

	/*
	 * Copy dump CPU store status info to absolute zero.
	 * This needs to be done *after* s390_reset_system has set the
	 * prefix register of this CPU to zero
	 */
	memcpy(absolute_pointer(__LC_FPREGS_SAVE_AREA),
	       (void *)(prefix + __LC_FPREGS_SAVE_AREA), 512);

	__load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
	start_kdump = (void *)((struct kimage *) image)->start;
	start_kdump(1);

	/* Die if start_kdump returns */
	disabled_wait();
}

/*
 * Start kdump: create an LGR log entry, store the status of all CPUs and
 * branch to __do_machine_kdump.
 */
static noinline void __machine_kdump(void *image)
{
	struct mcesa *mcesa;
	union ctlreg2 cr2_old, cr2_new;
	int this_cpu, cpu;

	lgr_info_log();
	/* Get status of the other CPUs */
	this_cpu = smp_find_processor_id(stap());
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		if (smp_store_status(cpu))
			continue;
	}
	/* Store status of the boot CPU */
	mcesa = __va(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
	if (MACHINE_HAS_VX)
		save_vx_regs((__vector128 *) mcesa->vector_save_area);
	if (MACHINE_HAS_GS) {
		__ctl_store(cr2_old.val, 2, 2);
		cr2_new = cr2_old;
		cr2_new.gse = 1;
		__ctl_load(cr2_new.val, 2, 2);
		save_gs_cb((struct gs_cb *) mcesa->guarded_storage_save_area);
		__ctl_load(cr2_old.val, 2, 2);
	}
	/*
	 * To create a good backchain for this CPU in the dump, store_status
	 * is passed the address of a function. The address is saved into
	 * the PSW save area of the boot CPU and the function is invoked as
	 * a tail call of store_status. The backchain in the dump will look
	 * like this:
	 *   restart_int_handler -> __machine_kexec -> __do_machine_kdump
	 * The call to store_status() will not return.
	 */
	store_status(__do_machine_kdump, image);
}

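/*
 * Call the purgatory entry point (image->start) with parameter "0" so that
 * only the checksum check is performed; DAT is disabled around the call.
 */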
static unsigned long do_start_kdump(unsigned long addr)
{
	struct kimage *image = (struct kimage *) addr;
	int (*start_kdump)(int) = (void *)image->start;
	int rc;

	__arch_local_irq_stnsm(0xfb); /* disable DAT */
	rc = start_kdump(0);
	__arch_local_irq_stosm(0x04); /* enable DAT */
	return rc;
}

#endif /* CONFIG_CRASH_DUMP */

/*
 * Check if kdump checksums are valid: We call purgatory with parameter "0"
 */
static bool kdump_csum_valid(struct kimage *image)
{
#ifdef CONFIG_CRASH_DUMP
	int rc;

	preempt_disable();
	rc = call_on_stack(1, S390_lowcore.nodat_stack, unsigned long, do_start_kdump,
			   unsigned long, (unsigned long)image);
	preempt_enable();
	return rc == 0;
#else
	return false;
#endif
}

#ifdef CONFIG_CRASH_DUMP

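/*
 * Free the reserved crashkernel pages in [begin, end) back to the page
 * allocator and record the remaining crashkernel area (if any) in os_info.
 */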
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
{
	unsigned long addr, size;

	for (addr = begin; addr < end; addr += PAGE_SIZE)
		free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
	size = begin - crashk_res.start;
	if (size)
		os_info_crashkernel_add(crashk_res.start, size);
	else
		os_info_crashkernel_add(0, 0);
}

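/*
 * Map the crashkernel memory read-only (protect != 0) or read-write
 * (protect == 0) to protect a loaded kdump image against stray writes.
 */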
static void crash_protect_pages(int protect)
{
	unsigned long size;

	if (!crashk_res.end)
		return;
	size = resource_size(&crashk_res);
	if (protect)
		set_memory_ro(crashk_res.start, size >> PAGE_SHIFT);
	else
		set_memory_rw(crashk_res.start, size >> PAGE_SHIFT);
}

void arch_kexec_protect_crashkres(void)
{
	crash_protect_pages(1);
}

void arch_kexec_unprotect_crashkres(void)
{
	crash_protect_pages(0);
}

#endif

/*
 * Give back memory to the hypervisor before a new kdump image is loaded
 */
static int machine_kexec_prepare_kdump(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (MACHINE_IS_VM)
		diag10_range(PFN_DOWN(crashk_res.start),
			     PFN_DOWN(crashk_res.end - crashk_res.start + 1));
	return 0;
#else
	return -EINVAL;
#endif
}

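/*
 * Prepare the loaded image: for kdump images give back memory to the
 * hypervisor, for default images copy the relocate_kernel routine into
 * the control page.
 */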
int machine_kexec_prepare(struct kimage *image)
{
	void *reboot_code_buffer;

	if (image->type == KEXEC_TYPE_CRASH)
		return machine_kexec_prepare_kdump();

	/* We don't support anything but the default image type for now. */
	if (image->type != KEXEC_TYPE_DEFAULT)
		return -EINVAL;

	/* Get the destination where the assembler code should be copied to. */
	reboot_code_buffer = (void *) page_to_phys(image->control_code_page);

	/* Then copy it */
	memcpy(reboot_code_buffer, relocate_kernel, relocate_kernel_len);
	return 0;
}

void machine_kexec_cleanup(struct kimage *image)
{
}

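/*
 * Export s390 specific symbols and values (lowcore_ptr, high_memory, the
 * AMODE31 range and the KASLR offset) through the vmcoreinfo note.
 */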
void arch_crash_save_vmcoreinfo(void)
{
	VMCOREINFO_SYMBOL(lowcore_ptr);
	VMCOREINFO_SYMBOL(high_memory);
	VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
	vmcoreinfo_append_str("SAMODE31=%lx\n", __samode31);
	vmcoreinfo_append_str("EAMODE31=%lx\n", __eamode31);
	vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
	put_abs_lowcore(vmcore_info, paddr_vmcoreinfo_note());
}

void machine_shutdown(void)
{
}

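/*
 * Crash shutdown hook: preserve the re-IPL block in os_info via
 * set_os_info_reipl_block().
 */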
void machine_crash_shutdown(struct pt_regs *regs)
{
	set_os_info_reipl_block();
}

/*
 * Do normal kexec
 */
static void __do_machine_kexec(void *data)
{
	unsigned long diag308_subcode;
	relocate_kernel_t data_mover;
	struct kimage *image = data;

	s390_reset_system();
	data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);

	__arch_local_irq_stnsm(0xfb); /* disable DAT - avoid no-execute */
	/* Call the moving routine */
	diag308_subcode = DIAG308_CLEAR_RESET;
	if (sclp.has_iplcc)
		diag308_subcode |= DIAG308_FLAG_EI;
	(*data_mover)(&image->head, image->start, diag308_subcode);

	/* Die if kexec returns */
	disabled_wait();
}

/*
 * Reset the system and call either kdump or normal kexec
 */
static void __machine_kexec(void *data)
{
	pfault_fini();
	tracing_off();
	debug_locks_off();
#ifdef CONFIG_CRASH_DUMP
	if (((struct kimage *) data)->type == KEXEC_TYPE_CRASH)
		__machine_kdump(data);
#endif
	__do_machine_kexec(data);
}

/*
 * Do either kdump or normal kexec. In the kdump case we first ask
 * purgatory whether the kdump checksums are valid.
 */
void machine_kexec(struct kimage *image)
{
	if (image->type == KEXEC_TYPE_CRASH && !kdump_csum_valid(image))
		return;
	tracer_disable();
	smp_send_stop();
	smp_call_ipl_cpu(__machine_kexec, image);
}