cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

machine_kexec_file.c (4541B)


// SPDX-License-Identifier: GPL-2.0
/*
 * kexec_file for arm64
 *
 * Copyright (C) 2018 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 *
 * Most code is derived from arm64 port of kexec-tools
 */

#define pr_fmt(fmt) "kexec_file: " fmt

#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

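/*
 * Loaders probed in order by kexec_file_load(); on arm64 only the raw
 * Image format (kexec_image_ops) is supported through this interface.
 */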
const struct kexec_file_ops * const kexec_file_loaders[] = {
	&kexec_image_ops,
	NULL
};

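/*
 * Free the buffers set up during loading (the flattened device tree copy
 * and, for crash kernels, the ELF core headers) when the image is released.
 */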
int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	kvfree(image->arch.dtb);
	image->arch.dtb = NULL;

	vfree(image->elf_headers);
	image->elf_headers = NULL;
	image->elf_headers_sz = 0;

	return kexec_image_post_load_cleanup_default(image);
}

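/*
 * Build ELF core headers covering all memblock memory ranges except the
 * crashkernel reservation(s); the capture kernel later exposes them as
 * /proc/vmcore. Used only for KEXEC_TYPE_CRASH images.
 */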
static int prepare_elf_headers(void **addr, unsigned long *sz)
{
	struct crash_mem *cmem;
	unsigned int nr_ranges;
	int ret;
	u64 i;
	phys_addr_t start, end;

	nr_ranges = 1; /* for exclusion of crashkernel region */
	for_each_mem_range(i, &start, &end)
		nr_ranges++;

	cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL);
	if (!cmem)
		return -ENOMEM;

	cmem->max_nr_ranges = nr_ranges;
	cmem->nr_ranges = 0;
	for_each_mem_range(i, &start, &end) {
		cmem->ranges[cmem->nr_ranges].start = start;
		cmem->ranges[cmem->nr_ranges].end = end - 1;
		cmem->nr_ranges++;
	}

	/* Exclude crashkernel region */
	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		goto out;

	if (crashk_low_res.end) {
		ret = crash_exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end);
		if (ret)
			goto out;
	}

	ret = crash_prepare_elf64_headers(cmem, true, addr, sz);

out:
	kfree(cmem);
	return ret;
}

/*
 * Tries to add the initrd and DTB to the image. If no valid locations can be
 * found, this function undoes its changes to the image and returns non-zero.
 */
int load_other_segments(struct kimage *image,
			unsigned long kernel_load_addr,
			unsigned long kernel_size,
			char *initrd, unsigned long initrd_len,
			char *cmdline)
{
	struct kexec_buf kbuf;
	void *headers, *dtb = NULL;
	unsigned long headers_sz, initrd_load_addr = 0, dtb_len,
		      orig_segments = image->nr_segments;
	int ret = 0;

	kbuf.image = image;
	/* don't allocate anything below the kernel */
	kbuf.buf_min = kernel_load_addr + kernel_size;

	/* load elf core header */
	if (image->type == KEXEC_TYPE_CRASH) {
		ret = prepare_elf_headers(&headers, &headers_sz);
		if (ret) {
			pr_err("Preparing elf core header failed\n");
			goto out_err;
		}

		kbuf.buffer = headers;
		kbuf.bufsz = headers_sz;
		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
		kbuf.memsz = headers_sz;
		kbuf.buf_align = SZ_64K; /* largest supported page size */
		kbuf.buf_max = ULONG_MAX;
		kbuf.top_down = true;

		ret = kexec_add_buffer(&kbuf);
		if (ret) {
			vfree(headers);
			goto out_err;
		}
		image->elf_headers = headers;
		image->elf_load_addr = kbuf.mem;
		image->elf_headers_sz = headers_sz;

		pr_debug("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
			 image->elf_load_addr, kbuf.bufsz, kbuf.memsz);
	}

	/* load initrd */
	if (initrd) {
		kbuf.buffer = initrd;
		kbuf.bufsz = initrd_len;
		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
		kbuf.memsz = initrd_len;
		kbuf.buf_align = 0;
		/* within 1GB-aligned window of up to 32GB in size */
		kbuf.buf_max = round_down(kernel_load_addr, SZ_1G)
						+ (unsigned long)SZ_1G * 32;
		kbuf.top_down = false;

		ret = kexec_add_buffer(&kbuf);
		if (ret)
			goto out_err;
		initrd_load_addr = kbuf.mem;

		pr_debug("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
				initrd_load_addr, kbuf.bufsz, kbuf.memsz);
	}

	/* load dtb */
	dtb = of_kexec_alloc_and_setup_fdt(image, initrd_load_addr,
					   initrd_len, cmdline, 0);
	if (!dtb) {
		pr_err("Preparing for new dtb failed\n");
		ret = -EINVAL;
		goto out_err;
	}

	/* trim it */
	fdt_pack(dtb);
	dtb_len = fdt_totalsize(dtb);
	kbuf.buffer = dtb;
	kbuf.bufsz = dtb_len;
	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
	kbuf.memsz = dtb_len;
	/* not across 2MB boundary */
	kbuf.buf_align = SZ_2M;
	kbuf.buf_max = ULONG_MAX;
	kbuf.top_down = true;

	ret = kexec_add_buffer(&kbuf);
	if (ret)
		goto out_err;
	image->arch.dtb = dtb;
	image->arch.dtb_mem = kbuf.mem;

	pr_debug("Loaded dtb at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
			kbuf.mem, kbuf.bufsz, kbuf.memsz);

	return 0;

out_err:
	image->nr_segments = orig_segments;
	kvfree(dtb);
	return ret;
}
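
For context, this code runs in response to the kexec_file_load(2) syscall: userspace passes file descriptors for the kernel Image and, optionally, an initrd; the kernel probes the image through kexec_file_loaders[] and then calls load_other_segments() to place the initrd, the DTB and, for crash kernels, the ELF core headers. Below is a minimal, hypothetical userspace sketch of that invocation; the paths (/boot/Image, /boot/initrd.img) and the command line are placeholders, not part of this repository.

/* Hypothetical example: staging a new kernel via kexec_file_load(2). */
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* cmdline_len passed to the syscall must include the trailing NUL. */
	static const char cmdline[] = "console=ttyAMA0 root=/dev/vda1";
	int kernel_fd = open("/boot/Image", O_RDONLY);
	int initrd_fd = open("/boot/initrd.img", O_RDONLY);

	if (kernel_fd < 0 || initrd_fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * glibc has no wrapper for this syscall, so invoke it directly.
	 * Passing KEXEC_FILE_ON_CRASH instead of 0 would take the
	 * KEXEC_TYPE_CRASH path above and also load the ELF core headers.
	 */
	if (syscall(SYS_kexec_file_load, kernel_fd, initrd_fd,
		    sizeof(cmdline), cmdline, 0UL) < 0) {
		perror("kexec_file_load");
		return 1;
	}

	/* The staged kernel is started later, e.g. with "kexec -e". */
	return 0;
}

With KEXEC_FILE_ON_CRASH the segments are placed inside the crashkernel reservation, and prepare_elf_headers() excludes that reservation from the dumped memory map since it holds the capture kernel itself.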