cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

amd.c (22348B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 *  AMD CPU Microcode Update Driver for Linux
 *
 *  This driver allows upgrading microcode on F10h AMD
 *  CPUs and later.
 *
 *  Copyright (C) 2008-2011 Advanced Micro Devices Inc.
 *	          2013-2018 Borislav Petkov <bp@alien8.de>
 *
 *  Author: Peter Oruba <peter.oruba@amd.com>
 *
 *  Based on work by:
 *  Tigran Aivazian <aivazian.tigran@gmail.com>
 *
 *  early loader:
 *  Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 *  Author: Jacob Shin <jacob.shin@amd.com>
 *  Fixes: Borislav Petkov <bp@suse.de>
 */
#define pr_fmt(fmt) "microcode: " fmt

#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/pci.h>

#include <asm/microcode_amd.h>
#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/cpu.h>
#include <asm/msr.h>

static struct equiv_cpu_table {
	unsigned int num_entries;
	struct equiv_cpu_entry *entry;
} equiv_table;

/*
 * This points to the current valid container of microcode patches which we will
 * save from the initrd/builtin before jettisoning its contents. @mc is the
 * microcode patch we found to match.
 */
struct cont_desc {
	struct microcode_amd *mc;
	u32		     cpuid_1_eax;
	u32		     psize;
	u8		     *data;
	size_t		     size;
};

static u32 ucode_new_rev;
static u8 amd_ucode_patch[PATCH_MAX_SIZE];

/*
 * Microcode patch container file is prepended to the initrd in cpio
 * format. See Documentation/x86/microcode.rst
 */
static const char
ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";

static u16 find_equiv_id(struct equiv_cpu_table *et, u32 sig)
{
	unsigned int i;

	if (!et || !et->num_entries)
		return 0;

	for (i = 0; i < et->num_entries; i++) {
		struct equiv_cpu_entry *e = &et->entry[i];

		if (sig == e->installed_cpu)
			return e->equiv_cpu;

		e++;
	}
	return 0;
}

/*
 * Check whether there is a valid microcode container file at the beginning
 * of @buf of size @buf_size. Set @early to use this function in the early path.
 */
static bool verify_container(const u8 *buf, size_t buf_size, bool early)
{
	u32 cont_magic;

	if (buf_size <= CONTAINER_HDR_SZ) {
		if (!early)
			pr_debug("Truncated microcode container header.\n");

		return false;
	}

	cont_magic = *(const u32 *)buf;
	if (cont_magic != UCODE_MAGIC) {
		if (!early)
			pr_debug("Invalid magic value (0x%08x).\n", cont_magic);

		return false;
	}

	return true;
}

/*
 * Check whether there is a valid, non-truncated CPU equivalence table at the
 * beginning of @buf of size @buf_size. Set @early to use this function in the
 * early path.
 */
static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early)
{
	const u32 *hdr = (const u32 *)buf;
	u32 cont_type, equiv_tbl_len;

	if (!verify_container(buf, buf_size, early))
		return false;

	cont_type = hdr[1];
	if (cont_type != UCODE_EQUIV_CPU_TABLE_TYPE) {
		if (!early)
			pr_debug("Wrong microcode container equivalence table type: %u.\n",
			       cont_type);

		return false;
	}

	buf_size -= CONTAINER_HDR_SZ;

	equiv_tbl_len = hdr[2];
	if (equiv_tbl_len < sizeof(struct equiv_cpu_entry) ||
	    buf_size < equiv_tbl_len) {
		if (!early)
			pr_debug("Truncated equivalence table.\n");

		return false;
	}

	return true;
}

/*
 * Check whether there is a valid, non-truncated microcode patch section at the
 * beginning of @buf of size @buf_size. Set @early to use this function in the
 * early path.
 *
 * On success, the patch size read from the section header is returned to the
 * caller in @sh_psize.
 */
static bool
__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize, bool early)
{
	u32 p_type, p_size;
	const u32 *hdr;

	if (buf_size < SECTION_HDR_SIZE) {
		if (!early)
			pr_debug("Truncated patch section.\n");

		return false;
	}

	hdr = (const u32 *)buf;
	p_type = hdr[0];
	p_size = hdr[1];

	if (p_type != UCODE_UCODE_TYPE) {
		if (!early)
			pr_debug("Invalid type field (0x%x) in container file section header.\n",
				p_type);

		return false;
	}

	if (p_size < sizeof(struct microcode_header_amd)) {
		if (!early)
			pr_debug("Patch of size %u too short.\n", p_size);

		return false;
	}

	*sh_psize = p_size;

	return true;
}

/*
 * Check whether the passed remaining file @buf_size is large enough to contain
 * a patch of the indicated @sh_psize (and also whether this size does not
 * exceed the per-family maximum). @sh_psize is the size read from the section
 * header.
 */
static unsigned int __verify_patch_size(u8 family, u32 sh_psize, size_t buf_size)
{
	u32 max_size;

	if (family >= 0x15)
		return min_t(u32, sh_psize, buf_size);

#define F1XH_MPB_MAX_SIZE 2048
#define F14H_MPB_MAX_SIZE 1824

	switch (family) {
	case 0x10 ... 0x12:
		max_size = F1XH_MPB_MAX_SIZE;
		break;
	case 0x14:
		max_size = F14H_MPB_MAX_SIZE;
		break;
	default:
		WARN(1, "%s: WTF family: 0x%x\n", __func__, family);
		return 0;
	}

	if (sh_psize > min_t(u32, buf_size, max_size))
		return 0;

	return sh_psize;
}

/*
 * Verify the patch in @buf.
 *
 * Returns:
 * negative: on error
 * positive: patch is not for this family, skip it
 * 0: success
 */
static int
verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool early)
{
	struct microcode_header_amd *mc_hdr;
	unsigned int ret;
	u32 sh_psize;
	u16 proc_id;
	u8 patch_fam;

	if (!__verify_patch_section(buf, buf_size, &sh_psize, early))
		return -1;

	/*
	 * The section header length is not included in this indicated size
	 * but is present in the leftover file length so we need to subtract
	 * it before passing this value to the function below.
	 */
	buf_size -= SECTION_HDR_SIZE;

	/*
	 * Check if the remaining buffer is big enough to contain a patch of
	 * size sh_psize, as the section claims.
	 */
	if (buf_size < sh_psize) {
		if (!early)
			pr_debug("Patch of size %u truncated.\n", sh_psize);

		return -1;
	}

	ret = __verify_patch_size(family, sh_psize, buf_size);
	if (!ret) {
		if (!early)
			pr_debug("Per-family patch size mismatch.\n");
		return -1;
	}

	*patch_size = sh_psize;

	mc_hdr	= (struct microcode_header_amd *)(buf + SECTION_HDR_SIZE);
	if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
		if (!early)
			pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id);
		return -1;
	}

	proc_id	= mc_hdr->processor_rev_id;
	patch_fam = 0xf + (proc_id >> 12);
	if (patch_fam != family)
		return 1;

	return 0;
}

/*
 * This scans the ucode blob for the proper container as we can have multiple
 * containers glued together.
 * Returns the number of bytes consumed while scanning, or 0 if @ucode already
 * points at the container matching this CPU (or if verification failed).
 * @desc contains all the data we're going to use in later stages of the
 * application.
 */
static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc)
{
	struct equiv_cpu_table table;
	size_t orig_size = size;
	u32 *hdr = (u32 *)ucode;
	u16 eq_id;
	u8 *buf;

	if (!verify_equivalence_table(ucode, size, true))
		return 0;

	buf = ucode;

	table.entry = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ);
	table.num_entries = hdr[2] / sizeof(struct equiv_cpu_entry);

	/*
	 * Find the equivalence ID of our CPU in this table. Even if this table
	 * doesn't contain a patch for the CPU, scan through the whole container
	 * so that it can be skipped in case there are other containers appended.
	 */
	eq_id = find_equiv_id(&table, desc->cpuid_1_eax);

	buf  += hdr[2] + CONTAINER_HDR_SZ;
	size -= hdr[2] + CONTAINER_HDR_SZ;

	/*
	 * Scan through the rest of the container to find where it ends. We do
	 * some basic sanity-checking too.
	 */
	while (size > 0) {
		struct microcode_amd *mc;
		u32 patch_size;
		int ret;

		ret = verify_patch(x86_family(desc->cpuid_1_eax), buf, size, &patch_size, true);
		if (ret < 0) {
			/*
			 * Patch verification failed, skip to the next
			 * container, if there's one:
			 */
			goto out;
		} else if (ret > 0) {
			goto skip;
		}

		mc = (struct microcode_amd *)(buf + SECTION_HDR_SIZE);
		if (eq_id == mc->hdr.processor_rev_id) {
			desc->psize = patch_size;
			desc->mc = mc;
		}

skip:
		/* Skip patch section header too: */
		buf  += patch_size + SECTION_HDR_SIZE;
		size -= patch_size + SECTION_HDR_SIZE;
	}

	/*
	 * If we have found a patch (desc->mc), it means we're looking at the
	 * container which has a patch for this CPU so return 0 to mean, @ucode
	 * already points to the proper container. Otherwise, we return the size
	 * we scanned so that we can advance to the next container in the
	 * buffer.
	 */
	if (desc->mc) {
		desc->data = ucode;
		desc->size = orig_size - size;

		return 0;
	}

out:
	return orig_size - size;
}

/*
 * Scan the ucode blob for the proper container as we can have multiple
 * containers glued together.
 */
static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
{
	while (size) {
		size_t s = parse_container(ucode, size, desc);
		if (!s)
			return;

		/* catch wraparound */
		if (size >= s) {
			ucode += s;
			size  -= s;
		} else {
			return;
		}
	}
}

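/*
 * Point MSR_AMD64_PATCH_LOADER at the patch data and read back
 * MSR_AMD64_PATCH_LEVEL to check that the CPU accepted the update.
 */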
static int __apply_microcode_amd(struct microcode_amd *mc)
{
	u32 rev, dummy;

	native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc->hdr.data_code);

	/* verify patch application was successful */
	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
	if (rev != mc->hdr.patch_id)
		return -1;

	return 0;
}

/*
 * Early load occurs before we can vmalloc(). So we look for the microcode
 * patch container file in initrd, traverse equivalent cpu table, look for a
 * matching microcode patch, and update, all in initrd memory in place.
 * When vmalloc() is available for use later -- on 64-bit during first AP load,
 * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
 * load_microcode_amd() to save equivalent cpu table and microcode patches in
 * kernel heap memory.
 *
 * Returns true if a newer matching patch was found and applied, false otherwise.
 */
static bool
apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_patch)
{
	struct cont_desc desc = { 0 };
	u8 (*patch)[PATCH_MAX_SIZE];
	struct microcode_amd *mc;
	u32 rev, dummy, *new_rev;
	bool ret = false;

#ifdef CONFIG_X86_32
	new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
	patch	= (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
#else
	new_rev = &ucode_new_rev;
	patch	= &amd_ucode_patch;
#endif

	desc.cpuid_1_eax = cpuid_1_eax;

	scan_containers(ucode, size, &desc);

	mc = desc.mc;
	if (!mc)
		return ret;

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
	if (rev >= mc->hdr.patch_id)
		return ret;

	if (!__apply_microcode_amd(mc)) {
		*new_rev = mc->hdr.patch_id;
		ret      = true;

		if (save_patch)
			memcpy(patch, mc, min_t(u32, desc.psize, PATCH_MAX_SIZE));
	}

	return ret;
}

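/*
 * Look up the microcode blob built into the kernel image via
 * firmware_request_builtin(). Not used on 32-bit.
 */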
static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
{
	char fw_name[36] = "amd-ucode/microcode_amd.bin";
	struct firmware fw;

	if (IS_ENABLED(CONFIG_X86_32))
		return false;

	if (family >= 0x15)
		snprintf(fw_name, sizeof(fw_name),
			 "amd-ucode/microcode_amd_fam%.2xh.bin", family);

	if (firmware_request_builtin(&fw, fw_name)) {
		cp->size = fw.size;
		cp->data = (void *)fw.data;
		return true;
	}

	return false;
}

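/*
 * Find the microcode container: prefer a blob built into the kernel image,
 * otherwise search the initrd. On 32-bit this runs before paging is enabled,
 * so globals must be accessed through their physical addresses.
 */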
static void __load_ucode_amd(unsigned int cpuid_1_eax, struct cpio_data *ret)
{
	struct ucode_cpu_info *uci;
	struct cpio_data cp;
	const char *path;
	bool use_pa;

	if (IS_ENABLED(CONFIG_X86_32)) {
		uci	= (struct ucode_cpu_info *)__pa_nodebug(ucode_cpu_info);
		path	= (const char *)__pa_nodebug(ucode_path);
		use_pa	= true;
	} else {
		uci     = ucode_cpu_info;
		path	= ucode_path;
		use_pa	= false;
	}

	if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
		cp = find_microcode_in_initrd(path, use_pa);

	/* Needed in load_microcode_amd() */
	uci->cpu_sig.sig = cpuid_1_eax;

	*ret = cp;
}

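/* Early microcode load on the boot CPU; save the matching patch for the APs. */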
void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
{
	struct cpio_data cp = { };

	__load_ucode_amd(cpuid_1_eax, &cp);
	if (!(cp.data && cp.size))
		return;

	apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true);
}

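/*
 * Early microcode load on an AP: reuse the patch the BSP saved if it is newer
 * than what the AP is running; otherwise rescan the builtin/initrd container.
 */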
void load_ucode_amd_ap(unsigned int cpuid_1_eax)
{
	struct microcode_amd *mc;
	struct cpio_data cp;
	u32 *new_rev, rev, dummy;

	if (IS_ENABLED(CONFIG_X86_32)) {
		mc	= (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
		new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
	} else {
		mc	= (struct microcode_amd *)amd_ucode_patch;
		new_rev = &ucode_new_rev;
	}

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	/* Check whether we have saved a new patch already: */
	if (*new_rev && rev < mc->hdr.patch_id) {
		if (!__apply_microcode_amd(mc)) {
			*new_rev = mc->hdr.patch_id;
			return;
		}
	}

	__load_ucode_amd(cpuid_1_eax, &cp);
	if (!(cp.data && cp.size))
		return;

	apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false);
}

static enum ucode_state
load_microcode_amd(bool save, u8 family, const u8 *data, size_t size);

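/*
 * Re-scan the initrd container and save the matching patches to the
 * kernel-heap cache via load_microcode_amd() before the initrd contents are
 * jettisoned.
 */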
int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
{
	struct cont_desc desc = { 0 };
	enum ucode_state ret;
	struct cpio_data cp;

	cp = find_microcode_in_initrd(ucode_path, false);
	if (!(cp.data && cp.size))
		return -EINVAL;

	desc.cpuid_1_eax = cpuid_1_eax;

	scan_containers(cp.data, cp.size, &desc);
	if (!desc.mc)
		return -EINVAL;

	ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
	if (ret > UCODE_UPDATED)
		return -EINVAL;

	return 0;
}

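/*
 * Reapply the saved patch (e.g. after resume) if the currently running
 * microcode revision is older than the cached one.
 */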
void reload_ucode_amd(void)
{
	struct microcode_amd *mc;
	u32 rev, dummy __always_unused;

	mc = (struct microcode_amd *)amd_ucode_patch;

	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	if (rev < mc->hdr.patch_id) {
		if (!__apply_microcode_amd(mc)) {
			ucode_new_rev = mc->hdr.patch_id;
			pr_info("reload patch_level=0x%08x\n", ucode_new_rev);
		}
	}
}

static u16 __find_equiv_id(unsigned int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	return find_equiv_id(&equiv_table, uci->cpu_sig.sig);
}

/*
 * a small, trivial cache of per-family ucode patches
 */
static struct ucode_patch *cache_find_patch(u16 equiv_cpu)
{
	struct ucode_patch *p;

	list_for_each_entry(p, &microcode_cache, plist)
		if (p->equiv_cpu == equiv_cpu)
			return p;
	return NULL;
}

static void update_cache(struct ucode_patch *new_patch)
{
	struct ucode_patch *p;

	list_for_each_entry(p, &microcode_cache, plist) {
		if (p->equiv_cpu == new_patch->equiv_cpu) {
			if (p->patch_id >= new_patch->patch_id) {
				/* we already have the latest patch */
				kfree(new_patch->data);
				kfree(new_patch);
				return;
			}

			list_replace(&p->plist, &new_patch->plist);
			kfree(p->data);
			kfree(p);
			return;
		}
	}
	/* no patch found, add it */
	list_add_tail(&new_patch->plist, &microcode_cache);
}

static void free_cache(void)
{
	struct ucode_patch *p, *tmp;

	list_for_each_entry_safe(p, tmp, &microcode_cache, plist) {
		__list_del(p->plist.prev, p->plist.next);
		kfree(p->data);
		kfree(p);
	}
}

static struct ucode_patch *find_patch(unsigned int cpu)
{
	u16 equiv_id;

	equiv_id = __find_equiv_id(cpu);
	if (!equiv_id)
		return NULL;

	return cache_find_patch(equiv_id);
}

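/* Record the CPUID(1).EAX signature and running microcode revision for @cpu. */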
static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct ucode_patch *p;

	csig->sig = cpuid_eax(0x00000001);
	csig->rev = c->microcode;

	/*
	 * a patch could have been loaded early, set uci->mc so that
	 * mc_bp_resume() can call apply_microcode()
	 */
	p = find_patch(cpu);
	if (p && (p->patch_id == csig->rev))
		uci->mc = p->data;

	pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);

	return 0;
}

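/*
 * Apply the cached patch matching @cpu, if it is newer than the revision the
 * CPU is already running, and update the recorded revision.
 */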
static enum ucode_state apply_microcode_amd(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct microcode_amd *mc_amd;
	struct ucode_cpu_info *uci;
	struct ucode_patch *p;
	enum ucode_state ret;
	u32 rev, dummy __always_unused;

	BUG_ON(raw_smp_processor_id() != cpu);

	uci = ucode_cpu_info + cpu;

	p = find_patch(cpu);
	if (!p)
		return UCODE_NFOUND;

	mc_amd  = p->data;
	uci->mc = p->data;

	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	/* need to apply patch? */
	if (rev >= mc_amd->hdr.patch_id) {
		ret = UCODE_OK;
		goto out;
	}

	if (__apply_microcode_amd(mc_amd)) {
		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
			cpu, mc_amd->hdr.patch_id);
		return UCODE_ERROR;
	}

	rev = mc_amd->hdr.patch_id;
	ret = UCODE_UPDATED;

	pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);

out:
	uci->cpu_sig.rev = rev;
	c->microcode	 = rev;

	/* Update boot_cpu_data's revision too, if we're on the BSP: */
	if (c->cpu_index == boot_cpu_data.cpu_index)
		boot_cpu_data.microcode = rev;

	return ret;
}

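/*
 * Copy the container's CPU equivalence table into a vmalloc'd buffer.
 * Returns the number of container bytes consumed (table plus header), or 0 on
 * error.
 */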
static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size)
{
	u32 equiv_tbl_len;
	const u32 *hdr;

	if (!verify_equivalence_table(buf, buf_size, false))
		return 0;

	hdr = (const u32 *)buf;
	equiv_tbl_len = hdr[2];

	equiv_table.entry = vmalloc(equiv_tbl_len);
	if (!equiv_table.entry) {
		pr_err("failed to allocate equivalent CPU table\n");
		return 0;
	}

	memcpy(equiv_table.entry, buf + CONTAINER_HDR_SZ, equiv_tbl_len);
	equiv_table.num_entries = equiv_tbl_len / sizeof(struct equiv_cpu_entry);

	/* add header length */
	return equiv_tbl_len + CONTAINER_HDR_SZ;
}

static void free_equiv_cpu_table(void)
{
	vfree(equiv_table.entry);
	memset(&equiv_table, 0, sizeof(equiv_table));
}

static void cleanup(void)
{
	free_equiv_cpu_table();
	free_cache();
}

/*
 * Return a non-negative value even if some of the checks failed so that
 * we can skip over the next patch. If we return a negative value, we
 * signal a grave error like a memory allocation has failed and the
 * driver cannot continue functioning normally. In such cases, we tear
 * down everything we've used up so far and exit.
 */
static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
				unsigned int *patch_size)
{
	struct microcode_header_amd *mc_hdr;
	struct ucode_patch *patch;
	u16 proc_id;
	int ret;

	ret = verify_patch(family, fw, leftover, patch_size, false);
	if (ret)
		return ret;

	patch = kzalloc(sizeof(*patch), GFP_KERNEL);
	if (!patch) {
		pr_err("Patch allocation failure.\n");
		return -EINVAL;
	}

	patch->data = kmemdup(fw + SECTION_HDR_SIZE, *patch_size, GFP_KERNEL);
	if (!patch->data) {
		pr_err("Patch data allocation failure.\n");
		kfree(patch);
		return -EINVAL;
	}

	mc_hdr      = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
	proc_id     = mc_hdr->processor_rev_id;

	INIT_LIST_HEAD(&patch->plist);
	patch->patch_id  = mc_hdr->patch_id;
	patch->equiv_cpu = proc_id;

	pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n",
		 __func__, patch->patch_id, proc_id);

	/* ... and add to cache. */
	update_cache(patch);

	return 0;
}

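/*
 * Parse a verified container: install the equivalence table and then add each
 * patch section to the cache.
 */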
static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
					     size_t size)
{
	u8 *fw = (u8 *)data;
	size_t offset;

	offset = install_equiv_cpu_table(data, size);
	if (!offset)
		return UCODE_ERROR;

	fw   += offset;
	size -= offset;

	if (*(u32 *)fw != UCODE_UCODE_TYPE) {
		pr_err("invalid type field in container file section header\n");
		free_equiv_cpu_table();
		return UCODE_ERROR;
	}

	while (size > 0) {
		unsigned int crnt_size = 0;
		int ret;

		ret = verify_and_add_patch(family, fw, size, &crnt_size);
		if (ret < 0)
			return UCODE_ERROR;

		fw   +=  crnt_size + SECTION_HDR_SIZE;
		size -= (crnt_size + SECTION_HDR_SIZE);
	}

	return UCODE_OK;
}

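/*
 * Load the container into the patch cache and, when @save is set and a newer
 * patch for the boot CPU was found, stash that patch in amd_ucode_patch for
 * the early loader.
 */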
static enum ucode_state
load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
{
	struct ucode_patch *p;
	enum ucode_state ret;

	/* free old equiv table */
	free_equiv_cpu_table();

	ret = __load_microcode_amd(family, data, size);
	if (ret != UCODE_OK) {
		cleanup();
		return ret;
	}

	p = find_patch(0);
	if (!p) {
		return ret;
	} else {
		if (boot_cpu_data.microcode >= p->patch_id)
			return ret;

		ret = UCODE_NEW;
	}

	/* save BSP's matching patch for early load */
	if (!save)
		return ret;

	memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
	memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE));

	return ret;
}

/*
 * AMD microcode firmware naming convention, up to family 15h they are in
 * the legacy file:
 *
 *    amd-ucode/microcode_amd.bin
 *
 * This legacy file is always smaller than 2K in size.
 *
 * Beginning with family 15h, they are in family-specific firmware files:
 *
 *    amd-ucode/microcode_amd_fam15h.bin
 *    amd-ucode/microcode_amd_fam16h.bin
 *    ...
 *
 * These might be larger than 2K.
 */
static enum ucode_state request_microcode_amd(int cpu, struct device *device,
					      bool refresh_fw)
{
	char fw_name[36] = "amd-ucode/microcode_amd.bin";
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
	enum ucode_state ret = UCODE_NFOUND;
	const struct firmware *fw;

	/* reload ucode container only on the boot cpu */
	if (!refresh_fw || !bsp)
		return UCODE_OK;

	if (c->x86 >= 0x15)
		snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);

	if (request_firmware_direct(&fw, (const char *)fw_name, device)) {
		pr_debug("failed to load file %s\n", fw_name);
		goto out;
	}

	ret = UCODE_ERROR;
	if (!verify_container(fw->data, fw->size, false))
		goto fw_release;

	ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size);

 fw_release:
	release_firmware(fw);

 out:
	return ret;
}

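/* Late loading from a userspace buffer is not supported on AMD. */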
static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
	return UCODE_ERROR;
}

static void microcode_fini_cpu_amd(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	uci->mc = NULL;
}

static struct microcode_ops microcode_amd_ops = {
	.request_microcode_user           = request_microcode_user,
	.request_microcode_fw             = request_microcode_amd,
	.collect_cpu_info                 = collect_cpu_info_amd,
	.apply_microcode                  = apply_microcode_amd,
	.microcode_fini_cpu               = microcode_fini_cpu_amd,
};

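/* Entry point for the common microcode driver core; families 10h+ are supported. */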
struct microcode_ops * __init init_amd_microcode(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
		pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}

	if (ucode_new_rev)
		pr_info_once("microcode updated early to new patch_level=0x%08x\n",
			     ucode_new_rev);

	return &microcode_amd_ops;
}

void __exit exit_amd_microcode(void)
{
	cleanup();
}