cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

proton-pack.c (30900B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Handle detection, reporting and mitigation of Spectre v1, v2, v3a and v4, as
 * detailed at:
 *
 *   https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
 *
 * This code was originally written hastily under an awful lot of stress and so
 * aspects of it are somewhat hacky. Unfortunately, changing anything in here
 * instantly makes me feel ill. Thanks, Jann. Thann.
 *
 * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
 * Copyright (C) 2020 Google LLC
 *
 * "If there's something strange in your neighbourhood, who you gonna call?"
 *
 * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org>
 */

#include <linux/arm-smccc.h>
#include <linux/bpf.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/task_stack.h>

#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/spectre.h>
#include <asm/traps.h>
#include <asm/vectors.h>
#include <asm/virt.h>

/*
 * We try to ensure that the mitigation state can never change as the result of
 * onlining a late CPU.
 */
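/*
 * Note (an assumption documented here, not new behaviour): this relies on the
 * declaration order of enum mitigation_state in <asm/spectre.h>, i.e.
 * SPECTRE_UNAFFECTED < SPECTRE_MITIGATED < SPECTRE_VULNERABLE, so "new <= state"
 * below means the recorded state only ever moves towards SPECTRE_VULNERABLE.
 */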
static void update_mitigation_state(enum mitigation_state *oldp,
				    enum mitigation_state new)
{
	enum mitigation_state state;

	do {
		state = READ_ONCE(*oldp);
		if (new <= state)
			break;

		/* Userspace almost certainly can't deal with this. */
		if (WARN_ON(system_capabilities_finalized()))
			break;
	} while (cmpxchg_relaxed(oldp, state, new) != state);
}

/*
 * Spectre v1.
 *
 * The kernel can't protect userspace for this one: it's each person for
 * themselves. Advertise what we're doing and be done with it.
 */
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}
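/*
 * Illustrative only: these cpu_show_*() hooks back the generic sysfs
 * vulnerabilities interface, so the state reported above is what e.g.
 *
 *	$ cat /sys/devices/system/cpu/vulnerabilities/spectre_v1
 *	Mitigation: __user pointer sanitization
 *
 * would print on an arm64 machine running this code.
 */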

/*
 * Spectre v2.
 *
 * This one sucks. A CPU is either:
 *
 * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
 * - Mitigated in hardware and listed in our "safe list".
 * - Mitigated in software by firmware.
 * - Mitigated in software by a CPU-specific dance in the kernel and a
 *   firmware call at EL2.
 * - Vulnerable.
 *
 * It's not unlikely for different CPUs in a big.LITTLE system to fall into
 * different camps.
 */
static enum mitigation_state spectre_v2_state;

static bool __read_mostly __nospectre_v2;
static int __init parse_spectre_v2_param(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_spectre_v2_param);
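/* i.e. booting with "nospectre_v2" on the kernel command line selects this. */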

static bool spectre_v2_mitigations_off(void)
{
	bool ret = __nospectre_v2 || cpu_mitigations_off();

	if (ret)
		pr_info_once("spectre-v2 mitigation disabled by command line option\n");

	return ret;
}

static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
{
	switch (bhb_state) {
	case SPECTRE_UNAFFECTED:
		return "";
	default:
	case SPECTRE_VULNERABLE:
		return ", but not BHB";
	case SPECTRE_MITIGATED:
		return ", BHB";
	}
}

static bool _unprivileged_ebpf_enabled(void)
{
#ifdef CONFIG_BPF_SYSCALL
	return !sysctl_unprivileged_bpf_disabled;
#else
	return false;
#endif
}
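/*
 * i.e. unprivileged eBPF counts as enabled unless the
 * kernel.unprivileged_bpf_disabled sysctl is set (or eBPF is compiled out).
 */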

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
	const char *bhb_str = get_bhb_affected_string(bhb_state);
	const char *v2_str = "Branch predictor hardening";

	switch (spectre_v2_state) {
	case SPECTRE_UNAFFECTED:
		if (bhb_state == SPECTRE_UNAFFECTED)
			return sprintf(buf, "Not affected\n");

		/*
		 * Platforms affected by Spectre-BHB can't report
		 * "Not affected" for Spectre-v2.
		 */
		v2_str = "CSV2";
		fallthrough;
	case SPECTRE_MITIGATED:
		if (bhb_state == SPECTRE_MITIGATED && _unprivileged_ebpf_enabled())
			return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");

		return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}

static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
{
	u64 pfr0;
	static const struct midr_range spectre_v2_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ }
	};

	/* If the CPU has CSV2 set, we're safe */
	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return SPECTRE_UNAFFECTED;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return SPECTRE_UNAFFECTED;

	return SPECTRE_VULNERABLE;
}

static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
		return false;

	if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
		return false;

	return true;
}

enum mitigation_state arm64_get_spectre_v2_state(void)
{
	return spectre_v2_state;
}

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	__this_cpu_write(bp_hardening_data.fn, fn);

	/*
	 * Vinz Clortho takes the hyp_vecs start/end "keys" at
	 * the door when we're a guest. Skip the hyp-vectors work.
	 */
	if (!is_hyp_mode_available())
		return;

	__this_cpu_write(bp_hardening_data.slot, HYP_VECTOR_SPECTRE_DIRECT);
}

/* Called during entry so must be noinstr */
static noinstr void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

/* Called during entry so must be noinstr */
static noinstr void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

/* Called during entry so must be noinstr */
static noinstr void qcom_link_stack_sanitisation(void)
{
	u64 tmp;

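	/*
	 * Stuff the return-address predictor with 16 benign entries: each
	 * "bl . + 4" just links to the next instruction, and the real x30
	 * is stashed in tmp and restored afterwards.
	 */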
	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

static bp_hardening_cb_t spectre_v2_get_sw_mitigation_cb(void)
{
	u32 midr = read_cpuid_id();

	if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
	    ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
		return NULL;

	return qcom_link_stack_sanitisation;
}

static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
{
	bp_hardening_cb_t cb;
	enum mitigation_state state;

	state = spectre_v2_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v2_mitigations_off())
		return SPECTRE_VULNERABLE;

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		cb = call_hvc_arch_workaround_1;
		break;

	case SMCCC_CONDUIT_SMC:
		cb = call_smc_arch_workaround_1;
		break;

	default:
		return SPECTRE_VULNERABLE;
	}

	/*
	 * Prefer a CPU-specific workaround if it exists. Note that we
	 * still rely on firmware for the mitigation at EL2.
	 */
	cb = spectre_v2_get_sw_mitigation_cb() ?: cb;
	install_bp_hardening_cb(cb);
	return SPECTRE_MITIGATED;
}

void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v2_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v2_enable_fw_mitigation();

	update_mitigation_state(&spectre_v2_state, state);
}

/*
 * Spectre-v3a.
 *
 * Phew, there's not an awful lot to do here! We just instruct EL2 to use
 * an indirect trampoline for the hyp vectors so that guests can't read
 * VBAR_EL2 to defeat randomisation of the hypervisor VA layout.
 */
bool has_spectre_v3a(const struct arm64_cpu_capabilities *entry, int scope)
{
	static const struct midr_range spectre_v3a_unsafe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
		{},
	};

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), spectre_v3a_unsafe_list);
}

void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	if (this_cpu_has_cap(ARM64_SPECTRE_V3A))
		data->slot += HYP_VECTOR_INDIRECT;
}

/*
 * Spectre v4.
 *
 * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
 * either:
 *
 * - Mitigated in hardware and listed in our "safe list".
 * - Mitigated in hardware via PSTATE.SSBS.
 * - Mitigated in software by firmware (sometimes referred to as SSBD).
 *
 * Wait, that doesn't sound so bad, does it? Keep reading...
 *
 * A major source of headaches is that the software mitigation is enabled both
 * on a per-task basis, but can also be forced on for the kernel, necessitating
 * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs
 * allow EL0 to toggle SSBS directly, which can end up with the prctl() state
 * being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
 * so you can have systems that have both firmware and SSBS mitigations. This
 * means we actually have to reject late onlining of CPUs with mitigations if
 * all of the currently onlined CPUs are safelisted, as the mitigation tends to
 * be opt-in for userspace. Yes, really, the cure is worse than the disease.
 *
 * The only good part is that if the firmware mitigation is present, then it is
 * present for all CPUs, meaning we don't have to worry about late onlining of a
 * vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
 *
 * Give me a VAX-11/780 any day of the week...
 */
static enum mitigation_state spectre_v4_state;

/* This is the per-cpu state tracking whether we need to talk to firmware */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

enum spectre_v4_policy {
	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,
	SPECTRE_V4_POLICY_MITIGATION_ENABLED,
	SPECTRE_V4_POLICY_MITIGATION_DISABLED,
};

static enum spectre_v4_policy __read_mostly __spectre_v4_policy;

static const struct spectre_v4_param {
	const char		*str;
	enum spectre_v4_policy	policy;
} spectre_v4_params[] = {
	{ "force-on",	SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
	{ "force-off",	SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
	{ "kernel",	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
};
static int __init parse_spectre_v4_param(char *str)
{
	int i;

	if (!str || !str[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
		const struct spectre_v4_param *param = &spectre_v4_params[i];

		if (strncmp(str, param->str, strlen(param->str)))
			continue;

		__spectre_v4_policy = param->policy;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", parse_spectre_v4_param);
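/* e.g. "ssbd=force-on", "ssbd=force-off" or "ssbd=kernel" on the command line. */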

/*
 * Because this was all written in a rush by people working in different silos,
 * we've ended up with multiple command line options to control the same thing.
 * Wrap these up in some helpers, which prefer disabling the mitigation if faced
 * with contradictory parameters. The mitigation is always either "off",
 * "dynamic" or "on".
 */
static bool spectre_v4_mitigations_off(void)
{
	bool ret = cpu_mitigations_off() ||
		   __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;

	if (ret)
		pr_info_once("spectre-v4 mitigation disabled by command-line option\n");

	return ret;
}

/* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
static bool spectre_v4_mitigations_dynamic(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
}

static bool spectre_v4_mitigations_on(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
}

ssize_t cpu_show_spec_store_bypass(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return sprintf(buf, "Not affected\n");
	case SPECTRE_MITIGATED:
		return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}
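/*
 * Surfaced to userspace via the generic interface as
 * /sys/devices/system/cpu/vulnerabilities/spec_store_bypass.
 */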

enum mitigation_state arm64_get_spectre_v4_state(void)
{
	return spectre_v4_state;
}

static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
{
	static const struct midr_range spectre_v4_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ },
	};

	if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
		return SPECTRE_UNAFFECTED;

	/* CPU features are detected first */
	if (this_cpu_has_cap(ARM64_SSBS))
		return SPECTRE_MITIGATED;

	return SPECTRE_VULNERABLE;
}

static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_2, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		fallthrough;
	case SMCCC_RET_NOT_REQUIRED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
{
	enum mitigation_state state;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_get_cpu_fw_mitigation_state();

	return state != SPECTRE_UNAFFECTED;
}

static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
{
	if (user_mode(regs))
		return 1;

	if (instr & BIT(PSTATE_Imm_shift))
		regs->pstate |= PSR_SSBS_BIT;
	else
		regs->pstate &= ~PSR_SSBS_BIT;

	arm64_skip_faulting_instruction(regs, 4);
	return 0;
}

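/*
 * Match the MSR (immediate) encoding of "MSR SSBS, #imm", with the immediate
 * bit masked out so that both "MSR SSBS, #0" and "MSR SSBS, #1" reach the
 * handler above.
 */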
static struct undef_hook ssbs_emulation_hook = {
	.instr_mask	= ~(1U << PSTATE_Imm_shift),
	.instr_val	= 0xd500401f | PSTATE_SSBS,
	.fn		= ssbs_emulation_handler,
};

static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
{
	static bool undef_hook_registered = false;
	static DEFINE_RAW_SPINLOCK(hook_lock);
	enum mitigation_state state;

	/*
	 * If the system is mitigated but this CPU doesn't have SSBS, then
	 * we must be on the safelist and there's nothing more to do.
	 */
	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
		return state;

	raw_spin_lock(&hook_lock);
	if (!undef_hook_registered) {
		register_undef_hook(&ssbs_emulation_hook);
		undef_hook_registered = true;
	}
	raw_spin_unlock(&hook_lock);

	if (spectre_v4_mitigations_off()) {
		sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
		set_pstate_ssbs(1);
		return SPECTRE_VULNERABLE;
	}

	/* SCTLR_EL1.DSSBS was initialised to 0 during boot */
	set_pstate_ssbs(0);
	return SPECTRE_MITIGATED;
}

/*
 * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
 * we fallthrough and check whether firmware needs to be called on this CPU.
 */
void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
						  __le32 *origptr,
						  __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1); /* Branch -> NOP */

	if (spectre_v4_mitigations_off())
		return;

	if (cpus_have_final_cap(ARM64_SSBS))
		return;

	if (spectre_v4_mitigations_dynamic())
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

/*
 * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
 * to call into firmware to adjust the mitigation state.
 */
void __init smccc_patch_fw_mitigation_conduit(struct alt_instr *alt,
					      __le32 *origptr,
					      __le32 *updptr, int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case SMCCC_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}

static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
{
	enum mitigation_state state;

	state = spectre_v4_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v4_mitigations_off()) {
		arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
		return SPECTRE_VULNERABLE;
	}

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);

	if (spectre_v4_mitigations_dynamic())
		__this_cpu_write(arm64_ssbd_callback_required, 1);

	return SPECTRE_MITIGATED;
}

void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v4_enable_hw_mitigation();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_enable_fw_mitigation();

	update_mitigation_state(&spectre_v4_state, state);
}

static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
{
	u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;

	if (state)
		regs->pstate |= bit;
	else
		regs->pstate &= ~bit;
}

void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
{
	struct pt_regs *regs = task_pt_regs(tsk);
	bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;

	if (spectre_v4_mitigations_off())
		ssbs = true;
	else if (spectre_v4_mitigations_dynamic() && !kthread)
		ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);

	__update_pstate_ssbs(regs, ssbs);
}

/*
 * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
 * This is interesting because the "speculation disabled" behaviour can be
 * configured so that it is preserved across exec(), which means that the
 * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
 * from userspace.
 */
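/*
 * An illustrative (userspace, not compiled here) use of that interface,
 * assuming the definitions from <sys/prctl.h>:
 *
 *	#include <sys/prctl.h>
 *
 *	// Opt this task into the mitigation; PR_SPEC_DISABLE persists
 *	// across execve(), unlike PR_SPEC_DISABLE_NOEXEC.
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_DISABLE, 0, 0);
 */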
static void ssbd_prctl_enable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_set_spec_ssb_disable(task);
	set_tsk_thread_flag(task, TIF_SSBD);
}

static void ssbd_prctl_disable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_clear_spec_ssb_disable(task);
	clear_tsk_thread_flag(task, TIF_SSBD);
}

static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* Enable speculation: disable mitigation */
		/*
		 * Force disabled speculation prevents it from being
		 * re-enabled.
		 */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;

		/*
		 * If the mitigation is forced on, then speculation is forced
		 * off and we again prevent it from being re-enabled.
		 */
		if (spectre_v4_mitigations_on())
			return -EPERM;

		ssbd_prctl_disable_mitigation(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		/* Force disable speculation: force enable mitigation */
		/*
		 * If the mitigation is forced off, then speculation is forced
		 * on and we prevent it from being disabled.
		 */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		task_set_spec_ssb_force_disable(task);
		fallthrough;
	case PR_SPEC_DISABLE:
		/* Disable speculation: enable mitigation */
		/* Same as PR_SPEC_FORCE_DISABLE */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		ssbd_prctl_enable_mitigation(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		/* Disable speculation until execve(): enable mitigation */
		/*
		 * If the mitigation state is forced one way or the other, then
		 * we must fail now before we try to toggle it on execve().
		 */
		if (task_spec_ssb_force_disable(task) ||
		    spectre_v4_mitigations_off() ||
		    spectre_v4_mitigations_on()) {
			return -EPERM;
		}

		ssbd_prctl_enable_mitigation(task);
		task_set_spec_ssb_noexec(task);
		break;
	default:
		return -ERANGE;
	}

	spectre_v4_enable_task_mitigation(task);
	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

static int ssbd_prctl_get(struct task_struct *task)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return PR_SPEC_NOT_AFFECTED;
	case SPECTRE_MITIGATED:
		if (spectre_v4_mitigations_on())
			return PR_SPEC_NOT_AFFECTED;

		if (spectre_v4_mitigations_dynamic())
			break;

		/* Mitigations are disabled, so we're vulnerable. */
		fallthrough;
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return PR_SPEC_ENABLE;
	}

	/* Check the mitigation state for this task */
	if (task_spec_ssb_force_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;

	if (task_spec_ssb_noexec(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;

	if (task_spec_ssb_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;

	return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_get(task);
	default:
		return -ENODEV;
	}
}

/*
 * Spectre BHB.
 *
 * A CPU is either:
 * - Mitigated by a branchy loop a CPU specific number of times, and listed
 *   in our "loop mitigated list".
 * - Mitigated in software by the firmware Spectre v2 call.
 * - Has the ClearBHB instruction to perform the mitigation.
 * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
 *   software mitigation in the vectors is needed.
 * - Has CSV2.3, so is unaffected.
 */
static enum mitigation_state spectre_bhb_state;

enum mitigation_state arm64_get_spectre_bhb_state(void)
{
	return spectre_bhb_state;
}

enum bhb_mitigation_bits {
	BHB_LOOP,
	BHB_FW,
	BHB_HW,
	BHB_INSN,
};
static unsigned long system_bhb_mitigations;

/*
 * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
 * SCOPE_SYSTEM call will give the right answer.
 */
u8 spectre_bhb_loop_affected(int scope)
{
	u8 k = 0;
	static u8 max_bhb_k;

	if (scope == SCOPE_LOCAL_CPU) {
		static const struct midr_range spectre_bhb_k32_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
			{},
		};
		static const struct midr_range spectre_bhb_k24_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
			{},
		};
		static const struct midr_range spectre_bhb_k8_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
			{},
		};

		if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
			k = 32;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
			k = 24;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
			k =  8;

		max_bhb_k = max(max_bhb_k, k);
	} else {
		k = max_bhb_k;
	}

	return k;
}
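/*
 * i.e. the SCOPE_LOCAL_CPU passes, run as each CPU comes up, feed max_bhb_k
 * so that a later SCOPE_SYSTEM query reports the worst-case loop count.
 */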

static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_3, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

static bool is_spectre_bhb_fw_affected(int scope)
{
	static bool system_affected;
	enum mitigation_state fw_state;
	bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
	static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
		{},
	};
	bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
					 spectre_bhb_firmware_mitigated_list);

	if (scope != SCOPE_LOCAL_CPU)
		return system_affected;

	fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
	if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
		system_affected = true;
		return true;
	}

	return false;
}

static bool supports_ecbhb(int scope)
{
	u64 mmfr1;

	if (scope == SCOPE_LOCAL_CPU)
		mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
	else
		mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return cpuid_feature_extract_unsigned_field(mmfr1,
						    ID_AA64MMFR1_ECBHB_SHIFT);
}

bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
			     int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (supports_csv2p3(scope))
		return false;

	if (supports_clearbhb(scope))
		return true;

	if (spectre_bhb_loop_affected(scope))
		return true;

	if (is_spectre_bhb_fw_affected(scope))
		return true;

	return false;
}

static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
{
	const char *v = arm64_get_bp_hardening_vector(slot);

	if (slot < 0)
		return;

	__this_cpu_write(this_cpu_vector, v);

	/*
	 * When KPTI is in use, the vectors are switched when exiting to
	 * user-space.
	 */
	if (arm64_kernel_unmapped_at_el0())
		return;

	write_sysreg(v, vbar_el1);
	isb();
}

void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
	bp_hardening_cb_t cpu_cb;
	enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
		return;

	if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) {
		/* No point mitigating Spectre-BHB alone. */
	} else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
		pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
	} else if (cpu_mitigations_off()) {
		pr_info_once("spectre-bhb mitigation disabled by command line option\n");
	} else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
		state = SPECTRE_MITIGATED;
		set_bit(BHB_HW, &system_bhb_mitigations);
	} else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
		/*
		 * Ensure KVM uses the indirect vector which will have ClearBHB
		 * added.
		 */
		if (!data->slot)
			data->slot = HYP_VECTOR_INDIRECT;

		this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
		state = SPECTRE_MITIGATED;
		set_bit(BHB_INSN, &system_bhb_mitigations);
	} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
		/*
		 * Ensure KVM uses the indirect vector which will have the
		 * branchy-loop added. A57/A72-r0 will already have selected
		 * the spectre-indirect vector, which is sufficient for BHB
		 * too.
		 */
		if (!data->slot)
			data->slot = HYP_VECTOR_INDIRECT;

		this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
		state = SPECTRE_MITIGATED;
		set_bit(BHB_LOOP, &system_bhb_mitigations);
	} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
		fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
		if (fw_state == SPECTRE_MITIGATED) {
			/*
			 * Ensure KVM uses one of the spectre bp_hardening
			 * vectors. The indirect vector doesn't include the EL3
			 * call, so needs upgrading to
			 * HYP_VECTOR_SPECTRE_INDIRECT.
			 */
			if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
				data->slot += 1;

			this_cpu_set_vectors(EL1_VECTOR_BHB_FW);

			/*
			 * The WA3 call in the vectors supersedes the WA1 call
			 * made during context-switch. Uninstall any firmware
			 * bp_hardening callback.
			 */
			cpu_cb = spectre_v2_get_sw_mitigation_cb();
			if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
				__this_cpu_write(bp_hardening_data.fn, NULL);

			state = SPECTRE_MITIGATED;
			set_bit(BHB_FW, &system_bhb_mitigations);
		}
	}

	update_mitigation_state(&spectre_bhb_state, state);
}

/* Patched to NOP when enabled */
void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
						      __le32 *origptr,
						      __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1);

	if (test_bit(BHB_LOOP, &system_bhb_mitigations))
		*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

/* Patched to NOP when enabled */
void noinstr spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt,
						     __le32 *origptr,
						     __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1);

	if (test_bit(BHB_FW, &system_bhb_mitigations))
		*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

/* Patched to correct the immediate */
void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
					 __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u8 rd;
	u32 insn;
	u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);

	BUG_ON(nr_inst != 1); /* MOV -> MOV */

	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
		return;

	insn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
	insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);
}

/* Patched to mov WA3 when supported */
void noinstr spectre_bhb_patch_wa3(struct alt_instr *alt,
				   __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u8 rd;
	u32 insn;

	BUG_ON(nr_inst != 1); /* MOV -> MOV */

	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
	    !test_bit(BHB_FW, &system_bhb_mitigations))
		return;

	insn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);

	insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_ORR,
						  AARCH64_INSN_VARIANT_32BIT,
						  AARCH64_INSN_REG_ZR, rd,
						  ARM_SMCCC_ARCH_WORKAROUND_3);
	if (WARN_ON_ONCE(insn == AARCH64_BREAK_FAULT))
		return;

	*updptr++ = cpu_to_le32(insn);
}

/* Patched to NOP when not supported */
void __init spectre_bhb_patch_clearbhb(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 2);

	if (test_bit(BHB_INSN, &system_bhb_mitigations))
		return;

	*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
	*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

#ifdef CONFIG_BPF_SYSCALL
#define EBPF_WARN "Unprivileged eBPF is enabled, data leaks possible via Spectre v2 BHB attacks!\n"
void unpriv_ebpf_notify(int new_state)
{
	if (spectre_v2_state == SPECTRE_VULNERABLE ||
	    spectre_bhb_state != SPECTRE_MITIGATED)
		return;

	if (!new_state)
		pr_err("WARNING: %s", EBPF_WARN);
}
#endif