cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

proc-v7-bugs.c (6799B)


      1// SPDX-License-Identifier: GPL-2.0
      2#include <linux/arm-smccc.h>
      3#include <linux/kernel.h>
      4#include <linux/smp.h>
      5
      6#include <asm/cp15.h>
      7#include <asm/cputype.h>
      8#include <asm/proc-fns.h>
      9#include <asm/spectre.h>
     10#include <asm/system_misc.h>
     11
     12#ifdef CONFIG_ARM_PSCI
     13static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
     14{
     15	struct arm_smccc_res res;
     16
     17	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
     18			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);
     19
     20	switch ((int)res.a0) {
     21	case SMCCC_RET_SUCCESS:
     22		return SPECTRE_MITIGATED;
     23
     24	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
     25		return SPECTRE_UNAFFECTED;
     26
     27	default:
     28		return SPECTRE_VULNERABLE;
     29	}
     30}
     31#else
     32static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
     33{
     34	return SPECTRE_VULNERABLE;
     35}
     36#endif
     37
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
/* Per-CPU callback used to harden the branch predictor on this CPU. */
DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);

/*
 * Alternative cpu_do_switch_mm implementations (defined outside this
 * file) that additionally apply a predictor-hardening action on
 * context switch.
 */
extern void cpu_v7_iciallu_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
extern void cpu_v7_bpiall_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
extern void cpu_v7_smc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
extern void cpu_v7_hvc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
/* Harden the predictor by writing to the BPIALL system register. */
static void harden_branch_predictor_bpiall(void)
{
	write_sysreg(0, BPIALL);
}
     50
/* Harden the predictor by writing to the ICIALLU system register. */
static void harden_branch_predictor_iciallu(void)
{
	write_sysreg(0, ICIALLU);
}
     55
/* Invoke the firmware's ARCH_WORKAROUND_1 through an SMC call. */
static void __maybe_unused call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
     60
/* Invoke the hypervisor's ARCH_WORKAROUND_1 through an HVC call. */
static void __maybe_unused call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
     65
     66static unsigned int spectre_v2_install_workaround(unsigned int method)
     67{
     68	const char *spectre_v2_method = NULL;
     69	int cpu = smp_processor_id();
     70
     71	if (per_cpu(harden_branch_predictor_fn, cpu))
     72		return SPECTRE_MITIGATED;
     73
     74	switch (method) {
     75	case SPECTRE_V2_METHOD_BPIALL:
     76		per_cpu(harden_branch_predictor_fn, cpu) =
     77			harden_branch_predictor_bpiall;
     78		spectre_v2_method = "BPIALL";
     79		break;
     80
     81	case SPECTRE_V2_METHOD_ICIALLU:
     82		per_cpu(harden_branch_predictor_fn, cpu) =
     83			harden_branch_predictor_iciallu;
     84		spectre_v2_method = "ICIALLU";
     85		break;
     86
     87	case SPECTRE_V2_METHOD_HVC:
     88		per_cpu(harden_branch_predictor_fn, cpu) =
     89			call_hvc_arch_workaround_1;
     90		cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
     91		spectre_v2_method = "hypervisor";
     92		break;
     93
     94	case SPECTRE_V2_METHOD_SMC:
     95		per_cpu(harden_branch_predictor_fn, cpu) =
     96			call_smc_arch_workaround_1;
     97		cpu_do_switch_mm = cpu_v7_smc_switch_mm;
     98		spectre_v2_method = "firmware";
     99		break;
    100	}
    101
    102	if (spectre_v2_method)
    103		pr_info("CPU%u: Spectre v2: using %s workaround\n",
    104			smp_processor_id(), spectre_v2_method);
    105
    106	return SPECTRE_MITIGATED;
    107}
    108#else
    109static unsigned int spectre_v2_install_workaround(unsigned int method)
    110{
    111	pr_info("CPU%u: Spectre V2: workarounds disabled by configuration\n",
    112		smp_processor_id());
    113
    114	return SPECTRE_VULNERABLE;
    115}
    116#endif
    117
/*
 * Per-CPU Spectre v2 setup: select a branch-predictor hardening method
 * based on the CPU part number, install it, and report the outcome via
 * spectre_v2_update_state().
 */
static void cpu_v7_spectre_v2_init(void)
{
	unsigned int state, method = 0;

	switch (read_cpuid_part()) {
	/* These CPUs are mitigated by a branch-predictor invalidate. */
	case ARM_CPU_PART_CORTEX_A8:
	case ARM_CPU_PART_CORTEX_A9:
	case ARM_CPU_PART_CORTEX_A12:
	case ARM_CPU_PART_CORTEX_A17:
	case ARM_CPU_PART_CORTEX_A73:
	case ARM_CPU_PART_CORTEX_A75:
		state = SPECTRE_MITIGATED;
		method = SPECTRE_V2_METHOD_BPIALL;
		break;

	/* These CPUs are mitigated by an instruction-cache invalidate. */
	case ARM_CPU_PART_CORTEX_A15:
	case ARM_CPU_PART_BRAHMA_B15:
		state = SPECTRE_MITIGATED;
		method = SPECTRE_V2_METHOD_ICIALLU;
		break;

	case ARM_CPU_PART_BRAHMA_B53:
		/* Requires no workaround */
		state = SPECTRE_UNAFFECTED;
		break;

	default:
		/* Other ARM CPUs require no workaround */
		if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) {
			state = SPECTRE_UNAFFECTED;
			break;
		}

		/* Non-ARM implementors fall through to the firmware query. */
		fallthrough;

	/* Cortex A57/A72 require firmware workaround */
	case ARM_CPU_PART_CORTEX_A57:
	case ARM_CPU_PART_CORTEX_A72:
		state = spectre_v2_get_cpu_fw_mitigation_state();
		if (state != SPECTRE_MITIGATED)
			break;

		/* Pick HVC or SMC to match the SMCCC conduit in use. */
		switch (arm_smccc_1_1_get_conduit()) {
		case SMCCC_CONDUIT_HVC:
			method = SPECTRE_V2_METHOD_HVC;
			break;

		case SMCCC_CONDUIT_SMC:
			method = SPECTRE_V2_METHOD_SMC;
			break;

		default:
			state = SPECTRE_VULNERABLE;
			break;
		}
	}

	if (state == SPECTRE_MITIGATED)
		state = spectre_v2_install_workaround(method);

	spectre_v2_update_state(state, method);
}
    180
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
/*
 * BHB mitigation method all CPUs must agree on; 0 (none) until the
 * first CPU installs one in spectre_bhb_install_workaround().
 */
static int spectre_bhb_method;
    183
    184static const char *spectre_bhb_method_name(int method)
    185{
    186	switch (method) {
    187	case SPECTRE_V2_METHOD_LOOP8:
    188		return "loop";
    189
    190	case SPECTRE_V2_METHOD_BPIALL:
    191		return "BPIALL";
    192
    193	default:
    194		return "unknown";
    195	}
    196}
    197
/*
 * Record and install the Spectre BHB mitigation for this CPU.
 *
 * All CPUs must agree on a single method: the first caller updates the
 * vectors via spectre_bhb_update_vectors() and records the method in
 * spectre_bhb_method; a later CPU requesting a different method is a
 * disagreement and the system is reported vulnerable.
 *
 * Returns SPECTRE_MITIGATED on success, SPECTRE_VULNERABLE otherwise.
 */
static int spectre_bhb_install_workaround(int method)
{
	if (spectre_bhb_method != method) {
		if (spectre_bhb_method) {
			/* Another CPU already chose a different method. */
			pr_err("CPU%u: Spectre BHB: method disagreement, system vulnerable\n",
			       smp_processor_id());

			return SPECTRE_VULNERABLE;
		}

		if (spectre_bhb_update_vectors(method) == SPECTRE_VULNERABLE)
			return SPECTRE_VULNERABLE;

		spectre_bhb_method = method;
	}

	pr_info("CPU%u: Spectre BHB: using %s workaround\n",
		smp_processor_id(), spectre_bhb_method_name(method));

	return SPECTRE_MITIGATED;
}
#else
/* CONFIG_HARDEN_BRANCH_HISTORY disabled: no BHB workaround available. */
static int spectre_bhb_install_workaround(int method)
{
	return SPECTRE_VULNERABLE;
}
#endif
    225
    226static void cpu_v7_spectre_bhb_init(void)
    227{
    228	unsigned int state, method = 0;
    229
    230	switch (read_cpuid_part()) {
    231	case ARM_CPU_PART_CORTEX_A15:
    232	case ARM_CPU_PART_BRAHMA_B15:
    233	case ARM_CPU_PART_CORTEX_A57:
    234	case ARM_CPU_PART_CORTEX_A72:
    235		state = SPECTRE_MITIGATED;
    236		method = SPECTRE_V2_METHOD_LOOP8;
    237		break;
    238
    239	case ARM_CPU_PART_CORTEX_A73:
    240	case ARM_CPU_PART_CORTEX_A75:
    241		state = SPECTRE_MITIGATED;
    242		method = SPECTRE_V2_METHOD_BPIALL;
    243		break;
    244
    245	default:
    246		state = SPECTRE_UNAFFECTED;
    247		break;
    248	}
    249
    250	if (state == SPECTRE_MITIGATED)
    251		state = spectre_bhb_install_workaround(method);
    252
    253	spectre_v2_update_state(state, method);
    254}
    255
    256static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
    257						  u32 mask, const char *msg)
    258{
    259	u32 aux_cr;
    260
    261	asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (aux_cr));
    262
    263	if ((aux_cr & mask) != mask) {
    264		if (!*warned)
    265			pr_err("CPU%u: %s", smp_processor_id(), msg);
    266		*warned = true;
    267		return false;
    268	}
    269	return true;
    270}
    271
/* Per-CPU flag so the "firmware did not set IBE" error prints only once. */
static DEFINE_PER_CPU(bool, spectre_warned);
    273
    274static bool check_spectre_auxcr(bool *warned, u32 bit)
    275{
    276	return IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR) &&
    277		cpu_v7_check_auxcr_set(warned, bit,
    278				       "Spectre v2: firmware did not set auxiliary control register IBE bit, system vulnerable\n");
    279}
    280
/*
 * Cortex-A8 bug init: enable the Spectre v2 workaround only if firmware
 * set the IBE bit (BIT(6)) in the auxiliary control register.
 */
void cpu_v7_ca8_ibe(void)
{
	if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6)))
		cpu_v7_spectre_v2_init();
}
    286
/*
 * Cortex-A15 bug init: Spectre v2 setup only if firmware set the IBE
 * bit (BIT(0)); Spectre BHB setup runs unconditionally.
 */
void cpu_v7_ca15_ibe(void)
{
	if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
		cpu_v7_spectre_v2_init();
	cpu_v7_spectre_bhb_init();
}
    293
/* Default bug init for ARMv7 CPUs: run both Spectre v2 and BHB setup. */
void cpu_v7_bugs_init(void)
{
	cpu_v7_spectre_v2_init();
	cpu_v7_spectre_bhb_init();
}