cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

archrandom.h (3847B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARCHRANDOM_H
#define _ASM_ARCHRANDOM_H

#ifdef CONFIG_ARCH_RANDOM

#include <linux/arm-smccc.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <asm/cpufeature.h>

#define ARM_SMCCC_TRNG_MIN_VERSION	0x10000UL

extern bool smccc_trng_available;

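/*
 * Probe whether firmware implements the SMCCC TRNG service: a negative
 * TRNG_VERSION result means the call is unsupported; otherwise the
 * reported version must be at least 1.0 (ARM_SMCCC_TRNG_MIN_VERSION).
 */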
static inline bool __init smccc_probe_trng(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_VERSION, &res);
	if ((s32)res.a0 < 0)
		return false;

	return res.a0 >= ARM_SMCCC_TRNG_MIN_VERSION;
}

static inline bool __arm64_rndr(unsigned long *v)
{
	bool ok;

	/*
	 * Reads of RNDR set PSTATE.NZCV to 0b0000 on success,
	 * and set PSTATE.NZCV to 0b0100 otherwise.
	 */
	asm volatile(
		__mrs_s("%0", SYS_RNDR_EL0) "\n"
	"	cset %w1, ne\n"
	: "=r" (*v), "=r" (ok)
	:
	: "cc");

	return ok;
}

static inline bool __arm64_rndrrs(unsigned long *v)
{
	bool ok;

	/*
	 * Reads of RNDRRS set PSTATE.NZCV to 0b0000 on success,
	 * and set PSTATE.NZCV to 0b0100 otherwise.
	 */
	asm volatile(
		__mrs_s("%0", SYS_RNDRRS_EL0) "\n"
	"	cset %w1, ne\n"
	: "=r" (*v), "=r" (ok)
	:
	: "cc");

	return ok;
}

static inline bool __must_check arch_get_random_long(unsigned long *v)
{
	/*
	 * Only support the generic interface after we have detected
	 * the system wide capability, avoiding complexity with the
	 * cpufeature code and with potential scheduling between CPUs
	 * with and without the feature.
	 */
	if (cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndr(v))
		return true;
	return false;
}

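/*
 * RNDR always produces a 64-bit value; the int variant simply
 * truncates it to the low 32 bits.
 */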
static inline bool __must_check arch_get_random_int(unsigned int *v)
{
	if (cpus_have_const_cap(ARM64_HAS_RNG)) {
		unsigned long val;

		if (__arm64_rndr(&val)) {
			*v = val;
			return true;
		}
	}
	return false;
}

static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
{
	struct arm_smccc_res res;

	/*
	 * We prefer the SMCCC call, since its semantics (returning actual
	 * hardware-backed entropy) are closer to the idea behind this
	 * function than what even the RNDRRS register provides
	 * (the output of a pseudo RNG freshly seeded by a TRNG).
	 */
	if (smccc_trng_available) {
		arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 64, &res);
		if ((int)res.a0 >= 0) {
			*v = res.a3;
			return true;
		}
	}

	/*
	 * RNDRRS is not backed by an entropy source but by a DRBG that is
	 * reseeded after each invocation. This is not a 100% fit but good
	 * enough to implement this API if no other entropy source exists.
	 */
	if (cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndrrs(v))
		return true;

	return false;
}

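/*
 * As above, but only 32 bits of entropy are requested from the
 * firmware TRNG; the result is delivered in the low bits of a3.
 */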
static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
{
	struct arm_smccc_res res;
	unsigned long val;

	if (smccc_trng_available) {
		arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 32, &res);
		if ((int)res.a0 >= 0) {
			*v = res.a3 & GENMASK(31, 0);
			return true;
		}
	}

	if (cpus_have_const_cap(ARM64_HAS_RNG)) {
		if (__arm64_rndrrs(&val)) {
			*v = val;
			return true;
		}
	}

	return false;
}

static inline bool __init __early_cpu_has_rndr(void)
{
	/* Open code as we run prior to the first call to cpufeature. */
	unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);
	return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf;
}

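/*
 * Early boot variant, called before the cpufeature framework is up:
 * the WARN_ON() guards against use after boot, and the open-coded
 * __early_cpu_has_rndr() check replaces cpus_have_const_cap().
 */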
static inline bool __init __must_check
arch_get_random_seed_long_early(unsigned long *v)
{
	WARN_ON(system_state != SYSTEM_BOOTING);

	if (smccc_trng_available) {
		struct arm_smccc_res res;

		arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 64, &res);
		if ((int)res.a0 >= 0) {
			*v = res.a3;
			return true;
		}
	}

	if (__early_cpu_has_rndr() && __arm64_rndr(v))
		return true;

	return false;
}
#define arch_get_random_seed_long_early arch_get_random_seed_long_early

#else /* !CONFIG_ARCH_RANDOM */

static inline bool __init smccc_probe_trng(void)
{
	return false;
}

#endif /* CONFIG_ARCH_RANDOM */
#endif /* _ASM_ARCHRANDOM_H */
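
For context, a minimal sketch of how an in-kernel caller might consume this
interface, preferring seed-grade entropy and falling back to the DRBG-backed
RNDR read. The helper name is hypothetical and not part of this file:

/* Hypothetical consumer, for illustration only. */
#include <asm/archrandom.h>

static unsigned long example_get_seed(void)
{
	unsigned long seed;

	/* Prefer firmware/TRNG-backed entropy ... */
	if (arch_get_random_seed_long(&seed))
		return seed;
	/* ... then fall back to the reseeded-DRBG output of RNDR ... */
	if (arch_get_random_long(&seed))
		return seed;
	/* ... and signal failure with 0 if neither source is available. */
	return 0;
}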