| author | Jason A. Donenfeld <Jason@zx2c4.com> | 2022-10-29 01:18:04 +0200 |
|---|---|---|
| committer | Jason A. Donenfeld <Jason@zx2c4.com> | 2022-11-18 02:18:10 +0100 |
| commit | 2c03e16f44993d45f064580adb0eb408b513ad72 | |
| tree | 90a6fb43cdf6bfd22bd5ae8fe1f5602f073b36b2 /arch/arm64/include/asm/archrandom.h | |
| parent | random: use random.trust_{bootloader,cpu} command line option only | |
| download | linux-2c03e16f44993d45f064580adb0eb408b513ad72.tar.xz, linux-2c03e16f44993d45f064580adb0eb408b513ad72.zip | |
random: remove early archrandom abstraction
The arch_get_random*_early() abstraction is not especially useful and adds
complexity, because calls to arch_get_random*() may well happen between
random_init_early(), which uses arch_get_random*_early(), and
init_cpu_features(). During that gap, crng_reseed() might be called, and
crng_reseed() uses arch_get_random*(), since it is mostly not init code.
Instead, we can test whether we are in the early phase inside
arch_get_random*() itself, and in doing so avoid any ambiguity about
where we are. Fortunately, the only architecture that currently
implements arch_get_random*_early(), arm64, also has an alternatives-based
CPU feature system, one flag of which indicates whether the other flags
have been initialized. This makes it possible to perform the early check
at zero cost once the system is initialized.
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Jean-Philippe Brucker <jean-philippe@linaro.org>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
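To make the mechanism concrete, below is a minimal userspace model of the call-time check described in the commit message. It is only a sketch: `caps_finalized`, `system_rng_cap`, `this_cpu_probe_rng()`, and `cpu_has_rng()` are illustrative stand-ins for the kernel's system_capabilities_finalized(), cpus_have_const_cap(ARM64_HAS_RNG), this_cpu_has_cap(ARM64_HAS_RNG), and the new __cpu_has_rng() helper, whose real implementation appears in the diff further down.

```c
/*
 * Userspace model of the call-time early check (illustrative only).
 * caps_finalized stands in for system_capabilities_finalized(),
 * this_cpu_probe_rng() for this_cpu_has_cap(ARM64_HAS_RNG), and
 * system_rng_cap for cpus_have_const_cap(ARM64_HAS_RNG).
 */
#include <stdbool.h>
#include <stdio.h>

static bool caps_finalized;  /* set once the cpufeature pass has run */
static bool system_rng_cap;  /* the finalized, system-wide answer    */

/* Probe the current CPU directly, as the early path must. */
static bool this_cpu_probe_rng(void)
{
	return true; /* e.g. read this CPU's ID register */
}

static bool cpu_has_rng(void)
{
	if (!caps_finalized)
		return this_cpu_probe_rng(); /* early boot: per-CPU probe  */
	return system_rng_cap;               /* after init: cached answer */
}

int main(void)
{
	printf("early: %d\n", cpu_has_rng()); /* falls back to the probe   */
	caps_finalized = true;
	system_rng_cap = true;
	printf("late:  %d\n", cpu_has_rng()); /* uses the finalized result */
	return 0;
}
```

The model only captures the control flow; in the kernel, the post-finalization path goes through the alternatives-based capability machinery, which is what makes the check effectively free once the system is up.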
Diffstat (limited to 'arch/arm64/include/asm/archrandom.h')
-rw-r--r-- | arch/arm64/include/asm/archrandom.h | 48
1 file changed, 10 insertions, 38 deletions
diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h
index 109e2a4454be..2f5f3da34782 100644
--- a/arch/arm64/include/asm/archrandom.h
+++ b/arch/arm64/include/asm/archrandom.h
@@ -5,6 +5,7 @@
 #include <linux/arm-smccc.h>
 #include <linux/bug.h>
 #include <linux/kernel.h>
+#include <linux/irqflags.h>
 #include <asm/cpufeature.h>
 
 #define ARM_SMCCC_TRNG_MIN_VERSION	0x10000UL
@@ -58,6 +59,13 @@ static inline bool __arm64_rndrrs(unsigned long *v)
 	return ok;
 }
 
+static __always_inline bool __cpu_has_rng(void)
+{
+	if (unlikely(!system_capabilities_finalized() && !preemptible()))
+		return this_cpu_has_cap(ARM64_HAS_RNG);
+	return cpus_have_const_cap(ARM64_HAS_RNG);
+}
+
 static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
 {
 	/*
@@ -66,7 +74,7 @@ static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t
 	 * cpufeature code and with potential scheduling between CPUs
 	 * with and without the feature.
 	 */
-	if (max_longs && cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndr(v))
+	if (max_longs && __cpu_has_rng() && __arm64_rndr(v))
 		return 1;
 	return 0;
 }
@@ -108,7 +116,7 @@ static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, s
 	 * reseeded after each invocation. This is not a 100% fit but good
 	 * enough to implement this API if no other entropy source exists.
 	 */
-	if (cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndrrs(v))
+	if (__cpu_has_rng() && __arm64_rndrrs(v))
 		return 1;
 
 	return 0;
@@ -121,40 +129,4 @@ static inline bool __init __early_cpu_has_rndr(void)
 	return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf;
 }
 
-static inline size_t __init __must_check
-arch_get_random_seed_longs_early(unsigned long *v, size_t max_longs)
-{
-	WARN_ON(system_state != SYSTEM_BOOTING);
-
-	if (!max_longs)
-		return 0;
-
-	if (smccc_trng_available) {
-		struct arm_smccc_res res;
-
-		max_longs = min_t(size_t, 3, max_longs);
-		arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, max_longs * 64, &res);
-		if ((int)res.a0 >= 0) {
-			switch (max_longs) {
-			case 3:
-				*v++ = res.a1;
-				fallthrough;
-			case 2:
-				*v++ = res.a2;
-				fallthrough;
-			case 1:
-				*v++ = res.a3;
-				break;
-			}
-			return max_longs;
-		}
-	}
-
-	if (__early_cpu_has_rndr() && __arm64_rndr(v))
-		return 1;
-
-	return 0;
-}
-#define arch_get_random_seed_longs_early arch_get_random_seed_longs_early
-
 #endif /* _ASM_ARCHRANDOM_H */
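As a usage note, callers of the arch_get_random_seed_longs() interface shown above must cope with short returns, since the function reports how many longs it actually filled out of max_longs. The snippet below is only a hedged sketch of such a caller in kernel-style C; seed_from_arch() and mix_in_entropy() are hypothetical names, and the loop is not copied from drivers/char/random.c.

```c
/*
 * Illustrative caller of arch_get_random_seed_longs() (kernel context;
 * seed_from_arch() and mix_in_entropy() are hypothetical): keep asking for
 * the remaining longs until the architecture either fills the buffer or
 * reports that nothing is available.
 */
static void seed_from_arch(void)
{
	unsigned long entropy[4];
	size_t filled = 0, longs;

	while (filled < ARRAY_SIZE(entropy)) {
		longs = arch_get_random_seed_longs(entropy + filled,
						   ARRAY_SIZE(entropy) - filled);
		if (!longs)
			break;		/* no (more) arch entropy right now */
		filled += longs;
	}

	/* mix_in_entropy() is a hypothetical sink for the collected words. */
	if (filled)
		mix_in_entropy(entropy, filled);
}
```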