author      Jason A. Donenfeld <Jason@zx2c4.com>	2022-07-17 13:35:24 +0300
committer   Jason A. Donenfeld <Jason@zx2c4.com>	2022-07-25 14:26:14 +0300
commit      d349ab99eec7ab0f977fc4aac27aa476907acf90 (patch)
tree        b98736b45c4f0c00ea2e00e5cefe543bd2ce0759 /arch/arm64
parent      0b9ba6135d7f18b82f3d8bebb55ded725ba88e0e (diff)
download    linux-d349ab99eec7ab0f977fc4aac27aa476907acf90.tar.xz
random: handle archrandom with multiple longs
The archrandom interface was originally designed for x86, which supplies
RDRAND/RDSEED for receiving random words into registers, resulting in
one function to generate an int and another to generate a long. However,
other architectures don't follow this.
On arm64, the SMCCC TRNG interface can return between one and three
longs. On s390, the CPACF TRNG interface can return arbitrary amounts,
with four longs having the same cost as one. On UML, the os_getrandom()
interface can return arbitrary amounts.
So change the API signature to take a "max_longs" parameter designating
the maximum number of longs requested, and then return the number of
longs generated.
Since callers need to check this return value and loop anyway, each arch
implementation does not bother implementing its own loop to try again to
fill the maximum number of longs. Additionally, all existing callers
pass in a constant max_longs parameter. Taken together, these two things
mean that the codegen doesn't really change much for one-word-at-a-time
platforms, while performance is greatly improved on platforms such as
s390.
Acked-by: Heiko Carstens <hca@linux.ibm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Michael Ellerman <mpe@ellerman.id.au>
Acked-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
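
To illustrate the calling convention described above, here is a minimal sketch of how a consumer might loop over the new interface to fill a buffer of longs; fill_seed_longs() is a hypothetical helper written for this example, not part of the patch:

	/*
	 * Hypothetical example: request up to the number of longs still
	 * needed, and advance by however many the architecture actually
	 * delivered. A return of 0 means no arch entropy is available.
	 */
	static size_t fill_seed_longs(unsigned long *buf, size_t nlongs)
	{
		size_t filled = 0;

		while (filled < nlongs) {
			size_t n = arch_get_random_seed_longs(buf + filled,
							      nlongs - filled);
			if (!n)
				break;
			filled += n;
		}
		return filled;
	}

With a constant max_longs of 1 the loop collapses, so codegen is essentially the same as the old boolean interface; on arm64 with the SMCCC TRNG, a single call can return up to three longs per iteration.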
Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/include/asm/archrandom.h	102
-rw-r--r--	arch/arm64/kernel/kaslr.c	2
2 files changed, 48 insertions(+), 56 deletions(-)
diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h
index c3b9fa56af67..109e2a4454be 100644
--- a/arch/arm64/include/asm/archrandom.h
+++ b/arch/arm64/include/asm/archrandom.h
@@ -58,7 +58,7 @@ static inline bool __arm64_rndrrs(unsigned long *v)
 	return ok;
 }
 
-static inline bool __must_check arch_get_random_long(unsigned long *v)
+static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
 {
 	/*
 	 * Only support the generic interface after we have detected
@@ -66,27 +66,15 @@ static inline bool __must_check arch_get_random_long(unsigned long *v)
 	 * cpufeature code and with potential scheduling between CPUs
 	 * with and without the feature.
 	 */
-	if (cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndr(v))
-		return true;
-	return false;
+	if (max_longs && cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndr(v))
+		return 1;
+	return 0;
 }
 
-static inline bool __must_check arch_get_random_int(unsigned int *v)
+static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
 {
-	if (cpus_have_const_cap(ARM64_HAS_RNG)) {
-		unsigned long val;
-
-		if (__arm64_rndr(&val)) {
-			*v = val;
-			return true;
-		}
-	}
-	return false;
-}
-
-static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
-{
-	struct arm_smccc_res res;
+	if (!max_longs)
+		return 0;
 
 	/*
 	 * We prefer the SMCCC call, since its semantics (return actual
@@ -95,10 +83,23 @@ static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
 	 * (the output of a pseudo RNG freshly seeded by a TRNG).
 	 */
 	if (smccc_trng_available) {
-		arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 64, &res);
+		struct arm_smccc_res res;
+
+		max_longs = min_t(size_t, 3, max_longs);
+		arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, max_longs * 64, &res);
 		if ((int)res.a0 >= 0) {
-			*v = res.a3;
-			return true;
+			switch (max_longs) {
+			case 3:
+				*v++ = res.a1;
+				fallthrough;
+			case 2:
+				*v++ = res.a2;
+				fallthrough;
+			case 1:
+				*v++ = res.a3;
+				break;
+			}
+			return max_longs;
 		}
 	}
 
@@ -108,32 +109,9 @@ static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
 	 * enough to implement this API if no other entropy source exists.
 	 */
 	if (cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndrrs(v))
-		return true;
+		return 1;
 
-	return false;
-}
-
-static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
-{
-	struct arm_smccc_res res;
-	unsigned long val;
-
-	if (smccc_trng_available) {
-		arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 32, &res);
-		if ((int)res.a0 >= 0) {
-			*v = res.a3 & GENMASK(31, 0);
-			return true;
-		}
-	}
-
-	if (cpus_have_const_cap(ARM64_HAS_RNG)) {
-		if (__arm64_rndrrs(&val)) {
-			*v = val;
-			return true;
-		}
-	}
-
-	return false;
+	return 0;
 }
 
 static inline bool __init __early_cpu_has_rndr(void)
@@ -143,26 +121,40 @@ static inline bool __init __early_cpu_has_rndr(void)
 	return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf;
 }
 
-static inline bool __init __must_check
-arch_get_random_seed_long_early(unsigned long *v)
+static inline size_t __init __must_check
+arch_get_random_seed_longs_early(unsigned long *v, size_t max_longs)
 {
 	WARN_ON(system_state != SYSTEM_BOOTING);
 
+	if (!max_longs)
+		return 0;
+
 	if (smccc_trng_available) {
 		struct arm_smccc_res res;
 
-		arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 64, &res);
+		max_longs = min_t(size_t, 3, max_longs);
+		arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, max_longs * 64, &res);
 		if ((int)res.a0 >= 0) {
-			*v = res.a3;
-			return true;
+			switch (max_longs) {
+			case 3:
+				*v++ = res.a1;
+				fallthrough;
+			case 2:
+				*v++ = res.a2;
+				fallthrough;
+			case 1:
+				*v++ = res.a3;
+				break;
+			}
+			return max_longs;
 		}
 	}
 
 	if (__early_cpu_has_rndr() && __arm64_rndr(v))
-		return true;
+		return 1;
 
-	return false;
+	return 0;
 }
-#define arch_get_random_seed_long_early arch_get_random_seed_long_early
+#define arch_get_random_seed_longs_early arch_get_random_seed_longs_early
 
 #endif /* _ASM_ARCHRANDOM_H */
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 418b2bba1521..c5d541f358d3 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -106,7 +106,7 @@ u64 __init kaslr_early_init(void)
 	 * and supported.
 	 */
 
-	if (arch_get_random_seed_long_early(&raw))
+	if (arch_get_random_seed_longs_early(&raw, 1))
 		seed ^= raw;
 
 	if (!seed) {