author | H. Peter Anvin <hpa@linux.intel.com> | 2012-07-28 06:26:08 +0400
---|---|---
committer | Theodore Ts'o <tytso@mit.edu> | 2012-07-28 06:37:20 +0400
commit | d2e7c96af1e54b507ae2a6a7dd2baf588417a7e5 (patch) |
tree | a60b31adede15169f7835e8a4b10151f20e8bd24 /drivers/char/random.c |
parent | d114a33387472555188f142ed8e98acdb8181c6d (diff) |
download | linux-d2e7c96af1e54b507ae2a6a7dd2baf588417a7e5.tar.xz |
random: mix in architectural randomness in extract_buf()
Mix in any architectural randomness in extract_buf() instead of
xfer_secondary_pool(). This allows us to mix in more architectural
randomness, and it also makes xfer_secondary_pool() faster, moving a
tiny bit of additional CPU overhead to the process that is extracting
the randomness.
[ Commit description modified by tytso to remove an extended
advertisement for the RDRAND instruction. ]
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Cc: DJ Johnston <dj.johnston@intel.com>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Cc: stable@vger.kernel.org
Diffstat (limited to 'drivers/char/random.c')
-rw-r--r-- | drivers/char/random.c | 56
1 file changed, 32 insertions(+), 24 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 1a2dfa816041..b86eae9b77df 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -277,6 +277,8 @@
 #define SEC_XFER_SIZE 512
 #define EXTRACT_SIZE 10
 
+#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
+
 /*
  * The minimum number of bits of entropy before we wake up a read on
  * /dev/random.  Should be enough to do a significant reseed.
@@ -813,11 +815,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
  */
 static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
 {
-	union {
-		__u32	tmp[OUTPUT_POOL_WORDS];
-		long	hwrand[4];
-	} u;
-	int	i;
+	__u32	tmp[OUTPUT_POOL_WORDS];
 
 	if (r->pull && r->entropy_count < nbytes * 8 &&
 	    r->entropy_count < r->poolinfo->POOLBITS) {
@@ -828,23 +826,17 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
 		/* pull at least as many as BYTES as wakeup BITS */
 		bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
 		/* but never more than the buffer size */
-		bytes = min_t(int, bytes, sizeof(u.tmp));
+		bytes = min_t(int, bytes, sizeof(tmp));
 
 		DEBUG_ENT("going to reseed %s with %d bits "
 			  "(%d of %d requested)\n",
 			  r->name, bytes * 8, nbytes * 8, r->entropy_count);
 
-		bytes = extract_entropy(r->pull, u.tmp, bytes,
+		bytes = extract_entropy(r->pull, tmp, bytes,
 					random_read_wakeup_thresh / 8, rsvd);
-		mix_pool_bytes(r, u.tmp, bytes, NULL);
+		mix_pool_bytes(r, tmp, bytes, NULL);
 		credit_entropy_bits(r, bytes*8);
 	}
-	kmemcheck_mark_initialized(&u.hwrand, sizeof(u.hwrand));
-	for (i = 0; i < 4; i++)
-		if (arch_get_random_long(&u.hwrand[i]))
-			break;
-	if (i)
-		mix_pool_bytes(r, &u.hwrand, sizeof(u.hwrand), 0);
 }
 
 /*
@@ -901,15 +893,19 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
 static void extract_buf(struct entropy_store *r, __u8 *out)
 {
 	int i;
-	__u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
+	union {
+		__u32 w[5];
+		unsigned long l[LONGS(EXTRACT_SIZE)];
+	} hash;
+	__u32 workspace[SHA_WORKSPACE_WORDS];
 	__u8 extract[64];
 	unsigned long flags;
 
 	/* Generate a hash across the pool, 16 words (512 bits) at a time */
-	sha_init(hash);
+	sha_init(hash.w);
 	spin_lock_irqsave(&r->lock, flags);
 	for (i = 0; i < r->poolinfo->poolwords; i += 16)
-		sha_transform(hash, (__u8 *)(r->pool + i), workspace);
+		sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
 
 	/*
 	 * We mix the hash back into the pool to prevent backtracking
@@ -920,14 +916,14 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
 	 * brute-forcing the feedback as hard as brute-forcing the
 	 * hash.
 	 */
-	__mix_pool_bytes(r, hash, sizeof(hash), extract);
+	__mix_pool_bytes(r, hash.w, sizeof(hash.w), extract);
 	spin_unlock_irqrestore(&r->lock, flags);
 
 	/*
 	 * To avoid duplicates, we atomically extract a portion of the
 	 * pool while mixing, and hash one final time.
	 */
-	sha_transform(hash, extract, workspace);
+	sha_transform(hash.w, extract, workspace);
 	memset(extract, 0, sizeof(extract));
 	memset(workspace, 0, sizeof(workspace));
 
@@ -936,11 +932,23 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
 	 * pattern, we fold it in half. Thus, we always feed back
 	 * twice as much data as we output.
 	 */
-	hash[0] ^= hash[3];
-	hash[1] ^= hash[4];
-	hash[2] ^= rol32(hash[2], 16);
-	memcpy(out, hash, EXTRACT_SIZE);
-	memset(hash, 0, sizeof(hash));
+	hash.w[0] ^= hash.w[3];
+	hash.w[1] ^= hash.w[4];
+	hash.w[2] ^= rol32(hash.w[2], 16);
+
+	/*
+	 * If we have a architectural hardware random number
+	 * generator, mix that in, too.
+	 */
+	for (i = 0; i < LONGS(EXTRACT_SIZE); i++) {
+		unsigned long v;
+		if (!arch_get_random_long(&v))
+			break;
+		hash.l[i] ^= v;
+	}
+
+	memcpy(out, &hash, EXTRACT_SIZE);
+	memset(&hash, 0, sizeof(hash));
 }
 
 static ssize_t extract_entropy(struct entropy_store *r, void *buf,
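For readers who want to see the new mixing pattern outside the kernel, the sketch below is a minimal user-space illustration of what the patched extract_buf() does after folding the SHA-1 result: the same bytes are viewed either as five 32-bit hash words or as LONGS(EXTRACT_SIZE) unsigned longs, and any available hardware randomness is XORed over those longs before the first EXTRACT_SIZE bytes are copied out. It is not the kernel code: get_hw_random_long() is a hypothetical stand-in for arch_get_random_long(), and the starting hash words are placeholders rather than a real SHA-1 over the entropy pool.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define EXTRACT_SIZE 10
#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))

/* Hypothetical stand-in for arch_get_random_long(); returning 0
 * would mean "no architectural RNG available". */
static int get_hw_random_long(unsigned long *v)
{
	*v = ((unsigned long)rand() << 16) ^ (unsigned long)rand();
	return 1;
}

int main(void)
{
	union {
		uint32_t w[5];				/* SHA-1 state words */
		unsigned long l[LONGS(EXTRACT_SIZE)];	/* same bytes viewed as longs */
	} hash = { .w = { 1, 2, 3, 4, 5 } };		/* placeholder hash values */
	unsigned char out[EXTRACT_SIZE];
	size_t i;

	/* Fold the hash in half, as extract_buf() does before output. */
	hash.w[0] ^= hash.w[3];
	hash.w[1] ^= hash.w[4];
	hash.w[2] ^= (hash.w[2] << 16) | (hash.w[2] >> 16);	/* rol32(w[2], 16) */

	/* XOR in hardware randomness, stopping at the first failure. */
	for (i = 0; i < LONGS(EXTRACT_SIZE); i++) {
		unsigned long v;
		if (!get_hw_random_long(&v))
			break;
		hash.l[i] ^= v;
	}

	memcpy(out, &hash, EXTRACT_SIZE);	/* output only the first 10 bytes */
	memset(&hash, 0, sizeof(hash));		/* wipe intermediate state */

	for (i = 0; i < EXTRACT_SIZE; i++)
		printf("%02x", out[i]);
	putchar('\n');
	return 0;
}
```

With EXTRACT_SIZE = 10 and 8-byte longs, LONGS(EXTRACT_SIZE) works out to (10 + 7) / 8 = 2, so at most two hardware longs are XORed over the 20-byte folded hash, of which only the first ten bytes are returned. Because the hardware output is XORed over the hash rather than used in place of it, the extracted bytes are no weaker than the pool hash alone even if the architectural RNG were fully predictable.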