Diffstat (limited to 'drivers/char/random.c')
 -rw-r--r--  drivers/char/random.c | 869
 1 file changed, 398 insertions(+), 471 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 605969ed0f96..68613f0b6887 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1,8 +1,7 @@
/*
* random.c -- A strong random number generator
*
- * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
- * Rights Reserved.
+ * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*
* Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
*
@@ -78,12 +77,12 @@
* an *estimate* of how many bits of randomness have been stored into
* the random number generator's internal state.
*
- * When random bytes are desired, they are obtained by taking the SHA
- * hash of the contents of the "entropy pool". The SHA hash avoids
+ * When random bytes are desired, they are obtained by taking the BLAKE2s
+ * hash of the contents of the "entropy pool". The BLAKE2s hash avoids
* exposing the internal state of the entropy pool. It is believed to
* be computationally infeasible to derive any useful information
- * about the input of SHA from its output. Even if it is possible to
- * analyze SHA in some clever way, as long as the amount of data
+ * about the input of BLAKE2s from its output. Even if it is possible to
+ * analyze BLAKE2s in some clever way, as long as the amount of data
* returned from the generator is less than the inherent entropy in
* the pool, the output data is totally unpredictable. For this
* reason, the routine decreases its internal estimate of how many
@@ -93,7 +92,7 @@
* If this estimate goes to zero, the routine can still generate
* random numbers; however, an attacker may (at least in theory) be
* able to infer the future output of the generator from prior
- * outputs. This requires successful cryptanalysis of SHA, which is
+ * outputs. This requires successful cryptanalysis of BLAKE2s, which is
* not believed to be feasible, but there is a remote possibility.
* Nonetheless, these numbers should be useful for the vast majority
* of purposes.
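
As a minimal sketch of the extraction scheme described above, using the
blake2s_init()/blake2s_update()/blake2s_final() API this patch switches
to (the pool buffer and output size here are illustrative, not the
kernel's exact extract path):

#include <crypto/blake2s.h>
#include <linux/string.h>

/* Hash an entropy pool and emit only half the digest, so more data
 * is always fed back into the pool than is ever given out. */
static void sketch_extract(const u8 *pool, size_t pool_len,
			   u8 out[BLAKE2S_HASH_SIZE / 2])
{
	struct blake2s_state state;
	u8 hash[BLAKE2S_HASH_SIZE];

	blake2s_init(&state, sizeof(hash));
	blake2s_update(&state, pool, pool_len);
	blake2s_final(&state, hash);	/* zeros out the state */

	memcpy(out, hash, BLAKE2S_HASH_SIZE / 2);
	memzero_explicit(hash, sizeof(hash));
}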
@@ -102,7 +101,7 @@
* ===============================
*
* There are four exported interfaces; two for use within the kernel,
- * and two or use from userspace.
+ * and two for use from userspace.
*
* Exported interfaces ---- userspace output
* -----------------------------------------
@@ -125,7 +124,7 @@
*
* The primary kernel interface is
*
- * void get_random_bytes(void *buf, int nbytes);
+ * void get_random_bytes(void *buf, int nbytes);
*
* This interface will return the requested number of random bytes,
* and place it in the requested buffer. This is equivalent to a
@@ -133,10 +132,10 @@
*
* For less critical applications, there are the functions:
*
- * u32 get_random_u32()
- * u64 get_random_u64()
- * unsigned int get_random_int()
- * unsigned long get_random_long()
+ * u32 get_random_u32()
+ * u64 get_random_u64()
+ * unsigned int get_random_int()
+ * unsigned long get_random_long()
*
* These are produced by a cryptographic RNG seeded from get_random_bytes,
* and so do not deplete the entropy pool as much. These are recommended
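
A short usage sketch of the interfaces listed above (the surrounding
helper is hypothetical; only the get_random_*() calls are the
documented API):

#include <linux/random.h>

/* Hypothetical driver helper: the key is security-critical, so it
 * uses get_random_bytes(); the retry jitter is not, so the cheaper
 * batched get_random_u32() suffices. */
static void example_init(u8 key[16], unsigned int *jitter_ms)
{
	get_random_bytes(key, 16);
	*jitter_ms = get_random_u32() % 100;
}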
@@ -198,10 +197,13 @@
* from the devices are:
*
* void add_device_randomness(const void *buf, unsigned int size);
- * void add_input_randomness(unsigned int type, unsigned int code,
+ * void add_input_randomness(unsigned int type, unsigned int code,
* unsigned int value);
- * void add_interrupt_randomness(int irq, int irq_flags);
- * void add_disk_randomness(struct gendisk *disk);
+ * void add_interrupt_randomness(int irq);
+ * void add_disk_randomness(struct gendisk *disk);
+ * void add_hwgenerator_randomness(const char *buffer, size_t count,
+ * size_t entropy);
+ * void add_bootloader_randomness(const void *buf, unsigned int size);
*
* add_device_randomness() is for adding data to the random pool that
* is likely to differ between two devices (or possibly even per boot).
@@ -228,6 +230,14 @@
* particular randomness source. They do this by keeping track of the
* first and second order deltas of the event timings.
*
+ * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
+ * entropy as specified by the caller. If the entropy pool is full it will
+ * block until more entropy is needed.
+ *
+ * add_bootloader_randomness() is the same as add_hwgenerator_randomness() or
+ * add_device_randomness(), depending on whether or not the configuration
+ * option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
+ *
* Ensuring unpredictability at system startup
* ============================================
*
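
As an illustration of the hardware-RNG input path described above, a
sketch of a feeder thread (everything except
add_hwgenerator_randomness() is hypothetical, and the claim of 8 bits
of entropy per byte is an assumption; real drivers normally sit behind
the hwrng framework, which makes this call on their behalf):

#include <linux/kthread.h>
#include <linux/random.h>

/* Since add_hwgenerator_randomness() blocks while the pool is full,
 * the loop is naturally throttled to the pool's consumption rate. */
static int example_feed_thread(void *unused)
{
	char buf[32];

	while (!kthread_should_stop()) {
		example_read_device(buf, sizeof(buf));	/* hypothetical */
		add_hwgenerator_randomness(buf, sizeof(buf),
					   sizeof(buf) * 8);
	}
	return 0;
}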
@@ -286,8 +296,8 @@
* /dev/random and /dev/urandom created already, they can be created
* by using the commands:
*
- * mknod /dev/random c 1 8
- * mknod /dev/urandom c 1 9
+ * mknod /dev/random c 1 8
+ * mknod /dev/urandom c 1 9
*
* Acknowledgements:
* =================
@@ -327,7 +337,6 @@
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
-#include <linux/fips.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
@@ -336,7 +345,7 @@
#include <linux/completion.h>
#include <linux/uuid.h>
#include <crypto/chacha.h>
-#include <crypto/sha1.h>
+#include <crypto/blake2s.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
@@ -350,33 +359,11 @@
/* #define ADD_INTERRUPT_BENCH */
/*
- * Configuration information
- */
-#define INPUT_POOL_SHIFT 12
-#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
-#define OUTPUT_POOL_SHIFT 10
-#define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
-#define EXTRACT_SIZE 10
-
-
-#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
-
-/*
- * To allow fractional bits to be tracked, the entropy_count field is
- * denominated in units of 1/8th bits.
- *
- * 2*(ENTROPY_SHIFT + poolbitshift) must <= 31, or the multiply in
- * credit_entropy_bits() needs to be 64 bits wide.
- */
-#define ENTROPY_SHIFT 3
-#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
-
-/*
* If the entropy count falls under this number of bits, then we
* should wake up processes which are selecting or polling on write
* access to /dev/random.
*/
-static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
+static int random_write_wakeup_bits = 28 * (1 << 5);
/*
* Originally, we used a primitive polynomial of degree .poolwords
@@ -395,7 +382,7 @@ static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
* Thanks to Colin Plumb for suggesting this.
*
* The mixing operation is much less sensitive than the output hash,
- * where we use SHA-1. All that we want of mixing operation is that
+ * where we use BLAKE2s. All that we want of mixing operation is that
* it be a good non-cryptographic hash; i.e. it not produce collisions
* when fed "random" data of the sort we expect to see. As long as
* the pool state differs for different inputs, we have preserved the
@@ -423,14 +410,27 @@ static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
* polynomial which improves the resulting TGFSR polynomial to be
* irreducible, which we have made here.
*/
-static const struct poolinfo {
- int poolbitshift, poolwords, poolbytes, poolfracbits;
-#define S(x) ilog2(x)+5, (x), (x)*4, (x) << (ENTROPY_SHIFT+5)
- int tap1, tap2, tap3, tap4, tap5;
-} poolinfo_table[] = {
- /* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
+enum poolinfo {
+ POOL_WORDS = 128,
+ POOL_WORDMASK = POOL_WORDS - 1,
+ POOL_BYTES = POOL_WORDS * sizeof(u32),
+ POOL_BITS = POOL_BYTES * 8,
+ POOL_BITSHIFT = ilog2(POOL_BITS),
+
+ /* To allow fractional bits to be tracked, the entropy_count field is
+ * denominated in units of 1/8th bits. */
+ POOL_ENTROPY_SHIFT = 3,
+#define POOL_ENTROPY_BITS() (input_pool.entropy_count >> POOL_ENTROPY_SHIFT)
+ POOL_FRACBITS = POOL_BITS << POOL_ENTROPY_SHIFT,
+
/* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
- { S(128), 104, 76, 51, 25, 1 },
+ POOL_TAP1 = 104,
+ POOL_TAP2 = 76,
+ POOL_TAP3 = 51,
+ POOL_TAP4 = 25,
+ POOL_TAP5 = 1,
+
+ EXTRACT_SIZE = BLAKE2S_HASH_SIZE / 2
};
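
Worked out, the constants above give: POOL_BYTES = 128 * 4 = 512,
POOL_BITS = 512 * 8 = 4096, POOL_BITSHIFT = ilog2(4096) = 12,
POOL_FRACBITS = 4096 << 3 = 32768 eighth-bits, and, with
BLAKE2S_HASH_SIZE = 32, EXTRACT_SIZE = 16 bytes per extraction.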
/*
@@ -443,13 +443,17 @@ static DEFINE_SPINLOCK(random_ready_list_lock);
static LIST_HEAD(random_ready_list);
struct crng_state {
- __u32 state[16];
- unsigned long init_time;
- spinlock_t lock;
+ u32 state[16];
+ unsigned long init_time;
+ spinlock_t lock;
};
static struct crng_state primary_crng = {
.lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
+ .state[0] = CHACHA_CONSTANT_EXPA,
+ .state[1] = CHACHA_CONSTANT_ND_3,
+ .state[2] = CHACHA_CONSTANT_2_BY,
+ .state[3] = CHACHA_CONSTANT_TE_K,
};
/*
@@ -461,13 +465,14 @@ static struct crng_state primary_crng = {
* its value (from 0->1->2).
*/
static int crng_init = 0;
+static bool crng_need_final_init = false;
#define crng_ready() (likely(crng_init > 1))
static int crng_init_cnt = 0;
static unsigned long crng_global_init_time = 0;
-#define CRNG_INIT_CNT_THRESH (2*CHACHA_KEY_SIZE)
-static void _extract_crng(struct crng_state *crng, __u8 out[CHACHA_BLOCK_SIZE]);
+#define CRNG_INIT_CNT_THRESH (2 * CHACHA_KEY_SIZE)
+static void _extract_crng(struct crng_state *crng, u8 out[CHACHA_BLOCK_SIZE]);
static void _crng_backtrack_protect(struct crng_state *crng,
- __u8 tmp[CHACHA_BLOCK_SIZE], int used);
+ u8 tmp[CHACHA_BLOCK_SIZE], int used);
static void process_random_ready_list(void);
static void _get_random_bytes(void *buf, int nbytes);
@@ -488,38 +493,23 @@ MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
*
**********************************************************************/
-struct entropy_store;
-struct entropy_store {
- /* read-only data: */
- const struct poolinfo *poolinfo;
- __u32 *pool;
- const char *name;
+static u32 input_pool_data[POOL_WORDS] __latent_entropy;
- /* read-write data: */
+static struct {
spinlock_t lock;
- unsigned short add_ptr;
- unsigned short input_rotate;
+ u16 add_ptr;
+ u16 input_rotate;
int entropy_count;
- unsigned int last_data_init:1;
- __u8 last_data[EXTRACT_SIZE];
+} input_pool = {
+ .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
};
-static ssize_t extract_entropy(struct entropy_store *r, void *buf,
- size_t nbytes, int min, int rsvd);
-static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
- size_t nbytes, int fips);
-
-static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
-static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
+static ssize_t extract_entropy(void *buf, size_t nbytes, int min);
+static ssize_t _extract_entropy(void *buf, size_t nbytes);
-static struct entropy_store input_pool = {
- .poolinfo = &poolinfo_table[0],
- .name = "input",
- .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
- .pool = input_pool_data
-};
+static void crng_reseed(struct crng_state *crng, bool use_input_pool);
-static __u32 const twist_table[8] = {
+static const u32 twist_table[8] = {
0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
@@ -533,39 +523,31 @@ static __u32 const twist_table[8] = {
* it's cheap to do so and helps slightly in the expected case where
* the entropy is concentrated in the low-order bits.
*/
-static void _mix_pool_bytes(struct entropy_store *r, const void *in,
- int nbytes)
+static void _mix_pool_bytes(const void *in, int nbytes)
{
- unsigned long i, tap1, tap2, tap3, tap4, tap5;
+ unsigned long i;
int input_rotate;
- int wordmask = r->poolinfo->poolwords - 1;
- const char *bytes = in;
- __u32 w;
+ const u8 *bytes = in;
+ u32 w;
- tap1 = r->poolinfo->tap1;
- tap2 = r->poolinfo->tap2;
- tap3 = r->poolinfo->tap3;
- tap4 = r->poolinfo->tap4;
- tap5 = r->poolinfo->tap5;
-
- input_rotate = r->input_rotate;
- i = r->add_ptr;
+ input_rotate = input_pool.input_rotate;
+ i = input_pool.add_ptr;
/* mix one byte at a time to simplify size handling and churn faster */
while (nbytes--) {
w = rol32(*bytes++, input_rotate);
- i = (i - 1) & wordmask;
+ i = (i - 1) & POOL_WORDMASK;
/* XOR in the various taps */
- w ^= r->pool[i];
- w ^= r->pool[(i + tap1) & wordmask];
- w ^= r->pool[(i + tap2) & wordmask];
- w ^= r->pool[(i + tap3) & wordmask];
- w ^= r->pool[(i + tap4) & wordmask];
- w ^= r->pool[(i + tap5) & wordmask];
+ w ^= input_pool_data[i];
+ w ^= input_pool_data[(i + POOL_TAP1) & POOL_WORDMASK];
+ w ^= input_pool_data[(i + POOL_TAP2) & POOL_WORDMASK];
+ w ^= input_pool_data[(i + POOL_TAP3) & POOL_WORDMASK];
+ w ^= input_pool_data[(i + POOL_TAP4) & POOL_WORDMASK];
+ w ^= input_pool_data[(i + POOL_TAP5) & POOL_WORDMASK];
/* Mix the result back in with a twist */
- r->pool[i] = (w >> 3) ^ twist_table[w & 7];
+ input_pool_data[i] = (w >> 3) ^ twist_table[w & 7];
/*
* Normally, we add 7 bits of rotation to the pool.
@@ -576,33 +558,31 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
}
- r->input_rotate = input_rotate;
- r->add_ptr = i;
+ input_pool.input_rotate = input_rotate;
+ input_pool.add_ptr = i;
}
-static void __mix_pool_bytes(struct entropy_store *r, const void *in,
- int nbytes)
+static void __mix_pool_bytes(const void *in, int nbytes)
{
- trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
- _mix_pool_bytes(r, in, nbytes);
+ trace_mix_pool_bytes_nolock(nbytes, _RET_IP_);
+ _mix_pool_bytes(in, nbytes);
}
-static void mix_pool_bytes(struct entropy_store *r, const void *in,
- int nbytes)
+static void mix_pool_bytes(const void *in, int nbytes)
{
unsigned long flags;
- trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
- spin_lock_irqsave(&r->lock, flags);
- _mix_pool_bytes(r, in, nbytes);
- spin_unlock_irqrestore(&r->lock, flags);
+ trace_mix_pool_bytes(nbytes, _RET_IP_);
+ spin_lock_irqsave(&input_pool.lock, flags);
+ _mix_pool_bytes(in, nbytes);
+ spin_unlock_irqrestore(&input_pool.lock, flags);
}
struct fast_pool {
- __u32 pool[4];
- unsigned long last;
- unsigned short reg_idx;
- unsigned char count;
+ u32 pool[4];
+ unsigned long last;
+ u16 reg_idx;
+ u8 count;
};
/*
@@ -612,8 +592,8 @@ struct fast_pool {
*/
static void fast_mix(struct fast_pool *f)
{
- __u32 a = f->pool[0], b = f->pool[1];
- __u32 c = f->pool[2], d = f->pool[3];
+ u32 a = f->pool[0], b = f->pool[1];
+ u32 c = f->pool[2], d = f->pool[3];
a += b; c += d;
b = rol32(b, 6); d = rol32(d, 27);
@@ -657,17 +637,19 @@ static void process_random_ready_list(void)
* Use credit_entropy_bits_safe() if the value comes from userspace
* or otherwise should be checked for extreme values.
*/
-static void credit_entropy_bits(struct entropy_store *r, int nbits)
+static void credit_entropy_bits(int nbits)
{
- int entropy_count, orig;
- const int pool_size = r->poolinfo->poolfracbits;
- int nfrac = nbits << ENTROPY_SHIFT;
+ int entropy_count, entropy_bits, orig;
+ int nfrac = nbits << POOL_ENTROPY_SHIFT;
+
+ /* Ensure that the multiplication can avoid being 64 bits wide. */
+ BUILD_BUG_ON(2 * (POOL_ENTROPY_SHIFT + POOL_BITSHIFT) > 31);
if (!nbits)
return;
retry:
- entropy_count = orig = READ_ONCE(r->entropy_count);
+ entropy_count = orig = READ_ONCE(input_pool.entropy_count);
if (nfrac < 0) {
/* Debit */
entropy_count += nfrac;
@@ -694,50 +676,43 @@ retry:
* turns no matter how large nbits is.
*/
int pnfrac = nfrac;
- const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2;
+ const int s = POOL_BITSHIFT + POOL_ENTROPY_SHIFT + 2;
/* The +2 corresponds to the /4 in the denominator */
do {
- unsigned int anfrac = min(pnfrac, pool_size/2);
+ unsigned int anfrac = min(pnfrac, POOL_FRACBITS / 2);
unsigned int add =
- ((pool_size - entropy_count)*anfrac*3) >> s;
+ ((POOL_FRACBITS - entropy_count) * anfrac * 3) >> s;
entropy_count += add;
pnfrac -= anfrac;
- } while (unlikely(entropy_count < pool_size-2 && pnfrac));
+ } while (unlikely(entropy_count < POOL_FRACBITS - 2 && pnfrac));
}
if (WARN_ON(entropy_count < 0)) {
- pr_warn("negative entropy/overflow: pool %s count %d\n",
- r->name, entropy_count);
+ pr_warn("negative entropy/overflow: count %d\n", entropy_count);
entropy_count = 0;
- } else if (entropy_count > pool_size)
- entropy_count = pool_size;
- if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+ } else if (entropy_count > POOL_FRACBITS)
+ entropy_count = POOL_FRACBITS;
+ if (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig)
goto retry;
- trace_credit_entropy_bits(r->name, nbits,
- entropy_count >> ENTROPY_SHIFT, _RET_IP_);
-
- if (r == &input_pool) {
- int entropy_bits = entropy_count >> ENTROPY_SHIFT;
+ trace_credit_entropy_bits(nbits, entropy_count >> POOL_ENTROPY_SHIFT, _RET_IP_);
- if (crng_init < 2 && entropy_bits >= 128)
- crng_reseed(&primary_crng, r);
- }
+ entropy_bits = entropy_count >> POOL_ENTROPY_SHIFT;
+ if (crng_init < 2 && entropy_bits >= 128)
+ crng_reseed(&primary_crng, true);
}
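
A userspace sketch of the asymptotic credit rule above: each credit
closes roughly 3/4 of the remaining gap to a full pool, so
entropy_count approaches POOL_FRACBITS without reaching it (constants
mirror the enum above; this is an illustration, not the kernel
function):

#include <stdio.h>

#define POOL_BITSHIFT		12
#define POOL_ENTROPY_SHIFT	3
#define POOL_FRACBITS		(1 << (POOL_BITSHIFT + POOL_ENTROPY_SHIFT))

int main(void)
{
	int entropy_count = 0, i;
	const int s = POOL_BITSHIFT + POOL_ENTROPY_SHIFT + 2;

	for (i = 1; i <= 8; i++) {
		int pnfrac = 512 << POOL_ENTROPY_SHIFT; /* credit 512 bits */
		do {
			int anfrac = pnfrac < POOL_FRACBITS / 2 ?
				     pnfrac : POOL_FRACBITS / 2;
			entropy_count += ((POOL_FRACBITS - entropy_count) *
					  anfrac * 3) >> s;
			pnfrac -= anfrac;
		} while (entropy_count < POOL_FRACBITS - 2 && pnfrac);
		printf("after credit %d: %d/%d eighth-bits\n",
		       i, entropy_count, POOL_FRACBITS);
	}
	return 0;
}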
-static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
+static int credit_entropy_bits_safe(int nbits)
{
- const int nbits_max = r->poolinfo->poolwords * 32;
-
if (nbits < 0)
return -EINVAL;
/* Cap the value to avoid overflows */
- nbits = min(nbits, nbits_max);
+ nbits = min(nbits, POOL_BITS);
- credit_entropy_bits(r, nbits);
+ credit_entropy_bits(nbits);
return 0;
}
@@ -747,11 +722,10 @@ static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
*
*********************************************************************/
-#define CRNG_RESEED_INTERVAL (300*HZ)
+#define CRNG_RESEED_INTERVAL (300 * HZ)
static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
-#ifdef CONFIG_NUMA
/*
* Hack to deal with crazy userspace programs when they are all trying
* to access /dev/urandom in parallel. The programs are almost
@@ -759,7 +733,6 @@ static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
* their brain damage.
*/
static struct crng_state **crng_node_pool __read_mostly;
-#endif
static void invalidate_batched_entropy(void);
static void numa_crng_init(void);
@@ -773,9 +746,9 @@ early_param("random.trust_cpu", parse_trust_cpu);
static bool crng_init_try_arch(struct crng_state *crng)
{
- int i;
- bool arch_init = true;
- unsigned long rv;
+ int i;
+ bool arch_init = true;
+ unsigned long rv;
for (i = 4; i < 16; i++) {
if (!arch_get_random_seed_long(&rv) &&
@@ -791,9 +764,9 @@ static bool crng_init_try_arch(struct crng_state *crng)
static bool __init crng_init_try_arch_early(struct crng_state *crng)
{
- int i;
- bool arch_init = true;
- unsigned long rv;
+ int i;
+ bool arch_init = true;
+ unsigned long rv;
for (i = 4; i < 16; i++) {
if (!arch_get_random_seed_long_early(&rv) &&
@@ -807,35 +780,63 @@ static bool __init crng_init_try_arch_early(struct crng_state *crng)
return arch_init;
}
-static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
+static void crng_initialize_secondary(struct crng_state *crng)
{
chacha_init_consts(crng->state);
- _get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
+ _get_random_bytes(&crng->state[4], sizeof(u32) * 12);
crng_init_try_arch(crng);
crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}
static void __init crng_initialize_primary(struct crng_state *crng)
{
- chacha_init_consts(crng->state);
- _extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
- if (crng_init_try_arch_early(crng) && trust_cpu) {
+ _extract_entropy(&crng->state[4], sizeof(u32) * 12);
+ if (crng_init_try_arch_early(crng) && trust_cpu && crng_init < 2) {
invalidate_batched_entropy();
numa_crng_init();
crng_init = 2;
- pr_notice("crng done (trusting CPU's manufacturer)\n");
+ pr_notice("crng init done (trusting CPU's manufacturer)\n");
}
crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}
-#ifdef CONFIG_NUMA
+static void crng_finalize_init(struct crng_state *crng)
+{
+ if (crng != &primary_crng || crng_init >= 2)
+ return;
+ if (!system_wq) {
+ /* We can't call numa_crng_init until we have workqueues,
+ * so mark this for processing later. */
+ crng_need_final_init = true;
+ return;
+ }
+
+ invalidate_batched_entropy();
+ numa_crng_init();
+ crng_init = 2;
+ process_random_ready_list();
+ wake_up_interruptible(&crng_init_wait);
+ kill_fasync(&fasync, SIGIO, POLL_IN);
+ pr_notice("crng init done\n");
+ if (unseeded_warning.missed) {
+ pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
+ unseeded_warning.missed);
+ unseeded_warning.missed = 0;
+ }
+ if (urandom_warning.missed) {
+ pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
+ urandom_warning.missed);
+ urandom_warning.missed = 0;
+ }
+}
+
static void do_numa_crng_init(struct work_struct *work)
{
int i;
struct crng_state *crng;
struct crng_state **pool;
- pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
+ pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL | __GFP_NOFAIL);
for_each_online_node(i) {
crng = kmalloc_node(sizeof(struct crng_state),
GFP_KERNEL | __GFP_NOFAIL, i);
@@ -843,8 +844,8 @@ static void do_numa_crng_init(struct work_struct *work)
crng_initialize_secondary(crng);
pool[i] = crng;
}
- mb();
- if (cmpxchg(&crng_node_pool, NULL, pool)) {
+ /* pairs with READ_ONCE() in select_crng() */
+ if (cmpxchg_release(&crng_node_pool, NULL, pool) != NULL) {
for_each_node(i)
kfree(pool[i]);
kfree(pool);
@@ -855,20 +856,35 @@ static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);
static void numa_crng_init(void)
{
- schedule_work(&numa_crng_init_work);
+ if (IS_ENABLED(CONFIG_NUMA))
+ schedule_work(&numa_crng_init_work);
+}
+
+static struct crng_state *select_crng(void)
+{
+ if (IS_ENABLED(CONFIG_NUMA)) {
+ struct crng_state **pool;
+ int nid = numa_node_id();
+
+ /* pairs with cmpxchg_release() in do_numa_crng_init() */
+ pool = READ_ONCE(crng_node_pool);
+ if (pool && pool[nid])
+ return pool[nid];
+ }
+
+ return &primary_crng;
}
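
The cmpxchg_release()/READ_ONCE() pairing above is a one-time
publication pattern: the release ordering guarantees every store that
initialized the pool is visible before the pointer itself is, so a
reader that observes a non-NULL pointer sees fully constructed
crng_state objects. Stripped to its bones (the payload type is
hypothetical):

#include <linux/slab.h>

struct foo {
	int initialized;	/* hypothetical payload */
};

static struct foo *global_ptr;

static void publish(void)
{
	struct foo *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return;
	p->initialized = 1;
	/* Release: the store to p->initialized is ordered before the
	 * pointer store; a racing publisher loses and frees its copy. */
	if (cmpxchg_release(&global_ptr, NULL, p) != NULL)
		kfree(p);
}

static struct foo *lookup(void)
{
	/* Pairs with the cmpxchg_release() above. */
	return READ_ONCE(global_ptr);	/* NULL until published */
}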
-#else
-static void numa_crng_init(void) {}
-#endif
/*
* crng_fast_load() can be called by code in the interrupt service
- * path. So we can't afford to dilly-dally.
+ * path. So we can't afford to dilly-dally. Returns the number of
+ * bytes processed from cp.
*/
-static int crng_fast_load(const char *cp, size_t len)
+static size_t crng_fast_load(const u8 *cp, size_t len)
{
unsigned long flags;
- char *p;
+ u8 *p;
+ size_t ret = 0;
if (!spin_trylock_irqsave(&primary_crng.lock, flags))
return 0;
@@ -876,10 +892,10 @@ static int crng_fast_load(const char *cp, size_t len)
spin_unlock_irqrestore(&primary_crng.lock, flags);
return 0;
}
- p = (unsigned char *) &primary_crng.state[4];
+ p = (u8 *)&primary_crng.state[4];
while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
- cp++; crng_init_cnt++; len--;
+ cp++; crng_init_cnt++; len--; ret++;
}
spin_unlock_irqrestore(&primary_crng.lock, flags);
if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
@@ -887,7 +903,7 @@ static int crng_fast_load(const char *cp, size_t len)
crng_init = 1;
pr_notice("fast init done\n");
}
- return 1;
+ return ret;
}
/*
@@ -904,14 +920,14 @@ static int crng_fast_load(const char *cp, size_t len)
* like a fixed DMI table (for example), which might very well be
* unique to the machine, but is otherwise unvarying.
*/
-static int crng_slow_load(const char *cp, size_t len)
+static int crng_slow_load(const u8 *cp, size_t len)
{
- unsigned long flags;
- static unsigned char lfsr = 1;
- unsigned char tmp;
- unsigned i, max = CHACHA_KEY_SIZE;
- const char * src_buf = cp;
- char * dest_buf = (char *) &primary_crng.state[4];
+ unsigned long flags;
+ static u8 lfsr = 1;
+ u8 tmp;
+ unsigned int i, max = CHACHA_KEY_SIZE;
+ const u8 *src_buf = cp;
+ u8 *dest_buf = (u8 *)&primary_crng.state[4];
if (!spin_trylock_irqsave(&primary_crng.lock, flags))
return 0;
@@ -922,7 +938,7 @@ static int crng_slow_load(const char *cp, size_t len)
if (len > max)
max = len;
- for (i = 0; i < max ; i++) {
+ for (i = 0; i < max; i++) {
tmp = lfsr;
lfsr >>= 1;
if (tmp & 1)
@@ -935,17 +951,17 @@ static int crng_slow_load(const char *cp, size_t len)
return 1;
}
-static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
+static void crng_reseed(struct crng_state *crng, bool use_input_pool)
{
- unsigned long flags;
- int i, num;
+ unsigned long flags;
+ int i, num;
union {
- __u8 block[CHACHA_BLOCK_SIZE];
- __u32 key[8];
+ u8 block[CHACHA_BLOCK_SIZE];
+ u32 key[8];
} buf;
- if (r) {
- num = extract_entropy(r, &buf, 32, 16, 0);
+ if (use_input_pool) {
+ num = extract_entropy(&buf, 32, 16);
if (num == 0)
return;
} else {
@@ -955,65 +971,38 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
}
spin_lock_irqsave(&crng->lock, flags);
for (i = 0; i < 8; i++) {
- unsigned long rv;
+ unsigned long rv;
if (!arch_get_random_seed_long(&rv) &&
!arch_get_random_long(&rv))
rv = random_get_entropy();
- crng->state[i+4] ^= buf.key[i] ^ rv;
+ crng->state[i + 4] ^= buf.key[i] ^ rv;
}
memzero_explicit(&buf, sizeof(buf));
- crng->init_time = jiffies;
+ WRITE_ONCE(crng->init_time, jiffies);
spin_unlock_irqrestore(&crng->lock, flags);
- if (crng == &primary_crng && crng_init < 2) {
- invalidate_batched_entropy();
- numa_crng_init();
- crng_init = 2;
- process_random_ready_list();
- wake_up_interruptible(&crng_init_wait);
- kill_fasync(&fasync, SIGIO, POLL_IN);
- pr_notice("crng init done\n");
- if (unseeded_warning.missed) {
- pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
- unseeded_warning.missed);
- unseeded_warning.missed = 0;
- }
- if (urandom_warning.missed) {
- pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
- urandom_warning.missed);
- urandom_warning.missed = 0;
- }
- }
+ crng_finalize_init(crng);
}
-static void _extract_crng(struct crng_state *crng,
- __u8 out[CHACHA_BLOCK_SIZE])
+static void _extract_crng(struct crng_state *crng, u8 out[CHACHA_BLOCK_SIZE])
{
- unsigned long v, flags;
+ unsigned long flags, init_time;
- if (crng_ready() &&
- (time_after(crng_global_init_time, crng->init_time) ||
- time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)))
- crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
+ if (crng_ready()) {
+ init_time = READ_ONCE(crng->init_time);
+ if (time_after(READ_ONCE(crng_global_init_time), init_time) ||
+ time_after(jiffies, init_time + CRNG_RESEED_INTERVAL))
+ crng_reseed(crng, crng == &primary_crng);
+ }
spin_lock_irqsave(&crng->lock, flags);
- if (arch_get_random_long(&v))
- crng->state[14] ^= v;
chacha20_block(&crng->state[0], out);
if (crng->state[12] == 0)
crng->state[13]++;
spin_unlock_irqrestore(&crng->lock, flags);
}
-static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE])
+static void extract_crng(u8 out[CHACHA_BLOCK_SIZE])
{
- struct crng_state *crng = NULL;
-
-#ifdef CONFIG_NUMA
- if (crng_node_pool)
- crng = crng_node_pool[numa_node_id()];
- if (crng == NULL)
-#endif
- crng = &primary_crng;
- _extract_crng(crng, out);
+ _extract_crng(select_crng(), out);
}
/*
@@ -1021,42 +1010,34 @@ static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE])
* enough) to mutate the CRNG key to provide backtracking protection.
*/
static void _crng_backtrack_protect(struct crng_state *crng,
- __u8 tmp[CHACHA_BLOCK_SIZE], int used)
+ u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
- unsigned long flags;
- __u32 *s, *d;
- int i;
+ unsigned long flags;
+ u32 *s, *d;
+ int i;
- used = round_up(used, sizeof(__u32));
+ used = round_up(used, sizeof(u32));
if (used + CHACHA_KEY_SIZE > CHACHA_BLOCK_SIZE) {
extract_crng(tmp);
used = 0;
}
spin_lock_irqsave(&crng->lock, flags);
- s = (__u32 *) &tmp[used];
+ s = (u32 *)&tmp[used];
d = &crng->state[4];
- for (i=0; i < 8; i++)
+ for (i = 0; i < 8; i++)
*d++ ^= *s++;
spin_unlock_irqrestore(&crng->lock, flags);
}
-static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used)
+static void crng_backtrack_protect(u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
- struct crng_state *crng = NULL;
-
-#ifdef CONFIG_NUMA
- if (crng_node_pool)
- crng = crng_node_pool[numa_node_id()];
- if (crng == NULL)
-#endif
- crng = &primary_crng;
- _crng_backtrack_protect(crng, tmp, used);
+ _crng_backtrack_protect(select_crng(), tmp, used);
}
static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
{
ssize_t ret = 0, i = CHACHA_BLOCK_SIZE;
- __u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
+ u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
int large_request = (nbytes > 256);
while (nbytes) {
@@ -1088,7 +1069,6 @@ static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
return ret;
}
-
/*********************************************************************
*
* Entropy input management
@@ -1121,8 +1101,8 @@ void add_device_randomness(const void *buf, unsigned int size)
trace_add_device_randomness(size, _RET_IP_);
spin_lock_irqsave(&input_pool.lock, flags);
- _mix_pool_bytes(&input_pool, buf, size);
- _mix_pool_bytes(&input_pool, &time, sizeof(time));
+ _mix_pool_bytes(buf, size);
+ _mix_pool_bytes(&time, sizeof(time));
spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);
@@ -1141,19 +1121,17 @@ static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;
*/
static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
{
- struct entropy_store *r;
struct {
long jiffies;
- unsigned cycles;
- unsigned num;
+ unsigned int cycles;
+ unsigned int num;
} sample;
long delta, delta2, delta3;
sample.jiffies = jiffies;
sample.cycles = random_get_entropy();
sample.num = num;
- r = &input_pool;
- mix_pool_bytes(r, &sample, sizeof(sample));
+ mix_pool_bytes(&sample, sizeof(sample));
/*
* Calculate number of bits of randomness we probably added.
@@ -1185,11 +1163,11 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
* Round down by 1 bit on general principles,
* and limit entropy estimate to 12 bits.
*/
- credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
+ credit_entropy_bits(min_t(int, fls(delta >> 1), 11));
}
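
A worked example of the estimate just above: if the smallest of the
first-, second-, and third-order deltas is 1024 jiffies, then
fls(1024 >> 1) = fls(512) = 10, so 10 bits are credited (the min_t()
caps the credit at 11); a perfectly regular source with delta 1 yields
fls(0) = 0 bits.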
void add_input_randomness(unsigned int type, unsigned int code,
- unsigned int value)
+ unsigned int value)
{
static unsigned char last_value;
@@ -1200,7 +1178,7 @@ void add_input_randomness(unsigned int type, unsigned int code,
last_value = value;
add_timer_randomness(&input_timer_state,
(type << 4) ^ code ^ (code >> 4) ^ value);
- trace_add_input_randomness(ENTROPY_BITS(&input_pool));
+ trace_add_input_randomness(POOL_ENTROPY_BITS());
}
EXPORT_SYMBOL_GPL(add_input_randomness);
@@ -1209,48 +1187,47 @@ static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
#ifdef ADD_INTERRUPT_BENCH
static unsigned long avg_cycles, avg_deviation;
-#define AVG_SHIFT 8 /* Exponential average factor k=1/256 */
-#define FIXED_1_2 (1 << (AVG_SHIFT-1))
+#define AVG_SHIFT 8 /* Exponential average factor k=1/256 */
+#define FIXED_1_2 (1 << (AVG_SHIFT - 1))
static void add_interrupt_bench(cycles_t start)
{
- long delta = random_get_entropy() - start;
-
- /* Use a weighted moving average */
- delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
- avg_cycles += delta;
- /* And average deviation */
- delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
- avg_deviation += delta;
+ long delta = random_get_entropy() - start;
+
+ /* Use a weighted moving average */
+ delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
+ avg_cycles += delta;
+ /* And average deviation */
+ delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
+ avg_deviation += delta;
}
#else
#define add_interrupt_bench(x)
#endif
-static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
+static u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
{
- __u32 *ptr = (__u32 *) regs;
+ u32 *ptr = (u32 *)regs;
unsigned int idx;
if (regs == NULL)
return 0;
idx = READ_ONCE(f->reg_idx);
- if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
+ if (idx >= sizeof(struct pt_regs) / sizeof(u32))
idx = 0;
ptr += idx++;
WRITE_ONCE(f->reg_idx, idx);
return *ptr;
}
-void add_interrupt_randomness(int irq, int irq_flags)
+void add_interrupt_randomness(int irq)
{
- struct entropy_store *r;
- struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
- struct pt_regs *regs = get_irq_regs();
- unsigned long now = jiffies;
- cycles_t cycles = random_get_entropy();
- __u32 c_high, j_high;
- __u64 ip;
+ struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
+ struct pt_regs *regs = get_irq_regs();
+ unsigned long now = jiffies;
+ cycles_t cycles = random_get_entropy();
+ u32 c_high, j_high;
+ u64 ip;
if (cycles == 0)
cycles = get_reg(fast_pool, regs);
@@ -1260,38 +1237,35 @@ void add_interrupt_randomness(int irq, int irq_flags)
fast_pool->pool[1] ^= now ^ c_high;
ip = regs ? instruction_pointer(regs) : _RET_IP_;
fast_pool->pool[2] ^= ip;
- fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
- get_reg(fast_pool, regs);
+ fast_pool->pool[3] ^=
+ (sizeof(ip) > 4) ? ip >> 32 : get_reg(fast_pool, regs);
fast_mix(fast_pool);
add_interrupt_bench(cycles);
if (unlikely(crng_init == 0)) {
if ((fast_pool->count >= 64) &&
- crng_fast_load((char *) fast_pool->pool,
- sizeof(fast_pool->pool))) {
+ crng_fast_load((u8 *)fast_pool->pool, sizeof(fast_pool->pool)) > 0) {
fast_pool->count = 0;
fast_pool->last = now;
}
return;
}
- if ((fast_pool->count < 64) &&
- !time_after(now, fast_pool->last + HZ))
+ if ((fast_pool->count < 64) && !time_after(now, fast_pool->last + HZ))
return;
- r = &input_pool;
- if (!spin_trylock(&r->lock))
+ if (!spin_trylock(&input_pool.lock))
return;
fast_pool->last = now;
- __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
- spin_unlock(&r->lock);
+ __mix_pool_bytes(&fast_pool->pool, sizeof(fast_pool->pool));
+ spin_unlock(&input_pool.lock);
fast_pool->count = 0;
/* award one bit for the contents of the fast pool */
- credit_entropy_bits(r, 1);
+ credit_entropy_bits(1);
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);
@@ -1302,7 +1276,7 @@ void add_disk_randomness(struct gendisk *disk)
return;
/* first major is 1, so we get >= 0x200 here */
add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
- trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
+ trace_add_disk_randomness(disk_devt(disk), POOL_ENTROPY_BITS());
}
EXPORT_SYMBOL_GPL(add_disk_randomness);
#endif
@@ -1317,43 +1291,36 @@ EXPORT_SYMBOL_GPL(add_disk_randomness);
* This function decides how many bytes to actually take from the
* given pool, and also debits the entropy count accordingly.
*/
-static size_t account(struct entropy_store *r, size_t nbytes, int min,
- int reserved)
+static size_t account(size_t nbytes, int min)
{
- int entropy_count, orig, have_bytes;
+ int entropy_count, orig;
size_t ibytes, nfrac;
- BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
+ BUG_ON(input_pool.entropy_count > POOL_FRACBITS);
/* Can we pull enough? */
retry:
- entropy_count = orig = READ_ONCE(r->entropy_count);
- ibytes = nbytes;
- /* never pull more than available */
- have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
-
- if ((have_bytes -= reserved) < 0)
- have_bytes = 0;
- ibytes = min_t(size_t, ibytes, have_bytes);
- if (ibytes < min)
- ibytes = 0;
-
+ entropy_count = orig = READ_ONCE(input_pool.entropy_count);
if (WARN_ON(entropy_count < 0)) {
- pr_warn("negative entropy count: pool %s count %d\n",
- r->name, entropy_count);
+ pr_warn("negative entropy count: count %d\n", entropy_count);
entropy_count = 0;
}
- nfrac = ibytes << (ENTROPY_SHIFT + 3);
- if ((size_t) entropy_count > nfrac)
+
+ /* never pull more than available */
+ ibytes = min_t(size_t, nbytes, entropy_count >> (POOL_ENTROPY_SHIFT + 3));
+ if (ibytes < min)
+ ibytes = 0;
+ nfrac = ibytes << (POOL_ENTROPY_SHIFT + 3);
+ if ((size_t)entropy_count > nfrac)
entropy_count -= nfrac;
else
entropy_count = 0;
- if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+ if (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig)
goto retry;
- trace_debit_entropy(r->name, 8 * ibytes);
- if (ibytes && ENTROPY_BITS(r) < random_write_wakeup_bits) {
+ trace_debit_entropy(8 * ibytes);
+ if (ibytes && POOL_ENTROPY_BITS() < random_write_wakeup_bits) {
wake_up_interruptible(&random_write_wait);
kill_fasync(&fasync, SIGIO, POLL_OUT);
}
@@ -1366,77 +1333,59 @@ retry:
*
* Note: we assume that .poolwords is a multiple of 16 words.
*/
-static void extract_buf(struct entropy_store *r, __u8 *out)
+static void extract_buf(u8 *out)
{
- int i;
- union {
- __u32 w[5];
- unsigned long l[LONGS(20)];
- } hash;
- __u32 workspace[SHA1_WORKSPACE_WORDS];
+ struct blake2s_state state __aligned(__alignof__(unsigned long));
+ u8 hash[BLAKE2S_HASH_SIZE];
+ unsigned long *salt;
unsigned long flags;
+ blake2s_init(&state, sizeof(hash));
+
/*
* If we have an architectural hardware random number
- * generator, use it for SHA's initial vector
+ * generator, use it for BLAKE2's salt & personal fields.
*/
- sha1_init(hash.w);
- for (i = 0; i < LONGS(20); i++) {
+ for (salt = (unsigned long *)&state.h[4];
+ salt < (unsigned long *)&state.h[8]; ++salt) {
unsigned long v;
if (!arch_get_random_long(&v))
break;
- hash.l[i] = v;
+ *salt ^= v;
}
- /* Generate a hash across the pool, 16 words (512 bits) at a time */
- spin_lock_irqsave(&r->lock, flags);
- for (i = 0; i < r->poolinfo->poolwords; i += 16)
- sha1_transform(hash.w, (__u8 *)(r->pool + i), workspace);
+ /* Generate a hash across the pool */
+ spin_lock_irqsave(&input_pool.lock, flags);
+ blake2s_update(&state, (const u8 *)input_pool_data, POOL_BYTES);
+ blake2s_final(&state, hash); /* final zeros out state */
/*
* We mix the hash back into the pool to prevent backtracking
* attacks (where the attacker knows the state of the pool
* plus the current outputs, and attempts to find previous
- * ouputs), unless the hash function can be inverted. By
- * mixing at least a SHA1 worth of hash data back, we make
+ * outputs), unless the hash function can be inverted. By
+ * mixing at least a hash worth of hash data back, we make
* brute-forcing the feedback as hard as brute-forcing the
* hash.
*/
- __mix_pool_bytes(r, hash.w, sizeof(hash.w));
- spin_unlock_irqrestore(&r->lock, flags);
-
- memzero_explicit(workspace, sizeof(workspace));
+ __mix_pool_bytes(hash, sizeof(hash));
+ spin_unlock_irqrestore(&input_pool.lock, flags);
- /*
- * In case the hash function has some recognizable output
- * pattern, we fold it in half. Thus, we always feed back
- * twice as much data as we output.
+ /* Note that EXTRACT_SIZE is half of hash size here, because above
+ * we've dumped the full length back into mixer. By reducing the
+ * amount that we emit, we retain a level of forward secrecy.
*/
- hash.w[0] ^= hash.w[3];
- hash.w[1] ^= hash.w[4];
- hash.w[2] ^= rol32(hash.w[2], 16);
-
- memcpy(out, &hash, EXTRACT_SIZE);
- memzero_explicit(&hash, sizeof(hash));
+ memcpy(out, hash, EXTRACT_SIZE);
+ memzero_explicit(hash, sizeof(hash));
}
-static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
- size_t nbytes, int fips)
+static ssize_t _extract_entropy(void *buf, size_t nbytes)
{
ssize_t ret = 0, i;
- __u8 tmp[EXTRACT_SIZE];
- unsigned long flags;
+ u8 tmp[EXTRACT_SIZE];
while (nbytes) {
- extract_buf(r, tmp);
-
- if (fips) {
- spin_lock_irqsave(&r->lock, flags);
- if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
- panic("Hardware RNG duplicated output!\n");
- memcpy(r->last_data, tmp, EXTRACT_SIZE);
- spin_unlock_irqrestore(&r->lock, flags);
- }
+ extract_buf(tmp);
i = min_t(int, nbytes, EXTRACT_SIZE);
memcpy(buf, tmp, i);
nbytes -= i;
@@ -1455,42 +1404,19 @@ static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
* returns it in a buffer.
*
* The min parameter specifies the minimum amount we can pull before
- * failing to avoid races that defeat catastrophic reseeding while the
- * reserved parameter indicates how much entropy we must leave in the
- * pool after each pull to avoid starving other readers.
+ * failing to avoid races that defeat catastrophic reseeding.
*/
-static ssize_t extract_entropy(struct entropy_store *r, void *buf,
- size_t nbytes, int min, int reserved)
+static ssize_t extract_entropy(void *buf, size_t nbytes, int min)
{
- __u8 tmp[EXTRACT_SIZE];
- unsigned long flags;
-
- /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
- if (fips_enabled) {
- spin_lock_irqsave(&r->lock, flags);
- if (!r->last_data_init) {
- r->last_data_init = 1;
- spin_unlock_irqrestore(&r->lock, flags);
- trace_extract_entropy(r->name, EXTRACT_SIZE,
- ENTROPY_BITS(r), _RET_IP_);
- extract_buf(r, tmp);
- spin_lock_irqsave(&r->lock, flags);
- memcpy(r->last_data, tmp, EXTRACT_SIZE);
- }
- spin_unlock_irqrestore(&r->lock, flags);
- }
-
- trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
- nbytes = account(r, nbytes, min, reserved);
-
- return _extract_entropy(r, buf, nbytes, fips_enabled);
+ trace_extract_entropy(nbytes, POOL_ENTROPY_BITS(), _RET_IP_);
+ nbytes = account(nbytes, min);
+ return _extract_entropy(buf, nbytes);
}
#define warn_unseeded_randomness(previous) \
- _warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))
+ _warn_unseeded_randomness(__func__, (void *)_RET_IP_, (previous))
-static void _warn_unseeded_randomness(const char *func_name, void *caller,
- void **previous)
+static void _warn_unseeded_randomness(const char *func_name, void *caller, void **previous)
{
#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
const bool print_once = false;
@@ -1498,8 +1424,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
static bool print_once __read_mostly;
#endif
- if (print_once ||
- crng_ready() ||
+ if (print_once || crng_ready() ||
(previous && (caller == READ_ONCE(*previous))))
return;
WRITE_ONCE(*previous, caller);
@@ -1507,9 +1432,8 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
print_once = true;
#endif
if (__ratelimit(&unseeded_warning))
- printk_deferred(KERN_NOTICE "random: %s called from %pS "
- "with crng_init=%d\n", func_name, caller,
- crng_init);
+ printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n",
+ func_name, caller, crng_init);
}
/*
@@ -1524,7 +1448,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
*/
static void _get_random_bytes(void *buf, int nbytes)
{
- __u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
+ u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
trace_get_random_bytes(nbytes, _RET_IP_);
@@ -1552,7 +1476,6 @@ void get_random_bytes(void *buf, int nbytes)
}
EXPORT_SYMBOL(get_random_bytes);
-
/*
* Each time the timer fires, we expect that we got an unpredictable
* jump in the cycle counter. Even if the timer is running on another
@@ -1568,7 +1491,7 @@ EXPORT_SYMBOL(get_random_bytes);
*/
static void entropy_timer(struct timer_list *t)
{
- credit_entropy_bits(&input_pool, 1);
+ credit_entropy_bits(1);
}
/*
@@ -1591,15 +1514,15 @@ static void try_to_generate_entropy(void)
timer_setup_on_stack(&stack.timer, entropy_timer, 0);
while (!crng_ready()) {
if (!timer_pending(&stack.timer))
- mod_timer(&stack.timer, jiffies+1);
- mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
+ mod_timer(&stack.timer, jiffies + 1);
+ mix_pool_bytes(&stack.now, sizeof(stack.now));
schedule();
stack.now = random_get_entropy();
}
del_timer_sync(&stack.timer);
destroy_timer_on_stack(&stack.timer);
- mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
+ mix_pool_bytes(&stack.now, sizeof(stack.now));
}
/*
@@ -1718,7 +1641,7 @@ EXPORT_SYMBOL(del_random_ready_callback);
int __must_check get_random_bytes_arch(void *buf, int nbytes)
{
int left = nbytes;
- char *p = buf;
+ u8 *p = buf;
trace_get_random_bytes_arch(left, _RET_IP_);
while (left) {
@@ -1740,26 +1663,24 @@ EXPORT_SYMBOL(get_random_bytes_arch);
/*
* init_std_data - initialize pool with system data
*
- * @r: pool to initialize
- *
* This function clears the pool's entropy count and mixes some system
* data into the pool to prepare it for use. The pool is not cleared
* as that can only decrease the entropy in the pool.
*/
-static void __init init_std_data(struct entropy_store *r)
+static void __init init_std_data(void)
{
int i;
ktime_t now = ktime_get_real();
unsigned long rv;
- mix_pool_bytes(r, &now, sizeof(now));
- for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
+ mix_pool_bytes(&now, sizeof(now));
+ for (i = POOL_BYTES; i > 0; i -= sizeof(rv)) {
if (!arch_get_random_seed_long(&rv) &&
!arch_get_random_long(&rv))
rv = random_get_entropy();
- mix_pool_bytes(r, &rv, sizeof(rv));
+ mix_pool_bytes(&rv, sizeof(rv));
}
- mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
+ mix_pool_bytes(utsname(), sizeof(*(utsname())));
}
/*
@@ -1774,7 +1695,9 @@ static void __init init_std_data(struct entropy_store *r)
*/
int __init rand_initialize(void)
{
- init_std_data(&input_pool);
+ init_std_data();
+ if (crng_need_final_init)
+ crng_finalize_init(&primary_crng);
crng_initialize_primary(&primary_crng);
crng_global_init_time = jiffies;
if (ratelimit_disable) {
@@ -1801,22 +1724,20 @@ void rand_initialize_disk(struct gendisk *disk)
}
#endif
-static ssize_t
-urandom_read_nowarn(struct file *file, char __user *buf, size_t nbytes,
- loff_t *ppos)
+static ssize_t urandom_read_nowarn(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos)
{
int ret;
- nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
+ nbytes = min_t(size_t, nbytes, INT_MAX >> (POOL_ENTROPY_SHIFT + 3));
ret = extract_crng_user(buf, nbytes);
- trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool));
+ trace_urandom_read(8 * nbytes, 0, POOL_ENTROPY_BITS());
return ret;
}
-static ssize_t
-urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
{
- unsigned long flags;
static int maxwarn = 10;
if (!crng_ready() && maxwarn > 0) {
@@ -1824,16 +1745,13 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
if (__ratelimit(&urandom_warning))
pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
current->comm, nbytes);
- spin_lock_irqsave(&primary_crng.lock, flags);
- crng_init_cnt = 0;
- spin_unlock_irqrestore(&primary_crng.lock, flags);
}
return urandom_read_nowarn(file, buf, nbytes, ppos);
}
-static ssize_t
-random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
{
int ret;
@@ -1843,8 +1761,7 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
return urandom_read_nowarn(file, buf, nbytes, ppos);
}
-static __poll_t
-random_poll(struct file *file, poll_table * wait)
+static __poll_t random_poll(struct file *file, poll_table *wait)
{
__poll_t mask;
@@ -1853,16 +1770,15 @@ random_poll(struct file *file, poll_table * wait)
mask = 0;
if (crng_ready())
mask |= EPOLLIN | EPOLLRDNORM;
- if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
+ if (POOL_ENTROPY_BITS() < random_write_wakeup_bits)
mask |= EPOLLOUT | EPOLLWRNORM;
return mask;
}
-static int
-write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
+static int write_pool(const char __user *buffer, size_t count)
{
size_t bytes;
- __u32 t, buf[16];
+ u32 t, buf[16];
const char __user *p = buffer;
while (count > 0) {
@@ -1872,7 +1788,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
if (copy_from_user(&buf, p, bytes))
return -EFAULT;
- for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
+ for (b = bytes; b > 0; b -= sizeof(u32), i++) {
if (!arch_get_random_int(&t))
break;
buf[i] ^= t;
@@ -1881,7 +1797,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
count -= bytes;
p += bytes;
- mix_pool_bytes(r, buf, bytes);
+ mix_pool_bytes(buf, bytes);
cond_resched();
}
@@ -1893,7 +1809,7 @@ static ssize_t random_write(struct file *file, const char __user *buffer,
{
size_t ret;
- ret = write_pool(&input_pool, buffer, count);
+ ret = write_pool(buffer, count);
if (ret)
return ret;
@@ -1909,7 +1825,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
switch (cmd) {
case RNDGETENTCNT:
/* inherently racy, no point locking */
- ent_count = ENTROPY_BITS(&input_pool);
+ ent_count = POOL_ENTROPY_BITS();
if (put_user(ent_count, p))
return -EFAULT;
return 0;
@@ -1918,7 +1834,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return -EPERM;
if (get_user(ent_count, p))
return -EFAULT;
- return credit_entropy_bits_safe(&input_pool, ent_count);
+ return credit_entropy_bits_safe(ent_count);
case RNDADDENTROPY:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -1928,11 +1844,10 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return -EINVAL;
if (get_user(size, p++))
return -EFAULT;
- retval = write_pool(&input_pool, (const char __user *)p,
- size);
+ retval = write_pool((const char __user *)p, size);
if (retval < 0)
return retval;
- return credit_entropy_bits_safe(&input_pool, ent_count);
+ return credit_entropy_bits_safe(ent_count);
case RNDZAPENTCNT:
case RNDCLEARPOOL:
/*
@@ -1948,8 +1863,8 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return -EPERM;
if (crng_init < 2)
return -ENODATA;
- crng_reseed(&primary_crng, &input_pool);
- crng_global_init_time = jiffies - 1;
+ crng_reseed(&primary_crng, true);
+ WRITE_ONCE(crng_global_init_time, jiffies - 1);
return 0;
default:
return -EINVAL;
@@ -1962,9 +1877,9 @@ static int random_fasync(int fd, struct file *filp, int on)
}
const struct file_operations random_fops = {
- .read = random_read,
+ .read = random_read,
.write = random_write,
- .poll = random_poll,
+ .poll = random_poll,
.unlocked_ioctl = random_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.fasync = random_fasync,
@@ -1972,7 +1887,7 @@ const struct file_operations random_fops = {
};
const struct file_operations urandom_fops = {
- .read = urandom_read,
+ .read = urandom_read,
.write = random_write,
.unlocked_ioctl = random_ioctl,
.compat_ioctl = compat_ptr_ioctl,
@@ -1980,19 +1895,19 @@ const struct file_operations urandom_fops = {
.llseek = noop_llseek,
};
-SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
- unsigned int, flags)
+SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int,
+ flags)
{
int ret;
- if (flags & ~(GRND_NONBLOCK|GRND_RANDOM|GRND_INSECURE))
+ if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
return -EINVAL;
/*
* Requesting insecure and blocking randomness at the same time makes
* no sense.
*/
- if ((flags & (GRND_INSECURE|GRND_RANDOM)) == (GRND_INSECURE|GRND_RANDOM))
+ if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
return -EINVAL;
if (count > INT_MAX)
@@ -2019,7 +1934,7 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
#include <linux/sysctl.h>
static int min_write_thresh;
-static int max_write_thresh = INPUT_POOL_WORDS * 32;
+static int max_write_thresh = POOL_BITS;
static int random_min_urandom_seed = 60;
static char sysctl_bootid[16];
@@ -2032,8 +1947,8 @@ static char sysctl_bootid[16];
* returned as an ASCII string in the standard UUID format; if via the
* sysctl system call, as 16 bytes of binary data.
*/
-static int proc_do_uuid(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
+static int proc_do_uuid(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos)
{
struct ctl_table fake_table;
unsigned char buf[64], tmp_uuid[16], *uuid;
@@ -2062,13 +1977,13 @@ static int proc_do_uuid(struct ctl_table *table, int write,
/*
* Return entropy available scaled to integral bits
*/
-static int proc_do_entropy(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
+static int proc_do_entropy(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos)
{
struct ctl_table fake_table;
int entropy_count;
- entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
+ entropy_count = *(int *)table->data >> POOL_ENTROPY_SHIFT;
fake_table.data = &entropy_count;
fake_table.maxlen = sizeof(entropy_count);
@@ -2076,9 +1991,8 @@ static int proc_do_entropy(struct ctl_table *table, int write,
return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
}
-static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
-extern struct ctl_table random_table[];
-struct ctl_table random_table[] = {
+static int sysctl_poolsize = POOL_BITS;
+static struct ctl_table random_table[] = {
{
.procname = "poolsize",
.data = &sysctl_poolsize,
@@ -2140,7 +2054,18 @@ struct ctl_table random_table[] = {
#endif
{ }
};
-#endif /* CONFIG_SYSCTL */
+
+/*
+ * rand_initialize() is called before sysctl_init(),
+ * so we cannot call register_sysctl_init() in rand_initialize()
+ */
+static int __init random_sysctls_init(void)
+{
+ register_sysctl_init("kernel/random", random_table);
+ return 0;
+}
+device_initcall(random_sysctls_init);
+#endif /* CONFIG_SYSCTL */
struct batched_entropy {
union {
@@ -2160,7 +2085,7 @@ struct batched_entropy {
* point prior.
*/
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
- .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
+ .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
};
u64 get_random_u64(void)
@@ -2185,7 +2110,7 @@ u64 get_random_u64(void)
EXPORT_SYMBOL(get_random_u64);
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
- .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
+ .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
};
u32 get_random_u32(void)
{
@@ -2217,7 +2142,7 @@ static void invalidate_batched_entropy(void)
int cpu;
unsigned long flags;
- for_each_possible_cpu (cpu) {
+ for_each_possible_cpu(cpu) {
struct batched_entropy *batched_entropy;
batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
@@ -2246,8 +2171,7 @@ static void invalidate_batched_entropy(void)
* Return: A page aligned address within [start, start + range). On error,
* @start is returned.
*/
-unsigned long
-randomize_page(unsigned long start, unsigned long range)
+unsigned long randomize_page(unsigned long start, unsigned long range)
{
if (!PAGE_ALIGNED(start)) {
range -= PAGE_ALIGN(start) - start;
@@ -2272,21 +2196,24 @@ randomize_page(unsigned long start, unsigned long range)
void add_hwgenerator_randomness(const char *buffer, size_t count,
size_t entropy)
{
- struct entropy_store *poolp = &input_pool;
-
if (unlikely(crng_init == 0)) {
- crng_fast_load(buffer, count);
- return;
+ size_t ret = crng_fast_load(buffer, count);
+ mix_pool_bytes(buffer, ret);
+ count -= ret;
+ buffer += ret;
+ if (!count || crng_init == 0)
+ return;
}
/* Suspend writing if we're above the trickle threshold.
* We'll be woken up again once below random_write_wakeup_thresh,
* or when the calling thread is about to terminate.
*/
- wait_event_interruptible(random_write_wait, kthread_should_stop() ||
- ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
- mix_pool_bytes(poolp, buffer, count);
- credit_entropy_bits(poolp, entropy);
+ wait_event_interruptible(random_write_wait,
+ !system_wq || kthread_should_stop() ||
+ POOL_ENTROPY_BITS() <= random_write_wakeup_bits);
+ mix_pool_bytes(buffer, count);
+ credit_entropy_bits(entropy);
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);