Diffstat (limited to 'drivers/char/random.c')
-rw-r--r--	drivers/char/random.c	355
1 file changed, 230 insertions, 125 deletions
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 4ec04a754733..b86eae9b77df 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -125,21 +125,26 @@
  * The current exported interfaces for gathering environmental noise
  * from the devices are:
  *
+ *	void add_device_randomness(const void *buf, unsigned int size);
  *	void add_input_randomness(unsigned int type, unsigned int code,
  *				  unsigned int value);
- *	void add_interrupt_randomness(int irq);
+ *	void add_interrupt_randomness(int irq, int irq_flags);
  *	void add_disk_randomness(struct gendisk *disk);
  *
+ * add_device_randomness() is for adding data to the random pool that
+ * is likely to differ between two devices (or possibly even per boot).
+ * This would be things like MAC addresses or serial numbers, or the
+ * read-out of the RTC.  This does *not* add any actual entropy to the
+ * pool, but it initializes the pool to different values for devices
+ * that might otherwise be identical and have very little entropy
+ * available to them (particularly common in the embedded world).
+ *
  * add_input_randomness() uses the input layer interrupt timing, as well as
  * the event type information from the hardware.
  *
- * add_interrupt_randomness() uses the inter-interrupt timing as random
- * inputs to the entropy pool.  Note that not all interrupts are good
- * sources of randomness!  For example, the timer interrupts is not a
- * good choice, because the periodicity of the interrupts is too
- * regular, and hence predictable to an attacker.  Network Interface
- * Controller interrupts are a better measure, since the timing of the
- * NIC interrupts are more unpredictable.
+ * add_interrupt_randomness() uses the interrupt timing as random
+ * inputs to the entropy pool.  Using the cycle counters and the irq source
+ * as inputs, it feeds the randomness roughly once a second.
 *
 * add_disk_randomness() uses what amounts to the seek time of block
 * layer request events, on a per-disk_devt basis, as input to the
@@ -248,6 +253,8 @@
 #include <linux/percpu.h>
 #include <linux/cryptohash.h>
 #include <linux/fips.h>
+#include <linux/ptrace.h>
+#include <linux/kmemcheck.h>
 
 #ifdef CONFIG_GENERIC_HARDIRQS
 # include <linux/irq.h>
@@ -256,8 +263,12 @@
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/irq.h>
+#include <asm/irq_regs.h>
 #include <asm/io.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/random.h>
+
 /*
  * Configuration information
  */
@@ -266,6 +277,8 @@
 #define SEC_XFER_SIZE 512
 #define EXTRACT_SIZE 10
 
+#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
+
 /*
  * The minimum number of bits of entropy before we wake up a read on
  * /dev/random.  Should be enough to do a significant reseed.
@@ -420,8 +433,10 @@ struct entropy_store {
 	/* read-write data: */
 	spinlock_t lock;
 	unsigned add_ptr;
+	unsigned input_rotate;
 	int entropy_count;
-	int input_rotate;
+	int entropy_total;
+	unsigned int initialized:1;
 	__u8 last_data[EXTRACT_SIZE];
 };
 
@@ -454,6 +469,10 @@ static struct entropy_store nonblocking_pool = {
 	.pool = nonblocking_pool_data
 };
 
+static __u32 const twist_table[8] = {
+	0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
+	0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
+
 /*
  * This function adds bytes into the entropy "pool".  It does not
  * update the entropy estimate.  The caller should call
@@ -464,29 +483,24 @@ static struct entropy_store nonblocking_pool = {
  * it's cheap to do so and helps slightly in the expected case where
  * the entropy is concentrated in the low-order bits.
  */
-static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
-				   int nbytes, __u8 out[64])
+static void _mix_pool_bytes(struct entropy_store *r, const void *in,
+			    int nbytes, __u8 out[64])
 {
-	static __u32 const twist_table[8] = {
-		0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
-		0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
 	unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
 	int input_rotate;
 	int wordmask = r->poolinfo->poolwords - 1;
 	const char *bytes = in;
 	__u32 w;
-	unsigned long flags;
 
-	/* Taps are constant, so we can load them without holding r->lock.  */
 	tap1 = r->poolinfo->tap1;
 	tap2 = r->poolinfo->tap2;
 	tap3 = r->poolinfo->tap3;
 	tap4 = r->poolinfo->tap4;
 	tap5 = r->poolinfo->tap5;
 
-	spin_lock_irqsave(&r->lock, flags);
-	input_rotate = r->input_rotate;
-	i = r->add_ptr;
+	smp_rmb();
+	input_rotate = ACCESS_ONCE(r->input_rotate);
+	i = ACCESS_ONCE(r->add_ptr);
 
 	/* mix one byte at a time to simplify size handling and churn faster */
 	while (nbytes--) {
@@ -513,19 +527,61 @@ static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
 		input_rotate += i ? 7 : 14;
 	}
 
-	r->input_rotate = input_rotate;
-	r->add_ptr = i;
+	ACCESS_ONCE(r->input_rotate) = input_rotate;
+	ACCESS_ONCE(r->add_ptr) = i;
+	smp_wmb();
 
 	if (out)
 		for (j = 0; j < 16; j++)
 			((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
+}
+
+static void __mix_pool_bytes(struct entropy_store *r, const void *in,
+			     int nbytes, __u8 out[64])
+{
+	trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
+	_mix_pool_bytes(r, in, nbytes, out);
+}
+
+static void mix_pool_bytes(struct entropy_store *r, const void *in,
+			   int nbytes, __u8 out[64])
+{
+	unsigned long flags;
+
+	trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
+	spin_lock_irqsave(&r->lock, flags);
+	_mix_pool_bytes(r, in, nbytes, out);
 	spin_unlock_irqrestore(&r->lock, flags);
 }
 
-static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
+struct fast_pool {
+	__u32		pool[4];
+	unsigned long	last;
+	unsigned short	count;
+	unsigned char	rotate;
+	unsigned char	last_timer_intr;
+};
+
+/*
+ * This is a fast mixing routine used by the interrupt randomness
+ * collector.  It's hardcoded for a 128-bit pool and assumes that any
+ * locks that might be needed are taken by the caller.
+ */
+static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
 {
-	mix_pool_bytes_extract(r, in, bytes, NULL);
+	const char	*bytes = in;
+	__u32		w;
+	unsigned	i = f->count;
+	unsigned	input_rotate = f->rotate;
+
+	while (nbytes--) {
+		w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
+			f->pool[(i + 1) & 3];
+		f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
+		input_rotate += (i++ & 3) ? 7 : 14;
+	}
+	f->count = i;
+	f->rotate = input_rotate;
 }
 
 /*********************************************************************
@@ -533,30 +589,38 @@ static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
  */
 static void credit_entropy_bits(struct entropy_store *r, int nbits)
 {
-	unsigned long flags;
-	int entropy_count;
+	int entropy_count, orig;
 
 	if (!nbits)
 		return;
 
-	spin_lock_irqsave(&r->lock, flags);
-
 	DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
-	entropy_count = r->entropy_count;
+retry:
+	entropy_count = orig = ACCESS_ONCE(r->entropy_count);
 	entropy_count += nbits;
+
 	if (entropy_count < 0) {
 		DEBUG_ENT("negative entropy/overflow\n");
 		entropy_count = 0;
 	} else if (entropy_count > r->poolinfo->POOLBITS)
 		entropy_count = r->poolinfo->POOLBITS;
-	r->entropy_count = entropy_count;
+	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+		goto retry;
+
+	if (!r->initialized && nbits > 0) {
+		r->entropy_total += nbits;
+		if (r->entropy_total > 128)
+			r->initialized = 1;
+	}
+
+	trace_credit_entropy_bits(r->name, nbits, entropy_count,
+				  r->entropy_total, _RET_IP_);
 
 	/* should we wake readers? */
 	if (r == &input_pool &&
 	    entropy_count >= random_read_wakeup_thresh) {
 		wake_up_interruptible(&random_read_wait);
 		kill_fasync(&fasync, SIGIO, POLL_IN);
 	}
-	spin_unlock_irqrestore(&r->lock, flags);
 }
 
 /*********************************************************************
@@ -572,42 +636,24 @@ struct timer_rand_state {
 	unsigned dont_count_entropy:1;
 };
 
-#ifndef CONFIG_GENERIC_HARDIRQS
-
-static struct timer_rand_state *irq_timer_state[NR_IRQS];
-
-static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
-{
-	return irq_timer_state[irq];
-}
-
-static void set_timer_rand_state(unsigned int irq,
-				 struct timer_rand_state *state)
-{
-	irq_timer_state[irq] = state;
-}
-
-#else
-
-static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
-{
-	struct irq_desc *desc;
-
-	desc = irq_to_desc(irq);
-
-	return desc->timer_rand_state;
-}
-
-static void set_timer_rand_state(unsigned int irq,
-				 struct timer_rand_state *state)
+/*
+ * Add device- or boot-specific data to the input and nonblocking
+ * pools to help initialize them to unique values.
+ *
+ * None of this adds any entropy; it is meant to avoid the
+ * problem of the nonblocking pool having similar initial state
+ * across largely identical devices.
+ */
+void add_device_randomness(const void *buf, unsigned int size)
 {
-	struct irq_desc *desc;
+	unsigned long time = get_cycles() ^ jiffies;
 
-	desc = irq_to_desc(irq);
-
-	desc->timer_rand_state = state;
+	mix_pool_bytes(&input_pool, buf, size, NULL);
+	mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
+	mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
+	mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
 }
-#endif
+EXPORT_SYMBOL(add_device_randomness);
 
 static struct timer_rand_state input_timer_state;
 
@@ -637,13 +683,9 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
 		goto out;
 
 	sample.jiffies = jiffies;
-
-	/* Use arch random value, fall back to cycles */
-	if (!arch_get_random_int(&sample.cycles))
-		sample.cycles = get_cycles();
-
+	sample.cycles = get_cycles();
 	sample.num = num;
-	mix_pool_bytes(&input_pool, &sample, sizeof(sample));
+	mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
 
 	/*
 	 * Calculate number of bits of randomness we probably added.
@@ -700,17 +742,48 @@ void add_input_randomness(unsigned int type, unsigned int code,
 }
 EXPORT_SYMBOL_GPL(add_input_randomness);
 
-void add_interrupt_randomness(int irq)
+static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
+
+void add_interrupt_randomness(int irq, int irq_flags)
 {
-	struct timer_rand_state *state;
+	struct entropy_store	*r;
+	struct fast_pool	*fast_pool = &__get_cpu_var(irq_randomness);
+	struct pt_regs		*regs = get_irq_regs();
+	unsigned long		now = jiffies;
+	__u32			input[4], cycles = get_cycles();
+
+	input[0] = cycles ^ jiffies;
+	input[1] = irq;
+	if (regs) {
+		__u64 ip = instruction_pointer(regs);
+		input[2] = ip;
+		input[3] = ip >> 32;
+	}
 
-	state = get_timer_rand_state(irq);
+	fast_mix(fast_pool, input, sizeof(input));
 
-	if (state == NULL)
+	if ((fast_pool->count & 1023) &&
+	    !time_after(now, fast_pool->last + HZ))
 		return;
 
-	DEBUG_ENT("irq event %d\n", irq);
-	add_timer_randomness(state, 0x100 + irq);
+	fast_pool->last = now;
+
+	r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
+	__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
+	/*
+	 * If we don't have a valid cycle counter, and we see
+	 * back-to-back timer interrupts, then skip giving credit for
+	 * any entropy.
+	 */
+	if (cycles == 0) {
+		if (irq_flags & __IRQF_TIMER) {
+			if (fast_pool->last_timer_intr)
+				return;
+			fast_pool->last_timer_intr = 1;
+		} else
+			fast_pool->last_timer_intr = 0;
+	}
+	credit_entropy_bits(r, 1);
 }
 
 #ifdef CONFIG_BLOCK
@@ -742,7 +815,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
  */
 static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
 {
-	__u32 tmp[OUTPUT_POOL_WORDS];
+	__u32	tmp[OUTPUT_POOL_WORDS];
 
 	if (r->pull && r->entropy_count < nbytes * 8 &&
 	    r->entropy_count < r->poolinfo->POOLBITS) {
@@ -761,7 +834,7 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
 		bytes = extract_entropy(r->pull, tmp, bytes,
 					random_read_wakeup_thresh / 8, rsvd);
-		mix_pool_bytes(r, tmp, bytes);
+		mix_pool_bytes(r, tmp, bytes, NULL);
 		credit_entropy_bits(r, bytes*8);
 	}
 }
@@ -820,13 +893,19 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
 static void extract_buf(struct entropy_store *r, __u8 *out)
 {
 	int i;
-	__u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
+	union {
+		__u32 w[5];
+		unsigned long l[LONGS(EXTRACT_SIZE)];
+	} hash;
+	__u32 workspace[SHA_WORKSPACE_WORDS];
 	__u8 extract[64];
+	unsigned long flags;
 
 	/* Generate a hash across the pool, 16 words (512 bits) at a time */
-	sha_init(hash);
+	sha_init(hash.w);
+	spin_lock_irqsave(&r->lock, flags);
 	for (i = 0; i < r->poolinfo->poolwords; i += 16)
-		sha_transform(hash, (__u8 *)(r->pool + i), workspace);
+		sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
 
 	/*
 	 * We mix the hash back into the pool to prevent backtracking
@@ -837,13 +916,14 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
 	 * brute-forcing the feedback as hard as brute-forcing the
 	 * hash.
	 */
-	mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
+	__mix_pool_bytes(r, hash.w, sizeof(hash.w), extract);
+	spin_unlock_irqrestore(&r->lock, flags);
 
	/*
	 * To avoid duplicates, we atomically extract a portion of the
	 * pool while mixing, and hash one final time.
	 */
-	sha_transform(hash, extract, workspace);
+	sha_transform(hash.w, extract, workspace);
 	memset(extract, 0, sizeof(extract));
 	memset(workspace, 0, sizeof(workspace));
 
	/*
@@ -852,20 +932,32 @@
	 * pattern, we fold it in half.  Thus, we always feed back
	 * twice as much data as we output.
	 */
-	hash[0] ^= hash[3];
-	hash[1] ^= hash[4];
-	hash[2] ^= rol32(hash[2], 16);
-	memcpy(out, hash, EXTRACT_SIZE);
-	memset(hash, 0, sizeof(hash));
+	hash.w[0] ^= hash.w[3];
+	hash.w[1] ^= hash.w[4];
+	hash.w[2] ^= rol32(hash.w[2], 16);
+
+	/*
+	 * If we have an architectural hardware random number
+	 * generator, mix that in, too.
+	 */
+	for (i = 0; i < LONGS(EXTRACT_SIZE); i++) {
+		unsigned long v;
+		if (!arch_get_random_long(&v))
+			break;
+		hash.l[i] ^= v;
+	}
+
+	memcpy(out, &hash, EXTRACT_SIZE);
+	memset(&hash, 0, sizeof(hash));
 }
 
 static ssize_t extract_entropy(struct entropy_store *r, void *buf,
-			       size_t nbytes, int min, int reserved)
+				 size_t nbytes, int min, int reserved)
 {
 	ssize_t ret = 0, i;
 	__u8 tmp[EXTRACT_SIZE];
-	unsigned long flags;
 
+	trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
 	xfer_secondary_pool(r, nbytes);
 	nbytes = account(r, nbytes, min, reserved);
 
@@ -873,6 +965,8 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
 		extract_buf(r, tmp);
 
 		if (fips_enabled) {
+			unsigned long flags;
+
 			spin_lock_irqsave(&r->lock, flags);
 			if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
 				panic("Hardware RNG duplicated output!\n");
@@ -898,6 +992,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
 	ssize_t ret = 0, i;
 	__u8 tmp[EXTRACT_SIZE];
 
+	trace_extract_entropy_user(r->name, nbytes, r->entropy_count, _RET_IP_);
 	xfer_secondary_pool(r, nbytes);
 	nbytes = account(r, nbytes, 0, 0);
 
@@ -931,17 +1026,35 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
 
 /*
  * This function is the exported kernel interface.  It returns some
- * number of good random numbers, suitable for seeding TCP sequence
- * numbers, etc.
+ * number of good random numbers, suitable for key generation, seeding
+ * TCP sequence numbers, etc.  It does not use the hw random number
+ * generator, if available; use get_random_bytes_arch() for that.
  */
 void get_random_bytes(void *buf, int nbytes)
 {
+	extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
+}
+EXPORT_SYMBOL(get_random_bytes);
+
+/*
+ * This function will use the architecture-specific hardware random
+ * number generator if it is available.  The arch-specific hw RNG will
+ * almost certainly be faster than what we can do in software, but it
+ * is impossible to verify that it is implemented securely (as
+ * opposed to, say, the AES encryption of a sequence number using a
+ * key known by the NSA).  So it's useful if we need the speed, but
+ * only if we're willing to trust the hardware manufacturer not to
+ * have put in a back door.
+ */
+void get_random_bytes_arch(void *buf, int nbytes)
+{
 	char *p = buf;
 
+	trace_get_random_bytes(nbytes, _RET_IP_);
 	while (nbytes) {
 		unsigned long v;
 		int chunk = min(nbytes, (int)sizeof(unsigned long));
-
+
 		if (!arch_get_random_long(&v))
 			break;
 
@@ -950,9 +1063,11 @@ void get_random_bytes(void *buf, int nbytes)
 		nbytes -= chunk;
 	}
 
-	extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
+	if (nbytes)
+		extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
 }
-EXPORT_SYMBOL(get_random_bytes);
+EXPORT_SYMBOL(get_random_bytes_arch);
+
 
 /*
  * init_std_data - initialize pool with system data
@@ -966,23 +1081,30 @@ EXPORT_SYMBOL(get_random_bytes);
 static void init_std_data(struct entropy_store *r)
 {
 	int i;
-	ktime_t now;
-	unsigned long flags;
+	ktime_t now = ktime_get_real();
+	unsigned long rv;
 
-	spin_lock_irqsave(&r->lock, flags);
 	r->entropy_count = 0;
-	spin_unlock_irqrestore(&r->lock, flags);
-
-	now = ktime_get_real();
-	mix_pool_bytes(r, &now, sizeof(now));
-	for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof flags) {
-		if (!arch_get_random_long(&flags))
+	r->entropy_total = 0;
+	mix_pool_bytes(r, &now, sizeof(now), NULL);
+	for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
+		if (!arch_get_random_long(&rv))
 			break;
-		mix_pool_bytes(r, &flags, sizeof(flags));
+		mix_pool_bytes(r, &rv, sizeof(rv), NULL);
 	}
-	mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
+	mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
 }
 
+/*
+ * Note that setup_arch() may call add_device_randomness()
+ * long before we get here. This allows seeding of the pools
+ * with some platform dependent data very early in the boot
+ * process. But it limits our options here. We must use
+ * statically allocated structures that already have all
+ * initializations complete at compile time. We should also
+ * take care not to overwrite the precious per platform data
+ * we were given.
+ */
 static int rand_initialize(void)
 {
 	init_std_data(&input_pool);
@@ -992,24 +1114,6 @@ static int rand_initialize(void)
 }
 module_init(rand_initialize);
 
-void rand_initialize_irq(int irq)
-{
-	struct timer_rand_state *state;
-
-	state = get_timer_rand_state(irq);
-
-	if (state)
-		return;
-
-	/*
-	 * If kzalloc returns null, we just won't use that entropy
-	 * source.
-	 */
-	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
-	if (state)
-		set_timer_rand_state(irq, state);
-}
-
 #ifdef CONFIG_BLOCK
 void rand_initialize_disk(struct gendisk *disk)
 {
@@ -1117,7 +1221,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
 		count -= bytes;
 		p += bytes;
 
-		mix_pool_bytes(r, buf, bytes);
+		mix_pool_bytes(r, buf, bytes, NULL);
 		cond_resched();
 	}
 
@@ -1279,6 +1383,7 @@ static int proc_do_uuid(ctl_table *table, int write,
 }
 
 static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
+extern ctl_table random_table[];
 ctl_table random_table[] = {
 	{
 		.procname	= "poolsize",
@@ -1344,7 +1449,7 @@ late_initcall(random_int_secret_init);
  * value is not cryptographically secure but for several uses the cost of
  * depleting entropy is too high
  */
-DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
+static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
 unsigned int get_random_int(void)
 {
 	__u32 *hash;
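
Below are a few standalone sketches of the techniques this patch introduces. They are illustrations for experimentation, not part of the patch itself.

The new add_device_randomness() call is meant to be used by driver or platform code with device-unique but non-secret data, such as a MAC address. A minimal sketch of a caller, assuming a hypothetical NIC driver whose probe routine has already filled ndev->dev_addr (the helper name is illustrative, not part of this patch):

#include <linux/etherdevice.h>
#include <linux/random.h>

/* Hypothetical helper, called once from a NIC driver's probe routine.
 * The MAC address is unique per device but public, so it personalizes
 * the pools on otherwise-identical boards without crediting entropy. */
static void example_seed_pools(struct net_device *ndev)
{
	add_device_randomness(ndev->dev_addr, ETH_ALEN);
}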
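
fast_mix() is self-contained enough to run outside the kernel. A userspace model using the same twist table and rotate schedule as the patch (main() and the sample input are illustrative only):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const uint32_t twist_table[8] = {
	0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
	0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };

/* Rotate left; the mask keeps the shift in [0, 31] so shift 0 is safe. */
static uint32_t rol32(uint32_t word, unsigned shift)
{
	return (word << shift) | (word >> ((32 - shift) & 31));
}

struct fast_pool {
	uint32_t pool[4];
	unsigned short count;
	unsigned char rotate;
};

/* Same mixing step as the kernel's fast_mix(): rotate each input byte,
 * XOR it with two pool words, then twist the result back into the pool. */
static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
{
	const unsigned char *bytes = in;
	uint32_t w;
	unsigned i = f->count;
	unsigned input_rotate = f->rotate;

	while (nbytes--) {
		w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
			f->pool[(i + 1) & 3];
		f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
		input_rotate += (i++ & 3) ? 7 : 14;
	}
	f->count = i;
	f->rotate = input_rotate;
}

int main(void)
{
	struct fast_pool f;
	uint32_t input[4] = { 0xdeadbeef, 27, 0xc0ffee00, 42 };

	memset(&f, 0, sizeof(f));
	fast_mix(&f, input, sizeof(input));
	printf("%08x %08x %08x %08x\n",
	       f.pool[0], f.pool[1], f.pool[2], f.pool[3]);
	return 0;
}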
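
The cmpxchg() loop in credit_entropy_bits() is the usual lock-free read-modify-write pattern: snapshot the count, compute the clamped new value, and publish it only if nobody raced in between; otherwise retry. A userspace model with C11 atomics standing in for the kernel's ACCESS_ONCE()/cmpxchg() (POOLBITS is an illustrative constant):

#include <stdatomic.h>
#include <stdio.h>

#define POOLBITS 4096			/* illustrative pool size in bits */

static _Atomic int entropy_count;

static void credit_entropy_bits_model(int nbits)
{
	int entropy, orig;

	if (!nbits)
		return;
	do {
		orig = atomic_load(&entropy_count);	/* snapshot */
		entropy = orig + nbits;
		if (entropy < 0)			/* debit past zero */
			entropy = 0;
		else if (entropy > POOLBITS)		/* saturate */
			entropy = POOLBITS;
		/* publish only if the count is still what we read */
	} while (!atomic_compare_exchange_weak(&entropy_count, &orig, entropy));
}

int main(void)
{
	credit_entropy_bits_model(64);
	credit_entropy_bits_model(-100);	/* clamps to 0, not -36 */
	printf("entropy_count = %d\n", atomic_load(&entropy_count));
	return 0;
}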
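
get_random_bytes_arch() consumes the arch RNG in unsigned-long-sized chunks and falls back to the entropy pool for whatever it could not fill. A userspace model of that chunk-and-fallback logic, with arch_get_random_long() and the pool extraction both stubbed out (the stubs and their behavior are assumptions for illustration):

#include <stdio.h>
#include <string.h>

/* Stub: pretend the CPU RNG succeeds three times, then fails, the way
 * an instruction like RDRAND may fail transiently on real hardware. */
static int fake_arch_get_random_long(unsigned long *v)
{
	static int budget = 3;

	if (budget-- <= 0)
		return 0;
	*v = 0x12345678UL;		/* fixed value, illustration only */
	return 1;
}

/* Stub for the entropy-pool fallback path. */
static void fake_extract_entropy(void *buf, int nbytes)
{
	memset(buf, 0xee, nbytes);
}

static void get_random_bytes_arch_model(void *buf, int nbytes)
{
	char *p = buf;

	while (nbytes) {
		unsigned long v;
		int chunk = nbytes < (int)sizeof(unsigned long) ?
			    nbytes : (int)sizeof(unsigned long);

		if (!fake_arch_get_random_long(&v))
			break;
		memcpy(p, &v, chunk);
		p += chunk;
		nbytes -= chunk;
	}
	if (nbytes)			/* fill whatever the RNG left over */
		fake_extract_entropy(p, nbytes);
}

int main(void)
{
	unsigned char buf[32];
	int i;

	get_random_bytes_arch_model(buf, sizeof(buf));
	for (i = 0; i < (int)sizeof(buf); i++)
		printf("%02x", buf[i]);
	printf("\n");
	return 0;
}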