author     Linus Torvalds <torvalds@linux-foundation.org>   2022-01-10 22:52:16 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2022-01-10 22:52:16 +0300
commit     d93aebbd76a07a8101d2f7393dc18be3e235f11b (patch)
tree       5c3b5931bfe1d18879bc5a8b9ddcda2e428c79f9 /drivers
parent     9d3a1e0a88e76bcb914e269cba0bfed6f4584a5e (diff)
parent     6c8e11e08a5b74bb8a5cdd5cbc1e5143df0fba72 (diff)
Merge branch 'random-5.17-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/crng/random
Pull random number generator updates from Jason Donenfeld:
 "These are a bit more numerous than usual for the RNG, due to folks
  resubmitting patches that had been pending prior and generally renewed
  interest.

  There are a few categories of patches in here:

   1) Dominik Brodowski and I traded a series back and forth for some
      weeks that fixed numerous issues related to seeds being provided
      at extremely early boot by the firmware, before other parts of the
      kernel or of the RNG have been initialized, both fixing some
      crashes and addressing correctness around early boot randomness.
      One of these is marked for stable.

   2) I replaced the RNG's usage of SHA-1 with BLAKE2s in the entropy
      extractor, and made the construction a bit safer and more
      standard. This was sort of a long overdue low hanging fruit, as we
      were supposed to have phased out SHA-1 usage quite some time ago
      (even if all we needed here was non-invertibility). Along the way
      it also made extraction 131% faster. This required a bit of
      Kconfig and symbol plumbing to make things work well with the
      crypto libraries, which is one of the reasons why I'm sending you
      this pull early in the cycle.

   3) I got rid of a truly superfluous call to RDRAND in the hot path,
      which resulted in a whopping 370% increase in performance.

   4) Sebastian Andrzej Siewior sent some patches regarding PREEMPT_RT,
      the full series of which wasn't ready yet, but the first two
      preparatory cleanups were good on their own. One of them touches
      files in kernel/irq/, which is the other reason why I'm sending
      you this pull early in the cycle.

   5) Other assorted correctness fixes from Eric Biggers, Jann Horn,
      Mark Brown, Dominik Brodowski, and myself"

* 'random-5.17-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/crng/random:
  random: don't reset crng_init_cnt on urandom_read()
  random: avoid superfluous call to RDRAND in CRNG extraction
  random: early initialization of ChaCha constants
  random: use IS_ENABLED(CONFIG_NUMA) instead of ifdefs
  random: harmonize "crng init done" messages
  random: mix bootloader randomness into pool
  random: do not throw away excess input to crng_fast_load
  random: do not re-init if crng_reseed completes before primary init
  random: fix crash on multiple early calls to add_bootloader_randomness()
  random: do not sign extend bytes for rotation when mixing
  random: use BLAKE2s instead of SHA1 in extraction
  lib/crypto: blake2s: include as built-in
  random: fix data race on crng init time
  random: fix data race on crng_node_pool
  irq: remove unused flags argument from __handle_irq_event_percpu()
  random: remove unused irq_flags argument from add_interrupt_randomness()
  random: document add_hwgenerator_randomness() with other input functions
  MAINTAINERS: add git tree for random.c
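[Editor's note] Before the diff itself, a condensed sketch of what item 2 amounts
to in extract_buf() below — locking and the arch-entropy salting are omitted
here, but every identifier comes from the hunk in question:

    struct blake2s_state state;
    u8 hash[BLAKE2S_HASH_SIZE];                 /* 32 bytes */

    blake2s_init(&state, sizeof(hash));
    blake2s_update(&state, (const u8 *)r->pool,
                   r->poolinfo->poolwords * sizeof(*r->pool));
    blake2s_final(&state, hash);                /* final zeroes out the state */

    __mix_pool_bytes(r, hash, sizeof(hash));    /* full hash goes back into the pool */
    memcpy(out, hash, BLAKE2S_HASH_SIZE / 2);   /* only half is ever emitted */
    memzero_explicit(hash, sizeof(hash));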
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/char/random.c    248
-rw-r--r--   drivers/hv/vmbus_drv.c     2
-rw-r--r--   drivers/net/Kconfig        1
3 files changed, 133 insertions(+), 118 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 605969ed0f96..227fb7802738 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1,8 +1,7 @@
/*
* random.c -- A strong random number generator
*
- * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
- * Rights Reserved.
+ * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*
* Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
*
@@ -78,12 +77,12 @@
* an *estimate* of how many bits of randomness have been stored into
* the random number generator's internal state.
*
- * When random bytes are desired, they are obtained by taking the SHA
- * hash of the contents of the "entropy pool". The SHA hash avoids
+ * When random bytes are desired, they are obtained by taking the BLAKE2s
+ * hash of the contents of the "entropy pool". The BLAKE2s hash avoids
* exposing the internal state of the entropy pool. It is believed to
* be computationally infeasible to derive any useful information
- * about the input of SHA from its output. Even if it is possible to
- * analyze SHA in some clever way, as long as the amount of data
+ * about the input of BLAKE2s from its output. Even if it is possible to
+ * analyze BLAKE2s in some clever way, as long as the amount of data
* returned from the generator is less than the inherent entropy in
* the pool, the output data is totally unpredictable. For this
* reason, the routine decreases its internal estimate of how many
@@ -93,7 +92,7 @@
* If this estimate goes to zero, the routine can still generate
* random numbers; however, an attacker may (at least in theory) be
* able to infer the future output of the generator from prior
- * outputs. This requires successful cryptanalysis of SHA, which is
+ * outputs. This requires successful cryptanalysis of BLAKE2s, which is
* not believed to be feasible, but there is a remote possibility.
* Nonetheless, these numbers should be useful for the vast majority
* of purposes.
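[Editor's note] As a worked example of that accounting (illustrative numbers
only, not code from this file, using the new EXTRACT_SIZE of 16 bytes):

    int entropy_bits = 512;            /* estimate currently credited to the pool */
    int out_bits = EXTRACT_SIZE * 8;   /* 16 bytes * 8 = 128 bits per extraction */

    entropy_bits -= out_bits;          /* 384 bits remain credited afterwards */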
@@ -200,8 +199,11 @@
* void add_device_randomness(const void *buf, unsigned int size);
* void add_input_randomness(unsigned int type, unsigned int code,
* unsigned int value);
- * void add_interrupt_randomness(int irq, int irq_flags);
+ * void add_interrupt_randomness(int irq);
* void add_disk_randomness(struct gendisk *disk);
+ * void add_hwgenerator_randomness(const char *buffer, size_t count,
+ * size_t entropy);
+ * void add_bootloader_randomness(const void *buf, unsigned int size);
*
* add_device_randomness() is for adding data to the random pool that
* is likely to differ between two devices (or possibly even per boot).
@@ -228,6 +230,14 @@
* particular randomness source. They do this by keeping track of the
* first and second order deltas of the event timings.
*
+ * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
+ * entropy as specified by the caller. If the entropy pool is full it will
+ * block until more entropy is needed.
+ *
+ * add_bootloader_randomness() is the same as add_hwgenerator_randomness() or
+ * add_device_randomness(), depending on whether or not the configuration
+ * option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
+ *
* Ensuring unpredictability at system startup
* ============================================
*
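[Editor's note] A hedged usage sketch for the hardware-RNG interface documented
above — my_hw_read() and the kthread wrapper are hypothetical, but the
add_hwgenerator_randomness() signature matches the declaration in this hunk:

    static int my_hwrng_fill(void *unused)
    {
            char buf[32];

            while (!kthread_should_stop()) {
                    size_t n = my_hw_read(buf, sizeof(buf)); /* hypothetical device read */

                    /* conservatively credit 4 bits of entropy per byte gathered;
                     * this call blocks while the pool is already full */
                    add_hwgenerator_randomness(buf, n, n * 4);
            }
            return 0;
    }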
@@ -336,7 +346,7 @@
#include <linux/completion.h>
#include <linux/uuid.h>
#include <crypto/chacha.h>
-#include <crypto/sha1.h>
+#include <crypto/blake2s.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
@@ -356,10 +366,7 @@
#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
#define OUTPUT_POOL_SHIFT 10
#define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
-#define EXTRACT_SIZE 10
-
-
-#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
+#define EXTRACT_SIZE (BLAKE2S_HASH_SIZE / 2)
/*
* To allow fractional bits to be tracked, the entropy_count field is
@@ -395,7 +402,7 @@ static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
* Thanks to Colin Plumb for suggesting this.
*
* The mixing operation is much less sensitive than the output hash,
- * where we use SHA-1. All that we want of mixing operation is that
+ * where we use BLAKE2s. All that we want of mixing operation is that
* it be a good non-cryptographic hash; i.e. it not produce collisions
* when fed "random" data of the sort we expect to see. As long as
* the pool state differs for different inputs, we have preserved the
@@ -450,6 +457,10 @@ struct crng_state {
static struct crng_state primary_crng = {
.lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
+ .state[0] = CHACHA_CONSTANT_EXPA,
+ .state[1] = CHACHA_CONSTANT_ND_3,
+ .state[2] = CHACHA_CONSTANT_2_BY,
+ .state[3] = CHACHA_CONSTANT_TE_K,
};
/*
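[Editor's note] The four constants seeded into state[0..3] here are ChaCha's
standard sigma words — the little-endian ASCII of "expand 32-byte k" — so the
block function is well-formed even before any entropy arrives. Their values,
as declared elsewhere in this series:

    #define CHACHA_CONSTANT_EXPA 0x61707865U /* "expa" */
    #define CHACHA_CONSTANT_ND_3 0x3320646eU /* "nd 3" */
    #define CHACHA_CONSTANT_2_BY 0x79622d32U /* "2-by" */
    #define CHACHA_CONSTANT_TE_K 0x6b206574U /* "te k" */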
@@ -461,6 +472,7 @@ static struct crng_state primary_crng = {
* its value (from 0->1->2).
*/
static int crng_init = 0;
+static bool crng_need_final_init = false;
#define crng_ready() (likely(crng_init > 1))
static int crng_init_cnt = 0;
static unsigned long crng_global_init_time = 0;
@@ -539,7 +551,7 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
unsigned long i, tap1, tap2, tap3, tap4, tap5;
int input_rotate;
int wordmask = r->poolinfo->poolwords - 1;
- const char *bytes = in;
+ const unsigned char *bytes = in;
__u32 w;
tap1 = r->poolinfo->tap1;
@@ -751,7 +763,6 @@ static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
-#ifdef CONFIG_NUMA
/*
* Hack to deal with crazy userspace progams when they are all trying
* to access /dev/urandom in parallel. The programs are almost
@@ -759,7 +770,6 @@ static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
* their brain damage.
*/
static struct crng_state **crng_node_pool __read_mostly;
-#endif
static void invalidate_batched_entropy(void);
static void numa_crng_init(void);
@@ -807,7 +817,7 @@ static bool __init crng_init_try_arch_early(struct crng_state *crng)
return arch_init;
}
-static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
+static void crng_initialize_secondary(struct crng_state *crng)
{
chacha_init_consts(crng->state);
_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
@@ -817,18 +827,46 @@ static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
static void __init crng_initialize_primary(struct crng_state *crng)
{
- chacha_init_consts(crng->state);
_extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
- if (crng_init_try_arch_early(crng) && trust_cpu) {
+ if (crng_init_try_arch_early(crng) && trust_cpu && crng_init < 2) {
invalidate_batched_entropy();
numa_crng_init();
crng_init = 2;
- pr_notice("crng done (trusting CPU's manufacturer)\n");
+ pr_notice("crng init done (trusting CPU's manufacturer)\n");
}
crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}
-#ifdef CONFIG_NUMA
+static void crng_finalize_init(struct crng_state *crng)
+{
+ if (crng != &primary_crng || crng_init >= 2)
+ return;
+ if (!system_wq) {
+ /* We can't call numa_crng_init until we have workqueues,
+ * so mark this for processing later. */
+ crng_need_final_init = true;
+ return;
+ }
+
+ invalidate_batched_entropy();
+ numa_crng_init();
+ crng_init = 2;
+ process_random_ready_list();
+ wake_up_interruptible(&crng_init_wait);
+ kill_fasync(&fasync, SIGIO, POLL_IN);
+ pr_notice("crng init done\n");
+ if (unseeded_warning.missed) {
+ pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
+ unseeded_warning.missed);
+ unseeded_warning.missed = 0;
+ }
+ if (urandom_warning.missed) {
+ pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
+ urandom_warning.missed);
+ urandom_warning.missed = 0;
+ }
+}
+
static void do_numa_crng_init(struct work_struct *work)
{
int i;
@@ -843,8 +881,8 @@ static void do_numa_crng_init(struct work_struct *work)
crng_initialize_secondary(crng);
pool[i] = crng;
}
- mb();
- if (cmpxchg(&crng_node_pool, NULL, pool)) {
+ /* pairs with READ_ONCE() in select_crng() */
+ if (cmpxchg_release(&crng_node_pool, NULL, pool) != NULL) {
for_each_node(i)
kfree(pool[i]);
kfree(pool);
@@ -855,20 +893,35 @@ static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);
static void numa_crng_init(void)
{
- schedule_work(&numa_crng_init_work);
+ if (IS_ENABLED(CONFIG_NUMA))
+ schedule_work(&numa_crng_init_work);
+}
+
+static struct crng_state *select_crng(void)
+{
+ if (IS_ENABLED(CONFIG_NUMA)) {
+ struct crng_state **pool;
+ int nid = numa_node_id();
+
+ /* pairs with cmpxchg_release() in do_numa_crng_init() */
+ pool = READ_ONCE(crng_node_pool);
+ if (pool && pool[nid])
+ return pool[nid];
+ }
+
+ return &primary_crng;
}
-#else
-static void numa_crng_init(void) {}
-#endif
/*
* crng_fast_load() can be called by code in the interrupt service
- * path. So we can't afford to dilly-dally.
+ * path. So we can't afford to dilly-dally. Returns the number of
+ * bytes processed from cp.
*/
-static int crng_fast_load(const char *cp, size_t len)
+static size_t crng_fast_load(const char *cp, size_t len)
{
unsigned long flags;
char *p;
+ size_t ret = 0;
if (!spin_trylock_irqsave(&primary_crng.lock, flags))
return 0;
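[Editor's note] Stepping back to the NUMA pool above: do_numa_crng_init()
publishes the fully initialized array with cmpxchg_release(), and select_crng()
consumes it via READ_ONCE(), relying on the address dependency for ordering.
The same pattern in isolation (a minimal generic sketch, not code from this
file):

    struct obj { int ready; };
    static struct obj *published;

    static void publish(struct obj *o)
    {
            o->ready = 1;                          /* initialize everything first... */
            if (cmpxchg_release(&published, NULL, o) != NULL)
                    kfree(o);                      /* ...lost the race: free our copy */
    }

    static int consume(void)
    {
            struct obj *o = READ_ONCE(published);  /* dependency-ordered load */

            return o ? o->ready : 0;
    }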
@@ -879,7 +932,7 @@ static int crng_fast_load(const char *cp, size_t len)
p = (unsigned char *) &primary_crng.state[4];
while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
- cp++; crng_init_cnt++; len--;
+ cp++; crng_init_cnt++; len--; ret++;
}
spin_unlock_irqrestore(&primary_crng.lock, flags);
if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
@@ -887,7 +940,7 @@ static int crng_fast_load(const char *cp, size_t len)
crng_init = 1;
pr_notice("fast init done\n");
}
- return 1;
+ return ret;
}
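[Editor's note] Because crng_fast_load() now reports how many bytes it actually
consumed, callers can keep the excess rather than throw it away — a simplified
sketch of the pattern the add_hwgenerator_randomness() hunk further down
adopts:

    size_t used = crng_fast_load(buf, len);        /* may take fewer than len bytes */

    mix_pool_bytes(&input_pool, buf, used);        /* consumed bytes feed the pool too */
    buf += used;
    len -= used;
    if (len)
            mix_pool_bytes(&input_pool, buf, len); /* excess input is no longer dropped */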
/*
@@ -962,41 +1015,24 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
crng->state[i+4] ^= buf.key[i] ^ rv;
}
memzero_explicit(&buf, sizeof(buf));
- crng->init_time = jiffies;
+ WRITE_ONCE(crng->init_time, jiffies);
spin_unlock_irqrestore(&crng->lock, flags);
- if (crng == &primary_crng && crng_init < 2) {
- invalidate_batched_entropy();
- numa_crng_init();
- crng_init = 2;
- process_random_ready_list();
- wake_up_interruptible(&crng_init_wait);
- kill_fasync(&fasync, SIGIO, POLL_IN);
- pr_notice("crng init done\n");
- if (unseeded_warning.missed) {
- pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
- unseeded_warning.missed);
- unseeded_warning.missed = 0;
- }
- if (urandom_warning.missed) {
- pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
- urandom_warning.missed);
- urandom_warning.missed = 0;
- }
- }
+ crng_finalize_init(crng);
}
static void _extract_crng(struct crng_state *crng,
__u8 out[CHACHA_BLOCK_SIZE])
{
- unsigned long v, flags;
-
- if (crng_ready() &&
- (time_after(crng_global_init_time, crng->init_time) ||
- time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)))
- crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
+ unsigned long flags, init_time;
+
+ if (crng_ready()) {
+ init_time = READ_ONCE(crng->init_time);
+ if (time_after(READ_ONCE(crng_global_init_time), init_time) ||
+ time_after(jiffies, init_time + CRNG_RESEED_INTERVAL))
+ crng_reseed(crng, crng == &primary_crng ?
+ &input_pool : NULL);
+ }
spin_lock_irqsave(&crng->lock, flags);
- if (arch_get_random_long(&v))
- crng->state[14] ^= v;
chacha20_block(&crng->state[0], out);
if (crng->state[12] == 0)
crng->state[13]++;
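[Editor's note] The READ_ONCE() here pairs with the WRITE_ONCE() in
crng_reseed() and random_ioctl(); it is the usual cure for a C11 data race on a
lockless timestamp. The shape in isolation (generic sketch, not code from this
file):

    static unsigned long stamp;

    static void writer(void)
    {
            WRITE_ONCE(stamp, jiffies);          /* single, untearable store */
    }

    static bool reseed_due(void)
    {
            unsigned long t = READ_ONCE(stamp);  /* load the value exactly once */

            return time_after(jiffies, t + CRNG_RESEED_INTERVAL);
    }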
@@ -1005,15 +1041,7 @@ static void _extract_crng(struct crng_state *crng,
static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE])
{
- struct crng_state *crng = NULL;
-
-#ifdef CONFIG_NUMA
- if (crng_node_pool)
- crng = crng_node_pool[numa_node_id()];
- if (crng == NULL)
-#endif
- crng = &primary_crng;
- _extract_crng(crng, out);
+ _extract_crng(select_crng(), out);
}
/*
@@ -1042,15 +1070,7 @@ static void _crng_backtrack_protect(struct crng_state *crng,
static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
- struct crng_state *crng = NULL;
-
-#ifdef CONFIG_NUMA
- if (crng_node_pool)
- crng = crng_node_pool[numa_node_id()];
- if (crng == NULL)
-#endif
- crng = &primary_crng;
- _crng_backtrack_protect(crng, tmp, used);
+ _crng_backtrack_protect(select_crng(), tmp, used);
}
static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
@@ -1242,7 +1262,7 @@ static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
return *ptr;
}
-void add_interrupt_randomness(int irq, int irq_flags)
+void add_interrupt_randomness(int irq)
{
struct entropy_store *r;
struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
@@ -1269,7 +1289,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
if (unlikely(crng_init == 0)) {
if ((fast_pool->count >= 64) &&
crng_fast_load((char *) fast_pool->pool,
- sizeof(fast_pool->pool))) {
+ sizeof(fast_pool->pool)) > 0) {
fast_pool->count = 0;
fast_pool->last = now;
}
@@ -1368,56 +1388,49 @@ retry:
*/
static void extract_buf(struct entropy_store *r, __u8 *out)
{
- int i;
- union {
- __u32 w[5];
- unsigned long l[LONGS(20)];
- } hash;
- __u32 workspace[SHA1_WORKSPACE_WORDS];
+ struct blake2s_state state __aligned(__alignof__(unsigned long));
+ u8 hash[BLAKE2S_HASH_SIZE];
+ unsigned long *salt;
unsigned long flags;
+ blake2s_init(&state, sizeof(hash));
+
/*
* If we have an architectural hardware random number
- * generator, use it for SHA's initial vector
+ * generator, use it for BLAKE2's salt & personal fields.
*/
- sha1_init(hash.w);
- for (i = 0; i < LONGS(20); i++) {
+ for (salt = (unsigned long *)&state.h[4];
+ salt < (unsigned long *)&state.h[8]; ++salt) {
unsigned long v;
if (!arch_get_random_long(&v))
break;
- hash.l[i] = v;
+ *salt ^= v;
}
- /* Generate a hash across the pool, 16 words (512 bits) at a time */
+ /* Generate a hash across the pool */
spin_lock_irqsave(&r->lock, flags);
- for (i = 0; i < r->poolinfo->poolwords; i += 16)
- sha1_transform(hash.w, (__u8 *)(r->pool + i), workspace);
+ blake2s_update(&state, (const u8 *)r->pool,
+ r->poolinfo->poolwords * sizeof(*r->pool));
+ blake2s_final(&state, hash); /* final zeros out state */
/*
* We mix the hash back into the pool to prevent backtracking
* attacks (where the attacker knows the state of the pool
* plus the current outputs, and attempts to find previous
- * ouputs), unless the hash function can be inverted. By
- * mixing at least a SHA1 worth of hash data back, we make
+ * outputs), unless the hash function can be inverted. By
+ * mixing at least a hash worth of hash data back, we make
* brute-forcing the feedback as hard as brute-forcing the
* hash.
*/
- __mix_pool_bytes(r, hash.w, sizeof(hash.w));
+ __mix_pool_bytes(r, hash, sizeof(hash));
spin_unlock_irqrestore(&r->lock, flags);
- memzero_explicit(workspace, sizeof(workspace));
-
- /*
- * In case the hash function has some recognizable output
- * pattern, we fold it in half. Thus, we always feed back
- * twice as much data as we output.
+ /* Note that EXTRACT_SIZE is half of hash size here, because above
+ * we've dumped the full length back into mixer. By reducing the
+ * amount that we emit, we retain a level of forward secrecy.
*/
- hash.w[0] ^= hash.w[3];
- hash.w[1] ^= hash.w[4];
- hash.w[2] ^= rol32(hash.w[2], 16);
-
- memcpy(out, &hash, EXTRACT_SIZE);
- memzero_explicit(&hash, sizeof(hash));
+ memcpy(out, hash, EXTRACT_SIZE);
+ memzero_explicit(hash, sizeof(hash));
}
static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
@@ -1775,6 +1788,8 @@ static void __init init_std_data(struct entropy_store *r)
int __init rand_initialize(void)
{
init_std_data(&input_pool);
+ if (crng_need_final_init)
+ crng_finalize_init(&primary_crng);
crng_initialize_primary(&primary_crng);
crng_global_init_time = jiffies;
if (ratelimit_disable) {
@@ -1816,7 +1831,6 @@ urandom_read_nowarn(struct file *file, char __user *buf, size_t nbytes,
static ssize_t
urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
- unsigned long flags;
static int maxwarn = 10;
if (!crng_ready() && maxwarn > 0) {
@@ -1824,9 +1838,6 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
if (__ratelimit(&urandom_warning))
pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
current->comm, nbytes);
- spin_lock_irqsave(&primary_crng.lock, flags);
- crng_init_cnt = 0;
- spin_unlock_irqrestore(&primary_crng.lock, flags);
}
return urandom_read_nowarn(file, buf, nbytes, ppos);
@@ -1949,7 +1960,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
if (crng_init < 2)
return -ENODATA;
crng_reseed(&primary_crng, &input_pool);
- crng_global_init_time = jiffies - 1;
+ WRITE_ONCE(crng_global_init_time, jiffies - 1);
return 0;
default:
return -EINVAL;
@@ -2275,15 +2286,20 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
struct entropy_store *poolp = &input_pool;
if (unlikely(crng_init == 0)) {
- crng_fast_load(buffer, count);
- return;
+ size_t ret = crng_fast_load(buffer, count);
+ mix_pool_bytes(poolp, buffer, ret);
+ count -= ret;
+ buffer += ret;
+ if (!count || crng_init == 0)
+ return;
}
/* Suspend writing if we're above the trickle threshold.
* We'll be woken up again once below random_write_wakeup_thresh,
* or when the calling thread is about to terminate.
*/
- wait_event_interruptible(random_write_wait, kthread_should_stop() ||
+ wait_event_interruptible(random_write_wait,
+ !system_wq || kthread_should_stop() ||
ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
mix_pool_bytes(poolp, buffer, count);
credit_entropy_bits(poolp, entropy);
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 392c1ac4f819..7ae04ccb1043 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1381,7 +1381,7 @@ static void vmbus_isr(void)
tasklet_schedule(&hv_cpu->msg_dpc);
}
- add_interrupt_randomness(vmbus_interrupt, 0);
+ add_interrupt_randomness(vmbus_interrupt);
}
static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 6cccc3dc00bc..b2a4f998c180 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -81,7 +81,6 @@ config WIREGUARD
select CRYPTO
select CRYPTO_LIB_CURVE25519
select CRYPTO_LIB_CHACHA20POLY1305
- select CRYPTO_LIB_BLAKE2S
select CRYPTO_CHACHA20_X86_64 if X86 && 64BIT
select CRYPTO_POLY1305_X86_64 if X86 && 64BIT
select CRYPTO_BLAKE2S_X86 if X86 && 64BIT