diff options
| author | Eric Biggers <ebiggers@kernel.org> | 2026-03-31 05:44:30 +0300 |
|---|---|---|
| committer | Eric Biggers <ebiggers@kernel.org> | 2026-04-01 03:19:22 +0300 |
| commit | 8f45af945fce60c8656b5113d80af7fe221c88f5 (patch) | |
| tree | 0dd178cdf8e0737624519a3b0c1508281e41ff01 /lib | |
| parent | 1aa82df3eb4d1a28c02a9d0062c6ed0db1a59bac (diff) | |
| download | linux-8f45af945fce60c8656b5113d80af7fe221c88f5.tar.xz | |
lib/crypto: aesgcm: Don't disable IRQs during AES block encryption
aes_encrypt() now uses AES instructions when available instead of always
using table-based code. AES instructions are constant-time and don't
benefit from disabling IRQs as a constant-time hardening measure.
In fact, on two architectures (arm and riscv) disabling IRQs is
counterproductive because it prevents the AES instructions from being
used. (See the may_use_simd() implementation on those architectures.)
Therefore, let's remove the IRQ disabling/enabling and leave the choice
of constant-time hardening measures to the AES library code.
Note that currently the arm table-based AES code (which runs on arm
kernels that don't have ARMv8 CE) disables IRQs, while the generic
table-based AES code does not. So this does technically regress in
constant-time hardening when that generic code is used. But as
discussed in commit a22fd0e3c495 ("lib/crypto: aes: Introduce improved
AES library") I think just leaving IRQs enabled is the right choice.
Disabling them is slow and can cause problems, and AES instructions
(which modern CPUs have) solve the problem in a much better way anyway.
Link: https://lore.kernel.org/r/20260331024430.51755-1-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
Diffstat (limited to 'lib')
| -rw-r--r-- | lib/crypto/aesgcm.c | 25 |
1 file changed, 3 insertions, 22 deletions
diff --git a/lib/crypto/aesgcm.c b/lib/crypto/aesgcm.c
index 8c7e74d2d147..1da31e1f747d 100644
--- a/lib/crypto/aesgcm.c
+++ b/lib/crypto/aesgcm.c
@@ -9,25 +9,6 @@
 #include <crypto/utils.h>
 #include <linux/export.h>
 #include <linux/module.h>
-#include <asm/irqflags.h>
-
-static void aesgcm_encrypt_block(const struct aes_enckey *key, void *dst,
-				 const void *src)
-{
-	unsigned long flags;
-
-	/*
-	 * In AES-GCM, both the GHASH key derivation and the CTR mode
-	 * encryption operate on known plaintext, making them susceptible to
-	 * timing attacks on the encryption key.  The AES library already
-	 * mitigates this risk to some extent by pulling the entire S-box into
-	 * the caches before doing any substitutions, but this strategy is more
-	 * effective when running with interrupts disabled.
-	 */
-	local_irq_save(flags);
-	aes_encrypt(key, dst, src);
-	local_irq_restore(flags);
-}

 /**
  * aesgcm_expandkey - Expands the AES and GHASH keys for the AES-GCM key
@@ -53,7 +34,7 @@ int aesgcm_expandkey(struct aesgcm_ctx *ctx, const u8 *key,
 		return ret;

 	ctx->authsize = authsize;
-	aesgcm_encrypt_block(&ctx->aes_key, h, h);
+	aes_encrypt(&ctx->aes_key, h, h);
 	ghash_preparekey(&ctx->ghash_key, h);
 	memzero_explicit(h, sizeof(h));
 	return 0;
@@ -98,7 +79,7 @@ static void aesgcm_mac(const struct aesgcm_ctx *ctx, const u8 *src, int src_len,
 	ghash_final(&ghash, ghash_out);

 	ctr[3] = cpu_to_be32(1);
-	aesgcm_encrypt_block(&ctx->aes_key, enc_ctr, ctr);
+	aes_encrypt(&ctx->aes_key, enc_ctr, (const u8 *)ctr);
 	crypto_xor_cpy(authtag, ghash_out, enc_ctr, ctx->authsize);

 	memzero_explicit(ghash_out, sizeof(ghash_out));
@@ -120,7 +101,7 @@ static void aesgcm_crypt(const struct aesgcm_ctx *ctx, u8 *dst, const u8 *src,
 		 * len', this cannot happen, so no explicit test is necessary.
 		 */
 		ctr[3] = cpu_to_be32(n++);
-		aesgcm_encrypt_block(&ctx->aes_key, buf, ctr);
+		aes_encrypt(&ctx->aes_key, buf, (const u8 *)ctr);
 		crypto_xor_cpy(dst, src, buf, min(len, AES_BLOCK_SIZE));

 		dst += AES_BLOCK_SIZE;
