summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author: Eric Biggers <ebiggers@kernel.org> 2026-04-01 03:05:40 +0300
committer: Eric Biggers <ebiggers@kernel.org> 2026-04-01 23:02:09 +0300
commit: 11d6bc70fff310cf0c4bbfa740144b0e350cd706 (patch)
tree: cda14b95c1d3225f4599bd9f7b42dee458595ac5
parent: 8aeeb5255d5e0001f2af6786e2a7564fef416acf (diff)
download: linux-11d6bc70fff310cf0c4bbfa740144b0e350cd706.tar.xz
lib/crypto: arm64/aes: Remove obsolete chunking logic
Since commit aefbab8e77eb ("arm64: fpsimd: Preserve/restore kernel mode NEON at context switch"), kernel-mode NEON sections have been preemptible on arm64. And since commit 7dadeaa6e851 ("sched: Further restrict the preemption modes"), voluntary preemption is no longer supported on arm64 either. Therefore, there's no longer any need to limit the length of kernel-mode NEON sections on arm64. Simplify the AES-CBC-MAC code accordingly. Reviewed-by: Ard Biesheuvel <ardb@kernel.org> Link: https://lore.kernel.org/r/20260401000548.133151-2-ebiggers@kernel.org Signed-off-by: Eric Biggers <ebiggers@kernel.org>
-rw-r--r--  arch/arm64/crypto/aes-ce-ccm-glue.c  13
-rw-r--r--  include/crypto/aes.h                  6
-rw-r--r--  lib/crypto/arm64/aes-modes.S          8
-rw-r--r--  lib/crypto/arm64/aes.h               35
4 files changed, 23 insertions(+), 39 deletions(-)
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index 45aed0073283..a304375ce724 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -101,16 +101,11 @@ static u32 ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
u32 blocks = abytes / AES_BLOCK_SIZE;
if (macp == AES_BLOCK_SIZE || (!macp && blocks > 0)) {
- u32 rem = ce_aes_mac_update(in, rk, rounds, blocks, mac,
- macp, enc_after);
- u32 adv = (blocks - rem) * AES_BLOCK_SIZE;
-
+ ce_aes_mac_update(in, rk, rounds, blocks, mac, macp,
+ enc_after);
macp = enc_after ? 0 : AES_BLOCK_SIZE;
- in += adv;
- abytes -= adv;
-
- if (unlikely(rem))
- macp = 0;
+ in += blocks * AES_BLOCK_SIZE;
+ abytes -= blocks * AES_BLOCK_SIZE;
} else {
u32 l = min(AES_BLOCK_SIZE - macp, abytes);
diff --git a/include/crypto/aes.h b/include/crypto/aes.h
index 3feb4105c2a2..16fbfd93e2bd 100644
--- a/include/crypto/aes.h
+++ b/include/crypto/aes.h
@@ -230,9 +230,9 @@ asmlinkage void ce_aes_essiv_cbc_encrypt(u8 out[], u8 const in[],
asmlinkage void ce_aes_essiv_cbc_decrypt(u8 out[], u8 const in[],
u32 const rk1[], int rounds,
int blocks, u8 iv[], u32 const rk2[]);
-asmlinkage size_t ce_aes_mac_update(u8 const in[], u32 const rk[], int rounds,
- size_t blocks, u8 dg[], int enc_before,
- int enc_after);
+asmlinkage void ce_aes_mac_update(u8 const in[], u32 const rk[], int rounds,
+ size_t blocks, u8 dg[], int enc_before,
+ int enc_after);
#elif defined(CONFIG_PPC)
void ppc_expand_key_128(u32 *key_enc, const u8 *key);
void ppc_expand_key_192(u32 *key_enc, const u8 *key);
diff --git a/lib/crypto/arm64/aes-modes.S b/lib/crypto/arm64/aes-modes.S
index fc89cd02b642..10e537317eaf 100644
--- a/lib/crypto/arm64/aes-modes.S
+++ b/lib/crypto/arm64/aes-modes.S
@@ -817,9 +817,9 @@ AES_FUNC_END(aes_xts_decrypt)
#if IS_ENABLED(CONFIG_CRYPTO_LIB_AES_CBC_MACS)
/*
- * size_t aes_mac_update(u8 const in[], u32 const rk[], int rounds,
- * size_t blocks, u8 dg[], int enc_before,
- * int enc_after);
+ * void aes_mac_update(u8 const in[], u32 const rk[], int rounds,
+ * size_t blocks, u8 dg[], int enc_before,
+ * int enc_after);
*/
AES_FUNC_START(aes_mac_update)
ld1 {v0.16b}, [x4] /* get dg */
@@ -844,7 +844,6 @@ AES_FUNC_START(aes_mac_update)
cbz w5, .Lmacout
encrypt_block v0, w2, x1, x7, w8
st1 {v0.16b}, [x4] /* return dg */
- cond_yield .Lmacout, x7, x8
b .Lmacloop4x
.Lmac1x:
add x3, x3, #4
@@ -863,7 +862,6 @@ AES_FUNC_START(aes_mac_update)
.Lmacout:
st1 {v0.16b}, [x4] /* return dg */
- mov x0, x3
ret
AES_FUNC_END(aes_mac_update)
#endif /* CONFIG_CRYPTO_LIB_AES_CBC_MACS */
diff --git a/lib/crypto/arm64/aes.h b/lib/crypto/arm64/aes.h
index 135d3324a30a..9e9e45a6f787 100644
--- a/lib/crypto/arm64/aes.h
+++ b/lib/crypto/arm64/aes.h
@@ -29,9 +29,9 @@ asmlinkage void __aes_ce_decrypt(const u32 inv_rk[], u8 out[AES_BLOCK_SIZE],
asmlinkage u32 __aes_ce_sub(u32 l);
asmlinkage void __aes_ce_invert(struct aes_block *out,
const struct aes_block *in);
-asmlinkage size_t neon_aes_mac_update(u8 const in[], u32 const rk[], int rounds,
- size_t blocks, u8 dg[], int enc_before,
- int enc_after);
+asmlinkage void neon_aes_mac_update(u8 const in[], u32 const rk[], int rounds,
+ size_t blocks, u8 dg[], int enc_before,
+ int enc_after);
/*
* Expand an AES key using the crypto extensions if supported and usable or
@@ -192,25 +192,16 @@ static bool aes_cbcmac_blocks_arch(u8 h[AES_BLOCK_SIZE],
bool enc_after)
{
if (static_branch_likely(&have_neon) && likely(may_use_simd())) {
- do {
- size_t rem;
-
- scoped_ksimd() {
- if (static_branch_likely(&have_aes))
- rem = ce_aes_mac_update(
- data, key->k.rndkeys,
- key->nrounds, nblocks, h,
- enc_before, enc_after);
- else
- rem = neon_aes_mac_update(
- data, key->k.rndkeys,
- key->nrounds, nblocks, h,
- enc_before, enc_after);
- }
- data += (nblocks - rem) * AES_BLOCK_SIZE;
- nblocks = rem;
- enc_before = false;
- } while (nblocks);
+ scoped_ksimd() {
+ if (static_branch_likely(&have_aes))
+ ce_aes_mac_update(data, key->k.rndkeys,
+ key->nrounds, nblocks, h,
+ enc_before, enc_after);
+ else
+ neon_aes_mac_update(data, key->k.rndkeys,
+ key->nrounds, nblocks, h,
+ enc_before, enc_after);
+ }
return true;
}
return false;