summaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
author	Eric Biggers <ebiggers@kernel.org>	2026-04-01 03:05:40 +0300
committer	Eric Biggers <ebiggers@kernel.org>	2026-04-01 23:02:09 +0300
commit	11d6bc70fff310cf0c4bbfa740144b0e350cd706 (patch)
tree	cda14b95c1d3225f4599bd9f7b42dee458595ac5 /lib
parent	8aeeb5255d5e0001f2af6786e2a7564fef416acf (diff)
downloadlinux-11d6bc70fff310cf0c4bbfa740144b0e350cd706.tar.xz
lib/crypto: arm64/aes: Remove obsolete chunking logic
Since commit aefbab8e77eb ("arm64: fpsimd: Preserve/restore kernel mode NEON at context switch"), kernel-mode NEON sections have been preemptible on arm64. And since commit 7dadeaa6e851 ("sched: Further restrict the preemption modes"), voluntary preemption is no longer supported on arm64 either. Therefore, there's no longer any need to limit the length of kernel-mode NEON sections on arm64. Simplify the AES-CBC-MAC code accordingly. Reviewed-by: Ard Biesheuvel <ardb@kernel.org> Link: https://lore.kernel.org/r/20260401000548.133151-2-ebiggers@kernel.org Signed-off-by: Eric Biggers <ebiggers@kernel.org>
Diffstat (limited to 'lib')
-rw-r--r--	lib/crypto/arm64/aes-modes.S	| 8
-rw-r--r--	lib/crypto/arm64/aes.h	| 35
2 files changed, 16 insertions(+), 27 deletions(-)
diff --git a/lib/crypto/arm64/aes-modes.S b/lib/crypto/arm64/aes-modes.S
index fc89cd02b642..10e537317eaf 100644
--- a/lib/crypto/arm64/aes-modes.S
+++ b/lib/crypto/arm64/aes-modes.S
@@ -817,9 +817,9 @@ AES_FUNC_END(aes_xts_decrypt)
#if IS_ENABLED(CONFIG_CRYPTO_LIB_AES_CBC_MACS)
/*
- * size_t aes_mac_update(u8 const in[], u32 const rk[], int rounds,
- * size_t blocks, u8 dg[], int enc_before,
- * int enc_after);
+ * void aes_mac_update(u8 const in[], u32 const rk[], int rounds,
+ * size_t blocks, u8 dg[], int enc_before,
+ * int enc_after);
*/
AES_FUNC_START(aes_mac_update)
ld1 {v0.16b}, [x4] /* get dg */
@@ -844,7 +844,6 @@ AES_FUNC_START(aes_mac_update)
cbz w5, .Lmacout
encrypt_block v0, w2, x1, x7, w8
st1 {v0.16b}, [x4] /* return dg */
- cond_yield .Lmacout, x7, x8
b .Lmacloop4x
.Lmac1x:
add x3, x3, #4
@@ -863,7 +862,6 @@ AES_FUNC_START(aes_mac_update)
.Lmacout:
st1 {v0.16b}, [x4] /* return dg */
- mov x0, x3
ret
AES_FUNC_END(aes_mac_update)
#endif /* CONFIG_CRYPTO_LIB_AES_CBC_MACS */
diff --git a/lib/crypto/arm64/aes.h b/lib/crypto/arm64/aes.h
index 135d3324a30a..9e9e45a6f787 100644
--- a/lib/crypto/arm64/aes.h
+++ b/lib/crypto/arm64/aes.h
@@ -29,9 +29,9 @@ asmlinkage void __aes_ce_decrypt(const u32 inv_rk[], u8 out[AES_BLOCK_SIZE],
asmlinkage u32 __aes_ce_sub(u32 l);
asmlinkage void __aes_ce_invert(struct aes_block *out,
const struct aes_block *in);
-asmlinkage size_t neon_aes_mac_update(u8 const in[], u32 const rk[], int rounds,
- size_t blocks, u8 dg[], int enc_before,
- int enc_after);
+asmlinkage void neon_aes_mac_update(u8 const in[], u32 const rk[], int rounds,
+ size_t blocks, u8 dg[], int enc_before,
+ int enc_after);
/*
* Expand an AES key using the crypto extensions if supported and usable or
@@ -192,25 +192,16 @@ static bool aes_cbcmac_blocks_arch(u8 h[AES_BLOCK_SIZE],
bool enc_after)
{
if (static_branch_likely(&have_neon) && likely(may_use_simd())) {
- do {
- size_t rem;
-
- scoped_ksimd() {
- if (static_branch_likely(&have_aes))
- rem = ce_aes_mac_update(
- data, key->k.rndkeys,
- key->nrounds, nblocks, h,
- enc_before, enc_after);
- else
- rem = neon_aes_mac_update(
- data, key->k.rndkeys,
- key->nrounds, nblocks, h,
- enc_before, enc_after);
- }
- data += (nblocks - rem) * AES_BLOCK_SIZE;
- nblocks = rem;
- enc_before = false;
- } while (nblocks);
+ scoped_ksimd() {
+ if (static_branch_likely(&have_aes))
+ ce_aes_mac_update(data, key->k.rndkeys,
+ key->nrounds, nblocks, h,
+ enc_before, enc_after);
+ else
+ neon_aes_mac_update(data, key->k.rndkeys,
+ key->nrounds, nblocks, h,
+ enc_before, enc_after);
+ }
return true;
}
return false;