summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorEric Biggers <ebiggers@kernel.org>2026-04-01 03:05:44 +0300
committerEric Biggers <ebiggers@kernel.org>2026-04-01 23:02:10 +0300
commitfd5017138ce03f34d0e67758df51e8bb30c0d91b (patch)
treebfbcf476457f81c0430870d26da9ae51da1fd828
parentdec1061f0ae9e00f13b9e141f3b5cae053da1346 (diff)
downloadlinux-fd5017138ce03f34d0e67758df51e8bb30c0d91b.tar.xz
lib/crypto: arm64/sha1: Remove obsolete chunking logic
Since commit aefbab8e77eb ("arm64: fpsimd: Preserve/restore kernel mode NEON at context switch"), kernel-mode NEON sections have been preemptible on arm64. And since commit 7dadeaa6e851 ("sched: Further restrict the preemption modes"), voluntary preemption is no longer supported on arm64 either. Therefore, there's no longer any need to limit the length of kernel-mode NEON sections on arm64. Simplify the SHA-1 code accordingly. Reviewed-by: Ard Biesheuvel <ardb@kernel.org> Link: https://lore.kernel.org/r/20260401000548.133151-6-ebiggers@kernel.org Signed-off-by: Eric Biggers <ebiggers@kernel.org>
-rw-r--r--lib/crypto/arm64/sha1-ce-core.S14
-rw-r--r--lib/crypto/arm64/sha1.h15
2 files changed, 9 insertions(+), 20 deletions(-)
diff --git a/lib/crypto/arm64/sha1-ce-core.S b/lib/crypto/arm64/sha1-ce-core.S
index 8fbd4767f0f0..59d27fda0714 100644
--- a/lib/crypto/arm64/sha1-ce-core.S
+++ b/lib/crypto/arm64/sha1-ce-core.S
@@ -62,10 +62,10 @@
.endm
/*
- * size_t __sha1_ce_transform(struct sha1_block_state *state,
- * const u8 *data, size_t nblocks);
+ * void sha1_ce_transform(struct sha1_block_state *state,
+ * const u8 *data, size_t nblocks);
*/
-SYM_FUNC_START(__sha1_ce_transform)
+SYM_FUNC_START(sha1_ce_transform)
/* load round constants */
loadrc k0.4s, 0x5a827999, w6
loadrc k1.4s, 0x6ed9eba1, w6
@@ -116,15 +116,11 @@ CPU_LE( rev32 v11.16b, v11.16b )
add dgbv.2s, dgbv.2s, dg1v.2s
add dgav.4s, dgav.4s, dg0v.4s
- /* return early if voluntary preemption is needed */
- cond_yield 1f, x5, x6
-
/* handled all input blocks? */
cbnz x2, 0b
/* store new state */
-1: st1 {dgav.4s}, [x0]
+ st1 {dgav.4s}, [x0]
str dgb, [x0, #16]
- mov x0, x2
ret
-SYM_FUNC_END(__sha1_ce_transform)
+SYM_FUNC_END(sha1_ce_transform)
diff --git a/lib/crypto/arm64/sha1.h b/lib/crypto/arm64/sha1.h
index bc7071f1be09..112c5d443c56 100644
--- a/lib/crypto/arm64/sha1.h
+++ b/lib/crypto/arm64/sha1.h
@@ -9,22 +9,15 @@
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce);
-asmlinkage size_t __sha1_ce_transform(struct sha1_block_state *state,
- const u8 *data, size_t nblocks);
+asmlinkage void sha1_ce_transform(struct sha1_block_state *state,
+ const u8 *data, size_t nblocks);
static void sha1_blocks(struct sha1_block_state *state,
const u8 *data, size_t nblocks)
{
if (static_branch_likely(&have_ce) && likely(may_use_simd())) {
- do {
- size_t rem;
-
- scoped_ksimd()
- rem = __sha1_ce_transform(state, data, nblocks);
-
- data += (nblocks - rem) * SHA1_BLOCK_SIZE;
- nblocks = rem;
- } while (nblocks);
+ scoped_ksimd()
+ sha1_ce_transform(state, data, nblocks);
} else {
sha1_blocks_generic(state, data, nblocks);
}