summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author	Eric Biggers <ebiggers@kernel.org>	2026-04-01 03:05:45 +0300
committer	Eric Biggers <ebiggers@kernel.org>	2026-04-01 23:02:10 +0300
commit	fe1233c2eb69c040f737dd10b881efff5f4ccee0 (patch)
tree	cb6bb29d583745c1210810a37a93380f1131b4c8
parent	fd5017138ce03f34d0e67758df51e8bb30c0d91b (diff)
download	linux-fe1233c2eb69c040f737dd10b881efff5f4ccee0.tar.xz
lib/crypto: arm64/sha256: Remove obsolete chunking logic
Since commit aefbab8e77eb ("arm64: fpsimd: Preserve/restore kernel mode NEON at context switch"), kernel-mode NEON sections have been preemptible on arm64. And since commit 7dadeaa6e851 ("sched: Further restrict the preemption modes"), voluntary preemption is no longer supported on arm64 either. Therefore, there's no longer any need to limit the length of kernel-mode NEON sections on arm64. Simplify the SHA-256 code accordingly. Reviewed-by: Ard Biesheuvel <ardb@kernel.org> Link: https://lore.kernel.org/r/20260401000548.133151-7-ebiggers@kernel.org Signed-off-by: Eric Biggers <ebiggers@kernel.org>
-rw-r--r--	lib/crypto/arm64/sha256-ce.S	| 14
-rw-r--r--	lib/crypto/arm64/sha256.h	| 29
2 files changed, 13 insertions(+), 30 deletions(-)
diff --git a/lib/crypto/arm64/sha256-ce.S b/lib/crypto/arm64/sha256-ce.S
index e4bfe42a61a9..b54ad977afa3 100644
--- a/lib/crypto/arm64/sha256-ce.S
+++ b/lib/crypto/arm64/sha256-ce.S
@@ -79,11 +79,11 @@
.endm
/*
- * size_t __sha256_ce_transform(struct sha256_block_state *state,
- * const u8 *data, size_t nblocks);
+ * void sha256_ce_transform(struct sha256_block_state *state,
+ * const u8 *data, size_t nblocks);
*/
.text
-SYM_FUNC_START(__sha256_ce_transform)
+SYM_FUNC_START(sha256_ce_transform)
load_round_constants x8
@@ -127,17 +127,13 @@ CPU_LE( rev32 v19.16b, v19.16b )
add dgav.4s, dgav.4s, dg0v.4s
add dgbv.4s, dgbv.4s, dg1v.4s
- /* return early if voluntary preemption is needed */
- cond_yield 1f, x5, x6
-
/* handled all input blocks? */
cbnz x2, 0b
/* store new state */
-1: st1 {dgav.4s, dgbv.4s}, [x0]
- mov x0, x2
+ st1 {dgav.4s, dgbv.4s}, [x0]
ret
-SYM_FUNC_END(__sha256_ce_transform)
+SYM_FUNC_END(sha256_ce_transform)
.unreq dga
.unreq dgav
diff --git a/lib/crypto/arm64/sha256.h b/lib/crypto/arm64/sha256.h
index 1fad3d7baa9a..b4353d3c4dd0 100644
--- a/lib/crypto/arm64/sha256.h
+++ b/lib/crypto/arm64/sha256.h
@@ -14,26 +14,17 @@ asmlinkage void sha256_block_data_order(struct sha256_block_state *state,
const u8 *data, size_t nblocks);
asmlinkage void sha256_block_neon(struct sha256_block_state *state,
const u8 *data, size_t nblocks);
-asmlinkage size_t __sha256_ce_transform(struct sha256_block_state *state,
- const u8 *data, size_t nblocks);
+asmlinkage void sha256_ce_transform(struct sha256_block_state *state,
+ const u8 *data, size_t nblocks);
static void sha256_blocks(struct sha256_block_state *state,
const u8 *data, size_t nblocks)
{
if (static_branch_likely(&have_neon) && likely(may_use_simd())) {
- if (static_branch_likely(&have_ce)) {
- do {
- size_t rem;
-
- scoped_ksimd()
- rem = __sha256_ce_transform(state, data,
- nblocks);
-
- data += (nblocks - rem) * SHA256_BLOCK_SIZE;
- nblocks = rem;
- } while (nblocks);
- } else {
- scoped_ksimd()
+ scoped_ksimd() {
+ if (static_branch_likely(&have_ce))
+ sha256_ce_transform(state, data, nblocks);
+ else
sha256_block_neon(state, data, nblocks);
}
} else {
@@ -55,13 +46,9 @@ static bool sha256_finup_2x_arch(const struct __sha256_ctx *ctx,
u8 out1[SHA256_DIGEST_SIZE],
u8 out2[SHA256_DIGEST_SIZE])
{
- /*
- * The assembly requires len >= SHA256_BLOCK_SIZE && len <= INT_MAX.
- * Further limit len to 65536 to avoid spending too long with preemption
- * disabled. (Of course, in practice len is nearly always 4096 anyway.)
- */
+ /* The assembly requires len >= SHA256_BLOCK_SIZE && len <= INT_MAX. */
if (static_branch_likely(&have_ce) && len >= SHA256_BLOCK_SIZE &&
- len <= 65536 && likely(may_use_simd())) {
+ len <= INT_MAX && likely(may_use_simd())) {
scoped_ksimd()
sha256_ce_finup2x(ctx, data1, data2, len, out1, out2);
kmsan_unpoison_memory(out1, SHA256_DIGEST_SIZE);