summary refs log tree commit diff
path: root/lib
diff options
context:
space:
mode:
authorEric Biggers <ebiggers@kernel.org>2026-04-01 03:05:46 +0300
committerEric Biggers <ebiggers@kernel.org>2026-04-01 23:02:10 +0300
commit7116418f6b00faf43e56f0e052b968b04fc75989 (patch)
treef6ce8569472fc52243c17fb2771726f08ae119c8 /lib
parentfe1233c2eb69c040f737dd10b881efff5f4ccee0 (diff)
downloadlinux-7116418f6b00faf43e56f0e052b968b04fc75989.tar.xz
lib/crypto: arm64/sha512: Remove obsolete chunking logic
Since commit aefbab8e77eb ("arm64: fpsimd: Preserve/restore kernel mode NEON at context switch"), kernel-mode NEON sections have been preemptible on arm64. And since commit 7dadeaa6e851 ("sched: Further restrict the preemption modes"), voluntary preemption is no longer supported on arm64 either. Therefore, there's no longer any need to limit the length of kernel-mode NEON sections on arm64. Simplify the SHA-512 code accordingly. Reviewed-by: Ard Biesheuvel <ardb@kernel.org> Link: https://lore.kernel.org/r/20260401000548.133151-8-ebiggers@kernel.org Signed-off-by: Eric Biggers <ebiggers@kernel.org>
Diffstat (limited to 'lib')
-rw-r--r-- lib/crypto/arm64/sha512-ce-core.S | 12
-rw-r--r-- lib/crypto/arm64/sha512.h         | 15
2 files changed, 9 insertions(+), 18 deletions(-)
diff --git a/lib/crypto/arm64/sha512-ce-core.S b/lib/crypto/arm64/sha512-ce-core.S
index ffd51acfd1ee..26834921e8d6 100644
--- a/lib/crypto/arm64/sha512-ce-core.S
+++ b/lib/crypto/arm64/sha512-ce-core.S
@@ -93,11 +93,11 @@
.endm
/*
- * size_t __sha512_ce_transform(struct sha512_block_state *state,
- * const u8 *data, size_t nblocks);
+ * void sha512_ce_transform(struct sha512_block_state *state,
+ * const u8 *data, size_t nblocks);
*/
.text
-SYM_FUNC_START(__sha512_ce_transform)
+SYM_FUNC_START(sha512_ce_transform)
/* load state */
ld1 {v8.2d-v11.2d}, [x0]
@@ -186,12 +186,10 @@ CPU_LE( rev64 v19.16b, v19.16b )
add v10.2d, v10.2d, v2.2d
add v11.2d, v11.2d, v3.2d
- cond_yield 3f, x4, x5
/* handled all input blocks? */
cbnz x2, 0b
/* store new state */
-3: st1 {v8.2d-v11.2d}, [x0]
- mov x0, x2
+ st1 {v8.2d-v11.2d}, [x0]
ret
-SYM_FUNC_END(__sha512_ce_transform)
+SYM_FUNC_END(sha512_ce_transform)
diff --git a/lib/crypto/arm64/sha512.h b/lib/crypto/arm64/sha512.h
index d978c4d07e90..5da27e6e23ea 100644
--- a/lib/crypto/arm64/sha512.h
+++ b/lib/crypto/arm64/sha512.h
@@ -12,23 +12,16 @@ static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_sha512_insns);
asmlinkage void sha512_block_data_order(struct sha512_block_state *state,
const u8 *data, size_t nblocks);
-asmlinkage size_t __sha512_ce_transform(struct sha512_block_state *state,
- const u8 *data, size_t nblocks);
+asmlinkage void sha512_ce_transform(struct sha512_block_state *state,
+ const u8 *data, size_t nblocks);
static void sha512_blocks(struct sha512_block_state *state,
const u8 *data, size_t nblocks)
{
if (static_branch_likely(&have_sha512_insns) &&
likely(may_use_simd())) {
- do {
- size_t rem;
-
- scoped_ksimd()
- rem = __sha512_ce_transform(state, data, nblocks);
-
- data += (nblocks - rem) * SHA512_BLOCK_SIZE;
- nblocks = rem;
- } while (nblocks);
+ scoped_ksimd()
+ sha512_ce_transform(state, data, nblocks);
} else {
sha512_block_data_order(state, data, nblocks);
}