From: Eric Biggers
Date: Wed, 1 Apr 2026 00:05:44 +0000 (-0700)
Subject: lib/crypto: arm64/sha1: Remove obsolete chunking logic
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=fd5017138ce03f34d0e67758df51e8bb30c0d91b;p=thirdparty%2Fkernel%2Flinux.git

lib/crypto: arm64/sha1: Remove obsolete chunking logic

Since commit aefbab8e77eb ("arm64: fpsimd: Preserve/restore kernel mode
NEON at context switch"), kernel-mode NEON sections have been preemptible
on arm64.  And since commit 7dadeaa6e851 ("sched: Further restrict the
preemption modes"), voluntary preemption is no longer supported on arm64
either.  Therefore, there's no longer any need to limit the length of
kernel-mode NEON sections on arm64.

Simplify the SHA-1 code accordingly.

Reviewed-by: Ard Biesheuvel
Link: https://lore.kernel.org/r/20260401000548.133151-6-ebiggers@kernel.org
Signed-off-by: Eric Biggers
---

diff --git a/lib/crypto/arm64/sha1-ce-core.S b/lib/crypto/arm64/sha1-ce-core.S
index 8fbd4767f0f0c..59d27fda07144 100644
--- a/lib/crypto/arm64/sha1-ce-core.S
+++ b/lib/crypto/arm64/sha1-ce-core.S
@@ -62,10 +62,10 @@
 	.endm

 	/*
-	 * size_t __sha1_ce_transform(struct sha1_block_state *state,
-	 *			      const u8 *data, size_t nblocks);
+	 * void sha1_ce_transform(struct sha1_block_state *state,
+	 *			  const u8 *data, size_t nblocks);
 	 */
-SYM_FUNC_START(__sha1_ce_transform)
+SYM_FUNC_START(sha1_ce_transform)
 	/* load round constants */
 	loadrc		k0.4s, 0x5a827999, w6
 	loadrc		k1.4s, 0x6ed9eba1, w6
@@ -116,15 +116,11 @@ CPU_LE(	rev32		v11.16b, v11.16b	)
 	add		dgbv.2s, dgbv.2s, dg1v.2s
 	add		dgav.4s, dgav.4s, dg0v.4s

-	/* return early if voluntary preemption is needed */
-	cond_yield	1f, x5, x6
-
 	/* handled all input blocks? */
 	cbnz		x2, 0b

 	/* store new state */
-1:	st1		{dgav.4s}, [x0]
+	st1		{dgav.4s}, [x0]
 	str		dgb, [x0, #16]
-	mov		x0, x2
 	ret
-SYM_FUNC_END(__sha1_ce_transform)
+SYM_FUNC_END(sha1_ce_transform)
diff --git a/lib/crypto/arm64/sha1.h b/lib/crypto/arm64/sha1.h
index bc7071f1be096..112c5d443c562 100644
--- a/lib/crypto/arm64/sha1.h
+++ b/lib/crypto/arm64/sha1.h
@@ -9,22 +9,15 @@

 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce);

-asmlinkage size_t __sha1_ce_transform(struct sha1_block_state *state,
-				      const u8 *data, size_t nblocks);
+asmlinkage void sha1_ce_transform(struct sha1_block_state *state,
+				  const u8 *data, size_t nblocks);

 static void sha1_blocks(struct sha1_block_state *state,
 			const u8 *data, size_t nblocks)
 {
 	if (static_branch_likely(&have_ce) && likely(may_use_simd())) {
-		do {
-			size_t rem;
-
-			scoped_ksimd()
-				rem = __sha1_ce_transform(state, data, nblocks);
-
-			data += (nblocks - rem) * SHA1_BLOCK_SIZE;
-			nblocks = rem;
-		} while (nblocks);
+		scoped_ksimd()
+			sha1_ce_transform(state, data, nblocks);
 	} else {
 		sha1_blocks_generic(state, data, nblocks);
 	}