From: Eric Biggers
Date: Wed, 1 Apr 2026 00:05:46 +0000 (-0700)
Subject: lib/crypto: arm64/sha512: Remove obsolete chunking logic
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=7116418f6b00faf43e56f0e052b968b04fc75989;p=thirdparty%2Fkernel%2Flinux.git

lib/crypto: arm64/sha512: Remove obsolete chunking logic

Since commit aefbab8e77eb ("arm64: fpsimd: Preserve/restore kernel mode
NEON at context switch"), kernel-mode NEON sections have been
preemptible on arm64.  And since commit 7dadeaa6e851 ("sched: Further
restrict the preemption modes"), voluntary preemption is no longer
supported on arm64 either.  Therefore, there's no longer any need to
limit the length of kernel-mode NEON sections on arm64.  Simplify the
SHA-512 code accordingly.

Reviewed-by: Ard Biesheuvel
Link: https://lore.kernel.org/r/20260401000548.133151-8-ebiggers@kernel.org
Signed-off-by: Eric Biggers
---

diff --git a/lib/crypto/arm64/sha512-ce-core.S b/lib/crypto/arm64/sha512-ce-core.S
index ffd51acfd1eed..26834921e8d6f 100644
--- a/lib/crypto/arm64/sha512-ce-core.S
+++ b/lib/crypto/arm64/sha512-ce-core.S
@@ -93,11 +93,11 @@
 	.endm
 
 	/*
-	 * size_t __sha512_ce_transform(struct sha512_block_state *state,
-	 *				const u8 *data, size_t nblocks);
+	 * void sha512_ce_transform(struct sha512_block_state *state,
+	 *			    const u8 *data, size_t nblocks);
 	 */
 	.text
-SYM_FUNC_START(__sha512_ce_transform)
+SYM_FUNC_START(sha512_ce_transform)
 	/* load state */
 	ld1		{v8.2d-v11.2d}, [x0]
 
@@ -186,12 +186,10 @@ CPU_LE(	rev64		v19.16b, v19.16b	)
 	add		v10.2d, v10.2d, v2.2d
 	add		v11.2d, v11.2d, v3.2d
 
-	cond_yield	3f, x4, x5
 	/* handled all input blocks? */
 	cbnz		x2, 0b
 
 	/* store new state */
-3:	st1		{v8.2d-v11.2d}, [x0]
-	mov		x0, x2
+	st1		{v8.2d-v11.2d}, [x0]
 	ret
-SYM_FUNC_END(__sha512_ce_transform)
+SYM_FUNC_END(sha512_ce_transform)
diff --git a/lib/crypto/arm64/sha512.h b/lib/crypto/arm64/sha512.h
index d978c4d07e905..5da27e6e23ea2 100644
--- a/lib/crypto/arm64/sha512.h
+++ b/lib/crypto/arm64/sha512.h
@@ -12,23 +12,16 @@ static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_sha512_insns);
 asmlinkage void sha512_block_data_order(struct sha512_block_state *state,
 					const u8 *data, size_t nblocks);
-asmlinkage size_t __sha512_ce_transform(struct sha512_block_state *state,
-					const u8 *data, size_t nblocks);
+asmlinkage void sha512_ce_transform(struct sha512_block_state *state,
+				    const u8 *data, size_t nblocks);
 
 static void sha512_blocks(struct sha512_block_state *state,
 			  const u8 *data, size_t nblocks)
 {
 	if (static_branch_likely(&have_sha512_insns) && likely(may_use_simd())) {
-		do {
-			size_t rem;
-
-			scoped_ksimd()
-				rem = __sha512_ce_transform(state, data, nblocks);
-
-			data += (nblocks - rem) * SHA512_BLOCK_SIZE;
-			nblocks = rem;
-		} while (nblocks);
+		scoped_ksimd()
+			sha512_ce_transform(state, data, nblocks);
 	} else {
 		sha512_block_data_order(state, data, nblocks);
 	}
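
For readers unfamiliar with the old calling convention, the following standalone
sketch (not part of this patch, and not kernel code) mirrors the caller-side
difference shown in the sha512.h hunk above: the old __sha512_ce_transform()
returned the number of unprocessed blocks so the caller looped over chunks,
while the new sha512_ce_transform() handles everything in one call.  The
fake_* names, block count, and chunk limit are invented for illustration only.

/*
 * Illustrative sketch of the dispatch change; all fake_* names are made up.
 */
#include <stddef.h>
#include <stdio.h>

#define FAKE_BLOCK_SIZE   128	/* SHA-512 block size in bytes */
#define FAKE_CHUNK_BLOCKS 4	/* arbitrary per-call limit for the old scheme */

/*
 * Old-style helper: handles at most FAKE_CHUNK_BLOCKS blocks per call and
 * returns how many blocks remain, like the removed __sha512_ce_transform().
 */
static size_t fake_transform_chunked(const unsigned char *data, size_t nblocks)
{
	size_t n = nblocks > FAKE_CHUNK_BLOCKS ? FAKE_CHUNK_BLOCKS : nblocks;

	(void)data;
	printf("chunked call: processed %zu block(s)\n", n);
	return nblocks - n;
}

/* New-style helper: handles all blocks in one call, like sha512_ce_transform(). */
static void fake_transform(const unsigned char *data, size_t nblocks)
{
	(void)data;
	printf("single call: processed %zu block(s)\n", nblocks);
}

int main(void)
{
	unsigned char data[10 * FAKE_BLOCK_SIZE] = { 0 };
	const unsigned char *p = data;
	size_t nblocks = 10;

	/*
	 * Old pattern: loop until the helper reports no remaining blocks,
	 * re-entering the SIMD section for each chunk.
	 */
	do {
		size_t rem = fake_transform_chunked(p, nblocks);

		p += (nblocks - rem) * FAKE_BLOCK_SIZE;
		nblocks = rem;
	} while (nblocks);

	/* New pattern: one call covers the whole input. */
	fake_transform(data, 10);
	return 0;
}

With kernel-mode NEON sections preemptible, the scheduler can interrupt the
single call directly, so neither the caller-side chunk loop nor the
assembly-side cond_yield buys anything anymore, which is what this patch
removes.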