git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
lib/crypto: arm64/sha1: Remove obsolete chunking logic
author: Eric Biggers <ebiggers@kernel.org>
Wed, 1 Apr 2026 00:05:44 +0000 (17:05 -0700)
committer: Eric Biggers <ebiggers@kernel.org>
Wed, 1 Apr 2026 20:02:10 +0000 (13:02 -0700)
Since commit aefbab8e77eb ("arm64: fpsimd: Preserve/restore kernel mode
NEON at context switch"), kernel-mode NEON sections have been
preemptible on arm64.  And since commit 7dadeaa6e851 ("sched: Further
restrict the preemption modes"), voluntary preemption is no longer
supported on arm64 either.  Therefore, there's no longer any need to
limit the length of kernel-mode NEON sections on arm64.

Simplify the SHA-1 code accordingly.

Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20260401000548.133151-6-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
lib/crypto/arm64/sha1-ce-core.S
lib/crypto/arm64/sha1.h

index 8fbd4767f0f0ca0d12962a19bd26a2a1399597f2..59d27fda071440ed93a404e8fcf9c2553e106f6f 100644 (file)
        .endm
 
        /*
-        * size_t __sha1_ce_transform(struct sha1_block_state *state,
-        *                            const u8 *data, size_t nblocks);
+        * void sha1_ce_transform(struct sha1_block_state *state,
+        *                        const u8 *data, size_t nblocks);
         */
-SYM_FUNC_START(__sha1_ce_transform)
+SYM_FUNC_START(sha1_ce_transform)
        /* load round constants */
        loadrc          k0.4s, 0x5a827999, w6
        loadrc          k1.4s, 0x6ed9eba1, w6
@@ -116,15 +116,11 @@ CPU_LE(   rev32           v11.16b, v11.16b        )
        add             dgbv.2s, dgbv.2s, dg1v.2s
        add             dgav.4s, dgav.4s, dg0v.4s
 
-       /* return early if voluntary preemption is needed */
-       cond_yield      1f, x5, x6
-
        /* handled all input blocks? */
        cbnz            x2, 0b
 
        /* store new state */
-1:     st1             {dgav.4s}, [x0]
+       st1             {dgav.4s}, [x0]
        str             dgb, [x0, #16]
-       mov             x0, x2
        ret
-SYM_FUNC_END(__sha1_ce_transform)
+SYM_FUNC_END(sha1_ce_transform)
index bc7071f1be09624af9f925677c539fc7d42ab2c1..112c5d443c562575d5465f84bd0094284a714fde 100644 (file)
@@ -9,22 +9,15 @@
 
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce);
 
-asmlinkage size_t __sha1_ce_transform(struct sha1_block_state *state,
-                                     const u8 *data, size_t nblocks);
+asmlinkage void sha1_ce_transform(struct sha1_block_state *state,
+                                 const u8 *data, size_t nblocks);
 
 static void sha1_blocks(struct sha1_block_state *state,
                        const u8 *data, size_t nblocks)
 {
        if (static_branch_likely(&have_ce) && likely(may_use_simd())) {
-               do {
-                       size_t rem;
-
-                       scoped_ksimd()
-                               rem = __sha1_ce_transform(state, data, nblocks);
-
-                       data += (nblocks - rem) * SHA1_BLOCK_SIZE;
-                       nblocks = rem;
-               } while (nblocks);
+               scoped_ksimd()
+                       sha1_ce_transform(state, data, nblocks);
        } else {
                sha1_blocks_generic(state, data, nblocks);
        }