git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
lib/crypto: arm64/sha256: Remove obsolete chunking logic
author Eric Biggers <ebiggers@kernel.org>
Wed, 1 Apr 2026 00:05:45 +0000 (17:05 -0700)
committer Eric Biggers <ebiggers@kernel.org>
Wed, 1 Apr 2026 20:02:10 +0000 (13:02 -0700)
Since commit aefbab8e77eb ("arm64: fpsimd: Preserve/restore kernel mode
NEON at context switch"), kernel-mode NEON sections have been
preemptible on arm64.  And since commit 7dadeaa6e851 ("sched: Further
restrict the preemption modes"), voluntary preemption is no longer
supported on arm64 either.  Therefore, there's no longer any need to
limit the length of kernel-mode NEON sections on arm64.

Simplify the SHA-256 code accordingly.

Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20260401000548.133151-7-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
lib/crypto/arm64/sha256-ce.S
lib/crypto/arm64/sha256.h

index e4bfe42a61a929d22f96316cbf19d1318939d03d..b54ad977afa339c7eb4e3788d6e77ab2eb21d602 100644 (file)
        .endm
 
        /*
-        * size_t __sha256_ce_transform(struct sha256_block_state *state,
-        *                              const u8 *data, size_t nblocks);
+        * void sha256_ce_transform(struct sha256_block_state *state,
+        *                          const u8 *data, size_t nblocks);
         */
        .text
-SYM_FUNC_START(__sha256_ce_transform)
+SYM_FUNC_START(sha256_ce_transform)
 
        load_round_constants    x8
 
@@ -127,17 +127,13 @@ CPU_LE(   rev32           v19.16b, v19.16b        )
        add             dgav.4s, dgav.4s, dg0v.4s
        add             dgbv.4s, dgbv.4s, dg1v.4s
 
-       /* return early if voluntary preemption is needed */
-       cond_yield      1f, x5, x6
-
        /* handled all input blocks? */
        cbnz            x2, 0b
 
        /* store new state */
-1:     st1             {dgav.4s, dgbv.4s}, [x0]
-       mov             x0, x2
+       st1             {dgav.4s, dgbv.4s}, [x0]
        ret
-SYM_FUNC_END(__sha256_ce_transform)
+SYM_FUNC_END(sha256_ce_transform)
 
        .unreq dga
        .unreq dgav
index 1fad3d7baa9a225300593f92983899f4b539b1df..b4353d3c4dd09e45729c55f4b92706827929aade 100644 (file)
@@ -14,26 +14,17 @@ asmlinkage void sha256_block_data_order(struct sha256_block_state *state,
                                        const u8 *data, size_t nblocks);
 asmlinkage void sha256_block_neon(struct sha256_block_state *state,
                                  const u8 *data, size_t nblocks);
-asmlinkage size_t __sha256_ce_transform(struct sha256_block_state *state,
-                                       const u8 *data, size_t nblocks);
+asmlinkage void sha256_ce_transform(struct sha256_block_state *state,
+                                   const u8 *data, size_t nblocks);
 
 static void sha256_blocks(struct sha256_block_state *state,
                          const u8 *data, size_t nblocks)
 {
        if (static_branch_likely(&have_neon) && likely(may_use_simd())) {
-               if (static_branch_likely(&have_ce)) {
-                       do {
-                               size_t rem;
-
-                               scoped_ksimd()
-                                       rem = __sha256_ce_transform(state, data,
-                                                                   nblocks);
-
-                               data += (nblocks - rem) * SHA256_BLOCK_SIZE;
-                               nblocks = rem;
-                       } while (nblocks);
-               } else {
-                       scoped_ksimd()
+               scoped_ksimd() {
+                       if (static_branch_likely(&have_ce))
+                               sha256_ce_transform(state, data, nblocks);
+                       else
                                sha256_block_neon(state, data, nblocks);
                }
        } else {
@@ -55,13 +46,9 @@ static bool sha256_finup_2x_arch(const struct __sha256_ctx *ctx,
                                 u8 out1[SHA256_DIGEST_SIZE],
                                 u8 out2[SHA256_DIGEST_SIZE])
 {
-       /*
-        * The assembly requires len >= SHA256_BLOCK_SIZE && len <= INT_MAX.
-        * Further limit len to 65536 to avoid spending too long with preemption
-        * disabled.  (Of course, in practice len is nearly always 4096 anyway.)
-        */
+       /* The assembly requires len >= SHA256_BLOCK_SIZE && len <= INT_MAX. */
        if (static_branch_likely(&have_ce) && len >= SHA256_BLOCK_SIZE &&
-           len <= 65536 && likely(may_use_simd())) {
+           len <= INT_MAX && likely(may_use_simd())) {
                scoped_ksimd()
                        sha256_ce_finup2x(ctx, data1, data2, len, out1, out2);
                kmsan_unpoison_memory(out1, SHA256_DIGEST_SIZE);