crypto: lib/sha256 - Add helpers for block-based shash
author     Herbert Xu <herbert@gondor.apana.org.au>
           Fri, 2 May 2025 05:30:53 +0000 (13:30 +0800)
committer  Herbert Xu <herbert@gondor.apana.org.au>
           Mon, 5 May 2025 10:20:45 +0000 (18:20 +0800)
Add an internal sha256_finup helper and move the finalisation code
from __sha256_final into it.
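
For context, the code being moved implements the standard SHA-256
padding: append a 0x80 byte, zero-fill up to the length field, and
store the message length in bits as a big-endian 64-bit value in the
last eight bytes of the final block. A minimal standalone sketch of
just that padding, compilable outside the kernel (pad_final_block()
is a hypothetical name; the real helper also runs the block function):

    #include <stdint.h>
    #include <string.h>

    #define BLOCK_SIZE  64
    #define BIT_OFFSET  (BLOCK_SIZE - 8)    /* start of the bit count */

    /* buf holds len (< BLOCK_SIZE) partial bytes on entry.  Pads in
     * place; returns how many blocks (1 or 2) remain to be hashed. */
    static int pad_final_block(uint8_t buf[2 * BLOCK_SIZE], size_t len,
                               uint64_t total_bytes)
    {
            uint64_t bits = total_bytes << 3;
            int blocks = 1, i;

            buf[len++] = 0x80;              /* the mandatory 1 bit */
            if (len > BIT_OFFSET)           /* bit count does not fit */
                    blocks = 2;
            memset(&buf[len], 0, blocks * BLOCK_SIZE - len);
            for (i = 0; i < 8; i++)         /* big-endian bit count */
                    buf[blocks * BLOCK_SIZE - 1 - i] = (uint8_t)(bits >> (8 * i));
            return blocks;
    }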

Also add sha256_choose_blocks and CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD
so that the Crypto API can use the SIMD block function unconditionally.
The Crypto API must not be used in hardirq context, so there is no
reason to have a fallback path for hardirqs.
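
Concretely, that means a block-based shash driver built on these
helpers can pass force_simd = true; a hedged sketch of such a call
site (crypto_sha256_finup_simd() is an illustrative wrapper, not part
of this patch):

    static void crypto_sha256_finup_simd(struct crypto_sha256_state *sctx,
                                         u8 buf[SHA256_BLOCK_SIZE], size_t len,
                                         u8 out[SHA256_DIGEST_SIZE])
    {
            /* Never reached from hardirq, so SIMD is always usable. */
            sha256_finup(sctx, buf, len, out, SHA256_DIGEST_SIZE,
                         false /* force_generic */, true /* force_simd */);
    }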

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
include/crypto/internal/sha2.h
lib/crypto/Kconfig
lib/crypto/sha256.c

diff --git a/include/crypto/internal/sha2.h b/include/crypto/internal/sha2.h
index d641c67abcbc3a5af7653ef5b62a9b1afc9e70f7..fff156f66edc31b27228efd9cbb37a25477daa5a 100644
--- a/include/crypto/internal/sha2.h
+++ b/include/crypto/internal/sha2.h
@@ -3,7 +3,12 @@
 #ifndef _CRYPTO_INTERNAL_SHA2_H
 #define _CRYPTO_INTERNAL_SHA2_H
 
+#include <crypto/internal/simd.h>
 #include <crypto/sha2.h>
+#include <linux/compiler_attributes.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
 
 void sha256_update_generic(struct sha256_state *sctx,
                           const u8 *data, size_t len);
@@ -24,5 +29,45 @@ void sha256_blocks_generic(u32 state[SHA256_STATE_WORDS],
                           const u8 *data, size_t nblocks);
 void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
                        const u8 *data, size_t nblocks);
+void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS],
+                       const u8 *data, size_t nblocks);
+
+static inline void sha256_choose_blocks(
+       u32 state[SHA256_STATE_WORDS], const u8 *data, size_t nblocks,
+       bool force_generic, bool force_simd)
+{
+       if (!IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256) || force_generic)
+               sha256_blocks_generic(state, data, nblocks);
+       else if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD) &&
+                (force_simd || crypto_simd_usable()))
+               sha256_blocks_simd(state, data, nblocks);
+       else
+               sha256_blocks_arch(state, data, nblocks);
+}
+
+static __always_inline void sha256_finup(
+       struct crypto_sha256_state *sctx, u8 buf[SHA256_BLOCK_SIZE],
+       size_t len, u8 out[SHA256_DIGEST_SIZE], size_t digest_size,
+       bool force_generic, bool force_simd)
+{
+       const size_t bit_offset = SHA256_BLOCK_SIZE - 8;
+       __be64 *bits = (__be64 *)&buf[bit_offset];
+       int i;
+
+       buf[len++] = 0x80;
+       if (len > bit_offset) {
+               memset(&buf[len], 0, SHA256_BLOCK_SIZE - len);
+               sha256_choose_blocks(sctx->state, buf, 1, force_generic,
+                                    force_simd);
+               len = 0;
+       }
+
+       memset(&buf[len], 0, bit_offset - len);
+       *bits = cpu_to_be64(sctx->count << 3);
+       sha256_choose_blocks(sctx->state, buf, 1, force_generic, force_simd);
+
+       for (i = 0; i < digest_size; i += 4)
+               put_unaligned_be32(sctx->state[i / 4], out + i);
+}
 
 #endif /* _CRYPTO_INTERNAL_SHA2_H */
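
Taken together, sha256_choose_blocks() gives callers three distinct
paths; a hedged summary of the intended call patterns (the callers
named in the comments are illustrative):

    /* Library path: may run anywhere, let crypto_simd_usable() decide. */
    sha256_choose_blocks(state, data, nblocks, false, false);

    /* Purgatory/pre-boot path: force the generic implementation. */
    sha256_choose_blocks(state, data, nblocks, true, false);

    /* Crypto API path: never in hardirq context, safe to force SIMD. */
    sha256_choose_blocks(state, data, nblocks, false, true);
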
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index 6319358b38c204d30e9724313406d3d97069493f..1ec1466108ccdd996fcf4d679ffbd40d54116c4d 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -150,6 +150,14 @@ config CRYPTO_ARCH_HAVE_LIB_SHA256
          Declares whether the architecture provides an arch-specific
          accelerated implementation of the SHA-256 library interface.
 
+config CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD
+       bool
+       help
+         Declares whether the architecture provides an arch-specific
+         accelerated implementation of the SHA-256 library interface
+         that is SIMD-based and therefore not usable in hardirq
+         context.
+
 config CRYPTO_LIB_SHA256_GENERIC
        tristate
        default CRYPTO_LIB_SHA256 if !CRYPTO_ARCH_HAVE_LIB_SHA256
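
An architecture that implements sha256_blocks_simd() would advertise
it by selecting the new symbol alongside the existing one; a
hypothetical Kconfig sketch (the option name is illustrative, not
from this patch):

    config CRYPTO_SHA256_ARCH
            tristate
            select CRYPTO_ARCH_HAVE_LIB_SHA256
            select CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD
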
diff --git a/lib/crypto/sha256.c b/lib/crypto/sha256.c
index 563f09c9f381558162dd1773810dae4a036915e3..2ced29efa181c2116c987aac555de281ef51e489 100644
--- a/lib/crypto/sha256.c
+++ b/lib/crypto/sha256.c
@@ -15,7 +15,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/string.h>
-#include <linux/unaligned.h>
 
 /*
  * If __DISABLE_EXPORTS is defined, then this file is being compiled for a
 #include "sha256-generic.c"
 #endif
 
+static inline bool sha256_purgatory(void)
+{
+       return __is_defined(__DISABLE_EXPORTS);
+}
+
 static inline void sha256_blocks(u32 state[SHA256_STATE_WORDS], const u8 *data,
                                 size_t nblocks, bool force_generic)
 {
-#if IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256) && !defined(__DISABLE_EXPORTS)
-       if (!force_generic)
-               return sha256_blocks_arch(state, data, nblocks);
-#endif
-       sha256_blocks_generic(state, data, nblocks);
+       sha256_choose_blocks(state, data, nblocks,
+                            force_generic || sha256_purgatory(), false);
 }
 
 static inline void __sha256_update(struct sha256_state *sctx, const u8 *data,
@@ -79,25 +80,10 @@ EXPORT_SYMBOL(sha256_update);
 static inline void __sha256_final(struct sha256_state *sctx, u8 *out,
                                  size_t digest_size, bool force_generic)
 {
-       const size_t bit_offset = SHA256_BLOCK_SIZE - sizeof(__be64);
-       __be64 *bits = (__be64 *)&sctx->buf[bit_offset];
        size_t partial = sctx->count % SHA256_BLOCK_SIZE;
-       size_t i;
-
-       sctx->buf[partial++] = 0x80;
-       if (partial > bit_offset) {
-               memset(&sctx->buf[partial], 0, SHA256_BLOCK_SIZE - partial);
-               sha256_blocks(sctx->state, sctx->buf, 1, force_generic);
-               partial = 0;
-       }
-
-       memset(&sctx->buf[partial], 0, bit_offset - partial);
-       *bits = cpu_to_be64(sctx->count << 3);
-       sha256_blocks(sctx->state, sctx->buf, 1, force_generic);
-
-       for (i = 0; i < digest_size; i += 4)
-               put_unaligned_be32(sctx->state[i / 4], out + i);
 
+       sha256_finup(&sctx->ctx, sctx->buf, partial, out, digest_size,
+                    force_generic || sha256_purgatory(), false);
        memzero_explicit(sctx, sizeof(*sctx));
 }
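
With these changes the library entry points are unchanged for users:
__sha256_final() simply delegates the padding to sha256_finup() with
force_simd = false, so crypto_simd_usable() still gates SIMD on the
library path. A hedged sketch of a typical caller (data and len are
supplied by the caller):

    u8 digest[SHA256_DIGEST_SIZE];
    struct sha256_state sctx;

    sha256_init(&sctx);
    sha256_update(&sctx, data, len);
    sha256_final(&sctx, digest);    /* pads, then zeroises sctx */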