git.ipfire.org Git - thirdparty/linux.git/commitdiff
lib/crypto: sha256: Consolidate into single module
author Eric Biggers <ebiggers@kernel.org>
Mon, 30 Jun 2025 16:06:43 +0000 (09:06 -0700)
committer Eric Biggers <ebiggers@kernel.org>
Fri, 4 Jul 2025 17:23:11 +0000 (10:23 -0700)
Consolidate the CPU-based SHA-256 code into a single module, following
what I did with SHA-512:

- Each arch now provides a header file lib/crypto/$(SRCARCH)/sha256.h,
  replacing lib/crypto/$(SRCARCH)/sha256.c.  The header defines
  sha256_blocks() and optionally sha256_mod_init_arch().  It is included
  by lib/crypto/sha256.c, and thus the code gets built into the single
  libsha256 module, with proper inlining and dead code elimination.

- sha256_blocks_generic() is moved from lib/crypto/sha256-generic.c into
  lib/crypto/sha256.c.  It's now a static function marked with
  __maybe_unused, so the compiler automatically eliminates it in any
  cases where it's not used.

- Whether arch-optimized SHA-256 is buildable is now controlled
  centrally by lib/crypto/Kconfig instead of by
  lib/crypto/$(SRCARCH)/Kconfig.  The conditions for enabling it remain
  the same as before, and it remains enabled by default.

- Any additional arch-specific translation units for the optimized
  SHA-256 code (such as assembly files) are now compiled by
  lib/crypto/Makefile instead of lib/crypto/$(SRCARCH)/Makefile.

Acked-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20250630160645.3198-13-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
29 files changed:
arch/mips/cavium-octeon/Kconfig
arch/mips/cavium-octeon/crypto/Makefile
include/crypto/internal/sha2.h [deleted file]
lib/crypto/Kconfig
lib/crypto/Makefile
lib/crypto/arm/Kconfig
lib/crypto/arm/Makefile
lib/crypto/arm/sha256.h [moved from lib/crypto/arm/sha256.c with 64% similarity]
lib/crypto/arm64/Kconfig
lib/crypto/arm64/Makefile
lib/crypto/arm64/sha256.h [moved from lib/crypto/arm64/sha256.c with 67% similarity]
lib/crypto/mips/sha256.h [moved from arch/mips/cavium-octeon/crypto/octeon-sha256.c with 80% similarity]
lib/crypto/powerpc/Kconfig
lib/crypto/powerpc/Makefile
lib/crypto/powerpc/sha256.h [moved from lib/crypto/powerpc/sha256.c with 80% similarity]
lib/crypto/riscv/Kconfig
lib/crypto/riscv/Makefile
lib/crypto/riscv/sha256.h [moved from lib/crypto/riscv/sha256.c with 63% similarity]
lib/crypto/s390/Kconfig
lib/crypto/s390/Makefile
lib/crypto/s390/sha256.h [moved from lib/crypto/s390/sha256.c with 50% similarity]
lib/crypto/sha256-generic.c [deleted file]
lib/crypto/sha256.c
lib/crypto/sparc/Kconfig [deleted file]
lib/crypto/sparc/Makefile [deleted file]
lib/crypto/sparc/sha256.h [moved from lib/crypto/sparc/sha256.c with 62% similarity]
lib/crypto/x86/Kconfig
lib/crypto/x86/Makefile
lib/crypto/x86/sha256.h [moved from lib/crypto/x86/sha256.c with 70% similarity]

index 11f4aa6e80e9b50267f8cd768b456fa0f1ad4ef2..450e979ef5d93723b4c2dca7225d1d4ad74983d8 100644 (file)
@@ -23,12 +23,6 @@ config CAVIUM_OCTEON_CVMSEG_SIZE
          legally range is from zero to 54 cache blocks (i.e. CVMSEG LM is
          between zero and 6192 bytes).
 
-config CRYPTO_SHA256_OCTEON
-       tristate
-       default CRYPTO_LIB_SHA256
-       select CRYPTO_ARCH_HAVE_LIB_SHA256
-       select CRYPTO_LIB_SHA256_GENERIC
-
 endif # CPU_CAVIUM_OCTEON
 
 if CAVIUM_OCTEON_SOC
index 168b19ef7ce89ff12c13046e707353e9f923b053..db428e4b30bce203ec7bb5278a041cc6cbf33645 100644 (file)
@@ -7,4 +7,3 @@ obj-y += octeon-crypto.o
 
 obj-$(CONFIG_CRYPTO_MD5_OCTEON)                += octeon-md5.o
 obj-$(CONFIG_CRYPTO_SHA1_OCTEON)       += octeon-sha1.o
-obj-$(CONFIG_CRYPTO_SHA256_OCTEON)     += octeon-sha256.o
diff --git a/include/crypto/internal/sha2.h b/include/crypto/internal/sha2.h
deleted file mode 100644 (file)
index 7915a3a..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-
-#ifndef _CRYPTO_INTERNAL_SHA2_H
-#define _CRYPTO_INTERNAL_SHA2_H
-
-#include <crypto/sha2.h>
-#include <linux/compiler_attributes.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/unaligned.h>
-
-void sha256_blocks_generic(struct sha256_block_state *state,
-                          const u8 *data, size_t nblocks);
-void sha256_blocks_arch(struct sha256_block_state *state,
-                       const u8 *data, size_t nblocks);
-
-static __always_inline void sha256_choose_blocks(
-       u32 state[SHA256_STATE_WORDS], const u8 *data, size_t nblocks,
-       bool force_generic, bool force_simd)
-{
-       if (!IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256) || force_generic)
-               sha256_blocks_generic((struct sha256_block_state *)state, data, nblocks);
-       else
-               sha256_blocks_arch((struct sha256_block_state *)state, data, nblocks);
-}
-
-static __always_inline void sha256_finup(
-       struct crypto_sha256_state *sctx, u8 buf[SHA256_BLOCK_SIZE],
-       size_t len, u8 out[SHA256_DIGEST_SIZE], size_t digest_size,
-       bool force_generic, bool force_simd)
-{
-       const size_t bit_offset = SHA256_BLOCK_SIZE - 8;
-       __be64 *bits = (__be64 *)&buf[bit_offset];
-       int i;
-
-       buf[len++] = 0x80;
-       if (len > bit_offset) {
-               memset(&buf[len], 0, SHA256_BLOCK_SIZE - len);
-               sha256_choose_blocks(sctx->state, buf, 1, force_generic,
-                                    force_simd);
-               len = 0;
-       }
-
-       memset(&buf[len], 0, bit_offset - len);
-       *bits = cpu_to_be64(sctx->count << 3);
-       sha256_choose_blocks(sctx->state, buf, 1, force_generic, force_simd);
-
-       for (i = 0; i < digest_size; i += 4)
-               put_unaligned_be32(sctx->state[i / 4], out + i);
-}
-
-#endif /* _CRYPTO_INTERNAL_SHA2_H */
index 9bd740475a898f596dd404998fc5baeada6b51dd..3305c69085816f414efa4b10a83c41041a83bb07 100644 (file)
@@ -144,20 +144,17 @@ config CRYPTO_LIB_SHA256
          by either the generic implementation or an arch-specific one, if one
          is available and enabled.
 
-config CRYPTO_ARCH_HAVE_LIB_SHA256
+config CRYPTO_LIB_SHA256_ARCH
        bool
-       help
-         Declares whether the architecture provides an arch-specific
-         accelerated implementation of the SHA-256 library interface.
-
-config CRYPTO_LIB_SHA256_GENERIC
-       tristate
-       default CRYPTO_LIB_SHA256 if !CRYPTO_ARCH_HAVE_LIB_SHA256
-       help
-         This symbol can be selected by arch implementations of the SHA-256
-         library interface that require the generic code as a fallback, e.g.,
-         for SIMD implementations. If no arch specific implementation is
-         enabled, this implementation serves the users of CRYPTO_LIB_SHA256.
+       depends on CRYPTO_LIB_SHA256 && !UML
+       default y if ARM && !CPU_V7M
+       default y if ARM64
+       default y if MIPS && CPU_CAVIUM_OCTEON
+       default y if PPC && SPE
+       default y if RISCV && 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+       default y if S390
+       default y if SPARC64
+       default y if X86_64
 
 config CRYPTO_LIB_SHA512
        tristate
@@ -199,9 +196,6 @@ endif
 if S390
 source "lib/crypto/s390/Kconfig"
 endif
-if SPARC
-source "lib/crypto/sparc/Kconfig"
-endif
 if X86
 source "lib/crypto/x86/Kconfig"
 endif
index 5823137fa5a8cb09a4a62bf395fd133581752529..a887bf103bf055959ab508ad24efc23f892a60aa 100644 (file)
@@ -66,11 +66,39 @@ libpoly1305-generic-y                               += poly1305-generic.o
 obj-$(CONFIG_CRYPTO_LIB_SHA1)                  += libsha1.o
 libsha1-y                                      := sha1.o
 
-obj-$(CONFIG_CRYPTO_LIB_SHA256)                        += libsha256.o
-libsha256-y                                    := sha256.o
+################################################################################
 
-obj-$(CONFIG_CRYPTO_LIB_SHA256_GENERIC)                += libsha256-generic.o
-libsha256-generic-y                            := sha256-generic.o
+obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o
+libsha256-y := sha256.o
+ifeq ($(CONFIG_CRYPTO_LIB_SHA256_ARCH),y)
+CFLAGS_sha256.o += -I$(src)/$(SRCARCH)
+
+ifeq ($(CONFIG_ARM),y)
+libsha256-y += arm/sha256-ce.o arm/sha256-core.o
+$(obj)/arm/sha256-core.S: $(src)/arm/sha256-armv4.pl
+       $(call cmd,perlasm)
+clean-files += arm/sha256-core.S
+AFLAGS_arm/sha256-core.o += $(aflags-thumb2-y)
+endif
+
+ifeq ($(CONFIG_ARM64),y)
+libsha256-y += arm64/sha256-core.o
+$(obj)/arm64/sha256-core.S: $(src)/arm64/sha2-armv8.pl
+       $(call cmd,perlasm_with_args)
+clean-files += arm64/sha256-core.S
+libsha256-$(CONFIG_KERNEL_MODE_NEON) += arm64/sha256-ce.o
+endif
+
+libsha256-$(CONFIG_PPC) += powerpc/sha256-spe-asm.o
+libsha256-$(CONFIG_RISCV) += riscv/sha256-riscv64-zvknha_or_zvknhb-zvkb.o
+libsha256-$(CONFIG_SPARC) += sparc/sha256_asm.o
+libsha256-$(CONFIG_X86) += x86/sha256-ssse3-asm.o \
+                          x86/sha256-avx-asm.o \
+                          x86/sha256-avx2-asm.o \
+                          x86/sha256-ni-asm.o
+endif # CONFIG_CRYPTO_LIB_SHA256_ARCH
+
+################################################################################
 
 obj-$(CONFIG_CRYPTO_LIB_SHA512) += libsha512.o
 libsha512-y := sha512.o
@@ -100,6 +128,8 @@ libsha512-$(CONFIG_X86) += x86/sha512-ssse3-asm.o \
                           x86/sha512-avx2-asm.o
 endif # CONFIG_CRYPTO_LIB_SHA512_ARCH
 
+################################################################################
+
 obj-$(CONFIG_MPILIB) += mpi/
 
 obj-$(CONFIG_CRYPTO_SELFTESTS_FULL)            += simd.o
@@ -113,5 +143,4 @@ obj-$(CONFIG_MIPS) += mips/
 obj-$(CONFIG_PPC) += powerpc/
 obj-$(CONFIG_RISCV) += riscv/
 obj-$(CONFIG_S390) += s390/
-obj-$(CONFIG_SPARC) += sparc/
 obj-$(CONFIG_X86) += x86/
index 9f3ff30f4032868d0c6327396026477f6a53a0f2..e8444fd0aae3036e859ecba0a3bcc3eb5fd7c511 100644 (file)
@@ -22,9 +22,3 @@ config CRYPTO_POLY1305_ARM
        tristate
        default CRYPTO_LIB_POLY1305
        select CRYPTO_ARCH_HAVE_LIB_POLY1305
-
-config CRYPTO_SHA256_ARM
-       tristate
-       depends on !CPU_V7M
-       default CRYPTO_LIB_SHA256
-       select CRYPTO_ARCH_HAVE_LIB_SHA256
index 431f77c3ff6fd5ff8b57683c2fc1823f65868cd5..4c042a4c77ed6e8d6ef4f026b86edc44e68c4e6b 100644 (file)
@@ -10,17 +10,13 @@ chacha-neon-$(CONFIG_KERNEL_MODE_NEON) += chacha-neon-core.o
 obj-$(CONFIG_CRYPTO_POLY1305_ARM) += poly1305-arm.o
 poly1305-arm-y := poly1305-core.o poly1305-glue.o
 
-obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o
-sha256-arm-y := sha256.o sha256-core.o
-sha256-arm-$(CONFIG_KERNEL_MODE_NEON) += sha256-ce.o
-
 quiet_cmd_perl = PERL    $@
       cmd_perl = $(PERL) $(<) > $(@)
 
 $(obj)/%-core.S: $(src)/%-armv4.pl
        $(call cmd,perl)
 
-clean-files += poly1305-core.S sha256-core.S
+clean-files += poly1305-core.S
 
 aflags-thumb2-$(CONFIG_THUMB2_KERNEL)  := -U__thumb2__ -D__thumb2__=1
 
@@ -28,5 +24,3 @@ aflags-thumb2-$(CONFIG_THUMB2_KERNEL)  := -U__thumb2__ -D__thumb2__=1
 poly1305-aflags-$(CONFIG_CPU_V7) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=5
 poly1305-aflags-$(CONFIG_KERNEL_MODE_NEON) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=7
 AFLAGS_poly1305-core.o += $(poly1305-aflags-y) $(aflags-thumb2-y)
-
-AFLAGS_sha256-core.o += $(aflags-thumb2-y)
similarity index 64%
rename from lib/crypto/arm/sha256.c
rename to lib/crypto/arm/sha256.h
index 27181be0aa92ea261a6c5531b0ca003f3e4d7c08..da75cbdc51d413c2efc3143bc7e7c369071fb213 100644 (file)
@@ -1,14 +1,11 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * SHA-256 optimized for ARM
  *
  * Copyright 2025 Google LLC
  */
 #include <asm/neon.h>
-#include <crypto/internal/sha2.h>
 #include <crypto/internal/simd.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
 
 asmlinkage void sha256_block_data_order(struct sha256_block_state *state,
                                        const u8 *data, size_t nblocks);
@@ -20,8 +17,8 @@ asmlinkage void sha256_ce_transform(struct sha256_block_state *state,
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce);
 
-void sha256_blocks_arch(struct sha256_block_state *state,
-                       const u8 *data, size_t nblocks)
+static void sha256_blocks(struct sha256_block_state *state,
+                         const u8 *data, size_t nblocks)
 {
        if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
            static_branch_likely(&have_neon) && crypto_simd_usable()) {
@@ -35,23 +32,15 @@ void sha256_blocks_arch(struct sha256_block_state *state,
                sha256_block_data_order(state, data, nblocks);
        }
 }
-EXPORT_SYMBOL_GPL(sha256_blocks_arch);
 
-static int __init sha256_arm_mod_init(void)
+#ifdef CONFIG_KERNEL_MODE_NEON
+#define sha256_mod_init_arch sha256_mod_init_arch
+static inline void sha256_mod_init_arch(void)
 {
-       if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON)) {
+       if (elf_hwcap & HWCAP_NEON) {
                static_branch_enable(&have_neon);
                if (elf_hwcap2 & HWCAP2_SHA2)
                        static_branch_enable(&have_ce);
        }
-       return 0;
 }
-subsys_initcall(sha256_arm_mod_init);
-
-static void __exit sha256_arm_mod_exit(void)
-{
-}
-module_exit(sha256_arm_mod_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA-256 optimized for ARM");
+#endif /* CONFIG_KERNEL_MODE_NEON */
index 49e57bfdb5b529015f95b36ce1357eeab2caadbb..0b903ef524d8574d3c82268f4219c1b0e593bd60 100644 (file)
@@ -12,8 +12,3 @@ config CRYPTO_POLY1305_NEON
        depends on KERNEL_MODE_NEON
        default CRYPTO_LIB_POLY1305
        select CRYPTO_ARCH_HAVE_LIB_POLY1305
-
-config CRYPTO_SHA256_ARM64
-       tristate
-       default CRYPTO_LIB_SHA256
-       select CRYPTO_ARCH_HAVE_LIB_SHA256
index 946c099037117a685a6407d19e7c11efd2bc627b..6207088397a73e3f3960eac08ab82465f72cd1a2 100644 (file)
@@ -8,17 +8,10 @@ poly1305-neon-y := poly1305-core.o poly1305-glue.o
 AFLAGS_poly1305-core.o += -Dpoly1305_init=poly1305_block_init_arch
 AFLAGS_poly1305-core.o += -Dpoly1305_emit=poly1305_emit_arch
 
-obj-$(CONFIG_CRYPTO_SHA256_ARM64) += sha256-arm64.o
-sha256-arm64-y := sha256.o sha256-core.o
-sha256-arm64-$(CONFIG_KERNEL_MODE_NEON) += sha256-ce.o
-
 quiet_cmd_perlasm = PERLASM $@
       cmd_perlasm = $(PERL) $(<) void $(@)
 
 $(obj)/%-core.S: $(src)/%-armv8.pl
        $(call cmd,perlasm)
 
-$(obj)/sha256-core.S: $(src)/sha2-armv8.pl
-       $(call cmd,perlasm)
-
-clean-files += poly1305-core.S sha256-core.S
+clean-files += poly1305-core.S
similarity index 67%
rename from lib/crypto/arm64/sha256.c
rename to lib/crypto/arm64/sha256.h
index a5a4982767089094e99c382e55fe5dcf825c42a4..a211966c124a967f4b7a4e71669a1ba422bb28b2 100644 (file)
@@ -1,14 +1,12 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * SHA-256 optimized for ARM64
  *
  * Copyright 2025 Google LLC
  */
 #include <asm/neon.h>
-#include <crypto/internal/sha2.h>
 #include <crypto/internal/simd.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/cpufeature.h>
 
 asmlinkage void sha256_block_data_order(struct sha256_block_state *state,
                                        const u8 *data, size_t nblocks);
@@ -20,8 +18,8 @@ asmlinkage size_t __sha256_ce_transform(struct sha256_block_state *state,
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce);
 
-void sha256_blocks_arch(struct sha256_block_state *state,
-                       const u8 *data, size_t nblocks)
+static void sha256_blocks(struct sha256_block_state *state,
+                         const u8 *data, size_t nblocks)
 {
        if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
            static_branch_likely(&have_neon) && crypto_simd_usable()) {
@@ -45,24 +43,15 @@ void sha256_blocks_arch(struct sha256_block_state *state,
                sha256_block_data_order(state, data, nblocks);
        }
 }
-EXPORT_SYMBOL_GPL(sha256_blocks_arch);
 
-static int __init sha256_arm64_mod_init(void)
+#ifdef CONFIG_KERNEL_MODE_NEON
+#define sha256_mod_init_arch sha256_mod_init_arch
+static inline void sha256_mod_init_arch(void)
 {
-       if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
-           cpu_have_named_feature(ASIMD)) {
+       if (cpu_have_named_feature(ASIMD)) {
                static_branch_enable(&have_neon);
                if (cpu_have_named_feature(SHA2))
                        static_branch_enable(&have_ce);
        }
-       return 0;
 }
-subsys_initcall(sha256_arm64_mod_init);
-
-static void __exit sha256_arm64_mod_exit(void)
-{
-}
-module_exit(sha256_arm64_mod_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA-256 optimized for ARM64");
+#endif /* CONFIG_KERNEL_MODE_NEON */
similarity index 80%
rename from arch/mips/cavium-octeon/crypto/octeon-sha256.c
rename to lib/crypto/mips/sha256.h
index c7c67bdc2bd06ff5f5b0b9680bbd5b1d19ea14ea..ccccfd131634b688d949e567ba05789095a4b141 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * SHA-256 Secure Hash Algorithm.
  *
 
 #include <asm/octeon/crypto.h>
 #include <asm/octeon/octeon.h>
-#include <crypto/internal/sha2.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
 
 /*
  * We pass everything as 64-bit. OCTEON can handle misaligned data.
  */
 
-void sha256_blocks_arch(struct sha256_block_state *state,
-                       const u8 *data, size_t nblocks)
+static void sha256_blocks(struct sha256_block_state *state,
+                         const u8 *data, size_t nblocks)
 {
        struct octeon_cop2_state cop2_state;
        u64 *state64 = (u64 *)state;
@@ -59,8 +56,3 @@ void sha256_blocks_arch(struct sha256_block_state *state,
        state64[3] = read_octeon_64bit_hash_dword(3);
        octeon_crypto_disable(&cop2_state, flags);
 }
-EXPORT_SYMBOL_GPL(sha256_blocks_arch);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA-256 Secure Hash Algorithm (OCTEON)");
-MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
index 3f9e1bbd9905b6ef9b06941e3c06f26232418ea8..2eaeb7665a6a0e053504cc45d9f1dcc90ca8372a 100644 (file)
@@ -14,9 +14,3 @@ config CRYPTO_POLY1305_P10
        default CRYPTO_LIB_POLY1305
        select CRYPTO_ARCH_HAVE_LIB_POLY1305
        select CRYPTO_LIB_POLY1305_GENERIC
-
-config CRYPTO_SHA256_PPC_SPE
-       tristate
-       depends on SPE
-       default CRYPTO_LIB_SHA256
-       select CRYPTO_ARCH_HAVE_LIB_SHA256
index 27f231f8e334a7f82edcf8128339470e5f329630..5709ae14258a068c412bc2af61c041e94dc77c0c 100644 (file)
@@ -5,6 +5,3 @@ chacha-p10-crypto-y := chacha-p10-glue.o chacha-p10le-8x.o
 
 obj-$(CONFIG_CRYPTO_POLY1305_P10) += poly1305-p10-crypto.o
 poly1305-p10-crypto-y := poly1305-p10-glue.o poly1305-p10le_64.o
-
-obj-$(CONFIG_CRYPTO_SHA256_PPC_SPE) += sha256-ppc-spe.o
-sha256-ppc-spe-y := sha256.o sha256-spe-asm.o
similarity index 80%
rename from lib/crypto/powerpc/sha256.c
rename to lib/crypto/powerpc/sha256.h
index 14b8adcdcfc2b8c10f594e9bc3ba0efda196a8ac..50d355441c7e18fcf981f722455e65a72af89e4f 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * SHA-256 Secure Hash Algorithm, SPE optimized
  *
@@ -9,9 +9,6 @@
  */
 
 #include <asm/switch_to.h>
-#include <crypto/internal/sha2.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/preempt.h>
 
 /*
@@ -43,8 +40,8 @@ static void spe_end(void)
        preempt_enable();
 }
 
-void sha256_blocks_arch(struct sha256_block_state *state,
-                       const u8 *data, size_t nblocks)
+static void sha256_blocks(struct sha256_block_state *state,
+                         const u8 *data, size_t nblocks)
 {
        do {
                /* cut input data into smaller blocks */
@@ -59,7 +56,3 @@ void sha256_blocks_arch(struct sha256_block_state *state,
                nblocks -= unit;
        } while (nblocks);
 }
-EXPORT_SYMBOL_GPL(sha256_blocks_arch);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA-256 Secure Hash Algorithm, SPE optimized");
index c100571feb7e8331a5349f73eeed3bbc826c69d0..bc7a43f33eb3a3ea12b856addfad3c54a7f73255 100644 (file)
@@ -6,10 +6,3 @@ config CRYPTO_CHACHA_RISCV64
        default CRYPTO_LIB_CHACHA
        select CRYPTO_ARCH_HAVE_LIB_CHACHA
        select CRYPTO_LIB_CHACHA_GENERIC
-
-config CRYPTO_SHA256_RISCV64
-       tristate
-       depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
-       default CRYPTO_LIB_SHA256
-       select CRYPTO_ARCH_HAVE_LIB_SHA256
-       select CRYPTO_LIB_SHA256_GENERIC
index b7cb877a2c07eb76a17552d8ef4526d698990d25..e27b78f317fc8e0fe2b57eb58fd886f20c3cf6d0 100644 (file)
@@ -2,6 +2,3 @@
 
 obj-$(CONFIG_CRYPTO_CHACHA_RISCV64) += chacha-riscv64.o
 chacha-riscv64-y := chacha-riscv64-glue.o chacha-riscv64-zvkb.o
-
-obj-$(CONFIG_CRYPTO_SHA256_RISCV64) += sha256-riscv64.o
-sha256-riscv64-y := sha256.o sha256-riscv64-zvknha_or_zvknhb-zvkb.o
similarity index 63%
rename from lib/crypto/riscv/sha256.c
rename to lib/crypto/riscv/sha256.h
index 01004cb9c6e9efa3d9ba8849597b19942069b64e..c0f79c18f11998ed0450da0b3029bbf404471f43 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * SHA-256 (RISC-V accelerated)
  *
  */
 
 #include <asm/vector.h>
-#include <crypto/internal/sha2.h>
 #include <crypto/internal/simd.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
 
 asmlinkage void
 sha256_transform_zvknha_or_zvknhb_zvkb(struct sha256_block_state *state,
@@ -21,8 +18,8 @@ sha256_transform_zvknha_or_zvknhb_zvkb(struct sha256_block_state *state,
 
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_extensions);
 
-void sha256_blocks_arch(struct sha256_block_state *state,
-                       const u8 *data, size_t nblocks)
+static void sha256_blocks(struct sha256_block_state *state,
+                         const u8 *data, size_t nblocks)
 {
        if (static_branch_likely(&have_extensions) && crypto_simd_usable()) {
                kernel_vector_begin();
@@ -32,9 +29,9 @@ void sha256_blocks_arch(struct sha256_block_state *state,
                sha256_blocks_generic(state, data, nblocks);
        }
 }
-EXPORT_SYMBOL_GPL(sha256_blocks_arch);
 
-static int __init riscv64_sha256_mod_init(void)
+#define sha256_mod_init_arch sha256_mod_init_arch
+static inline void sha256_mod_init_arch(void)
 {
        /* Both zvknha and zvknhb provide the SHA-256 instructions. */
        if ((riscv_isa_extension_available(NULL, ZVKNHA) ||
@@ -42,15 +39,4 @@ static int __init riscv64_sha256_mod_init(void)
            riscv_isa_extension_available(NULL, ZVKB) &&
            riscv_vector_vlen() >= 128)
                static_branch_enable(&have_extensions);
-       return 0;
 }
-subsys_initcall(riscv64_sha256_mod_init);
-
-static void __exit riscv64_sha256_mod_exit(void)
-{
-}
-module_exit(riscv64_sha256_mod_exit);
-
-MODULE_DESCRIPTION("SHA-256 (RISC-V accelerated)");
-MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@vrull.eu>");
-MODULE_LICENSE("GPL");
index e3f855ef4393429d64227efb55e8a89a6a9f8154..069b355fe51aa91047c466f1f4e7c6a65198148a 100644 (file)
@@ -5,9 +5,3 @@ config CRYPTO_CHACHA_S390
        default CRYPTO_LIB_CHACHA
        select CRYPTO_LIB_CHACHA_GENERIC
        select CRYPTO_ARCH_HAVE_LIB_CHACHA
-
-config CRYPTO_SHA256_S390
-       tristate
-       default CRYPTO_LIB_SHA256
-       select CRYPTO_ARCH_HAVE_LIB_SHA256
-       select CRYPTO_LIB_SHA256_GENERIC
index 5df30f1e793074ddff2f02a86c9dfdada8bae436..06c2cf77178ef2dc7520861056564e398aeb61e8 100644 (file)
@@ -2,6 +2,3 @@
 
 obj-$(CONFIG_CRYPTO_CHACHA_S390) += chacha_s390.o
 chacha_s390-y := chacha-glue.o chacha-s390.o
-
-obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256-s390.o
-sha256-s390-y := sha256.o
similarity index 50%
rename from lib/crypto/s390/sha256.c
rename to lib/crypto/s390/sha256.h
index 6ebfd35a5d44c2629da774e8032112126bf3dc09..70a81cbc06b2c28aa7ccfa853d54b48f89df6467 100644 (file)
@@ -1,19 +1,16 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * SHA-256 optimized using the CP Assist for Cryptographic Functions (CPACF)
  *
  * Copyright 2025 Google LLC
  */
 #include <asm/cpacf.h>
-#include <crypto/internal/sha2.h>
 #include <linux/cpufeature.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
 
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_cpacf_sha256);
 
-void sha256_blocks_arch(struct sha256_block_state *state,
-                       const u8 *data, size_t nblocks)
+static void sha256_blocks(struct sha256_block_state *state,
+                         const u8 *data, size_t nblocks)
 {
        if (static_branch_likely(&have_cpacf_sha256))
                cpacf_kimd(CPACF_KIMD_SHA_256, state, data,
@@ -21,21 +18,11 @@ void sha256_blocks_arch(struct sha256_block_state *state,
        else
                sha256_blocks_generic(state, data, nblocks);
 }
-EXPORT_SYMBOL_GPL(sha256_blocks_arch);
 
-static int __init sha256_s390_mod_init(void)
+#define sha256_mod_init_arch sha256_mod_init_arch
+static inline void sha256_mod_init_arch(void)
 {
        if (cpu_have_feature(S390_CPU_FEATURE_MSA) &&
            cpacf_query_func(CPACF_KIMD, CPACF_KIMD_SHA_256))
                static_branch_enable(&have_cpacf_sha256);
-       return 0;
 }
-subsys_initcall(sha256_s390_mod_init);
-
-static void __exit sha256_s390_mod_exit(void)
-{
-}
-module_exit(sha256_s390_mod_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA-256 using the CP Assist for Cryptographic Functions (CPACF)");
diff --git a/lib/crypto/sha256-generic.c b/lib/crypto/sha256-generic.c
deleted file mode 100644 (file)
index 99f9040..0000000
+++ /dev/null
@@ -1,150 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * SHA-256, as specified in
- * http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf
- *
- * SHA-256 code by Jean-Luc Cooke <jlcooke@certainkey.com>.
- *
- * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
- * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- * Copyright (c) 2014 Red Hat Inc.
- */
-
-#include <crypto/internal/sha2.h>
-#include <linux/export.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/unaligned.h>
-
-static const u32 SHA256_K[] = {
-       0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
-       0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
-       0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
-       0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
-       0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
-       0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
-       0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
-       0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
-       0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
-       0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
-       0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
-       0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
-       0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
-       0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
-       0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
-       0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
-};
-
-static inline u32 Ch(u32 x, u32 y, u32 z)
-{
-       return z ^ (x & (y ^ z));
-}
-
-static inline u32 Maj(u32 x, u32 y, u32 z)
-{
-       return (x & y) | (z & (x | y));
-}
-
-#define e0(x)       (ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22))
-#define e1(x)       (ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25))
-#define s0(x)       (ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3))
-#define s1(x)       (ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10))
-
-static inline void LOAD_OP(int I, u32 *W, const u8 *input)
-{
-       W[I] = get_unaligned_be32((__u32 *)input + I);
-}
-
-static inline void BLEND_OP(int I, u32 *W)
-{
-       W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
-}
-
-#define SHA256_ROUND(i, a, b, c, d, e, f, g, h) do {           \
-       u32 t1, t2;                                             \
-       t1 = h + e1(e) + Ch(e, f, g) + SHA256_K[i] + W[i];      \
-       t2 = e0(a) + Maj(a, b, c);                              \
-       d += t1;                                                \
-       h = t1 + t2;                                            \
-} while (0)
-
-static void sha256_block_generic(struct sha256_block_state *state,
-                                const u8 *input, u32 W[64])
-{
-       u32 a, b, c, d, e, f, g, h;
-       int i;
-
-       /* load the input */
-       for (i = 0; i < 16; i += 8) {
-               LOAD_OP(i + 0, W, input);
-               LOAD_OP(i + 1, W, input);
-               LOAD_OP(i + 2, W, input);
-               LOAD_OP(i + 3, W, input);
-               LOAD_OP(i + 4, W, input);
-               LOAD_OP(i + 5, W, input);
-               LOAD_OP(i + 6, W, input);
-               LOAD_OP(i + 7, W, input);
-       }
-
-       /* now blend */
-       for (i = 16; i < 64; i += 8) {
-               BLEND_OP(i + 0, W);
-               BLEND_OP(i + 1, W);
-               BLEND_OP(i + 2, W);
-               BLEND_OP(i + 3, W);
-               BLEND_OP(i + 4, W);
-               BLEND_OP(i + 5, W);
-               BLEND_OP(i + 6, W);
-               BLEND_OP(i + 7, W);
-       }
-
-       /* load the state into our registers */
-       a = state->h[0];
-       b = state->h[1];
-       c = state->h[2];
-       d = state->h[3];
-       e = state->h[4];
-       f = state->h[5];
-       g = state->h[6];
-       h = state->h[7];
-
-       /* now iterate */
-       for (i = 0; i < 64; i += 8) {
-               SHA256_ROUND(i + 0, a, b, c, d, e, f, g, h);
-               SHA256_ROUND(i + 1, h, a, b, c, d, e, f, g);
-               SHA256_ROUND(i + 2, g, h, a, b, c, d, e, f);
-               SHA256_ROUND(i + 3, f, g, h, a, b, c, d, e);
-               SHA256_ROUND(i + 4, e, f, g, h, a, b, c, d);
-               SHA256_ROUND(i + 5, d, e, f, g, h, a, b, c);
-               SHA256_ROUND(i + 6, c, d, e, f, g, h, a, b);
-               SHA256_ROUND(i + 7, b, c, d, e, f, g, h, a);
-       }
-
-       state->h[0] += a;
-       state->h[1] += b;
-       state->h[2] += c;
-       state->h[3] += d;
-       state->h[4] += e;
-       state->h[5] += f;
-       state->h[6] += g;
-       state->h[7] += h;
-}
-
-void sha256_blocks_generic(struct sha256_block_state *state,
-                          const u8 *data, size_t nblocks)
-{
-       u32 W[64];
-
-       do {
-               sha256_block_generic(state, data, W);
-               data += SHA256_BLOCK_SIZE;
-       } while (--nblocks);
-
-       memzero_explicit(W, sizeof(W));
-}
-EXPORT_SYMBOL_GPL(sha256_blocks_generic);
-
-MODULE_DESCRIPTION("SHA-256 Algorithm (generic implementation)");
-MODULE_LICENSE("GPL");
index 12b4b59052c4ae5d74c013eba0fda36c1c48a0c3..68936d5cd7745b473688af374ac1895328c2e794 100644 (file)
@@ -6,15 +6,17 @@
  * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
  * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
  * Copyright (c) 2014 Red Hat Inc.
+ * Copyright 2025 Google LLC
  */
 
 #include <crypto/hmac.h>
 #include <crypto/internal/blockhash.h>
-#include <crypto/internal/sha2.h>
+#include <crypto/sha2.h>
 #include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/string.h>
+#include <linux/unaligned.h>
 #include <linux/wordpart.h>
 
 static const struct sha256_block_state sha224_iv = {
@@ -31,26 +33,128 @@ static const struct sha256_block_state sha256_iv = {
        },
 };
 
-/*
- * If __DISABLE_EXPORTS is defined, then this file is being compiled for a
- * pre-boot environment.  In that case, ignore the kconfig options, pull the
- * generic code into the same translation unit, and use that only.
- */
-#ifdef __DISABLE_EXPORTS
-#include "sha256-generic.c"
-#endif
+static const u32 sha256_K[64] = {
+       0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1,
+       0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
+       0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786,
+       0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
+       0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147,
+       0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
+       0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b,
+       0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
+       0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a,
+       0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
+       0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
+};
+
+#define Ch(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
+#define Maj(x, y, z) (((x) & (y)) | ((z) & ((x) | (y))))
+#define e0(x) (ror32((x), 2) ^ ror32((x), 13) ^ ror32((x), 22))
+#define e1(x) (ror32((x), 6) ^ ror32((x), 11) ^ ror32((x), 25))
+#define s0(x) (ror32((x), 7) ^ ror32((x), 18) ^ ((x) >> 3))
+#define s1(x) (ror32((x), 17) ^ ror32((x), 19) ^ ((x) >> 10))
+
+static inline void LOAD_OP(int I, u32 *W, const u8 *input)
+{
+       W[I] = get_unaligned_be32((__u32 *)input + I);
+}
+
+static inline void BLEND_OP(int I, u32 *W)
+{
+       W[I] = s1(W[I - 2]) + W[I - 7] + s0(W[I - 15]) + W[I - 16];
+}
 
-static inline bool sha256_purgatory(void)
+#define SHA256_ROUND(i, a, b, c, d, e, f, g, h)                    \
+       do {                                                       \
+               u32 t1, t2;                                        \
+               t1 = h + e1(e) + Ch(e, f, g) + sha256_K[i] + W[i]; \
+               t2 = e0(a) + Maj(a, b, c);                         \
+               d += t1;                                           \
+               h = t1 + t2;                                       \
+       } while (0)
+
+static void sha256_block_generic(struct sha256_block_state *state,
+                                const u8 *input, u32 W[64])
 {
-       return __is_defined(__DISABLE_EXPORTS);
+       u32 a, b, c, d, e, f, g, h;
+       int i;
+
+       /* load the input */
+       for (i = 0; i < 16; i += 8) {
+               LOAD_OP(i + 0, W, input);
+               LOAD_OP(i + 1, W, input);
+               LOAD_OP(i + 2, W, input);
+               LOAD_OP(i + 3, W, input);
+               LOAD_OP(i + 4, W, input);
+               LOAD_OP(i + 5, W, input);
+               LOAD_OP(i + 6, W, input);
+               LOAD_OP(i + 7, W, input);
+       }
+
+       /* now blend */
+       for (i = 16; i < 64; i += 8) {
+               BLEND_OP(i + 0, W);
+               BLEND_OP(i + 1, W);
+               BLEND_OP(i + 2, W);
+               BLEND_OP(i + 3, W);
+               BLEND_OP(i + 4, W);
+               BLEND_OP(i + 5, W);
+               BLEND_OP(i + 6, W);
+               BLEND_OP(i + 7, W);
+       }
+
+       /* load the state into our registers */
+       a = state->h[0];
+       b = state->h[1];
+       c = state->h[2];
+       d = state->h[3];
+       e = state->h[4];
+       f = state->h[5];
+       g = state->h[6];
+       h = state->h[7];
+
+       /* now iterate */
+       for (i = 0; i < 64; i += 8) {
+               SHA256_ROUND(i + 0, a, b, c, d, e, f, g, h);
+               SHA256_ROUND(i + 1, h, a, b, c, d, e, f, g);
+               SHA256_ROUND(i + 2, g, h, a, b, c, d, e, f);
+               SHA256_ROUND(i + 3, f, g, h, a, b, c, d, e);
+               SHA256_ROUND(i + 4, e, f, g, h, a, b, c, d);
+               SHA256_ROUND(i + 5, d, e, f, g, h, a, b, c);
+               SHA256_ROUND(i + 6, c, d, e, f, g, h, a, b);
+               SHA256_ROUND(i + 7, b, c, d, e, f, g, h, a);
+       }
+
+       state->h[0] += a;
+       state->h[1] += b;
+       state->h[2] += c;
+       state->h[3] += d;
+       state->h[4] += e;
+       state->h[5] += f;
+       state->h[6] += g;
+       state->h[7] += h;
 }
 
-static inline void sha256_blocks(struct sha256_block_state *state,
-                                const u8 *data, size_t nblocks)
+static void __maybe_unused
+sha256_blocks_generic(struct sha256_block_state *state,
+                     const u8 *data, size_t nblocks)
 {
-       sha256_choose_blocks(state->h, data, nblocks, sha256_purgatory(), false);
+       u32 W[64];
+
+       do {
+               sha256_block_generic(state, data, W);
+               data += SHA256_BLOCK_SIZE;
+       } while (--nblocks);
+
+       memzero_explicit(W, sizeof(W));
 }
 
+#if defined(CONFIG_CRYPTO_LIB_SHA256_ARCH) && !defined(__DISABLE_EXPORTS)
+#include "sha256.h" /* $(SRCARCH)/sha256.h */
+#else
+#define sha256_blocks sha256_blocks_generic
+#endif
+
 static void __sha256_init(struct __sha256_ctx *ctx,
                          const struct sha256_block_state *iv,
                          u64 initial_bytecount)
@@ -273,5 +377,19 @@ void hmac_sha256_usingrawkey(const u8 *raw_key, size_t raw_key_len,
 EXPORT_SYMBOL_GPL(hmac_sha256_usingrawkey);
 #endif /* !__DISABLE_EXPORTS */
 
+#ifdef sha256_mod_init_arch
+static int __init sha256_mod_init(void)
+{
+       sha256_mod_init_arch();
+       return 0;
+}
+subsys_initcall(sha256_mod_init);
+
+static void __exit sha256_mod_exit(void)
+{
+}
+module_exit(sha256_mod_exit);
+#endif
+
 MODULE_DESCRIPTION("SHA-224, SHA-256, HMAC-SHA224, and HMAC-SHA256 library functions");
 MODULE_LICENSE("GPL");
diff --git a/lib/crypto/sparc/Kconfig b/lib/crypto/sparc/Kconfig
deleted file mode 100644 (file)
index e5c3e4d..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-
-config CRYPTO_SHA256_SPARC64
-       tristate
-       depends on SPARC64
-       default CRYPTO_LIB_SHA256
-       select CRYPTO_ARCH_HAVE_LIB_SHA256
-       select CRYPTO_LIB_SHA256_GENERIC
diff --git a/lib/crypto/sparc/Makefile b/lib/crypto/sparc/Makefile
deleted file mode 100644 (file)
index 75ee244..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-
-obj-$(CONFIG_CRYPTO_SHA256_SPARC64) += sha256-sparc64.o
-sha256-sparc64-y := sha256.o sha256_asm.o
similarity index 62%
rename from lib/crypto/sparc/sha256.c
rename to lib/crypto/sparc/sha256.h
index f41c109c1c18d1ff275c296375d380de1bce4e0f..1d10108eb1954396c36a1bd1701ec0151936d48d 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * SHA-256 accelerated using the sparc64 sha256 opcodes
  *
@@ -8,51 +8,36 @@
  * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com>
  */
 
-#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
-
 #include <asm/elf.h>
 #include <asm/opcodes.h>
 #include <asm/pstate.h>
-#include <crypto/internal/sha2.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
 
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_sha256_opcodes);
 
 asmlinkage void sha256_sparc64_transform(struct sha256_block_state *state,
                                         const u8 *data, size_t nblocks);
 
-void sha256_blocks_arch(struct sha256_block_state *state,
-                       const u8 *data, size_t nblocks)
+static void sha256_blocks(struct sha256_block_state *state,
+                         const u8 *data, size_t nblocks)
 {
        if (static_branch_likely(&have_sha256_opcodes))
                sha256_sparc64_transform(state, data, nblocks);
        else
                sha256_blocks_generic(state, data, nblocks);
 }
-EXPORT_SYMBOL_GPL(sha256_blocks_arch);
 
-static int __init sha256_sparc64_mod_init(void)
+#define sha256_mod_init_arch sha256_mod_init_arch
+static inline void sha256_mod_init_arch(void)
 {
        unsigned long cfr;
 
        if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
-               return 0;
+               return;
 
        __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
        if (!(cfr & CFR_SHA256))
-               return 0;
+               return;
 
        static_branch_enable(&have_sha256_opcodes);
        pr_info("Using sparc64 sha256 opcode optimized SHA-256/SHA-224 implementation\n");
-       return 0;
 }
-subsys_initcall(sha256_sparc64_mod_init);
-
-static void __exit sha256_sparc64_mod_exit(void)
-{
-}
-module_exit(sha256_sparc64_mod_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA-256 accelerated using the sparc64 sha256 opcodes");
index e344579db3d855e157d7a14dc73496d0f8dda535..546fe2afe0b51e62623fd38280600724fc6a60d4 100644 (file)
@@ -24,10 +24,3 @@ config CRYPTO_POLY1305_X86_64
        depends on 64BIT
        default CRYPTO_LIB_POLY1305
        select CRYPTO_ARCH_HAVE_LIB_POLY1305
-
-config CRYPTO_SHA256_X86_64
-       tristate
-       depends on 64BIT
-       default CRYPTO_LIB_SHA256
-       select CRYPTO_ARCH_HAVE_LIB_SHA256
-       select CRYPTO_LIB_SHA256_GENERIC
index abceca3d31c01e5838b274589776f28d3ca4cc79..c2ff8c5f1046e2cb78f9bcac15146a0ec0c59cbb 100644 (file)
@@ -10,9 +10,6 @@ obj-$(CONFIG_CRYPTO_POLY1305_X86_64) += poly1305-x86_64.o
 poly1305-x86_64-y := poly1305-x86_64-cryptogams.o poly1305_glue.o
 targets += poly1305-x86_64-cryptogams.S
 
-obj-$(CONFIG_CRYPTO_SHA256_X86_64) += sha256-x86_64.o
-sha256-x86_64-y := sha256.o sha256-ssse3-asm.o sha256-avx-asm.o sha256-avx2-asm.o sha256-ni-asm.o
-
 quiet_cmd_perlasm = PERLASM $@
       cmd_perlasm = $(PERL) $< > $@
 
similarity index 70%
rename from lib/crypto/x86/sha256.c
rename to lib/crypto/x86/sha256.h
index 9ee38d2b3d57287cbdb223b1890479cbba687958..3b5456c222ba655fbaed450eb37bbc835f020b37 100644 (file)
@@ -1,14 +1,11 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * SHA-256 optimized for x86_64
  *
  * Copyright 2025 Google LLC
  */
 #include <asm/fpu/api.h>
-#include <crypto/internal/sha2.h>
 #include <crypto/internal/simd.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/static_call.h>
 
 asmlinkage void sha256_transform_ssse3(struct sha256_block_state *state,
@@ -24,8 +21,8 @@ static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_sha256_x86);
 
 DEFINE_STATIC_CALL(sha256_blocks_x86, sha256_transform_ssse3);
 
-void sha256_blocks_arch(struct sha256_block_state *state,
-                       const u8 *data, size_t nblocks)
+static void sha256_blocks(struct sha256_block_state *state,
+                         const u8 *data, size_t nblocks)
 {
        if (static_branch_likely(&have_sha256_x86) && crypto_simd_usable()) {
                kernel_fpu_begin();
@@ -35,14 +32,14 @@ void sha256_blocks_arch(struct sha256_block_state *state,
                sha256_blocks_generic(state, data, nblocks);
        }
 }
-EXPORT_SYMBOL_GPL(sha256_blocks_arch);
 
-static int __init sha256_x86_mod_init(void)
+#define sha256_mod_init_arch sha256_mod_init_arch
+static inline void sha256_mod_init_arch(void)
 {
        if (boot_cpu_has(X86_FEATURE_SHA_NI)) {
                static_call_update(sha256_blocks_x86, sha256_ni_transform);
-       } else if (cpu_has_xfeatures(XFEATURE_MASK_SSE |
-                                    XFEATURE_MASK_YMM, NULL) &&
+       } else if (cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
+                                    NULL) &&
                   boot_cpu_has(X86_FEATURE_AVX)) {
                if (boot_cpu_has(X86_FEATURE_AVX2) &&
                    boot_cpu_has(X86_FEATURE_BMI2))
@@ -52,17 +49,7 @@ static int __init sha256_x86_mod_init(void)
                        static_call_update(sha256_blocks_x86,
                                           sha256_transform_avx);
        } else if (!boot_cpu_has(X86_FEATURE_SSSE3)) {
-               return 0;
+               return;
        }
        static_branch_enable(&have_sha256_x86);
-       return 0;
 }
-subsys_initcall(sha256_x86_mod_init);
-
-static void __exit sha256_x86_mod_exit(void)
-{
-}
-module_exit(sha256_x86_mod_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA-256 optimized for x86_64");