git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
crypto: arm/sha256 - Add simd block function
author: Herbert Xu <herbert@gondor.apana.org.au>
Fri, 2 May 2025 05:31:00 +0000 (13:31 +0800)
committer: Herbert Xu <herbert@gondor.apana.org.au>
Mon, 5 May 2025 10:20:45 +0000 (18:20 +0800)
Add CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD and a SIMD block function
so that the caller can decide whether to use SIMD.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
arch/arm/lib/crypto/Kconfig
arch/arm/lib/crypto/sha256-armv4.pl
arch/arm/lib/crypto/sha256.c

index 9f3ff30f4032868d0c6327396026477f6a53a0f2..d1ad664f0c67469adb5c5d07b3cfb6b984e28924 100644 (file)
@@ -28,3 +28,4 @@ config CRYPTO_SHA256_ARM
        depends on !CPU_V7M
        default CRYPTO_LIB_SHA256
        select CRYPTO_ARCH_HAVE_LIB_SHA256
+       select CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD
index f3a2b54efd4ee39fbeaefc87ffd850e97915233b..8122db7fd5990256eda13d8c2a99b713b41c1cff 100644 (file)
@@ -204,18 +204,18 @@ K256:
 .word  0                               @ terminator
 #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
 .LOPENSSL_armcap:
-.word  OPENSSL_armcap_P-sha256_block_data_order
+.word  OPENSSL_armcap_P-sha256_blocks_arch
 #endif
 .align 5
 
-.global        sha256_block_data_order
-.type  sha256_block_data_order,%function
-sha256_block_data_order:
-.Lsha256_block_data_order:
+.global        sha256_blocks_arch
+.type  sha256_blocks_arch,%function
+sha256_blocks_arch:
+.Lsha256_blocks_arch:
 #if __ARM_ARCH__<7
-       sub     r3,pc,#8                @ sha256_block_data_order
+       sub     r3,pc,#8                @ sha256_blocks_arch
 #else
-       adr     r3,.Lsha256_block_data_order
+       adr     r3,.Lsha256_blocks_arch
 #endif
 #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
        ldr     r12,.LOPENSSL_armcap
@@ -282,7 +282,7 @@ $code.=<<___;
        moveq   pc,lr                   @ be binary compatible with V4, yet
        bx      lr                      @ interoperable with Thumb ISA:-)
 #endif
-.size  sha256_block_data_order,.-sha256_block_data_order
+.size  sha256_blocks_arch,.-sha256_blocks_arch
 ___
 ######################################################################
 # NEON stuff
@@ -470,8 +470,8 @@ sha256_block_data_order_neon:
        stmdb   sp!,{r4-r12,lr}
 
        sub     $H,sp,#16*4+16
-       adr     $Ktbl,.Lsha256_block_data_order
-       sub     $Ktbl,$Ktbl,#.Lsha256_block_data_order-K256
+       adr     $Ktbl,.Lsha256_blocks_arch
+       sub     $Ktbl,$Ktbl,#.Lsha256_blocks_arch-K256
        bic     $H,$H,#15               @ align for 128-bit stores
        mov     $t2,sp
        mov     sp,$H                   @ alloca
index 2c9cfdaaa0691ad334dfeb9fd360cc24368503af..109192e54b0f058ea2f84cb9f633c65a52239311 100644 (file)
@@ -6,12 +6,12 @@
  */
 #include <asm/neon.h>
 #include <crypto/internal/sha2.h>
-#include <crypto/internal/simd.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 
-asmlinkage void sha256_block_data_order(u32 state[SHA256_STATE_WORDS],
-                                       const u8 *data, size_t nblocks);
+asmlinkage void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
+                                  const u8 *data, size_t nblocks);
+EXPORT_SYMBOL_GPL(sha256_blocks_arch);
 asmlinkage void sha256_block_data_order_neon(u32 state[SHA256_STATE_WORDS],
                                             const u8 *data, size_t nblocks);
 asmlinkage void sha256_ce_transform(u32 state[SHA256_STATE_WORDS],
@@ -20,11 +20,11 @@ asmlinkage void sha256_ce_transform(u32 state[SHA256_STATE_WORDS],
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce);
 
-void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
+void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS],
                        const u8 *data, size_t nblocks)
 {
        if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
-           static_branch_likely(&have_neon) && crypto_simd_usable()) {
+           static_branch_likely(&have_neon)) {
                kernel_neon_begin();
                if (static_branch_likely(&have_ce))
                        sha256_ce_transform(state, data, nblocks);
@@ -32,10 +32,10 @@ void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
                        sha256_block_data_order_neon(state, data, nblocks);
                kernel_neon_end();
        } else {
-               sha256_block_data_order(state, data, nblocks);
+               sha256_blocks_arch(state, data, nblocks);
        }
 }
-EXPORT_SYMBOL_GPL(sha256_blocks_arch);
+EXPORT_SYMBOL_GPL(sha256_blocks_simd);
 
 bool sha256_is_arch_optimized(void)
 {