lib/crypto: x86/sha256: Move static_call above kernel-mode FPU section
author Eric Biggers <ebiggers@kernel.org>
Fri, 4 Jul 2025 02:39:57 +0000 (19:39 -0700)
committer Eric Biggers <ebiggers@kernel.org>
Fri, 4 Jul 2025 17:23:55 +0000 (10:23 -0700)
As I did for sha512_blocks(), reorganize x86's sha256_blocks() to be
just a static_call.  To achieve that, for each assembly function add a C
function that handles the kernel-mode FPU section and fallback.  While
this increases total code size slightly, the amount of code actually
executed on a given system does not increase, and it is slightly more
efficient since it eliminates the extra static_key.  It also means the
assembly functions are now called with standard direct calls instead of
static calls, which eliminates the need for ANNOTATE_NOENDBR.

Acked-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20250704023958.73274-2-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
lib/crypto/x86/sha256-avx-asm.S
lib/crypto/x86/sha256-avx2-asm.S
lib/crypto/x86/sha256-ni-asm.S
lib/crypto/x86/sha256-ssse3-asm.S
lib/crypto/x86/sha256.h
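
For reference, the DEFINE_X86_SHA256_FN() wrapper macro added in
sha256.h (the last diff below) expands, for
DEFINE_X86_SHA256_FN(sha256_blocks_ssse3, sha256_transform_ssse3), to
roughly the following; this expansion is shown only for illustration
and is not itself part of the patch:

	/* Prototype for the assembly implementation. */
	asmlinkage void sha256_transform_ssse3(struct sha256_block_state *state,
					       const u8 *data, size_t nblocks);

	/* C wrapper: owns the kernel-mode FPU section and the fallback. */
	static void sha256_blocks_ssse3(struct sha256_block_state *state,
					const u8 *data, size_t nblocks)
	{
		if (likely(crypto_simd_usable())) {
			kernel_fpu_begin();	/* enter kernel-mode FPU section */
			sha256_transform_ssse3(state, data, nblocks);
			kernel_fpu_end();
		} else {
			/* SIMD unusable in this context: use the generic code. */
			sha256_blocks_generic(state, data, nblocks);
		}
	}

Because the assembly function is now reached by an ordinary direct call
from its wrapper, the ANNOTATE_NOENDBR annotations in the assembly files
below can be dropped.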

diff --git a/lib/crypto/x86/sha256-avx-asm.S b/lib/crypto/x86/sha256-avx-asm.S
index 73bcff2b548f4af24408ec05b8d70dc1bbcdee5a..798a7f07fa013bba9aadd8b4c55a7b27bba07e18 100644
@@ -48,7 +48,6 @@
 ########################################################################
 
 #include <linux/linkage.h>
-#include <linux/objtool.h>
 
 ## assume buffers not aligned
 #define    VMOVDQ vmovdqu
@@ -346,8 +345,6 @@ a = TMP_
 ########################################################################
 .text
 SYM_FUNC_START(sha256_transform_avx)
-       ANNOTATE_NOENDBR        # since this is called only via static_call
-
        pushq   %rbx
        pushq   %r12
        pushq   %r13
diff --git a/lib/crypto/x86/sha256-avx2-asm.S b/lib/crypto/x86/sha256-avx2-asm.S
index 45787570387f235167149d7cf7cfd7a62da15511..62a46993359e619cca6b0c2535b71d7af3fc3be6 100644
@@ -49,7 +49,6 @@
 ########################################################################
 
 #include <linux/linkage.h>
-#include <linux/objtool.h>
 
 ## assume buffers not aligned
 #define        VMOVDQ vmovdqu
@@ -523,8 +522,6 @@ STACK_SIZE  = _CTX      + _CTX_SIZE
 ########################################################################
 .text
 SYM_FUNC_START(sha256_transform_rorx)
-       ANNOTATE_NOENDBR        # since this is called only via static_call
-
        pushq   %rbx
        pushq   %r12
        pushq   %r13
diff --git a/lib/crypto/x86/sha256-ni-asm.S b/lib/crypto/x86/sha256-ni-asm.S
index 4af7d22e29e47ee814c6c74884c65b29836d88a0..9ebbacbb9c13b39975ef4ae2006ecee0ef88c4e9 100644
@@ -54,7 +54,6 @@
  */
 
 #include <linux/linkage.h>
-#include <linux/objtool.h>
 
 #define STATE_PTR      %rdi    /* 1st arg */
 #define DATA_PTR       %rsi    /* 2nd arg */
@@ -110,7 +109,6 @@
  */
 .text
 SYM_FUNC_START(sha256_ni_transform)
-       ANNOTATE_NOENDBR        # since this is called only via static_call
 
        shl             $6, NUM_BLKS            /*  convert to bytes */
        jz              .Ldone_hash
diff --git a/lib/crypto/x86/sha256-ssse3-asm.S b/lib/crypto/x86/sha256-ssse3-asm.S
index 407b30adcd37f33d36ac99eef37609f7c971fb18..820fc8bbc29fd2d69d8cf98f4bcabba1da8cb10b 100644
@@ -47,7 +47,6 @@
 ########################################################################
 
 #include <linux/linkage.h>
-#include <linux/objtool.h>
 
 ## assume buffers not aligned
 #define    MOVDQ movdqu
@@ -353,8 +352,6 @@ a = TMP_
 ########################################################################
 .text
 SYM_FUNC_START(sha256_transform_ssse3)
-       ANNOTATE_NOENDBR        # since this is called only via static_call
-
        pushq   %rbx
        pushq   %r12
        pushq   %r13
diff --git a/lib/crypto/x86/sha256.h b/lib/crypto/x86/sha256.h
index 3b5456c222ba655fbaed450eb37bbc835f020b37..669bc06538b67e768b4ea2eb616e5241e608d099 100644
@@ -8,48 +8,50 @@
 #include <crypto/internal/simd.h>
 #include <linux/static_call.h>
 
-asmlinkage void sha256_transform_ssse3(struct sha256_block_state *state,
-                                      const u8 *data, size_t nblocks);
-asmlinkage void sha256_transform_avx(struct sha256_block_state *state,
-                                    const u8 *data, size_t nblocks);
-asmlinkage void sha256_transform_rorx(struct sha256_block_state *state,
-                                     const u8 *data, size_t nblocks);
-asmlinkage void sha256_ni_transform(struct sha256_block_state *state,
-                                   const u8 *data, size_t nblocks);
+DEFINE_STATIC_CALL(sha256_blocks_x86, sha256_blocks_generic);
 
-static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_sha256_x86);
+#define DEFINE_X86_SHA256_FN(c_fn, asm_fn)                                 \
+       asmlinkage void asm_fn(struct sha256_block_state *state,           \
+                              const u8 *data, size_t nblocks);            \
+       static void c_fn(struct sha256_block_state *state, const u8 *data, \
+                        size_t nblocks)                                   \
+       {                                                                  \
+               if (likely(crypto_simd_usable())) {                        \
+                       kernel_fpu_begin();                                \
+                       asm_fn(state, data, nblocks);                      \
+                       kernel_fpu_end();                                  \
+               } else {                                                   \
+                       sha256_blocks_generic(state, data, nblocks);       \
+               }                                                          \
+       }
 
-DEFINE_STATIC_CALL(sha256_blocks_x86, sha256_transform_ssse3);
+DEFINE_X86_SHA256_FN(sha256_blocks_ssse3, sha256_transform_ssse3);
+DEFINE_X86_SHA256_FN(sha256_blocks_avx, sha256_transform_avx);
+DEFINE_X86_SHA256_FN(sha256_blocks_avx2, sha256_transform_rorx);
+DEFINE_X86_SHA256_FN(sha256_blocks_ni, sha256_ni_transform);
 
 static void sha256_blocks(struct sha256_block_state *state,
                          const u8 *data, size_t nblocks)
 {
-       if (static_branch_likely(&have_sha256_x86) && crypto_simd_usable()) {
-               kernel_fpu_begin();
-               static_call(sha256_blocks_x86)(state, data, nblocks);
-               kernel_fpu_end();
-       } else {
-               sha256_blocks_generic(state, data, nblocks);
-       }
+       static_call(sha256_blocks_x86)(state, data, nblocks);
 }
 
 #define sha256_mod_init_arch sha256_mod_init_arch
 static inline void sha256_mod_init_arch(void)
 {
        if (boot_cpu_has(X86_FEATURE_SHA_NI)) {
-               static_call_update(sha256_blocks_x86, sha256_ni_transform);
+               static_call_update(sha256_blocks_x86, sha256_blocks_ni);
        } else if (cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
                                     NULL) &&
                   boot_cpu_has(X86_FEATURE_AVX)) {
                if (boot_cpu_has(X86_FEATURE_AVX2) &&
                    boot_cpu_has(X86_FEATURE_BMI2))
                        static_call_update(sha256_blocks_x86,
-                                          sha256_transform_rorx);
+                                          sha256_blocks_avx2);
                else
                        static_call_update(sha256_blocks_x86,
-                                          sha256_transform_avx);
-       } else if (!boot_cpu_has(X86_FEATURE_SSSE3)) {
-               return;
+                                          sha256_blocks_avx);
+       } else if (boot_cpu_has(X86_FEATURE_SSSE3)) {
+               static_call_update(sha256_blocks_x86, sha256_blocks_ssse3);
        }
-       static_branch_enable(&have_sha256_x86);
 }
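
For readers unfamiliar with the mechanism used above:
DEFINE_STATIC_CALL() declares a patchable call site with a default
target, static_call_update() retargets it (here at init time, based on
CPU feature checks), and static_call() then compiles down to a plain
direct call.  A minimal sketch of the pattern, using hypothetical names
(my_op, my_op_fast, my_entry) that are not part of the kernel:

	#include <linux/init.h>
	#include <linux/static_call.h>
	#include <asm/cpufeature.h>

	static void my_op_generic(int x) { /* portable fallback */ }
	static void my_op_fast(int x)    { /* optimized variant */ }

	/* The call site initially points at the generic implementation. */
	DEFINE_STATIC_CALL(my_op, my_op_generic);

	static int __init my_mod_init(void)
	{
		if (boot_cpu_has(X86_FEATURE_AVX2))	/* example feature check */
			static_call_update(my_op, my_op_fast);
		return 0;
	}

	void my_entry(int x)
	{
		/* Patched direct call: no static_key test, no indirect branch. */
		static_call(my_op)(x);
	}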