git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
lib/crypto: Switch ARM and arm64 to 'ksimd' scoped guard API
author Ard Biesheuvel <ardb@kernel.org>
Wed, 1 Oct 2025 11:29:23 +0000 (13:29 +0200)
committer Ard Biesheuvel <ardb@kernel.org>
Wed, 12 Nov 2025 08:51:13 +0000 (09:51 +0100)
Before modifying the prototypes of kernel_neon_begin() and
kernel_neon_end() to accommodate kernel mode FP/SIMD state buffers
allocated on the stack, move arm64 to the new 'ksimd' scoped guard API,
which encapsulates the calls to those functions.

For symmetry, do the same for 32-bit ARM too.

Reviewed-by: Eric Biggers <ebiggers@kernel.org>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
lib/crypto/arm/chacha.h
lib/crypto/arm/curve25519.h
lib/crypto/arm/poly1305.h
lib/crypto/arm/sha1.h
lib/crypto/arm/sha256.h
lib/crypto/arm/sha512.h
lib/crypto/arm64/chacha.h
lib/crypto/arm64/poly1305.h
lib/crypto/arm64/sha1.h
lib/crypto/arm64/sha256.h
lib/crypto/arm64/sha512.h

index 0cae30f8ee5d1580bdd72f03de3dd9c03ba33b4e..836e49088e98373b74923684669101f59a01a43a 100644 (file)
@@ -12,7 +12,6 @@
 
 #include <asm/cputype.h>
 #include <asm/hwcap.h>
-#include <asm/neon.h>
 #include <asm/simd.h>
 
 asmlinkage void chacha_block_xor_neon(const struct chacha_state *state,
@@ -68,9 +67,8 @@ static void hchacha_block_arch(const struct chacha_state *state,
        if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon_usable()) {
                hchacha_block_arm(state, out, nrounds);
        } else {
-               kernel_neon_begin();
-               hchacha_block_neon(state, out, nrounds);
-               kernel_neon_end();
+               scoped_ksimd()
+                       hchacha_block_neon(state, out, nrounds);
        }
 }
 
@@ -87,9 +85,8 @@ static void chacha_crypt_arch(struct chacha_state *state, u8 *dst,
        do {
                unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
 
-               kernel_neon_begin();
-               chacha_doneon(state, dst, src, todo, nrounds);
-               kernel_neon_end();
+               scoped_ksimd()
+                       chacha_doneon(state, dst, src, todo, nrounds);
 
                bytes -= todo;
                src += todo;
index f6d66494eb8f880b5384fee0a7b68fea16538087..b1a566885e95bdae413c854c5d0e5ebd67b9d49e 100644 (file)
@@ -25,9 +25,8 @@ static void curve25519_arch(u8 out[CURVE25519_KEY_SIZE],
                            const u8 point[CURVE25519_KEY_SIZE])
 {
        if (static_branch_likely(&have_neon) && crypto_simd_usable()) {
-               kernel_neon_begin();
-               curve25519_neon(out, scalar, point);
-               kernel_neon_end();
+               scoped_ksimd()
+                       curve25519_neon(out, scalar, point);
        } else {
                curve25519_generic(out, scalar, point);
        }
index 0021cf368307c752f56d3436290a8080b5adf032..0fe903d8de5563fdd3283c5ca9fe5f4c5c66266d 100644 (file)
@@ -6,7 +6,6 @@
  */
 
 #include <asm/hwcap.h>
-#include <asm/neon.h>
 #include <asm/simd.h>
 #include <linux/cpufeature.h>
 #include <linux/jump_label.h>
@@ -32,9 +31,8 @@ static void poly1305_blocks(struct poly1305_block_state *state, const u8 *src,
                do {
                        unsigned int todo = min_t(unsigned int, len, SZ_4K);
 
-                       kernel_neon_begin();
-                       poly1305_blocks_neon(state, src, todo, padbit);
-                       kernel_neon_end();
+                       scoped_ksimd()
+                               poly1305_blocks_neon(state, src, todo, padbit);
 
                        len -= todo;
                        src += todo;
index 29f8bcad0447c389d69e6a7d3f07edba43837262..3e2d8c7cab9f10a7159b7b07b3ea02d71357df19 100644 (file)
@@ -4,7 +4,6 @@
  *
  * Copyright 2025 Google LLC
  */
-#include <asm/neon.h>
 #include <asm/simd.h>
 
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
@@ -22,12 +21,12 @@ static void sha1_blocks(struct sha1_block_state *state,
 {
        if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
            static_branch_likely(&have_neon) && likely(may_use_simd())) {
-               kernel_neon_begin();
-               if (static_branch_likely(&have_ce))
-                       sha1_ce_transform(state, data, nblocks);
-               else
-                       sha1_transform_neon(state, data, nblocks);
-               kernel_neon_end();
+               scoped_ksimd() {
+                       if (static_branch_likely(&have_ce))
+                               sha1_ce_transform(state, data, nblocks);
+                       else
+                               sha1_transform_neon(state, data, nblocks);
+               }
        } else {
                sha1_block_data_order(state, data, nblocks);
        }
index 7556457b3094b49405592243e872f4a99de3a026..ae7e52dd6e3b6255327f5f2d4f370fa1e66a19d0 100644 (file)
@@ -22,12 +22,12 @@ static void sha256_blocks(struct sha256_block_state *state,
 {
        if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
            static_branch_likely(&have_neon) && likely(may_use_simd())) {
-               kernel_neon_begin();
-               if (static_branch_likely(&have_ce))
-                       sha256_ce_transform(state, data, nblocks);
-               else
-                       sha256_block_data_order_neon(state, data, nblocks);
-               kernel_neon_end();
+               scoped_ksimd() {
+                       if (static_branch_likely(&have_ce))
+                               sha256_ce_transform(state, data, nblocks);
+                       else
+                               sha256_block_data_order_neon(state, data, nblocks);
+               }
        } else {
                sha256_block_data_order(state, data, nblocks);
        }
index d1b485dd275db8b7a16a56612d0b716076d3d40a..ed9bd81d6d78d3f8569d233b599105b1ee3073f6 100644 (file)
@@ -19,9 +19,8 @@ static void sha512_blocks(struct sha512_block_state *state,
 {
        if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
            static_branch_likely(&have_neon) && likely(may_use_simd())) {
-               kernel_neon_begin();
-               sha512_block_data_order_neon(state, data, nblocks);
-               kernel_neon_end();
+               scoped_ksimd()
+                       sha512_block_data_order_neon(state, data, nblocks);
        } else {
                sha512_block_data_order(state, data, nblocks);
        }
index ba6c22d460863302068aabbdd5c800a587f67b07..ca8c6a8b057829bf2651fbb57390c590b4d1f201 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/kernel.h>
 
 #include <asm/hwcap.h>
-#include <asm/neon.h>
 #include <asm/simd.h>
 
 asmlinkage void chacha_block_xor_neon(const struct chacha_state *state,
@@ -65,9 +64,8 @@ static void hchacha_block_arch(const struct chacha_state *state,
        if (!static_branch_likely(&have_neon) || !crypto_simd_usable()) {
                hchacha_block_generic(state, out, nrounds);
        } else {
-               kernel_neon_begin();
-               hchacha_block_neon(state, out, nrounds);
-               kernel_neon_end();
+               scoped_ksimd()
+                       hchacha_block_neon(state, out, nrounds);
        }
 }
 
@@ -81,9 +79,8 @@ static void chacha_crypt_arch(struct chacha_state *state, u8 *dst,
        do {
                unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
 
-               kernel_neon_begin();
-               chacha_doneon(state, dst, src, todo, nrounds);
-               kernel_neon_end();
+               scoped_ksimd()
+                       chacha_doneon(state, dst, src, todo, nrounds);
 
                bytes -= todo;
                src += todo;
index aed5921ccd9a125048b4d176f5d2414a16ed8866..b77669767cd6c3640928e3713e47ba2d9a64b912 100644 (file)
@@ -6,7 +6,6 @@
  */
 
 #include <asm/hwcap.h>
-#include <asm/neon.h>
 #include <asm/simd.h>
 #include <linux/cpufeature.h>
 #include <linux/jump_label.h>
@@ -31,9 +30,8 @@ static void poly1305_blocks(struct poly1305_block_state *state, const u8 *src,
                do {
                        unsigned int todo = min_t(unsigned int, len, SZ_4K);
 
-                       kernel_neon_begin();
-                       poly1305_blocks_neon(state, src, todo, padbit);
-                       kernel_neon_end();
+                       scoped_ksimd()
+                               poly1305_blocks_neon(state, src, todo, padbit);
 
                        len -= todo;
                        src += todo;
index aaef4ebfc5e34a60a04209be7811449d813815d9..bc7071f1be09624af9f925677c539fc7d42ab2c1 100644 (file)
@@ -4,7 +4,6 @@
  *
  * Copyright 2025 Google LLC
  */
-#include <asm/neon.h>
 #include <asm/simd.h>
 #include <linux/cpufeature.h>
 
@@ -20,9 +19,9 @@ static void sha1_blocks(struct sha1_block_state *state,
                do {
                        size_t rem;
 
-                       kernel_neon_begin();
-                       rem = __sha1_ce_transform(state, data, nblocks);
-                       kernel_neon_end();
+                       scoped_ksimd()
+                               rem = __sha1_ce_transform(state, data, nblocks);
+
                        data += (nblocks - rem) * SHA1_BLOCK_SIZE;
                        nblocks = rem;
                } while (nblocks);
index 80d06df27d3a3973d3141d08895dcc0aab41bd82..568dff0f276af4a226d6eb75c44bf068148b9e18 100644 (file)
@@ -4,7 +4,6 @@
  *
  * Copyright 2025 Google LLC
  */
-#include <asm/neon.h>
 #include <asm/simd.h>
 #include <linux/cpufeature.h>
 
@@ -27,17 +26,16 @@ static void sha256_blocks(struct sha256_block_state *state,
                        do {
                                size_t rem;
 
-                               kernel_neon_begin();
-                               rem = __sha256_ce_transform(state,
-                                                           data, nblocks);
-                               kernel_neon_end();
+                               scoped_ksimd()
+                                       rem = __sha256_ce_transform(state, data,
+                                                                   nblocks);
+
                                data += (nblocks - rem) * SHA256_BLOCK_SIZE;
                                nblocks = rem;
                        } while (nblocks);
                } else {
-                       kernel_neon_begin();
-                       sha256_block_neon(state, data, nblocks);
-                       kernel_neon_end();
+                       scoped_ksimd()
+                               sha256_block_neon(state, data, nblocks);
                }
        } else {
                sha256_block_data_order(state, data, nblocks);
@@ -66,9 +64,8 @@ static bool sha256_finup_2x_arch(const struct __sha256_ctx *ctx,
        if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
            static_branch_likely(&have_ce) && len >= SHA256_BLOCK_SIZE &&
            len <= 65536 && likely(may_use_simd())) {
-               kernel_neon_begin();
-               sha256_ce_finup2x(ctx, data1, data2, len, out1, out2);
-               kernel_neon_end();
+               scoped_ksimd()
+                       sha256_ce_finup2x(ctx, data1, data2, len, out1, out2);
                kmsan_unpoison_memory(out1, SHA256_DIGEST_SIZE);
                kmsan_unpoison_memory(out2, SHA256_DIGEST_SIZE);
                return true;
index ddb0d256f73aa38edc465bc03354070415304f5a..7eb7ef04d2687aa7f2742c99a6d4cb8ba44ce577 100644 (file)
@@ -4,7 +4,7 @@
  *
  * Copyright 2025 Google LLC
  */
-#include <asm/neon.h>
+
 #include <asm/simd.h>
 #include <linux/cpufeature.h>
 
@@ -24,9 +24,9 @@ static void sha512_blocks(struct sha512_block_state *state,
                do {
                        size_t rem;
 
-                       kernel_neon_begin();
-                       rem = __sha512_ce_transform(state, data, nblocks);
-                       kernel_neon_end();
+                       scoped_ksimd()
+                               rem = __sha512_ce_transform(state, data, nblocks);
+
                        data += (nblocks - rem) * SHA512_BLOCK_SIZE;
                        nblocks = rem;
                } while (nblocks);