git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
crypto/arm64: aes-ccm - Switch to 'ksimd' scoped guard API
author Ard Biesheuvel <ardb@kernel.org>
Wed, 1 Oct 2025 11:43:01 +0000 (13:43 +0200)
committer Ard Biesheuvel <ardb@kernel.org>
Wed, 12 Nov 2025 08:52:01 +0000 (09:52 +0100)
Switch to the more abstract 'scoped_ksimd()' API, which will be modified
in a future patch to transparently allocate a kernel mode FP/SIMD state
buffer on the stack, so that kernel mode FP/SIMD code remains
preemptible in principle, but without the memory overhead of adding 528
bytes to the size of struct task_struct.

Reviewed-by: Eric Biggers <ebiggers@kernel.org>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
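
As a minimal illustration of the conversion pattern applied below (not part of
the diff itself; do_simd_work() is a hypothetical placeholder), the scoped
guard replaces the explicit kernel_neon_begin()/kernel_neon_end() pair with a
C scope, so the FP/SIMD-enabled region ends when the block is left:

        /* Before: explicit bracketing from <asm/neon.h> */
        kernel_neon_begin();
        do_simd_work();        /* hypothetical SIMD routine */
        kernel_neon_end();

        /*
         * After: scoped guard from <asm/simd.h>. The SIMD context is
         * released on scope exit, and a future patch can move the kernel
         * mode FP/SIMD state buffer onto the stack behind this macro
         * without further changes to callers.
         */
        scoped_ksimd() {
                do_simd_work();
        }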
arch/arm64/crypto/aes-ce-ccm-glue.c

index 2eb4e76cabc3a171ae16e06ec783495dd66c627b..c4fd648471f1816faf0695a707f51b22f0b7bbaf 100644 (file)
@@ -8,7 +8,6 @@
  * Author: Ard Biesheuvel <ardb@kernel.org>
  */
 
-#include <asm/neon.h>
 #include <linux/unaligned.h>
 #include <crypto/aes.h>
 #include <crypto/scatterwalk.h>
@@ -16,6 +15,8 @@
 #include <crypto/internal/skcipher.h>
 #include <linux/module.h>
 
+#include <asm/simd.h>
+
 #include "aes-ce-setkey.h"
 
 MODULE_IMPORT_NS("CRYPTO_INTERNAL");
@@ -184,40 +185,38 @@ static int ccm_encrypt(struct aead_request *req)
        if (unlikely(err))
                return err;
 
-       kernel_neon_begin();
-
-       if (req->assoclen)
-               ccm_calculate_auth_mac(req, mac);
-
-       do {
-               u32 tail = walk.nbytes % AES_BLOCK_SIZE;
-               const u8 *src = walk.src.virt.addr;
-               u8 *dst = walk.dst.virt.addr;
-               u8 buf[AES_BLOCK_SIZE];
-               u8 *final_iv = NULL;
-
-               if (walk.nbytes == walk.total) {
-                       tail = 0;
-                       final_iv = orig_iv;
-               }
-
-               if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
-                       src = dst = memcpy(&buf[sizeof(buf) - walk.nbytes],
-                                          src, walk.nbytes);
-
-               ce_aes_ccm_encrypt(dst, src, walk.nbytes - tail,
-                                  ctx->key_enc, num_rounds(ctx),
-                                  mac, walk.iv, final_iv);
-
-               if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
-                       memcpy(walk.dst.virt.addr, dst, walk.nbytes);
-
-               if (walk.nbytes) {
-                       err = skcipher_walk_done(&walk, tail);
-               }
-       } while (walk.nbytes);
-
-       kernel_neon_end();
+       scoped_ksimd() {
+               if (req->assoclen)
+                       ccm_calculate_auth_mac(req, mac);
+
+               do {
+                       u32 tail = walk.nbytes % AES_BLOCK_SIZE;
+                       const u8 *src = walk.src.virt.addr;
+                       u8 *dst = walk.dst.virt.addr;
+                       u8 buf[AES_BLOCK_SIZE];
+                       u8 *final_iv = NULL;
+
+                       if (walk.nbytes == walk.total) {
+                               tail = 0;
+                               final_iv = orig_iv;
+                       }
+
+                       if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
+                               src = dst = memcpy(&buf[sizeof(buf) - walk.nbytes],
+                                                  src, walk.nbytes);
+
+                       ce_aes_ccm_encrypt(dst, src, walk.nbytes - tail,
+                                          ctx->key_enc, num_rounds(ctx),
+                                          mac, walk.iv, final_iv);
+
+                       if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
+                               memcpy(walk.dst.virt.addr, dst, walk.nbytes);
+
+                       if (walk.nbytes) {
+                               err = skcipher_walk_done(&walk, tail);
+                       }
+               } while (walk.nbytes);
+       }
 
        if (unlikely(err))
                return err;
@@ -251,40 +250,38 @@ static int ccm_decrypt(struct aead_request *req)
        if (unlikely(err))
                return err;
 
-       kernel_neon_begin();
-
-       if (req->assoclen)
-               ccm_calculate_auth_mac(req, mac);
-
-       do {
-               u32 tail = walk.nbytes % AES_BLOCK_SIZE;
-               const u8 *src = walk.src.virt.addr;
-               u8 *dst = walk.dst.virt.addr;
-               u8 buf[AES_BLOCK_SIZE];
-               u8 *final_iv = NULL;
-
-               if (walk.nbytes == walk.total) {
-                       tail = 0;
-                       final_iv = orig_iv;
-               }
-
-               if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
-                       src = dst = memcpy(&buf[sizeof(buf) - walk.nbytes],
-                                          src, walk.nbytes);
-
-               ce_aes_ccm_decrypt(dst, src, walk.nbytes - tail,
-                                  ctx->key_enc, num_rounds(ctx),
-                                  mac, walk.iv, final_iv);
-
-               if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
-                       memcpy(walk.dst.virt.addr, dst, walk.nbytes);
-
-               if (walk.nbytes) {
-                       err = skcipher_walk_done(&walk, tail);
-               }
-       } while (walk.nbytes);
-
-       kernel_neon_end();
+       scoped_ksimd() {
+               if (req->assoclen)
+                       ccm_calculate_auth_mac(req, mac);
+
+               do {
+                       u32 tail = walk.nbytes % AES_BLOCK_SIZE;
+                       const u8 *src = walk.src.virt.addr;
+                       u8 *dst = walk.dst.virt.addr;
+                       u8 buf[AES_BLOCK_SIZE];
+                       u8 *final_iv = NULL;
+
+                       if (walk.nbytes == walk.total) {
+                               tail = 0;
+                               final_iv = orig_iv;
+                       }
+
+                       if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
+                               src = dst = memcpy(&buf[sizeof(buf) - walk.nbytes],
+                                                  src, walk.nbytes);
+
+                       ce_aes_ccm_decrypt(dst, src, walk.nbytes - tail,
+                                          ctx->key_enc, num_rounds(ctx),
+                                          mac, walk.iv, final_iv);
+
+                       if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
+                               memcpy(walk.dst.virt.addr, dst, walk.nbytes);
+
+                       if (walk.nbytes) {
+                               err = skcipher_walk_done(&walk, tail);
+                       }
+               } while (walk.nbytes);
+       }
 
        if (unlikely(err))
                return err;