From: Ard Biesheuvel
Date: Wed, 1 Oct 2025 11:43:01 +0000 (+0200)
Subject: crypto/arm64: aes-ccm - Switch to 'ksimd' scoped guard API
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=b044c7e4c709fddf150127e8aac051f139424eea;p=thirdparty%2Fkernel%2Flinux.git

crypto/arm64: aes-ccm - Switch to 'ksimd' scoped guard API

Switch to the more abstract 'scoped_ksimd()' API, which will be modified
in a future patch to transparently allocate a kernel mode FP/SIMD state
buffer on the stack, so that kernel mode FP/SIMD code remains preemptible
in principle, but without the memory overhead that adds 528 bytes to the
size of struct task_struct.

Reviewed-by: Eric Biggers
Reviewed-by: Jonathan Cameron
Acked-by: Catalin Marinas
Signed-off-by: Ard Biesheuvel
---

diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index 2eb4e76cabc3a..c4fd648471f18 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -8,7 +8,6 @@
  * Author: Ard Biesheuvel
  */
 
-#include
 #include
 #include
 #include
@@ -16,6 +15,8 @@
 #include
 #include
 
+#include
+
 #include "aes-ce-setkey.h"
 
 MODULE_IMPORT_NS("CRYPTO_INTERNAL");
@@ -184,40 +185,38 @@ static int ccm_encrypt(struct aead_request *req)
 	if (unlikely(err))
 		return err;
 
-	kernel_neon_begin();
-
-	if (req->assoclen)
-		ccm_calculate_auth_mac(req, mac);
-
-	do {
-		u32 tail = walk.nbytes % AES_BLOCK_SIZE;
-		const u8 *src = walk.src.virt.addr;
-		u8 *dst = walk.dst.virt.addr;
-		u8 buf[AES_BLOCK_SIZE];
-		u8 *final_iv = NULL;
-
-		if (walk.nbytes == walk.total) {
-			tail = 0;
-			final_iv = orig_iv;
-		}
-
-		if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
-			src = dst = memcpy(&buf[sizeof(buf) - walk.nbytes],
-					   src, walk.nbytes);
-
-		ce_aes_ccm_encrypt(dst, src, walk.nbytes - tail,
-				   ctx->key_enc, num_rounds(ctx),
-				   mac, walk.iv, final_iv);
-
-		if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
-			memcpy(walk.dst.virt.addr, dst, walk.nbytes);
-
-		if (walk.nbytes) {
-			err = skcipher_walk_done(&walk, tail);
-		}
-	} while (walk.nbytes);
-
-	kernel_neon_end();
+	scoped_ksimd() {
+		if (req->assoclen)
+			ccm_calculate_auth_mac(req, mac);
+
+		do {
+			u32 tail = walk.nbytes % AES_BLOCK_SIZE;
+			const u8 *src = walk.src.virt.addr;
+			u8 *dst = walk.dst.virt.addr;
+			u8 buf[AES_BLOCK_SIZE];
+			u8 *final_iv = NULL;
+
+			if (walk.nbytes == walk.total) {
+				tail = 0;
+				final_iv = orig_iv;
+			}
+
+			if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
+				src = dst = memcpy(&buf[sizeof(buf) - walk.nbytes],
+						   src, walk.nbytes);
+
+			ce_aes_ccm_encrypt(dst, src, walk.nbytes - tail,
+					   ctx->key_enc, num_rounds(ctx),
+					   mac, walk.iv, final_iv);
+
+			if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
+				memcpy(walk.dst.virt.addr, dst, walk.nbytes);
+
+			if (walk.nbytes) {
+				err = skcipher_walk_done(&walk, tail);
+			}
+		} while (walk.nbytes);
+	}
 
 	if (unlikely(err))
 		return err;
@@ -251,40 +250,38 @@ static int ccm_decrypt(struct aead_request *req)
 	if (unlikely(err))
 		return err;
 
-	kernel_neon_begin();
-
-	if (req->assoclen)
-		ccm_calculate_auth_mac(req, mac);
-
-	do {
-		u32 tail = walk.nbytes % AES_BLOCK_SIZE;
-		const u8 *src = walk.src.virt.addr;
-		u8 *dst = walk.dst.virt.addr;
-		u8 buf[AES_BLOCK_SIZE];
-		u8 *final_iv = NULL;
-
-		if (walk.nbytes == walk.total) {
-			tail = 0;
-			final_iv = orig_iv;
-		}
-
-		if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
-			src = dst = memcpy(&buf[sizeof(buf) - walk.nbytes],
-					   src, walk.nbytes);
-
-		ce_aes_ccm_decrypt(dst, src, walk.nbytes - tail,
-				   ctx->key_enc, num_rounds(ctx),
-				   mac, walk.iv, final_iv);
-
-		if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
-			memcpy(walk.dst.virt.addr, dst, walk.nbytes);
-
-		if (walk.nbytes) {
-			err = skcipher_walk_done(&walk, tail);
-		}
-	} while (walk.nbytes);
-
-	kernel_neon_end();
+	scoped_ksimd() {
+		if (req->assoclen)
+			ccm_calculate_auth_mac(req, mac);
+
+		do {
+			u32 tail = walk.nbytes % AES_BLOCK_SIZE;
+			const u8 *src = walk.src.virt.addr;
+			u8 *dst = walk.dst.virt.addr;
+			u8 buf[AES_BLOCK_SIZE];
+			u8 *final_iv = NULL;
+
+			if (walk.nbytes == walk.total) {
+				tail = 0;
+				final_iv = orig_iv;
+			}
+
+			if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
+				src = dst = memcpy(&buf[sizeof(buf) - walk.nbytes],
+						   src, walk.nbytes);
+
+			ce_aes_ccm_decrypt(dst, src, walk.nbytes - tail,
+					   ctx->key_enc, num_rounds(ctx),
+					   mac, walk.iv, final_iv);
+
+			if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
+				memcpy(walk.dst.virt.addr, dst, walk.nbytes);
+
+			if (walk.nbytes) {
+				err = skcipher_walk_done(&walk, tail);
+			}
+		} while (walk.nbytes);
+	}
 
 	if (unlikely(err))
 		return err;
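
Note on the conversion pattern, for readers unfamiliar with the new guard: each
open-coded kernel_neon_begin()/kernel_neon_end() pair simply becomes a
scoped_ksimd() block enclosing the same code, as in the two hunks above. The
fragment below is a minimal sketch of that pattern in isolation; it is not part
of the patch, my_neon_xor() is a hypothetical NEON helper, and it assumes
scoped_ksimd() behaves as the scoped guard used above (the guarded body runs
with kernel mode FP/SIMD usable, and the state is released when the block is
left on any path). The header that actually provides scoped_ksimd() is the one
added by the include hunk above; its name is not visible in this rendering.

/* Illustrative sketch only -- not part of the patch above. */
#include <linux/types.h>
#include <linux/linkage.h>
#include <asm/neon.h>		/* kernel_neon_begin()/kernel_neon_end() */

asmlinkage void my_neon_xor(u8 *dst, const u8 *src, int len);	/* hypothetical NEON routine */

/* Before: explicit begin/end pair bounding the kernel mode FP/SIMD region. */
static void xor_blocks_old(u8 *dst, const u8 *src, int len)
{
	kernel_neon_begin();
	my_neon_xor(dst, src, len);
	kernel_neon_end();
}

/* After: the scoped guard bounds the region and cannot be left open. */
static void xor_blocks_new(u8 *dst, const u8 *src, int len)
{
	scoped_ksimd() {
		my_neon_xor(dst, src, len);
	}
}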