* Author: Ard Biesheuvel <ardb@kernel.org>
*/
-#include <asm/neon.h>
#include <linux/unaligned.h>
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>
+#include <asm/simd.h>
+
#include "aes-ce-setkey.h"
MODULE_IMPORT_NS("CRYPTO_INTERNAL");
if (unlikely(err))
		return err;

- kernel_neon_begin();
-
- if (req->assoclen)
- ccm_calculate_auth_mac(req, mac);
-
- do {
- u32 tail = walk.nbytes % AES_BLOCK_SIZE;
- const u8 *src = walk.src.virt.addr;
- u8 *dst = walk.dst.virt.addr;
- u8 buf[AES_BLOCK_SIZE];
- u8 *final_iv = NULL;
-
- if (walk.nbytes == walk.total) {
- tail = 0;
- final_iv = orig_iv;
- }
-
- if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
- src = dst = memcpy(&buf[sizeof(buf) - walk.nbytes],
- src, walk.nbytes);
-
- ce_aes_ccm_encrypt(dst, src, walk.nbytes - tail,
- ctx->key_enc, num_rounds(ctx),
- mac, walk.iv, final_iv);
-
- if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
- memcpy(walk.dst.virt.addr, dst, walk.nbytes);
-
- if (walk.nbytes) {
- err = skcipher_walk_done(&walk, tail);
- }
- } while (walk.nbytes);
-
- kernel_neon_end();
+ scoped_ksimd() {
+ if (req->assoclen)
+ ccm_calculate_auth_mac(req, mac);
+
+ do {
+ u32 tail = walk.nbytes % AES_BLOCK_SIZE;
+ const u8 *src = walk.src.virt.addr;
+ u8 *dst = walk.dst.virt.addr;
+ u8 buf[AES_BLOCK_SIZE];
+ u8 *final_iv = NULL;
+
+ if (walk.nbytes == walk.total) {
+ tail = 0;
+ final_iv = orig_iv;
+ }
+
+ if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
+ src = dst = memcpy(&buf[sizeof(buf) - walk.nbytes],
+ src, walk.nbytes);
+
+ ce_aes_ccm_encrypt(dst, src, walk.nbytes - tail,
+ ctx->key_enc, num_rounds(ctx),
+ mac, walk.iv, final_iv);
+
+ if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
+ memcpy(walk.dst.virt.addr, dst, walk.nbytes);
+
+ if (walk.nbytes) {
+ err = skcipher_walk_done(&walk, tail);
+ }
+ } while (walk.nbytes);
+	}

if (unlikely(err))
return err;
if (unlikely(err))
		return err;

- kernel_neon_begin();
-
- if (req->assoclen)
- ccm_calculate_auth_mac(req, mac);
-
- do {
- u32 tail = walk.nbytes % AES_BLOCK_SIZE;
- const u8 *src = walk.src.virt.addr;
- u8 *dst = walk.dst.virt.addr;
- u8 buf[AES_BLOCK_SIZE];
- u8 *final_iv = NULL;
-
- if (walk.nbytes == walk.total) {
- tail = 0;
- final_iv = orig_iv;
- }
-
- if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
- src = dst = memcpy(&buf[sizeof(buf) - walk.nbytes],
- src, walk.nbytes);
-
- ce_aes_ccm_decrypt(dst, src, walk.nbytes - tail,
- ctx->key_enc, num_rounds(ctx),
- mac, walk.iv, final_iv);
-
- if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
- memcpy(walk.dst.virt.addr, dst, walk.nbytes);
-
- if (walk.nbytes) {
- err = skcipher_walk_done(&walk, tail);
- }
- } while (walk.nbytes);
-
- kernel_neon_end();
+ scoped_ksimd() {
+ if (req->assoclen)
+ ccm_calculate_auth_mac(req, mac);
+
+ do {
+ u32 tail = walk.nbytes % AES_BLOCK_SIZE;
+ const u8 *src = walk.src.virt.addr;
+ u8 *dst = walk.dst.virt.addr;
+ u8 buf[AES_BLOCK_SIZE];
+ u8 *final_iv = NULL;
+
+ if (walk.nbytes == walk.total) {
+ tail = 0;
+ final_iv = orig_iv;
+ }
+
+ if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
+ src = dst = memcpy(&buf[sizeof(buf) - walk.nbytes],
+ src, walk.nbytes);
+
+ ce_aes_ccm_decrypt(dst, src, walk.nbytes - tail,
+ ctx->key_enc, num_rounds(ctx),
+ mac, walk.iv, final_iv);
+
+ if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
+ memcpy(walk.dst.virt.addr, dst, walk.nbytes);
+
+ if (walk.nbytes) {
+ err = skcipher_walk_done(&walk, tail);
+ }
+ } while (walk.nbytes);
+	}

if (unlikely(err))
return err;
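
The conversion above replaces manually paired kernel_neon_begin()/kernel_neon_end()
calls with a scoped_ksimd() block, so the SIMD-enabled section is closed on
every path that leaves the scope rather than depending on the matching end
call being reached. As a minimal sketch of the shape of such a construct --
an assumption for illustration only, since the real definition is pulled in
via <asm/simd.h> and may differ -- it can be expressed with the guard
infrastructure from <linux/cleanup.h>:

/*
 * Illustrative sketch, not the kernel's actual definition: declare a
 * "ksimd" guard with no lock object, so entering the guarded scope calls
 * kernel_neon_begin() and leaving it by any path calls kernel_neon_end().
 */
#include <linux/cleanup.h>
#include <asm/neon.h>	/* kernel_neon_begin() / kernel_neon_end() */

DEFINE_LOCK_GUARD_0(ksimd, kernel_neon_begin(), kernel_neon_end())

#define scoped_ksimd()	scoped_guard(ksimd)

/* Usage then mirrors the hunks above: */
static void example(void)
{
	scoped_ksimd() {
		/* NEON-using code runs here */
	}
	/* the guard has called kernel_neon_end() by this point */
}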