git.ipfire.org Git - thirdparty/strongswan.git/commitdiff
aesni: Added AES_ECB support
author     Andreas Steffen <andreas.steffen@strongswan.org>
           Thu, 21 Nov 2019 11:58:48 +0000 (12:58 +0100)
committer  Tobias Brunner <tobias@strongswan.org>
           Thu, 28 Nov 2019 16:03:08 +0000 (17:03 +0100)
src/libstrongswan/crypto/crypters/crypter.c
src/libstrongswan/crypto/crypters/crypter.h
src/libstrongswan/crypto/iv/iv_gen.c
src/libstrongswan/plugins/aesni/Makefile.am
src/libstrongswan/plugins/aesni/aesni_ecb.c [new file with mode: 0644]
src/libstrongswan/plugins/aesni/aesni_ecb.h [new file with mode: 0644]
src/libstrongswan/plugins/aesni/aesni_plugin.c
src/libstrongswan/plugins/test_vectors/Makefile.am
src/libstrongswan/plugins/test_vectors/test_vectors.h
src/libstrongswan/plugins/test_vectors/test_vectors/aes_ecb.c [new file with mode: 0644]

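As a usage illustration (not part of the commit), the new transform can be requested through libstrongswan's crypto factory like any other crypter. The sketch below assumes the aesni plugin is loaded and abbreviates error handling; the key and plaintext block are the AES-128 values from NIST SP 800-38A, matching test vector aes_ecb1 further down.

#include <library.h>

static bool ecb_roundtrip(void)
{
	crypter_t *crypter;
	chunk_t key, data, enc, dec;
	bool ok = FALSE;

	/* returns NULL if no plugin provides ENCR_AES_ECB with this key size */
	crypter = lib->crypto->create_crypter(lib->crypto, ENCR_AES_ECB, 16);
	if (!crypter)
	{
		return FALSE;
	}
	key = chunk_from_chars(0x2b,0x7e,0x15,0x16,0x28,0xae,0xd2,0xa6,
						   0xab,0xf7,0x15,0x88,0x09,0xcf,0x4f,0x3c);
	data = chunk_from_chars(0x6b,0xc1,0xbe,0xe2,0x2e,0x40,0x9f,0x96,
							0xe9,0x3d,0x7e,0x11,0x73,0x93,0x17,0x2a);
	/* ECB uses no IV, hence chunk_empty for the iv argument */
	if (crypter->set_key(crypter, key) &&
		crypter->encrypt(crypter, data, chunk_empty, &enc))
	{
		if (crypter->decrypt(crypter, enc, chunk_empty, &dec))
		{
			ok = chunk_equals(dec, data);
			chunk_free(&dec);
		}
		chunk_free(&enc);
	}
	crypter->destroy(crypter);
	return ok;
}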
diff --git a/src/libstrongswan/crypto/crypters/crypter.c b/src/libstrongswan/crypto/crypters/crypter.c
index 9bde663d138dcee8355be64f8f593bc25d4d40e9..90ce1fe4102212e5975280021161719c6fb933e7 100644
@@ -47,13 +47,14 @@ ENUM_NEXT(encryption_algorithm_names, ENCR_CAMELLIA_CBC, ENCR_CHACHA20_POLY1305,
        "CAMELLIA_CCM_12",
        "CAMELLIA_CCM_16",
        "CHACHA20_POLY1305");
-ENUM_NEXT(encryption_algorithm_names, ENCR_UNDEFINED, ENCR_RC2_CBC, ENCR_CHACHA20_POLY1305,
+ENUM_NEXT(encryption_algorithm_names, ENCR_UNDEFINED, ENCR_AES_ECB, ENCR_CHACHA20_POLY1305,
        "UNDEFINED",
        "DES_ECB",
        "SERPENT_CBC",
        "TWOFISH_CBC",
-       "RC2_CBC");
-ENUM_END(encryption_algorithm_names, ENCR_RC2_CBC);
+       "RC2_CBC",
+       "AES_ECB");
+ENUM_END(encryption_algorithm_names, ENCR_AES_ECB);
 
 /*
  * Described in header.
diff --git a/src/libstrongswan/crypto/crypters/crypter.h b/src/libstrongswan/crypto/crypters/crypter.h
index 5ffcac25373ec87dbc74a7e249091bc80ea0c278..a74352c96d9e6669da2cef87b78ae71ad1ade579 100644
@@ -64,6 +64,7 @@ enum encryption_algorithm_t {
        ENCR_TWOFISH_CBC =      1027,
        /* see macros below to handle RC2 (effective) key length */
        ENCR_RC2_CBC =          1028,
+       ENCR_AES_ECB =          1029,
 };
 
 #define DES_BLOCK_SIZE                  8
diff --git a/src/libstrongswan/crypto/iv/iv_gen.c b/src/libstrongswan/crypto/iv/iv_gen.c
index c6efe08d0b9b5db217adfe2cc18b85d10bf19d32..f7c35e3f9bf1f47780039c7248affb565bffe6ca 100644
@@ -61,6 +61,7 @@ iv_gen_t* iv_gen_create_for_alg(encryption_algorithm_t alg)
                case ENCR_DES_ECB:
                case ENCR_DES_IV32:
                case ENCR_DES_IV64:
+               case ENCR_AES_ECB:
                        break;
        }
        return NULL;
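Since ECB operates on each block independently and takes no IV, ENCR_AES_ECB joins the algorithms for which iv_gen_create_for_alg() yields no generator; the new crypter correspondingly reports a zero-length IV. A minimal sketch of what a caller observes (illustration only, assuming the aesni plugin is loaded):

#include <library.h>
#include <crypto/iv/iv_gen.h>

static bool ecb_has_no_iv(void)
{
	/* falls through to the IV-less cases above and returns NULL */
	return iv_gen_create_for_alg(ENCR_AES_ECB) == NULL;
	/* matching behaviour: the ECB crypter's get_iv_size() returns 0 */
}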
diff --git a/src/libstrongswan/plugins/aesni/Makefile.am b/src/libstrongswan/plugins/aesni/Makefile.am
index 2fe85c66c44512549654aa4bf8b227169d012999..0faf90f62d9c4a3cba7e6bbf1ea21039671c4c8c 100644
@@ -16,6 +16,7 @@ endif
 libstrongswan_aesni_la_SOURCES = \
        aesni_key.h aesni_key.c \
        aesni_cbc.h aesni_cbc.c \
+       aesni_ecb.h aesni_ecb.c \
        aesni_ctr.h aesni_ctr.c \
        aesni_ccm.h aesni_ccm.c \
        aesni_gcm.h aesni_gcm.c \
diff --git a/src/libstrongswan/plugins/aesni/aesni_ecb.c b/src/libstrongswan/plugins/aesni/aesni_ecb.c
new file mode 100644
index 0000000..b1b0249
--- /dev/null
@@ -0,0 +1,836 @@
+/*
+ * Copyright (C) 2015 Martin Willi
+ * Copyright (C) 2015 revosec AG
+ *
+ * Copyright (C) 2019 Andreas Steffen
+ * HSR Hochschule fuer Technik Rapperswil
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.  See <http://www.fsf.org/copyleft/gpl.txt>.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include "aesni_ecb.h"
+#include "aesni_key.h"
+
+/**
+ * Pipeline parallelism we use for ECB encryption/decryption
+ */
+#define ECB_PARALLELISM 4
+
+typedef struct private_aesni_ecb_t private_aesni_ecb_t;
+
+/**
+ * ECB en/decryption method type
+ */
+typedef void (*aesni_ecb_fn_t)(aesni_key_t*, u_int, u_char*, u_char*);
+
+/**
+ * Private data of an aesni_ecb_t object.
+ */
+struct private_aesni_ecb_t {
+
+       /**
+        * Public aesni_ecb_t interface.
+        */
+       aesni_ecb_t public;
+
+       /**
+        * Key size
+        */
+       u_int key_size;
+
+       /**
+        * Encryption key schedule
+        */
+       aesni_key_t *ekey;
+
+       /**
+        * Decryption key schedule
+        */
+       aesni_key_t *dkey;
+
+       /**
+        * Encryption method
+        */
+       aesni_ecb_fn_t encrypt;
+
+       /**
+        * Decryption method
+        */
+       aesni_ecb_fn_t decrypt;
+};
+
+/**
+ * AES-128 ECB encryption
+ */
+static void encrypt_ecb128(aesni_key_t *key, u_int blocks, u_char *in,
+                                                  u_char *out)
+{
+       __m128i *ks, *bi, *bo;
+       __m128i t1, t2, t3, t4;
+       u_int i, pblocks;
+
+       ks = key->schedule;
+       bi = (__m128i*)in;
+       bo = (__m128i*)out;
+       pblocks = blocks - (blocks % ECB_PARALLELISM);
+
+       for (i = 0; i < pblocks; i += ECB_PARALLELISM)
+       {
+               t1 = _mm_loadu_si128(bi + i + 0);
+               t2 = _mm_loadu_si128(bi + i + 1);
+               t3 = _mm_loadu_si128(bi + i + 2);
+               t4 = _mm_loadu_si128(bi + i + 3);
+
+               t1 = _mm_xor_si128(t1, ks[0]);
+               t2 = _mm_xor_si128(t2, ks[0]);
+               t3 = _mm_xor_si128(t3, ks[0]);
+               t4 = _mm_xor_si128(t4, ks[0]);
+
+               t1 = _mm_aesenc_si128(t1, ks[1]);
+               t2 = _mm_aesenc_si128(t2, ks[1]);
+               t3 = _mm_aesenc_si128(t3, ks[1]);
+               t4 = _mm_aesenc_si128(t4, ks[1]);
+               t1 = _mm_aesenc_si128(t1, ks[2]);
+               t2 = _mm_aesenc_si128(t2, ks[2]);
+               t3 = _mm_aesenc_si128(t3, ks[2]);
+               t4 = _mm_aesenc_si128(t4, ks[2]);
+               t1 = _mm_aesenc_si128(t1, ks[3]);
+               t2 = _mm_aesenc_si128(t2, ks[3]);
+               t3 = _mm_aesenc_si128(t3, ks[3]);
+               t4 = _mm_aesenc_si128(t4, ks[3]);
+               t1 = _mm_aesenc_si128(t1, ks[4]);
+               t2 = _mm_aesenc_si128(t2, ks[4]);
+               t3 = _mm_aesenc_si128(t3, ks[4]);
+               t4 = _mm_aesenc_si128(t4, ks[4]);
+               t1 = _mm_aesenc_si128(t1, ks[5]);
+               t2 = _mm_aesenc_si128(t2, ks[5]);
+               t3 = _mm_aesenc_si128(t3, ks[5]);
+               t4 = _mm_aesenc_si128(t4, ks[5]);
+               t1 = _mm_aesenc_si128(t1, ks[6]);
+               t2 = _mm_aesenc_si128(t2, ks[6]);
+               t3 = _mm_aesenc_si128(t3, ks[6]);
+               t4 = _mm_aesenc_si128(t4, ks[6]);
+               t1 = _mm_aesenc_si128(t1, ks[7]);
+               t2 = _mm_aesenc_si128(t2, ks[7]);
+               t3 = _mm_aesenc_si128(t3, ks[7]);
+               t4 = _mm_aesenc_si128(t4, ks[7]);
+               t1 = _mm_aesenc_si128(t1, ks[8]);
+               t2 = _mm_aesenc_si128(t2, ks[8]);
+               t3 = _mm_aesenc_si128(t3, ks[8]);
+               t4 = _mm_aesenc_si128(t4, ks[8]);
+               t1 = _mm_aesenc_si128(t1, ks[9]);
+               t2 = _mm_aesenc_si128(t2, ks[9]);
+               t3 = _mm_aesenc_si128(t3, ks[9]);
+               t4 = _mm_aesenc_si128(t4, ks[9]);
+
+               t1 = _mm_aesenclast_si128(t1, ks[10]);
+               t2 = _mm_aesenclast_si128(t2, ks[10]);
+               t3 = _mm_aesenclast_si128(t3, ks[10]);
+               t4 = _mm_aesenclast_si128(t4, ks[10]);
+
+               _mm_storeu_si128(bo + i + 0, t1);
+               _mm_storeu_si128(bo + i + 1, t2);
+               _mm_storeu_si128(bo + i + 2, t3);
+               _mm_storeu_si128(bo + i + 3, t4);
+       }
+
+       for (i = pblocks; i < blocks; i++)
+       {
+               t1 = _mm_loadu_si128(bi + i);
+               t1 = _mm_xor_si128(t1, ks[0]);
+
+               t1 = _mm_aesenc_si128(t1, ks[1]);
+               t1 = _mm_aesenc_si128(t1, ks[2]);
+               t1 = _mm_aesenc_si128(t1, ks[3]);
+               t1 = _mm_aesenc_si128(t1, ks[4]);
+               t1 = _mm_aesenc_si128(t1, ks[5]);
+               t1 = _mm_aesenc_si128(t1, ks[6]);
+               t1 = _mm_aesenc_si128(t1, ks[7]);
+               t1 = _mm_aesenc_si128(t1, ks[8]);
+               t1 = _mm_aesenc_si128(t1, ks[9]);
+
+               t1 = _mm_aesenclast_si128(t1, ks[10]);
+               _mm_storeu_si128(bo + i, t1);
+       }
+}
+
+/**
+ * AES-128 ECB decryption
+ */
+static void decrypt_ecb128(aesni_key_t *key, u_int blocks, u_char *in,
+                                                  u_char *out)
+{
+       __m128i *ks, *bi, *bo;
+       __m128i t1, t2, t3, t4;
+       u_int i, pblocks;
+
+       ks = key->schedule;
+       bi = (__m128i*)in;
+       bo = (__m128i*)out;
+       pblocks = blocks - (blocks % ECB_PARALLELISM);
+
+       for (i = 0; i < pblocks; i += ECB_PARALLELISM)
+       {
+               t1 = _mm_loadu_si128(bi + i + 0);
+               t2 = _mm_loadu_si128(bi + i + 1);
+               t3 = _mm_loadu_si128(bi + i + 2);
+               t4 = _mm_loadu_si128(bi + i + 3);
+
+               t1 = _mm_xor_si128(t1, ks[0]);
+               t2 = _mm_xor_si128(t2, ks[0]);
+               t3 = _mm_xor_si128(t3, ks[0]);
+               t4 = _mm_xor_si128(t4, ks[0]);
+
+               t1 = _mm_aesdec_si128(t1, ks[1]);
+               t2 = _mm_aesdec_si128(t2, ks[1]);
+               t3 = _mm_aesdec_si128(t3, ks[1]);
+               t4 = _mm_aesdec_si128(t4, ks[1]);
+               t1 = _mm_aesdec_si128(t1, ks[2]);
+               t2 = _mm_aesdec_si128(t2, ks[2]);
+               t3 = _mm_aesdec_si128(t3, ks[2]);
+               t4 = _mm_aesdec_si128(t4, ks[2]);
+               t1 = _mm_aesdec_si128(t1, ks[3]);
+               t2 = _mm_aesdec_si128(t2, ks[3]);
+               t3 = _mm_aesdec_si128(t3, ks[3]);
+               t4 = _mm_aesdec_si128(t4, ks[3]);
+               t1 = _mm_aesdec_si128(t1, ks[4]);
+               t2 = _mm_aesdec_si128(t2, ks[4]);
+               t3 = _mm_aesdec_si128(t3, ks[4]);
+               t4 = _mm_aesdec_si128(t4, ks[4]);
+               t1 = _mm_aesdec_si128(t1, ks[5]);
+               t2 = _mm_aesdec_si128(t2, ks[5]);
+               t3 = _mm_aesdec_si128(t3, ks[5]);
+               t4 = _mm_aesdec_si128(t4, ks[5]);
+               t1 = _mm_aesdec_si128(t1, ks[6]);
+               t2 = _mm_aesdec_si128(t2, ks[6]);
+               t3 = _mm_aesdec_si128(t3, ks[6]);
+               t4 = _mm_aesdec_si128(t4, ks[6]);
+               t1 = _mm_aesdec_si128(t1, ks[7]);
+               t2 = _mm_aesdec_si128(t2, ks[7]);
+               t3 = _mm_aesdec_si128(t3, ks[7]);
+               t4 = _mm_aesdec_si128(t4, ks[7]);
+               t1 = _mm_aesdec_si128(t1, ks[8]);
+               t2 = _mm_aesdec_si128(t2, ks[8]);
+               t3 = _mm_aesdec_si128(t3, ks[8]);
+               t4 = _mm_aesdec_si128(t4, ks[8]);
+               t1 = _mm_aesdec_si128(t1, ks[9]);
+               t2 = _mm_aesdec_si128(t2, ks[9]);
+               t3 = _mm_aesdec_si128(t3, ks[9]);
+               t4 = _mm_aesdec_si128(t4, ks[9]);
+
+               t1 = _mm_aesdeclast_si128(t1, ks[10]);
+               t2 = _mm_aesdeclast_si128(t2, ks[10]);
+               t3 = _mm_aesdeclast_si128(t3, ks[10]);
+               t4 = _mm_aesdeclast_si128(t4, ks[10]);
+
+               _mm_storeu_si128(bo + i + 0, t1);
+               _mm_storeu_si128(bo + i + 1, t2);
+               _mm_storeu_si128(bo + i + 2, t3);
+               _mm_storeu_si128(bo + i + 3, t4);
+       }
+
+       for (i = pblocks; i < blocks; i++)
+       {
+               t1 = _mm_loadu_si128(bi + i);
+               t1 = _mm_xor_si128(t1, ks[0]);
+
+               t1 = _mm_aesdec_si128(t1, ks[1]);
+               t1 = _mm_aesdec_si128(t1, ks[2]);
+               t1 = _mm_aesdec_si128(t1, ks[3]);
+               t1 = _mm_aesdec_si128(t1, ks[4]);
+               t1 = _mm_aesdec_si128(t1, ks[5]);
+               t1 = _mm_aesdec_si128(t1, ks[6]);
+               t1 = _mm_aesdec_si128(t1, ks[7]);
+               t1 = _mm_aesdec_si128(t1, ks[8]);
+               t1 = _mm_aesdec_si128(t1, ks[9]);
+
+               t1 = _mm_aesdeclast_si128(t1, ks[10]);
+               _mm_storeu_si128(bo + i, t1);
+       }
+}
+
+/**
+ * AES-192 ECB encryption
+ */
+static void encrypt_ecb192(aesni_key_t *key, u_int blocks, u_char *in,
+                                                  u_char *out)
+{
+       __m128i *ks, *bi, *bo;
+       __m128i t1, t2, t3, t4;
+       u_int i, pblocks;
+
+       ks = key->schedule;
+       bi = (__m128i*)in;
+       bo = (__m128i*)out;
+       pblocks = blocks - (blocks % ECB_PARALLELISM);
+
+       for (i = 0; i < pblocks; i += ECB_PARALLELISM)
+       {
+               t1 = _mm_loadu_si128(bi + i + 0);
+               t2 = _mm_loadu_si128(bi + i + 1);
+               t3 = _mm_loadu_si128(bi + i + 2);
+               t4 = _mm_loadu_si128(bi + i + 3);
+
+               t1 = _mm_xor_si128(t1, ks[0]);
+               t2 = _mm_xor_si128(t2, ks[0]);
+               t3 = _mm_xor_si128(t3, ks[0]);
+               t4 = _mm_xor_si128(t4, ks[0]);
+
+               t1 = _mm_aesenc_si128(t1, ks[1]);
+               t2 = _mm_aesenc_si128(t2, ks[1]);
+               t3 = _mm_aesenc_si128(t3, ks[1]);
+               t4 = _mm_aesenc_si128(t4, ks[1]);
+               t1 = _mm_aesenc_si128(t1, ks[2]);
+               t2 = _mm_aesenc_si128(t2, ks[2]);
+               t3 = _mm_aesenc_si128(t3, ks[2]);
+               t4 = _mm_aesenc_si128(t4, ks[2]);
+               t1 = _mm_aesenc_si128(t1, ks[3]);
+               t2 = _mm_aesenc_si128(t2, ks[3]);
+               t3 = _mm_aesenc_si128(t3, ks[3]);
+               t4 = _mm_aesenc_si128(t4, ks[3]);
+               t1 = _mm_aesenc_si128(t1, ks[4]);
+               t2 = _mm_aesenc_si128(t2, ks[4]);
+               t3 = _mm_aesenc_si128(t3, ks[4]);
+               t4 = _mm_aesenc_si128(t4, ks[4]);
+               t1 = _mm_aesenc_si128(t1, ks[5]);
+               t2 = _mm_aesenc_si128(t2, ks[5]);
+               t3 = _mm_aesenc_si128(t3, ks[5]);
+               t4 = _mm_aesenc_si128(t4, ks[5]);
+               t1 = _mm_aesenc_si128(t1, ks[6]);
+               t2 = _mm_aesenc_si128(t2, ks[6]);
+               t3 = _mm_aesenc_si128(t3, ks[6]);
+               t4 = _mm_aesenc_si128(t4, ks[6]);
+               t1 = _mm_aesenc_si128(t1, ks[7]);
+               t2 = _mm_aesenc_si128(t2, ks[7]);
+               t3 = _mm_aesenc_si128(t3, ks[7]);
+               t4 = _mm_aesenc_si128(t4, ks[7]);
+               t1 = _mm_aesenc_si128(t1, ks[8]);
+               t2 = _mm_aesenc_si128(t2, ks[8]);
+               t3 = _mm_aesenc_si128(t3, ks[8]);
+               t4 = _mm_aesenc_si128(t4, ks[8]);
+               t1 = _mm_aesenc_si128(t1, ks[9]);
+               t2 = _mm_aesenc_si128(t2, ks[9]);
+               t3 = _mm_aesenc_si128(t3, ks[9]);
+               t4 = _mm_aesenc_si128(t4, ks[9]);
+               t1 = _mm_aesenc_si128(t1, ks[10]);
+               t2 = _mm_aesenc_si128(t2, ks[10]);
+               t3 = _mm_aesenc_si128(t3, ks[10]);
+               t4 = _mm_aesenc_si128(t4, ks[10]);
+               t1 = _mm_aesenc_si128(t1, ks[11]);
+               t2 = _mm_aesenc_si128(t2, ks[11]);
+               t3 = _mm_aesenc_si128(t3, ks[11]);
+               t4 = _mm_aesenc_si128(t4, ks[11]);
+
+               t1 = _mm_aesenclast_si128(t1, ks[12]);
+               t2 = _mm_aesenclast_si128(t2, ks[12]);
+               t3 = _mm_aesenclast_si128(t3, ks[12]);
+               t4 = _mm_aesenclast_si128(t4, ks[12]);
+
+               _mm_storeu_si128(bo + i + 0, t1);
+               _mm_storeu_si128(bo + i + 1, t2);
+               _mm_storeu_si128(bo + i + 2, t3);
+               _mm_storeu_si128(bo + i + 3, t4);
+       }
+
+       for (i = pblocks; i < blocks; i++)
+       {
+               t1 = _mm_loadu_si128(bi + i);
+               t1 = _mm_xor_si128(t1, ks[0]);
+
+               t1 = _mm_aesenc_si128(t1, ks[1]);
+               t1 = _mm_aesenc_si128(t1, ks[2]);
+               t1 = _mm_aesenc_si128(t1, ks[3]);
+               t1 = _mm_aesenc_si128(t1, ks[4]);
+               t1 = _mm_aesenc_si128(t1, ks[5]);
+               t1 = _mm_aesenc_si128(t1, ks[6]);
+               t1 = _mm_aesenc_si128(t1, ks[7]);
+               t1 = _mm_aesenc_si128(t1, ks[8]);
+               t1 = _mm_aesenc_si128(t1, ks[9]);
+               t1 = _mm_aesenc_si128(t1, ks[10]);
+               t1 = _mm_aesenc_si128(t1, ks[11]);
+
+               t1 = _mm_aesenclast_si128(t1, ks[12]);
+               _mm_storeu_si128(bo + i, t1);
+       }
+}
+
+/**
+ * AES-192 ECB decryption
+ */
+static void decrypt_ecb192(aesni_key_t *key, u_int blocks, u_char *in,
+                                                  u_char *out)
+{
+       __m128i *ks, *bi, *bo;
+       __m128i t1, t2, t3, t4;
+       u_int i, pblocks;
+
+       ks = key->schedule;
+       bi = (__m128i*)in;
+       bo = (__m128i*)out;
+       pblocks = blocks - (blocks % ECB_PARALLELISM);
+
+       for (i = 0; i < pblocks; i += ECB_PARALLELISM)
+       {
+               t1 = _mm_loadu_si128(bi + i + 0);
+               t2 = _mm_loadu_si128(bi + i + 1);
+               t3 = _mm_loadu_si128(bi + i + 2);
+               t4 = _mm_loadu_si128(bi + i + 3);
+
+               t1 = _mm_xor_si128(t1, ks[0]);
+               t2 = _mm_xor_si128(t2, ks[0]);
+               t3 = _mm_xor_si128(t3, ks[0]);
+               t4 = _mm_xor_si128(t4, ks[0]);
+
+               t1 = _mm_aesdec_si128(t1, ks[1]);
+               t2 = _mm_aesdec_si128(t2, ks[1]);
+               t3 = _mm_aesdec_si128(t3, ks[1]);
+               t4 = _mm_aesdec_si128(t4, ks[1]);
+               t1 = _mm_aesdec_si128(t1, ks[2]);
+               t2 = _mm_aesdec_si128(t2, ks[2]);
+               t3 = _mm_aesdec_si128(t3, ks[2]);
+               t4 = _mm_aesdec_si128(t4, ks[2]);
+               t1 = _mm_aesdec_si128(t1, ks[3]);
+               t2 = _mm_aesdec_si128(t2, ks[3]);
+               t3 = _mm_aesdec_si128(t3, ks[3]);
+               t4 = _mm_aesdec_si128(t4, ks[3]);
+               t1 = _mm_aesdec_si128(t1, ks[4]);
+               t2 = _mm_aesdec_si128(t2, ks[4]);
+               t3 = _mm_aesdec_si128(t3, ks[4]);
+               t4 = _mm_aesdec_si128(t4, ks[4]);
+               t1 = _mm_aesdec_si128(t1, ks[5]);
+               t2 = _mm_aesdec_si128(t2, ks[5]);
+               t3 = _mm_aesdec_si128(t3, ks[5]);
+               t4 = _mm_aesdec_si128(t4, ks[5]);
+               t1 = _mm_aesdec_si128(t1, ks[6]);
+               t2 = _mm_aesdec_si128(t2, ks[6]);
+               t3 = _mm_aesdec_si128(t3, ks[6]);
+               t4 = _mm_aesdec_si128(t4, ks[6]);
+               t1 = _mm_aesdec_si128(t1, ks[7]);
+               t2 = _mm_aesdec_si128(t2, ks[7]);
+               t3 = _mm_aesdec_si128(t3, ks[7]);
+               t4 = _mm_aesdec_si128(t4, ks[7]);
+               t1 = _mm_aesdec_si128(t1, ks[8]);
+               t2 = _mm_aesdec_si128(t2, ks[8]);
+               t3 = _mm_aesdec_si128(t3, ks[8]);
+               t4 = _mm_aesdec_si128(t4, ks[8]);
+               t1 = _mm_aesdec_si128(t1, ks[9]);
+               t2 = _mm_aesdec_si128(t2, ks[9]);
+               t3 = _mm_aesdec_si128(t3, ks[9]);
+               t4 = _mm_aesdec_si128(t4, ks[9]);
+               t1 = _mm_aesdec_si128(t1, ks[10]);
+               t2 = _mm_aesdec_si128(t2, ks[10]);
+               t3 = _mm_aesdec_si128(t3, ks[10]);
+               t4 = _mm_aesdec_si128(t4, ks[10]);
+               t1 = _mm_aesdec_si128(t1, ks[11]);
+               t2 = _mm_aesdec_si128(t2, ks[11]);
+               t3 = _mm_aesdec_si128(t3, ks[11]);
+               t4 = _mm_aesdec_si128(t4, ks[11]);
+
+               t1 = _mm_aesdeclast_si128(t1, ks[12]);
+               t2 = _mm_aesdeclast_si128(t2, ks[12]);
+               t3 = _mm_aesdeclast_si128(t3, ks[12]);
+               t4 = _mm_aesdeclast_si128(t4, ks[12]);
+
+               _mm_storeu_si128(bo + i + 0, t1);
+               _mm_storeu_si128(bo + i + 1, t2);
+               _mm_storeu_si128(bo + i + 2, t3);
+               _mm_storeu_si128(bo + i + 3, t4);
+       }
+
+       for (i = pblocks; i < blocks; i++)
+       {
+               t1 = _mm_loadu_si128(bi + i);
+               t1 = _mm_xor_si128(t1, ks[0]);
+
+               t1 = _mm_aesdec_si128(t1, ks[1]);
+               t1 = _mm_aesdec_si128(t1, ks[2]);
+               t1 = _mm_aesdec_si128(t1, ks[3]);
+               t1 = _mm_aesdec_si128(t1, ks[4]);
+               t1 = _mm_aesdec_si128(t1, ks[5]);
+               t1 = _mm_aesdec_si128(t1, ks[6]);
+               t1 = _mm_aesdec_si128(t1, ks[7]);
+               t1 = _mm_aesdec_si128(t1, ks[8]);
+               t1 = _mm_aesdec_si128(t1, ks[9]);
+               t1 = _mm_aesdec_si128(t1, ks[10]);
+               t1 = _mm_aesdec_si128(t1, ks[11]);
+
+               t1 = _mm_aesdeclast_si128(t1, ks[12]);
+               _mm_storeu_si128(bo + i, t1);
+       }
+}
+
+/**
+ * AES-256 ECB encryption
+ */
+static void encrypt_ecb256(aesni_key_t *key, u_int blocks, u_char *in,
+                                                  u_char *out)
+{
+       __m128i *ks, *bi, *bo;
+       __m128i t1, t2, t3, t4;
+       u_int i, pblocks;
+
+       ks = key->schedule;
+       bi = (__m128i*)in;
+       bo = (__m128i*)out;
+       pblocks = blocks - (blocks % ECB_PARALLELISM);
+
+       for (i = 0; i < pblocks; i += ECB_PARALLELISM)
+       {
+               t1 = _mm_loadu_si128(bi + i + 0);
+               t2 = _mm_loadu_si128(bi + i + 1);
+               t3 = _mm_loadu_si128(bi + i + 2);
+               t4 = _mm_loadu_si128(bi + i + 3);
+
+               t1 = _mm_xor_si128(t1, ks[0]);
+               t2 = _mm_xor_si128(t2, ks[0]);
+               t3 = _mm_xor_si128(t3, ks[0]);
+               t4 = _mm_xor_si128(t4, ks[0]);
+
+               t1 = _mm_aesenc_si128(t1, ks[1]);
+               t2 = _mm_aesenc_si128(t2, ks[1]);
+               t3 = _mm_aesenc_si128(t3, ks[1]);
+               t4 = _mm_aesenc_si128(t4, ks[1]);
+               t1 = _mm_aesenc_si128(t1, ks[2]);
+               t2 = _mm_aesenc_si128(t2, ks[2]);
+               t3 = _mm_aesenc_si128(t3, ks[2]);
+               t4 = _mm_aesenc_si128(t4, ks[2]);
+               t1 = _mm_aesenc_si128(t1, ks[3]);
+               t2 = _mm_aesenc_si128(t2, ks[3]);
+               t3 = _mm_aesenc_si128(t3, ks[3]);
+               t4 = _mm_aesenc_si128(t4, ks[3]);
+               t1 = _mm_aesenc_si128(t1, ks[4]);
+               t2 = _mm_aesenc_si128(t2, ks[4]);
+               t3 = _mm_aesenc_si128(t3, ks[4]);
+               t4 = _mm_aesenc_si128(t4, ks[4]);
+               t1 = _mm_aesenc_si128(t1, ks[5]);
+               t2 = _mm_aesenc_si128(t2, ks[5]);
+               t3 = _mm_aesenc_si128(t3, ks[5]);
+               t4 = _mm_aesenc_si128(t4, ks[5]);
+               t1 = _mm_aesenc_si128(t1, ks[6]);
+               t2 = _mm_aesenc_si128(t2, ks[6]);
+               t3 = _mm_aesenc_si128(t3, ks[6]);
+               t4 = _mm_aesenc_si128(t4, ks[6]);
+               t1 = _mm_aesenc_si128(t1, ks[7]);
+               t2 = _mm_aesenc_si128(t2, ks[7]);
+               t3 = _mm_aesenc_si128(t3, ks[7]);
+               t4 = _mm_aesenc_si128(t4, ks[7]);
+               t1 = _mm_aesenc_si128(t1, ks[8]);
+               t2 = _mm_aesenc_si128(t2, ks[8]);
+               t3 = _mm_aesenc_si128(t3, ks[8]);
+               t4 = _mm_aesenc_si128(t4, ks[8]);
+               t1 = _mm_aesenc_si128(t1, ks[9]);
+               t2 = _mm_aesenc_si128(t2, ks[9]);
+               t3 = _mm_aesenc_si128(t3, ks[9]);
+               t4 = _mm_aesenc_si128(t4, ks[9]);
+               t1 = _mm_aesenc_si128(t1, ks[10]);
+               t2 = _mm_aesenc_si128(t2, ks[10]);
+               t3 = _mm_aesenc_si128(t3, ks[10]);
+               t4 = _mm_aesenc_si128(t4, ks[10]);
+               t1 = _mm_aesenc_si128(t1, ks[11]);
+               t2 = _mm_aesenc_si128(t2, ks[11]);
+               t3 = _mm_aesenc_si128(t3, ks[11]);
+               t4 = _mm_aesenc_si128(t4, ks[11]);
+               t1 = _mm_aesenc_si128(t1, ks[12]);
+               t2 = _mm_aesenc_si128(t2, ks[12]);
+               t3 = _mm_aesenc_si128(t3, ks[12]);
+               t4 = _mm_aesenc_si128(t4, ks[12]);
+               t1 = _mm_aesenc_si128(t1, ks[13]);
+               t2 = _mm_aesenc_si128(t2, ks[13]);
+               t3 = _mm_aesenc_si128(t3, ks[13]);
+               t4 = _mm_aesenc_si128(t4, ks[13]);
+
+               t1 = _mm_aesenclast_si128(t1, ks[14]);
+               t2 = _mm_aesenclast_si128(t2, ks[14]);
+               t3 = _mm_aesenclast_si128(t3, ks[14]);
+               t4 = _mm_aesenclast_si128(t4, ks[14]);
+
+               _mm_storeu_si128(bo + i + 0, t1);
+               _mm_storeu_si128(bo + i + 1, t2);
+               _mm_storeu_si128(bo + i + 2, t3);
+               _mm_storeu_si128(bo + i + 3, t4);
+       }
+
+       for (i = pblocks; i < blocks; i++)
+       {
+               t1 = _mm_loadu_si128(bi + i);
+               t1 = _mm_xor_si128(t1, ks[0]);
+
+               t1 = _mm_aesenc_si128(t1, ks[1]);
+               t1 = _mm_aesenc_si128(t1, ks[2]);
+               t1 = _mm_aesenc_si128(t1, ks[3]);
+               t1 = _mm_aesenc_si128(t1, ks[4]);
+               t1 = _mm_aesenc_si128(t1, ks[5]);
+               t1 = _mm_aesenc_si128(t1, ks[6]);
+               t1 = _mm_aesenc_si128(t1, ks[7]);
+               t1 = _mm_aesenc_si128(t1, ks[8]);
+               t1 = _mm_aesenc_si128(t1, ks[9]);
+               t1 = _mm_aesenc_si128(t1, ks[10]);
+               t1 = _mm_aesenc_si128(t1, ks[11]);
+               t1 = _mm_aesenc_si128(t1, ks[12]);
+               t1 = _mm_aesenc_si128(t1, ks[13]);
+
+               t1 = _mm_aesenclast_si128(t1, ks[14]);
+               _mm_storeu_si128(bo + i, t1);
+       }
+}
+
+/**
+ * AES-256 ECB decryption
+ */
+static void decrypt_ecb256(aesni_key_t *key, u_int blocks, u_char *in,
+                                                  u_char *out)
+{
+       __m128i *ks, *bi, *bo;
+       __m128i t1, t2, t3, t4;
+       u_int i, pblocks;
+
+       ks = key->schedule;
+       bi = (__m128i*)in;
+       bo = (__m128i*)out;
+       pblocks = blocks - (blocks % ECB_PARALLELISM);
+
+       for (i = 0; i < pblocks; i += ECB_PARALLELISM)
+       {
+               t1 = _mm_loadu_si128(bi + i + 0);
+               t2 = _mm_loadu_si128(bi + i + 1);
+               t3 = _mm_loadu_si128(bi + i + 2);
+               t4 = _mm_loadu_si128(bi + i + 3);
+
+               t1 = _mm_xor_si128(t1, ks[0]);
+               t2 = _mm_xor_si128(t2, ks[0]);
+               t3 = _mm_xor_si128(t3, ks[0]);
+               t4 = _mm_xor_si128(t4, ks[0]);
+
+               t1 = _mm_aesdec_si128(t1, ks[1]);
+               t2 = _mm_aesdec_si128(t2, ks[1]);
+               t3 = _mm_aesdec_si128(t3, ks[1]);
+               t4 = _mm_aesdec_si128(t4, ks[1]);
+               t1 = _mm_aesdec_si128(t1, ks[2]);
+               t2 = _mm_aesdec_si128(t2, ks[2]);
+               t3 = _mm_aesdec_si128(t3, ks[2]);
+               t4 = _mm_aesdec_si128(t4, ks[2]);
+               t1 = _mm_aesdec_si128(t1, ks[3]);
+               t2 = _mm_aesdec_si128(t2, ks[3]);
+               t3 = _mm_aesdec_si128(t3, ks[3]);
+               t4 = _mm_aesdec_si128(t4, ks[3]);
+               t1 = _mm_aesdec_si128(t1, ks[4]);
+               t2 = _mm_aesdec_si128(t2, ks[4]);
+               t3 = _mm_aesdec_si128(t3, ks[4]);
+               t4 = _mm_aesdec_si128(t4, ks[4]);
+               t1 = _mm_aesdec_si128(t1, ks[5]);
+               t2 = _mm_aesdec_si128(t2, ks[5]);
+               t3 = _mm_aesdec_si128(t3, ks[5]);
+               t4 = _mm_aesdec_si128(t4, ks[5]);
+               t1 = _mm_aesdec_si128(t1, ks[6]);
+               t2 = _mm_aesdec_si128(t2, ks[6]);
+               t3 = _mm_aesdec_si128(t3, ks[6]);
+               t4 = _mm_aesdec_si128(t4, ks[6]);
+               t1 = _mm_aesdec_si128(t1, ks[7]);
+               t2 = _mm_aesdec_si128(t2, ks[7]);
+               t3 = _mm_aesdec_si128(t3, ks[7]);
+               t4 = _mm_aesdec_si128(t4, ks[7]);
+               t1 = _mm_aesdec_si128(t1, ks[8]);
+               t2 = _mm_aesdec_si128(t2, ks[8]);
+               t3 = _mm_aesdec_si128(t3, ks[8]);
+               t4 = _mm_aesdec_si128(t4, ks[8]);
+               t1 = _mm_aesdec_si128(t1, ks[9]);
+               t2 = _mm_aesdec_si128(t2, ks[9]);
+               t3 = _mm_aesdec_si128(t3, ks[9]);
+               t4 = _mm_aesdec_si128(t4, ks[9]);
+               t1 = _mm_aesdec_si128(t1, ks[10]);
+               t2 = _mm_aesdec_si128(t2, ks[10]);
+               t3 = _mm_aesdec_si128(t3, ks[10]);
+               t4 = _mm_aesdec_si128(t4, ks[10]);
+               t1 = _mm_aesdec_si128(t1, ks[11]);
+               t2 = _mm_aesdec_si128(t2, ks[11]);
+               t3 = _mm_aesdec_si128(t3, ks[11]);
+               t4 = _mm_aesdec_si128(t4, ks[11]);
+               t1 = _mm_aesdec_si128(t1, ks[12]);
+               t2 = _mm_aesdec_si128(t2, ks[12]);
+               t3 = _mm_aesdec_si128(t3, ks[12]);
+               t4 = _mm_aesdec_si128(t4, ks[12]);
+               t1 = _mm_aesdec_si128(t1, ks[13]);
+               t2 = _mm_aesdec_si128(t2, ks[13]);
+               t3 = _mm_aesdec_si128(t3, ks[13]);
+               t4 = _mm_aesdec_si128(t4, ks[13]);
+
+               t1 = _mm_aesdeclast_si128(t1, ks[14]);
+               t2 = _mm_aesdeclast_si128(t2, ks[14]);
+               t3 = _mm_aesdeclast_si128(t3, ks[14]);
+               t4 = _mm_aesdeclast_si128(t4, ks[14]);
+
+               _mm_storeu_si128(bo + i + 0, t1);
+               _mm_storeu_si128(bo + i + 1, t2);
+               _mm_storeu_si128(bo + i + 2, t3);
+               _mm_storeu_si128(bo + i + 3, t4);
+       }
+
+       for (i = pblocks; i < blocks; i++)
+       {
+               t1 = _mm_loadu_si128(bi + i);
+               t1 = _mm_xor_si128(t1, ks[0]);
+
+               t1 = _mm_aesdec_si128(t1, ks[1]);
+               t1 = _mm_aesdec_si128(t1, ks[2]);
+               t1 = _mm_aesdec_si128(t1, ks[3]);
+               t1 = _mm_aesdec_si128(t1, ks[4]);
+               t1 = _mm_aesdec_si128(t1, ks[5]);
+               t1 = _mm_aesdec_si128(t1, ks[6]);
+               t1 = _mm_aesdec_si128(t1, ks[7]);
+               t1 = _mm_aesdec_si128(t1, ks[8]);
+               t1 = _mm_aesdec_si128(t1, ks[9]);
+               t1 = _mm_aesdec_si128(t1, ks[10]);
+               t1 = _mm_aesdec_si128(t1, ks[11]);
+               t1 = _mm_aesdec_si128(t1, ks[12]);
+               t1 = _mm_aesdec_si128(t1, ks[13]);
+
+               t1 = _mm_aesdeclast_si128(t1, ks[14]);
+               _mm_storeu_si128(bo + i, t1);
+       }
+}
+
+/**
+ * Do inline or allocated de/encryption using key schedule
+ */
+static bool crypt(aesni_ecb_fn_t fn, aesni_key_t *key, chunk_t data,
+                                 chunk_t *out)
+{
+       u_char *buf;
+
+       if (!key || data.len % AES_BLOCK_SIZE)
+       {
+               return FALSE;
+       }
+       if (out)
+       {
+               *out = chunk_alloc(data.len);
+               buf = out->ptr;
+       }
+       else
+       {
+               buf = data.ptr;
+       }
+       fn(key, data.len / AES_BLOCK_SIZE, data.ptr, buf);
+       return TRUE;
+}
+
+METHOD(crypter_t, encrypt, bool,
+       private_aesni_ecb_t *this, chunk_t data, chunk_t iv, chunk_t *encrypted)
+{
+       return crypt(this->encrypt, this->ekey, data, encrypted);
+}
+
+METHOD(crypter_t, decrypt, bool,
+       private_aesni_ecb_t *this, chunk_t data, chunk_t iv, chunk_t *decrypted)
+{
+       return crypt(this->decrypt, this->dkey, data, decrypted);
+}
+
+METHOD(crypter_t, get_block_size, size_t,
+       private_aesni_ecb_t *this)
+{
+       return AES_BLOCK_SIZE;
+}
+
+METHOD(crypter_t, get_iv_size, size_t,
+       private_aesni_ecb_t *this)
+{
+       return 0;
+}
+
+METHOD(crypter_t, get_key_size, size_t,
+       private_aesni_ecb_t *this)
+{
+       return this->key_size;
+}
+
+METHOD(crypter_t, set_key, bool,
+       private_aesni_ecb_t *this, chunk_t key)
+{
+       if (key.len != this->key_size)
+       {
+               return FALSE;
+       }
+
+       DESTROY_IF(this->ekey);
+       DESTROY_IF(this->dkey);
+
+       this->ekey = aesni_key_create(TRUE, key);
+       this->dkey = aesni_key_create(FALSE, key);
+
+       return this->ekey && this->dkey;
+}
+
+METHOD(crypter_t, destroy, void,
+       private_aesni_ecb_t *this)
+{
+       DESTROY_IF(this->ekey);
+       DESTROY_IF(this->dkey);
+       free_align(this);
+}
+
+/**
+ * See header
+ */
+aesni_ecb_t *aesni_ecb_create(encryption_algorithm_t algo, size_t key_size)
+{
+       private_aesni_ecb_t *this;
+
+       if (algo != ENCR_AES_ECB)
+       {
+               return NULL;
+       }
+       switch (key_size)
+       {
+               case 0:
+                       key_size = 16;
+                       break;
+               case 16:
+               case 24:
+               case 32:
+                       break;
+               default:
+                       return NULL;
+       }
+
+       INIT_ALIGN(this, sizeof(__m128i),
+               .public = {
+                       .crypter = {
+                               .encrypt = _encrypt,
+                               .decrypt = _decrypt,
+                               .get_block_size = _get_block_size,
+                               .get_iv_size = _get_iv_size,
+                               .get_key_size = _get_key_size,
+                               .set_key = _set_key,
+                               .destroy = _destroy,
+                       },
+               },
+               .key_size = key_size,
+       );
+
+       switch (key_size)
+       {
+               case 16:
+                       this->encrypt = encrypt_ecb128;
+                       this->decrypt = decrypt_ecb128;
+                       break;
+               case 24:
+                       this->encrypt = encrypt_ecb192;
+                       this->decrypt = decrypt_ecb192;
+                       break;
+               case 32:
+                       this->encrypt = encrypt_ecb256;
+                       this->decrypt = decrypt_ecb256;
+                       break;
+       }
+
+       return &this->public;
+}
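A note on the structure above: ECB_PARALLELISM = 4 independent blocks are loaded and pushed through the fully unrolled round sequence together, so the multi-cycle latency of each aesenc/aesdec instruction overlaps across blocks instead of serializing on a single dependency chain; the trailing loop handles the remaining blocks one at a time. For readability, each block's unrolled sequence is equivalent to this rolled loop (a sketch, not code from the commit), with rounds = 10, 12 or 14 for AES-128/192/256:

#include <immintrin.h>

/* rolled equivalent of one block's encryption in the functions above */
static inline __m128i aes_ecb_encrypt_block(__m128i t, const __m128i *ks,
											int rounds)
{
	int r;

	t = _mm_xor_si128(t, ks[0]);				/* initial key whitening */
	for (r = 1; r < rounds; r++)
	{
		t = _mm_aesenc_si128(t, ks[r]);			/* full AES round */
	}
	return _mm_aesenclast_si128(t, ks[rounds]);	/* final round */
}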
diff --git a/src/libstrongswan/plugins/aesni/aesni_ecb.h b/src/libstrongswan/plugins/aesni/aesni_ecb.h
new file mode 100644
index 0000000..d429abf
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2019 Andreas Steffen
+ * HSR Hochschule fuer Technik Rapperswil
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.  See <http://www.fsf.org/copyleft/gpl.txt>.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+/**
+ * @defgroup aesni_ecb aesni_ecb
+ * @{ @ingroup aesni
+ */
+
+#ifndef AESNI_ECB_H_
+#define AESNI_ECB_H_
+
+#include <library.h>
+
+typedef struct aesni_ecb_t aesni_ecb_t;
+
+/**
+ * ECB mode crypter using AES-NI
+ */
+struct aesni_ecb_t {
+
+       /**
+        * Implements crypter interface
+        */
+       crypter_t crypter;
+};
+
+/**
+ * Create an aesni_ecb instance.
+ *
+ * @param algo                 encryption algorithm, ENCR_AES_ECB
+ * @param key_size             AES key size, in bytes
+ * @return                             AES-ECB crypter, NULL if not supported
+ */
+aesni_ecb_t *aesni_ecb_create(encryption_algorithm_t algo, size_t key_size);
+
+#endif /** AESNI_ECB_H_ @}*/
diff --git a/src/libstrongswan/plugins/aesni/aesni_plugin.c b/src/libstrongswan/plugins/aesni/aesni_plugin.c
index b92419dc4ab0f5007b4e0693f87e26df3a5f9cbf..b83575a66a6f382163243db5f234097ac63356e4 100644
@@ -15,6 +15,7 @@
 
 #include "aesni_plugin.h"
 #include "aesni_cbc.h"
+#include "aesni_ecb.h"
 #include "aesni_ctr.h"
 #include "aesni_ccm.h"
 #include "aesni_gcm.h"
@@ -55,6 +56,10 @@ METHOD(plugin_t, get_features, int,
                        PLUGIN_PROVIDE(CRYPTER, ENCR_AES_CBC, 16),
                        PLUGIN_PROVIDE(CRYPTER, ENCR_AES_CBC, 24),
                        PLUGIN_PROVIDE(CRYPTER, ENCR_AES_CBC, 32),
+               PLUGIN_REGISTER(CRYPTER, aesni_ecb_create),
+                       PLUGIN_PROVIDE(CRYPTER, ENCR_AES_ECB, 16),
+                       PLUGIN_PROVIDE(CRYPTER, ENCR_AES_ECB, 24),
+                       PLUGIN_PROVIDE(CRYPTER, ENCR_AES_ECB, 32),
                PLUGIN_REGISTER(CRYPTER, aesni_ctr_create),
                        PLUGIN_PROVIDE(CRYPTER, ENCR_AES_CTR, 16),
                        PLUGIN_PROVIDE(CRYPTER, ENCR_AES_CTR, 24),
diff --git a/src/libstrongswan/plugins/test_vectors/Makefile.am b/src/libstrongswan/plugins/test_vectors/Makefile.am
index 90cb7d396371cd9e77fbe101cf083ac9241a4087..fda2485f2f03a66998cc92d7139864c03d77331f 100644
@@ -14,6 +14,7 @@ libstrongswan_test_vectors_la_SOURCES = \
        test_vectors_plugin.h test_vectors_plugin.c test_vectors.h \
        test_vectors/3des_cbc.c \
        test_vectors/aes_cbc.c \
+       test_vectors/aes_ecb.c \
        test_vectors/aes_ctr.c \
        test_vectors/aes_xcbc.c \
        test_vectors/aes_cmac.c \
diff --git a/src/libstrongswan/plugins/test_vectors/test_vectors.h b/src/libstrongswan/plugins/test_vectors/test_vectors.h
index 006fea352eee22ab0e8db0536ee5a290ee4e6122..e108d6fc210f3eea8b7abace68903a46b3f09e38 100644
@@ -20,6 +20,9 @@ TEST_VECTOR_CRYPTER(aes_cbc3)
 TEST_VECTOR_CRYPTER(aes_cbc4)
 TEST_VECTOR_CRYPTER(aes_cbc5)
 TEST_VECTOR_CRYPTER(aes_cbc6)
+TEST_VECTOR_CRYPTER(aes_ecb1)
+TEST_VECTOR_CRYPTER(aes_ecb2)
+TEST_VECTOR_CRYPTER(aes_ecb3)
 TEST_VECTOR_CRYPTER(aes_ctr1)
 TEST_VECTOR_CRYPTER(aes_ctr2)
 TEST_VECTOR_CRYPTER(aes_ctr3)
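The TEST_VECTOR_CRYPTER() entries are picked up by the test_vectors plugin, which registers each vector with the crypto factory so the new ECB vectors are checked against whichever implementation provides ENCR_AES_ECB. Roughly, per vector (a condensed sketch of that mechanism, not literal plugin code):

#include <library.h>
#include <crypto/crypto_tester.h>

extern crypter_test_vector_t aes_ecb1, aes_ecb2, aes_ecb3;

static void register_aes_ecb_vectors(void)
{
	lib->crypto->add_test_vector(lib->crypto, ENCRYPTION_ALGORITHM, &aes_ecb1);
	lib->crypto->add_test_vector(lib->crypto, ENCRYPTION_ALGORITHM, &aes_ecb2);
	lib->crypto->add_test_vector(lib->crypto, ENCRYPTION_ALGORITHM, &aes_ecb3);
}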
diff --git a/src/libstrongswan/plugins/test_vectors/test_vectors/aes_ecb.c b/src/libstrongswan/plugins/test_vectors/test_vectors/aes_ecb.c
new file mode 100644
index 0000000..54d18a8
--- /dev/null
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2019 Andreas Steffen
+ * HSR Hochschule fuer Technik Rapperswil
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.  See <http://www.fsf.org/copyleft/gpl.txt>.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <crypto/crypto_tester.h>
+
+/**
+ * Test F.1.1 of NIST SP 800-38A 2001
+ */
+crypter_test_vector_t aes_ecb1 = {
+       .alg = ENCR_AES_ECB, .key_size = 16, .len = 64,
+       .key    = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+       .iv             = "",
+       .plain  = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+                         "\xae\x2d\x8a\x57\x1e\x03\xac\x9c\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+                         "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+                         "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+       .cipher = "\x3a\xd7\x7b\xb4\x0d\x7a\x36\x60\xa8\x9e\xca\xf3\x24\x66\xef\x97"
+                         "\xf5\xd3\xd5\x85\x03\xb9\x69\x9d\xe7\x85\x89\x5a\x96\xfd\xba\xaf"
+                         "\x43\xb1\xcd\x7f\x59\x8e\xce\x23\x88\x1b\x00\xe3\xed\x03\x06\x88"
+                         "\x7b\x0c\x78\x5e\x27\xe8\xad\x3f\x82\x23\x20\x71\x04\x72\x5d\xd4"
+};
+
+/**
+ * Test F.1.3 of NIST SP 800-38A 2001
+ */
+crypter_test_vector_t aes_ecb2 = {
+       .alg = ENCR_AES_ECB, .key_size = 24, .len = 64,
+       .key    = "\x8e\x73\xb0\xf7\xda\x0e\x64\x52\xc8\x10\xf3\x2b\x80\x90\x79\xe5"
+                         "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b",
+       .iv             = "",
+       .plain  = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+                         "\xae\x2d\x8a\x57\x1e\x03\xac\x9c\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+                         "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+                         "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+       .cipher = "\xbd\x33\x4f\x1d\x6e\x45\xf2\x5f\xf7\x12\xa2\x14\x57\x1f\xa5\xcc"
+                         "\x97\x41\x04\x84\x6d\x0a\xd3\xad\x77\x34\xec\xb3\xec\xee\x4e\xef"
+                         "\xef\x7a\xfd\x22\x70\xe2\xe6\x0a\xdc\xe0\xba\x2f\xac\xe6\x44\x4e"
+                         "\x9a\x4b\x41\xba\x73\x8d\x6c\x72\xfb\x16\x69\x16\x03\xc1\x8e\x0e"
+};
+
+/**
+ * Test F.1.5 of NIST SP 800-38A 2001
+ */
+crypter_test_vector_t aes_ecb3 = {
+       .alg = ENCR_AES_ECB, .key_size = 32, .len = 64,
+       .key    = "\x60\x3d\xeb\x10\x15\xca\x71\xbe\x2b\x73\xae\xf0\x85\x7d\x77\x81"
+                         "\x1f\x35\x2c\x07\x3b\x61\x08\xd7\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
+       .iv             = "",
+       .plain  = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+                         "\xae\x2d\x8a\x57\x1e\x03\xac\x9c\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+                         "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+                         "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+       .cipher = "\xf3\xee\xd1\xbd\xb5\xd2\xa0\x3c\x06\x4b\x5a\x7e\x3d\xb1\x81\xf8"
+                         "\x59\x1c\xcb\x10\xd4\x10\xed\x26\xdc\x5b\xa7\x4a\x31\x36\x28\x70"
+                         "\xb6\xed\x21\xb9\x9c\xa6\xf4\xf9\xf1\x53\xe7\xb1\xbe\xaf\xed\x1d"
+                         "\x23\x30\x4b\x7a\x39\xf9\xf3\xff\x06\x7d\x8d\x8f\x9e\x24\xec\xc7"
+};
+