 *
 * Context: Any context.
 */
-#define aes_encrypt(key, out, in)					\
-	_Generic((key),							\
-		struct crypto_aes_ctx *: aes_encrypt_old((const struct crypto_aes_ctx *)(key), (out), (in)), \
-		const struct crypto_aes_ctx *: aes_encrypt_old((const struct crypto_aes_ctx *)(key), (out), (in)), \
-		struct aes_enckey *: aes_encrypt_new((const struct aes_enckey *)(key), (out), (in)), \
-		const struct aes_enckey *: aes_encrypt_new((const struct aes_enckey *)(key), (out), (in)), \
-		struct aes_key *: aes_encrypt_new((const struct aes_key *)(key), (out), (in)), \
-		const struct aes_key *: aes_encrypt_new((const struct aes_key *)(key), (out), (in)))
-void aes_encrypt_old(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
-void aes_encrypt_new(aes_encrypt_arg key, u8 out[at_least AES_BLOCK_SIZE],
-		     const u8 in[at_least AES_BLOCK_SIZE]);
+void aes_encrypt(aes_encrypt_arg key, u8 out[at_least AES_BLOCK_SIZE],
+		 const u8 in[at_least AES_BLOCK_SIZE]);
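Dropping the _Generic dispatch works because aes_encrypt_arg itself accepts both key types. Its definition sits outside this hunk; a minimal sketch of the idiom it presumably uses, assuming a transparent union and that struct aes_key begins with its encryption-key half (so the .enc_key view used in aes_encrypt() below is valid for either member):

	typedef union {
		const struct aes_enckey *enc_key;	/* encrypt-only key */
		const struct aes_key *key;		/* full key (assumed layout-compatible) */
	} aes_encrypt_arg __attribute__((__transparent_union__));
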
/**
 * aes_decrypt() - Decrypt a single AES block
 *
 * Context: Any context.
 */
-#define aes_decrypt(key, out, in)					\
-	_Generic((key),							\
-		struct crypto_aes_ctx *: aes_decrypt_old((const struct crypto_aes_ctx *)(key), (out), (in)), \
-		const struct crypto_aes_ctx *: aes_decrypt_old((const struct crypto_aes_ctx *)(key), (out), (in)), \
-		struct aes_key *: aes_decrypt_new((const struct aes_key *)(key), (out), (in)), \
-		const struct aes_key *: aes_decrypt_new((const struct aes_key *)(key), (out), (in)))
-void aes_decrypt_old(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
-void aes_decrypt_new(const struct aes_key *key, u8 out[at_least AES_BLOCK_SIZE],
-		     const u8 in[at_least AES_BLOCK_SIZE]);
+void aes_decrypt(const struct aes_key *key, u8 out[at_least AES_BLOCK_SIZE],
+		 const u8 in[at_least AES_BLOCK_SIZE]);
extern const u8 crypto_aes_sbox[];
extern const u8 crypto_aes_inv_sbox[];
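For callers already using the new key types, nothing changes at the call site: the macro spelled the call as aes_encrypt(key, out, in), and the function keeps that spelling. A hypothetical caller that compiles the same before and after this patch:

	static void encrypt_one(const struct aes_enckey *key,
				u8 dst[AES_BLOCK_SIZE], const u8 src[AES_BLOCK_SIZE])
	{
		aes_encrypt(key, dst, src);	/* was _Generic dispatch, now a plain function */
	}
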
	return mix_columns(x ^ y ^ ror32(y, 16));
}
-static __always_inline u32 subshift(u32 in[], int pos)
-{
-	return (aes_sbox[in[pos] & 0xff]) ^
-	       (aes_sbox[(in[(pos + 1) % 4] >> 8) & 0xff] << 8) ^
-	       (aes_sbox[(in[(pos + 2) % 4] >> 16) & 0xff] << 16) ^
-	       (aes_sbox[(in[(pos + 3) % 4] >> 24) & 0xff] << 24);
-}
-
-static __always_inline u32 inv_subshift(u32 in[], int pos)
-{
-	return (aes_inv_sbox[in[pos] & 0xff]) ^
-	       (aes_inv_sbox[(in[(pos + 3) % 4] >> 8) & 0xff] << 8) ^
-	       (aes_inv_sbox[(in[(pos + 2) % 4] >> 16) & 0xff] << 16) ^
-	       (aes_inv_sbox[(in[(pos + 1) % 4] >> 24) & 0xff] << 24);
-}
-
static u32 subw(u32 in)
{
	return (aes_sbox[in & 0xff]) ^
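The two helpers removed above were only used by aes_encrypt_old() and aes_decrypt_old(), removed below, so they go with them. They fused SubBytes with (Inv)ShiftRows: row r of output column pos is pulled from input column (pos + r) % 4 for encryption, and (pos - r) % 4 for the inverse. Unrolled for pos == 0 (a sketch; state[] stands for the four little-endian state columns):

	static u32 subshift_col0(const u32 state[4])
	{
		return (aes_sbox[state[0] & 0xff]) ^
		       (aes_sbox[(state[1] >> 8) & 0xff] << 8) ^
		       (aes_sbox[(state[2] >> 16) & 0xff] << 16) ^
		       (aes_sbox[(state[3] >> 24) & 0xff] << 24);
	}
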
}
EXPORT_SYMBOL(aes_expandkey);
-void aes_encrypt_old(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in)
-{
-	const u32 *rkp = ctx->key_enc + 4;
-	int rounds = 6 + ctx->key_length / 4;
-	u32 st0[4], st1[4];
-	int round;
-
-	st0[0] = ctx->key_enc[0] ^ get_unaligned_le32(in);
-	st0[1] = ctx->key_enc[1] ^ get_unaligned_le32(in + 4);
-	st0[2] = ctx->key_enc[2] ^ get_unaligned_le32(in + 8);
-	st0[3] = ctx->key_enc[3] ^ get_unaligned_le32(in + 12);
-
-	/*
-	 * Force the compiler to emit data independent Sbox references,
-	 * by xoring the input with Sbox values that are known to add up
-	 * to zero. This pulls the entire Sbox into the D-cache before any
-	 * data dependent lookups are done.
-	 */
-	st0[0] ^= aes_sbox[ 0] ^ aes_sbox[ 64] ^ aes_sbox[134] ^ aes_sbox[195];
-	st0[1] ^= aes_sbox[16] ^ aes_sbox[ 82] ^ aes_sbox[158] ^ aes_sbox[221];
-	st0[2] ^= aes_sbox[32] ^ aes_sbox[ 96] ^ aes_sbox[160] ^ aes_sbox[234];
-	st0[3] ^= aes_sbox[48] ^ aes_sbox[112] ^ aes_sbox[186] ^ aes_sbox[241];
-
-	for (round = 0;; round += 2, rkp += 8) {
-		st1[0] = mix_columns(subshift(st0, 0)) ^ rkp[0];
-		st1[1] = mix_columns(subshift(st0, 1)) ^ rkp[1];
-		st1[2] = mix_columns(subshift(st0, 2)) ^ rkp[2];
-		st1[3] = mix_columns(subshift(st0, 3)) ^ rkp[3];
-
-		if (round == rounds - 2)
-			break;
-
-		st0[0] = mix_columns(subshift(st1, 0)) ^ rkp[4];
-		st0[1] = mix_columns(subshift(st1, 1)) ^ rkp[5];
-		st0[2] = mix_columns(subshift(st1, 2)) ^ rkp[6];
-		st0[3] = mix_columns(subshift(st1, 3)) ^ rkp[7];
-	}
-
-	put_unaligned_le32(subshift(st1, 0) ^ rkp[4], out);
-	put_unaligned_le32(subshift(st1, 1) ^ rkp[5], out + 4);
-	put_unaligned_le32(subshift(st1, 2) ^ rkp[6], out + 8);
-	put_unaligned_le32(subshift(st1, 3) ^ rkp[7], out + 12);
-}
-EXPORT_SYMBOL(aes_encrypt_old);
-
static __always_inline u32 enc_quarterround(const u32 w[4], int i, u32 rk)
{
	return rk ^ aes_enc_tab[(u8)w[i]] ^
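The D-cache preload in the removed function relies on each group of four S-box indices XOR-ing to zero, so the "preload" loads cancel and leave st0 unchanged. A throwaway userspace-style check of that property, with the indices copied from the removed code and the exported table assumed linked in:

	#include <assert.h>
	#include <stdint.h>

	extern const uint8_t crypto_aes_sbox[256];

	static void check_preload_groups_cancel(void)
	{
		static const uint8_t idx[4][4] = {
			{  0,  64, 134, 195 },	/* group XORed into st0[0] */
			{ 16,  82, 158, 221 },	/* st0[1] */
			{ 32,  96, 160, 234 },	/* st0[2] */
			{ 48, 112, 186, 241 },	/* st0[3] */
		};

		for (int i = 0; i < 4; i++)
			assert((crypto_aes_sbox[idx[i][0]] ^ crypto_aes_sbox[idx[i][1]] ^
				crypto_aes_sbox[idx[i][2]] ^ crypto_aes_sbox[idx[i][3]]) == 0);
	}
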
	put_unaligned_le32(declast_quarterround(w, 3, *rkp++), &out[12]);
}
-void aes_decrypt_old(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in)
-{
-	const u32 *rkp = ctx->key_dec + 4;
-	int rounds = 6 + ctx->key_length / 4;
-	u32 st0[4], st1[4];
-	int round;
-
-	st0[0] = ctx->key_dec[0] ^ get_unaligned_le32(in);
-	st0[1] = ctx->key_dec[1] ^ get_unaligned_le32(in + 4);
-	st0[2] = ctx->key_dec[2] ^ get_unaligned_le32(in + 8);
-	st0[3] = ctx->key_dec[3] ^ get_unaligned_le32(in + 12);
-
-	/*
-	 * Force the compiler to emit data independent Sbox references,
-	 * by xoring the input with Sbox values that are known to add up
-	 * to zero. This pulls the entire Sbox into the D-cache before any
-	 * data dependent lookups are done.
-	 */
-	st0[0] ^= aes_inv_sbox[ 0] ^ aes_inv_sbox[ 64] ^ aes_inv_sbox[129] ^ aes_inv_sbox[200];
-	st0[1] ^= aes_inv_sbox[16] ^ aes_inv_sbox[ 83] ^ aes_inv_sbox[150] ^ aes_inv_sbox[212];
-	st0[2] ^= aes_inv_sbox[32] ^ aes_inv_sbox[ 96] ^ aes_inv_sbox[160] ^ aes_inv_sbox[236];
-	st0[3] ^= aes_inv_sbox[48] ^ aes_inv_sbox[112] ^ aes_inv_sbox[187] ^ aes_inv_sbox[247];
-
-	for (round = 0;; round += 2, rkp += 8) {
-		st1[0] = inv_mix_columns(inv_subshift(st0, 0)) ^ rkp[0];
-		st1[1] = inv_mix_columns(inv_subshift(st0, 1)) ^ rkp[1];
-		st1[2] = inv_mix_columns(inv_subshift(st0, 2)) ^ rkp[2];
-		st1[3] = inv_mix_columns(inv_subshift(st0, 3)) ^ rkp[3];
-
-		if (round == rounds - 2)
-			break;
-
-		st0[0] = inv_mix_columns(inv_subshift(st1, 0)) ^ rkp[4];
-		st0[1] = inv_mix_columns(inv_subshift(st1, 1)) ^ rkp[5];
-		st0[2] = inv_mix_columns(inv_subshift(st1, 2)) ^ rkp[6];
-		st0[3] = inv_mix_columns(inv_subshift(st1, 3)) ^ rkp[7];
-	}
-
-	put_unaligned_le32(inv_subshift(st1, 0) ^ rkp[4], out);
-	put_unaligned_le32(inv_subshift(st1, 1) ^ rkp[5], out + 4);
-	put_unaligned_le32(inv_subshift(st1, 2) ^ rkp[6], out + 8);
-	put_unaligned_le32(inv_subshift(st1, 3) ^ rkp[7], out + 12);
-}
-EXPORT_SYMBOL(aes_decrypt_old);
-
/*
* Note: the aes_prepare*key_* names reflect the fact that the implementation
* might not actually expand the key. (The s390 code for example doesn't.)
}
EXPORT_SYMBOL(aes_prepareenckey);
-void aes_encrypt_new(aes_encrypt_arg key, u8 out[AES_BLOCK_SIZE],
-		     const u8 in[AES_BLOCK_SIZE])
+void aes_encrypt(aes_encrypt_arg key, u8 out[AES_BLOCK_SIZE],
+		 const u8 in[AES_BLOCK_SIZE])
{
	aes_encrypt_arch(key.enc_key, out, in);
}
-EXPORT_SYMBOL(aes_encrypt_new);
+EXPORT_SYMBOL(aes_encrypt);

-void aes_decrypt_new(const struct aes_key *key, u8 out[AES_BLOCK_SIZE],
-		     const u8 in[AES_BLOCK_SIZE])
+void aes_decrypt(const struct aes_key *key, u8 out[AES_BLOCK_SIZE],
+		 const u8 in[AES_BLOCK_SIZE])
{
	aes_decrypt_arch(key, out, in);
}
-EXPORT_SYMBOL(aes_decrypt_new);
+EXPORT_SYMBOL(aes_decrypt);
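End to end, the new API is prepare-then-use. A hypothetical round-trip; aes_preparekey() is an assumed name for the full-key variant (only aes_prepareenckey() is visible in these hunks), assumed to return 0 on success like the old aes_expandkey():

	static int aes_roundtrip_demo(const u8 *raw_key, const u8 pt[AES_BLOCK_SIZE])
	{
		struct aes_key key;
		u8 ct[AES_BLOCK_SIZE], pt2[AES_BLOCK_SIZE];
		int err;

		err = aes_preparekey(&key, raw_key, AES_KEYSIZE_128);	/* hypothetical */
		if (err)
			return err;

		aes_encrypt(&key, ct, pt);	/* full key accepted via aes_encrypt_arg */
		aes_decrypt(&key, pt2, ct);
		return crypto_memneq(pt, pt2, AES_BLOCK_SIZE) ? -EINVAL : 0;
	}
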
#ifdef aes_mod_init_arch
static int __init aes_mod_init(void)