#include "crypto_int.h"
#include "aes.h"
-#define CHECK_SIZES 0
-
/*
 * Private per-key data to cache after first generation. We don't
 * want to mess with the imported AES implementation too much, so
 * we'll just use two copies of its context, one for encryption and
 * one for decryption, and use the #rounds field as a flag for
 * whether we've initialized each half.
 */
struct aes_key_info_cache {
aes_ctx enc_ctx, dec_ctx;
};
#define CACHE(X) ((struct aes_key_info_cache *)((X)->cache))
+/* out = out ^ in */
static inline void
-enc(unsigned char *out, const unsigned char *in, aes_ctx *ctx)
+xorblock(const unsigned char *in, unsigned char *out)
+{
+ size_t q;
+
+ for (q = 0; q < BLOCK_SIZE; q += 4)
+ store_32_n(load_32_n(out + q) ^ load_32_n(in + q), out + q);
+}
+
+static inline krb5_error_code
+init_key_cache(krb5_key key)
{
- if (aes_enc_blk(in, out, ctx) != aes_good)
+ if (key->cache != NULL)
+ return 0;
+ key->cache = malloc(sizeof(struct aes_key_info_cache));
+ if (key->cache == NULL)
+ return ENOMEM;
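+ /* A zero n_rnd marks each context as not yet key-expanded. */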
+ CACHE(key)->enc_ctx.n_rnd = CACHE(key)->dec_ctx.n_rnd = 0;
+ return 0;
+}
+
+static inline void
+expand_enc_key(krb5_key key)
+{
+ if (CACHE(key)->enc_ctx.n_rnd)
+ return;
+ if (aes_enc_key(key->keyblock.contents, key->keyblock.length,
+ &CACHE(key)->enc_ctx) != aes_good)
abort();
}
static inline void
-dec(unsigned char *out, const unsigned char *in, aes_ctx *ctx)
+expand_dec_key(krb5_key key)
{
- if (aes_dec_blk(in, out, ctx) != aes_good)
+ if (CACHE(key)->dec_ctx.n_rnd)
+ return;
+ if (aes_dec_key(key->keyblock.contents, key->keyblock.length,
+ &CACHE(key)->dec_ctx) != aes_good)
abort();
}
-static void
-xorblock(unsigned char *out, const unsigned char *in)
+/* CBC encrypt nblocks blocks of data in place, using and updating iv. */
+static inline void
+cbc_enc(krb5_key key, unsigned char *data, size_t nblocks, unsigned char *iv)
{
- int z;
- for (z = 0; z < BLOCK_SIZE/4; z++) {
- unsigned char *outptr = &out[z*4];
- const unsigned char *inptr = &in[z*4];
- /*
- * Use unaligned accesses. On x86, this will probably still be faster
- * than multiple byte accesses for unaligned data, and for aligned data
- * should be far better. (One test indicated about 2.4% faster
- * encryption for 1024-byte messages.)
- *
- * If some other CPU has really slow unaligned-word or byte accesses,
- * perhaps this function (or the load/store helpers?) should test for
- * alignment first.
- *
- * If byte accesses are faster than unaligned words, we may need to
- * conditionalize on CPU type, as that may be hard to determine
- * automatically.
- */
- store_32_n (load_32_n(outptr) ^ load_32_n(inptr), outptr);
+ for (; nblocks > 0; nblocks--, data += BLOCK_SIZE) {
+ xorblock(iv, data);
+ if (aes_enc_blk(data, data, &CACHE(key)->enc_ctx) != aes_good)
+ abort();
+ memcpy(iv, data, BLOCK_SIZE);
}
}
+/* CBC decrypt nblocks blocks of data in place, using and updating iv. */
+static inline void
+cbc_dec(krb5_key key, unsigned char *data, size_t nblocks, unsigned char *iv)
+{
+ unsigned char last_cipherblock[BLOCK_SIZE];
+
+ assert(nblocks > 0);
+ data += (nblocks - 1) * BLOCK_SIZE;
+ memcpy(last_cipherblock, data, BLOCK_SIZE);
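+ /* Walk the blocks in reverse order so each block's preceding ciphertext is
+ * still intact in the buffer when it is XORed in; this is what allows
+ * decrypting in place. */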
+ for (; nblocks > 0; nblocks--, data -= BLOCK_SIZE) {
+ if (aes_dec_blk(data, data, &CACHE(key)->dec_ctx) != aes_good)
+ abort();
+ xorblock(nblocks == 1 ? iv : data - BLOCK_SIZE, data);
+ }
+ memcpy(iv, last_cipherblock, BLOCK_SIZE);
+}
+
krb5_error_code
krb5int_aes_encrypt(krb5_key key, const krb5_data *ivec, krb5_crypto_iov *data,
size_t num_data)
{
- unsigned char tmp[BLOCK_SIZE], tmp2[BLOCK_SIZE];
- size_t input_length, nblocks, blockno;
+ unsigned char iv[BLOCK_SIZE], block[BLOCK_SIZE];
+ unsigned char blockN2[BLOCK_SIZE], blockN1[BLOCK_SIZE];
+ size_t input_length, nblocks, ncontig;
struct iov_cursor cursor;
- if (key->cache == NULL) {
- key->cache = malloc(sizeof(struct aes_key_info_cache));
- if (key->cache == NULL)
- return ENOMEM;
- CACHE(key)->enc_ctx.n_rnd = CACHE(key)->dec_ctx.n_rnd = 0;
- }
- if (CACHE(key)->enc_ctx.n_rnd == 0) {
- if (aes_enc_key(key->keyblock.contents, key->keyblock.length,
- &CACHE(key)->enc_ctx)
- != aes_good)
- abort();
- }
- if (ivec != NULL)
- memcpy(tmp, ivec->data, BLOCK_SIZE);
- else
- memset(tmp, 0, BLOCK_SIZE);
+ if (init_key_cache(key))
+ return ENOMEM;
+ expand_enc_key(key);
k5_iov_cursor_init(&cursor, data, num_data, BLOCK_SIZE, FALSE);
input_length = iov_total_length(data, num_data, FALSE);
nblocks = (input_length + BLOCK_SIZE - 1) / BLOCK_SIZE;
if (nblocks == 1) {
- k5_iov_cursor_get(&cursor, tmp);
- enc(tmp2, tmp, &CACHE(key)->enc_ctx);
- k5_iov_cursor_put(&cursor, tmp2);
- } else if (nblocks > 1) {
- unsigned char blockN2[BLOCK_SIZE]; /* second last */
- unsigned char blockN1[BLOCK_SIZE]; /* last block */
-
- for (blockno = 0; blockno < nblocks - 2; blockno++) {
- unsigned char block[BLOCK_SIZE];
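+ /* A single-block message is encrypted with an all-zero IV; the supplied
+ * ivec is not used here. */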
+ k5_iov_cursor_get(&cursor, block);
+ memset(iv, 0, BLOCK_SIZE);
+ cbc_enc(key, block, 1, iv);
+ k5_iov_cursor_put(&cursor, block);
+ return 0;
+ }
+ if (ivec != NULL)
+ memcpy(iv, ivec->data, BLOCK_SIZE);
+ else
+ memset(iv, 0, BLOCK_SIZE);
+
+ while (nblocks > 2) {
+ ncontig = iov_cursor_contig_blocks(&cursor);
+ if (ncontig > 0) {
+ /* Encrypt a series of contiguous blocks in place if we can, but
+ * don't touch the last two blocks. */
+ ncontig = (ncontig > nblocks - 2) ? nblocks - 2 : ncontig;
+ cbc_enc(key, iov_cursor_ptr(&cursor), ncontig, iv);
+ iov_cursor_advance(&cursor, ncontig);
+ nblocks -= ncontig;
+ } else {
k5_iov_cursor_get(&cursor, block);
- xorblock(tmp, block);
- enc(block, tmp, &CACHE(key)->enc_ctx);
+ cbc_enc(key, block, 1, iv);
k5_iov_cursor_put(&cursor, block);
-
- /* Set up for next block. */
- memcpy(tmp, block, BLOCK_SIZE);
+ nblocks--;
}
+ }
- /* Do final CTS step for last two blocks (the second of which
- may or may not be incomplete). */
-
- /* First, get the last two blocks */
- k5_iov_cursor_get(&cursor, blockN2);
- k5_iov_cursor_get(&cursor, blockN1);
-
- /* Encrypt second last block */
- xorblock(tmp, blockN2);
- enc(tmp2, tmp, &CACHE(key)->enc_ctx);
- memcpy(blockN2, tmp2, BLOCK_SIZE); /* blockN2 now contains first block */
- memcpy(tmp, tmp2, BLOCK_SIZE);
-
- /* Encrypt last block */
- xorblock(tmp, blockN1);
- enc(tmp2, tmp, &CACHE(key)->enc_ctx);
- memcpy(blockN1, tmp2, BLOCK_SIZE);
-
- /* Put the last two blocks back into the iovec (reverse order) */
- k5_iov_cursor_put(&cursor, blockN1);
- k5_iov_cursor_put(&cursor, blockN2);
+ /* Encrypt the last two blocks and put them back in reverse order, possibly
+ * truncating the encrypted second-to-last block. */
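+ /* This is the ciphertext-stealing step: the ciphertext stays the same length
+ * as the plaintext, so no padding is ever written. */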
+ k5_iov_cursor_get(&cursor, blockN2);
+ k5_iov_cursor_get(&cursor, blockN1);
+ cbc_enc(key, blockN2, 1, iv);
+ cbc_enc(key, blockN1, 1, iv);
+ k5_iov_cursor_put(&cursor, blockN1);
+ k5_iov_cursor_put(&cursor, blockN2);
- if (ivec != NULL)
- memcpy(ivec->data, blockN1, BLOCK_SIZE);
- }
+ if (ivec != NULL)
+ memcpy(ivec->data, iv, BLOCK_SIZE);
return 0;
}
krb5_error_code
krb5int_aes_decrypt(krb5_key key, const krb5_data *ivec, krb5_crypto_iov *data,
size_t num_data)
{
- unsigned char tmp[BLOCK_SIZE], tmp2[BLOCK_SIZE], tmp3[BLOCK_SIZE];
- size_t input_length, nblocks, blockno;
+ unsigned char iv[BLOCK_SIZE], dummy_iv[BLOCK_SIZE], block[BLOCK_SIZE];
+ unsigned char blockN2[BLOCK_SIZE], blockN1[BLOCK_SIZE];
+ size_t input_length, last_len, nblocks, ncontig;
struct iov_cursor cursor;
- CHECK_SIZES;
-
- if (key->cache == NULL) {
- key->cache = malloc(sizeof(struct aes_key_info_cache));
- if (key->cache == NULL)
- return ENOMEM;
- CACHE(key)->enc_ctx.n_rnd = CACHE(key)->dec_ctx.n_rnd = 0;
- }
- if (CACHE(key)->dec_ctx.n_rnd == 0) {
- if (aes_dec_key(key->keyblock.contents, key->keyblock.length,
- &CACHE(key)->dec_ctx) != aes_good)
- abort();
- }
-
- if (ivec != NULL)
- memcpy(tmp, ivec->data, BLOCK_SIZE);
- else
- memset(tmp, 0, BLOCK_SIZE);
+ if (init_key_cache(key))
+ return ENOMEM;
+ expand_dec_key(key);
k5_iov_cursor_init(&cursor, data, num_data, BLOCK_SIZE, FALSE);
input_length = iov_total_length(data, num_data, FALSE);
nblocks = (input_length + BLOCK_SIZE - 1) / BLOCK_SIZE;
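+ /* Length of the final, possibly partial, ciphertext block. */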
+ last_len = input_length - (nblocks - 1) * BLOCK_SIZE;
if (nblocks == 1) {
- k5_iov_cursor_get(&cursor, tmp);
- dec(tmp2, tmp, &CACHE(key)->dec_ctx);
- k5_iov_cursor_put(&cursor, tmp2);
- } else if (nblocks > 1) {
- unsigned char blockN2[BLOCK_SIZE]; /* second last */
- unsigned char blockN1[BLOCK_SIZE]; /* last block */
-
- for (blockno = 0; blockno < nblocks - 2; blockno++) {
- unsigned char block[BLOCK_SIZE];
+ k5_iov_cursor_get(&cursor, block);
+ memset(iv, 0, BLOCK_SIZE);
+ cbc_dec(key, block, 1, iv);
+ k5_iov_cursor_put(&cursor, block);
+ return 0;
+ }
+ if (ivec != NULL)
+ memcpy(iv, ivec->data, BLOCK_SIZE);
+ else
+ memset(iv, 0, BLOCK_SIZE);
+
+ while (nblocks > 2) {
+ ncontig = iov_cursor_contig_blocks(&cursor);
+ if (ncontig > 0) {
+ /* Decrypt a series of contiguous blocks in place if we can, but
+ * don't touch the last two blocks. */
+ ncontig = (ncontig > nblocks - 2) ? nblocks - 2 : ncontig;
+ cbc_dec(key, iov_cursor_ptr(&cursor), ncontig, iv);
+ iov_cursor_advance(&cursor, ncontig);
+ nblocks -= ncontig;
+ } else {
k5_iov_cursor_get(&cursor, block);
- memcpy(tmp2, block, BLOCK_SIZE);
- dec(block, block, &CACHE(key)->dec_ctx);
- xorblock(block, tmp);
- memcpy(tmp, tmp2, BLOCK_SIZE);
+ cbc_dec(key, block, 1, iv);
k5_iov_cursor_put(&cursor, block);
+ nblocks--;
}
-
- /* Do last two blocks, the second of which (next-to-last block
- of plaintext) may be incomplete. */
-
- /* First, get the last two encrypted blocks */
- k5_iov_cursor_get(&cursor, blockN2);
- k5_iov_cursor_get(&cursor, blockN1);
-
- if (ivec != NULL)
- memcpy(ivec->data, blockN2, BLOCK_SIZE);
-
- /* Decrypt second last block */
- dec(tmp2, blockN2, &CACHE(key)->dec_ctx);
- /* Set tmp2 to last (possibly partial) plaintext block, and
- save it. */
- xorblock(tmp2, blockN1);
- memcpy(blockN2, tmp2, BLOCK_SIZE);
-
- /* Maybe keep the trailing part, and copy in the last
- ciphertext block. */
- input_length %= BLOCK_SIZE;
- memcpy(tmp2, blockN1, input_length ? input_length : BLOCK_SIZE);
- dec(tmp3, tmp2, &CACHE(key)->dec_ctx);
- xorblock(tmp3, tmp);
- memcpy(blockN1, tmp3, BLOCK_SIZE);
-
- /* Put the last two blocks back into the iovec */
- k5_iov_cursor_put(&cursor, blockN1);
- k5_iov_cursor_put(&cursor, blockN2);
}
+ /* Get the last two ciphertext blocks. Save the first as the new iv. */
+ k5_iov_cursor_get(&cursor, blockN2);
+ k5_iov_cursor_get(&cursor, blockN1);
+ if (ivec != NULL)
+ memcpy(ivec->data, blockN2, BLOCK_SIZE);
+
+ /* Decrypt the second-to-last ciphertext block, using the final ciphertext
+ * block as the CBC IV. This produces the final plaintext block. */
+ memcpy(dummy_iv, blockN1, sizeof(dummy_iv));
+ cbc_dec(key, blockN2, 1, dummy_iv);
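+ /* blockN2 now holds the final plaintext block; its first last_len bytes are
+ * message data and its trailing bytes recover the truncated portion of the
+ * last ciphertext block. */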
+
+ /* Use the final bytes of the decrypted plaintext to pad the last ciphertext
+ * block, and decrypt it to produce the second-to-last plaintext block. */
+ memcpy(blockN1 + last_len, blockN2 + last_len, BLOCK_SIZE - last_len);
+ cbc_dec(key, blockN1, 1, iv);
+
+ /* Put the last two plaintext blocks back into the iovec. */
+ k5_iov_cursor_put(&cursor, blockN1);
+ k5_iov_cursor_put(&cursor, blockN2);
+
return 0;
}
/*
* Private per-key data to cache after first generation. We don't want to mess
- * with the imported Cemallia implementation too much, so we'll just use two
+ * with the imported Camellia implementation too much, so we'll just use two
* copies of its context, one for encryption and one for decryption, and use
- * the #rounds field as a flag for whether we've initialized each half.
+ * the keybitlen field as a flag for whether we've initialized each half.
*/
struct camellia_key_info_cache {
camellia_ctx enc_ctx, dec_ctx;
};
#define CACHE(X) ((struct camellia_key_info_cache *)((X)->cache))
+/* out = out ^ in */
static inline void
-enc(unsigned char *out, const unsigned char *in, camellia_ctx *ctx)
+xorblock(const unsigned char *in, unsigned char *out)
{
- if (camellia_enc_blk(in, out, ctx) != camellia_good)
+ size_t q;
+
+ for (q = 0; q < BLOCK_SIZE; q += 4)
+ store_32_n(load_32_n(out + q) ^ load_32_n(in + q), out + q);
+}
+
+static inline krb5_error_code
+init_key_cache(krb5_key key)
+{
+ if (key->cache != NULL)
+ return 0;
+ key->cache = malloc(sizeof(struct camellia_key_info_cache));
+ if (key->cache == NULL)
+ return ENOMEM;
+ CACHE(key)->enc_ctx.keybitlen = CACHE(key)->dec_ctx.keybitlen = 0;
+ return 0;
+}
+
+static inline void
+expand_enc_key(krb5_key key)
+{
+ if (CACHE(key)->enc_ctx.keybitlen)
+ return;
+ if (camellia_enc_key(key->keyblock.contents, key->keyblock.length,
+ &CACHE(key)->enc_ctx) != camellia_good)
abort();
}
static inline void
-dec(unsigned char *out, const unsigned char *in, camellia_ctx *ctx)
+expand_dec_key(krb5_key key)
{
- if (camellia_dec_blk(in, out, ctx) != camellia_good)
+ if (CACHE(key)->dec_ctx.keybitlen)
+ return;
+ if (camellia_dec_key(key->keyblock.contents, key->keyblock.length,
+ &CACHE(key)->dec_ctx) != camellia_good)
abort();
}
-static void
-xorblock(unsigned char *out, const unsigned char *in)
+/* CBC encrypt nblocks blocks of data in place, using and updating iv. */
+static inline void
+cbc_enc(krb5_key key, unsigned char *data, size_t nblocks, unsigned char *iv)
+{
+ for (; nblocks > 0; nblocks--, data += BLOCK_SIZE) {
+ xorblock(iv, data);
+ if (camellia_enc_blk(data, data, &CACHE(key)->enc_ctx) !=
+ camellia_good)
+ abort();
+ memcpy(iv, data, BLOCK_SIZE);
+ }
+}
+
+/* CBC decrypt nblocks blocks of data in place, using and updating iv. */
+static inline void
+cbc_dec(krb5_key key, unsigned char *data, size_t nblocks, unsigned char *iv)
{
- int z;
- for (z = 0; z < BLOCK_SIZE/4; z++) {
- unsigned char *outptr = &out[z*4];
- const unsigned char *inptr = &in[z*4];
- /*
- * Use unaligned accesses. On x86, this will probably still be faster
- * than multiple byte accesses for unaligned data, and for aligned data
- * should be far better. (One test indicated about 2.4% faster
- * encryption for 1024-byte messages.)
- *
- * If some other CPU has really slow unaligned-word or byte accesses,
- * perhaps this function (or the load/store helpers?) should test for
- * alignment first.
- *
- * If byte accesses are faster than unaligned words, we may need to
- * conditionalize on CPU type, as that may be hard to determine
- * automatically.
- */
- store_32_n(load_32_n(outptr) ^ load_32_n(inptr), outptr);
+ unsigned char last_cipherblock[BLOCK_SIZE];
+
+ assert(nblocks > 0);
+ data += (nblocks - 1) * BLOCK_SIZE;
+ memcpy(last_cipherblock, data, BLOCK_SIZE);
+ for (; nblocks > 0; nblocks--, data -= BLOCK_SIZE) {
+ if (camellia_dec_blk(data, data, &CACHE(key)->dec_ctx) !=
+ camellia_good)
+ abort();
+ xorblock(nblocks == 1 ? iv : data - BLOCK_SIZE, data);
}
+ memcpy(iv, last_cipherblock, BLOCK_SIZE);
}
static krb5_error_code
krb5int_camellia_encrypt(krb5_key key, const krb5_data *ivec,
krb5_crypto_iov *data, size_t num_data)
{
- unsigned char tmp[BLOCK_SIZE], tmp2[BLOCK_SIZE];
- size_t input_length, nblocks, blockno;
+ unsigned char iv[BLOCK_SIZE], block[BLOCK_SIZE];
+ unsigned char blockN2[BLOCK_SIZE], blockN1[BLOCK_SIZE];
+ size_t input_length, nblocks, ncontig;
struct iov_cursor cursor;
- if (key->cache == NULL) {
- key->cache = malloc(sizeof(struct camellia_key_info_cache));
- if (key->cache == NULL)
- return ENOMEM;
- CACHE(key)->enc_ctx.keybitlen = CACHE(key)->dec_ctx.keybitlen = 0;
- }
- if (CACHE(key)->enc_ctx.keybitlen == 0) {
- if (camellia_enc_key(key->keyblock.contents, key->keyblock.length,
- &CACHE(key)->enc_ctx) != camellia_good)
- abort();
- }
- if (ivec != NULL)
- memcpy(tmp, ivec->data, BLOCK_SIZE);
- else
- memset(tmp, 0, BLOCK_SIZE);
+ if (init_key_cache(key))
+ return ENOMEM;
+ expand_enc_key(key);
k5_iov_cursor_init(&cursor, data, num_data, BLOCK_SIZE, FALSE);
input_length = iov_total_length(data, num_data, FALSE);
nblocks = (input_length + BLOCK_SIZE - 1) / BLOCK_SIZE;
if (nblocks == 1) {
- k5_iov_cursor_get(&cursor, tmp);
- enc(tmp2, tmp, &CACHE(key)->enc_ctx);
- k5_iov_cursor_put(&cursor, tmp2);
- } else if (nblocks > 1) {
- unsigned char blockN2[BLOCK_SIZE]; /* second last */
- unsigned char blockN1[BLOCK_SIZE]; /* last block */
-
- for (blockno = 0; blockno < nblocks - 2; blockno++) {
- unsigned char block[BLOCK_SIZE];
+ k5_iov_cursor_get(&cursor, block);
+ memset(iv, 0, BLOCK_SIZE);
+ cbc_enc(key, block, 1, iv);
+ k5_iov_cursor_put(&cursor, block);
+ return 0;
+ }
+ if (ivec != NULL)
+ memcpy(iv, ivec->data, BLOCK_SIZE);
+ else
+ memset(iv, 0, BLOCK_SIZE);
+
+ while (nblocks > 2) {
+ ncontig = iov_cursor_contig_blocks(&cursor);
+ if (ncontig > 0) {
+ /* Encrypt a series of contiguous blocks in place if we can, but
+ * don't touch the last two blocks. */
+ ncontig = (ncontig > nblocks - 2) ? nblocks - 2 : ncontig;
+ cbc_enc(key, iov_cursor_ptr(&cursor), ncontig, iv);
+ iov_cursor_advance(&cursor, ncontig);
+ nblocks -= ncontig;
+ } else {
k5_iov_cursor_get(&cursor, block);
- xorblock(tmp, block);
- enc(block, tmp, &CACHE(key)->enc_ctx);
+ cbc_enc(key, block, 1, iv);
k5_iov_cursor_put(&cursor, block);
-
- /* Set up for next block. */
- memcpy(tmp, block, BLOCK_SIZE);
+ nblocks--;
}
+ }
- /* Do final CTS step for last two blocks (the second of which
- may or may not be incomplete). */
-
- /* First, get the last two blocks */
- k5_iov_cursor_get(&cursor, blockN2);
- k5_iov_cursor_get(&cursor, blockN1);
-
- /* Encrypt second last block */
- xorblock(tmp, blockN2);
- enc(tmp2, tmp, &CACHE(key)->enc_ctx);
- memcpy(blockN2, tmp2, BLOCK_SIZE); /* blockN2 now contains first block */
- memcpy(tmp, tmp2, BLOCK_SIZE);
-
- /* Encrypt last block */
- xorblock(tmp, blockN1);
- enc(tmp2, tmp, &CACHE(key)->enc_ctx);
- memcpy(blockN1, tmp2, BLOCK_SIZE);
-
- /* Put the last two blocks back into the iovec (reverse order) */
- k5_iov_cursor_put(&cursor, blockN1);
- k5_iov_cursor_put(&cursor, blockN2);
+ /* Encrypt the last two blocks and put them back in reverse order, possibly
+ * truncating the encrypted second-to-last block. */
+ k5_iov_cursor_get(&cursor, blockN2);
+ k5_iov_cursor_get(&cursor, blockN1);
+ cbc_enc(key, blockN2, 1, iv);
+ cbc_enc(key, blockN1, 1, iv);
+ k5_iov_cursor_put(&cursor, blockN1);
+ k5_iov_cursor_put(&cursor, blockN2);
- if (ivec != NULL)
- memcpy(ivec->data, blockN1, BLOCK_SIZE);
- }
+ if (ivec != NULL)
+ memcpy(ivec->data, iv, BLOCK_SIZE);
return 0;
}
static krb5_error_code
krb5int_camellia_decrypt(krb5_key key, const krb5_data *ivec,
krb5_crypto_iov *data, size_t num_data)
{
- unsigned char tmp[BLOCK_SIZE], tmp2[BLOCK_SIZE], tmp3[BLOCK_SIZE];
- size_t input_length, nblocks, blockno;
+ unsigned char iv[BLOCK_SIZE], dummy_iv[BLOCK_SIZE], block[BLOCK_SIZE];
+ unsigned char blockN2[BLOCK_SIZE], blockN1[BLOCK_SIZE];
+ size_t input_length, last_len, nblocks, ncontig;
struct iov_cursor cursor;
- if (key->cache == NULL) {
- key->cache = malloc(sizeof(struct camellia_key_info_cache));
- if (key->cache == NULL)
- return ENOMEM;
- CACHE(key)->enc_ctx.keybitlen = CACHE(key)->dec_ctx.keybitlen = 0;
- }
- if (CACHE(key)->dec_ctx.keybitlen == 0) {
- if (camellia_dec_key(key->keyblock.contents, key->keyblock.length,
- &CACHE(key)->dec_ctx) != camellia_good)
- abort();
- }
-
- if (ivec != NULL)
- memcpy(tmp, ivec->data, BLOCK_SIZE);
- else
- memset(tmp, 0, BLOCK_SIZE);
+ if (init_key_cache(key))
+ return ENOMEM;
+ expand_dec_key(key);
k5_iov_cursor_init(&cursor, data, num_data, BLOCK_SIZE, FALSE);
input_length = iov_total_length(data, num_data, FALSE);
nblocks = (input_length + BLOCK_SIZE - 1) / BLOCK_SIZE;
+ last_len = input_length - (nblocks - 1) * BLOCK_SIZE;
if (nblocks == 1) {
- k5_iov_cursor_get(&cursor, tmp);
- dec(tmp2, tmp, &CACHE(key)->dec_ctx);
- k5_iov_cursor_put(&cursor, tmp2);
- } else if (nblocks > 1) {
- unsigned char blockN2[BLOCK_SIZE]; /* second last */
- unsigned char blockN1[BLOCK_SIZE]; /* last block */
-
- for (blockno = 0; blockno < nblocks - 2; blockno++) {
- unsigned char block[BLOCK_SIZE];
+ k5_iov_cursor_get(&cursor, block);
+ memset(iv, 0, BLOCK_SIZE);
+ cbc_dec(key, block, 1, iv);
+ k5_iov_cursor_put(&cursor, block);
+ return 0;
+ }
+ if (ivec != NULL)
+ memcpy(iv, ivec->data, BLOCK_SIZE);
+ else
+ memset(iv, 0, BLOCK_SIZE);
+
+ while (nblocks > 2) {
+ ncontig = iov_cursor_contig_blocks(&cursor);
+ if (ncontig > 0) {
+ /* Decrypt a series of contiguous blocks in place if we can, but
+ * don't touch the last two blocks. */
+ ncontig = (ncontig > nblocks - 2) ? nblocks - 2 : ncontig;
+ cbc_dec(key, iov_cursor_ptr(&cursor), ncontig, iv);
+ iov_cursor_advance(&cursor, ncontig);
+ nblocks -= ncontig;
+ } else {
k5_iov_cursor_get(&cursor, block);
- memcpy(tmp2, block, BLOCK_SIZE);
- dec(block, block, &CACHE(key)->dec_ctx);
- xorblock(block, tmp);
- memcpy(tmp, tmp2, BLOCK_SIZE);
+ cbc_dec(key, block, 1, iv);
k5_iov_cursor_put(&cursor, block);
+ nblocks--;
}
-
- /* Do last two blocks, the second of which (next-to-last block
- of plaintext) may be incomplete. */
-
- /* First, get the last two encrypted blocks */
- k5_iov_cursor_get(&cursor, blockN2);
- k5_iov_cursor_get(&cursor, blockN1);
-
- if (ivec != NULL)
- memcpy(ivec->data, blockN2, BLOCK_SIZE);
-
- /* Decrypt second last block */
- dec(tmp2, blockN2, &CACHE(key)->dec_ctx);
- /* Set tmp2 to last (possibly partial) plaintext block, and
- save it. */
- xorblock(tmp2, blockN1);
- memcpy(blockN2, tmp2, BLOCK_SIZE);
-
- /* Maybe keep the trailing part, and copy in the last
- ciphertext block. */
- input_length %= BLOCK_SIZE;
- memcpy(tmp2, blockN1, input_length ? input_length : BLOCK_SIZE);
- dec(tmp3, tmp2, &CACHE(key)->dec_ctx);
- xorblock(tmp3, tmp);
- memcpy(blockN1, tmp3, BLOCK_SIZE);
-
- /* Put the last two blocks back into the iovec */
- k5_iov_cursor_put(&cursor, blockN1);
- k5_iov_cursor_put(&cursor, blockN2);
}
+ /* Get the last two ciphertext blocks. Save the first as the new iv. */
+ k5_iov_cursor_get(&cursor, blockN2);
+ k5_iov_cursor_get(&cursor, blockN1);
+ if (ivec != NULL)
+ memcpy(ivec->data, blockN2, BLOCK_SIZE);
+
+ /* Decrypt the second-to-last ciphertext block, using the final ciphertext
+ * block as the CBC IV. This produces the final plaintext block. */
+ memcpy(dummy_iv, blockN1, sizeof(dummy_iv));
+ cbc_dec(key, blockN2, 1, dummy_iv);
+
+ /* Use the final bytes of the decrypted plaintext to pad the last ciphertext
+ * block, and decrypt it to produce the second-to-last plaintext block. */
+ memcpy(blockN1 + last_len, blockN2 + last_len, BLOCK_SIZE - last_len);
+ cbc_dec(key, blockN1, 1, iv);
+
+ /* Put the last two plaintext blocks back into the iovec. */
+ k5_iov_cursor_put(&cursor, blockN1);
+ k5_iov_cursor_put(&cursor, blockN2);
+
return 0;
}
krb5_error_code
krb5int_camellia_cbc_mac(krb5_key key, const krb5_crypto_iov *data,
- size_t num_data, const krb5_data *iv,
+ size_t num_data, const krb5_data *ivec,
krb5_data *output)
{
- camellia_ctx ctx;
- unsigned char blockY[BLOCK_SIZE], blockB[BLOCK_SIZE];
+ unsigned char iv[BLOCK_SIZE], block[BLOCK_SIZE];
struct iov_cursor cursor;
if (output->length < BLOCK_SIZE)
return KRB5_BAD_MSIZE;
- if (camellia_enc_key(key->keyblock.contents,
- key->keyblock.length, &ctx) != camellia_good)
- abort();
+ if (init_key_cache(key))
+ return ENOMEM;
+ expand_enc_key(key);
- if (iv != NULL)
- memcpy(blockY, iv->data, BLOCK_SIZE);
+ if (ivec != NULL)
+ memcpy(iv, ivec->data, BLOCK_SIZE);
else
- memset(blockY, 0, BLOCK_SIZE);
+ memset(iv, 0, BLOCK_SIZE);
k5_iov_cursor_init(&cursor, data, num_data, BLOCK_SIZE, FALSE);
- while (k5_iov_cursor_get(&cursor, blockB)) {
- xorblock(blockB, blockY);
- if (camellia_enc_blk(blockB, blockY, &ctx) != camellia_good)
- abort();
- }
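+ /* CBC-MAC: chain every block through cbc_enc and discard the ciphertext;
+ * the final chaining value left in iv is the MAC. */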
+ while (k5_iov_cursor_get(&cursor, block))
+ cbc_enc(key, block, 1, iv);
output->length = BLOCK_SIZE;
- memcpy(output->data, blockY, BLOCK_SIZE);
+ memcpy(output->data, iv, BLOCK_SIZE);
return 0;
}