};
struct adiantum_request_ctx {
-
- /*
- * Buffer for right-hand part of data, i.e.
- *
- * P_L => P_M => C_M => C_R when encrypting, or
- * C_R => C_M => P_M => P_L when decrypting.
- *
- * Also used to build the IV for the stream cipher.
- */
- union {
- u8 bytes[XCHACHA_IV_SIZE];
- __le32 words[XCHACHA_IV_SIZE / sizeof(__le32)];
- le128 bignum; /* interpret as element of Z/(2^{128}Z) */
- } rbuf;
-
- bool enc; /* true if encrypting, false if decrypting */
-
- /*
- * The result of the Poly1305 ε-∆U hash function applied to
- * (bulk length, tweak)
- */
- le128 header_hash;
-
	/*
	 * skcipher sub-request size is unknown at compile-time, so it needs to
	 * go after the members with known sizes.
	 */

/*
* Apply the Poly1305 ε-∆U hash function to (bulk length, tweak) and save the
- * result to rctx->header_hash. This is the calculation
+ * result to @out. This is the calculation
*
* H_T ← Poly1305_{K_T}(bin_{128}(|L|) || T)
*
 * from the procedure in section 6.4 of the Adiantum paper.  The resulting value
 * is reused in both the first and second hash steps.  Specifically, it's added
 * to the result of an independently keyed ε-∆U hash function (for equal length
 * inputs only) taken over the left-hand part (the "bulk") of the message, to
* give the overall Adiantum hash of the (tweak, left-hand part) pair.
*/
-static void adiantum_hash_header(struct skcipher_request *req)
+static void adiantum_hash_header(struct skcipher_request *req, le128 *out)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
- struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
struct {
		__le64 message_bits;
		__le64 padding;
	} header = {
		.message_bits = cpu_to_le64((u64)bulk_len * 8)
	};
	struct poly1305_state state;

	poly1305_core_init(&state);
	BUILD_BUG_ON(sizeof(header) % POLY1305_BLOCK_SIZE != 0);
	poly1305_core_blocks(&state, &tctx->header_hash_key,
			     &header, sizeof(header) / POLY1305_BLOCK_SIZE, 1);

	BUILD_BUG_ON(TWEAK_SIZE % POLY1305_BLOCK_SIZE != 0);
poly1305_core_blocks(&state, &tctx->header_hash_key, req->iv,
TWEAK_SIZE / POLY1305_BLOCK_SIZE, 1);
- poly1305_core_emit(&state, NULL, &rctx->header_hash);
+ poly1305_core_emit(&state, NULL, out);
}
/* Pass the next NH hash value through Poly1305 */
nhpoly1305_final(&rctx->u.hash_ctx, tctx, out);
}
-/* Continue Adiantum encryption/decryption after the stream cipher step */
-static int adiantum_finish(struct skcipher_request *req)
+static int adiantum_crypt(struct skcipher_request *req, bool enc)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
- struct scatterlist *dst = req->dst;
- le128 digest;
-
- /* If decrypting, decrypt C_M with the block cipher to get P_M */
- if (!rctx->enc)
- crypto_cipher_decrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
- rctx->rbuf.bytes);
-
+ struct scatterlist *src = req->src, *dst = req->dst;
/*
- * Second hash step
- * enc: C_R = C_M - H_{K_H}(T, C_L)
- * dec: P_R = P_M - H_{K_H}(T, P_L)
+ * Buffer for right-hand part of data, i.e.
+ *
+ * P_L => P_M => C_M => C_R when encrypting, or
+ * C_R => C_M => P_M => P_L when decrypting.
+ *
+ * Also used to build the IV for the stream cipher.
*/
- le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
- if (dst->length >= req->cryptlen &&
- dst->offset + req->cryptlen <= PAGE_SIZE) {
- /* Fast path for single-page destination */
- struct page *page = sg_page(dst);
- void *virt = kmap_local_page(page) + dst->offset;
-
- nhpoly1305_init(&rctx->u.hash_ctx);
- nhpoly1305_update(&rctx->u.hash_ctx, tctx, virt, bulk_len);
- nhpoly1305_final(&rctx->u.hash_ctx, tctx, &digest);
- le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
- memcpy(virt + bulk_len, &rctx->rbuf.bignum, sizeof(le128));
- flush_dcache_page(page);
- kunmap_local(virt);
- } else {
- /* Slow path that works for any destination scatterlist */
- adiantum_hash_message(req, dst, &digest);
- le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
- memcpy_to_sglist(dst, bulk_len, &rctx->rbuf.bignum,
- sizeof(le128));
- }
- return 0;
-}
-
-static void adiantum_streamcipher_done(void *data, int err)
-{
- struct skcipher_request *req = data;
-
- if (!err)
- err = adiantum_finish(req);
-
- skcipher_request_complete(req, err);
-}
-
-static int adiantum_crypt(struct skcipher_request *req, bool enc)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
- struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
- const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
- struct scatterlist *src = req->src;
+ union {
+ u8 bytes[XCHACHA_IV_SIZE];
+ __le32 words[XCHACHA_IV_SIZE / sizeof(__le32)];
+ le128 bignum; /* interpret as element of Z/(2^{128}Z) */
+ } rbuf;
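+
+	/*
+	 * header_hash: the Poly1305 ε-∆U hash of (bulk length, tweak).
+	 * msg_hash: the NHPoly1305 ε-∆U hash of the left-hand "bulk" part.
+	 */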
+ le128 header_hash, msg_hash;
unsigned int stream_len;
- le128 digest;
+ int err;
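+	/* The message must include the 16-byte right-hand block */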
if (req->cryptlen < BLOCKCIPHER_BLOCK_SIZE)
return -EINVAL;
- rctx->enc = enc;
-
/*
* First hash step
* enc: P_M = P_R + H_{K_H}(T, P_L)
* dec: C_M = C_R + H_{K_H}(T, C_L)
*/
- adiantum_hash_header(req);
+ adiantum_hash_header(req, &header_hash);
if (src->length >= req->cryptlen &&
src->offset + req->cryptlen <= PAGE_SIZE) {
		/* Fast path for single-page source */
		struct page *page = sg_page(src);
		void *virt = kmap_local_page(page) + src->offset;

nhpoly1305_init(&rctx->u.hash_ctx);
nhpoly1305_update(&rctx->u.hash_ctx, tctx, virt, bulk_len);
- nhpoly1305_final(&rctx->u.hash_ctx, tctx, &digest);
- memcpy(&rctx->rbuf.bignum, virt + bulk_len, sizeof(le128));
+ nhpoly1305_final(&rctx->u.hash_ctx, tctx, &msg_hash);
+ memcpy(&rbuf.bignum, virt + bulk_len, sizeof(le128));
kunmap_local(virt);
} else {
/* Slow path that works for any source scatterlist */
- adiantum_hash_message(req, src, &digest);
- memcpy_from_sglist(&rctx->rbuf.bignum, src, bulk_len,
- sizeof(le128));
+ adiantum_hash_message(req, src, &msg_hash);
+ memcpy_from_sglist(&rbuf.bignum, src, bulk_len, sizeof(le128));
}
- le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
- le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
+ le128_add(&rbuf.bignum, &rbuf.bignum, &header_hash);
+ le128_add(&rbuf.bignum, &rbuf.bignum, &msg_hash);
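+	/* rbuf.bignum now holds P_M (encrypting) or C_M (decrypting) */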
/* If encrypting, encrypt P_M with the block cipher to get C_M */
if (enc)
- crypto_cipher_encrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
- rctx->rbuf.bytes);
+ crypto_cipher_encrypt_one(tctx->blockcipher, rbuf.bytes,
+ rbuf.bytes);
/* Initialize the rest of the XChaCha IV (first part is C_M) */
BUILD_BUG_ON(BLOCKCIPHER_BLOCK_SIZE != 16);
BUILD_BUG_ON(XCHACHA_IV_SIZE != 32); /* nonce || stream position */
- rctx->rbuf.words[4] = cpu_to_le32(1);
- rctx->rbuf.words[5] = 0;
- rctx->rbuf.words[6] = 0;
- rctx->rbuf.words[7] = 0;
+ rbuf.words[4] = cpu_to_le32(1);
+ rbuf.words[5] = 0;
+ rbuf.words[6] = 0;
+ rbuf.words[7] = 0;
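+	/*
+	 * rbuf now holds the full XChaCha IV: C_M (bytes 0-15) and the
+	 * little-endian constant 1 (bytes 16-23) form the 24-byte nonce,
+	 * and bytes 24-31 are the 64-bit stream position, starting at 0.
+	 */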
/*
	 * XChaCha needs to be done on all the data except the last 16 bytes;
	 * for disk encryption that usually means 4080 or 496 bytes.  But ChaCha
	 * implementations tend to be most efficient when passed a whole number
	 * of 64-byte ChaCha blocks, or sometimes even a multiple of 256 bytes.
	 * And here it doesn't matter whether the last 16 bytes are written to,
	 * as the second hash step will overwrite them.  Thus, round the XChaCha
	 * length up to the next 64-byte boundary if possible.
	 */
	stream_len = bulk_len;
	if (round_up(stream_len, CHACHA_BLOCK_SIZE) <= req->cryptlen)
		stream_len = round_up(stream_len, CHACHA_BLOCK_SIZE);

skcipher_request_set_tfm(&rctx->u.streamcipher_req, tctx->streamcipher);
skcipher_request_set_crypt(&rctx->u.streamcipher_req, req->src,
- req->dst, stream_len, &rctx->rbuf);
+ req->dst, stream_len, &rbuf);
skcipher_request_set_callback(&rctx->u.streamcipher_req,
- req->base.flags,
- adiantum_streamcipher_done, req);
- return crypto_skcipher_encrypt(&rctx->u.streamcipher_req) ?:
- adiantum_finish(req);
+ req->base.flags, NULL, NULL);
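+	/*
+	 * The stream cipher is guaranteed to be synchronous (see
+	 * adiantum_create()), so no completion callback is needed and the
+	 * request is done when crypto_skcipher_encrypt() returns.
+	 */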
+ err = crypto_skcipher_encrypt(&rctx->u.streamcipher_req);
+ if (err)
+ return err;
+
+ /* If decrypting, decrypt C_M with the block cipher to get P_M */
+ if (!enc)
+ crypto_cipher_decrypt_one(tctx->blockcipher, rbuf.bytes,
+ rbuf.bytes);
+
+ /*
+ * Second hash step
+ * enc: C_R = C_M - H_{K_H}(T, C_L)
+ * dec: P_R = P_M - H_{K_H}(T, P_L)
+ */
+ le128_sub(&rbuf.bignum, &rbuf.bignum, &header_hash);
+ if (dst->length >= req->cryptlen &&
+ dst->offset + req->cryptlen <= PAGE_SIZE) {
+ /* Fast path for single-page destination */
+ struct page *page = sg_page(dst);
+ void *virt = kmap_local_page(page) + dst->offset;
+
+ nhpoly1305_init(&rctx->u.hash_ctx);
+ nhpoly1305_update(&rctx->u.hash_ctx, tctx, virt, bulk_len);
+ nhpoly1305_final(&rctx->u.hash_ctx, tctx, &msg_hash);
+ le128_sub(&rbuf.bignum, &rbuf.bignum, &msg_hash);
+ memcpy(virt + bulk_len, &rbuf.bignum, sizeof(le128));
+ flush_dcache_page(page);
+ kunmap_local(virt);
+ } else {
+ /* Slow path that works for any destination scatterlist */
+ adiantum_hash_message(req, dst, &msg_hash);
+ le128_sub(&rbuf.bignum, &rbuf.bignum, &msg_hash);
+ memcpy_to_sglist(dst, bulk_len, &rbuf.bignum, sizeof(le128));
+ }
+ return 0;
}
static int adiantum_encrypt(struct skcipher_request *req)
{
	return adiantum_crypt(req, true);
}
/* Stream cipher, e.g. "xchacha12" */
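+	/*
+	 * Setting CRYPTO_ALG_ASYNC in the mask (with the type bit clear)
+	 * restricts the match to synchronous implementations, which lets
+	 * adiantum_crypt() run the stream cipher inline with no callback.
+	 */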
err = crypto_grab_skcipher(&ictx->streamcipher_spawn,
skcipher_crypto_instance(inst),
- crypto_attr_alg_name(tb[1]), 0, mask);
+ crypto_attr_alg_name(tb[1]), 0,
+ mask | CRYPTO_ALG_ASYNC /* sync only */);
if (err)
goto err_free_inst;
streamcipher_alg = crypto_spawn_skcipher_alg_common(&ictx->streamcipher_spawn);