*
* - Stream cipher: XChaCha12 or XChaCha20
* - Block cipher: any with a 128-bit block size and 256-bit key
- *
- * This implementation doesn't currently allow other ε-∆U hash functions, i.e.
- * HPolyC is not supported. This is because Adiantum is ~20% faster than HPolyC
- * but still provably as secure, and also the ε-∆U hash function of HBSH is
- * formally defined to take two inputs (tweak, message) which makes it difficult
- * to wrap with the crypto_shash API. Rather, some details need to be handled
- * here. Nevertheless, if needed in the future, support for other ε-∆U hash
- * functions could be added here.
*/
#include <crypto/b128ops.h>
#include <crypto/chacha.h>
#include <crypto/internal/cipher.h>
-#include <crypto/internal/hash.h>
#include <crypto/internal/poly1305.h>
#include <crypto/internal/skcipher.h>
-#include <crypto/nhpoly1305.h>
+#include <crypto/nh.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
+#include <linux/unaligned.h>
#define BLOCKCIPHER_KEY_SIZE 32
/* Size of the hash key (K_H) in bytes */
-#define HASH_KEY_SIZE (POLY1305_BLOCK_SIZE + NHPOLY1305_KEY_SIZE)
+#define HASH_KEY_SIZE (2 * POLY1305_BLOCK_SIZE + NH_KEY_BYTES)
/*
* The specification allows variable-length tweaks, but Linux's crypto API
struct adiantum_instance_ctx {
struct crypto_skcipher_spawn streamcipher_spawn;
struct crypto_cipher_spawn blockcipher_spawn;
- struct crypto_shash_spawn hash_spawn;
};
struct adiantum_tfm_ctx {
struct crypto_skcipher *streamcipher;
struct crypto_cipher *blockcipher;
- struct crypto_shash *hash;
struct poly1305_core_key header_hash_key;
+ struct poly1305_core_key msg_poly_key;
+ u32 nh_key[NH_KEY_WORDS];
+};
+
+struct nhpoly1305_ctx {
+ /* Running total of polynomial evaluation */
+ struct poly1305_state poly_state;
+
+ /* Partial block buffer */
+ u8 buffer[NH_MESSAGE_UNIT];
+ unsigned int buflen;
+
+ /*
+ * Number of bytes remaining until the current NH message reaches
+ * NH_MESSAGE_BYTES. When nonzero, 'nh_hash' holds the partial NH hash.
+ */
+ unsigned int nh_remaining;
+
+ __le64 nh_hash[NH_NUM_PASSES];
};
struct adiantum_request_ctx {
*/
le128 header_hash;
- /* Sub-requests, must be last */
+ /*
+ * The skcipher sub-request size is unknown at compile time, so it
+ * needs to go after the members with known sizes.
+ */
union {
- struct shash_desc hash_desc;
+ struct nhpoly1305_ctx hash_ctx;
struct skcipher_request streamcipher_req;
} u;
};
/* Set the hash key (K_H) */
poly1305_core_setkey(&tctx->header_hash_key, keyp);
keyp += POLY1305_BLOCK_SIZE;
-
- crypto_shash_clear_flags(tctx->hash, CRYPTO_TFM_REQ_MASK);
- crypto_shash_set_flags(tctx->hash, crypto_skcipher_get_flags(tfm) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_shash_setkey(tctx->hash, keyp, NHPOLY1305_KEY_SIZE);
- keyp += NHPOLY1305_KEY_SIZE;
+ poly1305_core_setkey(&tctx->msg_poly_key, keyp);
+ keyp += POLY1305_BLOCK_SIZE;
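+ /* nh() takes the key as native-endian u32 words, so convert from LE bytes */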
+ for (int i = 0; i < NH_KEY_WORDS; i++)
+ tctx->nh_key[i] = get_unaligned_le32(&keyp[i * 4]);
+ keyp += NH_KEY_BYTES;
WARN_ON(keyp != &data->derived_keys[ARRAY_SIZE(data->derived_keys)]);
out:
kfree_sensitive(data);
poly1305_core_emit(&state, NULL, &rctx->header_hash);
}
-/* Hash the left-hand part (the "bulk") of the message using NHPoly1305 */
-static int adiantum_hash_message(struct skcipher_request *req,
- struct scatterlist *sgl, unsigned int nents,
- le128 *digest)
+/* Pass the next NH hash value through Poly1305 */
+static void process_nh_hash_value(struct nhpoly1305_ctx *ctx,
+ const struct adiantum_tfm_ctx *key)
+{
+ static_assert(NH_HASH_BYTES % POLY1305_BLOCK_SIZE == 0);
+
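+ /*
+ * Each 32-byte NH hash value is fed to Poly1305 as two 16-byte blocks,
+ * with the usual high bit (2^128) set on each block.
+ */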
+ poly1305_core_blocks(&ctx->poly_state, &key->msg_poly_key, ctx->nh_hash,
+ NH_HASH_BYTES / POLY1305_BLOCK_SIZE, 1);
+}
+
+/*
+ * Feed the next portion of the message data, as a whole number of 16-byte
+ * "NH message units", through NH and Poly1305. Each NH hash is taken over
+ * 1024 bytes, except possibly the final one which is taken over a multiple of
+ * 16 bytes up to 1024. If the data arrives in chunks that don't line up
+ * with the 1024-byte NH message boundaries, partial NH hashes are computed
+ * and added together; the end result is the same either way.
+ */
+static void nhpoly1305_units(struct nhpoly1305_ctx *ctx,
+ const struct adiantum_tfm_ctx *key,
+ const u8 *data, size_t len)
+{
+ do {
+ unsigned int bytes;
+
+ if (ctx->nh_remaining == 0) {
+ /* Starting a new NH message */
+ bytes = min(len, NH_MESSAGE_BYTES);
+ nh(key->nh_key, data, bytes, ctx->nh_hash);
+ ctx->nh_remaining = NH_MESSAGE_BYTES - bytes;
+ } else {
+ /* Continuing a previous NH message */
+ __le64 tmp_hash[NH_NUM_PASSES];
+ unsigned int pos;
+
+ pos = NH_MESSAGE_BYTES - ctx->nh_remaining;
+ bytes = min(len, ctx->nh_remaining);
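+ /*
+ * NH is a sum of terms, one per 16-byte message unit, each mixing
+ * the unit with the key words at its offset. So the rest of the
+ * message can be hashed with the key advanced to 'pos' (in u32
+ * words) and the partial hashes added together.
+ */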
+ nh(&key->nh_key[pos / 4], data, bytes, tmp_hash);
+ for (int i = 0; i < NH_NUM_PASSES; i++)
+ le64_add_cpu(&ctx->nh_hash[i],
+ le64_to_cpu(tmp_hash[i]));
+ ctx->nh_remaining -= bytes;
+ }
+ if (ctx->nh_remaining == 0)
+ process_nh_hash_value(ctx, key);
+ data += bytes;
+ len -= bytes;
+ } while (len);
+}
+
+static void nhpoly1305_init(struct nhpoly1305_ctx *ctx)
+{
+ poly1305_core_init(&ctx->poly_state);
+ ctx->buflen = 0;
+ ctx->nh_remaining = 0;
+}
+
+static void nhpoly1305_update(struct nhpoly1305_ctx *ctx,
+ const struct adiantum_tfm_ctx *key,
+ const u8 *data, size_t len)
{
+ unsigned int bytes;
+
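+ /* First, top up and flush any partial 16-byte unit buffered from last time */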
+ if (ctx->buflen) {
+ bytes = min(len, NH_MESSAGE_UNIT - ctx->buflen);
+ memcpy(&ctx->buffer[ctx->buflen], data, bytes);
+ ctx->buflen += bytes;
+ if (ctx->buflen < NH_MESSAGE_UNIT)
+ return;
+ nhpoly1305_units(ctx, key, ctx->buffer, NH_MESSAGE_UNIT);
+ ctx->buflen = 0;
+ data += bytes;
+ len -= bytes;
+ }
+
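+ /* Hash as many whole 16-byte units as possible directly from 'data' */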
+ if (len >= NH_MESSAGE_UNIT) {
+ bytes = round_down(len, NH_MESSAGE_UNIT);
+ nhpoly1305_units(ctx, key, data, bytes);
+ data += bytes;
+ len -= bytes;
+ }
+
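+ /* Buffer any remaining partial unit for the next call */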
+ if (len) {
+ memcpy(ctx->buffer, data, len);
+ ctx->buflen = len;
+ }
+}
+
+static void nhpoly1305_final(struct nhpoly1305_ctx *ctx,
+ const struct adiantum_tfm_ctx *key, le128 *out)
+{
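+ /* Zero-pad the final partial unit, if any; this implements pad_{128}(L) */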
+ if (ctx->buflen) {
+ memset(&ctx->buffer[ctx->buflen], 0,
+ NH_MESSAGE_UNIT - ctx->buflen);
+ nhpoly1305_units(ctx, key, ctx->buffer, NH_MESSAGE_UNIT);
+ }
+
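+ /* The final NH message may end short of 1024 bytes; absorb its hash too */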
+ if (ctx->nh_remaining)
+ process_nh_hash_value(ctx, key);
+
+ poly1305_core_emit(&ctx->poly_state, NULL, out);
+}
+
+/*
+ * Hash the left-hand part (the "bulk") of the message as follows:
+ *
+ * H_L ← Poly1305_{K_L}(NH_{K_N}(pad_{128}(L)))
+ *
+ * See section 6.4 of the Adiantum paper. This is an ε-almost-∆-universal
+ * (ε-∆U) hash function for equal-length inputs over Z/(2^{128}Z), where the "∆"
+ * operation is addition. It hashes 1024-byte chunks of the input with the NH
+ * hash function, reducing the input length by 32x. The resulting NH hashes are
+ * evaluated as a polynomial in GF(2^{130}-5), like in the Poly1305 MAC. Note
+ * that the polynomial evaluation by itself would suffice to achieve the ε-∆U
+ * property; NH is used for performance since it's much faster than Poly1305.
+ */
+static void adiantum_hash_message(struct skcipher_request *req,
+ struct scatterlist *sgl, unsigned int nents,
+ le128 *out)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
- struct shash_desc *hash_desc = &rctx->u.hash_desc;
struct sg_mapping_iter miter;
unsigned int i, n;
- int err;
- err = crypto_shash_init(hash_desc);
- if (err)
- return err;
+ nhpoly1305_init(&rctx->u.hash_ctx);
sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG | SG_MITER_ATOMIC);
for (i = 0; i < bulk_len; i += n) {
sg_miter_next(&miter);
n = min_t(unsigned int, miter.length, bulk_len - i);
- err = crypto_shash_update(hash_desc, miter.addr, n);
- if (err)
- break;
+ nhpoly1305_update(&rctx->u.hash_ctx, tctx, miter.addr, n);
}
sg_miter_stop(&miter);
- if (err)
- return err;
- return crypto_shash_final(hash_desc, (u8 *)digest);
+ nhpoly1305_final(&rctx->u.hash_ctx, tctx, out);
}
/* Continue Adiantum encryption/decryption after the stream cipher step */
struct scatterlist *dst = req->dst;
const unsigned int dst_nents = sg_nents(dst);
le128 digest;
- int err;
/* If decrypting, decrypt C_M with the block cipher to get P_M */
if (!rctx->enc)
* enc: C_R = C_M - H_{K_H}(T, C_L)
* dec: P_R = P_M - H_{K_H}(T, P_L)
*/
- rctx->u.hash_desc.tfm = tctx->hash;
le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
if (dst_nents == 1 && dst->offset + req->cryptlen <= PAGE_SIZE) {
/* Fast path for single-page destination */
struct page *page = sg_page(dst);
void *virt = kmap_local_page(page) + dst->offset;
- err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len,
- (u8 *)&digest);
- if (err) {
- kunmap_local(virt);
- return err;
- }
+ nhpoly1305_init(&rctx->u.hash_ctx);
+ nhpoly1305_update(&rctx->u.hash_ctx, tctx, virt, bulk_len);
+ nhpoly1305_final(&rctx->u.hash_ctx, tctx, &digest);
le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
memcpy(virt + bulk_len, &rctx->rbuf.bignum, sizeof(le128));
flush_dcache_page(page);
kunmap_local(virt);
} else {
/* Slow path that works for any destination scatterlist */
- err = adiantum_hash_message(req, dst, dst_nents, &digest);
- if (err)
- return err;
+ adiantum_hash_message(req, dst, dst_nents, &digest);
le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
scatterwalk_map_and_copy(&rctx->rbuf.bignum, dst,
bulk_len, sizeof(le128), 1);
const unsigned int src_nents = sg_nents(src);
unsigned int stream_len;
le128 digest;
- int err;
if (req->cryptlen < BLOCKCIPHER_BLOCK_SIZE)
return -EINVAL;
* dec: C_M = C_R + H_{K_H}(T, C_L)
*/
adiantum_hash_header(req);
- rctx->u.hash_desc.tfm = tctx->hash;
if (src_nents == 1 && src->offset + req->cryptlen <= PAGE_SIZE) {
/* Fast path for single-page source */
void *virt = kmap_local_page(sg_page(src)) + src->offset;
- err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len,
- (u8 *)&digest);
+ nhpoly1305_init(&rctx->u.hash_ctx);
+ nhpoly1305_update(&rctx->u.hash_ctx, tctx, virt, bulk_len);
+ nhpoly1305_final(&rctx->u.hash_ctx, tctx, &digest);
memcpy(&rctx->rbuf.bignum, virt + bulk_len, sizeof(le128));
kunmap_local(virt);
} else {
/* Slow path that works for any source scatterlist */
- err = adiantum_hash_message(req, src, src_nents, &digest);
+ adiantum_hash_message(req, src, src_nents, &digest);
scatterwalk_map_and_copy(&rctx->rbuf.bignum, src,
bulk_len, sizeof(le128), 0);
}
- if (err)
- return err;
le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct crypto_skcipher *streamcipher;
struct crypto_cipher *blockcipher;
- struct crypto_shash *hash;
- unsigned int subreq_size;
int err;
streamcipher = crypto_spawn_skcipher(&ictx->streamcipher_spawn);
goto err_free_streamcipher;
}
- hash = crypto_spawn_shash(&ictx->hash_spawn);
- if (IS_ERR(hash)) {
- err = PTR_ERR(hash);
- goto err_free_blockcipher;
- }
-
tctx->streamcipher = streamcipher;
tctx->blockcipher = blockcipher;
- tctx->hash = hash;
BUILD_BUG_ON(offsetofend(struct adiantum_request_ctx, u) !=
sizeof(struct adiantum_request_ctx));
- subreq_size = max(sizeof_field(struct adiantum_request_ctx,
- u.hash_desc) +
- crypto_shash_descsize(hash),
- sizeof_field(struct adiantum_request_ctx,
- u.streamcipher_req) +
- crypto_skcipher_reqsize(streamcipher));
-
- crypto_skcipher_set_reqsize(tfm,
- offsetof(struct adiantum_request_ctx, u) +
- subreq_size);
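+ /*
+ * sizeof(struct adiantum_request_ctx) already covers u.hash_ctx; only
+ * the stream cipher sub-request's size must be added at runtime.
+ */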
+ crypto_skcipher_set_reqsize(
+ tfm, max(sizeof(struct adiantum_request_ctx),
+ offsetofend(struct adiantum_request_ctx,
+ u.streamcipher_req) +
+ crypto_skcipher_reqsize(streamcipher)));
return 0;
-err_free_blockcipher:
- crypto_free_cipher(blockcipher);
err_free_streamcipher:
crypto_free_skcipher(streamcipher);
return err;
crypto_free_skcipher(tctx->streamcipher);
crypto_free_cipher(tctx->blockcipher);
- crypto_free_shash(tctx->hash);
}
static void adiantum_free_instance(struct skcipher_instance *inst)
crypto_drop_skcipher(&ictx->streamcipher_spawn);
crypto_drop_cipher(&ictx->blockcipher_spawn);
- crypto_drop_shash(&ictx->hash_spawn);
kfree(inst);
}
* Check for a supported set of inner algorithms.
* See the comment at the beginning of this file.
*/
-static bool adiantum_supported_algorithms(struct skcipher_alg_common *streamcipher_alg,
- struct crypto_alg *blockcipher_alg,
- struct shash_alg *hash_alg)
+static bool
+adiantum_supported_algorithms(struct skcipher_alg_common *streamcipher_alg,
+ struct crypto_alg *blockcipher_alg)
{
if (strcmp(streamcipher_alg->base.cra_name, "xchacha12") != 0 &&
strcmp(streamcipher_alg->base.cra_name, "xchacha20") != 0)
if (blockcipher_alg->cra_blocksize != BLOCKCIPHER_BLOCK_SIZE)
return false;
- if (strcmp(hash_alg->base.cra_name, "nhpoly1305") != 0)
- return false;
-
return true;
}
static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
{
u32 mask;
- const char *nhpoly1305_name;
struct skcipher_instance *inst;
struct adiantum_instance_ctx *ictx;
struct skcipher_alg_common *streamcipher_alg;
struct crypto_alg *blockcipher_alg;
- struct shash_alg *hash_alg;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
goto err_free_inst;
blockcipher_alg = crypto_spawn_cipher_alg(&ictx->blockcipher_spawn);
- /* NHPoly1305 ε-∆U hash function */
- nhpoly1305_name = crypto_attr_alg_name(tb[3]);
- if (nhpoly1305_name == ERR_PTR(-ENOENT))
- nhpoly1305_name = "nhpoly1305";
- err = crypto_grab_shash(&ictx->hash_spawn,
- skcipher_crypto_instance(inst),
- nhpoly1305_name, 0, mask);
- if (err)
+ /*
+ * Originally there was an optional third parameter, for requesting a
+ * specific implementation of "nhpoly1305" for message hashing. This is
+ * no longer supported; the best available implementation is always used.
+ */
+ if (crypto_attr_alg_name(tb[3]) != ERR_PTR(-ENOENT)) {
+ err = -ENOENT;
goto err_free_inst;
- hash_alg = crypto_spawn_shash_alg(&ictx->hash_spawn);
+ }
/* Check the set of algorithms */
- if (!adiantum_supported_algorithms(streamcipher_alg, blockcipher_alg,
- hash_alg)) {
- pr_warn("Unsupported Adiantum instantiation: (%s,%s,%s)\n",
+ if (!adiantum_supported_algorithms(streamcipher_alg, blockcipher_alg)) {
+ pr_warn("Unsupported Adiantum instantiation: (%s,%s)\n",
streamcipher_alg->base.cra_name,
- blockcipher_alg->cra_name, hash_alg->base.cra_name);
+ blockcipher_alg->cra_name);
err = -EINVAL;
goto err_free_inst;
}
blockcipher_alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "adiantum(%s,%s,%s)",
- streamcipher_alg->base.cra_driver_name,
- blockcipher_alg->cra_driver_name,
- hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+ "adiantum(%s,%s)", streamcipher_alg->base.cra_driver_name,
+ blockcipher_alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE;
/*
* The block cipher is only invoked once per message, so for long
* messages (e.g. sectors for disk encryption) its performance doesn't
- * matter as much as that of the stream cipher and hash function. Thus,
- * weigh the block cipher's ->cra_priority less.
+ * matter as much as that of the stream cipher. Thus, weigh the block
+ * cipher's ->cra_priority less.
*/
inst->alg.base.cra_priority = (4 * streamcipher_alg->base.cra_priority +
- 2 * hash_alg->base.cra_priority +
- blockcipher_alg->cra_priority) / 7;
+ blockcipher_alg->cra_priority) / 5;
inst->alg.setkey = adiantum_setkey;
inst->alg.encrypt = adiantum_encrypt;
return err;
}
-/* adiantum(streamcipher_name, blockcipher_name [, nhpoly1305_name]) */
+/* adiantum(streamcipher_name, blockcipher_name) */
static struct crypto_template adiantum_tmpl = {
.name = "adiantum",
.create = adiantum_create,