crypto: x86/aegis - fix handling chunked inputs and MAY_SLEEP
author Eric Biggers <ebiggers@google.com>
Fri, 1 Feb 2019 07:51:38 +0000 (23:51 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 23 Mar 2019 19:09:55 +0000 (20:09 +0100)
commit ba6771c0a0bc2fac9d6a8759bab8493bd1cffe3b upstream.

The x86 AEGIS implementations all fail the improved AEAD tests because
they produce the wrong result with some data layouts.  The issue is that
they assume that if the skcipher_walk API gives 'nbytes' not aligned to
the walksize (a.k.a. walk.stride), then it is the end of the data.  In
fact, this can happen before the end.
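
For reference, a minimal sketch of the corrected walk pattern, using the
aegis128 names from the patch below; AEGIS_BLOCK_SIZE stands in for the
per-variant block size (the 128l and 256 variants differ only in that
constant).  The key point is that a partial block in walk->nbytes is the
tail of the message only once nbytes has dropped below one full block;
before that, the leftover bytes must be handed back to
skcipher_walk_done() so the walk can continue:

	static void process_crypt_sketch(struct aegis_state *state,
					 struct skcipher_walk *walk,
					 const struct aegis_crypt_ops *ops)
	{
		/* Full blocks: return the unprocessed remainder each step. */
		while (walk->nbytes >= AEGIS_BLOCK_SIZE) {
			ops->crypt_blocks(state,
					  round_down(walk->nbytes,
						     AEGIS_BLOCK_SIZE),
					  walk->src.virt.addr,
					  walk->dst.virt.addr);
			skcipher_walk_done(walk,
					   walk->nbytes % AEGIS_BLOCK_SIZE);
		}

		/* Only here is a partial block really the end of the data. */
		if (walk->nbytes) {
			ops->crypt_tail(state, walk->nbytes,
					walk->src.virt.addr,
					walk->dst.virt.addr);
			skcipher_walk_done(walk, 0);
		}
	}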

Also, when the CRYPTO_TFM_REQ_MAY_SLEEP flag is given, they can
incorrectly sleep in the skcipher_walk_*() functions while preemption
has been disabled by kernel_fpu_begin().
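
In outline, the fix for this is to do the (possibly sleeping) walk setup
before kernel_fpu_begin() and to request an atomic walk, so that the
skcipher_walk_*() calls made while preemption is disabled never sleep.
Again using the aegis128 names (ops->skcipher_walk_init maps to
skcipher_walk_aead_encrypt or skcipher_walk_aead_decrypt):

	struct skcipher_walk walk;

	/* May sleep: runs before preemption is disabled. */
	ops->skcipher_walk_init(&walk, req, true);	/* true = atomic */

	kernel_fpu_begin();
	/* ... init, process AD, process_crypt(&state, &walk, ops),
	 * final ... */
	kernel_fpu_end();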

Fix these bugs.

Fixes: 1d373d4e8e15 ("crypto: x86 - Add optimized AEGIS implementations")
Cc: <stable@vger.kernel.org> # v4.18+
Cc: Ondrej Mosnacek <omosnace@redhat.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Ondrej Mosnacek <omosnace@redhat.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/crypto/aegis128-aesni-glue.c
arch/x86/crypto/aegis128l-aesni-glue.c
arch/x86/crypto/aegis256-aesni-glue.c

diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
index 2a356b948720e10f93d49765a071c3c6dd6edb36..3ea71b8718135fc5cee116e2ccd1896189882052 100644
@@ -119,31 +119,20 @@ static void crypto_aegis128_aesni_process_ad(
 }
 
 static void crypto_aegis128_aesni_process_crypt(
-               struct aegis_state *state, struct aead_request *req,
+               struct aegis_state *state, struct skcipher_walk *walk,
                const struct aegis_crypt_ops *ops)
 {
-       struct skcipher_walk walk;
-       u8 *src, *dst;
-       unsigned int chunksize, base;
-
-       ops->skcipher_walk_init(&walk, req, false);
-
-       while (walk.nbytes) {
-               src = walk.src.virt.addr;
-               dst = walk.dst.virt.addr;
-               chunksize = walk.nbytes;
-
-               ops->crypt_blocks(state, chunksize, src, dst);
-
-               base = chunksize & ~(AEGIS128_BLOCK_SIZE - 1);
-               src += base;
-               dst += base;
-               chunksize &= AEGIS128_BLOCK_SIZE - 1;
-
-               if (chunksize > 0)
-                       ops->crypt_tail(state, chunksize, src, dst);
+       while (walk->nbytes >= AEGIS128_BLOCK_SIZE) {
+               ops->crypt_blocks(state,
+                                 round_down(walk->nbytes, AEGIS128_BLOCK_SIZE),
+                                 walk->src.virt.addr, walk->dst.virt.addr);
+               skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE);
+       }
 
-               skcipher_walk_done(&walk, 0);
+       if (walk->nbytes) {
+               ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
+                               walk->dst.virt.addr);
+               skcipher_walk_done(walk, 0);
        }
 }
 
@@ -186,13 +175,16 @@ static void crypto_aegis128_aesni_crypt(struct aead_request *req,
 {
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(tfm);
+       struct skcipher_walk walk;
        struct aegis_state state;
 
+       ops->skcipher_walk_init(&walk, req, true);
+
        kernel_fpu_begin();
 
        crypto_aegis128_aesni_init(&state, ctx->key.bytes, req->iv);
        crypto_aegis128_aesni_process_ad(&state, req->src, req->assoclen);
-       crypto_aegis128_aesni_process_crypt(&state, req, ops);
+       crypto_aegis128_aesni_process_crypt(&state, &walk, ops);
        crypto_aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
 
        kernel_fpu_end();
diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c
index dbe8bb980da15c46cb3c815d0ab529cd2522a346..1b1b39c66c5e2c907dbbaa84e6e58cc77fd34899 100644
@@ -119,31 +119,20 @@ static void crypto_aegis128l_aesni_process_ad(
 }
 
 static void crypto_aegis128l_aesni_process_crypt(
-               struct aegis_state *state, struct aead_request *req,
+               struct aegis_state *state, struct skcipher_walk *walk,
                const struct aegis_crypt_ops *ops)
 {
-       struct skcipher_walk walk;
-       u8 *src, *dst;
-       unsigned int chunksize, base;
-
-       ops->skcipher_walk_init(&walk, req, false);
-
-       while (walk.nbytes) {
-               src = walk.src.virt.addr;
-               dst = walk.dst.virt.addr;
-               chunksize = walk.nbytes;
-
-               ops->crypt_blocks(state, chunksize, src, dst);
-
-               base = chunksize & ~(AEGIS128L_BLOCK_SIZE - 1);
-               src += base;
-               dst += base;
-               chunksize &= AEGIS128L_BLOCK_SIZE - 1;
-
-               if (chunksize > 0)
-                       ops->crypt_tail(state, chunksize, src, dst);
+       while (walk->nbytes >= AEGIS128L_BLOCK_SIZE) {
+               ops->crypt_blocks(state, round_down(walk->nbytes,
+                                                   AEGIS128L_BLOCK_SIZE),
+                                 walk->src.virt.addr, walk->dst.virt.addr);
+               skcipher_walk_done(walk, walk->nbytes % AEGIS128L_BLOCK_SIZE);
+       }
 
-               skcipher_walk_done(&walk, 0);
+       if (walk->nbytes) {
+               ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
+                               walk->dst.virt.addr);
+               skcipher_walk_done(walk, 0);
        }
 }
 
@@ -186,13 +175,16 @@ static void crypto_aegis128l_aesni_crypt(struct aead_request *req,
 {
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aegis_ctx *ctx = crypto_aegis128l_aesni_ctx(tfm);
+       struct skcipher_walk walk;
        struct aegis_state state;
 
+       ops->skcipher_walk_init(&walk, req, true);
+
        kernel_fpu_begin();
 
        crypto_aegis128l_aesni_init(&state, ctx->key.bytes, req->iv);
        crypto_aegis128l_aesni_process_ad(&state, req->src, req->assoclen);
-       crypto_aegis128l_aesni_process_crypt(&state, req, ops);
+       crypto_aegis128l_aesni_process_crypt(&state, &walk, ops);
        crypto_aegis128l_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
 
        kernel_fpu_end();
diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c
index 8bebda2de92fe3f4453cb453596faeb4dfbfd6bb..6227ca3220a05a0d40c3558c3349fefdeb1ccafb 100644
@@ -119,31 +119,20 @@ static void crypto_aegis256_aesni_process_ad(
 }
 
 static void crypto_aegis256_aesni_process_crypt(
-               struct aegis_state *state, struct aead_request *req,
+               struct aegis_state *state, struct skcipher_walk *walk,
                const struct aegis_crypt_ops *ops)
 {
-       struct skcipher_walk walk;
-       u8 *src, *dst;
-       unsigned int chunksize, base;
-
-       ops->skcipher_walk_init(&walk, req, false);
-
-       while (walk.nbytes) {
-               src = walk.src.virt.addr;
-               dst = walk.dst.virt.addr;
-               chunksize = walk.nbytes;
-
-               ops->crypt_blocks(state, chunksize, src, dst);
-
-               base = chunksize & ~(AEGIS256_BLOCK_SIZE - 1);
-               src += base;
-               dst += base;
-               chunksize &= AEGIS256_BLOCK_SIZE - 1;
-
-               if (chunksize > 0)
-                       ops->crypt_tail(state, chunksize, src, dst);
+       while (walk->nbytes >= AEGIS256_BLOCK_SIZE) {
+               ops->crypt_blocks(state,
+                                 round_down(walk->nbytes, AEGIS256_BLOCK_SIZE),
+                                 walk->src.virt.addr, walk->dst.virt.addr);
+               skcipher_walk_done(walk, walk->nbytes % AEGIS256_BLOCK_SIZE);
+       }
 
-               skcipher_walk_done(&walk, 0);
+       if (walk->nbytes) {
+               ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
+                               walk->dst.virt.addr);
+               skcipher_walk_done(walk, 0);
        }
 }
 
@@ -186,13 +175,16 @@ static void crypto_aegis256_aesni_crypt(struct aead_request *req,
 {
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aegis_ctx *ctx = crypto_aegis256_aesni_ctx(tfm);
+       struct skcipher_walk walk;
        struct aegis_state state;
 
+       ops->skcipher_walk_init(&walk, req, true);
+
        kernel_fpu_begin();
 
        crypto_aegis256_aesni_init(&state, ctx->key, req->iv);
        crypto_aegis256_aesni_process_ad(&state, req->src, req->assoclen);
-       crypto_aegis256_aesni_process_crypt(&state, req, ops);
+       crypto_aegis256_aesni_process_crypt(&state, &walk, ops);
        crypto_aegis256_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
 
        kernel_fpu_end();