5.9-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Sat, 24 Oct 2020 10:39:31 +0000 (12:39 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Sat, 24 Oct 2020 10:39:31 +0000 (12:39 +0200)
added patches:
crypto-algif_aead-do-not-set-may_backlog-on-the-async-path.patch
crypto-caam-add-xts-check-for-block-length-equal-to-zero.patch
crypto-caam-jr-add-fallback-for-xts-with-more-than-8b-iv.patch
crypto-caam-jr-add-support-for-more-xts-key-lengths.patch
crypto-caam-qi-add-fallback-for-xts-with-more-than-8b-iv.patch
crypto-caam-qi-add-support-for-more-xts-key-lengths.patch
crypto-caam-qi2-add-fallback-for-xts-with-more-than-8b-iv.patch
crypto-caam-qi2-add-support-for-more-xts-key-lengths.patch
ima-don-t-ignore-errors-from-crypto_shash_update.patch

queue-5.9/crypto-algif_aead-do-not-set-may_backlog-on-the-async-path.patch [new file with mode: 0644]
queue-5.9/crypto-caam-add-xts-check-for-block-length-equal-to-zero.patch [new file with mode: 0644]
queue-5.9/crypto-caam-jr-add-fallback-for-xts-with-more-than-8b-iv.patch [new file with mode: 0644]
queue-5.9/crypto-caam-jr-add-support-for-more-xts-key-lengths.patch [new file with mode: 0644]
queue-5.9/crypto-caam-qi-add-fallback-for-xts-with-more-than-8b-iv.patch [new file with mode: 0644]
queue-5.9/crypto-caam-qi-add-support-for-more-xts-key-lengths.patch [new file with mode: 0644]
queue-5.9/crypto-caam-qi2-add-fallback-for-xts-with-more-than-8b-iv.patch [new file with mode: 0644]
queue-5.9/crypto-caam-qi2-add-support-for-more-xts-key-lengths.patch [new file with mode: 0644]
queue-5.9/ima-don-t-ignore-errors-from-crypto_shash_update.patch [new file with mode: 0644]
queue-5.9/series

diff --git a/queue-5.9/crypto-algif_aead-do-not-set-may_backlog-on-the-async-path.patch b/queue-5.9/crypto-algif_aead-do-not-set-may_backlog-on-the-async-path.patch
new file mode 100644 (file)
index 0000000..b7a5582
--- /dev/null
@@ -0,0 +1,56 @@
+From cbdad1f246dd98e6c9c32a6e5212337f542aa7e0 Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Fri, 31 Jul 2020 17:03:50 +1000
+Subject: crypto: algif_aead - Do not set MAY_BACKLOG on the async path
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit cbdad1f246dd98e6c9c32a6e5212337f542aa7e0 upstream.
+
+The async path cannot use MAY_BACKLOG because it is not meant to
+block, which is what MAY_BACKLOG does.  On the other hand, both
+the sync and async paths can make use of MAY_SLEEP.
+
+Fixes: 83094e5e9e49 ("crypto: af_alg - add async support to...")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/algif_aead.c |    7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/crypto/algif_aead.c
++++ b/crypto/algif_aead.c
+@@ -78,7 +78,7 @@ static int crypto_aead_copy_sgl(struct c
+       SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
+       skcipher_request_set_sync_tfm(skreq, null_tfm);
+-      skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
++      skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP,
+                                     NULL, NULL);
+       skcipher_request_set_crypt(skreq, src, dst, len, NULL);
+@@ -291,19 +291,20 @@ static int _aead_recvmsg(struct socket *
+               areq->outlen = outlen;
+               aead_request_set_callback(&areq->cra_u.aead_req,
+-                                        CRYPTO_TFM_REQ_MAY_BACKLOG,
++                                        CRYPTO_TFM_REQ_MAY_SLEEP,
+                                         af_alg_async_cb, areq);
+               err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
+                                crypto_aead_decrypt(&areq->cra_u.aead_req);
+               /* AIO operation in progress */
+-              if (err == -EINPROGRESS || err == -EBUSY)
++              if (err == -EINPROGRESS)
+                       return -EIOCBQUEUED;
+               sock_put(sk);
+       } else {
+               /* Synchronous operation */
+               aead_request_set_callback(&areq->cra_u.aead_req,
++                                        CRYPTO_TFM_REQ_MAY_SLEEP |
+                                         CRYPTO_TFM_REQ_MAY_BACKLOG,
+                                         crypto_req_done, &ctx->wait);
+               err = crypto_wait_req(ctx->enc ?
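
For context, a minimal caller-side sketch (not part of the patch; it assumes
<crypto/aead.h> and an otherwise fully set-up request) of how the two flags
differ in practice: with MAY_BACKLOG, an -EBUSY return means the request was
put on a backlog and the caller must block until it completes, while without
it -EBUSY simply reports a full queue -- which is why only the synchronous
path may set the flag.

    #include <crypto/aead.h>
    #include <linux/crypto.h>

    /* Synchronous path: blocking is acceptable, so backlogging is allowed. */
    static int submit_sync(struct aead_request *req, struct crypto_wait *wait)
    {
            aead_request_set_callback(req,
                                      CRYPTO_TFM_REQ_MAY_SLEEP |
                                      CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      crypto_req_done, wait);
            /* crypto_wait_req() sleeps on -EINPROGRESS/-EBUSY and returns
             * the final status, so backlogged requests are waited for. */
            return crypto_wait_req(crypto_aead_encrypt(req), wait);
    }

    /* Async (AIO) path: must not block, so MAY_BACKLOG stays off. */
    static int submit_async(struct aead_request *req,
                            crypto_completion_t done, void *data)
    {
            aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      done, data);
            /* -EINPROGRESS: queued, the completion callback will fire;
             * -EBUSY: queue full -- report it rather than block. */
            return crypto_aead_encrypt(req);
    }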
diff --git a/queue-5.9/crypto-caam-add-xts-check-for-block-length-equal-to-zero.patch b/queue-5.9/crypto-caam-add-xts-check-for-block-length-equal-to-zero.patch
new file mode 100644 (file)
index 0000000..e83927a
--- /dev/null
@@ -0,0 +1,90 @@
+From 297b931c2a3cada230d8b84432ee982fc68cf76a Mon Sep 17 00:00:00 2001
+From: Andrei Botila <andrei.botila@nxp.com>
+Date: Tue, 22 Sep 2020 19:03:25 +0300
+Subject: crypto: caam - add xts check for block length equal to zero
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Andrei Botila <andrei.botila@nxp.com>
+
+commit 297b931c2a3cada230d8b84432ee982fc68cf76a upstream.
+
+XTS should not return success when dealing with a block length equal
+to zero. This is different from the rest of the skcipher algorithms.
+
+Fixes: 31bb2f0da1b50 ("crypto: caam - check zero-length input")
+Cc: <stable@vger.kernel.org> # v5.4+
+Signed-off-by: Andrei Botila <andrei.botila@nxp.com>
+Reviewed-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/caam/caamalg.c     |    7 ++++++-
+ drivers/crypto/caam/caamalg_qi.c  |    7 ++++++-
+ drivers/crypto/caam/caamalg_qi2.c |   14 ++++++++++++--
+ 3 files changed, 24 insertions(+), 4 deletions(-)
+
+--- a/drivers/crypto/caam/caamalg.c
++++ b/drivers/crypto/caam/caamalg.c
+@@ -1765,7 +1765,12 @@ static inline int skcipher_crypt(struct
+       u32 *desc;
+       int ret = 0;
+-      if (!req->cryptlen)
++      /*
++       * XTS is expected to return an error even for input length = 0
++       * Note that the case input length < block size will be caught during
++       * HW offloading and return an error.
++       */
++      if (!req->cryptlen && !ctx->fallback)
+               return 0;
+       /* allocate extended descriptor */
+--- a/drivers/crypto/caam/caamalg_qi.c
++++ b/drivers/crypto/caam/caamalg_qi.c
+@@ -1380,7 +1380,12 @@ static inline int skcipher_crypt(struct
+       struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+       int ret;
+-      if (!req->cryptlen)
++      /*
++       * XTS is expected to return an error even for input length = 0
++       * Note that the case input length < block size will be caught during
++       * HW offloading and return an error.
++       */
++      if (!req->cryptlen && !ctx->fallback)
+               return 0;
+       if (unlikely(caam_congested))
+--- a/drivers/crypto/caam/caamalg_qi2.c
++++ b/drivers/crypto/caam/caamalg_qi2.c
+@@ -1451,7 +1451,12 @@ static int skcipher_encrypt(struct skcip
+       struct caam_request *caam_req = skcipher_request_ctx(req);
+       int ret;
+-      if (!req->cryptlen)
++      /*
++       * XTS is expected to return an error even for input length = 0
++       * Note that the case input length < block size will be caught during
++       * HW offloading and return an error.
++       */
++      if (!req->cryptlen && !ctx->fallback)
+               return 0;
+       /* allocate extended descriptor */
+@@ -1482,7 +1487,12 @@ static int skcipher_decrypt(struct skcip
+       struct caam_request *caam_req = skcipher_request_ctx(req);
+       int ret;
+-      if (!req->cryptlen)
++      /*
++       * XTS is expected to return an error even for input length = 0
++       * Note that the case input length < block size will be caught during
++       * HW offloading and return an error.
++       */
++      if (!req->cryptlen && !ctx->fallback)
+               return 0;
+       /* allocate extended descriptor */
+       edesc = skcipher_edesc_alloc(req);
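
A hedged illustration of the behaviour being fixed (a self-test-style sketch
under stated assumptions, not code from the patch; the function name is
hypothetical): with the fix, a zero-length request on the CAAM "xts(aes)"
implementation is routed to the fallback, which rejects it, instead of
returning 0.

    #include <crypto/skcipher.h>
    #include <linux/crypto.h>
    #include <linux/err.h>

    static int xts_zero_len_demo(void)
    {
            struct crypto_skcipher *tfm;
            struct skcipher_request *req;
            DECLARE_CRYPTO_WAIT(wait);
            u8 key[32] = { 1 };     /* XTS-AES-128: two distinct 16-byte halves */
            u8 iv[16] = { 0 };
            int err;

            tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            req = skcipher_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    crypto_free_skcipher(tfm);
                    return -ENOMEM;
            }

            err = crypto_skcipher_setkey(tfm, key, sizeof(key));
            if (!err) {
                    skcipher_request_set_callback(req,
                                                  CRYPTO_TFM_REQ_MAY_BACKLOG,
                                                  crypto_req_done, &wait);
                    /* cryptlen == 0: XTS must fail (generic xts -> -EINVAL) */
                    skcipher_request_set_crypt(req, NULL, NULL, 0, iv);
                    err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
            }

            skcipher_request_free(req);
            crypto_free_skcipher(tfm);
            return err;             /* expected: -EINVAL, not 0 */
    }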
diff --git a/queue-5.9/crypto-caam-jr-add-fallback-for-xts-with-more-than-8b-iv.patch b/queue-5.9/crypto-caam-jr-add-fallback-for-xts-with-more-than-8b-iv.patch
new file mode 100644 (file)
index 0000000..0995b4c
--- /dev/null
@@ -0,0 +1,188 @@
+From 9d9b14dbe077c8704d8c3546e38820d35aff2d35 Mon Sep 17 00:00:00 2001
+From: Andrei Botila <andrei.botila@nxp.com>
+Date: Tue, 22 Sep 2020 19:03:19 +0300
+Subject: crypto: caam/jr - add fallback for XTS with more than 8B IV
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Andrei Botila <andrei.botila@nxp.com>
+
+commit 9d9b14dbe077c8704d8c3546e38820d35aff2d35 upstream.
+
+A hardware limitation exists for CAAM until Era 9 which restricts
+the accelerator to IVs of only 8 bytes. When CAAM has a lower era,
+a fallback is necessary to process 16-byte IVs.
+
+Fixes: c6415a6016bf ("crypto: caam - add support for acipher xts(aes)")
+Cc: <stable@vger.kernel.org> # v4.4+
+Signed-off-by: Andrei Botila <andrei.botila@nxp.com>
+Reviewed-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/caam/Kconfig   |    1 
+ drivers/crypto/caam/caamalg.c |   72 +++++++++++++++++++++++++++++++++++++-----
+ 2 files changed, 66 insertions(+), 7 deletions(-)
+
+--- a/drivers/crypto/caam/Kconfig
++++ b/drivers/crypto/caam/Kconfig
+@@ -101,6 +101,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
+       select CRYPTO_AUTHENC
+       select CRYPTO_SKCIPHER
+       select CRYPTO_LIB_DES
++      select CRYPTO_XTS
+       help
+         Selecting this will offload crypto for users of the
+         scatterlist crypto API (such as the linux native IPSec
+--- a/drivers/crypto/caam/caamalg.c
++++ b/drivers/crypto/caam/caamalg.c
+@@ -57,6 +57,7 @@
+ #include "key_gen.h"
+ #include "caamalg_desc.h"
+ #include <crypto/engine.h>
++#include <asm/unaligned.h>
+ /*
+  * crypto alg
+@@ -114,10 +115,12 @@ struct caam_ctx {
+       struct alginfo adata;
+       struct alginfo cdata;
+       unsigned int authsize;
++      struct crypto_skcipher *fallback;
+ };
+ struct caam_skcipher_req_ctx {
+       struct skcipher_edesc *edesc;
++      struct skcipher_request fallback_req;
+ };
+ struct caam_aead_req_ctx {
+@@ -830,12 +833,17 @@ static int xts_skcipher_setkey(struct cr
+       struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+       struct device *jrdev = ctx->jrdev;
+       u32 *desc;
++      int err;
+       if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
+               dev_dbg(jrdev, "key size mismatch\n");
+               return -EINVAL;
+       }
++      err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
++      if (err)
++              return err;
++
+       ctx->cdata.keylen = keylen;
+       ctx->cdata.key_virt = key;
+       ctx->cdata.key_inline = true;
+@@ -1755,6 +1763,14 @@ static int skcipher_do_one_req(struct cr
+       return ret;
+ }
++static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
++{
++      struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
++      unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
++
++      return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
++}
++
+ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
+ {
+       struct skcipher_edesc *edesc;
+@@ -1773,6 +1789,21 @@ static inline int skcipher_crypt(struct
+       if (!req->cryptlen && !ctx->fallback)
+               return 0;
++      if (ctx->fallback && xts_skcipher_ivsize(req)) {
++              struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
++
++              skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
++              skcipher_request_set_callback(&rctx->fallback_req,
++                                            req->base.flags,
++                                            req->base.complete,
++                                            req->base.data);
++              skcipher_request_set_crypt(&rctx->fallback_req, req->src,
++                                         req->dst, req->cryptlen, req->iv);
++
++              return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
++                               crypto_skcipher_decrypt(&rctx->fallback_req);
++      }
++
+       /* allocate extended descriptor */
+       edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+       if (IS_ERR(edesc))
+@@ -1910,6 +1941,7 @@ static struct caam_skcipher_alg driver_a
+                       .base = {
+                               .cra_name = "xts(aes)",
+                               .cra_driver_name = "xts-aes-caam",
++                              .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+                               .cra_blocksize = AES_BLOCK_SIZE,
+                       },
+                       .setkey = xts_skcipher_setkey,
+@@ -3349,13 +3381,35 @@ static int caam_cra_init(struct crypto_s
+       struct caam_skcipher_alg *caam_alg =
+               container_of(alg, typeof(*caam_alg), skcipher);
+       struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+-
+-      crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx));
++      u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
++      int ret = 0;
+       ctx->enginectx.op.do_one_request = skcipher_do_one_req;
+-      return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
+-                              false);
++      if (alg_aai == OP_ALG_AAI_XTS) {
++              const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
++              struct crypto_skcipher *fallback;
++
++              fallback = crypto_alloc_skcipher(tfm_name, 0,
++                                               CRYPTO_ALG_NEED_FALLBACK);
++              if (IS_ERR(fallback)) {
++                      dev_err(ctx->jrdev, "Failed to allocate %s fallback: %ld\n",
++                              tfm_name, PTR_ERR(fallback));
++                      return PTR_ERR(fallback);
++              }
++
++              ctx->fallback = fallback;
++              crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
++                                          crypto_skcipher_reqsize(fallback));
++      } else {
++              crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx));
++      }
++
++      ret = caam_init_common(ctx, &caam_alg->caam, false);
++      if (ret && ctx->fallback)
++              crypto_free_skcipher(ctx->fallback);
++
++      return ret;
+ }
+ static int caam_aead_init(struct crypto_aead *tfm)
+@@ -3383,7 +3437,11 @@ static void caam_exit_common(struct caam
+ static void caam_cra_exit(struct crypto_skcipher *tfm)
+ {
+-      caam_exit_common(crypto_skcipher_ctx(tfm));
++      struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
++
++      if (ctx->fallback)
++              crypto_free_skcipher(ctx->fallback);
++      caam_exit_common(ctx);
+ }
+ static void caam_aead_exit(struct crypto_aead *tfm)
+@@ -3417,8 +3475,8 @@ static void caam_skcipher_alg_init(struc
+       alg->base.cra_module = THIS_MODULE;
+       alg->base.cra_priority = CAAM_CRA_PRIORITY;
+       alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+-      alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+-                            CRYPTO_ALG_KERN_DRIVER_ONLY;
++      alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
++                            CRYPTO_ALG_KERN_DRIVER_ONLY);
+       alg->init = caam_cra_init;
+       alg->exit = caam_cra_exit;
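
Why checking the upper half of the IV suffices (a sketch under assumptions,
not from the patch; the helper name is hypothetical): common tweak generators
such as dm-crypt's "plain64" encode the sector number in the low 8 bytes of
the 16-byte XTS IV, leaving the upper 8 bytes zero, which pre-Era-9 CAAM can
process natively. Only a nonzero upper half forces the software fallback.

    #include <asm/unaligned.h>

    static bool needs_sw_fallback(const u8 iv[16])
    {
            /* true when the upper 64 bits of the XTS tweak are nonzero */
            return get_unaligned((const u64 *)(iv + 8)) != 0;
    }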
diff --git a/queue-5.9/crypto-caam-jr-add-support-for-more-xts-key-lengths.patch b/queue-5.9/crypto-caam-jr-add-support-for-more-xts-key-lengths.patch
new file mode 100644 (file)
index 0000000..75ac390
--- /dev/null
@@ -0,0 +1,74 @@
+From c91f734862664ca86dc3ee7e55f199e2bde829e4 Mon Sep 17 00:00:00 2001
+From: Andrei Botila <andrei.botila@nxp.com>
+Date: Tue, 22 Sep 2020 19:03:22 +0300
+Subject: crypto: caam/jr - add support for more XTS key lengths
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Andrei Botila <andrei.botila@nxp.com>
+
+commit c91f734862664ca86dc3ee7e55f199e2bde829e4 upstream.
+
+The CAAM accelerator only supports XTS-AES-128 and XTS-AES-256, since
+it adheres strictly to the standard. All other key lengths
+are accepted and processed through a fallback as long as they pass
+the xts_verify_key() checks.
+
+Fixes: c6415a6016bf ("crypto: caam - add support for acipher xts(aes)")
+Cc: <stable@vger.kernel.org> # v4.4+
+Signed-off-by: Andrei Botila <andrei.botila@nxp.com>
+Reviewed-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/caam/caamalg.c |   13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+--- a/drivers/crypto/caam/caamalg.c
++++ b/drivers/crypto/caam/caamalg.c
+@@ -57,6 +57,7 @@
+ #include "key_gen.h"
+ #include "caamalg_desc.h"
+ #include <crypto/engine.h>
++#include <crypto/xts.h>
+ #include <asm/unaligned.h>
+ /*
+@@ -115,6 +116,7 @@ struct caam_ctx {
+       struct alginfo adata;
+       struct alginfo cdata;
+       unsigned int authsize;
++      bool xts_key_fallback;
+       struct crypto_skcipher *fallback;
+ };
+@@ -835,11 +837,15 @@ static int xts_skcipher_setkey(struct cr
+       u32 *desc;
+       int err;
+-      if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
++      err = xts_verify_key(skcipher, key, keylen);
++      if (err) {
+               dev_dbg(jrdev, "key size mismatch\n");
+-              return -EINVAL;
++              return err;
+       }
++      if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
++              ctx->xts_key_fallback = true;
++
+       err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+       if (err)
+               return err;
+@@ -1789,7 +1795,8 @@ static inline int skcipher_crypt(struct
+       if (!req->cryptlen && !ctx->fallback)
+               return 0;
+-      if (ctx->fallback && xts_skcipher_ivsize(req)) {
++      if (ctx->fallback && (xts_skcipher_ivsize(req) ||
++                            ctx->xts_key_fallback)) {
+               struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+               skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
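
For reference, a simplified sketch of the checks xts_verify_key() (from
include/crypto/xts.h) performs -- this mirrors the mainline helper at the
time of writing, under the stated assumption that <crypto/skcipher.h>,
<linux/fips.h> and crypto_memneq() are available; the name below is
illustrative:

    static inline int xts_check_key_sketch(struct crypto_skcipher *tfm,
                                           const u8 *key, unsigned int keylen)
    {
            /* an XTS key is two equal-size keys concatenated: must be even */
            if (keylen % 2)
                    return -EINVAL;

            /* reject identical cipher and tweak keys when weak keys are
             * forbidden (e.g. in FIPS mode) */
            if ((fips_enabled || (crypto_skcipher_get_flags(tfm) &
                                  CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) &&
                !crypto_memneq(key, key + (keylen / 2), keylen / 2))
                    return -EINVAL;

            return 0;
    }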
diff --git a/queue-5.9/crypto-caam-qi-add-fallback-for-xts-with-more-than-8b-iv.patch b/queue-5.9/crypto-caam-qi-add-fallback-for-xts-with-more-than-8b-iv.patch
new file mode 100644 (file)
index 0000000..5d1e413
--- /dev/null
@@ -0,0 +1,182 @@
+From 83e8aa9121380b23ebae6e413962fa2a7b52cf92 Mon Sep 17 00:00:00 2001
+From: Andrei Botila <andrei.botila@nxp.com>
+Date: Tue, 22 Sep 2020 19:03:20 +0300
+Subject: crypto: caam/qi - add fallback for XTS with more than 8B IV
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Andrei Botila <andrei.botila@nxp.com>
+
+commit 83e8aa9121380b23ebae6e413962fa2a7b52cf92 upstream.
+
+A hardware limitation exists for CAAM until Era 9 which restricts
+the accelerator to IVs of only 8 bytes. When CAAM has a lower era,
+a fallback is necessary to process 16-byte IVs.
+
+Fixes: b189817cf789 ("crypto: caam/qi - add ablkcipher and authenc algorithms")
+Cc: <stable@vger.kernel.org> # v4.12+
+Signed-off-by: Andrei Botila <andrei.botila@nxp.com>
+Reviewed-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/caam/Kconfig      |    1 
+ drivers/crypto/caam/caamalg_qi.c |   72 ++++++++++++++++++++++++++++++++++++---
+ 2 files changed, 68 insertions(+), 5 deletions(-)
+
+--- a/drivers/crypto/caam/Kconfig
++++ b/drivers/crypto/caam/Kconfig
+@@ -114,6 +114,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
+       select CRYPTO_AUTHENC
+       select CRYPTO_SKCIPHER
+       select CRYPTO_DES
++      select CRYPTO_XTS
+       help
+         Selecting this will use CAAM Queue Interface (QI) for sending
+         & receiving crypto jobs to/from CAAM. This gives better performance
+--- a/drivers/crypto/caam/caamalg_qi.c
++++ b/drivers/crypto/caam/caamalg_qi.c
+@@ -18,6 +18,7 @@
+ #include "qi.h"
+ #include "jr.h"
+ #include "caamalg_desc.h"
++#include <asm/unaligned.h>
+ /*
+  * crypto alg
+@@ -67,6 +68,11 @@ struct caam_ctx {
+       struct device *qidev;
+       spinlock_t lock;        /* Protects multiple init of driver context */
+       struct caam_drv_ctx *drv_ctx[NUM_OP];
++      struct crypto_skcipher *fallback;
++};
++
++struct caam_skcipher_req_ctx {
++      struct skcipher_request fallback_req;
+ };
+ static int aead_set_sh_desc(struct crypto_aead *aead)
+@@ -726,12 +732,17 @@ static int xts_skcipher_setkey(struct cr
+       struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+       struct device *jrdev = ctx->jrdev;
+       int ret = 0;
++      int err;
+       if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
+               dev_dbg(jrdev, "key size mismatch\n");
+               return -EINVAL;
+       }
++      err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
++      if (err)
++              return err;
++
+       ctx->cdata.keylen = keylen;
+       ctx->cdata.key_virt = key;
+       ctx->cdata.key_inline = true;
+@@ -1373,6 +1384,14 @@ static struct skcipher_edesc *skcipher_e
+       return edesc;
+ }
++static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
++{
++      struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
++      unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
++
++      return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
++}
++
+ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
+ {
+       struct skcipher_edesc *edesc;
+@@ -1388,6 +1407,21 @@ static inline int skcipher_crypt(struct
+       if (!req->cryptlen && !ctx->fallback)
+               return 0;
++      if (ctx->fallback && xts_skcipher_ivsize(req)) {
++              struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
++
++              skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
++              skcipher_request_set_callback(&rctx->fallback_req,
++                                            req->base.flags,
++                                            req->base.complete,
++                                            req->base.data);
++              skcipher_request_set_crypt(&rctx->fallback_req, req->src,
++                                         req->dst, req->cryptlen, req->iv);
++
++              return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
++                               crypto_skcipher_decrypt(&rctx->fallback_req);
++      }
++
+       if (unlikely(caam_congested))
+               return -EAGAIN;
+@@ -1512,6 +1546,7 @@ static struct caam_skcipher_alg driver_a
+                       .base = {
+                               .cra_name = "xts(aes)",
+                               .cra_driver_name = "xts-aes-caam-qi",
++                              .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+                               .cra_blocksize = AES_BLOCK_SIZE,
+                       },
+                       .setkey = xts_skcipher_setkey,
+@@ -2445,9 +2480,32 @@ static int caam_cra_init(struct crypto_s
+       struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+       struct caam_skcipher_alg *caam_alg =
+               container_of(alg, typeof(*caam_alg), skcipher);
++      struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
++      u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
++      int ret = 0;
++
++      if (alg_aai == OP_ALG_AAI_XTS) {
++              const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
++              struct crypto_skcipher *fallback;
++
++              fallback = crypto_alloc_skcipher(tfm_name, 0,
++                                               CRYPTO_ALG_NEED_FALLBACK);
++              if (IS_ERR(fallback)) {
++                      dev_err(ctx->jrdev, "Failed to allocate %s fallback: %ld\n",
++                              tfm_name, PTR_ERR(fallback));
++                      return PTR_ERR(fallback);
++              }
++
++              ctx->fallback = fallback;
++              crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
++                                          crypto_skcipher_reqsize(fallback));
++      }
++
++      ret = caam_init_common(ctx, &caam_alg->caam, false);
++      if (ret && ctx->fallback)
++              crypto_free_skcipher(ctx->fallback);
+-      return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
+-                              false);
++      return ret;
+ }
+ static int caam_aead_init(struct crypto_aead *tfm)
+@@ -2473,7 +2531,11 @@ static void caam_exit_common(struct caam
+ static void caam_cra_exit(struct crypto_skcipher *tfm)
+ {
+-      caam_exit_common(crypto_skcipher_ctx(tfm));
++      struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
++
++      if (ctx->fallback)
++              crypto_free_skcipher(ctx->fallback);
++      caam_exit_common(ctx);
+ }
+ static void caam_aead_exit(struct crypto_aead *tfm)
+@@ -2507,8 +2569,8 @@ static void caam_skcipher_alg_init(struc
+       alg->base.cra_module = THIS_MODULE;
+       alg->base.cra_priority = CAAM_CRA_PRIORITY;
+       alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+-      alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+-                            CRYPTO_ALG_KERN_DRIVER_ONLY;
++      alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
++                              CRYPTO_ALG_KERN_DRIVER_ONLY);
+       alg->init = caam_cra_init;
+       alg->exit = caam_cra_exit;
diff --git a/queue-5.9/crypto-caam-qi-add-support-for-more-xts-key-lengths.patch b/queue-5.9/crypto-caam-qi-add-support-for-more-xts-key-lengths.patch
new file mode 100644 (file)
index 0000000..42f45e4
--- /dev/null
@@ -0,0 +1,74 @@
+From 62b9a6690926ee199445b23fd46e6349d9057146 Mon Sep 17 00:00:00 2001
+From: Andrei Botila <andrei.botila@nxp.com>
+Date: Tue, 22 Sep 2020 19:03:23 +0300
+Subject: crypto: caam/qi - add support for more XTS key lengths
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Andrei Botila <andrei.botila@nxp.com>
+
+commit 62b9a6690926ee199445b23fd46e6349d9057146 upstream.
+
+The CAAM accelerator only supports XTS-AES-128 and XTS-AES-256, since
+it adheres strictly to the standard. All other key lengths
+are accepted and processed through a fallback as long as they pass
+the xts_verify_key() checks.
+
+Fixes: b189817cf789 ("crypto: caam/qi - add ablkcipher and authenc algorithms")
+Cc: <stable@vger.kernel.org> # v4.12+
+Signed-off-by: Andrei Botila <andrei.botila@nxp.com>
+Reviewed-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/caam/caamalg_qi.c |   13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+--- a/drivers/crypto/caam/caamalg_qi.c
++++ b/drivers/crypto/caam/caamalg_qi.c
+@@ -18,6 +18,7 @@
+ #include "qi.h"
+ #include "jr.h"
+ #include "caamalg_desc.h"
++#include <crypto/xts.h>
+ #include <asm/unaligned.h>
+ /*
+@@ -68,6 +69,7 @@ struct caam_ctx {
+       struct device *qidev;
+       spinlock_t lock;        /* Protects multiple init of driver context */
+       struct caam_drv_ctx *drv_ctx[NUM_OP];
++      bool xts_key_fallback;
+       struct crypto_skcipher *fallback;
+ };
+@@ -734,11 +736,15 @@ static int xts_skcipher_setkey(struct cr
+       int ret = 0;
+       int err;
+-      if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
++      err = xts_verify_key(skcipher, key, keylen);
++      if (err) {
+               dev_dbg(jrdev, "key size mismatch\n");
+-              return -EINVAL;
++              return err;
+       }
++      if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
++              ctx->xts_key_fallback = true;
++
+       err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+       if (err)
+               return err;
+@@ -1407,7 +1413,8 @@ static inline int skcipher_crypt(struct
+       if (!req->cryptlen && !ctx->fallback)
+               return 0;
+-      if (ctx->fallback && xts_skcipher_ivsize(req)) {
++      if (ctx->fallback && (xts_skcipher_ivsize(req) ||
++                            ctx->xts_key_fallback)) {
+               struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+               skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
diff --git a/queue-5.9/crypto-caam-qi2-add-fallback-for-xts-with-more-than-8b-iv.patch b/queue-5.9/crypto-caam-qi2-add-fallback-for-xts-with-more-than-8b-iv.patch
new file mode 100644 (file)
index 0000000..f05e141
--- /dev/null
@@ -0,0 +1,216 @@
+From 36e2d7cfdcf17b6126863d884d4200191e922524 Mon Sep 17 00:00:00 2001
+From: Andrei Botila <andrei.botila@nxp.com>
+Date: Tue, 22 Sep 2020 19:03:21 +0300
+Subject: crypto: caam/qi2 - add fallback for XTS with more than 8B IV
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Andrei Botila <andrei.botila@nxp.com>
+
+commit 36e2d7cfdcf17b6126863d884d4200191e922524 upstream.
+
+A hardware limitation exists for CAAM until Era 9 which restricts
+the accelerator to IVs of only 8 bytes. When CAAM has a lower era,
+a fallback is necessary to process 16-byte IVs.
+
+Fixes: 226853ac3ebe ("crypto: caam/qi2 - add skcipher algorithms")
+Cc: <stable@vger.kernel.org> # v4.20+
+Signed-off-by: Andrei Botila <andrei.botila@nxp.com>
+Reviewed-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/caam/Kconfig       |    1 
+ drivers/crypto/caam/caamalg_qi2.c |   80 +++++++++++++++++++++++++++++++++++---
+ drivers/crypto/caam/caamalg_qi2.h |    2 
+ 3 files changed, 78 insertions(+), 5 deletions(-)
+
+--- a/drivers/crypto/caam/Kconfig
++++ b/drivers/crypto/caam/Kconfig
+@@ -167,6 +167,7 @@ config CRYPTO_DEV_FSL_DPAA2_CAAM
+       select CRYPTO_AEAD
+       select CRYPTO_HASH
+       select CRYPTO_DES
++      select CRYPTO_XTS
+       help
+         CAAM driver for QorIQ Data Path Acceleration Architecture 2.
+         It handles DPSECI DPAA2 objects that sit on the Management Complex
+--- a/drivers/crypto/caam/caamalg_qi2.c
++++ b/drivers/crypto/caam/caamalg_qi2.c
+@@ -19,6 +19,7 @@
+ #include <linux/fsl/mc.h>
+ #include <soc/fsl/dpaa2-io.h>
+ #include <soc/fsl/dpaa2-fd.h>
++#include <asm/unaligned.h>
+ #define CAAM_CRA_PRIORITY     2000
+@@ -80,6 +81,7 @@ struct caam_ctx {
+       struct alginfo adata;
+       struct alginfo cdata;
+       unsigned int authsize;
++      struct crypto_skcipher *fallback;
+ };
+ static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
+@@ -1056,12 +1058,17 @@ static int xts_skcipher_setkey(struct cr
+       struct device *dev = ctx->dev;
+       struct caam_flc *flc;
+       u32 *desc;
++      int err;
+       if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
+               dev_dbg(dev, "key size mismatch\n");
+               return -EINVAL;
+       }
++      err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
++      if (err)
++              return err;
++
+       ctx->cdata.keylen = keylen;
+       ctx->cdata.key_virt = key;
+       ctx->cdata.key_inline = true;
+@@ -1443,6 +1450,14 @@ static void skcipher_decrypt_done(void *
+       skcipher_request_complete(req, ecode);
+ }
++static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
++{
++      struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
++      unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
++
++      return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
++}
++
+ static int skcipher_encrypt(struct skcipher_request *req)
+ {
+       struct skcipher_edesc *edesc;
+@@ -1459,6 +1474,18 @@ static int skcipher_encrypt(struct skcip
+       if (!req->cryptlen && !ctx->fallback)
+               return 0;
++      if (ctx->fallback && xts_skcipher_ivsize(req)) {
++              skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
++              skcipher_request_set_callback(&caam_req->fallback_req,
++                                            req->base.flags,
++                                            req->base.complete,
++                                            req->base.data);
++              skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
++                                         req->dst, req->cryptlen, req->iv);
++
++              return crypto_skcipher_encrypt(&caam_req->fallback_req);
++      }
++
+       /* allocate extended descriptor */
+       edesc = skcipher_edesc_alloc(req);
+       if (IS_ERR(edesc))
+@@ -1494,6 +1521,19 @@ static int skcipher_decrypt(struct skcip
+        */
+       if (!req->cryptlen && !ctx->fallback)
+               return 0;
++
++      if (ctx->fallback && xts_skcipher_ivsize(req)) {
++              skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
++              skcipher_request_set_callback(&caam_req->fallback_req,
++                                            req->base.flags,
++                                            req->base.complete,
++                                            req->base.data);
++              skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
++                                         req->dst, req->cryptlen, req->iv);
++
++              return crypto_skcipher_decrypt(&caam_req->fallback_req);
++      }
++
+       /* allocate extended descriptor */
+       edesc = skcipher_edesc_alloc(req);
+       if (IS_ERR(edesc))
+@@ -1547,9 +1587,34 @@ static int caam_cra_init_skcipher(struct
+       struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+       struct caam_skcipher_alg *caam_alg =
+               container_of(alg, typeof(*caam_alg), skcipher);
++      struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
++      u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
++      int ret = 0;
++
++      if (alg_aai == OP_ALG_AAI_XTS) {
++              const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
++              struct crypto_skcipher *fallback;
++
++              fallback = crypto_alloc_skcipher(tfm_name, 0,
++                                               CRYPTO_ALG_NEED_FALLBACK);
++              if (IS_ERR(fallback)) {
++                      dev_err(ctx->dev, "Failed to allocate %s fallback: %ld\n",
++                              tfm_name, PTR_ERR(fallback));
++                      return PTR_ERR(fallback);
++              }
++
++              ctx->fallback = fallback;
++              crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request) +
++                                          crypto_skcipher_reqsize(fallback));
++      } else {
++              crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
++      }
++
++      ret = caam_cra_init(ctx, &caam_alg->caam, false);
++      if (ret && ctx->fallback)
++              crypto_free_skcipher(ctx->fallback);
+-      crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
+-      return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
++      return ret;
+ }
+ static int caam_cra_init_aead(struct crypto_aead *tfm)
+@@ -1572,7 +1637,11 @@ static void caam_exit_common(struct caam
+ static void caam_cra_exit(struct crypto_skcipher *tfm)
+ {
+-      caam_exit_common(crypto_skcipher_ctx(tfm));
++      struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
++
++      if (ctx->fallback)
++              crypto_free_skcipher(ctx->fallback);
++      caam_exit_common(ctx);
+ }
+ static void caam_cra_exit_aead(struct crypto_aead *tfm)
+@@ -1675,6 +1744,7 @@ static struct caam_skcipher_alg driver_a
+                       .base = {
+                               .cra_name = "xts(aes)",
+                               .cra_driver_name = "xts-aes-caam-qi2",
++                              .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+                               .cra_blocksize = AES_BLOCK_SIZE,
+                       },
+                       .setkey = xts_skcipher_setkey,
+@@ -2922,8 +2992,8 @@ static void caam_skcipher_alg_init(struc
+       alg->base.cra_module = THIS_MODULE;
+       alg->base.cra_priority = CAAM_CRA_PRIORITY;
+       alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+-      alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+-                            CRYPTO_ALG_KERN_DRIVER_ONLY;
++      alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
++                            CRYPTO_ALG_KERN_DRIVER_ONLY);
+       alg->init = caam_cra_init_skcipher;
+       alg->exit = caam_cra_exit;
+--- a/drivers/crypto/caam/caamalg_qi2.h
++++ b/drivers/crypto/caam/caamalg_qi2.h
+@@ -13,6 +13,7 @@
+ #include <linux/netdevice.h>
+ #include "dpseci.h"
+ #include "desc_constr.h"
++#include <crypto/skcipher.h>
+ #define DPAA2_CAAM_STORE_SIZE 16
+ /* NAPI weight *must* be a multiple of the store size. */
+@@ -186,6 +187,7 @@ struct caam_request {
+       void (*cbk)(void *ctx, u32 err);
+       void *ctx;
+       void *edesc;
++      struct skcipher_request fallback_req;
+ };
+ /**
diff --git a/queue-5.9/crypto-caam-qi2-add-support-for-more-xts-key-lengths.patch b/queue-5.9/crypto-caam-qi2-add-support-for-more-xts-key-lengths.patch
new file mode 100644 (file)
index 0000000..6338811
--- /dev/null
@@ -0,0 +1,84 @@
+From 596efd57cfa1e1bee575e2a2df44fd8ec5e4a42d Mon Sep 17 00:00:00 2001
+From: Andrei Botila <andrei.botila@nxp.com>
+Date: Tue, 22 Sep 2020 19:03:24 +0300
+Subject: crypto: caam/qi2 - add support for more XTS key lengths
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Andrei Botila <andrei.botila@nxp.com>
+
+commit 596efd57cfa1e1bee575e2a2df44fd8ec5e4a42d upstream.
+
+The CAAM accelerator only supports XTS-AES-128 and XTS-AES-256, since
+it adheres strictly to the standard. All other key lengths
+are accepted and processed through a fallback as long as they pass
+the xts_verify_key() checks.
+
+Fixes: 226853ac3ebe ("crypto: caam/qi2 - add skcipher algorithms")
+Cc: <stable@vger.kernel.org> # v4.20+
+Signed-off-by: Andrei Botila <andrei.botila@nxp.com>
+Reviewed-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/caam/caamalg_qi2.c |   16 ++++++++++++----
+ 1 file changed, 12 insertions(+), 4 deletions(-)
+
+--- a/drivers/crypto/caam/caamalg_qi2.c
++++ b/drivers/crypto/caam/caamalg_qi2.c
+@@ -19,6 +19,7 @@
+ #include <linux/fsl/mc.h>
+ #include <soc/fsl/dpaa2-io.h>
+ #include <soc/fsl/dpaa2-fd.h>
++#include <crypto/xts.h>
+ #include <asm/unaligned.h>
+ #define CAAM_CRA_PRIORITY     2000
+@@ -81,6 +82,7 @@ struct caam_ctx {
+       struct alginfo adata;
+       struct alginfo cdata;
+       unsigned int authsize;
++      bool xts_key_fallback;
+       struct crypto_skcipher *fallback;
+ };
+@@ -1060,11 +1062,15 @@ static int xts_skcipher_setkey(struct cr
+       u32 *desc;
+       int err;
+-      if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
++      err = xts_verify_key(skcipher, key, keylen);
++      if (err) {
+               dev_dbg(dev, "key size mismatch\n");
+-              return -EINVAL;
++              return err;
+       }
++      if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
++              ctx->xts_key_fallback = true;
++
+       err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+       if (err)
+               return err;
+@@ -1474,7 +1480,8 @@ static int skcipher_encrypt(struct skcip
+       if (!req->cryptlen && !ctx->fallback)
+               return 0;
+-      if (ctx->fallback && xts_skcipher_ivsize(req)) {
++      if (ctx->fallback && (xts_skcipher_ivsize(req) ||
++                            ctx->xts_key_fallback)) {
+               skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
+               skcipher_request_set_callback(&caam_req->fallback_req,
+                                             req->base.flags,
+@@ -1522,7 +1529,8 @@ static int skcipher_decrypt(struct skcip
+       if (!req->cryptlen && !ctx->fallback)
+               return 0;
+-      if (ctx->fallback && xts_skcipher_ivsize(req)) {
++      if (ctx->fallback && (xts_skcipher_ivsize(req) ||
++                            ctx->xts_key_fallback)) {
+               skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
+               skcipher_request_set_callback(&caam_req->fallback_req,
+                                             req->base.flags,
diff --git a/queue-5.9/ima-don-t-ignore-errors-from-crypto_shash_update.patch b/queue-5.9/ima-don-t-ignore-errors-from-crypto_shash_update.patch
new file mode 100644 (file)
index 0000000..85c9788
--- /dev/null
@@ -0,0 +1,35 @@
+From 60386b854008adc951c470067f90a2d85b5d520f Mon Sep 17 00:00:00 2001
+From: Roberto Sassu <roberto.sassu@huawei.com>
+Date: Fri, 4 Sep 2020 11:23:28 +0200
+Subject: ima: Don't ignore errors from crypto_shash_update()
+
+From: Roberto Sassu <roberto.sassu@huawei.com>
+
+commit 60386b854008adc951c470067f90a2d85b5d520f upstream.
+
+Errors returned by crypto_shash_update() are not checked in
+ima_calc_boot_aggregate_tfm() and thus can be overwritten at the next
+iteration of the loop. This patch adds a check after calling
+crypto_shash_update() and returns immediately if the result is not zero.
+
+Cc: stable@vger.kernel.org
+Fixes: 3323eec921efd ("integrity: IMA as an integrity service provider")
+Signed-off-by: Roberto Sassu <roberto.sassu@huawei.com>
+Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ security/integrity/ima/ima_crypto.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/security/integrity/ima/ima_crypto.c
++++ b/security/integrity/ima/ima_crypto.c
+@@ -829,6 +829,8 @@ static int ima_calc_boot_aggregate_tfm(c
+               /* now accumulate with current aggregate */
+               rc = crypto_shash_update(shash, d.digest,
+                                        crypto_shash_digestsize(tfm));
++              if (rc != 0)
++                      return rc;
+       }
+       /*
+        * Extend cumulative digest over TPM registers 8-9, which contain
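
The general pattern the fix enforces, as a minimal sketch (assuming
<crypto/hash.h>; the helper below is illustrative, not from the patch):
every crypto_shash_update() in a loop must be checked, otherwise a failure
is silently overwritten by the next iteration's return value.

    #include <crypto/hash.h>

    static int hash_chunks(struct crypto_shash *tfm, const u8 *chunks[],
                           const unsigned int lens[], int n, u8 *out)
    {
            SHASH_DESC_ON_STACK(desc, tfm);
            int i, rc;

            desc->tfm = tfm;

            rc = crypto_shash_init(desc);
            if (rc)
                    return rc;

            for (i = 0; i < n; i++) {
                    rc = crypto_shash_update(desc, chunks[i], lens[i]);
                    if (rc)  /* bail out on the first failure, as the fix does */
                            return rc;
            }

            return crypto_shash_final(desc, out);
    }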
diff --git a/queue-5.9/series b/queue-5.9/series
index d8ae5cbe27d59a3028696784004011775776b518..5a4e19ba4ea7ca0e457ddad0cbda4bee1c050906 100644 (file)
@@ -76,3 +76,12 @@ kvm-nvmx-reload-vmcs01-if-getting-vmcs12-s-pages-fails.patch
 kvm-x86-mmu-commit-zap-of-remaining-invalid-pages-when-recovering-lpages.patch
 kvm-x86-intercept-la57-to-inject-gp-fault-when-it-s-reserved.patch
 kvm-svm-initialize-prev_ga_tag-before-use.patch
+ima-don-t-ignore-errors-from-crypto_shash_update.patch
+crypto-caam-add-xts-check-for-block-length-equal-to-zero.patch
+crypto-algif_aead-do-not-set-may_backlog-on-the-async-path.patch
+crypto-caam-qi-add-fallback-for-xts-with-more-than-8b-iv.patch
+crypto-caam-qi-add-support-for-more-xts-key-lengths.patch
+crypto-caam-jr-add-fallback-for-xts-with-more-than-8b-iv.patch
+crypto-caam-jr-add-support-for-more-xts-key-lengths.patch
+crypto-caam-qi2-add-fallback-for-xts-with-more-than-8b-iv.patch
+crypto-caam-qi2-add-support-for-more-xts-key-lengths.patch