5.8-stable patches
Author:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
AuthorDate: Sat, 24 Oct 2020 10:42:01 +0000 (12:42 +0200)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Sat, 24 Oct 2020 10:42:01 +0000 (12:42 +0200)
added patches:
crypto-algif_aead-do-not-set-may_backlog-on-the-async-path.patch
crypto-caam-add-xts-check-for-block-length-equal-to-zero.patch
crypto-caam-qi-add-fallback-for-xts-with-more-than-8b-iv.patch
crypto-caam-qi-add-support-for-more-xts-key-lengths.patch
ima-don-t-ignore-errors-from-crypto_shash_update.patch

queue-5.8/crypto-algif_aead-do-not-set-may_backlog-on-the-async-path.patch [new file with mode: 0644]
queue-5.8/crypto-caam-add-xts-check-for-block-length-equal-to-zero.patch [new file with mode: 0644]
queue-5.8/crypto-caam-qi-add-fallback-for-xts-with-more-than-8b-iv.patch [new file with mode: 0644]
queue-5.8/crypto-caam-qi-add-support-for-more-xts-key-lengths.patch [new file with mode: 0644]
queue-5.8/ima-don-t-ignore-errors-from-crypto_shash_update.patch [new file with mode: 0644]
queue-5.8/series

diff --git a/queue-5.8/crypto-algif_aead-do-not-set-may_backlog-on-the-async-path.patch b/queue-5.8/crypto-algif_aead-do-not-set-may_backlog-on-the-async-path.patch
new file mode 100644
index 0000000..b7a5582
--- /dev/null
@@ -0,0 +1,56 @@
+From cbdad1f246dd98e6c9c32a6e5212337f542aa7e0 Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Fri, 31 Jul 2020 17:03:50 +1000
+Subject: crypto: algif_aead - Do not set MAY_BACKLOG on the async path
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit cbdad1f246dd98e6c9c32a6e5212337f542aa7e0 upstream.
+
+The async path cannot use MAY_BACKLOG because it is not meant to
+block, which is what MAY_BACKLOG does.  On the other hand, both
+the sync and async paths can make use of MAY_SLEEP.
+
+Fixes: 83094e5e9e49 ("crypto: af_alg - add async support to...")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/algif_aead.c |    7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/crypto/algif_aead.c
++++ b/crypto/algif_aead.c
+@@ -78,7 +78,7 @@ static int crypto_aead_copy_sgl(struct c
+       SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
+       skcipher_request_set_sync_tfm(skreq, null_tfm);
+-      skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
++      skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP,
+                                     NULL, NULL);
+       skcipher_request_set_crypt(skreq, src, dst, len, NULL);
+@@ -291,19 +291,20 @@ static int _aead_recvmsg(struct socket *
+               areq->outlen = outlen;
+               aead_request_set_callback(&areq->cra_u.aead_req,
+-                                        CRYPTO_TFM_REQ_MAY_BACKLOG,
++                                        CRYPTO_TFM_REQ_MAY_SLEEP,
+                                         af_alg_async_cb, areq);
+               err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
+                                crypto_aead_decrypt(&areq->cra_u.aead_req);
+               /* AIO operation in progress */
+-              if (err == -EINPROGRESS || err == -EBUSY)
++              if (err == -EINPROGRESS)
+                       return -EIOCBQUEUED;
+               sock_put(sk);
+       } else {
+               /* Synchronous operation */
+               aead_request_set_callback(&areq->cra_u.aead_req,
++                                        CRYPTO_TFM_REQ_MAY_SLEEP |
+                                         CRYPTO_TFM_REQ_MAY_BACKLOG,
+                                         crypto_req_done, &ctx->wait);
+               err = crypto_wait_req(ctx->enc ?
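
The flag rule this patch encodes can be restated in a small kernel-context sketch (not the algif_aead code itself: tfm setup and error handling are elided, and the function name is illustrative). The async path sets only CRYPTO_TFM_REQ_MAY_SLEEP and maps -EINPROGRESS to -EIOCBQUEUED, while the sync path may also set CRYPTO_TFM_REQ_MAY_BACKLOG, because crypto_wait_req() blocks anyway.

#include <crypto/aead.h>
#include <linux/crypto.h>

static int sketch_aead_crypt(struct aead_request *req, bool async,
			     crypto_completion_t cb, void *cb_data,
			     struct crypto_wait *wait)
{
	int err;

	if (async) {
		/* May sleep, but must not backlog: a backlogged request
		 * would make the nominally non-blocking path block. */
		aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
					  cb, cb_data);
		err = crypto_aead_encrypt(req);
		/* AIO convention: an in-flight request reports as queued */
		return err == -EINPROGRESS ? -EIOCBQUEUED : err;
	}

	/* Synchronous callers wait anyway, so backlogging is harmless. */
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
				       CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, wait);
	return crypto_wait_req(crypto_aead_encrypt(req), wait);
}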
diff --git a/queue-5.8/crypto-caam-add-xts-check-for-block-length-equal-to-zero.patch b/queue-5.8/crypto-caam-add-xts-check-for-block-length-equal-to-zero.patch
new file mode 100644
index 0000000..e83927a
--- /dev/null
@@ -0,0 +1,90 @@
+From 297b931c2a3cada230d8b84432ee982fc68cf76a Mon Sep 17 00:00:00 2001
+From: Andrei Botila <andrei.botila@nxp.com>
+Date: Tue, 22 Sep 2020 19:03:25 +0300
+Subject: crypto: caam - add xts check for block length equal to zero
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Andrei Botila <andrei.botila@nxp.com>
+
+commit 297b931c2a3cada230d8b84432ee982fc68cf76a upstream.
+
+XTS should not return success when dealing with block length equal to zero.
+This is different from the rest of the skcipher algorithms.
+
+Fixes: 31bb2f0da1b50 ("crypto: caam - check zero-length input")
+Cc: <stable@vger.kernel.org> # v5.4+
+Signed-off-by: Andrei Botila <andrei.botila@nxp.com>
+Reviewed-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/caam/caamalg.c     |    7 ++++++-
+ drivers/crypto/caam/caamalg_qi.c  |    7 ++++++-
+ drivers/crypto/caam/caamalg_qi2.c |   14 ++++++++++++--
+ 3 files changed, 24 insertions(+), 4 deletions(-)
+
+--- a/drivers/crypto/caam/caamalg.c
++++ b/drivers/crypto/caam/caamalg.c
+@@ -1765,7 +1765,12 @@ static inline int skcipher_crypt(struct
+       u32 *desc;
+       int ret = 0;
+-      if (!req->cryptlen)
++      /*
++       * XTS is expected to return an error even for input length = 0
++       * Note that the case input length < block size will be caught during
++       * HW offloading and return an error.
++       */
++      if (!req->cryptlen && !ctx->fallback)
+               return 0;
+       /* allocate extended descriptor */
+--- a/drivers/crypto/caam/caamalg_qi.c
++++ b/drivers/crypto/caam/caamalg_qi.c
+@@ -1380,7 +1380,12 @@ static inline int skcipher_crypt(struct
+       struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+       int ret;
+-      if (!req->cryptlen)
++      /*
++       * XTS is expected to return an error even for input length = 0
++       * Note that the case input length < block size will be caught during
++       * HW offloading and return an error.
++       */
++      if (!req->cryptlen && !ctx->fallback)
+               return 0;
+       if (unlikely(caam_congested))
+--- a/drivers/crypto/caam/caamalg_qi2.c
++++ b/drivers/crypto/caam/caamalg_qi2.c
+@@ -1451,7 +1451,12 @@ static int skcipher_encrypt(struct skcip
+       struct caam_request *caam_req = skcipher_request_ctx(req);
+       int ret;
+-      if (!req->cryptlen)
++      /*
++       * XTS is expected to return an error even for input length = 0
++       * Note that the case input length < block size will be caught during
++       * HW offloading and return an error.
++       */
++      if (!req->cryptlen && !ctx->fallback)
+               return 0;
+       /* allocate extended descriptor */
+@@ -1482,7 +1487,12 @@ static int skcipher_decrypt(struct skcip
+       struct caam_request *caam_req = skcipher_request_ctx(req);
+       int ret;
+-      if (!req->cryptlen)
++      /*
++       * XTS is expected to return an error even for input length = 0
++       * Note that the case input length < block size will be caught during
++       * HW offloading and return an error.
++       */
++      if (!req->cryptlen && !ctx->fallback)
+               return 0;
+       /* allocate extended descriptor */
+       edesc = skcipher_edesc_alloc(req);
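
A minimal userspace model of the new entry check, under the assumption (true in this driver once the fallback patches are applied) that only xts(aes) transforms carry a non-NULL fallback pointer:

#include <stdbool.h>
#include <stdio.h>

struct ctx {
	void *fallback;		/* non-NULL only for xts(aes) */
};

/* mirrors: if (!req->cryptlen && !ctx->fallback) return 0; */
static bool short_circuits(const struct ctx *c, unsigned int cryptlen)
{
	return cryptlen == 0 && !c->fallback;
}

int main(void)
{
	struct ctx cbc = { .fallback = NULL };
	struct ctx xts = { .fallback = &cbc };	/* any non-NULL marker */

	/* plain skciphers may return 0 early; XTS must submit and fail */
	printf("cbc len=0: %s\n", short_circuits(&cbc, 0) ? "return 0" : "submit");
	printf("xts len=0: %s\n", short_circuits(&xts, 0) ? "return 0" : "submit");
	return 0;
}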
diff --git a/queue-5.8/crypto-caam-qi-add-fallback-for-xts-with-more-than-8b-iv.patch b/queue-5.8/crypto-caam-qi-add-fallback-for-xts-with-more-than-8b-iv.patch
new file mode 100644
index 0000000..3e5aa97
--- /dev/null
@@ -0,0 +1,180 @@
+From 83e8aa9121380b23ebae6e413962fa2a7b52cf92 Mon Sep 17 00:00:00 2001
+From: Andrei Botila <andrei.botila@nxp.com>
+Date: Tue, 22 Sep 2020 19:03:20 +0300
+Subject: crypto: caam/qi - add fallback for XTS with more than 8B IV
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Andrei Botila <andrei.botila@nxp.com>
+
+commit 83e8aa9121380b23ebae6e413962fa2a7b52cf92 upstream.
+
+A hardware limitation exists for CAAM until Era 9 which restricts
+the accelerator to IVs of only 8 bytes. When the CAAM era is lower,
+a fallback is necessary to process 16-byte IVs.
+
+Fixes: b189817cf789 ("crypto: caam/qi - add ablkcipher and authenc algorithms")
+Cc: <stable@vger.kernel.org> # v4.12+
+Signed-off-by: Andrei Botila <andrei.botila@nxp.com>
+Reviewed-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/caam/Kconfig      |    1 
+ drivers/crypto/caam/caamalg_qi.c |   70 ++++++++++++++++++++++++++++++++++++---
+ 2 files changed, 67 insertions(+), 4 deletions(-)
+
+--- a/drivers/crypto/caam/Kconfig
++++ b/drivers/crypto/caam/Kconfig
+@@ -114,6 +114,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
+       select CRYPTO_AUTHENC
+       select CRYPTO_SKCIPHER
+       select CRYPTO_DES
++      select CRYPTO_XTS
+       help
+         Selecting this will use CAAM Queue Interface (QI) for sending
+         & receiving crypto jobs to/from CAAM. This gives better performance
+--- a/drivers/crypto/caam/caamalg_qi.c
++++ b/drivers/crypto/caam/caamalg_qi.c
+@@ -18,6 +18,7 @@
+ #include "qi.h"
+ #include "jr.h"
+ #include "caamalg_desc.h"
++#include <asm/unaligned.h>
+ /*
+  * crypto alg
+@@ -67,6 +68,11 @@ struct caam_ctx {
+       struct device *qidev;
+       spinlock_t lock;        /* Protects multiple init of driver context */
+       struct caam_drv_ctx *drv_ctx[NUM_OP];
++      struct crypto_skcipher *fallback;
++};
++
++struct caam_skcipher_req_ctx {
++      struct skcipher_request fallback_req;
+ };
+ static int aead_set_sh_desc(struct crypto_aead *aead)
+@@ -726,12 +732,17 @@ static int xts_skcipher_setkey(struct cr
+       struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+       struct device *jrdev = ctx->jrdev;
+       int ret = 0;
++      int err;
+       if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
+               dev_dbg(jrdev, "key size mismatch\n");
+               return -EINVAL;
+       }
++      err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
++      if (err)
++              return err;
++
+       ctx->cdata.keylen = keylen;
+       ctx->cdata.key_virt = key;
+       ctx->cdata.key_inline = true;
+@@ -1373,6 +1384,14 @@ static struct skcipher_edesc *skcipher_e
+       return edesc;
+ }
++static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
++{
++      struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
++      unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
++
++      return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
++}
++
+ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
+ {
+       struct skcipher_edesc *edesc;
+@@ -1388,6 +1407,21 @@ static inline int skcipher_crypt(struct
+       if (!req->cryptlen && !ctx->fallback)
+               return 0;
++      if (ctx->fallback && xts_skcipher_ivsize(req)) {
++              struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
++
++              skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
++              skcipher_request_set_callback(&rctx->fallback_req,
++                                            req->base.flags,
++                                            req->base.complete,
++                                            req->base.data);
++              skcipher_request_set_crypt(&rctx->fallback_req, req->src,
++                                         req->dst, req->cryptlen, req->iv);
++
++              return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
++                               crypto_skcipher_decrypt(&rctx->fallback_req);
++      }
++
+       if (unlikely(caam_congested))
+               return -EAGAIN;
+@@ -1512,6 +1546,7 @@ static struct caam_skcipher_alg driver_a
+                       .base = {
+                               .cra_name = "xts(aes)",
+                               .cra_driver_name = "xts-aes-caam-qi",
++                              .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+                               .cra_blocksize = AES_BLOCK_SIZE,
+                       },
+                       .setkey = xts_skcipher_setkey,
+@@ -2445,9 +2480,32 @@ static int caam_cra_init(struct crypto_s
+       struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+       struct caam_skcipher_alg *caam_alg =
+               container_of(alg, typeof(*caam_alg), skcipher);
++      struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
++      u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
++      int ret = 0;
++
++      if (alg_aai == OP_ALG_AAI_XTS) {
++              const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
++              struct crypto_skcipher *fallback;
++
++              fallback = crypto_alloc_skcipher(tfm_name, 0,
++                                               CRYPTO_ALG_NEED_FALLBACK);
++              if (IS_ERR(fallback)) {
++                      dev_err(ctx->jrdev, "Failed to allocate %s fallback: %ld\n",
++                              tfm_name, PTR_ERR(fallback));
++                      return PTR_ERR(fallback);
++              }
++
++              ctx->fallback = fallback;
++              crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
++                                          crypto_skcipher_reqsize(fallback));
++      }
++
++      ret = caam_init_common(ctx, &caam_alg->caam, false);
++      if (ret && ctx->fallback)
++              crypto_free_skcipher(ctx->fallback);
+-      return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
+-                              false);
++      return ret;
+ }
+ static int caam_aead_init(struct crypto_aead *tfm)
+@@ -2473,7 +2531,11 @@ static void caam_exit_common(struct caam
+ static void caam_cra_exit(struct crypto_skcipher *tfm)
+ {
+-      caam_exit_common(crypto_skcipher_ctx(tfm));
++      struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
++
++      if (ctx->fallback)
++              crypto_free_skcipher(ctx->fallback);
++      caam_exit_common(ctx);
+ }
+ static void caam_aead_exit(struct crypto_aead *tfm)
+@@ -2507,7 +2569,7 @@ static void caam_skcipher_alg_init(struc
+       alg->base.cra_module = THIS_MODULE;
+       alg->base.cra_priority = CAAM_CRA_PRIORITY;
+       alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+-      alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
++      alg->base.cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+       alg->init = caam_cra_init;
+       alg->exit = caam_cra_exit;
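
The routing decision in xts_skcipher_ivsize() can be modeled in plain C: the request goes to the fallback whenever the upper 8 bytes of the 16-byte XTS IV are nonzero, since pre-Era-9 CAAM consumes only an 8-byte sector index. A runnable userspace sketch (names are illustrative, this is not the driver code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define XTS_IV_SIZE 16

static bool iv_needs_fallback(const uint8_t iv[XTS_IV_SIZE])
{
	uint64_t hi;

	/* unaligned-safe load of the upper half, like get_unaligned() */
	memcpy(&hi, iv + XTS_IV_SIZE / 2, sizeof(hi));
	return hi != 0;
}

int main(void)
{
	uint8_t iv[XTS_IV_SIZE] = { 1 };	/* sector 1, upper half zero */

	printf("low IV:  %s\n", iv_needs_fallback(iv) ? "fallback" : "CAAM");
	iv[8] = 1;				/* dirty the upper 8 bytes */
	printf("high IV: %s\n", iv_needs_fallback(iv) ? "fallback" : "CAAM");
	return 0;
}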
diff --git a/queue-5.8/crypto-caam-qi-add-support-for-more-xts-key-lengths.patch b/queue-5.8/crypto-caam-qi-add-support-for-more-xts-key-lengths.patch
new file mode 100644
index 0000000..42f45e4
--- /dev/null
@@ -0,0 +1,74 @@
+From 62b9a6690926ee199445b23fd46e6349d9057146 Mon Sep 17 00:00:00 2001
+From: Andrei Botila <andrei.botila@nxp.com>
+Date: Tue, 22 Sep 2020 19:03:23 +0300
+Subject: crypto: caam/qi - add support for more XTS key lengths
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Andrei Botila <andrei.botila@nxp.com>
+
+commit 62b9a6690926ee199445b23fd46e6349d9057146 upstream.
+
+The CAAM accelerator supports only XTS-AES-128 and XTS-AES-256, since
+it adheres strictly to the standard. All other key lengths are
+accepted and processed through a fallback as long as they pass the
+xts_verify_key() checks.
+
+Fixes: b189817cf789 ("crypto: caam/qi - add ablkcipher and authenc algorithms")
+Cc: <stable@vger.kernel.org> # v4.12+
+Signed-off-by: Andrei Botila <andrei.botila@nxp.com>
+Reviewed-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/caam/caamalg_qi.c |   13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+--- a/drivers/crypto/caam/caamalg_qi.c
++++ b/drivers/crypto/caam/caamalg_qi.c
+@@ -18,6 +18,7 @@
+ #include "qi.h"
+ #include "jr.h"
+ #include "caamalg_desc.h"
++#include <crypto/xts.h>
+ #include <asm/unaligned.h>
+ /*
+@@ -68,6 +69,7 @@ struct caam_ctx {
+       struct device *qidev;
+       spinlock_t lock;        /* Protects multiple init of driver context */
+       struct caam_drv_ctx *drv_ctx[NUM_OP];
++      bool xts_key_fallback;
+       struct crypto_skcipher *fallback;
+ };
+@@ -734,11 +736,15 @@ static int xts_skcipher_setkey(struct cr
+       int ret = 0;
+       int err;
+-      if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
++      err = xts_verify_key(skcipher, key, keylen);
++      if (err) {
+               dev_dbg(jrdev, "key size mismatch\n");
+-              return -EINVAL;
++              return err;
+       }
++      if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
++              ctx->xts_key_fallback = true;
++
+       err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+       if (err)
+               return err;
+@@ -1407,7 +1413,8 @@ static inline int skcipher_crypt(struct
+       if (!req->cryptlen && !ctx->fallback)
+               return 0;
+-      if (ctx->fallback && xts_skcipher_ivsize(req)) {
++      if (ctx->fallback && (xts_skcipher_ivsize(req) ||
++                            ctx->xts_key_fallback)) {
+               struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+               skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
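
The resulting setkey policy can be sketched as standalone C (a simplification: the real xts_verify_key() additionally rejects keys whose two halves are identical when FIPS mode is enabled):

#include <stdbool.h>
#include <stdio.h>

#define AES_KEYSIZE_128 16
#define AES_KEYSIZE_192 24
#define AES_KEYSIZE_256 32

/* returns -1 where xts_verify_key() would reject the key outright */
static int classify_xts_key(unsigned int keylen, bool *key_fallback)
{
	if (keylen != 2 * AES_KEYSIZE_128 &&
	    keylen != 2 * AES_KEYSIZE_192 &&
	    keylen != 2 * AES_KEYSIZE_256)
		return -1;

	/* only XTS-AES-128 and XTS-AES-256 run natively on CAAM */
	*key_fallback = keylen == 2 * AES_KEYSIZE_192;
	return 0;
}

int main(void)
{
	static const unsigned int lens[] = { 32, 48, 64, 40 };
	bool fb;

	for (int i = 0; i < 4; i++) {
		if (classify_xts_key(lens[i], &fb))
			printf("%u bytes: rejected\n", lens[i]);
		else
			printf("%u bytes: %s\n", lens[i],
			       fb ? "fallback" : "native");
	}
	return 0;
}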
diff --git a/queue-5.8/ima-don-t-ignore-errors-from-crypto_shash_update.patch b/queue-5.8/ima-don-t-ignore-errors-from-crypto_shash_update.patch
new file mode 100644
index 0000000..85c9788
--- /dev/null
@@ -0,0 +1,35 @@
+From 60386b854008adc951c470067f90a2d85b5d520f Mon Sep 17 00:00:00 2001
+From: Roberto Sassu <roberto.sassu@huawei.com>
+Date: Fri, 4 Sep 2020 11:23:28 +0200
+Subject: ima: Don't ignore errors from crypto_shash_update()
+
+From: Roberto Sassu <roberto.sassu@huawei.com>
+
+commit 60386b854008adc951c470067f90a2d85b5d520f upstream.
+
+Errors returned by crypto_shash_update() are not checked in
+ima_calc_boot_aggregate_tfm() and thus can be overwritten at the next
+iteration of the loop. This patch adds a check after calling
+crypto_shash_update() and returns immediately if the result is not zero.
+
+Cc: stable@vger.kernel.org
+Fixes: 3323eec921efd ("integrity: IMA as an integrity service provider")
+Signed-off-by: Roberto Sassu <roberto.sassu@huawei.com>
+Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ security/integrity/ima/ima_crypto.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/security/integrity/ima/ima_crypto.c
++++ b/security/integrity/ima/ima_crypto.c
+@@ -829,6 +829,8 @@ static int ima_calc_boot_aggregate_tfm(c
+               /* now accumulate with current aggregate */
+               rc = crypto_shash_update(shash, d.digest,
+                                        crypto_shash_digestsize(tfm));
++              if (rc != 0)
++                      return rc;
+       }
+       /*
+        * Extend cumulative digest over TPM registers 8-9, which contain
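
The pattern the patch enforces is worth isolating: without the early return, the rc from a failed crypto_shash_update() is silently overwritten on the next loop iteration. A runnable model with a stand-in update function:

#include <stdio.h>

/* stand-in for crypto_shash_update(): fails on one chunk */
static int update(int chunk)
{
	return chunk == 2 ? -5 /* -EIO */ : 0;
}

static int aggregate(void)
{
	int rc = 0;

	for (int pcr = 0; pcr < 8; pcr++) {
		rc = update(pcr);
		if (rc != 0)	/* the check this patch adds */
			return rc;
	}
	return rc;
}

int main(void)
{
	printf("rc=%d\n", aggregate());	/* prints rc=-5, not rc=0 */
	return 0;
}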
diff --git a/queue-5.8/series b/queue-5.8/series
index 492fc5a66ce06a4cbb298ed60908e24482432ed5..8ae6fcf09ce6b9af0106552d030944442a601933 100644
@@ -72,3 +72,8 @@ kvm-nvmx-reload-vmcs01-if-getting-vmcs12-s-pages-fails.patch
 kvm-x86-mmu-commit-zap-of-remaining-invalid-pages-when-recovering-lpages.patch
 kvm-x86-intercept-la57-to-inject-gp-fault-when-it-s-reserved.patch
 kvm-svm-initialize-prev_ga_tag-before-use.patch
+ima-don-t-ignore-errors-from-crypto_shash_update.patch
+crypto-caam-add-xts-check-for-block-length-equal-to-zero.patch
+crypto-algif_aead-do-not-set-may_backlog-on-the-async-path.patch
+crypto-caam-qi-add-fallback-for-xts-with-more-than-8b-iv.patch
+crypto-caam-qi-add-support-for-more-xts-key-lengths.patch