git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 24 Oct 2020 10:41:46 +0000 (12:41 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 24 Oct 2020 10:41:46 +0000 (12:41 +0200)
added patches:
crypto-algif_aead-do-not-set-may_backlog-on-the-async-path.patch
crypto-caam-qi-add-fallback-for-xts-with-more-than-8b-iv.patch
ima-don-t-ignore-errors-from-crypto_shash_update.patch

queue-5.4/crypto-algif_aead-do-not-set-may_backlog-on-the-async-path.patch [new file with mode: 0644]
queue-5.4/crypto-caam-qi-add-fallback-for-xts-with-more-than-8b-iv.patch [new file with mode: 0644]
queue-5.4/ima-don-t-ignore-errors-from-crypto_shash_update.patch [new file with mode: 0644]
queue-5.4/series

diff --git a/queue-5.4/crypto-algif_aead-do-not-set-may_backlog-on-the-async-path.patch b/queue-5.4/crypto-algif_aead-do-not-set-may_backlog-on-the-async-path.patch
new file mode 100644 (file)
index 0000000..b7a5582
--- /dev/null
@@ -0,0 +1,56 @@
+From cbdad1f246dd98e6c9c32a6e5212337f542aa7e0 Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Fri, 31 Jul 2020 17:03:50 +1000
+Subject: crypto: algif_aead - Do not set MAY_BACKLOG on the async path
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit cbdad1f246dd98e6c9c32a6e5212337f542aa7e0 upstream.
+
+The async path cannot use MAY_BACKLOG because it is not meant to
+block, which is what MAY_BACKLOG does.  On the other hand, both
+the sync and async paths can make use of MAY_SLEEP.
+
+Fixes: 83094e5e9e49 ("crypto: af_alg - add async support to...")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/algif_aead.c |    7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/crypto/algif_aead.c
++++ b/crypto/algif_aead.c
+@@ -78,7 +78,7 @@ static int crypto_aead_copy_sgl(struct c
+       SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
+       skcipher_request_set_sync_tfm(skreq, null_tfm);
+-      skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
++      skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP,
+                                     NULL, NULL);
+       skcipher_request_set_crypt(skreq, src, dst, len, NULL);
+@@ -291,19 +291,20 @@ static int _aead_recvmsg(struct socket *
+               areq->outlen = outlen;
+               aead_request_set_callback(&areq->cra_u.aead_req,
+-                                        CRYPTO_TFM_REQ_MAY_BACKLOG,
++                                        CRYPTO_TFM_REQ_MAY_SLEEP,
+                                         af_alg_async_cb, areq);
+               err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
+                                crypto_aead_decrypt(&areq->cra_u.aead_req);
+               /* AIO operation in progress */
+-              if (err == -EINPROGRESS || err == -EBUSY)
++              if (err == -EINPROGRESS)
+                       return -EIOCBQUEUED;
+               sock_put(sk);
+       } else {
+               /* Synchronous operation */
+               aead_request_set_callback(&areq->cra_u.aead_req,
++                                        CRYPTO_TFM_REQ_MAY_SLEEP |
+                                         CRYPTO_TFM_REQ_MAY_BACKLOG,
+                                         crypto_req_done, &ctx->wait);
+               err = crypto_wait_req(ctx->enc ?
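
A minimal sketch of the flag split the patch enforces (illustrative only, with assumed helper names; not part of the queued patch): the synchronous wait path may both sleep and backlog, while the async/AIO path may only sleep and maps -EINPROGRESS to -EIOCBQUEUED for the submitter.

#include <linux/errno.h>
#include <linux/crypto.h>
#include <crypto/aead.h>

/* Sync path: the caller waits, so a backlogged (blocking) request is fine. */
static int aead_encrypt_sync(struct aead_request *req)
{
	DECLARE_CRYPTO_WAIT(wait);

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
				       CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	return crypto_wait_req(crypto_aead_encrypt(req), &wait);
}

/* Async path: MAY_SLEEP only; backlogging would block the AIO submitter,
 * so -EINPROGRESS is the only "queued" outcome expected here. */
static int aead_encrypt_async(struct aead_request *req,
			      crypto_completion_t done, void *data)
{
	int err;

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, done, data);
	err = crypto_aead_encrypt(req);
	return err == -EINPROGRESS ? -EIOCBQUEUED : err;
}
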
diff --git a/queue-5.4/crypto-caam-qi-add-fallback-for-xts-with-more-than-8b-iv.patch b/queue-5.4/crypto-caam-qi-add-fallback-for-xts-with-more-than-8b-iv.patch
new file mode 100644 (file)
index 0000000..d08e0c5
--- /dev/null
@@ -0,0 +1,180 @@
+From 83e8aa9121380b23ebae6e413962fa2a7b52cf92 Mon Sep 17 00:00:00 2001
+From: Andrei Botila <andrei.botila@nxp.com>
+Date: Tue, 22 Sep 2020 19:03:20 +0300
+Subject: crypto: caam/qi - add fallback for XTS with more than 8B IV
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Andrei Botila <andrei.botila@nxp.com>
+
+commit 83e8aa9121380b23ebae6e413962fa2a7b52cf92 upstream.
+
+A hardware limitation exists for CAAM until Era 9 which restricts
+the accelerator to IVs of only 8 bytes. On earlier eras a fallback
+is necessary to process 16-byte IVs.
+
+Fixes: b189817cf789 ("crypto: caam/qi - add ablkcipher and authenc algorithms")
+Cc: <stable@vger.kernel.org> # v4.12+
+Signed-off-by: Andrei Botila <andrei.botila@nxp.com>
+Reviewed-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/caam/Kconfig      |    1 
+ drivers/crypto/caam/caamalg_qi.c |   70 ++++++++++++++++++++++++++++++++++++---
+ 2 files changed, 67 insertions(+), 4 deletions(-)
+
+--- a/drivers/crypto/caam/Kconfig
++++ b/drivers/crypto/caam/Kconfig
+@@ -112,6 +112,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
+       select CRYPTO_AUTHENC
+       select CRYPTO_BLKCIPHER
+       select CRYPTO_DES
++      select CRYPTO_XTS
+       help
+         Selecting this will use CAAM Queue Interface (QI) for sending
+         & receiving crypto jobs to/from CAAM. This gives better performance
+--- a/drivers/crypto/caam/caamalg_qi.c
++++ b/drivers/crypto/caam/caamalg_qi.c
+@@ -18,6 +18,7 @@
+ #include "qi.h"
+ #include "jr.h"
+ #include "caamalg_desc.h"
++#include <asm/unaligned.h>
+ /*
+  * crypto alg
+@@ -67,6 +68,11 @@ struct caam_ctx {
+       struct device *qidev;
+       spinlock_t lock;        /* Protects multiple init of driver context */
+       struct caam_drv_ctx *drv_ctx[NUM_OP];
++      struct crypto_skcipher *fallback;
++};
++
++struct caam_skcipher_req_ctx {
++      struct skcipher_request fallback_req;
+ };
+ static int aead_set_sh_desc(struct crypto_aead *aead)
+@@ -745,12 +751,17 @@ static int xts_skcipher_setkey(struct cr
+       struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+       struct device *jrdev = ctx->jrdev;
+       int ret = 0;
++      int err;
+       if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
+               dev_err(jrdev, "key size mismatch\n");
+               goto badkey;
+       }
++      err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
++      if (err)
++              return err;
++
+       ctx->cdata.keylen = keylen;
+       ctx->cdata.key_virt = key;
+       ctx->cdata.key_inline = true;
+@@ -1395,6 +1406,14 @@ static struct skcipher_edesc *skcipher_e
+       return edesc;
+ }
++static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
++{
++      struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
++      unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
++
++      return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
++}
++
+ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
+ {
+       struct skcipher_edesc *edesc;
+@@ -1405,6 +1424,21 @@ static inline int skcipher_crypt(struct
+       if (!req->cryptlen)
+               return 0;
++      if (ctx->fallback && xts_skcipher_ivsize(req)) {
++              struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
++
++              skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
++              skcipher_request_set_callback(&rctx->fallback_req,
++                                            req->base.flags,
++                                            req->base.complete,
++                                            req->base.data);
++              skcipher_request_set_crypt(&rctx->fallback_req, req->src,
++                                         req->dst, req->cryptlen, req->iv);
++
++              return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
++                               crypto_skcipher_decrypt(&rctx->fallback_req);
++      }
++
+       if (unlikely(caam_congested))
+               return -EAGAIN;
+@@ -1529,6 +1563,7 @@ static struct caam_skcipher_alg driver_a
+                       .base = {
+                               .cra_name = "xts(aes)",
+                               .cra_driver_name = "xts-aes-caam-qi",
++                              .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+                               .cra_blocksize = AES_BLOCK_SIZE,
+                       },
+                       .setkey = xts_skcipher_setkey,
+@@ -2462,9 +2497,32 @@ static int caam_cra_init(struct crypto_s
+       struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+       struct caam_skcipher_alg *caam_alg =
+               container_of(alg, typeof(*caam_alg), skcipher);
++      struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
++      u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
++      int ret = 0;
++
++      if (alg_aai == OP_ALG_AAI_XTS) {
++              const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
++              struct crypto_skcipher *fallback;
++
++              fallback = crypto_alloc_skcipher(tfm_name, 0,
++                                               CRYPTO_ALG_NEED_FALLBACK);
++              if (IS_ERR(fallback)) {
++                      dev_err(ctx->jrdev, "Failed to allocate %s fallback: %ld\n",
++                              tfm_name, PTR_ERR(fallback));
++                      return PTR_ERR(fallback);
++              }
++
++              ctx->fallback = fallback;
++              crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
++                                          crypto_skcipher_reqsize(fallback));
++      }
++
++      ret = caam_init_common(ctx, &caam_alg->caam, false);
++      if (ret && ctx->fallback)
++              crypto_free_skcipher(ctx->fallback);
+-      return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
+-                              false);
++      return ret;
+ }
+ static int caam_aead_init(struct crypto_aead *tfm)
+@@ -2490,7 +2548,11 @@ static void caam_exit_common(struct caam
+ static void caam_cra_exit(struct crypto_skcipher *tfm)
+ {
+-      caam_exit_common(crypto_skcipher_ctx(tfm));
++      struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
++
++      if (ctx->fallback)
++              crypto_free_skcipher(ctx->fallback);
++      caam_exit_common(ctx);
+ }
+ static void caam_aead_exit(struct crypto_aead *tfm)
+@@ -2524,7 +2586,7 @@ static void caam_skcipher_alg_init(struc
+       alg->base.cra_module = THIS_MODULE;
+       alg->base.cra_priority = CAAM_CRA_PRIORITY;
+       alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+-      alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
++      alg->base.cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+       alg->init = caam_cra_init;
+       alg->exit = caam_cra_exit;
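
A condensed sketch of the fallback dispatch the patch introduces (assumed function names, illustrative only; the queued patch above is authoritative): the upper 64 bits of the 16-byte XTS IV are tested, and requests the pre-Era-9 hardware cannot handle are mirrored onto the software fallback tfm.

#include <crypto/skcipher.h>
#include <asm/unaligned.h>

/* True when the upper half of the IV is non-zero, i.e. the sector index
 * does not fit in the 8 bytes that older CAAM hardware supports. */
static bool xts_iv_exceeds_8_bytes(struct skcipher_request *req)
{
	unsigned int ivsize =
		crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req));

	return get_unaligned((u64 *)(req->iv + ivsize / 2)) != 0;
}

/* Re-issue the request on the software fallback, preserving the caller's
 * flags, completion callback and scatterlists. */
static int xts_crypt_via_fallback(struct skcipher_request *req,
				  struct crypto_skcipher *fallback,
				  struct skcipher_request *fb_req,
				  bool encrypt)
{
	skcipher_request_set_tfm(fb_req, fallback);
	skcipher_request_set_callback(fb_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(fb_req, req->src, req->dst,
				   req->cryptlen, req->iv);

	return encrypt ? crypto_skcipher_encrypt(fb_req) :
			 crypto_skcipher_decrypt(fb_req);
}
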
diff --git a/queue-5.4/ima-don-t-ignore-errors-from-crypto_shash_update.patch b/queue-5.4/ima-don-t-ignore-errors-from-crypto_shash_update.patch
new file mode 100644 (file)
index 0000000..5c05784
--- /dev/null
@@ -0,0 +1,35 @@
+From 60386b854008adc951c470067f90a2d85b5d520f Mon Sep 17 00:00:00 2001
+From: Roberto Sassu <roberto.sassu@huawei.com>
+Date: Fri, 4 Sep 2020 11:23:28 +0200
+Subject: ima: Don't ignore errors from crypto_shash_update()
+
+From: Roberto Sassu <roberto.sassu@huawei.com>
+
+commit 60386b854008adc951c470067f90a2d85b5d520f upstream.
+
+Errors returned by crypto_shash_update() are not checked in
+ima_calc_boot_aggregate_tfm() and thus can be overwritten at the next
+iteration of the loop. This patch adds a check after calling
+crypto_shash_update() and returns immediately if the result is not zero.
+
+Cc: stable@vger.kernel.org
+Fixes: 3323eec921efd ("integrity: IMA as an integrity service provider")
+Signed-off-by: Roberto Sassu <roberto.sassu@huawei.com>
+Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ security/integrity/ima/ima_crypto.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/security/integrity/ima/ima_crypto.c
++++ b/security/integrity/ima/ima_crypto.c
+@@ -688,6 +688,8 @@ static int ima_calc_boot_aggregate_tfm(c
+               /* now accumulate with current aggregate */
+               rc = crypto_shash_update(shash, d.digest,
+                                        crypto_shash_digestsize(tfm));
++              if (rc != 0)
++                      return rc;
+       }
+       if (!rc)
+               crypto_shash_final(shash, digest);
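
The fix is plain error propagation inside the accumulation loop; a minimal sketch of the pattern (hypothetical helper, not the upstream function):

#include <crypto/hash.h>

/* Accumulate several buffers into one digest; a failing update aborts the
 * loop instead of being silently overwritten by the next iteration. */
static int accumulate_digests(struct shash_desc *desc, const u8 **bufs,
			      const unsigned int *lens, unsigned int n,
			      u8 *out)
{
	unsigned int i;
	int rc;

	for (i = 0; i < n; i++) {
		rc = crypto_shash_update(desc, bufs[i], lens[i]);
		if (rc)
			return rc;
	}

	return crypto_shash_final(desc, out);
}
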
diff --git a/queue-5.4/series b/queue-5.4/series
index 9c46b19a28f79af79fbcb167c16e8d89306f8f46..007436d3c5bc1dfbca8aff81fdb0f5bd87a999e1 100644 (file)
--- a/queue-5.4/series
@@ -49,3 +49,6 @@ kvm-nvmx-reset-the-segment-cache-when-stuffing-guest-segs.patch
 kvm-nvmx-reload-vmcs01-if-getting-vmcs12-s-pages-fails.patch
 kvm-x86-mmu-commit-zap-of-remaining-invalid-pages-when-recovering-lpages.patch
 kvm-svm-initialize-prev_ga_tag-before-use.patch
+ima-don-t-ignore-errors-from-crypto_shash_update.patch
+crypto-algif_aead-do-not-set-may_backlog-on-the-async-path.patch
+crypto-caam-qi-add-fallback-for-xts-with-more-than-8b-iv.patch