4.17-stable patches
author		Greg Kroah-Hartman <gregkh@linuxfoundation.org>
		Thu, 14 Jun 2018 13:13:26 +0000 (15:13 +0200)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
		Thu, 14 Jun 2018 13:13:26 +0000 (15:13 +0200)
added patches:
crypto-caam-fix-dma-mapping-dir-for-generated-iv.patch
crypto-caam-fix-iv-dma-mapping-and-updating.patch
crypto-caam-fix-size-of-rsa-prime-factor-q.patch
crypto-caam-qi-fix-iv-dma-mapping-and-updating.patch
crypto-caam-strip-input-zeros-from-rsa-input-buffer.patch
crypto-cavium-fix-fallout-from-config_vmap_stack.patch
crypto-cavium-limit-result-reading-attempts.patch
crypto-omap-sham-fix-memleak.patch
crypto-vmx-remove-overly-verbose-printk-from-aes-init-routines.patch
crypto-vmx-remove-overly-verbose-printk-from-aes-xts-init.patch
input-elan_i2c-add-elan0612-lenovo-v330-14ikb-acpi-id.patch
input-goodix-add-new-acpi-id-for-gpd-win-2-touch-screen.patch

13 files changed:
queue-4.17/crypto-caam-fix-dma-mapping-dir-for-generated-iv.patch [new file with mode: 0644]
queue-4.17/crypto-caam-fix-iv-dma-mapping-and-updating.patch [new file with mode: 0644]
queue-4.17/crypto-caam-fix-size-of-rsa-prime-factor-q.patch [new file with mode: 0644]
queue-4.17/crypto-caam-qi-fix-iv-dma-mapping-and-updating.patch [new file with mode: 0644]
queue-4.17/crypto-caam-strip-input-zeros-from-rsa-input-buffer.patch [new file with mode: 0644]
queue-4.17/crypto-cavium-fix-fallout-from-config_vmap_stack.patch [new file with mode: 0644]
queue-4.17/crypto-cavium-limit-result-reading-attempts.patch [new file with mode: 0644]
queue-4.17/crypto-omap-sham-fix-memleak.patch [new file with mode: 0644]
queue-4.17/crypto-vmx-remove-overly-verbose-printk-from-aes-init-routines.patch [new file with mode: 0644]
queue-4.17/crypto-vmx-remove-overly-verbose-printk-from-aes-xts-init.patch [new file with mode: 0644]
queue-4.17/input-elan_i2c-add-elan0612-lenovo-v330-14ikb-acpi-id.patch [new file with mode: 0644]
queue-4.17/input-goodix-add-new-acpi-id-for-gpd-win-2-touch-screen.patch [new file with mode: 0644]
queue-4.17/series

diff --git a/queue-4.17/crypto-caam-fix-dma-mapping-dir-for-generated-iv.patch b/queue-4.17/crypto-caam-fix-dma-mapping-dir-for-generated-iv.patch
new file mode 100644 (file)
index 0000000..f177625
--- /dev/null
@@ -0,0 +1,164 @@
+From a38acd236cac914aafffd80af79b9556fc2c3934 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Horia=20Geant=C4=83?= <horia.geanta@nxp.com>
+Date: Wed, 28 Mar 2018 15:39:17 +0300
+Subject: crypto: caam - fix DMA mapping dir for generated IV
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Horia Geantă <horia.geanta@nxp.com>
+
+commit a38acd236cac914aafffd80af79b9556fc2c3934 upstream.
+
+In case of GIVCIPHER, IV is generated by the device.
+Fix the DMA mapping direction.
+
+Cc: <stable@vger.kernel.org> # 3.19+
+Fixes: 7222d1a34103 ("crypto: caam - add support for givencrypt cbc(aes) and rfc3686(ctr(aes))")
+Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/caam/caamalg.c |   29 +++++++++++++++++------------
+ 1 file changed, 17 insertions(+), 12 deletions(-)
+
+--- a/drivers/crypto/caam/caamalg.c
++++ b/drivers/crypto/caam/caamalg.c
+@@ -769,6 +769,7 @@ struct aead_edesc {
+  * @src_nents: number of segments in input s/w scatterlist
+  * @dst_nents: number of segments in output s/w scatterlist
+  * @iv_dma: dma address of iv for checking continuity and link table
++ * @iv_dir: DMA mapping direction for IV
+  * @sec4_sg_bytes: length of dma mapped sec4_sg space
+  * @sec4_sg_dma: bus physical mapped address of h/w link table
+  * @sec4_sg: pointer to h/w link table
+@@ -778,6 +779,7 @@ struct ablkcipher_edesc {
+       int src_nents;
+       int dst_nents;
+       dma_addr_t iv_dma;
++      enum dma_data_direction iv_dir;
+       int sec4_sg_bytes;
+       dma_addr_t sec4_sg_dma;
+       struct sec4_sg_entry *sec4_sg;
+@@ -787,7 +789,8 @@ struct ablkcipher_edesc {
+ static void caam_unmap(struct device *dev, struct scatterlist *src,
+                      struct scatterlist *dst, int src_nents,
+                      int dst_nents,
+-                     dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
++                     dma_addr_t iv_dma, int ivsize,
++                     enum dma_data_direction iv_dir, dma_addr_t sec4_sg_dma,
+                      int sec4_sg_bytes)
+ {
+       if (dst != src) {
+@@ -799,7 +802,7 @@ static void caam_unmap(struct device *de
+       }
+       if (iv_dma)
+-              dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
++              dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
+       if (sec4_sg_bytes)
+               dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
+                                DMA_TO_DEVICE);
+@@ -810,7 +813,7 @@ static void aead_unmap(struct device *de
+                      struct aead_request *req)
+ {
+       caam_unmap(dev, req->src, req->dst,
+-                 edesc->src_nents, edesc->dst_nents, 0, 0,
++                 edesc->src_nents, edesc->dst_nents, 0, 0, DMA_NONE,
+                  edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
+ }
+@@ -823,7 +826,7 @@ static void ablkcipher_unmap(struct devi
+       caam_unmap(dev, req->src, req->dst,
+                  edesc->src_nents, edesc->dst_nents,
+-                 edesc->iv_dma, ivsize,
++                 edesc->iv_dma, ivsize, edesc->iv_dir,
+                  edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
+ }
+@@ -1287,7 +1290,7 @@ static struct aead_edesc *aead_edesc_all
+                       GFP_DMA | flags);
+       if (!edesc) {
+               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+-                         0, 0, 0);
++                         0, DMA_NONE, 0, 0);
+               return ERR_PTR(-ENOMEM);
+       }
+@@ -1550,7 +1553,7 @@ static struct ablkcipher_edesc *ablkciph
+       if (dma_mapping_error(jrdev, iv_dma)) {
+               dev_err(jrdev, "unable to map IV\n");
+               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+-                         0, 0, 0);
++                         0, DMA_NONE, 0, 0);
+               return ERR_PTR(-ENOMEM);
+       }
+@@ -1572,7 +1575,7 @@ static struct ablkcipher_edesc *ablkciph
+       if (!edesc) {
+               dev_err(jrdev, "could not allocate extended descriptor\n");
+               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+-                         iv_dma, ivsize, 0, 0);
++                         iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
+               return ERR_PTR(-ENOMEM);
+       }
+@@ -1581,6 +1584,7 @@ static struct ablkcipher_edesc *ablkciph
+       edesc->sec4_sg_bytes = sec4_sg_bytes;
+       edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
+                        desc_bytes;
++      edesc->iv_dir = DMA_TO_DEVICE;
+       if (!in_contig) {
+               dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
+@@ -1598,7 +1602,7 @@ static struct ablkcipher_edesc *ablkciph
+       if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+               dev_err(jrdev, "unable to map S/G table\n");
+               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+-                         iv_dma, ivsize, 0, 0);
++                         iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
+               kfree(edesc);
+               return ERR_PTR(-ENOMEM);
+       }
+@@ -1756,11 +1760,11 @@ static struct ablkcipher_edesc *ablkciph
+        * Check if iv can be contiguous with source and destination.
+        * If so, include it. If not, create scatterlist.
+        */
+-      iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
++      iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_FROM_DEVICE);
+       if (dma_mapping_error(jrdev, iv_dma)) {
+               dev_err(jrdev, "unable to map IV\n");
+               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+-                         0, 0, 0);
++                         0, DMA_NONE, 0, 0);
+               return ERR_PTR(-ENOMEM);
+       }
+@@ -1781,7 +1785,7 @@ static struct ablkcipher_edesc *ablkciph
+       if (!edesc) {
+               dev_err(jrdev, "could not allocate extended descriptor\n");
+               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+-                         iv_dma, ivsize, 0, 0);
++                         iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0);
+               return ERR_PTR(-ENOMEM);
+       }
+@@ -1790,6 +1794,7 @@ static struct ablkcipher_edesc *ablkciph
+       edesc->sec4_sg_bytes = sec4_sg_bytes;
+       edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
+                        desc_bytes;
++      edesc->iv_dir = DMA_FROM_DEVICE;
+       if (mapped_src_nents > 1)
+               sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
+@@ -1807,7 +1812,7 @@ static struct ablkcipher_edesc *ablkciph
+       if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+               dev_err(jrdev, "unable to map S/G table\n");
+               caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+-                         iv_dma, ivsize, 0, 0);
++                         iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0);
+               kfree(edesc);
+               return ERR_PTR(-ENOMEM);
+       }
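The rule the patch above enforces, shown as a minimal hedged sketch rather than actual caam code (map_iv and its parameters are illustrative placeholders): a buffer the device fills in, such as a device-generated IV, must be mapped DMA_FROM_DEVICE so the CPU sees the data after the transfer, while a buffer the device only reads stays DMA_TO_DEVICE, and the very same direction has to be passed again when unmapping.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Sketch only, not caam code: pick the mapping direction according to
 * who writes the IV, and remember it for the matching unmap. */
static int map_iv(struct device *dev, u8 *iv, size_t ivsize,
		  bool device_generates_iv,
		  dma_addr_t *iv_dma, enum dma_data_direction *iv_dir)
{
	/* device-generated IV: the device writes, the CPU reads it back */
	*iv_dir = device_generates_iv ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	*iv_dma = dma_map_single(dev, iv, ivsize, *iv_dir);
	if (dma_mapping_error(dev, *iv_dma))
		return -ENOMEM;
	return 0;
}

/* Later, the mapping is undone with the saved direction, e.g.:
 *	dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
 */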
diff --git a/queue-4.17/crypto-caam-fix-iv-dma-mapping-and-updating.patch b/queue-4.17/crypto-caam-fix-iv-dma-mapping-and-updating.patch
new file mode 100644 (file)
index 0000000..9603974
--- /dev/null
@@ -0,0 +1,464 @@
+From 115957bb3e59fcb226ce76b97af14533f239e0ac Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Horia=20Geant=C4=83?= <horia.geanta@nxp.com>
+Date: Wed, 28 Mar 2018 15:39:18 +0300
+Subject: crypto: caam - fix IV DMA mapping and updating
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Horia Geantă <horia.geanta@nxp.com>
+
+commit 115957bb3e59fcb226ce76b97af14533f239e0ac upstream.
+
+There are two IV-related issues:
+(1) crypto API does not guarantee to provide an IV buffer that is DMAable,
+thus it's incorrect to DMA map it
+(2) for in-place decryption, since ciphertext is overwritten with
+plaintext, updated req->info will contain the last block of plaintext
+(instead of the last block of ciphertext)
+
+While these two issues could be fixed separately, it's straightforward
+to fix both at the same time - by allocating extra space in the
+ablkcipher_edesc for the IV that will be fed to the crypto engine;
+this allows for fixing (2) by saving req->src[last_block] in req->info
+directly, i.e. without allocating another temporary buffer.
+
+A side effect of the fix is that it's no longer possible to have the IV
+and req->src contiguous. Code checking for this case is removed.
+
+Cc: <stable@vger.kernel.org> # 4.13+
+Fixes: 854b06f76879 ("crypto: caam - properly set IV after {en,de}crypt")
+Link: http://lkml.kernel.org/r/20170113084620.GF22022@gondor.apana.org.au
+Reported-by: Gilad Ben-Yossef <gilad@benyossef.com>
+Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/caam/caamalg.c |  212 ++++++++++++++++++------------------------
+ 1 file changed, 91 insertions(+), 121 deletions(-)
+
+--- a/drivers/crypto/caam/caamalg.c
++++ b/drivers/crypto/caam/caamalg.c
+@@ -774,6 +774,7 @@ struct aead_edesc {
+  * @sec4_sg_dma: bus physical mapped address of h/w link table
+  * @sec4_sg: pointer to h/w link table
+  * @hw_desc: the h/w job descriptor followed by any referenced link tables
++ *         and IV
+  */
+ struct ablkcipher_edesc {
+       int src_nents;
+@@ -915,6 +916,18 @@ static void ablkcipher_encrypt_done(stru
+       scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
+                                ivsize, 0);
++      /* In case initial IV was generated, copy it in GIVCIPHER request */
++      if (edesc->iv_dir == DMA_FROM_DEVICE) {
++              u8 *iv;
++              struct skcipher_givcrypt_request *greq;
++
++              greq = container_of(req, struct skcipher_givcrypt_request,
++                                  creq);
++              iv = (u8 *)edesc->hw_desc + desc_bytes(edesc->hw_desc) +
++                   edesc->sec4_sg_bytes;
++              memcpy(greq->giv, iv, ivsize);
++      }
++
+       kfree(edesc);
+       ablkcipher_request_complete(req, err);
+@@ -925,10 +938,10 @@ static void ablkcipher_decrypt_done(stru
+ {
+       struct ablkcipher_request *req = context;
+       struct ablkcipher_edesc *edesc;
++#ifdef DEBUG
+       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+-#ifdef DEBUG
+       dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+ #endif
+@@ -946,14 +959,6 @@ static void ablkcipher_decrypt_done(stru
+                    edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+       ablkcipher_unmap(jrdev, edesc, req);
+-
+-      /*
+-       * The crypto API expects us to set the IV (req->info) to the last
+-       * ciphertext block.
+-       */
+-      scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
+-                               ivsize, 0);
+-
+       kfree(edesc);
+       ablkcipher_request_complete(req, err);
+@@ -1102,15 +1107,14 @@ static void init_authenc_job(struct aead
+  */
+ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
+                               struct ablkcipher_edesc *edesc,
+-                              struct ablkcipher_request *req,
+-                              bool iv_contig)
++                              struct ablkcipher_request *req)
+ {
+       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+       u32 *desc = edesc->hw_desc;
+-      u32 out_options = 0, in_options;
+-      dma_addr_t dst_dma, src_dma;
+-      int len, sec4_sg_index = 0;
++      u32 out_options = 0;
++      dma_addr_t dst_dma;
++      int len;
+ #ifdef DEBUG
+       print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
+@@ -1126,30 +1130,18 @@ static void init_ablkcipher_job(u32 *sh_
+       len = desc_len(sh_desc);
+       init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+-      if (iv_contig) {
+-              src_dma = edesc->iv_dma;
+-              in_options = 0;
+-      } else {
+-              src_dma = edesc->sec4_sg_dma;
+-              sec4_sg_index += edesc->src_nents + 1;
+-              in_options = LDST_SGF;
+-      }
+-      append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
++      append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->nbytes + ivsize,
++                        LDST_SGF);
+       if (likely(req->src == req->dst)) {
+-              if (edesc->src_nents == 1 && iv_contig) {
+-                      dst_dma = sg_dma_address(req->src);
+-              } else {
+-                      dst_dma = edesc->sec4_sg_dma +
+-                              sizeof(struct sec4_sg_entry);
+-                      out_options = LDST_SGF;
+-              }
++              dst_dma = edesc->sec4_sg_dma + sizeof(struct sec4_sg_entry);
++              out_options = LDST_SGF;
+       } else {
+               if (edesc->dst_nents == 1) {
+                       dst_dma = sg_dma_address(req->dst);
+               } else {
+-                      dst_dma = edesc->sec4_sg_dma +
+-                              sec4_sg_index * sizeof(struct sec4_sg_entry);
++                      dst_dma = edesc->sec4_sg_dma + (edesc->src_nents + 1) *
++                                sizeof(struct sec4_sg_entry);
+                       out_options = LDST_SGF;
+               }
+       }
+@@ -1161,13 +1153,12 @@ static void init_ablkcipher_job(u32 *sh_
+  */
+ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
+                                   struct ablkcipher_edesc *edesc,
+-                                  struct ablkcipher_request *req,
+-                                  bool iv_contig)
++                                  struct ablkcipher_request *req)
+ {
+       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+       u32 *desc = edesc->hw_desc;
+-      u32 out_options, in_options;
++      u32 in_options;
+       dma_addr_t dst_dma, src_dma;
+       int len, sec4_sg_index = 0;
+@@ -1193,15 +1184,9 @@ static void init_ablkcipher_giv_job(u32
+       }
+       append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
+-      if (iv_contig) {
+-              dst_dma = edesc->iv_dma;
+-              out_options = 0;
+-      } else {
+-              dst_dma = edesc->sec4_sg_dma +
+-                        sec4_sg_index * sizeof(struct sec4_sg_entry);
+-              out_options = LDST_SGF;
+-      }
+-      append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
++      dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
++                sizeof(struct sec4_sg_entry);
++      append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, LDST_SGF);
+ }
+ /*
+@@ -1494,8 +1479,7 @@ static int aead_decrypt(struct aead_requ
+  * allocate and map the ablkcipher extended descriptor for ablkcipher
+  */
+ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+-                                                     *req, int desc_bytes,
+-                                                     bool *iv_contig_out)
++                                                     *req, int desc_bytes)
+ {
+       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+@@ -1504,8 +1488,8 @@ static struct ablkcipher_edesc *ablkciph
+                      GFP_KERNEL : GFP_ATOMIC;
+       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+       struct ablkcipher_edesc *edesc;
+-      dma_addr_t iv_dma = 0;
+-      bool in_contig;
++      dma_addr_t iv_dma;
++      u8 *iv;
+       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+       int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
+@@ -1549,33 +1533,20 @@ static struct ablkcipher_edesc *ablkciph
+               }
+       }
+-      iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
+-      if (dma_mapping_error(jrdev, iv_dma)) {
+-              dev_err(jrdev, "unable to map IV\n");
+-              caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+-                         0, DMA_NONE, 0, 0);
+-              return ERR_PTR(-ENOMEM);
+-      }
+-
+-      if (mapped_src_nents == 1 &&
+-          iv_dma + ivsize == sg_dma_address(req->src)) {
+-              in_contig = true;
+-              sec4_sg_ents = 0;
+-      } else {
+-              in_contig = false;
+-              sec4_sg_ents = 1 + mapped_src_nents;
+-      }
++      sec4_sg_ents = 1 + mapped_src_nents;
+       dst_sg_idx = sec4_sg_ents;
+       sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+       sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
+-      /* allocate space for base edesc and hw desc commands, link tables */
+-      edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
++      /*
++       * allocate space for base edesc and hw desc commands, link tables, IV
++       */
++      edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
+                       GFP_DMA | flags);
+       if (!edesc) {
+               dev_err(jrdev, "could not allocate extended descriptor\n");
+-              caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+-                         iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
++              caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
++                         0, DMA_NONE, 0, 0);
+               return ERR_PTR(-ENOMEM);
+       }
+@@ -1586,12 +1557,22 @@ static struct ablkcipher_edesc *ablkciph
+                        desc_bytes;
+       edesc->iv_dir = DMA_TO_DEVICE;
+-      if (!in_contig) {
+-              dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
+-              sg_to_sec4_sg_last(req->src, mapped_src_nents,
+-                                 edesc->sec4_sg + 1, 0);
++      /* Make sure IV is located in a DMAable area */
++      iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
++      memcpy(iv, req->info, ivsize);
++
++      iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
++      if (dma_mapping_error(jrdev, iv_dma)) {
++              dev_err(jrdev, "unable to map IV\n");
++              caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
++                         0, DMA_NONE, 0, 0);
++              kfree(edesc);
++              return ERR_PTR(-ENOMEM);
+       }
++      dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
++      sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + 1, 0);
++
+       if (mapped_dst_nents > 1) {
+               sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
+                                  edesc->sec4_sg + dst_sg_idx, 0);
+@@ -1615,7 +1596,6 @@ static struct ablkcipher_edesc *ablkciph
+                      sec4_sg_bytes, 1);
+ #endif
+-      *iv_contig_out = in_contig;
+       return edesc;
+ }
+@@ -1625,19 +1605,16 @@ static int ablkcipher_encrypt(struct abl
+       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+       struct device *jrdev = ctx->jrdev;
+-      bool iv_contig;
+       u32 *desc;
+       int ret = 0;
+       /* allocate extended descriptor */
+-      edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
+-                                     CAAM_CMD_SZ, &iv_contig);
++      edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+       if (IS_ERR(edesc))
+               return PTR_ERR(edesc);
+       /* Create and submit job descriptor*/
+-      init_ablkcipher_job(ctx->sh_desc_enc,
+-              ctx->sh_desc_enc_dma, edesc, req, iv_contig);
++      init_ablkcipher_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req);
+ #ifdef DEBUG
+       print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
+                      DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+@@ -1661,20 +1638,25 @@ static int ablkcipher_decrypt(struct abl
+       struct ablkcipher_edesc *edesc;
+       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++      int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+       struct device *jrdev = ctx->jrdev;
+-      bool iv_contig;
+       u32 *desc;
+       int ret = 0;
+       /* allocate extended descriptor */
+-      edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
+-                                     CAAM_CMD_SZ, &iv_contig);
++      edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+       if (IS_ERR(edesc))
+               return PTR_ERR(edesc);
++      /*
++       * The crypto API expects us to set the IV (req->info) to the last
++       * ciphertext block.
++       */
++      scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
++                               ivsize, 0);
++
+       /* Create and submit job descriptor*/
+-      init_ablkcipher_job(ctx->sh_desc_dec,
+-              ctx->sh_desc_dec_dma, edesc, req, iv_contig);
++      init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req);
+       desc = edesc->hw_desc;
+ #ifdef DEBUG
+       print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
+@@ -1699,8 +1681,7 @@ static int ablkcipher_decrypt(struct abl
+  */
+ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+                               struct skcipher_givcrypt_request *greq,
+-                              int desc_bytes,
+-                              bool *iv_contig_out)
++                              int desc_bytes)
+ {
+       struct ablkcipher_request *req = &greq->creq;
+       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+@@ -1710,8 +1691,8 @@ static struct ablkcipher_edesc *ablkciph
+                      GFP_KERNEL : GFP_ATOMIC;
+       int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
+       struct ablkcipher_edesc *edesc;
+-      dma_addr_t iv_dma = 0;
+-      bool out_contig;
++      dma_addr_t iv_dma;
++      u8 *iv;
+       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+       int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
+@@ -1756,36 +1737,20 @@ static struct ablkcipher_edesc *ablkciph
+               }
+       }
+-      /*
+-       * Check if iv can be contiguous with source and destination.
+-       * If so, include it. If not, create scatterlist.
+-       */
+-      iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_FROM_DEVICE);
+-      if (dma_mapping_error(jrdev, iv_dma)) {
+-              dev_err(jrdev, "unable to map IV\n");
+-              caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+-                         0, DMA_NONE, 0, 0);
+-              return ERR_PTR(-ENOMEM);
+-      }
+-
+       sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
+       dst_sg_idx = sec4_sg_ents;
+-      if (mapped_dst_nents == 1 &&
+-          iv_dma + ivsize == sg_dma_address(req->dst)) {
+-              out_contig = true;
+-      } else {
+-              out_contig = false;
+-              sec4_sg_ents += 1 + mapped_dst_nents;
+-      }
++      sec4_sg_ents += 1 + mapped_dst_nents;
+-      /* allocate space for base edesc and hw desc commands, link tables */
++      /*
++       * allocate space for base edesc and hw desc commands, link tables, IV
++       */
+       sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
+-      edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
++      edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
+                       GFP_DMA | flags);
+       if (!edesc) {
+               dev_err(jrdev, "could not allocate extended descriptor\n");
+-              caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+-                         iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0);
++              caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
++                         0, DMA_NONE, 0, 0);
+               return ERR_PTR(-ENOMEM);
+       }
+@@ -1796,16 +1761,24 @@ static struct ablkcipher_edesc *ablkciph
+                        desc_bytes;
+       edesc->iv_dir = DMA_FROM_DEVICE;
++      /* Make sure IV is located in a DMAable area */
++      iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
++      iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_FROM_DEVICE);
++      if (dma_mapping_error(jrdev, iv_dma)) {
++              dev_err(jrdev, "unable to map IV\n");
++              caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
++                         0, DMA_NONE, 0, 0);
++              kfree(edesc);
++              return ERR_PTR(-ENOMEM);
++      }
++
+       if (mapped_src_nents > 1)
+               sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
+                                  0);
+-      if (!out_contig) {
+-              dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx,
+-                                 iv_dma, ivsize, 0);
+-              sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
+-                                 edesc->sec4_sg + dst_sg_idx + 1, 0);
+-      }
++      dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx, iv_dma, ivsize, 0);
++      sg_to_sec4_sg_last(req->dst, mapped_dst_nents, edesc->sec4_sg +
++                         dst_sg_idx + 1, 0);
+       edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+                                           sec4_sg_bytes, DMA_TO_DEVICE);
+@@ -1825,7 +1798,6 @@ static struct ablkcipher_edesc *ablkciph
+                      sec4_sg_bytes, 1);
+ #endif
+-      *iv_contig_out = out_contig;
+       return edesc;
+ }
+@@ -1836,19 +1808,17 @@ static int ablkcipher_givencrypt(struct
+       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+       struct device *jrdev = ctx->jrdev;
+-      bool iv_contig = false;
+       u32 *desc;
+       int ret = 0;
+       /* allocate extended descriptor */
+-      edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
+-                                     CAAM_CMD_SZ, &iv_contig);
++      edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+       if (IS_ERR(edesc))
+               return PTR_ERR(edesc);
+       /* Create and submit job descriptor*/
+       init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
+-                              edesc, req, iv_contig);
++                              edesc, req);
+ #ifdef DEBUG
+       print_hex_dump(KERN_ERR,
+                      "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
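The central idea of the fix above, reduced to a hedged sketch (struct my_edesc, alloc_edesc and their fields are illustrative placeholders, not the real caam structures): rather than DMA-mapping the caller's req->info pointer, which the crypto API does not promise to be DMA-able, the driver keeps a private copy of the IV at the tail of its own GFP_DMA allocation and maps that copy.

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Sketch only: descriptor header followed by hw desc, link table and IV. */
struct my_edesc {
	dma_addr_t iv_dma;
	u8 data[];
};

static struct my_edesc *alloc_edesc(struct device *dev, const u8 *req_iv,
				    size_t ivsize, size_t desc_bytes,
				    size_t sg_bytes, gfp_t flags)
{
	struct my_edesc *edesc;
	u8 *iv;

	/* one allocation covers descriptor, link table and the IV copy */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sg_bytes + ivsize,
			GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);

	iv = edesc->data + desc_bytes + sg_bytes;	/* IV sits at the tail */
	memcpy(iv, req_iv, ivsize);			/* private, DMA-able copy */

	edesc->iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->iv_dma)) {
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}
	return edesc;
}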
diff --git a/queue-4.17/crypto-caam-fix-size-of-rsa-prime-factor-q.patch b/queue-4.17/crypto-caam-fix-size-of-rsa-prime-factor-q.patch
new file mode 100644 (file)
index 0000000..d9d746c
--- /dev/null
@@ -0,0 +1,65 @@
+From 4bffaab373d9afaf862f3924442c33340bd26736 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Horia=20Geant=C4=83?= <horia.geanta@nxp.com>
+Date: Fri, 27 Apr 2018 11:40:11 +0300
+Subject: crypto: caam - fix size of RSA prime factor q
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Horia Geantă <horia.geanta@nxp.com>
+
+commit 4bffaab373d9afaf862f3924442c33340bd26736 upstream.
+
+Fix a typo where size of RSA prime factor q is using the size of
+prime factor p.
+
+Cc: <stable@vger.kernel.org> # 4.13+
+Fixes: 52e26d77b8b3 ("crypto: caam - add support for RSA key form 2")
+Fixes: 4a651b122adb ("crypto: caam - add support for RSA key form 3")
+Reported-by: David Binderman <dcb314@hotmail.com>
+Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/caam/caampkc.c |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/crypto/caam/caampkc.c
++++ b/drivers/crypto/caam/caampkc.c
+@@ -66,7 +66,7 @@ static void rsa_priv_f2_unmap(struct dev
+       struct caam_rsa_key *key = &ctx->key;
+       struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
+       size_t p_sz = key->p_sz;
+-      size_t q_sz = key->p_sz;
++      size_t q_sz = key->q_sz;
+       dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
+       dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
+@@ -83,7 +83,7 @@ static void rsa_priv_f3_unmap(struct dev
+       struct caam_rsa_key *key = &ctx->key;
+       struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
+       size_t p_sz = key->p_sz;
+-      size_t q_sz = key->p_sz;
++      size_t q_sz = key->q_sz;
+       dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
+       dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
+@@ -397,7 +397,7 @@ static int set_rsa_priv_f2_pdb(struct ak
+       struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
+       int sec4_sg_index = 0;
+       size_t p_sz = key->p_sz;
+-      size_t q_sz = key->p_sz;
++      size_t q_sz = key->q_sz;
+       pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
+       if (dma_mapping_error(dev, pdb->d_dma)) {
+@@ -472,7 +472,7 @@ static int set_rsa_priv_f3_pdb(struct ak
+       struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
+       int sec4_sg_index = 0;
+       size_t p_sz = key->p_sz;
+-      size_t q_sz = key->p_sz;
++      size_t q_sz = key->q_sz;
+       pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
+       if (dma_mapping_error(dev, pdb->p_dma)) {
diff --git a/queue-4.17/crypto-caam-qi-fix-iv-dma-mapping-and-updating.patch b/queue-4.17/crypto-caam-qi-fix-iv-dma-mapping-and-updating.patch
new file mode 100644 (file)
index 0000000..2fd8d3f
--- /dev/null
@@ -0,0 +1,431 @@
+From 3a488aaec6f343b5dc6d94529847a840bbeaf009 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Horia=20Geant=C4=83?= <horia.geanta@nxp.com>
+Date: Wed, 28 Mar 2018 15:39:19 +0300
+Subject: crypto: caam/qi - fix IV DMA mapping and updating
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Horia Geantă <horia.geanta@nxp.com>
+
+commit 3a488aaec6f343b5dc6d94529847a840bbeaf009 upstream.
+
+There are two IV-related issues:
+(1) crypto API does not guarantee to provide an IV buffer that is DMAable,
+thus it's incorrect to DMA map it
+(2) for in-place decryption, since ciphertext is overwritten with
+plaintext, updated IV (req->info) will contain the last block of plaintext
+(instead of the last block of ciphertext)
+
+While these two issues could be fixed separately, it's straightforward
+to fix both at the same time - by using the {ablkcipher,aead}_edesc
+extended descriptor to store the IV that will be fed to the crypto engine;
+this allows for fixing (2) by saving req->src[last_block] in req->info
+directly, i.e. without allocating yet another temporary buffer.
+
+A side effect of the fix is that it's no longer possible to have the IV
+contiguous with req->src or req->dst.
+Code checking for this case is removed.
+
+Cc: <stable@vger.kernel.org> # 4.14+
+Fixes: a68a19380522 ("crypto: caam/qi - properly set IV after {en,de}crypt")
+Link: http://lkml.kernel.org/r/20170113084620.GF22022@gondor.apana.org.au
+Reported-by: Gilad Ben-Yossef <gilad@benyossef.com>
+Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/caam/caamalg_qi.c |  227 +++++++++++++++++++--------------------
+ 1 file changed, 116 insertions(+), 111 deletions(-)
+
+--- a/drivers/crypto/caam/caamalg_qi.c
++++ b/drivers/crypto/caam/caamalg_qi.c
+@@ -728,7 +728,7 @@ badkey:
+  * @assoclen: associated data length, in CAAM endianness
+  * @assoclen_dma: bus physical mapped address of req->assoclen
+  * @drv_req: driver-specific request structure
+- * @sgt: the h/w link table
++ * @sgt: the h/w link table, followed by IV
+  */
+ struct aead_edesc {
+       int src_nents;
+@@ -739,9 +739,6 @@ struct aead_edesc {
+       unsigned int assoclen;
+       dma_addr_t assoclen_dma;
+       struct caam_drv_req drv_req;
+-#define CAAM_QI_MAX_AEAD_SG                                           \
+-      ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) /   \
+-       sizeof(struct qm_sg_entry))
+       struct qm_sg_entry sgt[0];
+ };
+@@ -753,7 +750,7 @@ struct aead_edesc {
+  * @qm_sg_bytes: length of dma mapped h/w link table
+  * @qm_sg_dma: bus physical mapped address of h/w link table
+  * @drv_req: driver-specific request structure
+- * @sgt: the h/w link table
++ * @sgt: the h/w link table, followed by IV
+  */
+ struct ablkcipher_edesc {
+       int src_nents;
+@@ -762,9 +759,6 @@ struct ablkcipher_edesc {
+       int qm_sg_bytes;
+       dma_addr_t qm_sg_dma;
+       struct caam_drv_req drv_req;
+-#define CAAM_QI_MAX_ABLKCIPHER_SG                                         \
+-      ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
+-       sizeof(struct qm_sg_entry))
+       struct qm_sg_entry sgt[0];
+ };
+@@ -986,17 +980,8 @@ static struct aead_edesc *aead_edesc_all
+               }
+       }
+-      if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
++      if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
+               ivsize = crypto_aead_ivsize(aead);
+-              iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
+-              if (dma_mapping_error(qidev, iv_dma)) {
+-                      dev_err(qidev, "unable to map IV\n");
+-                      caam_unmap(qidev, req->src, req->dst, src_nents,
+-                                 dst_nents, 0, 0, op_type, 0, 0);
+-                      qi_cache_free(edesc);
+-                      return ERR_PTR(-ENOMEM);
+-              }
+-      }
+       /*
+        * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
+@@ -1004,16 +989,33 @@ static struct aead_edesc *aead_edesc_all
+        */
+       qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
+                    (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+-      if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
+-              dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
+-                      qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
+-              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+-                         iv_dma, ivsize, op_type, 0, 0);
++      sg_table = &edesc->sgt[0];
++      qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
++      if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
++                   CAAM_QI_MEMCACHE_SIZE)) {
++              dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
++                      qm_sg_ents, ivsize);
++              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
++                         0, 0, 0, 0);
+               qi_cache_free(edesc);
+               return ERR_PTR(-ENOMEM);
+       }
+-      sg_table = &edesc->sgt[0];
+-      qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
++
++      if (ivsize) {
++              u8 *iv = (u8 *)(sg_table + qm_sg_ents);
++
++              /* Make sure IV is located in a DMAable area */
++              memcpy(iv, req->iv, ivsize);
++
++              iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
++              if (dma_mapping_error(qidev, iv_dma)) {
++                      dev_err(qidev, "unable to map IV\n");
++                      caam_unmap(qidev, req->src, req->dst, src_nents,
++                                 dst_nents, 0, 0, 0, 0, 0);
++                      qi_cache_free(edesc);
++                      return ERR_PTR(-ENOMEM);
++              }
++      }
+       edesc->src_nents = src_nents;
+       edesc->dst_nents = dst_nents;
+@@ -1166,15 +1168,27 @@ static void ablkcipher_done(struct caam_
+ #endif
+       ablkcipher_unmap(qidev, edesc, req);
+-      qi_cache_free(edesc);
++
++      /* In case initial IV was generated, copy it in GIVCIPHER request */
++      if (edesc->drv_req.drv_ctx->op_type == GIVENCRYPT) {
++              u8 *iv;
++              struct skcipher_givcrypt_request *greq;
++
++              greq = container_of(req, struct skcipher_givcrypt_request,
++                                  creq);
++              iv = (u8 *)edesc->sgt + edesc->qm_sg_bytes;
++              memcpy(greq->giv, iv, ivsize);
++      }
+       /*
+        * The crypto API expects us to set the IV (req->info) to the last
+        * ciphertext block. This is used e.g. by the CTS mode.
+        */
+-      scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
+-                               ivsize, 0);
++      if (edesc->drv_req.drv_ctx->op_type != DECRYPT)
++              scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
++                                       ivsize, ivsize, 0);
++      qi_cache_free(edesc);
+       ablkcipher_request_complete(req, status);
+ }
+@@ -1189,9 +1203,9 @@ static struct ablkcipher_edesc *ablkciph
+       int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+       struct ablkcipher_edesc *edesc;
+       dma_addr_t iv_dma;
+-      bool in_contig;
++      u8 *iv;
+       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+-      int dst_sg_idx, qm_sg_ents;
++      int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
+       struct qm_sg_entry *sg_table, *fd_sgt;
+       struct caam_drv_ctx *drv_ctx;
+       enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
+@@ -1238,55 +1252,53 @@ static struct ablkcipher_edesc *ablkciph
+               }
+       }
+-      iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
+-      if (dma_mapping_error(qidev, iv_dma)) {
+-              dev_err(qidev, "unable to map IV\n");
+-              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+-                         0, 0, 0, 0);
+-              return ERR_PTR(-ENOMEM);
+-      }
+-
+-      if (mapped_src_nents == 1 &&
+-          iv_dma + ivsize == sg_dma_address(req->src)) {
+-              in_contig = true;
+-              qm_sg_ents = 0;
+-      } else {
+-              in_contig = false;
+-              qm_sg_ents = 1 + mapped_src_nents;
+-      }
++      qm_sg_ents = 1 + mapped_src_nents;
+       dst_sg_idx = qm_sg_ents;
+       qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+-      if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
+-              dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
+-                      qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
+-              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+-                         iv_dma, ivsize, op_type, 0, 0);
++      qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
++      if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
++                   ivsize > CAAM_QI_MEMCACHE_SIZE)) {
++              dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
++                      qm_sg_ents, ivsize);
++              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
++                         0, 0, 0, 0);
+               return ERR_PTR(-ENOMEM);
+       }
+-      /* allocate space for base edesc and link tables */
++      /* allocate space for base edesc, link tables and IV */
+       edesc = qi_cache_alloc(GFP_DMA | flags);
+       if (unlikely(!edesc)) {
+               dev_err(qidev, "could not allocate extended descriptor\n");
+-              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+-                         iv_dma, ivsize, op_type, 0, 0);
++              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
++                         0, 0, 0, 0);
++              return ERR_PTR(-ENOMEM);
++      }
++
++      /* Make sure IV is located in a DMAable area */
++      sg_table = &edesc->sgt[0];
++      iv = (u8 *)(sg_table + qm_sg_ents);
++      memcpy(iv, req->info, ivsize);
++
++      iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
++      if (dma_mapping_error(qidev, iv_dma)) {
++              dev_err(qidev, "unable to map IV\n");
++              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
++                         0, 0, 0, 0);
++              qi_cache_free(edesc);
+               return ERR_PTR(-ENOMEM);
+       }
+       edesc->src_nents = src_nents;
+       edesc->dst_nents = dst_nents;
+       edesc->iv_dma = iv_dma;
+-      sg_table = &edesc->sgt[0];
+-      edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
++      edesc->qm_sg_bytes = qm_sg_bytes;
+       edesc->drv_req.app_ctx = req;
+       edesc->drv_req.cbk = ablkcipher_done;
+       edesc->drv_req.drv_ctx = drv_ctx;
+-      if (!in_contig) {
+-              dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
+-              sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+-      }
++      dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
++      sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+       if (mapped_dst_nents > 1)
+               sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+@@ -1304,20 +1316,12 @@ static struct ablkcipher_edesc *ablkciph
+       fd_sgt = &edesc->drv_req.fd_sgt[0];
+-      if (!in_contig)
+-              dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
+-                                        ivsize + req->nbytes, 0);
+-      else
+-              dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
+-                                    0);
++      dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
++                                ivsize + req->nbytes, 0);
+       if (req->src == req->dst) {
+-              if (!in_contig)
+-                      dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
+-                                           sizeof(*sg_table), req->nbytes, 0);
+-              else
+-                      dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
+-                                       req->nbytes, 0);
++              dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
++                                   sizeof(*sg_table), req->nbytes, 0);
+       } else if (mapped_dst_nents > 1) {
+               dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
+                                    sizeof(*sg_table), req->nbytes, 0);
+@@ -1341,10 +1345,10 @@ static struct ablkcipher_edesc *ablkciph
+       int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
+       struct ablkcipher_edesc *edesc;
+       dma_addr_t iv_dma;
+-      bool out_contig;
++      u8 *iv;
+       int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+       struct qm_sg_entry *sg_table, *fd_sgt;
+-      int dst_sg_idx, qm_sg_ents;
++      int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
+       struct caam_drv_ctx *drv_ctx;
+       drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
+@@ -1392,46 +1396,45 @@ static struct ablkcipher_edesc *ablkciph
+               mapped_dst_nents = src_nents;
+       }
+-      iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE);
+-      if (dma_mapping_error(qidev, iv_dma)) {
+-              dev_err(qidev, "unable to map IV\n");
+-              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+-                         0, 0, 0, 0);
+-              return ERR_PTR(-ENOMEM);
+-      }
+-
+       qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
+       dst_sg_idx = qm_sg_ents;
+-      if (mapped_dst_nents == 1 &&
+-          iv_dma + ivsize == sg_dma_address(req->dst)) {
+-              out_contig = true;
+-      } else {
+-              out_contig = false;
+-              qm_sg_ents += 1 + mapped_dst_nents;
+-      }
+-      if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
+-              dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
+-                      qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
+-              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+-                         iv_dma, ivsize, GIVENCRYPT, 0, 0);
++      qm_sg_ents += 1 + mapped_dst_nents;
++      qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
++      if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
++                   ivsize > CAAM_QI_MEMCACHE_SIZE)) {
++              dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
++                      qm_sg_ents, ivsize);
++              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
++                         0, 0, 0, 0);
+               return ERR_PTR(-ENOMEM);
+       }
+-      /* allocate space for base edesc and link tables */
++      /* allocate space for base edesc, link tables and IV */
+       edesc = qi_cache_alloc(GFP_DMA | flags);
+       if (!edesc) {
+               dev_err(qidev, "could not allocate extended descriptor\n");
+-              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+-                         iv_dma, ivsize, GIVENCRYPT, 0, 0);
++              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
++                         0, 0, 0, 0);
++              return ERR_PTR(-ENOMEM);
++      }
++
++      /* Make sure IV is located in a DMAable area */
++      sg_table = &edesc->sgt[0];
++      iv = (u8 *)(sg_table + qm_sg_ents);
++      iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE);
++      if (dma_mapping_error(qidev, iv_dma)) {
++              dev_err(qidev, "unable to map IV\n");
++              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
++                         0, 0, 0, 0);
++              qi_cache_free(edesc);
+               return ERR_PTR(-ENOMEM);
+       }
+       edesc->src_nents = src_nents;
+       edesc->dst_nents = dst_nents;
+       edesc->iv_dma = iv_dma;
+-      sg_table = &edesc->sgt[0];
+-      edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
++      edesc->qm_sg_bytes = qm_sg_bytes;
+       edesc->drv_req.app_ctx = req;
+       edesc->drv_req.cbk = ablkcipher_done;
+       edesc->drv_req.drv_ctx = drv_ctx;
+@@ -1439,11 +1442,9 @@ static struct ablkcipher_edesc *ablkciph
+       if (mapped_src_nents > 1)
+               sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
+-      if (!out_contig) {
+-              dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
+-              sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+-                               dst_sg_idx + 1, 0);
+-      }
++      dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
++      sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + dst_sg_idx + 1,
++                       0);
+       edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
+                                         DMA_TO_DEVICE);
+@@ -1464,13 +1465,8 @@ static struct ablkcipher_edesc *ablkciph
+               dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
+                                req->nbytes, 0);
+-      if (!out_contig)
+-              dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
+-                                   sizeof(*sg_table), ivsize + req->nbytes,
+-                                   0);
+-      else
+-              dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
+-                               ivsize + req->nbytes, 0);
++      dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
++                           sizeof(*sg_table), ivsize + req->nbytes, 0);
+       return edesc;
+ }
+@@ -1480,6 +1476,7 @@ static inline int ablkcipher_crypt(struc
+       struct ablkcipher_edesc *edesc;
+       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++      int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+       int ret;
+       if (unlikely(caam_congested))
+@@ -1490,6 +1487,14 @@ static inline int ablkcipher_crypt(struc
+       if (IS_ERR(edesc))
+               return PTR_ERR(edesc);
++      /*
++       * The crypto API expects us to set the IV (req->info) to the last
++       * ciphertext block.
++       */
++      if (!encrypt)
++              scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
++                                       ivsize, ivsize, 0);
++
+       ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
+       if (!ret) {
+               ret = -EINPROGRESS;
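Both IV patches also change when the "next IV" is captured; as a hedged sketch (save_output_iv is an illustrative helper, not a driver function): for decryption the last ciphertext block must be copied out of req->src before the job is submitted, because an in-place operation overwrites that buffer with plaintext, whereas for encryption it can still be read from req->dst after completion.

#include <crypto/scatterwalk.h>
#include <linux/crypto.h>

/* Sketch only: satisfy the crypto API contract that req->info ends up
 * holding the last ciphertext block, even for in-place decryption. */
static void save_output_iv(struct ablkcipher_request *req, int ivsize,
			   bool decrypt)
{
	/* decrypt: take the block from the source *before* it is
	 * overwritten; encrypt: the destination holds it afterwards */
	struct scatterlist *sg = decrypt ? req->src : req->dst;

	scatterwalk_map_and_copy(req->info, sg,
				 req->nbytes - ivsize, ivsize, 0);
}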
diff --git a/queue-4.17/crypto-caam-strip-input-zeros-from-rsa-input-buffer.patch b/queue-4.17/crypto-caam-strip-input-zeros-from-rsa-input-buffer.patch
new file mode 100644 (file)
index 0000000..7415d0a
--- /dev/null
@@ -0,0 +1,138 @@
+From 8a2a0dd35f2e54c023d9041a5428b6c5639af86c Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Horia=20Geant=C4=83?= <horia.geanta@nxp.com>
+Date: Mon, 16 Apr 2018 08:07:05 -0500
+Subject: crypto: caam - strip input zeros from RSA input buffer
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Horia Geantă <horia.geanta@nxp.com>
+
+commit 8a2a0dd35f2e54c023d9041a5428b6c5639af86c upstream.
+
+Sometimes the RSA input buffer provided is not stripped
+of leading zeros. This could cause its size to be bigger than that
+of the modulus, making the HW complain:
+
+caam_jr 2142000.jr1: 40000789: DECO: desc idx 7:
+Protocol Size Error - A protocol has seen an error in size. When
+running RSA, pdb size N < (size of F) when no formatting is used; or
+pdb size N < (F + 11) when formatting is used.
+
+Fix the problem by stripping off the leading zeros from input data
+before feeding it to the CAAM accelerator.
+
+Fixes: 8c419778ab57e ("crypto: caam - add support for RSA algorithm")
+Cc: <stable@vger.kernel.org> # 4.8+
+Reported-by: Martin Townsend <mtownsend1973@gmail.com>
+Link: https://lkml.kernel.org/r/CABatt_ytYORYKtApcB4izhNanEKkGFi9XAQMjHi_n-8YWoCRiw@mail.gmail.com
+Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
+Tested-by: Fabio Estevam <fabio.estevam@nxp.com>
+Reviewed-by: Tudor Ambarus <tudor.ambarus@microchip.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/caam/caampkc.c |   54 ++++++++++++++++++++++++++++++++++++++++++
+ drivers/crypto/caam/caampkc.h |    8 ++++++
+ 2 files changed, 62 insertions(+)
+
+--- a/drivers/crypto/caam/caampkc.c
++++ b/drivers/crypto/caam/caampkc.c
+@@ -166,18 +166,71 @@ static void rsa_priv_f3_done(struct devi
+       akcipher_request_complete(req, err);
+ }
++static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
++                                      unsigned int nbytes,
++                                      unsigned int flags)
++{
++      struct sg_mapping_iter miter;
++      int lzeros, ents;
++      unsigned int len;
++      unsigned int tbytes = nbytes;
++      const u8 *buff;
++
++      ents = sg_nents_for_len(sgl, nbytes);
++      if (ents < 0)
++              return ents;
++
++      sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);
++
++      lzeros = 0;
++      len = 0;
++      while (nbytes > 0) {
++              while (len && !*buff) {
++                      lzeros++;
++                      len--;
++                      buff++;
++              }
++
++              if (len && *buff)
++                      break;
++
++              sg_miter_next(&miter);
++              buff = miter.addr;
++              len = miter.length;
++
++              nbytes -= lzeros;
++              lzeros = 0;
++      }
++
++      miter.consumed = lzeros;
++      sg_miter_stop(&miter);
++      nbytes -= lzeros;
++
++      return tbytes - nbytes;
++}
++
+ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
+                                        size_t desclen)
+ {
+       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+       struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+       struct device *dev = ctx->dev;
++      struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+       struct rsa_edesc *edesc;
+       gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+                      GFP_KERNEL : GFP_ATOMIC;
++      int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
+       int sgc;
+       int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+       int src_nents, dst_nents;
++      int lzeros;
++
++      lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags);
++      if (lzeros < 0)
++              return ERR_PTR(lzeros);
++
++      req->src_len -= lzeros;
++      req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros);
+       src_nents = sg_nents_for_len(req->src, req->src_len);
+       dst_nents = sg_nents_for_len(req->dst, req->dst_len);
+@@ -953,6 +1006,7 @@ static struct akcipher_alg caam_rsa = {
+       .max_size = caam_rsa_max_size,
+       .init = caam_rsa_init_tfm,
+       .exit = caam_rsa_exit_tfm,
++      .reqsize = sizeof(struct caam_rsa_req_ctx),
+       .base = {
+               .cra_name = "rsa",
+               .cra_driver_name = "rsa-caam",
+--- a/drivers/crypto/caam/caampkc.h
++++ b/drivers/crypto/caam/caampkc.h
+@@ -96,6 +96,14 @@ struct caam_rsa_ctx {
+ };
+ /**
++ * caam_rsa_req_ctx - per request context.
++ * @src: input scatterlist (stripped of leading zeros)
++ */
++struct caam_rsa_req_ctx {
++      struct scatterlist src[2];
++};
++
++/**
+  * rsa_edesc - s/w-extended rsa descriptor
+  * @src_nents     : number of segments in input scatterlist
+  * @dst_nents     : number of segments in output scatterlist
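The stripping logic above has to walk a scatterlist with sg_miter; reduced to a hedged sketch over a flat buffer (count_leading_zeros is illustrative), the idea is simply to drop leading zero bytes so the integer handed to the accelerator is never longer than the modulus:

#include <linux/types.h>

/* Sketch only: number of leading zero bytes in a big-endian integer. */
static size_t count_leading_zeros(const u8 *buf, size_t len)
{
	size_t lzeros = 0;

	while (lzeros < len && buf[lzeros] == 0)
		lzeros++;
	return lzeros;
}

/* Illustrative use: shrink the input before programming the hardware.
 *
 *	lzeros  = count_leading_zeros(in, in_len);
 *	in     += lzeros;
 *	in_len -= lzeros;
 */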
diff --git a/queue-4.17/crypto-cavium-fix-fallout-from-config_vmap_stack.patch b/queue-4.17/crypto-cavium-fix-fallout-from-config_vmap_stack.patch
new file mode 100644 (file)
index 0000000..8c7dc92
--- /dev/null
@@ -0,0 +1,111 @@
+From 37ff02acaa3d7be87ecb89f198a549ffd3ae2403 Mon Sep 17 00:00:00 2001
+From: Jan Glauber <jglauber@cavium.com>
+Date: Mon, 9 Apr 2018 17:45:50 +0200
+Subject: crypto: cavium - Fix fallout from CONFIG_VMAP_STACK
+
+From: Jan Glauber <jglauber@cavium.com>
+
+commit 37ff02acaa3d7be87ecb89f198a549ffd3ae2403 upstream.
+
+Enabling virtual mapped kernel stacks breaks the thunderx_zip
+driver. On compression or decompression the executing CPU hangs
+in an endless loop. The reason for this is the usage of __pa
+by the driver, which no longer works for an address that is
+not part of the 1:1 mapping.
+
+The zip driver allocates a result struct on the stack and needs
+to tell the hardware the physical address within this struct
+that is used to signal the completion of the request.
+
+As the hardware gets the wrong address after the broken __pa
+conversion it writes to an arbitrary address. The zip driver then
+waits forever for the completion byte to contain a non-zero value.
+
+Allocating the result struct from 1:1 mapped memory resolves this
+bug.
+
+Signed-off-by: Jan Glauber <jglauber@cavium.com>
+Reviewed-by: Robert Richter <rrichter@cavium.com>
+Cc: stable <stable@vger.kernel.org> # 4.14
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/cavium/zip/zip_crypto.c |   22 ++++++++++++++--------
+ 1 file changed, 14 insertions(+), 8 deletions(-)
+
+--- a/drivers/crypto/cavium/zip/zip_crypto.c
++++ b/drivers/crypto/cavium/zip/zip_crypto.c
+@@ -124,7 +124,7 @@ int zip_compress(const u8 *src, unsigned
+                struct zip_kernel_ctx *zip_ctx)
+ {
+       struct zip_operation  *zip_ops   = NULL;
+-      struct zip_state      zip_state;
++      struct zip_state      *zip_state;
+       struct zip_device     *zip = NULL;
+       int ret;
+@@ -135,20 +135,23 @@ int zip_compress(const u8 *src, unsigned
+       if (!zip)
+               return -ENODEV;
+-      memset(&zip_state, 0, sizeof(struct zip_state));
++      zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
++      if (!zip_state)
++              return -ENOMEM;
++
+       zip_ops = &zip_ctx->zip_comp;
+       zip_ops->input_len  = slen;
+       zip_ops->output_len = *dlen;
+       memcpy(zip_ops->input, src, slen);
+-      ret = zip_deflate(zip_ops, &zip_state, zip);
++      ret = zip_deflate(zip_ops, zip_state, zip);
+       if (!ret) {
+               *dlen = zip_ops->output_len;
+               memcpy(dst, zip_ops->output, *dlen);
+       }
+-
++      kfree(zip_state);
+       return ret;
+ }
+@@ -157,7 +160,7 @@ int zip_decompress(const u8 *src, unsign
+                  struct zip_kernel_ctx *zip_ctx)
+ {
+       struct zip_operation  *zip_ops   = NULL;
+-      struct zip_state      zip_state;
++      struct zip_state      *zip_state;
+       struct zip_device     *zip = NULL;
+       int ret;
+@@ -168,7 +171,10 @@ int zip_decompress(const u8 *src, unsign
+       if (!zip)
+               return -ENODEV;
+-      memset(&zip_state, 0, sizeof(struct zip_state));
++      zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
++      if (!zip_state)
++              return -ENOMEM;
++
+       zip_ops = &zip_ctx->zip_decomp;
+       memcpy(zip_ops->input, src, slen);
+@@ -179,13 +185,13 @@ int zip_decompress(const u8 *src, unsign
+       zip_ops->input_len  = slen;
+       zip_ops->output_len = *dlen;
+-      ret = zip_inflate(zip_ops, &zip_state, zip);
++      ret = zip_inflate(zip_ops, zip_state, zip);
+       if (!ret) {
+               *dlen = zip_ops->output_len;
+               memcpy(dst, zip_ops->output, *dlen);
+       }
+-
++      kfree(zip_state);
+       return ret;
+ }
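
The allocation change above follows a general rule: with CONFIG_VMAP_STACK the kernel stack lives in vmalloc space, so __pa()/virt_to_phys() on a stack address no longer yields a physical address the hardware can use, and any buffer whose physical address is handed to a device has to come from the linear (1:1) mapping. A minimal sketch of that pattern, using hypothetical names that are not taken from the driver:

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/io.h>

/* Hypothetical completion word a device writes when a request finishes. */
struct hw_result {
        u64 compcode;
};

static int submit_with_linear_buffer(void (*hw_submit)(phys_addr_t))
{
        struct hw_result *res;

        /*
         * kzalloc() hands back linearly mapped memory, so virt_to_phys()
         * is valid on it; a struct on a vmalloc'ed stack would not pass
         * virt_addr_valid() and __pa() on it would be garbage.
         */
        res = kzalloc(sizeof(*res), GFP_ATOMIC);
        if (!res)
                return -ENOMEM;

        WARN_ON(!virt_addr_valid(res));
        hw_submit(virt_to_phys(res));

        /* ... poll res->compcode for completion, then release it ... */
        kfree(res);
        return 0;
}
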
diff --git a/queue-4.17/crypto-cavium-limit-result-reading-attempts.patch b/queue-4.17/crypto-cavium-limit-result-reading-attempts.patch
new file mode 100644 (file)
index 0000000..d072ffc
--- /dev/null
@@ -0,0 +1,94 @@
+From c782a8c43e94ba6c09e9de2d69b5e3a5840ce61c Mon Sep 17 00:00:00 2001
+From: Jan Glauber <jglauber@cavium.com>
+Date: Mon, 9 Apr 2018 17:45:51 +0200
+Subject: crypto: cavium - Limit result reading attempts
+
+From: Jan Glauber <jglauber@cavium.com>
+
+commit c782a8c43e94ba6c09e9de2d69b5e3a5840ce61c upstream.
+
+After issuing a request, an endless loop was used to read the
+completion state from memory, which is asynchronously updated
+by the ZIP coprocessor.
+
+Add an upper bound to the retry attempts to prevent a CPU getting stuck
+forever in case of an error. Additionally, add a read memory barrier
+and a small delay between the reading attempts.
+
+Signed-off-by: Jan Glauber <jglauber@cavium.com>
+Reviewed-by: Robert Richter <rrichter@cavium.com>
+Cc: stable <stable@vger.kernel.org> # 4.14
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/cavium/zip/common.h      |   21 +++++++++++++++++++++
+ drivers/crypto/cavium/zip/zip_deflate.c |    4 ++--
+ drivers/crypto/cavium/zip/zip_inflate.c |    4 ++--
+ 3 files changed, 25 insertions(+), 4 deletions(-)
+
+--- a/drivers/crypto/cavium/zip/common.h
++++ b/drivers/crypto/cavium/zip/common.h
+@@ -46,8 +46,10 @@
+ #ifndef __COMMON_H__
+ #define __COMMON_H__
++#include <linux/delay.h>
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
++#include <linux/io.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+@@ -149,6 +151,25 @@ struct zip_operation {
+       u32   sizeofzops;
+ };
++static inline int zip_poll_result(union zip_zres_s *result)
++{
++      int retries = 1000;
++
++      while (!result->s.compcode) {
++              if (!--retries) {
++                      pr_err("ZIP ERR: request timed out");
++                      return -ETIMEDOUT;
++              }
++              udelay(10);
++              /*
++               * Force re-reading of compcode which is updated
++               * by the ZIP coprocessor.
++               */
++              rmb();
++      }
++      return 0;
++}
++
+ /* error messages */
+ #define zip_err(fmt, args...) pr_err("ZIP ERR:%s():%d: " \
+                             fmt "\n", __func__, __LINE__, ## args)
+--- a/drivers/crypto/cavium/zip/zip_deflate.c
++++ b/drivers/crypto/cavium/zip/zip_deflate.c
+@@ -129,8 +129,8 @@ int zip_deflate(struct zip_operation *zi
+       /* Stats update for compression requests submitted */
+       atomic64_inc(&zip_dev->stats.comp_req_submit);
+-      while (!result_ptr->s.compcode)
+-              continue;
++      /* Wait for completion or error */
++      zip_poll_result(result_ptr);
+       /* Stats update for compression requests completed */
+       atomic64_inc(&zip_dev->stats.comp_req_complete);
+--- a/drivers/crypto/cavium/zip/zip_inflate.c
++++ b/drivers/crypto/cavium/zip/zip_inflate.c
+@@ -143,8 +143,8 @@ int zip_inflate(struct zip_operation *zi
+       /* Decompression requests submitted stats update */
+       atomic64_inc(&zip_dev->stats.decomp_req_submit);
+-      while (!result_ptr->s.compcode)
+-              continue;
++      /* Wait for completion or error */
++      zip_poll_result(result_ptr);
+       /* Decompression requests completed stats update */
+       atomic64_inc(&zip_dev->stats.decomp_req_complete);
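
The same bounded-wait idea applies to any completion flag that a coprocessor updates asynchronously in memory: cap the retries, force a fresh read on each pass, and give the device a moment between reads. A hedged sketch of such a helper and how a caller might propagate the timeout; the names are hypothetical and not part of the driver:

#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical status word the device writes once the request is done. */
struct hw_status {
        u64 done;
};

static int wait_for_done(struct hw_status *st)
{
        int retries = 1000;

        while (!READ_ONCE(st->done)) {  /* READ_ONCE forces a re-read */
                if (!--retries)
                        return -ETIMEDOUT;
                udelay(10);             /* brief pause before trying again */
        }
        return 0;
}

/* A caller would check the result instead of spinning forever:
 *      ret = wait_for_done(&status);
 *      if (ret)
 *              return ret;
 */
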
diff --git a/queue-4.17/crypto-omap-sham-fix-memleak.patch b/queue-4.17/crypto-omap-sham-fix-memleak.patch
new file mode 100644 (file)
index 0000000..b65bdfb
--- /dev/null
@@ -0,0 +1,35 @@
+From 9dbc8a0328efa485a6f5b68b867f9f523a3fbeff Mon Sep 17 00:00:00 2001
+From: Bin Liu <b-liu@ti.com>
+Date: Tue, 17 Apr 2018 14:53:13 -0500
+Subject: crypto: omap-sham - fix memleak
+
+From: Bin Liu <b-liu@ti.com>
+
+commit 9dbc8a0328efa485a6f5b68b867f9f523a3fbeff upstream.
+
+Fixes: 8043bb1ae03cb ("crypto: omap-sham - convert driver logic to use sgs for data xmit")
+
+The memory pages freed in omap_sham_finish_req() were fewer than those
+allocated in omap_sham_copy_sgs().
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Bin Liu <b-liu@ti.com>
+Acked-by: Tero Kristo <t-kristo@ti.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/omap-sham.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/crypto/omap-sham.c
++++ b/drivers/crypto/omap-sham.c
+@@ -1087,7 +1087,7 @@ static void omap_sham_finish_req(struct
+       if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
+               free_pages((unsigned long)sg_virt(ctx->sg),
+-                         get_order(ctx->sg->length));
++                         get_order(ctx->sg->length + ctx->bufcnt));
+       if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
+               kfree(ctx->sg);
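
The one-line change above encodes a general invariant: free_pages() must be given the same order that sized the allocation. When the allocated length included extra bytes (here the buffered count), computing the order from the shorter length at free time releases fewer pages whenever the full size crosses a power-of-two page boundary, and the remainder leaks. A small sketch of the matched pair, with hypothetical sizes:

#include <linux/gfp.h>
#include <linux/mm.h>

static void *grab_buffer(size_t payload, size_t extra)
{
        /* The order is derived from the full size being allocated ... */
        return (void *)__get_free_pages(GFP_KERNEL,
                                        get_order(payload + extra));
}

static void drop_buffer(void *buf, size_t payload, size_t extra)
{
        /*
         * ... so the free must use the same size.  get_order(payload)
         * alone would pick a smaller order when payload + extra spills
         * into the next power-of-two number of pages, leaking the tail.
         */
        free_pages((unsigned long)buf, get_order(payload + extra));
}
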
diff --git a/queue-4.17/crypto-vmx-remove-overly-verbose-printk-from-aes-init-routines.patch b/queue-4.17/crypto-vmx-remove-overly-verbose-printk-from-aes-init-routines.patch
new file mode 100644 (file)
index 0000000..8dfbb67
--- /dev/null
@@ -0,0 +1,79 @@
+From 1411b5218adbcf1d45ddb260db5553c52e8d917c Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Thu, 3 May 2018 22:29:29 +1000
+Subject: crypto: vmx - Remove overly verbose printk from AES init routines
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 1411b5218adbcf1d45ddb260db5553c52e8d917c upstream.
+
+In the vmx AES init routines we do a printk(KERN_INFO ...) to report
+the fallback implementation we're using.
+
+However, with a slow console this can significantly affect the speed of
+crypto operations. Measured with 'cryptsetup benchmark', removing the
+printk() leads to a ~5x speedup for aes-cbc decryption.
+
+So remove them.
+
+Fixes: 8676590a1593 ("crypto: vmx - Adding AES routines for VMX module")
+Fixes: 8c755ace357c ("crypto: vmx - Adding CBC routines for VMX module")
+Fixes: 4f7f60d312b3 ("crypto: vmx - Adding CTR routines for VMX module")
+Fixes: cc333cd68dfa ("crypto: vmx - Adding GHASH routines for VMX module")
+Cc: stable@vger.kernel.org # v4.1+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/vmx/aes.c     |    2 --
+ drivers/crypto/vmx/aes_cbc.c |    3 ---
+ drivers/crypto/vmx/aes_ctr.c |    2 --
+ drivers/crypto/vmx/ghash.c   |    2 --
+ 4 files changed, 9 deletions(-)
+
+--- a/drivers/crypto/vmx/aes.c
++++ b/drivers/crypto/vmx/aes.c
+@@ -48,8 +48,6 @@ static int p8_aes_init(struct crypto_tfm
+                      alg, PTR_ERR(fallback));
+               return PTR_ERR(fallback);
+       }
+-      printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+-             crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
+       crypto_cipher_set_flags(fallback,
+                               crypto_cipher_get_flags((struct
+--- a/drivers/crypto/vmx/aes_cbc.c
++++ b/drivers/crypto/vmx/aes_cbc.c
+@@ -52,9 +52,6 @@ static int p8_aes_cbc_init(struct crypto
+                      alg, PTR_ERR(fallback));
+               return PTR_ERR(fallback);
+       }
+-      printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+-              crypto_skcipher_driver_name(fallback));
+-
+       crypto_skcipher_set_flags(
+               fallback,
+--- a/drivers/crypto/vmx/aes_ctr.c
++++ b/drivers/crypto/vmx/aes_ctr.c
+@@ -50,8 +50,6 @@ static int p8_aes_ctr_init(struct crypto
+                      alg, PTR_ERR(fallback));
+               return PTR_ERR(fallback);
+       }
+-      printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+-              crypto_skcipher_driver_name(fallback));
+       crypto_skcipher_set_flags(
+               fallback,
+--- a/drivers/crypto/vmx/ghash.c
++++ b/drivers/crypto/vmx/ghash.c
+@@ -64,8 +64,6 @@ static int p8_ghash_init_tfm(struct cryp
+                      alg, PTR_ERR(fallback));
+               return PTR_ERR(fallback);
+       }
+-      printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+-             crypto_tfm_alg_driver_name(crypto_shash_tfm(fallback)));
+       crypto_shash_set_flags(fallback,
+                              crypto_shash_get_flags((struct crypto_shash
diff --git a/queue-4.17/crypto-vmx-remove-overly-verbose-printk-from-aes-xts-init.patch b/queue-4.17/crypto-vmx-remove-overly-verbose-printk-from-aes-xts-init.patch
new file mode 100644 (file)
index 0000000..1e552be
--- /dev/null
@@ -0,0 +1,34 @@
+From 730f23b66095a700e2f0786abda6bca011b31558 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Thu, 3 May 2018 22:29:30 +1000
+Subject: crypto: vmx - Remove overly verbose printk from AES XTS init
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 730f23b66095a700e2f0786abda6bca011b31558 upstream.
+
+In p8_aes_xts_init() we do a printk(KERN_INFO ...) to report the
+fallback implementation we're using. However, with a slow console this
+can significantly affect the speed of crypto operations. So remove it.
+
+Fixes: c07f5d3da643 ("crypto: vmx - Adding support for XTS")
+Cc: stable@vger.kernel.org # v4.8+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/vmx/aes_xts.c |    2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/crypto/vmx/aes_xts.c
++++ b/drivers/crypto/vmx/aes_xts.c
+@@ -53,8 +53,6 @@ static int p8_aes_xts_init(struct crypto
+                       alg, PTR_ERR(fallback));
+               return PTR_ERR(fallback);
+       }
+-      printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+-              crypto_skcipher_driver_name(fallback));
+       crypto_skcipher_set_flags(
+               fallback,
diff --git a/queue-4.17/input-elan_i2c-add-elan0612-lenovo-v330-14ikb-acpi-id.patch b/queue-4.17/input-elan_i2c-add-elan0612-lenovo-v330-14ikb-acpi-id.patch
new file mode 100644 (file)
index 0000000..07acfcc
--- /dev/null
@@ -0,0 +1,32 @@
+From e6e7e9cd8eed0e18217c899843bffbe8c7dae564 Mon Sep 17 00:00:00 2001
+From: Johannes Wienke <languitar@semipol.de>
+Date: Mon, 4 Jun 2018 13:37:26 -0700
+Subject: Input: elan_i2c - add ELAN0612 (Lenovo v330 14IKB) ACPI ID
+
+From: Johannes Wienke <languitar@semipol.de>
+
+commit e6e7e9cd8eed0e18217c899843bffbe8c7dae564 upstream.
+
+Add ELAN0612 to the list of supported touchpads; this ID is used in Lenovo
+v330 14IKB devices.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=199253
+Signed-off-by: Johannes Wienke <languitar@semipol.de>
+Cc: stable@vger.kernel.org
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/mouse/elan_i2c_core.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -1262,6 +1262,7 @@ static const struct acpi_device_id elan_
+       { "ELAN060B", 0 },
+       { "ELAN060C", 0 },
+       { "ELAN0611", 0 },
++      { "ELAN0612", 0 },
+       { "ELAN1000", 0 },
+       { }
+ };
diff --git a/queue-4.17/input-goodix-add-new-acpi-id-for-gpd-win-2-touch-screen.patch b/queue-4.17/input-goodix-add-new-acpi-id-for-gpd-win-2-touch-screen.patch
new file mode 100644 (file)
index 0000000..93aead9
--- /dev/null
@@ -0,0 +1,32 @@
+From 5ca4d1ae9bad0f59bd6f851c39b19f5366953666 Mon Sep 17 00:00:00 2001
+From: Ethan Lee <flibitijibibo@gmail.com>
+Date: Thu, 31 May 2018 16:13:17 -0700
+Subject: Input: goodix - add new ACPI id for GPD Win 2 touch screen
+
+From: Ethan Lee <flibitijibibo@gmail.com>
+
+commit 5ca4d1ae9bad0f59bd6f851c39b19f5366953666 upstream.
+
+GPD Win 2 Website: http://www.gpd.hk/gpdwin2.asp
+
+Tested on a unit from the first production run sent to Indiegogo backers.
+
+Signed-off-by: Ethan Lee <flibitijibibo@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/touchscreen/goodix.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/input/touchscreen/goodix.c
++++ b/drivers/input/touchscreen/goodix.c
+@@ -933,6 +933,7 @@ MODULE_DEVICE_TABLE(i2c, goodix_ts_id);
+ #ifdef CONFIG_ACPI
+ static const struct acpi_device_id goodix_acpi_match[] = {
+       { "GDIX1001", 0 },
++      { "GDIX1002", 0 },
+       { }
+ };
+ MODULE_DEVICE_TABLE(acpi, goodix_acpi_match);
diff --git a/queue-4.17/series b/queue-4.17/series
index 9d46be2ac7b9c4aeaddc510fa5aa37c9285245a0..a33958daaf0ee9d81a40ec92bfc9a6a5cf73ed4b 100644 (file)
@@ -31,3 +31,15 @@ doc-fix-sysfs-abi-documentation.patch
 arm64-defconfig-enable-config_pinctrl_mt7622-by-default.patch
 tty-pl011-avoid-spuriously-stuck-off-interrupts.patch
 crypto-ccree-correct-host-regs-offset.patch
+input-goodix-add-new-acpi-id-for-gpd-win-2-touch-screen.patch
+input-elan_i2c-add-elan0612-lenovo-v330-14ikb-acpi-id.patch
+crypto-caam-strip-input-zeros-from-rsa-input-buffer.patch
+crypto-caam-fix-dma-mapping-dir-for-generated-iv.patch
+crypto-caam-fix-iv-dma-mapping-and-updating.patch
+crypto-caam-qi-fix-iv-dma-mapping-and-updating.patch
+crypto-caam-fix-size-of-rsa-prime-factor-q.patch
+crypto-cavium-fix-fallout-from-config_vmap_stack.patch
+crypto-cavium-limit-result-reading-attempts.patch
+crypto-vmx-remove-overly-verbose-printk-from-aes-init-routines.patch
+crypto-vmx-remove-overly-verbose-printk-from-aes-xts-init.patch
+crypto-omap-sham-fix-memleak.patch