git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
fixes for 5.5
author Sasha Levin <sashal@kernel.org>
Mon, 10 Feb 2020 03:11:55 +0000 (22:11 -0500)
committer Sasha Levin <sashal@kernel.org>
Mon, 10 Feb 2020 03:11:55 +0000 (22:11 -0500)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-5.5/crypto-atmel-aes-fix-ctr-counter-overflow-when-multi.patch [new file with mode: 0644]
queue-5.5/crypto-atmel-aes-fix-saving-of-iv-for-ctr-mode.patch [new file with mode: 0644]
queue-5.5/crypto-atmel-aes-tdes-do-not-save-iv-for-ecb-mode.patch [new file with mode: 0644]
queue-5.5/crypto-atmel-tdes-map-driver-data-flags-to-mode-regi.patch [new file with mode: 0644]
queue-5.5/ib-core-fix-build-failure-without-hugepages.patch [new file with mode: 0644]
queue-5.5/rxrpc-fix-service-call-disconnection.patch [new file with mode: 0644]
queue-5.5/series

diff --git a/queue-5.5/crypto-atmel-aes-fix-ctr-counter-overflow-when-multi.patch b/queue-5.5/crypto-atmel-aes-fix-ctr-counter-overflow-when-multi.patch
new file mode 100644 (file)
index 0000000..0b1eb76
--- /dev/null
@@ -0,0 +1,51 @@
+From 561acf4a4138f630b4cd1d78b76e6b294112542d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Dec 2019 14:45:44 +0000
+Subject: crypto: atmel-aes - Fix CTR counter overflow when multiple fragments
+
+From: Tudor Ambarus <tudor.ambarus@microchip.com>
+
+[ Upstream commit 3907ccfaec5d9965e306729936fc732c94d2c1e7 ]
+
+The CTR transfer works in fragments of data of maximum 1 MByte because
+of the 16 bit CTR counter embedded in the IP. Fix the CTR counter
+overflow handling for messages larger than 1 MByte.
+
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Fixes: 781a08d9740a ("crypto: atmel-aes - Fix counter overflow in CTR mode")
+Signed-off-by: Tudor Ambarus <tudor.ambarus@microchip.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/crypto/atmel-aes.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
+index ea9dcd7ce799b..b4dee726b2530 100644
+--- a/drivers/crypto/atmel-aes.c
++++ b/drivers/crypto/atmel-aes.c
+@@ -121,7 +121,7 @@ struct atmel_aes_ctr_ctx {
+       size_t                  offset;
+       struct scatterlist      src[2];
+       struct scatterlist      dst[2];
+-      u16                     blocks;
++      u32                     blocks;
+ };
+ struct atmel_aes_gcm_ctx {
+@@ -528,6 +528,12 @@ static void atmel_aes_ctr_update_req_iv(struct atmel_aes_dev *dd)
+       unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+       int i;
++      /*
++       * The CTR transfer works in fragments of data of maximum 1 MByte
++       * because of the 16 bit CTR counter embedded in the IP. When reaching
++       * here, ctx->blocks contains the number of blocks of the last fragment
++       * processed, there is no need to explicit cast it to u16.
++       */
+       for (i = 0; i < ctx->blocks; i++)
+               crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE);
+-- 
+2.20.1
+
diff --git a/queue-5.5/crypto-atmel-aes-fix-saving-of-iv-for-ctr-mode.patch b/queue-5.5/crypto-atmel-aes-fix-saving-of-iv-for-ctr-mode.patch
new file mode 100644 (file)
index 0000000..f4dce19
--- /dev/null
@@ -0,0 +1,117 @@
+From 5e00103fd0d4213ee073bd5d4bf194f93ef8580d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Dec 2019 09:54:03 +0000
+Subject: crypto: atmel-aes - Fix saving of IV for CTR mode
+
+From: Tudor Ambarus <tudor.ambarus@microchip.com>
+
+[ Upstream commit 371731ec2179d5810683406e7fc284b41b127df7 ]
+
+The req->iv of the skcipher_request is expected to contain the
+last used IV. Update the req->iv for CTR mode.
+
+Fixes: bd3c7b5c2aba ("crypto: atmel - add Atmel AES driver")
+Signed-off-by: Tudor Ambarus <tudor.ambarus@microchip.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/crypto/atmel-aes.c | 43 +++++++++++++++++++++++++++-----------
+ 1 file changed, 31 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
+index 7b7079db2e860..ea9dcd7ce799b 100644
+--- a/drivers/crypto/atmel-aes.c
++++ b/drivers/crypto/atmel-aes.c
+@@ -121,6 +121,7 @@ struct atmel_aes_ctr_ctx {
+       size_t                  offset;
+       struct scatterlist      src[2];
+       struct scatterlist      dst[2];
++      u16                     blocks;
+ };
+ struct atmel_aes_gcm_ctx {
+@@ -513,6 +514,26 @@ static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd)
+       }
+ }
++static inline struct atmel_aes_ctr_ctx *
++atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
++{
++      return container_of(ctx, struct atmel_aes_ctr_ctx, base);
++}
++
++static void atmel_aes_ctr_update_req_iv(struct atmel_aes_dev *dd)
++{
++      struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
++      struct skcipher_request *req = skcipher_request_cast(dd->areq);
++      struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
++      unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
++      int i;
++
++      for (i = 0; i < ctx->blocks; i++)
++              crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE);
++
++      memcpy(req->iv, ctx->iv, ivsize);
++}
++
+ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
+ {
+       struct skcipher_request *req = skcipher_request_cast(dd->areq);
+@@ -527,8 +548,12 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
+       dd->flags &= ~AES_FLAGS_BUSY;
+       if (!dd->ctx->is_aead &&
+-          (rctx->mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_ECB)
+-              atmel_aes_set_iv_as_last_ciphertext_block(dd);
++          (rctx->mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_ECB) {
++              if ((rctx->mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_CTR)
++                      atmel_aes_set_iv_as_last_ciphertext_block(dd);
++              else
++                      atmel_aes_ctr_update_req_iv(dd);
++      }
+       if (dd->is_async)
+               dd->areq->complete(dd->areq, err);
+@@ -1007,12 +1032,6 @@ static int atmel_aes_start(struct atmel_aes_dev *dd)
+                                  atmel_aes_transfer_complete);
+ }
+-static inline struct atmel_aes_ctr_ctx *
+-atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
+-{
+-      return container_of(ctx, struct atmel_aes_ctr_ctx, base);
+-}
+-
+ static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
+ {
+       struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
+@@ -1020,7 +1039,7 @@ static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
+       struct scatterlist *src, *dst;
+       size_t datalen;
+       u32 ctr;
+-      u16 blocks, start, end;
++      u16 start, end;
+       bool use_dma, fragmented = false;
+       /* Check for transfer completion. */
+@@ -1030,14 +1049,14 @@ static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
+       /* Compute data length. */
+       datalen = req->cryptlen - ctx->offset;
+-      blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
++      ctx->blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
+       ctr = be32_to_cpu(ctx->iv[3]);
+       /* Check 16bit counter overflow. */
+       start = ctr & 0xffff;
+-      end = start + blocks - 1;
++      end = start + ctx->blocks - 1;
+-      if (blocks >> 16 || end < start) {
++      if (ctx->blocks >> 16 || end < start) {
+               ctr |= 0xffff;
+               datalen = AES_BLOCK_SIZE * (0x10000 - start);
+               fragmented = true;
+-- 
+2.20.1
+
diff --git a/queue-5.5/crypto-atmel-aes-tdes-do-not-save-iv-for-ecb-mode.patch b/queue-5.5/crypto-atmel-aes-tdes-do-not-save-iv-for-ecb-mode.patch
new file mode 100644 (file)
index 0000000..54dbbe3
--- /dev/null
@@ -0,0 +1,86 @@
+From 4b17ea31971131026bab74d8f92d9ff9b3d8716e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Dec 2019 09:54:00 +0000
+Subject: crypto: atmel-{aes,tdes} - Do not save IV for ECB mode
+
+From: Tudor Ambarus <tudor.ambarus@microchip.com>
+
+[ Upstream commit c65d123742a7bf2a5bc9fa8398e1fd2376eb4c43 ]
+
+ECB mode does not use IV.
+
+Signed-off-by: Tudor Ambarus <tudor.ambarus@microchip.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/crypto/atmel-aes.c  | 9 +++++++--
+ drivers/crypto/atmel-tdes.c | 7 +++++--
+ 2 files changed, 12 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
+index 24f1fba6513ef..7b7079db2e860 100644
+--- a/drivers/crypto/atmel-aes.c
++++ b/drivers/crypto/atmel-aes.c
+@@ -515,6 +515,9 @@ static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd)
+ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
+ {
++      struct skcipher_request *req = skcipher_request_cast(dd->areq);
++      struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
++
+ #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
+       if (dd->ctx->is_aead)
+               atmel_aes_authenc_complete(dd, err);
+@@ -523,7 +526,8 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
+       clk_disable(dd->iclk);
+       dd->flags &= ~AES_FLAGS_BUSY;
+-      if (!dd->ctx->is_aead)
++      if (!dd->ctx->is_aead &&
++          (rctx->mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_ECB)
+               atmel_aes_set_iv_as_last_ciphertext_block(dd);
+       if (dd->is_async)
+@@ -1121,7 +1125,8 @@ static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
+       rctx = skcipher_request_ctx(req);
+       rctx->mode = mode;
+-      if (!(mode & AES_FLAGS_ENCRYPT) && (req->src == req->dst)) {
++      if ((mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_ECB &&
++          !(mode & AES_FLAGS_ENCRYPT) && req->src == req->dst) {
+               unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+               if (req->cryptlen >= ivsize)
+diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
+index 0c1f79b30fc1b..eaa14a80d40ce 100644
+--- a/drivers/crypto/atmel-tdes.c
++++ b/drivers/crypto/atmel-tdes.c
+@@ -600,12 +600,14 @@ atmel_tdes_set_iv_as_last_ciphertext_block(struct atmel_tdes_dev *dd)
+ static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
+ {
+       struct skcipher_request *req = dd->req;
++      struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
+       clk_disable_unprepare(dd->iclk);
+       dd->flags &= ~TDES_FLAGS_BUSY;
+-      atmel_tdes_set_iv_as_last_ciphertext_block(dd);
++      if ((rctx->mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB)
++              atmel_tdes_set_iv_as_last_ciphertext_block(dd);
+       req->base.complete(&req->base, err);
+ }
+@@ -727,7 +729,8 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
+       rctx->mode = mode;
+-      if (!(mode & TDES_FLAGS_ENCRYPT) && req->src == req->dst) {
++      if ((mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB &&
++          !(mode & TDES_FLAGS_ENCRYPT) && req->src == req->dst) {
+               unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+               if (req->cryptlen >= ivsize)
+-- 
+2.20.1
+
diff --git a/queue-5.5/crypto-atmel-tdes-map-driver-data-flags-to-mode-regi.patch b/queue-5.5/crypto-atmel-tdes-map-driver-data-flags-to-mode-regi.patch
new file mode 100644 (file)
index 0000000..a8adc1a
--- /dev/null
@@ -0,0 +1,307 @@
+From 18496ab852286672851a6d1bd482250c0a883194 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Dec 2019 09:53:56 +0000
+Subject: crypto: atmel-tdes - Map driver data flags to Mode Register
+
+From: Tudor Ambarus <tudor.ambarus@microchip.com>
+
+[ Upstream commit 848572f817721499c05b66553afc7ce0c08b1723 ]
+
+Simplifies the configuration of the TDES IP.
+
+Signed-off-by: Tudor Ambarus <tudor.ambarus@microchip.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/crypto/atmel-tdes.c | 144 ++++++++++++++++++------------------
+ 1 file changed, 71 insertions(+), 73 deletions(-)
+
+diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
+index eaa14a80d40ce..fde34846b0170 100644
+--- a/drivers/crypto/atmel-tdes.c
++++ b/drivers/crypto/atmel-tdes.c
+@@ -41,20 +41,23 @@
+ #include "atmel-tdes-regs.h"
+ /* TDES flags  */
+-#define TDES_FLAGS_MODE_MASK          0x00ff
+-#define TDES_FLAGS_ENCRYPT    BIT(0)
+-#define TDES_FLAGS_CBC                BIT(1)
+-#define TDES_FLAGS_CFB                BIT(2)
+-#define TDES_FLAGS_CFB8               BIT(3)
+-#define TDES_FLAGS_CFB16      BIT(4)
+-#define TDES_FLAGS_CFB32      BIT(5)
+-#define TDES_FLAGS_CFB64      BIT(6)
+-#define TDES_FLAGS_OFB                BIT(7)
+-
+-#define TDES_FLAGS_INIT               BIT(16)
+-#define TDES_FLAGS_FAST               BIT(17)
+-#define TDES_FLAGS_BUSY               BIT(18)
+-#define TDES_FLAGS_DMA                BIT(19)
++/* Reserve bits [17:16], [13:12], [2:0] for AES Mode Register */
++#define TDES_FLAGS_ENCRYPT    TDES_MR_CYPHER_ENC
++#define TDES_FLAGS_OPMODE_MASK        (TDES_MR_OPMOD_MASK | TDES_MR_CFBS_MASK)
++#define TDES_FLAGS_ECB                TDES_MR_OPMOD_ECB
++#define TDES_FLAGS_CBC                TDES_MR_OPMOD_CBC
++#define TDES_FLAGS_OFB                TDES_MR_OPMOD_OFB
++#define TDES_FLAGS_CFB64      (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_64b)
++#define TDES_FLAGS_CFB32      (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_32b)
++#define TDES_FLAGS_CFB16      (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_16b)
++#define TDES_FLAGS_CFB8               (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_8b)
++
++#define TDES_FLAGS_MODE_MASK  (TDES_FLAGS_OPMODE_MASK | TDES_FLAGS_ENCRYPT)
++
++#define TDES_FLAGS_INIT               BIT(3)
++#define TDES_FLAGS_FAST               BIT(4)
++#define TDES_FLAGS_BUSY               BIT(5)
++#define TDES_FLAGS_DMA                BIT(6)
+ #define ATMEL_TDES_QUEUE_LENGTH       50
+@@ -282,25 +285,7 @@ static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
+               valmr |= TDES_MR_TDESMOD_DES;
+       }
+-      if (dd->flags & TDES_FLAGS_CBC) {
+-              valmr |= TDES_MR_OPMOD_CBC;
+-      } else if (dd->flags & TDES_FLAGS_CFB) {
+-              valmr |= TDES_MR_OPMOD_CFB;
+-
+-              if (dd->flags & TDES_FLAGS_CFB8)
+-                      valmr |= TDES_MR_CFBS_8b;
+-              else if (dd->flags & TDES_FLAGS_CFB16)
+-                      valmr |= TDES_MR_CFBS_16b;
+-              else if (dd->flags & TDES_FLAGS_CFB32)
+-                      valmr |= TDES_MR_CFBS_32b;
+-              else if (dd->flags & TDES_FLAGS_CFB64)
+-                      valmr |= TDES_MR_CFBS_64b;
+-      } else if (dd->flags & TDES_FLAGS_OFB) {
+-              valmr |= TDES_MR_OPMOD_OFB;
+-      }
+-
+-      if ((dd->flags & TDES_FLAGS_ENCRYPT) || (dd->flags & TDES_FLAGS_OFB))
+-              valmr |= TDES_MR_CYPHER_ENC;
++      valmr |= dd->flags & TDES_FLAGS_MODE_MASK;
+       atmel_tdes_write(dd, TDES_CR, valcr);
+       atmel_tdes_write(dd, TDES_MR, valmr);
+@@ -308,10 +293,8 @@ static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
+       atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key,
+                                               dd->ctx->keylen >> 2);
+-      if (((dd->flags & TDES_FLAGS_CBC) || (dd->flags & TDES_FLAGS_CFB) ||
+-              (dd->flags & TDES_FLAGS_OFB)) && dd->req->iv) {
++      if (dd->req->iv && (valmr & TDES_MR_OPMOD_MASK) != TDES_MR_OPMOD_ECB)
+               atmel_tdes_write_n(dd, TDES_IV1R, (void *)dd->req->iv, 2);
+-      }
+       return 0;
+ }
+@@ -402,6 +385,7 @@ static int atmel_tdes_crypt_pdc(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
+ {
+       struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct atmel_tdes_dev *dd = ctx->dd;
++      struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
+       int len32;
+       dd->dma_size = length;
+@@ -411,12 +395,19 @@ static int atmel_tdes_crypt_pdc(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
+                                          DMA_TO_DEVICE);
+       }
+-      if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB8))
++      switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
++      case TDES_FLAGS_CFB8:
+               len32 = DIV_ROUND_UP(length, sizeof(u8));
+-      else if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB16))
++              break;
++
++      case TDES_FLAGS_CFB16:
+               len32 = DIV_ROUND_UP(length, sizeof(u16));
+-      else
++              break;
++
++      default:
+               len32 = DIV_ROUND_UP(length, sizeof(u32));
++              break;
++      }
+       atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
+       atmel_tdes_write(dd, TDES_TPR, dma_addr_in);
+@@ -438,8 +429,10 @@ static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
+ {
+       struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct atmel_tdes_dev *dd = ctx->dd;
++      struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
+       struct scatterlist sg[2];
+       struct dma_async_tx_descriptor  *in_desc, *out_desc;
++      enum dma_slave_buswidth addr_width;
+       dd->dma_size = length;
+@@ -448,23 +441,23 @@ static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
+                                          DMA_TO_DEVICE);
+       }
+-      if (dd->flags & TDES_FLAGS_CFB8) {
+-              dd->dma_lch_in.dma_conf.dst_addr_width =
+-                      DMA_SLAVE_BUSWIDTH_1_BYTE;
+-              dd->dma_lch_out.dma_conf.src_addr_width =
+-                      DMA_SLAVE_BUSWIDTH_1_BYTE;
+-      } else if (dd->flags & TDES_FLAGS_CFB16) {
+-              dd->dma_lch_in.dma_conf.dst_addr_width =
+-                      DMA_SLAVE_BUSWIDTH_2_BYTES;
+-              dd->dma_lch_out.dma_conf.src_addr_width =
+-                      DMA_SLAVE_BUSWIDTH_2_BYTES;
+-      } else {
+-              dd->dma_lch_in.dma_conf.dst_addr_width =
+-                      DMA_SLAVE_BUSWIDTH_4_BYTES;
+-              dd->dma_lch_out.dma_conf.src_addr_width =
+-                      DMA_SLAVE_BUSWIDTH_4_BYTES;
++      switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
++      case TDES_FLAGS_CFB8:
++              addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
++              break;
++
++      case TDES_FLAGS_CFB16:
++              addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
++              break;
++
++      default:
++              addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
++              break;
+       }
++      dd->dma_lch_in.dma_conf.dst_addr_width = addr_width;
++      dd->dma_lch_out.dma_conf.src_addr_width = addr_width;
++
+       dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
+       dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
+@@ -701,30 +694,38 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
+       struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(skcipher);
+       struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
+-      if (mode & TDES_FLAGS_CFB8) {
++      switch (mode & TDES_FLAGS_OPMODE_MASK) {
++      case TDES_FLAGS_CFB8:
+               if (!IS_ALIGNED(req->cryptlen, CFB8_BLOCK_SIZE)) {
+                       pr_err("request size is not exact amount of CFB8 blocks\n");
+                       return -EINVAL;
+               }
+               ctx->block_size = CFB8_BLOCK_SIZE;
+-      } else if (mode & TDES_FLAGS_CFB16) {
++              break;
++
++      case TDES_FLAGS_CFB16:
+               if (!IS_ALIGNED(req->cryptlen, CFB16_BLOCK_SIZE)) {
+                       pr_err("request size is not exact amount of CFB16 blocks\n");
+                       return -EINVAL;
+               }
+               ctx->block_size = CFB16_BLOCK_SIZE;
+-      } else if (mode & TDES_FLAGS_CFB32) {
++              break;
++
++      case TDES_FLAGS_CFB32:
+               if (!IS_ALIGNED(req->cryptlen, CFB32_BLOCK_SIZE)) {
+                       pr_err("request size is not exact amount of CFB32 blocks\n");
+                       return -EINVAL;
+               }
+               ctx->block_size = CFB32_BLOCK_SIZE;
+-      } else {
++              break;
++
++      default:
+               if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
+                       pr_err("request size is not exact amount of DES blocks\n");
+                       return -EINVAL;
+               }
+               ctx->block_size = DES_BLOCK_SIZE;
++              break;
+       }
+       rctx->mode = mode;
+@@ -844,17 +845,17 @@ static int atmel_tdes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ static int atmel_tdes_ecb_encrypt(struct skcipher_request *req)
+ {
+-      return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT);
++      return atmel_tdes_crypt(req, TDES_FLAGS_ECB | TDES_FLAGS_ENCRYPT);
+ }
+ static int atmel_tdes_ecb_decrypt(struct skcipher_request *req)
+ {
+-      return atmel_tdes_crypt(req, 0);
++      return atmel_tdes_crypt(req, TDES_FLAGS_ECB);
+ }
+ static int atmel_tdes_cbc_encrypt(struct skcipher_request *req)
+ {
+-      return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CBC);
++      return atmel_tdes_crypt(req, TDES_FLAGS_CBC | TDES_FLAGS_ENCRYPT);
+ }
+ static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
+@@ -863,50 +864,47 @@ static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
+ }
+ static int atmel_tdes_cfb_encrypt(struct skcipher_request *req)
+ {
+-      return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB);
++      return atmel_tdes_crypt(req, TDES_FLAGS_CFB64 | TDES_FLAGS_ENCRYPT);
+ }
+ static int atmel_tdes_cfb_decrypt(struct skcipher_request *req)
+ {
+-      return atmel_tdes_crypt(req, TDES_FLAGS_CFB);
++      return atmel_tdes_crypt(req, TDES_FLAGS_CFB64);
+ }
+ static int atmel_tdes_cfb8_encrypt(struct skcipher_request *req)
+ {
+-      return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
+-                                              TDES_FLAGS_CFB8);
++      return atmel_tdes_crypt(req, TDES_FLAGS_CFB8 | TDES_FLAGS_ENCRYPT);
+ }
+ static int atmel_tdes_cfb8_decrypt(struct skcipher_request *req)
+ {
+-      return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB8);
++      return atmel_tdes_crypt(req, TDES_FLAGS_CFB8);
+ }
+ static int atmel_tdes_cfb16_encrypt(struct skcipher_request *req)
+ {
+-      return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
+-                                              TDES_FLAGS_CFB16);
++      return atmel_tdes_crypt(req, TDES_FLAGS_CFB16 | TDES_FLAGS_ENCRYPT);
+ }
+ static int atmel_tdes_cfb16_decrypt(struct skcipher_request *req)
+ {
+-      return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB16);
++      return atmel_tdes_crypt(req, TDES_FLAGS_CFB16);
+ }
+ static int atmel_tdes_cfb32_encrypt(struct skcipher_request *req)
+ {
+-      return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
+-                                              TDES_FLAGS_CFB32);
++      return atmel_tdes_crypt(req, TDES_FLAGS_CFB32 | TDES_FLAGS_ENCRYPT);
+ }
+ static int atmel_tdes_cfb32_decrypt(struct skcipher_request *req)
+ {
+-      return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB32);
++      return atmel_tdes_crypt(req, TDES_FLAGS_CFB32);
+ }
+ static int atmel_tdes_ofb_encrypt(struct skcipher_request *req)
+ {
+-      return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_OFB);
++      return atmel_tdes_crypt(req, TDES_FLAGS_OFB | TDES_FLAGS_ENCRYPT);
+ }
+ static int atmel_tdes_ofb_decrypt(struct skcipher_request *req)
+-- 
+2.20.1
+
diff --git a/queue-5.5/ib-core-fix-build-failure-without-hugepages.patch b/queue-5.5/ib-core-fix-build-failure-without-hugepages.patch
new file mode 100644 (file)
index 0000000..fb8cd18
--- /dev/null
@@ -0,0 +1,47 @@
+From 6024d301e9aa39aed128c7913c73ab6cac2fb230 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Jan 2020 09:47:30 +0100
+Subject: IB/core: Fix build failure without hugepages
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+[ Upstream commit 74f75cda754eb69a77f910ceb5bc85f8e9ba56a5 ]
+
+HPAGE_SHIFT is only defined on architectures that support hugepages:
+
+drivers/infiniband/core/umem_odp.c: In function 'ib_umem_odp_get':
+drivers/infiniband/core/umem_odp.c:245:26: error: 'HPAGE_SHIFT' undeclared (first use in this function); did you mean 'PAGE_SHIFT'?
+
+Enclose this in an #ifdef.
+
+Fixes: 9ff1b6466a29 ("IB/core: Fix ODP with IB_ACCESS_HUGETLB handling")
+Link: https://lore.kernel.org/r/20200109084740.2872079-1-arnd@arndb.de
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/core/umem_odp.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
+index f42fa31c24a29..b9baf7d0a5cb7 100644
+--- a/drivers/infiniband/core/umem_odp.c
++++ b/drivers/infiniband/core/umem_odp.c
+@@ -241,10 +241,11 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
+       umem_odp->umem.owning_mm = mm = current->mm;
+       umem_odp->notifier.ops = ops;
++      umem_odp->page_shift = PAGE_SHIFT;
++#ifdef CONFIG_HUGETLB_PAGE
+       if (access & IB_ACCESS_HUGETLB)
+               umem_odp->page_shift = HPAGE_SHIFT;
+-      else
+-              umem_odp->page_shift = PAGE_SHIFT;
++#endif
+       umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
+       ret = ib_init_umem_odp(umem_odp, ops);
+-- 
+2.20.1
+
diff --git a/queue-5.5/rxrpc-fix-service-call-disconnection.patch b/queue-5.5/rxrpc-fix-service-call-disconnection.patch
new file mode 100644 (file)
index 0000000..60c132b
--- /dev/null
@@ -0,0 +1,63 @@
+From 00626355dd8e2e952c03f1972a7bdf5ec5fcb91d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Feb 2020 13:55:01 +0000
+Subject: rxrpc: Fix service call disconnection
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit b39a934ec72fa2b5a74123891f25273a38378b90 ]
+
+The recent patch that substituted a flag on an rxrpc_call for the
+connection pointer being NULL as an indication that a call was disconnected
+puts the set_bit in the wrong place for service calls.  This is only a
+problem if a call is implicitly terminated by a new call coming in on the
+same connection channel instead of a terminating ACK packet.
+
+In such a case, rxrpc_input_implicit_end_call() calls
+__rxrpc_disconnect_call(), which is now (incorrectly) setting the
+disconnection bit, meaning that when rxrpc_release_call() is later called,
+it doesn't call rxrpc_disconnect_call() and so the call isn't removed from
+the peer's error distribution list and the list gets corrupted.
+
+KASAN finds the issue as an access after release on a call, but the
+position at which it occurs is confusing as it appears to be related to a
+different call (the call site is where the latter call is being removed
+from the error distribution list and either the next or pprev pointer
+points to a previously released call).
+
+Fix this by moving the setting of the flag from __rxrpc_disconnect_call()
+to rxrpc_disconnect_call() in the same place that the connection pointer
+was being cleared.
+
+Fixes: 5273a191dca6 ("rxrpc: Fix NULL pointer deref due to call->conn being cleared on disconnect")
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/rxrpc/conn_object.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
+index c0b3154f7a7e1..19e141eeed17d 100644
+--- a/net/rxrpc/conn_object.c
++++ b/net/rxrpc/conn_object.c
+@@ -171,8 +171,6 @@ void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
+       _enter("%d,%x", conn->debug_id, call->cid);
+-      set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
+-
+       if (rcu_access_pointer(chan->call) == call) {
+               /* Save the result of the call so that we can repeat it if necessary
+                * through the channel, whilst disposing of the actual call record.
+@@ -225,6 +223,7 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
+       __rxrpc_disconnect_call(conn, call);
+       spin_unlock(&conn->channel_lock);
++      set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
+       conn->idle_timestamp = jiffies;
+ }
+-- 
+2.20.1
+
index c177bd194a65d345d204e481e8059440546a2eed..0750e365273bac2311263229a3777400c828e600 100644 (file)
@@ -357,3 +357,9 @@ kvm-x86-protect-exit_reason-from-being-used-in-spect.patch
 kvm-nvmx-vmread-should-not-set-rflags-to-specify-suc.patch
 kvm-use-vcpu-specific-gva-hva-translation-when-query.patch
 kvm-play-nice-with-read-only-memslots-when-querying-.patch
+rxrpc-fix-service-call-disconnection.patch
+ib-core-fix-build-failure-without-hugepages.patch
+crypto-atmel-aes-tdes-do-not-save-iv-for-ecb-mode.patch
+crypto-atmel-aes-fix-saving-of-iv-for-ctr-mode.patch
+crypto-atmel-aes-fix-ctr-counter-overflow-when-multi.patch
+crypto-atmel-tdes-map-driver-data-flags-to-mode-regi.patch