git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.17-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 16 Aug 2018 17:01:38 +0000 (19:01 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 16 Aug 2018 17:01:38 +0000 (19:01 +0200)
added patches:
bluetooth-hidp-buffer-overflow-in-hidp_process_report.patch
crypto-ablkcipher-fix-crash-flushing-dcache-in-error-path.patch
crypto-blkcipher-fix-crash-flushing-dcache-in-error-path.patch
crypto-ccp-check-for-null-psp-pointer-at-module-unload.patch
crypto-ccp-fix-command-completion-detection-race.patch
crypto-ccree-fix-finup.patch
crypto-ccree-fix-iv-handling.patch
crypto-skcipher-fix-aligning-block-size-in-skcipher_copy_iv.patch
crypto-skcipher-fix-crash-flushing-dcache-in-error-path.patch
crypto-vmac-require-a-block-cipher-with-128-bit-block-size.patch
crypto-vmac-separate-tfm-and-request-context.patch
crypto-x86-sha256-mb-fix-digest-copy-in-sha256_mb_mgr_get_comp_job_avx2.patch
ioremap-update-pgtable-free-interfaces-with-addr.patch
kbuild-verify-that-depmod-is-installed.patch
x86-mm-add-tlb-purge-to-free-pmd-pte-page-interfaces.patch

16 files changed:
queue-4.17/bluetooth-hidp-buffer-overflow-in-hidp_process_report.patch [new file with mode: 0644]
queue-4.17/crypto-ablkcipher-fix-crash-flushing-dcache-in-error-path.patch [new file with mode: 0644]
queue-4.17/crypto-blkcipher-fix-crash-flushing-dcache-in-error-path.patch [new file with mode: 0644]
queue-4.17/crypto-ccp-check-for-null-psp-pointer-at-module-unload.patch [new file with mode: 0644]
queue-4.17/crypto-ccp-fix-command-completion-detection-race.patch [new file with mode: 0644]
queue-4.17/crypto-ccree-fix-finup.patch [new file with mode: 0644]
queue-4.17/crypto-ccree-fix-iv-handling.patch [new file with mode: 0644]
queue-4.17/crypto-skcipher-fix-aligning-block-size-in-skcipher_copy_iv.patch [new file with mode: 0644]
queue-4.17/crypto-skcipher-fix-crash-flushing-dcache-in-error-path.patch [new file with mode: 0644]
queue-4.17/crypto-vmac-require-a-block-cipher-with-128-bit-block-size.patch [new file with mode: 0644]
queue-4.17/crypto-vmac-separate-tfm-and-request-context.patch [new file with mode: 0644]
queue-4.17/crypto-x86-sha256-mb-fix-digest-copy-in-sha256_mb_mgr_get_comp_job_avx2.patch [new file with mode: 0644]
queue-4.17/ioremap-update-pgtable-free-interfaces-with-addr.patch [new file with mode: 0644]
queue-4.17/kbuild-verify-that-depmod-is-installed.patch [new file with mode: 0644]
queue-4.17/series
queue-4.17/x86-mm-add-tlb-purge-to-free-pmd-pte-page-interfaces.patch [new file with mode: 0644]

diff --git a/queue-4.17/bluetooth-hidp-buffer-overflow-in-hidp_process_report.patch b/queue-4.17/bluetooth-hidp-buffer-overflow-in-hidp_process_report.patch
new file mode 100644 (file)
index 0000000..35f48bd
--- /dev/null
@@ -0,0 +1,50 @@
+From 7992c18810e568b95c869b227137a2215702a805 Mon Sep 17 00:00:00 2001
+From: Mark Salyzyn <salyzyn@android.com>
+Date: Tue, 31 Jul 2018 15:02:13 -0700
+Subject: Bluetooth: hidp: buffer overflow in hidp_process_report
+
+From: Mark Salyzyn <salyzyn@android.com>
+
+commit 7992c18810e568b95c869b227137a2215702a805 upstream.
+
+CVE-2018-9363
+
+The buffer length is unsigned at all layers, but it is cast to int and
+checked in hidp_process_report(), which can lead to a buffer overflow.
+Switch the len parameter to unsigned int to resolve the issue.
+
+This affects 3.18 and newer kernels.
+
+Signed-off-by: Mark Salyzyn <salyzyn@android.com>
+Fixes: a4b1b5877b514b276f0f31efe02388a9c2836728 ("HID: Bluetooth: hidp: make sure input buffers are big enough")
+Cc: Marcel Holtmann <marcel@holtmann.org>
+Cc: Johan Hedberg <johan.hedberg@gmail.com>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Benjamin Tissoires <benjamin.tissoires@redhat.com>
+Cc: linux-bluetooth@vger.kernel.org
+Cc: netdev@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Cc: security@kernel.org
+Cc: kernel-team@android.com
+Acked-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/bluetooth/hidp/core.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/bluetooth/hidp/core.c
++++ b/net/bluetooth/hidp/core.c
+@@ -431,8 +431,8 @@ static void hidp_del_timer(struct hidp_s
+               del_timer(&session->timer);
+ }
+-static void hidp_process_report(struct hidp_session *session,
+-                              int type, const u8 *data, int len, int intr)
++static void hidp_process_report(struct hidp_session *session, int type,
++                              const u8 *data, unsigned int len, int intr)
+ {
+       if (len > HID_MAX_BUFFER_SIZE)
+               len = HID_MAX_BUFFER_SIZE;
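The cast pitfall is easy to demonstrate outside the kernel. A minimal
standalone sketch of why the old "int len" clamp fails (plain userspace C;
the HID_MAX_BUFFER_SIZE value is copied from include/linux/hid.h and its
exact size is immaterial here):

    #include <stdio.h>

    #define HID_MAX_BUFFER_SIZE 16384  /* 16 KiB in current kernels */

    /* Mimics the old prototype: an unsigned length arrives as int. */
    static void process_report(const unsigned char *data, int len)
    {
            if (len > HID_MAX_BUFFER_SIZE)
                    len = HID_MAX_BUFFER_SIZE;
            /* A negative len sails past the clamp above; converting it
             * back to size_t for the actual copy yields a huge count. */
            printf("would memcpy %zu bytes\n", (size_t)len);
            (void)data;
    }

    int main(void)
    {
            unsigned char pkt[4] = { 0 };
            unsigned int attacker_len = 0xffffff00u;

            /* The implicit unsigned->int conversion makes len negative
             * (implementation-defined, wraps on common ABIs), which is
             * exactly what the patch prevents by keeping the parameter
             * unsigned end to end. */
            process_report(pkt, attacker_len);
            return 0;
    }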
diff --git a/queue-4.17/crypto-ablkcipher-fix-crash-flushing-dcache-in-error-path.patch b/queue-4.17/crypto-ablkcipher-fix-crash-flushing-dcache-in-error-path.patch
new file mode 100644 (file)
index 0000000..a0b9456
--- /dev/null
@@ -0,0 +1,133 @@
+From 318abdfbe708aaaa652c79fb500e9bd60521f9dc Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Mon, 23 Jul 2018 10:54:58 -0700
+Subject: crypto: ablkcipher - fix crash flushing dcache in error path
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 318abdfbe708aaaa652c79fb500e9bd60521f9dc upstream.
+
+Like the skcipher_walk and blkcipher_walk cases:
+
+scatterwalk_done() is only meant to be called after a nonzero number of
+bytes have been processed, since scatterwalk_pagedone() will flush the
+dcache of the *previous* page.  But in the error case of
+ablkcipher_walk_done(), e.g. if the input wasn't an integer number of
+blocks, scatterwalk_done() was actually called after advancing 0 bytes.
+This caused a crash ("BUG: unable to handle kernel paging request")
+during '!PageSlab(page)' on architectures like arm and arm64 that define
+ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE, provided that the input was
+page-aligned as in that case walk->offset == 0.
+
+Fix it by reorganizing ablkcipher_walk_done() to skip the
+scatterwalk_advance() and scatterwalk_done() if an error has occurred.
+
+Reported-by: Liu Chao <liuchao741@huawei.com>
+Fixes: bf06099db18a ("crypto: skcipher - Add ablkcipher_walk interfaces")
+Cc: <stable@vger.kernel.org> # v2.6.35+
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/ablkcipher.c |   57 +++++++++++++++++++++++-----------------------------
+ 1 file changed, 26 insertions(+), 31 deletions(-)
+
+--- a/crypto/ablkcipher.c
++++ b/crypto/ablkcipher.c
+@@ -71,11 +71,9 @@ static inline u8 *ablkcipher_get_spot(u8
+       return max(start, end_page);
+ }
+-static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
+-                                              unsigned int bsize)
++static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
++                                      unsigned int n)
+ {
+-      unsigned int n = bsize;
+-
+       for (;;) {
+               unsigned int len_this_page = scatterwalk_pagelen(&walk->out);
+@@ -87,17 +85,13 @@ static inline unsigned int ablkcipher_do
+               n -= len_this_page;
+               scatterwalk_start(&walk->out, sg_next(walk->out.sg));
+       }
+-
+-      return bsize;
+ }
+-static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
+-                                              unsigned int n)
++static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
++                                      unsigned int n)
+ {
+       scatterwalk_advance(&walk->in, n);
+       scatterwalk_advance(&walk->out, n);
+-
+-      return n;
+ }
+ static int ablkcipher_walk_next(struct ablkcipher_request *req,
+@@ -107,39 +101,40 @@ int ablkcipher_walk_done(struct ablkciph
+                        struct ablkcipher_walk *walk, int err)
+ {
+       struct crypto_tfm *tfm = req->base.tfm;
+-      unsigned int nbytes = 0;
++      unsigned int n; /* bytes processed */
++      bool more;
+-      if (likely(err >= 0)) {
+-              unsigned int n = walk->nbytes - err;
++      if (unlikely(err < 0))
++              goto finish;
+-              if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
+-                      n = ablkcipher_done_fast(walk, n);
+-              else if (WARN_ON(err)) {
++      n = walk->nbytes - err;
++      walk->total -= n;
++      more = (walk->total != 0);
++
++      if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
++              ablkcipher_done_fast(walk, n);
++      } else {
++              if (WARN_ON(err)) {
++                      /* unexpected case; didn't process all bytes */
+                       err = -EINVAL;
+-                      goto err;
+-              } else
+-                      n = ablkcipher_done_slow(walk, n);
+-
+-              nbytes = walk->total - n;
+-              err = 0;
++                      goto finish;
++              }
++              ablkcipher_done_slow(walk, n);
+       }
+-      scatterwalk_done(&walk->in, 0, nbytes);
+-      scatterwalk_done(&walk->out, 1, nbytes);
++      scatterwalk_done(&walk->in, 0, more);
++      scatterwalk_done(&walk->out, 1, more);
+-err:
+-      walk->total = nbytes;
+-      walk->nbytes = nbytes;
+-
+-      if (nbytes) {
++      if (more) {
+               crypto_yield(req->base.flags);
+               return ablkcipher_walk_next(req, walk);
+       }
+-
++      err = 0;
++finish:
++      walk->nbytes = 0;
+       if (walk->iv != req->info)
+               memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
+       kfree(walk->iv_buffer);
+-
+       return err;
+ }
+ EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
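The same reorganization recurs in the blkcipher and skcipher patches below:
compute the number of bytes actually processed, and on a negative err jump
straight to cleanup without ever advancing the scatterwalk. A condensed,
runnable sketch of that control flow (illustrative userspace C, not the
kernel code; the scatterwalk calls are reduced to a printf):

    #include <stdbool.h>
    #include <stdio.h>

    struct walk { unsigned int nbytes, total; };

    static int walk_done(struct walk *walk, int err)
    {
            unsigned int n;         /* bytes processed in this step */
            bool more;

            if (err < 0)
                    goto finish;    /* error: skip advance/done entirely */

            n = walk->nbytes - err; /* err >= 0 means "bytes left over" */
            walk->total -= n;
            more = (walk->total != 0);

            /* scatterwalk_advance()/scatterwalk_done() would run here,
             * with the boolean 'more' instead of a byte count. */
            printf("advance %u, more=%d\n", n, more);

            if (more)
                    return 1;       /* caller continues the walk */
            err = 0;
    finish:
            walk->nbytes = 0;       /* the walk is finished either way */
            return err;
    }

    int main(void)
    {
            struct walk w = { .nbytes = 16, .total = 16 };

            walk_done(&w, 0);       /* normal step: advances, finishes */
            walk_done(&w, -22);     /* -EINVAL path: cleanup only */
            return 0;
    }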
diff --git a/queue-4.17/crypto-blkcipher-fix-crash-flushing-dcache-in-error-path.patch b/queue-4.17/crypto-blkcipher-fix-crash-flushing-dcache-in-error-path.patch
new file mode 100644 (file)
index 0000000..fd26878
--- /dev/null
@@ -0,0 +1,158 @@
+From 0868def3e4100591e7a1fdbf3eed1439cc8f7ca3 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Mon, 23 Jul 2018 10:54:57 -0700
+Subject: crypto: blkcipher - fix crash flushing dcache in error path
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 0868def3e4100591e7a1fdbf3eed1439cc8f7ca3 upstream.
+
+Like the skcipher_walk case:
+
+scatterwalk_done() is only meant to be called after a nonzero number of
+bytes have been processed, since scatterwalk_pagedone() will flush the
+dcache of the *previous* page.  But in the error case of
+blkcipher_walk_done(), e.g. if the input wasn't an integer number of
+blocks, scatterwalk_done() was actually called after advancing 0 bytes.
+This caused a crash ("BUG: unable to handle kernel paging request")
+during '!PageSlab(page)' on architectures like arm and arm64 that define
+ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE, provided that the input was
+page-aligned as in that case walk->offset == 0.
+
+Fix it by reorganizing blkcipher_walk_done() to skip the
+scatterwalk_advance() and scatterwalk_done() if an error has occurred.
+
+This bug was found by syzkaller fuzzing.
+
+Reproducer, assuming ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE:
+
+       #include <linux/if_alg.h>
+       #include <sys/socket.h>
+       #include <unistd.h>
+
+       int main()
+       {
+               struct sockaddr_alg addr = {
+                       .salg_type = "skcipher",
+                       .salg_name = "ecb(aes-generic)",
+               };
+               char buffer[4096] __attribute__((aligned(4096))) = { 0 };
+               int fd;
+
+               fd = socket(AF_ALG, SOCK_SEQPACKET, 0);
+               bind(fd, (void *)&addr, sizeof(addr));
+               setsockopt(fd, SOL_ALG, ALG_SET_KEY, buffer, 16);
+               fd = accept(fd, NULL, NULL);
+               write(fd, buffer, 15);
+               read(fd, buffer, 15);
+       }
+
+Reported-by: Liu Chao <liuchao741@huawei.com>
+Fixes: 5cde0af2a982 ("[CRYPTO] cipher: Added block cipher type")
+Cc: <stable@vger.kernel.org> # v2.6.19+
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/blkcipher.c |   54 +++++++++++++++++++++++++----------------------------
+ 1 file changed, 26 insertions(+), 28 deletions(-)
+
+--- a/crypto/blkcipher.c
++++ b/crypto/blkcipher.c
+@@ -70,19 +70,18 @@ static inline u8 *blkcipher_get_spot(u8
+       return max(start, end_page);
+ }
+-static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,
+-                                             unsigned int bsize)
++static inline void blkcipher_done_slow(struct blkcipher_walk *walk,
++                                     unsigned int bsize)
+ {
+       u8 *addr;
+       addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
+       addr = blkcipher_get_spot(addr, bsize);
+       scatterwalk_copychunks(addr, &walk->out, bsize, 1);
+-      return bsize;
+ }
+-static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
+-                                             unsigned int n)
++static inline void blkcipher_done_fast(struct blkcipher_walk *walk,
++                                     unsigned int n)
+ {
+       if (walk->flags & BLKCIPHER_WALK_COPY) {
+               blkcipher_map_dst(walk);
+@@ -96,49 +95,48 @@ static inline unsigned int blkcipher_don
+       scatterwalk_advance(&walk->in, n);
+       scatterwalk_advance(&walk->out, n);
+-
+-      return n;
+ }
+ int blkcipher_walk_done(struct blkcipher_desc *desc,
+                       struct blkcipher_walk *walk, int err)
+ {
+-      unsigned int nbytes = 0;
++      unsigned int n; /* bytes processed */
++      bool more;
+-      if (likely(err >= 0)) {
+-              unsigned int n = walk->nbytes - err;
++      if (unlikely(err < 0))
++              goto finish;
+-              if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
+-                      n = blkcipher_done_fast(walk, n);
+-              else if (WARN_ON(err)) {
++      n = walk->nbytes - err;
++      walk->total -= n;
++      more = (walk->total != 0);
++
++      if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) {
++              blkcipher_done_fast(walk, n);
++      } else {
++              if (WARN_ON(err)) {
++                      /* unexpected case; didn't process all bytes */
+                       err = -EINVAL;
+-                      goto err;
+-              } else
+-                      n = blkcipher_done_slow(walk, n);
+-
+-              nbytes = walk->total - n;
+-              err = 0;
++                      goto finish;
++              }
++              blkcipher_done_slow(walk, n);
+       }
+-      scatterwalk_done(&walk->in, 0, nbytes);
+-      scatterwalk_done(&walk->out, 1, nbytes);
+-
+-err:
+-      walk->total = nbytes;
+-      walk->nbytes = nbytes;
++      scatterwalk_done(&walk->in, 0, more);
++      scatterwalk_done(&walk->out, 1, more);
+-      if (nbytes) {
++      if (more) {
+               crypto_yield(desc->flags);
+               return blkcipher_walk_next(desc, walk);
+       }
+-
++      err = 0;
++finish:
++      walk->nbytes = 0;
+       if (walk->iv != desc->info)
+               memcpy(desc->info, walk->iv, walk->ivsize);
+       if (walk->buffer != walk->page)
+               kfree(walk->buffer);
+       if (walk->page)
+               free_page((unsigned long)walk->page);
+-
+       return err;
+ }
+ EXPORT_SYMBOL_GPL(blkcipher_walk_done);
diff --git a/queue-4.17/crypto-ccp-check-for-null-psp-pointer-at-module-unload.patch b/queue-4.17/crypto-ccp-check-for-null-psp-pointer-at-module-unload.patch
new file mode 100644 (file)
index 0000000..5187f87
--- /dev/null
@@ -0,0 +1,40 @@
+From afb31cd2d1a1bc3ca055fb2519ec4e9ab969ffe0 Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Thu, 26 Jul 2018 09:37:59 -0500
+Subject: crypto: ccp - Check for NULL PSP pointer at module unload
+
+From: Tom Lendacky <thomas.lendacky@amd.com>
+
+commit afb31cd2d1a1bc3ca055fb2519ec4e9ab969ffe0 upstream.
+
+Should the PSP initialization fail, the PSP data structure will be
+freed and the value contained in the sp_device struct set to NULL.
+At module unload, psp_dev_destroy() does not check if the pointer
+value is NULL and will end up dereferencing a NULL pointer.
+
+Add a pointer check of the psp_data field in the sp_device struct
+in psp_dev_destroy() and return immediately if it is NULL.
+
+Cc: <stable@vger.kernel.org> # 4.16.x-
+Fixes: 2a6170dfe755 ("crypto: ccp: Add Platform Security Processor (PSP) device support")
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Acked-by: Gary R Hook <gary.hook@amd.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/ccp/psp-dev.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/crypto/ccp/psp-dev.c
++++ b/drivers/crypto/ccp/psp-dev.c
+@@ -732,6 +732,9 @@ void psp_dev_destroy(struct sp_device *s
+ {
+       struct psp_device *psp = sp->psp_data;
++      if (!psp)
++              return;
++
+       if (psp->sev_misc)
+               kref_put(&misc_dev->refcount, sev_exit);
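The underlying pattern is generic: an optional sub-device whose probe can
fail leaves a NULL pointer behind, so teardown must tolerate it. A minimal
sketch of that guard (hypothetical names, plain C, not the driver code):

    #include <stdio.h>
    #include <stdlib.h>

    struct psp_device { int dummy; };
    struct sp_device  { struct psp_device *psp_data; };

    static int psp_init(struct sp_device *sp, int fail)
    {
            struct psp_device *psp = calloc(1, sizeof(*psp));

            if (fail || !psp) {
                    free(psp);
                    sp->psp_data = NULL;    /* the NULL left behind */
                    return -1;
            }
            sp->psp_data = psp;
            return 0;
    }

    static void psp_destroy(struct sp_device *sp)
    {
            struct psp_device *psp = sp->psp_data;

            if (!psp)       /* the added guard: failed init is fine */
                    return;
            free(psp);
            sp->psp_data = NULL;
    }

    int main(void)
    {
            struct sp_device sp = { 0 };

            psp_init(&sp, 1);       /* simulate a failed probe */
            psp_destroy(&sp);       /* safe: returns immediately */
            return 0;
    }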
diff --git a/queue-4.17/crypto-ccp-fix-command-completion-detection-race.patch b/queue-4.17/crypto-ccp-fix-command-completion-detection-race.patch
new file mode 100644 (file)
index 0000000..2466c6c
--- /dev/null
@@ -0,0 +1,50 @@
+From f426d2b20f1cd63818873593031593e15c3db20b Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Tue, 3 Jul 2018 12:11:33 -0500
+Subject: crypto: ccp - Fix command completion detection race
+
+From: Tom Lendacky <thomas.lendacky@amd.com>
+
+commit f426d2b20f1cd63818873593031593e15c3db20b upstream.
+
+The wait_event() function is used to detect command completion.  The
+interrupt handler will set the wait condition variable when the interrupt
+is triggered.  However, the variable used for wait_event() is initialized
+after the command has been submitted, which can create a race condition
+with the interrupt handler and result in the wait_event() never returning.
+Move the initialization of the wait condition variable to just before
+command submission.
+
+Fixes: 200664d5237f ("crypto: ccp: Add Secure Encrypted Virtualization (SEV) command support")
+Cc: <stable@vger.kernel.org> # 4.16.x-
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Reviewed-by: Brijesh Singh <brijesh.singh@amd.com>
+Acked-by: Gary R Hook <gary.hook@amd.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/ccp/psp-dev.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/crypto/ccp/psp-dev.c
++++ b/drivers/crypto/ccp/psp-dev.c
+@@ -78,8 +78,6 @@ done:
+ static void sev_wait_cmd_ioc(struct psp_device *psp, unsigned int *reg)
+ {
+-      psp->sev_int_rcvd = 0;
+-
+       wait_event(psp->sev_int_queue, psp->sev_int_rcvd);
+       *reg = ioread32(psp->io_regs + PSP_CMDRESP);
+ }
+@@ -140,6 +138,8 @@ static int __sev_do_cmd_locked(int cmd,
+       iowrite32(phys_lsb, psp->io_regs + PSP_CMDBUFF_ADDR_LO);
+       iowrite32(phys_msb, psp->io_regs + PSP_CMDBUFF_ADDR_HI);
++      psp->sev_int_rcvd = 0;
++
+       reg = cmd;
+       reg <<= PSP_CMDRESP_CMD_SHIFT;
+       reg |= PSP_CMDRESP_IOC;
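The rule the patch restores applies to any wait_event()-style completion:
reset the condition variable before kicking off the operation that sets it,
or a fast completion can be lost. A runnable userspace analogy using a
pthread condition variable (compile with -pthread; illustrative only):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
    static int done;

    /* Stands in for the interrupt handler completing the command. */
    static void *irq_handler(void *arg)
    {
            pthread_mutex_lock(&lock);
            done = 1;
            pthread_cond_signal(&cond);
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t irq;

            /* Patched ordering: clear the flag BEFORE submitting.  The
             * buggy code cleared it inside the wait path, after the
             * command was already in flight, so a completion landing in
             * that window was erased and the wait never returned. */
            done = 0;

            pthread_create(&irq, NULL, irq_handler, NULL);  /* "submit" */

            pthread_mutex_lock(&lock);
            while (!done)
                    pthread_cond_wait(&cond, &lock);
            pthread_mutex_unlock(&lock);
            pthread_join(irq, NULL);

            puts("command completed");
            return 0;
    }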
diff --git a/queue-4.17/crypto-ccree-fix-finup.patch b/queue-4.17/crypto-ccree-fix-finup.patch
new file mode 100644 (file)
index 0000000..b6f5474
--- /dev/null
@@ -0,0 +1,140 @@
+From 26497e72a1aba4d27c50c4cbf0182db94e58a590 Mon Sep 17 00:00:00 2001
+From: Hadar Gat <hadar.gat@arm.com>
+Date: Sun, 1 Jul 2018 08:02:34 +0100
+Subject: crypto: ccree - fix finup
+
+From: Hadar Gat <hadar.gat@arm.com>
+
+commit 26497e72a1aba4d27c50c4cbf0182db94e58a590 upstream.
+
+The finup() operation was incorrect: padding was missing.
+Fix this by setting the ccree HW to enable padding.
+
+Signed-off-by: Hadar Gat <hadar.gat@arm.com>
+[ gilad@benyossef.com: refactored for better code sharing ]
+Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/ccree/cc_hash.c |   81 ++++++++---------------------------------
+ 1 file changed, 16 insertions(+), 65 deletions(-)
+
+--- a/drivers/crypto/ccree/cc_hash.c
++++ b/drivers/crypto/ccree/cc_hash.c
+@@ -602,7 +602,7 @@ static int cc_hash_update(struct ahash_r
+       return rc;
+ }
+-static int cc_hash_finup(struct ahash_request *req)
++static int cc_do_finup(struct ahash_request *req, bool update)
+ {
+       struct ahash_req_ctx *state = ahash_request_ctx(req);
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+@@ -619,15 +619,15 @@ static int cc_hash_finup(struct ahash_re
+       int rc;
+       gfp_t flags = cc_gfp_flags(&req->base);
+-      dev_dbg(dev, "===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash",
+-              nbytes);
++      dev_dbg(dev, "===== %s-%s (%d) ====\n", is_hmac ? "hmac" : "hash",
++              update ? "finup" : "final", nbytes);
+       if (cc_map_req(dev, state, ctx)) {
+               dev_err(dev, "map_ahash_source() failed\n");
+               return -EINVAL;
+       }
+-      if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
++      if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, update,
+                                     flags)) {
+               dev_err(dev, "map_ahash_request_final() failed\n");
+               cc_unmap_req(dev, state, ctx);
+@@ -646,67 +646,7 @@ static int cc_hash_finup(struct ahash_re
+       idx = cc_restore_hash(desc, ctx, state, idx);
+-      if (is_hmac)
+-              idx = cc_fin_hmac(desc, req, idx);
+-
+-      idx = cc_fin_result(desc, req, idx);
+-
+-      rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+-      if (rc != -EINPROGRESS && rc != -EBUSY) {
+-              dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+-              cc_unmap_hash_request(dev, state, src, true);
+-              cc_unmap_result(dev, state, digestsize, result);
+-              cc_unmap_req(dev, state, ctx);
+-      }
+-      return rc;
+-}
+-
+-static int cc_hash_final(struct ahash_request *req)
+-{
+-      struct ahash_req_ctx *state = ahash_request_ctx(req);
+-      struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+-      struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+-      u32 digestsize = crypto_ahash_digestsize(tfm);
+-      struct scatterlist *src = req->src;
+-      unsigned int nbytes = req->nbytes;
+-      u8 *result = req->result;
+-      struct device *dev = drvdata_to_dev(ctx->drvdata);
+-      bool is_hmac = ctx->is_hmac;
+-      struct cc_crypto_req cc_req = {};
+-      struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+-      unsigned int idx = 0;
+-      int rc;
+-      gfp_t flags = cc_gfp_flags(&req->base);
+-
+-      dev_dbg(dev, "===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash",
+-              nbytes);
+-
+-      if (cc_map_req(dev, state, ctx)) {
+-              dev_err(dev, "map_ahash_source() failed\n");
+-              return -EINVAL;
+-      }
+-
+-      if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0,
+-                                    flags)) {
+-              dev_err(dev, "map_ahash_request_final() failed\n");
+-              cc_unmap_req(dev, state, ctx);
+-              return -ENOMEM;
+-      }
+-
+-      if (cc_map_result(dev, state, digestsize)) {
+-              dev_err(dev, "map_ahash_digest() failed\n");
+-              cc_unmap_hash_request(dev, state, src, true);
+-              cc_unmap_req(dev, state, ctx);
+-              return -ENOMEM;
+-      }
+-
+-      /* Setup request structure */
+-      cc_req.user_cb = cc_hash_complete;
+-      cc_req.user_arg = req;
+-
+-      idx = cc_restore_hash(desc, ctx, state, idx);
+-
+-      /* "DO-PAD" must be enabled only when writing current length to HW */
++      /* Pad the hash */
+       hw_desc_init(&desc[idx]);
+       set_cipher_do(&desc[idx], DO_PAD);
+       set_cipher_mode(&desc[idx], ctx->hw_mode);
+@@ -731,6 +671,17 @@ static int cc_hash_final(struct ahash_re
+       return rc;
+ }
++static int cc_hash_finup(struct ahash_request *req)
++{
++      return cc_do_finup(req, true);
++}
++
++
++static int cc_hash_final(struct ahash_request *req)
++{
++      return cc_do_finup(req, false);
++}
++
+ static int cc_hash_init(struct ahash_request *req)
+ {
+       struct ahash_req_ctx *state = ahash_request_ctx(req);
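The consolidation leans on the ahash contract: finup() hashes the remaining
data and emits the digest, while final() emits the digest with no new data,
so final() is just finup() with an empty update. A toy sketch of the single
worker plus two thin wrappers (illustrative C, hypothetical names):

    #include <stdbool.h>
    #include <stdio.h>

    static int do_finup(const char *data, unsigned int nbytes, bool update)
    {
            if (update)
                    printf("hash %u trailing bytes\n", nbytes);
            /* Both paths must enable padding; the bug was that the old
             * standalone finup() skipped this step. */
            printf("pad and emit digest\n");
            return 0;
    }

    static int hash_finup(const char *d, unsigned int n)
    {
            return do_finup(d, n, true);
    }

    static int hash_final(void)
    {
            return do_finup(NULL, 0, false);
    }

    int main(void)
    {
            hash_finup("abc", 3);
            hash_final();
            return 0;
    }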
diff --git a/queue-4.17/crypto-ccree-fix-iv-handling.patch b/queue-4.17/crypto-ccree-fix-iv-handling.patch
new file mode 100644 (file)
index 0000000..9acdb61
--- /dev/null
@@ -0,0 +1,167 @@
+From 00904aa0cd59a36d659ec93d272309e2174bcb5b Mon Sep 17 00:00:00 2001
+From: Gilad Ben-Yossef <gilad@benyossef.com>
+Date: Sun, 1 Jul 2018 08:02:36 +0100
+Subject: crypto: ccree - fix iv handling
+
+From: Gilad Ben-Yossef <gilad@benyossef.com>
+
+commit 00904aa0cd59a36d659ec93d272309e2174bcb5b upstream.
+
+We were copying our last cipher block into the request for use as IV for
+all modes of operation. Fix this by discerning the behaviour based on
+the mode of operation used: copy ciphertext for CBC, update counter for
+CTR.
+
+CC: stable@vger.kernel.org
+Fixes: 63ee04c8b491 ("crypto: ccree - add skcipher support")
+Reported-by: Hadar Gat <hadar.gat@arm.com>
+Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/ccree/cc_cipher.c |  111 +++++++++++++++++++++++++++++----------
+ 1 file changed, 84 insertions(+), 27 deletions(-)
+
+--- a/drivers/crypto/ccree/cc_cipher.c
++++ b/drivers/crypto/ccree/cc_cipher.c
+@@ -554,34 +554,82 @@ static void cc_setup_cipher_data(struct
+       }
+ }
++/*
++ * Update a CTR-AES 128 bit counter
++ */
++static void cc_update_ctr(u8 *ctr, unsigned int increment)
++{
++      if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
++          IS_ALIGNED((unsigned long)ctr, 8)) {
++
++              __be64 *high_be = (__be64 *)ctr;
++              __be64 *low_be = high_be + 1;
++              u64 orig_low = __be64_to_cpu(*low_be);
++              u64 new_low = orig_low + (u64)increment;
++
++              *low_be = __cpu_to_be64(new_low);
++
++              if (new_low < orig_low)
++                      *high_be = __cpu_to_be64(__be64_to_cpu(*high_be) + 1);
++      } else {
++              u8 *pos = (ctr + AES_BLOCK_SIZE);
++              u8 val;
++              unsigned int size;
++
++              for (; increment; increment--)
++                      for (size = AES_BLOCK_SIZE; size; size--) {
++                              val = *--pos + 1;
++                              *pos = val;
++                              if (val)
++                                      break;
++                      }
++      }
++}
++
+ static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
+ {
+       struct skcipher_request *req = (struct skcipher_request *)cc_req;
+       struct scatterlist *dst = req->dst;
+       struct scatterlist *src = req->src;
+       struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
+-      struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+-      unsigned int ivsize = crypto_skcipher_ivsize(tfm);
++      struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
++      struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
++      struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
++      unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
++      unsigned int len;
+-      cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+-      kzfree(req_ctx->iv);
++      switch (ctx_p->cipher_mode) {
++      case DRV_CIPHER_CBC:
++              /*
++               * The crypto API expects us to set the req->iv to the last
++               * ciphertext block. For encrypt, simply copy from the result.
++               * For decrypt, we must copy from a saved buffer since this
++               * could be an in-place decryption operation and the src is
++               * lost by this point.
++               */
++              if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT)  {
++                      memcpy(req->iv, req_ctx->backup_info, ivsize);
++                      kzfree(req_ctx->backup_info);
++              } else if (!err) {
++                      len = req->cryptlen - ivsize;
++                      scatterwalk_map_and_copy(req->iv, req->dst, len,
++                                               ivsize, 0);
++              }
++              break;
++
++      case DRV_CIPHER_CTR:
++              /* Compute the counter of the last block */
++              len = ALIGN(req->cryptlen, AES_BLOCK_SIZE) / AES_BLOCK_SIZE;
++              cc_update_ctr((u8 *)req->iv, len);
++              break;
+-      /*
+-       * The crypto API expects us to set the req->iv to the last
+-       * ciphertext block. For encrypt, simply copy from the result.
+-       * For decrypt, we must copy from a saved buffer since this
+-       * could be an in-place decryption operation and the src is
+-       * lost by this point.
+-       */
+-      if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT)  {
+-              memcpy(req->iv, req_ctx->backup_info, ivsize);
+-              kzfree(req_ctx->backup_info);
+-      } else if (!err) {
+-              scatterwalk_map_and_copy(req->iv, req->dst,
+-                                       (req->cryptlen - ivsize),
+-                                       ivsize, 0);
++      default:
++              break;
+       }
++      cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
++      kzfree(req_ctx->iv);
++
+       skcipher_request_complete(req, err);
+ }
+@@ -713,20 +761,29 @@ static int cc_cipher_encrypt(struct skci
+ static int cc_cipher_decrypt(struct skcipher_request *req)
+ {
+       struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
++      struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
++      struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+       struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
+       unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
+       gfp_t flags = cc_gfp_flags(&req->base);
++      unsigned int len;
+-      /*
+-       * Allocate and save the last IV sized bytes of the source, which will
+-       * be lost in case of in-place decryption and might be needed for CTS.
+-       */
+-      req_ctx->backup_info = kmalloc(ivsize, flags);
+-      if (!req_ctx->backup_info)
+-              return -ENOMEM;
++      if (ctx_p->cipher_mode == DRV_CIPHER_CBC) {
++
++              /* Allocate and save the last IV sized bytes of the source,
++               * which will be lost in case of in-place decryption.
++               */
++              req_ctx->backup_info = kzalloc(ivsize, flags);
++              if (!req_ctx->backup_info)
++                      return -ENOMEM;
++
++              len = req->cryptlen - ivsize;
++              scatterwalk_map_and_copy(req_ctx->backup_info, req->src, len,
++                                       ivsize, 0);
++      } else {
++              req_ctx->backup_info = NULL;
++      }
+-      scatterwalk_map_and_copy(req_ctx->backup_info, req->src,
+-                               (req->cryptlen - ivsize), ivsize, 0);
+       req_ctx->is_giv = false;
+       return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
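For the CTR path, the completion handler needs the counter value after the
last block, so the request length is rounded up to whole AES blocks and the
128-bit big-endian counter advanced by that many. A standalone sketch of
the byte-wise increment, equivalent to the patch's unaligned fallback:

    #include <stdio.h>

    #define AES_BLOCK_SIZE 16

    /* Advance a 16-byte big-endian counter, carrying across all bytes. */
    static void ctr_update(unsigned char ctr[AES_BLOCK_SIZE],
                           unsigned int increment)
    {
            while (increment--) {
                    int i;

                    for (i = AES_BLOCK_SIZE - 1; i >= 0; i--)
                            if (++ctr[i])   /* stop unless this byte wrapped */
                                    break;
            }
    }

    int main(void)
    {
            unsigned char ctr[AES_BLOCK_SIZE] = {
                    0, 0, 0, 0, 0, 0, 0, 0,
                    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            };
            unsigned int cryptlen = 33;     /* rounds up to 3 blocks */
            unsigned int blocks =
                    (cryptlen + AES_BLOCK_SIZE - 1) / AES_BLOCK_SIZE;
            int i;

            ctr_update(ctr, blocks);        /* carries into the high 64 bits */
            for (i = 0; i < AES_BLOCK_SIZE; i++)
                    printf("%02x", ctr[i]);
            printf("\n");
            return 0;
    }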
diff --git a/queue-4.17/crypto-skcipher-fix-aligning-block-size-in-skcipher_copy_iv.patch b/queue-4.17/crypto-skcipher-fix-aligning-block-size-in-skcipher_copy_iv.patch
new file mode 100644 (file)
index 0000000..fa14ae2
--- /dev/null
@@ -0,0 +1,33 @@
+From 0567fc9e90b9b1c8dbce8a5468758e6206744d4a Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Mon, 23 Jul 2018 09:57:50 -0700
+Subject: crypto: skcipher - fix aligning block size in skcipher_copy_iv()
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 0567fc9e90b9b1c8dbce8a5468758e6206744d4a upstream.
+
+The ALIGN() macro needs to be passed the alignment, not the alignmask
+(which is the alignment minus 1).
+
+Fixes: b286d8b1a690 ("crypto: skcipher - Add skcipher walk interface")
+Cc: <stable@vger.kernel.org> # v4.10+
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/skcipher.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/crypto/skcipher.c
++++ b/crypto/skcipher.c
+@@ -399,7 +399,7 @@ static int skcipher_copy_iv(struct skcip
+       unsigned size;
+       u8 *iv;
+-      aligned_bs = ALIGN(bs, alignmask);
++      aligned_bs = ALIGN(bs, alignmask + 1);
+       /* Minimum size to align buffer by alignmask. */
+       size = alignmask & ~a;
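The off-by-one is easy to see numerically: ALIGN(x, a) rounds x up to a
multiple of a, so it must be given the alignment (mask + 1), not the mask.
A standalone demonstration with a userspace copy of the kernel macro, using
a blocksize-1 cipher and a 16-byte alignmask as a plausible worst case:

    #include <stdio.h>

    /* Userspace copy of the kernel's ALIGN(): round x up to a multiple
     * of a, where a is a power of two. */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            unsigned int bs = 1;            /* e.g. a stream cipher */
            unsigned int alignmask = 15;    /* 16-byte alignment */

            /* Buggy: the mask passed where the alignment belongs. */
            printf("ALIGN(bs, alignmask)     = %u\n",
                   ALIGN(bs, alignmask));           /* prints 1 */
            /* Fixed: alignment is mask + 1. */
            printf("ALIGN(bs, alignmask + 1) = %u\n",
                   ALIGN(bs, alignmask + 1));       /* prints 16 */
            return 0;
    }

An aligned_bs of 1 instead of 16 undersizes the scratch buffer computed a
few lines below it, which is the kind of subtle breakage the one-character
fix removes.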
diff --git a/queue-4.17/crypto-skcipher-fix-crash-flushing-dcache-in-error-path.patch b/queue-4.17/crypto-skcipher-fix-crash-flushing-dcache-in-error-path.patch
new file mode 100644 (file)
index 0000000..06d08de
--- /dev/null
@@ -0,0 +1,146 @@
+From 8088d3dd4d7c6933a65aa169393b5d88d8065672 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Mon, 23 Jul 2018 10:54:56 -0700
+Subject: crypto: skcipher - fix crash flushing dcache in error path
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 8088d3dd4d7c6933a65aa169393b5d88d8065672 upstream.
+
+scatterwalk_done() is only meant to be called after a nonzero number of
+bytes have been processed, since scatterwalk_pagedone() will flush the
+dcache of the *previous* page.  But in the error case of
+skcipher_walk_done(), e.g. if the input wasn't an integer number of
+blocks, scatterwalk_done() was actually called after advancing 0 bytes.
+This caused a crash ("BUG: unable to handle kernel paging request")
+during '!PageSlab(page)' on architectures like arm and arm64 that define
+ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE, provided that the input was
+page-aligned as in that case walk->offset == 0.
+
+Fix it by reorganizing skcipher_walk_done() to skip the
+scatterwalk_advance() and scatterwalk_done() if an error has occurred.
+
+This bug was found by syzkaller fuzzing.
+
+Reproducer, assuming ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE:
+
+       #include <linux/if_alg.h>
+       #include <sys/socket.h>
+       #include <unistd.h>
+
+       int main()
+       {
+               struct sockaddr_alg addr = {
+                       .salg_type = "skcipher",
+                       .salg_name = "cbc(aes-generic)",
+               };
+               char buffer[4096] __attribute__((aligned(4096))) = { 0 };
+               int fd;
+
+               fd = socket(AF_ALG, SOCK_SEQPACKET, 0);
+               bind(fd, (void *)&addr, sizeof(addr));
+               setsockopt(fd, SOL_ALG, ALG_SET_KEY, buffer, 16);
+               fd = accept(fd, NULL, NULL);
+               write(fd, buffer, 15);
+               read(fd, buffer, 15);
+       }
+
+Reported-by: Liu Chao <liuchao741@huawei.com>
+Fixes: b286d8b1a690 ("crypto: skcipher - Add skcipher walk interface")
+Cc: <stable@vger.kernel.org> # v4.10+
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/skcipher.c |   49 +++++++++++++++++++++++++------------------------
+ 1 file changed, 25 insertions(+), 24 deletions(-)
+
+--- a/crypto/skcipher.c
++++ b/crypto/skcipher.c
+@@ -95,7 +95,7 @@ static inline u8 *skcipher_get_spot(u8 *
+       return max(start, end_page);
+ }
+-static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
++static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
+ {
+       u8 *addr;
+@@ -103,23 +103,24 @@ static int skcipher_done_slow(struct skc
+       addr = skcipher_get_spot(addr, bsize);
+       scatterwalk_copychunks(addr, &walk->out, bsize,
+                              (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
+-      return 0;
+ }
+ int skcipher_walk_done(struct skcipher_walk *walk, int err)
+ {
+-      unsigned int n = walk->nbytes - err;
+-      unsigned int nbytes;
++      unsigned int n; /* bytes processed */
++      bool more;
+-      nbytes = walk->total - n;
++      if (unlikely(err < 0))
++              goto finish;
+-      if (unlikely(err < 0)) {
+-              nbytes = 0;
+-              n = 0;
+-      } else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
+-                                         SKCIPHER_WALK_SLOW |
+-                                         SKCIPHER_WALK_COPY |
+-                                         SKCIPHER_WALK_DIFF)))) {
++      n = walk->nbytes - err;
++      walk->total -= n;
++      more = (walk->total != 0);
++
++      if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
++                                  SKCIPHER_WALK_SLOW |
++                                  SKCIPHER_WALK_COPY |
++                                  SKCIPHER_WALK_DIFF)))) {
+ unmap_src:
+               skcipher_unmap_src(walk);
+       } else if (walk->flags & SKCIPHER_WALK_DIFF) {
+@@ -131,28 +132,28 @@ unmap_src:
+               skcipher_unmap_dst(walk);
+       } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
+               if (WARN_ON(err)) {
++                      /* unexpected case; didn't process all bytes */
+                       err = -EINVAL;
+-                      nbytes = 0;
+-              } else
+-                      n = skcipher_done_slow(walk, n);
++                      goto finish;
++              }
++              skcipher_done_slow(walk, n);
++              goto already_advanced;
+       }
+-      if (err > 0)
+-              err = 0;
+-
+-      walk->total = nbytes;
+-      walk->nbytes = nbytes;
+-
+       scatterwalk_advance(&walk->in, n);
+       scatterwalk_advance(&walk->out, n);
+-      scatterwalk_done(&walk->in, 0, nbytes);
+-      scatterwalk_done(&walk->out, 1, nbytes);
++already_advanced:
++      scatterwalk_done(&walk->in, 0, more);
++      scatterwalk_done(&walk->out, 1, more);
+-      if (nbytes) {
++      if (more) {
+               crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
+                            CRYPTO_TFM_REQ_MAY_SLEEP : 0);
+               return skcipher_walk_next(walk);
+       }
++      err = 0;
++finish:
++      walk->nbytes = 0;
+       /* Short-circuit for the common/fast path. */
+       if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
diff --git a/queue-4.17/crypto-vmac-require-a-block-cipher-with-128-bit-block-size.patch b/queue-4.17/crypto-vmac-require-a-block-cipher-with-128-bit-block-size.patch
new file mode 100644 (file)
index 0000000..9d82287
--- /dev/null
@@ -0,0 +1,39 @@
+From 73bf20ef3df262026c3470241ae4ac8196943ffa Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Mon, 18 Jun 2018 10:22:37 -0700
+Subject: crypto: vmac - require a block cipher with 128-bit block size
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 73bf20ef3df262026c3470241ae4ac8196943ffa upstream.
+
+The VMAC template assumes the block cipher has a 128-bit block size, but
+it failed to check for that.  Thus it was possible to instantiate it
+using a 64-bit block size cipher, e.g. "vmac(cast5)", causing
+uninitialized memory to be used.
+
+Add the needed check when instantiating the template.
+
+Fixes: f1939f7c5645 ("crypto: vmac - New hash algorithm for intel_txt support")
+Cc: <stable@vger.kernel.org> # v2.6.32+
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/vmac.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/crypto/vmac.c
++++ b/crypto/vmac.c
+@@ -655,6 +655,10 @@ static int vmac_create(struct crypto_tem
+       if (IS_ERR(alg))
+               return PTR_ERR(alg);
++      err = -EINVAL;
++      if (alg->cra_blocksize != 16)
++              goto out_put_alg;
++
+       inst = shash_alloc_instance("vmac", alg);
+       err = PTR_ERR(inst);
+       if (IS_ERR(inst))
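With the check in place, instantiation fails up front instead of reading 16
bytes from an 8-byte cipher output. A userspace probe in the spirit of the
reproducers elsewhere in this queue (assumes the vmac template and CAST5
are available in the running kernel; error handling omitted):

    #include <linux/if_alg.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    static void try_vmac(const char *name)
    {
            struct sockaddr_alg addr = {
                    .salg_type = "hash",
            };
            int fd = socket(AF_ALG, SOCK_SEQPACKET, 0);

            strncpy((char *)addr.salg_name, name,
                    sizeof(addr.salg_name) - 1);
            printf("%-16s %s\n", name,
                   bind(fd, (void *)&addr, sizeof(addr)) ?
                   "rejected" : "instantiated");
            close(fd);
    }

    int main(void)
    {
            try_vmac("vmac(aes)");      /* 128-bit blocks: accepted */
            try_vmac("vmac(cast5)");    /* 64-bit blocks: now rejected */
            return 0;
    }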
diff --git a/queue-4.17/crypto-vmac-separate-tfm-and-request-context.patch b/queue-4.17/crypto-vmac-separate-tfm-and-request-context.patch
new file mode 100644 (file)
index 0000000..f45e976
--- /dev/null
@@ -0,0 +1,653 @@
+From bb29648102335586e9a66289a1d98a0cb392b6e5 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Mon, 18 Jun 2018 10:22:38 -0700
+Subject: crypto: vmac - separate tfm and request context
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit bb29648102335586e9a66289a1d98a0cb392b6e5 upstream.
+
+syzbot reported a crash in vmac_final() when multiple threads
+concurrently use the same "vmac(aes)" transform through AF_ALG.  The bug
+is pretty fundamental: the VMAC template doesn't separate per-request
+state from per-tfm (per-key) state like the other hash algorithms do,
+but rather stores it all in the tfm context.  That's wrong.
+
+Also, vmac_final() incorrectly zeroes most of the state including the
+derived keys and cached pseudorandom pad.  Therefore, only the first
+VMAC invocation with a given key calculates the correct digest.
+
+Fix these bugs by splitting the per-tfm state from the per-request state
+and using the proper init/update/final sequencing for requests.
+
+Reproducer for the crash:
+
+    #include <linux/if_alg.h>
+    #include <sys/socket.h>
+    #include <unistd.h>
+
+    int main()
+    {
+            int fd;
+            struct sockaddr_alg addr = {
+                    .salg_type = "hash",
+                    .salg_name = "vmac(aes)",
+            };
+            char buf[256] = { 0 };
+
+            fd = socket(AF_ALG, SOCK_SEQPACKET, 0);
+            bind(fd, (void *)&addr, sizeof(addr));
+            setsockopt(fd, SOL_ALG, ALG_SET_KEY, buf, 16);
+            fork();
+            fd = accept(fd, NULL, NULL);
+            for (;;)
+                    write(fd, buf, 256);
+    }
+
+The immediate cause of the crash is that vmac_ctx_t.partial_size exceeds
+VMAC_NHBYTES, causing vmac_final() to memset() a negative length.
+
+Reported-by: syzbot+264bca3a6e8d645550d3@syzkaller.appspotmail.com
+Fixes: f1939f7c5645 ("crypto: vmac - New hash algorithm for intel_txt support")
+Cc: <stable@vger.kernel.org> # v2.6.32+
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/vmac.c         |  414 ++++++++++++++++++++++----------------------------
+ include/crypto/vmac.h |   63 -------
+ 2 files changed, 184 insertions(+), 293 deletions(-)
+
+--- a/crypto/vmac.c
++++ b/crypto/vmac.c
+@@ -1,6 +1,10 @@
+ /*
+- * Modified to interface to the Linux kernel
++ * VMAC: Message Authentication Code using Universal Hashing
++ *
++ * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01
++ *
+  * Copyright (c) 2009, Intel Corporation.
++ * Copyright (c) 2018, Google Inc.
+  *
+  * This program is free software; you can redistribute it and/or modify it
+  * under the terms and conditions of the GNU General Public License,
+@@ -16,14 +20,15 @@
+  * Place - Suite 330, Boston, MA 02111-1307 USA.
+  */
+-/* --------------------------------------------------------------------------
+- * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
+- * This implementation is herby placed in the public domain.
+- * The authors offers no warranty. Use at your own risk.
+- * Please send bug reports to the authors.
+- * Last modified: 17 APR 08, 1700 PDT
+- * ----------------------------------------------------------------------- */
++/*
++ * Derived from:
++ *    VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
++ *    This implementation is herby placed in the public domain.
++ *    The authors offers no warranty. Use at your own risk.
++ *    Last modified: 17 APR 08, 1700 PDT
++ */
++#include <asm/unaligned.h>
+ #include <linux/init.h>
+ #include <linux/types.h>
+ #include <linux/crypto.h>
+@@ -31,10 +36,36 @@
+ #include <linux/scatterlist.h>
+ #include <asm/byteorder.h>
+ #include <crypto/scatterwalk.h>
+-#include <crypto/vmac.h>
+ #include <crypto/internal/hash.h>
+ /*
++ * User definable settings.
++ */
++#define VMAC_TAG_LEN  64
++#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256                   */
++#define VMAC_KEY_LEN  (VMAC_KEY_SIZE/8)
++#define VMAC_NHBYTES  128/* Must 2^i for any 3 < i < 13 Standard = 128*/
++
++/* per-transform (per-key) context */
++struct vmac_tfm_ctx {
++      struct crypto_cipher *cipher;
++      u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
++      u64 polykey[2*VMAC_TAG_LEN/64];
++      u64 l3key[2*VMAC_TAG_LEN/64];
++};
++
++/* per-request context */
++struct vmac_desc_ctx {
++      union {
++              u8 partial[VMAC_NHBYTES];       /* partial block */
++              __le64 partial_words[VMAC_NHBYTES / 8];
++      };
++      unsigned int partial_size;      /* size of the partial block */
++      bool first_block_processed;
++      u64 polytmp[2*VMAC_TAG_LEN/64]; /* running total of L2-hash */
++};
++
++/*
+  * Constants and masks
+  */
+ #define UINT64_C(x) x##ULL
+@@ -318,13 +349,6 @@ static void poly_step_func(u64 *ahi, u64
+       } while (0)
+ #endif
+-static void vhash_abort(struct vmac_ctx *ctx)
+-{
+-      ctx->polytmp[0] = ctx->polykey[0] ;
+-      ctx->polytmp[1] = ctx->polykey[1] ;
+-      ctx->first_block_processed = 0;
+-}
+-
+ static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
+ {
+       u64 rh, rl, t, z = 0;
+@@ -364,280 +388,209 @@ static u64 l3hash(u64 p1, u64 p2, u64 k1
+       return rl;
+ }
+-static void vhash_update(const unsigned char *m,
+-                      unsigned int mbytes, /* Pos multiple of VMAC_NHBYTES */
+-                      struct vmac_ctx *ctx)
+-{
+-      u64 rh, rl, *mptr;
+-      const u64 *kptr = (u64 *)ctx->nhkey;
+-      int i;
+-      u64 ch, cl;
+-      u64 pkh = ctx->polykey[0];
+-      u64 pkl = ctx->polykey[1];
+-
+-      if (!mbytes)
+-              return;
+-
+-      BUG_ON(mbytes % VMAC_NHBYTES);
++/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */
++static void vhash_blocks(const struct vmac_tfm_ctx *tctx,
++                       struct vmac_desc_ctx *dctx,
++                       const __le64 *mptr, unsigned int blocks)
++{
++      const u64 *kptr = tctx->nhkey;
++      const u64 pkh = tctx->polykey[0];
++      const u64 pkl = tctx->polykey[1];
++      u64 ch = dctx->polytmp[0];
++      u64 cl = dctx->polytmp[1];
++      u64 rh, rl;
+-      mptr = (u64 *)m;
+-      i = mbytes / VMAC_NHBYTES;  /* Must be non-zero */
+-
+-      ch = ctx->polytmp[0];
+-      cl = ctx->polytmp[1];
+-
+-      if (!ctx->first_block_processed) {
+-              ctx->first_block_processed = 1;
++      if (!dctx->first_block_processed) {
++              dctx->first_block_processed = true;
+               nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
+               rh &= m62;
+               ADD128(ch, cl, rh, rl);
+               mptr += (VMAC_NHBYTES/sizeof(u64));
+-              i--;
++              blocks--;
+       }
+-      while (i--) {
++      while (blocks--) {
+               nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
+               rh &= m62;
+               poly_step(ch, cl, pkh, pkl, rh, rl);
+               mptr += (VMAC_NHBYTES/sizeof(u64));
+       }
+-      ctx->polytmp[0] = ch;
+-      ctx->polytmp[1] = cl;
++      dctx->polytmp[0] = ch;
++      dctx->polytmp[1] = cl;
+ }
+-static u64 vhash(unsigned char m[], unsigned int mbytes,
+-                      u64 *tagl, struct vmac_ctx *ctx)
++static int vmac_setkey(struct crypto_shash *tfm,
++                     const u8 *key, unsigned int keylen)
+ {
+-      u64 rh, rl, *mptr;
+-      const u64 *kptr = (u64 *)ctx->nhkey;
+-      int i, remaining;
+-      u64 ch, cl;
+-      u64 pkh = ctx->polykey[0];
+-      u64 pkl = ctx->polykey[1];
+-
+-      mptr = (u64 *)m;
+-      i = mbytes / VMAC_NHBYTES;
+-      remaining = mbytes % VMAC_NHBYTES;
+-
+-      if (ctx->first_block_processed) {
+-              ch = ctx->polytmp[0];
+-              cl = ctx->polytmp[1];
+-      } else if (i) {
+-              nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl);
+-              ch &= m62;
+-              ADD128(ch, cl, pkh, pkl);
+-              mptr += (VMAC_NHBYTES/sizeof(u64));
+-              i--;
+-      } else if (remaining) {
+-              nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl);
+-              ch &= m62;
+-              ADD128(ch, cl, pkh, pkl);
+-              mptr += (VMAC_NHBYTES/sizeof(u64));
+-              goto do_l3;
+-      } else {/* Empty String */
+-              ch = pkh; cl = pkl;
+-              goto do_l3;
+-      }
+-
+-      while (i--) {
+-              nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
+-              rh &= m62;
+-              poly_step(ch, cl, pkh, pkl, rh, rl);
+-              mptr += (VMAC_NHBYTES/sizeof(u64));
+-      }
+-      if (remaining) {
+-              nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl);
+-              rh &= m62;
+-              poly_step(ch, cl, pkh, pkl, rh, rl);
+-      }
+-
+-do_l3:
+-      vhash_abort(ctx);
+-      remaining *= 8;
+-      return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining);
+-}
+-
+-static u64 vmac(unsigned char m[], unsigned int mbytes,
+-                      const unsigned char n[16], u64 *tagl,
+-                      struct vmac_ctx_t *ctx)
+-{
+-      u64 *in_n, *out_p;
+-      u64 p, h;
+-      int i;
+-
+-      in_n = ctx->__vmac_ctx.cached_nonce;
+-      out_p = ctx->__vmac_ctx.cached_aes;
+-
+-      i = n[15] & 1;
+-      if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) {
+-              in_n[0] = *(u64 *)(n);
+-              in_n[1] = *(u64 *)(n+8);
+-              ((unsigned char *)in_n)[15] &= 0xFE;
+-              crypto_cipher_encrypt_one(ctx->child,
+-                      (unsigned char *)out_p, (unsigned char *)in_n);
++      struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm);
++      __be64 out[2];
++      u8 in[16] = { 0 };
++      unsigned int i;
++      int err;
+-              ((unsigned char *)in_n)[15] |= (unsigned char)(1-i);
++      if (keylen != VMAC_KEY_LEN) {
++              crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
++              return -EINVAL;
+       }
+-      p = be64_to_cpup(out_p + i);
+-      h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
+-      return le64_to_cpu(p + h);
+-}
+-
+-static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
+-{
+-      u64 in[2] = {0}, out[2];
+-      unsigned i;
+-      int err = 0;
+-      err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN);
++      err = crypto_cipher_setkey(tctx->cipher, key, keylen);
+       if (err)
+               return err;
+       /* Fill nh key */
+-      ((unsigned char *)in)[0] = 0x80;
+-      for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) {
+-              crypto_cipher_encrypt_one(ctx->child,
+-                      (unsigned char *)out, (unsigned char *)in);
+-              ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out);
+-              ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1);
+-              ((unsigned char *)in)[15] += 1;
++      in[0] = 0x80;
++      for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) {
++              crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
++              tctx->nhkey[i] = be64_to_cpu(out[0]);
++              tctx->nhkey[i+1] = be64_to_cpu(out[1]);
++              in[15]++;
+       }
+       /* Fill poly key */
+-      ((unsigned char *)in)[0] = 0xC0;
+-      in[1] = 0;
+-      for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) {
+-              crypto_cipher_encrypt_one(ctx->child,
+-                      (unsigned char *)out, (unsigned char *)in);
+-              ctx->__vmac_ctx.polytmp[i] =
+-                      ctx->__vmac_ctx.polykey[i] =
+-                              be64_to_cpup(out) & mpoly;
+-              ctx->__vmac_ctx.polytmp[i+1] =
+-                      ctx->__vmac_ctx.polykey[i+1] =
+-                              be64_to_cpup(out+1) & mpoly;
+-              ((unsigned char *)in)[15] += 1;
++      in[0] = 0xC0;
++      in[15] = 0;
++      for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) {
++              crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
++              tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly;
++              tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly;
++              in[15]++;
+       }
+       /* Fill ip key */
+-      ((unsigned char *)in)[0] = 0xE0;
+-      in[1] = 0;
+-      for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) {
++      in[0] = 0xE0;
++      in[15] = 0;
++      for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) {
+               do {
+-                      crypto_cipher_encrypt_one(ctx->child,
+-                              (unsigned char *)out, (unsigned char *)in);
+-                      ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out);
+-                      ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1);
+-                      ((unsigned char *)in)[15] += 1;
+-              } while (ctx->__vmac_ctx.l3key[i] >= p64
+-                      || ctx->__vmac_ctx.l3key[i+1] >= p64);
++                      crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
++                      tctx->l3key[i] = be64_to_cpu(out[0]);
++                      tctx->l3key[i+1] = be64_to_cpu(out[1]);
++                      in[15]++;
++              } while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64);
+       }
+-      /* Invalidate nonce/aes cache and reset other elements */
+-      ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */
+-      ctx->__vmac_ctx.cached_nonce[1] = (u64)0;  /* Ensure illegal nonce */
+-      ctx->__vmac_ctx.first_block_processed = 0;
+-
+-      return err;
++      return 0;
+ }
+-static int vmac_setkey(struct crypto_shash *parent,
+-              const u8 *key, unsigned int keylen)
++static int vmac_init(struct shash_desc *desc)
+ {
+-      struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
++      const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
++      struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
+-      if (keylen != VMAC_KEY_LEN) {
+-              crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN);
+-              return -EINVAL;
+-      }
+-
+-      return vmac_set_key((u8 *)key, ctx);
+-}
+-
+-static int vmac_init(struct shash_desc *pdesc)
+-{
++      dctx->partial_size = 0;
++      dctx->first_block_processed = false;
++      memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp));
+       return 0;
+ }
+-static int vmac_update(struct shash_desc *pdesc, const u8 *p,
+-              unsigned int len)
++static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
+ {
+-      struct crypto_shash *parent = pdesc->tfm;
+-      struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
+-      int expand;
+-      int min;
+-
+-      expand = VMAC_NHBYTES - ctx->partial_size > 0 ?
+-                      VMAC_NHBYTES - ctx->partial_size : 0;
+-
+-      min = len < expand ? len : expand;
+-
+-      memcpy(ctx->partial + ctx->partial_size, p, min);
+-      ctx->partial_size += min;
+-
+-      if (len < expand)
+-              return 0;
+-
+-      vhash_update(ctx->partial, VMAC_NHBYTES, &ctx->__vmac_ctx);
+-      ctx->partial_size = 0;
+-
+-      len -= expand;
+-      p += expand;
+-
+-      if (len % VMAC_NHBYTES) {
+-              memcpy(ctx->partial, p + len - (len % VMAC_NHBYTES),
+-                      len % VMAC_NHBYTES);
+-              ctx->partial_size = len % VMAC_NHBYTES;
++      const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
++      struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
++      unsigned int n;
++
++      if (dctx->partial_size) {
++              n = min(len, VMAC_NHBYTES - dctx->partial_size);
++              memcpy(&dctx->partial[dctx->partial_size], p, n);
++              dctx->partial_size += n;
++              p += n;
++              len -= n;
++              if (dctx->partial_size == VMAC_NHBYTES) {
++                      vhash_blocks(tctx, dctx, dctx->partial_words, 1);
++                      dctx->partial_size = 0;
++              }
++      }
++
++      if (len >= VMAC_NHBYTES) {
++              n = round_down(len, VMAC_NHBYTES);
++              /* TODO: 'p' may be misaligned here */
++              vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES);
++              p += n;
++              len -= n;
++      }
++
++      if (len) {
++              memcpy(dctx->partial, p, len);
++              dctx->partial_size = len;
+       }
+-      vhash_update(p, len - len % VMAC_NHBYTES, &ctx->__vmac_ctx);
+-
+       return 0;
+ }
+-static int vmac_final(struct shash_desc *pdesc, u8 *out)
++static u64 vhash_final(const struct vmac_tfm_ctx *tctx,
++                     struct vmac_desc_ctx *dctx)
+ {
+-      struct crypto_shash *parent = pdesc->tfm;
+-      struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
+-      vmac_t mac;
+-      u8 nonce[16] = {};
+-
+-      /* vmac() ends up accessing outside the array bounds that
+-       * we specify.  In appears to access up to the next 2-word
+-       * boundary.  We'll just be uber cautious and zero the
+-       * unwritten bytes in the buffer.
+-       */
+-      if (ctx->partial_size) {
+-              memset(ctx->partial + ctx->partial_size, 0,
+-                      VMAC_NHBYTES - ctx->partial_size);
+-      }
+-      mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx);
+-      memcpy(out, &mac, sizeof(vmac_t));
+-      memzero_explicit(&mac, sizeof(vmac_t));
+-      memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
+-      ctx->partial_size = 0;
++      unsigned int partial = dctx->partial_size;
++      u64 ch = dctx->polytmp[0];
++      u64 cl = dctx->polytmp[1];
++
++      /* L1 and L2-hash the final block if needed */
++      if (partial) {
++              /* Zero-pad to next 128-bit boundary */
++              unsigned int n = round_up(partial, 16);
++              u64 rh, rl;
++
++              memset(&dctx->partial[partial], 0, n - partial);
++              nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl);
++              rh &= m62;
++              if (dctx->first_block_processed)
++                      poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1],
++                                rh, rl);
++              else
++                      ADD128(ch, cl, rh, rl);
++      }
++
++      /* L3-hash the 128-bit output of L2-hash */
++      return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8);
++}
++
++static int vmac_final(struct shash_desc *desc, u8 *out)
++{
++      const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
++      struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
++      static const u8 nonce[16] = {}; /* TODO: this is insecure */
++      union {
++              u8 bytes[16];
++              __be64 pads[2];
++      } block;
++      int index;
++      u64 hash, pad;
++
++      /* Finish calculating the VHASH of the message */
++      hash = vhash_final(tctx, dctx);
++
++      /* Generate pseudorandom pad by encrypting the nonce */
++      memcpy(&block, nonce, 16);
++      index = block.bytes[15] & 1;
++      block.bytes[15] &= ~1;
++      crypto_cipher_encrypt_one(tctx->cipher, block.bytes, block.bytes);
++      pad = be64_to_cpu(block.pads[index]);
++
++      /* The VMAC is the sum of VHASH and the pseudorandom pad */
++      put_unaligned_le64(hash + pad, out);
+       return 0;
+ }
+ static int vmac_init_tfm(struct crypto_tfm *tfm)
+ {
+-      struct crypto_cipher *cipher;
+-      struct crypto_instance *inst = (void *)tfm->__crt_alg;
++      struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
+       struct crypto_spawn *spawn = crypto_instance_ctx(inst);
+-      struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
++      struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
++      struct crypto_cipher *cipher;
+       cipher = crypto_spawn_cipher(spawn);
+       if (IS_ERR(cipher))
+               return PTR_ERR(cipher);
+-      ctx->child = cipher;
++      tctx->cipher = cipher;
+       return 0;
+ }
+ static void vmac_exit_tfm(struct crypto_tfm *tfm)
+ {
+-      struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
+-      crypto_free_cipher(ctx->child);
++      struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
++
++      crypto_free_cipher(tctx->cipher);
+ }
+ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
+@@ -674,11 +627,12 @@ static int vmac_create(struct crypto_tem
+       inst->alg.base.cra_blocksize = alg->cra_blocksize;
+       inst->alg.base.cra_alignmask = alg->cra_alignmask;
+-      inst->alg.digestsize = sizeof(vmac_t);
+-      inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t);
++      inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx);
+       inst->alg.base.cra_init = vmac_init_tfm;
+       inst->alg.base.cra_exit = vmac_exit_tfm;
++      inst->alg.descsize = sizeof(struct vmac_desc_ctx);
++      inst->alg.digestsize = VMAC_TAG_LEN / 8;
+       inst->alg.init = vmac_init;
+       inst->alg.update = vmac_update;
+       inst->alg.final = vmac_final;
+--- a/include/crypto/vmac.h
++++ /dev/null
+@@ -1,63 +0,0 @@
+-/*
+- * Modified to interface to the Linux kernel
+- * Copyright (c) 2009, Intel Corporation.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+- * Place - Suite 330, Boston, MA 02111-1307 USA.
+- */
+-
+-#ifndef __CRYPTO_VMAC_H
+-#define __CRYPTO_VMAC_H
+-
+-/* --------------------------------------------------------------------------
+- * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
+- * This implementation is herby placed in the public domain.
+- * The authors offers no warranty. Use at your own risk.
+- * Please send bug reports to the authors.
+- * Last modified: 17 APR 08, 1700 PDT
+- * ----------------------------------------------------------------------- */
+-
+-/*
+- * User definable settings.
+- */
+-#define VMAC_TAG_LEN  64
+-#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256                   */
+-#define VMAC_KEY_LEN  (VMAC_KEY_SIZE/8)
+-#define VMAC_NHBYTES  128/* Must 2^i for any 3 < i < 13 Standard = 128*/
+-
+-/*
+- * This implementation uses u32 and u64 as names for unsigned 32-
+- * and 64-bit integer types. These are defined in C99 stdint.h. The
+- * following may need adaptation if you are not running a C99 or
+- * Microsoft C environment.
+- */
+-struct vmac_ctx {
+-      u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
+-      u64 polykey[2*VMAC_TAG_LEN/64];
+-      u64 l3key[2*VMAC_TAG_LEN/64];
+-      u64 polytmp[2*VMAC_TAG_LEN/64];
+-      u64 cached_nonce[2];
+-      u64 cached_aes[2];
+-      int first_block_processed;
+-};
+-
+-typedef u64 vmac_t;
+-
+-struct vmac_ctx_t {
+-      struct crypto_cipher *child;
+-      struct vmac_ctx __vmac_ctx;
+-      u8 partial[VMAC_NHBYTES];       /* partial block */
+-      int partial_size;               /* size of the partial block */
+-};
+-
+-#endif /* __CRYPTO_VMAC_H */
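+
+After this rewrite the template still registers as "vmac(aes)" with an
+8-byte digest (VMAC_TAG_LEN / 8).  A hedged caller-side sketch against
+the 4.17 shash API (the helper itself is hypothetical; the crypto_*
+calls and "vmac(aes)" are the real kernel API; needs <crypto/hash.h>):
+
+    /* hypothetical helper, for illustration only */
+    static int vmac_digest(const u8 key[16], const u8 *data,
+                           unsigned int len, u8 mac[8])
+    {
+            struct crypto_shash *tfm;
+            int err;
+
+            tfm = crypto_alloc_shash("vmac(aes)", 0, 0);
+            if (IS_ERR(tfm))
+                    return PTR_ERR(tfm);
+
+            /* VMAC_KEY_LEN is 16 bytes (a 128-bit AES key) */
+            err = crypto_shash_setkey(tfm, key, 16);
+            if (!err) {
+                    SHASH_DESC_ON_STACK(desc, tfm);
+
+                    desc->tfm = tfm;
+                    desc->flags = 0;
+                    err = crypto_shash_digest(desc, data, len, mac);
+            }
+            crypto_free_shash(tfm);
+            return err;
+    }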
diff --git a/queue-4.17/crypto-x86-sha256-mb-fix-digest-copy-in-sha256_mb_mgr_get_comp_job_avx2.patch b/queue-4.17/crypto-x86-sha256-mb-fix-digest-copy-in-sha256_mb_mgr_get_comp_job_avx2.patch
new file mode 100644 (file)
index 0000000..f0dff58
--- /dev/null
@@ -0,0 +1,81 @@
+From af839b4e546613aed1fbd64def73956aa98631e7 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Fri, 29 Jun 2018 14:14:35 -0700
+Subject: crypto: x86/sha256-mb - fix digest copy in sha256_mb_mgr_get_comp_job_avx2()
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit af839b4e546613aed1fbd64def73956aa98631e7 upstream.
+
+There is a copy-paste error where sha256_mb_mgr_get_comp_job_avx2()
+copies the SHA-256 digest state from sha256_mb_mgr::args::digest to
+job_sha256::result_digest.  Consequently, the sha256_mb algorithm
+sometimes calculates the wrong digest.  Fix it.
+
+Reproducer using AF_ALG:
+
+    #include <assert.h>
+    #include <linux/if_alg.h>
+    #include <stdio.h>
+    #include <string.h>
+    #include <sys/socket.h>
+    #include <unistd.h>
+
+    static const __u8 expected[32] =
+        "\xad\x7f\xac\xb2\x58\x6f\xc6\xe9\x66\xc0\x04\xd7\xd1\xd1\x6b\x02"
+        "\x4f\x58\x05\xff\x7c\xb4\x7c\x7a\x85\xda\xbd\x8b\x48\x89\x2c\xa7";
+
+    int main()
+    {
+        int fd;
+        struct sockaddr_alg addr = {
+            .salg_type = "hash",
+            .salg_name = "sha256_mb",
+        };
+        __u8 data[4096] = { 0 };
+        __u8 digest[32];
+        int ret;
+        int i;
+
+        fd = socket(AF_ALG, SOCK_SEQPACKET, 0);
+        bind(fd, (void *)&addr, sizeof(addr));
+        fork();
+        fd = accept(fd, 0, 0);
+        do {
+            ret = write(fd, data, 4096);
+            assert(ret == 4096);
+            ret = read(fd, digest, 32);
+            assert(ret == 32);
+        } while (memcmp(digest, expected, 32) == 0);
+
+        printf("wrong digest: ");
+        for (i = 0; i < 32; i++)
+            printf("%02x", digest[i]);
+        printf("\n");
+    }
+
+Output was:
+
+    wrong digest: ad7facb2000000000000000000000000ffffffef7cb47c7a85dabd8b48892ca7
+
+Fixes: 172b1d6b5a93 ("crypto: sha256-mb - fix ctx pointer and digest copy")
+Cc: <stable@vger.kernel.org> # v4.8+
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
++++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+@@ -265,7 +265,7 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2)
+       vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
+       vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
+       vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
+-      vmovd   _args_digest(state , idx, 4) , %xmm0
++      vmovd   _args_digest+4*32(state, idx, 4), %xmm1
+       vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
+       vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
+       vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
diff --git a/queue-4.17/ioremap-update-pgtable-free-interfaces-with-addr.patch b/queue-4.17/ioremap-update-pgtable-free-interfaces-with-addr.patch
new file mode 100644 (file)
index 0000000..13aa84e
--- /dev/null
@@ -0,0 +1,177 @@
+From 785a19f9d1dd8a4ab2d0633be4656653bd3de1fc Mon Sep 17 00:00:00 2001
+From: Chintan Pandya <cpandya@codeaurora.org>
+Date: Wed, 27 Jun 2018 08:13:47 -0600
+Subject: ioremap: Update pgtable free interfaces with addr
+
+From: Chintan Pandya <cpandya@codeaurora.org>
+
+commit 785a19f9d1dd8a4ab2d0633be4656653bd3de1fc upstream.
+
+The following kernel panic was observed on an ARM64 platform due to a
+stale TLB entry.
+
+ 1. ioremap() with 4K size; a valid pte page table is set.
+ 2. iounmap() it; its pte entry is set to 0.
+ 3. ioremap() the same address with 2M size; its pmd entry is updated
+    with a new value.
+ 4. The CPU may hit an exception because the old pmd entry is still in
+    the TLB, which leads to a kernel panic (see the sketch below).
+
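+A minimal sketch of the triggering sequence (illustrative only:
+'phys_addr' and the SZ_* size macros stand in for whatever the affected
+driver actually maps; needs <linux/io.h>):
+
+    void __iomem *p;
+
+    /* 1-2: a pte-level map is created, then its pte entry is cleared */
+    p = ioremap(phys_addr, SZ_4K);
+    iounmap(p);
+
+    /* 3: the same address is remapped at the pmd (2M) level */
+    p = ioremap(phys_addr, SZ_2M);
+
+    /* 4: the old pmd entry may still be cached in the TLB */
+    (void)readl(p);                 /* may fault -> kernel panic */
+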
+Commit b6bdb7517c3d ("mm/vmalloc: add interfaces to free unmapped page
+table") has addressed this panic by falling back to pte mappings in the
+above case on ARM64.
+
+To support pmd mappings in all cases, a TLB purge needs to be performed
+in this case on ARM64.
+
+Add a new arg, 'addr', to pud_free_pmd_page() and pmd_free_pte_page()
+so that a TLB purge can be added later in separate patches.
+
+[toshi.kani@hpe.com: merge changes, rewrite patch description]
+Fixes: 28ee90fe6048 ("x86/mm: implement free pmd/pte page interfaces")
+Signed-off-by: Chintan Pandya <cpandya@codeaurora.org>
+Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: mhocko@suse.com
+Cc: akpm@linux-foundation.org
+Cc: hpa@zytor.com
+Cc: linux-mm@kvack.org
+Cc: linux-arm-kernel@lists.infradead.org
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: stable@vger.kernel.org
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lkml.kernel.org/r/20180627141348.21777-3-toshi.kani@hpe.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/mm/mmu.c           |    4 ++--
+ arch/x86/mm/pgtable.c         |   12 +++++++-----
+ include/asm-generic/pgtable.h |    8 ++++----
+ lib/ioremap.c                 |    4 ++--
+ 4 files changed, 15 insertions(+), 13 deletions(-)
+
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -977,12 +977,12 @@ int pmd_clear_huge(pmd_t *pmdp)
+       return 1;
+ }
+-int pud_free_pmd_page(pud_t *pud)
++int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+ {
+       return pud_none(*pud);
+ }
+-int pmd_free_pte_page(pmd_t *pmd)
++int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+ {
+       return pmd_none(*pmd);
+ }
+--- a/arch/x86/mm/pgtable.c
++++ b/arch/x86/mm/pgtable.c
+@@ -719,11 +719,12 @@ int pmd_clear_huge(pmd_t *pmd)
+ /**
+  * pud_free_pmd_page - Clear pud entry and free pmd page.
+  * @pud: Pointer to a PUD.
++ * @addr: Virtual address associated with pud.
+  *
+  * Context: The pud range has been unmaped and TLB purged.
+  * Return: 1 if clearing the entry succeeded. 0 otherwise.
+  */
+-int pud_free_pmd_page(pud_t *pud)
++int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+ {
+       pmd_t *pmd;
+       int i;
+@@ -734,7 +735,7 @@ int pud_free_pmd_page(pud_t *pud)
+       pmd = (pmd_t *)pud_page_vaddr(*pud);
+       for (i = 0; i < PTRS_PER_PMD; i++)
+-              if (!pmd_free_pte_page(&pmd[i]))
++              if (!pmd_free_pte_page(&pmd[i], addr + (i * PMD_SIZE)))
+                       return 0;
+       pud_clear(pud);
+@@ -746,11 +747,12 @@ int pud_free_pmd_page(pud_t *pud)
+ /**
+  * pmd_free_pte_page - Clear pmd entry and free pte page.
+  * @pmd: Pointer to a PMD.
++ * @addr: Virtual address associated with pmd.
+  *
+  * Context: The pmd range has been unmaped and TLB purged.
+  * Return: 1 if clearing the entry succeeded. 0 otherwise.
+  */
+-int pmd_free_pte_page(pmd_t *pmd)
++int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+ {
+       pte_t *pte;
+@@ -766,7 +768,7 @@ int pmd_free_pte_page(pmd_t *pmd)
+ #else /* !CONFIG_X86_64 */
+-int pud_free_pmd_page(pud_t *pud)
++int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+ {
+       return pud_none(*pud);
+ }
+@@ -775,7 +777,7 @@ int pud_free_pmd_page(pud_t *pud)
+  * Disable free page handling on x86-PAE. This assures that ioremap()
+  * does not update sync'd pmd entries. See vmalloc_sync_one().
+  */
+-int pmd_free_pte_page(pmd_t *pmd)
++int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+ {
+       return pmd_none(*pmd);
+ }
+--- a/include/asm-generic/pgtable.h
++++ b/include/asm-generic/pgtable.h
+@@ -1019,8 +1019,8 @@ int pud_set_huge(pud_t *pud, phys_addr_t
+ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
+ int pud_clear_huge(pud_t *pud);
+ int pmd_clear_huge(pmd_t *pmd);
+-int pud_free_pmd_page(pud_t *pud);
+-int pmd_free_pte_page(pmd_t *pmd);
++int pud_free_pmd_page(pud_t *pud, unsigned long addr);
++int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
+ #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
+ static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
+ {
+@@ -1046,11 +1046,11 @@ static inline int pmd_clear_huge(pmd_t *
+ {
+       return 0;
+ }
+-static inline int pud_free_pmd_page(pud_t *pud)
++static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+ {
+       return 0;
+ }
+-static inline int pmd_free_pte_page(pmd_t *pmd)
++static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+ {
+       return 0;
+ }
+--- a/lib/ioremap.c
++++ b/lib/ioremap.c
+@@ -92,7 +92,7 @@ static inline int ioremap_pmd_range(pud_
+               if (ioremap_pmd_enabled() &&
+                   ((next - addr) == PMD_SIZE) &&
+                   IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
+-                  pmd_free_pte_page(pmd)) {
++                  pmd_free_pte_page(pmd, addr)) {
+                       if (pmd_set_huge(pmd, phys_addr + addr, prot))
+                               continue;
+               }
+@@ -119,7 +119,7 @@ static inline int ioremap_pud_range(p4d_
+               if (ioremap_pud_enabled() &&
+                   ((next - addr) == PUD_SIZE) &&
+                   IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
+-                  pud_free_pmd_page(pud)) {
++                  pud_free_pmd_page(pud, addr)) {
+                       if (pud_set_huge(pud, phys_addr + addr, prot))
+                               continue;
+               }
diff --git a/queue-4.17/kbuild-verify-that-depmod-is-installed.patch b/queue-4.17/kbuild-verify-that-depmod-is-installed.patch
new file mode 100644 (file)
index 0000000..38635f7
--- /dev/null
@@ -0,0 +1,101 @@
+From 934193a654c1f4d0643ddbf4b2529b508cae926e Mon Sep 17 00:00:00 2001
+From: Randy Dunlap <rdunlap@infradead.org>
+Date: Sun, 1 Jul 2018 19:46:06 -0700
+Subject: kbuild: verify that $DEPMOD is installed
+
+From: Randy Dunlap <rdunlap@infradead.org>
+
+commit 934193a654c1f4d0643ddbf4b2529b508cae926e upstream.
+
+Verify that 'depmod' ($DEPMOD) is installed.
+This is a partial revert of commit 620c231c7a7f
+("kbuild: do not check for ancient modutils tools").
+
+Also update Documentation/process/changes.rst to refer to
+kmod instead of module-init-tools.
+
+Fixes kernel bugzilla #198965:
+https://bugzilla.kernel.org/show_bug.cgi?id=198965
+
+Signed-off-by: Randy Dunlap <rdunlap@infradead.org>
+Cc: Lucas De Marchi <lucas.demarchi@profusion.mobi>
+Cc: Lucas De Marchi <lucas.de.marchi@gmail.com>
+Cc: Michal Marek <michal.lkml@markovi.net>
+Cc: Jessica Yu <jeyu@kernel.org>
+Cc: Chih-Wei Huang <cwhuang@linux.org.tw>
+Cc: stable@vger.kernel.org # any kernel since 2012
+Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ Documentation/process/changes.rst |   19 +++++++------------
+ scripts/depmod.sh                 |    8 +++++++-
+ 2 files changed, 14 insertions(+), 13 deletions(-)
+
+--- a/Documentation/process/changes.rst
++++ b/Documentation/process/changes.rst
+@@ -35,7 +35,7 @@ binutils               2.20
+ flex                   2.5.35           flex --version
+ bison                  2.0              bison --version
+ util-linux             2.10o            fdformat --version
+-module-init-tools      0.9.10           depmod -V
++kmod                   13               depmod -V
+ e2fsprogs              1.41.4           e2fsck -V
+ jfsutils               1.1.3            fsck.jfs -V
+ reiserfsprogs          3.6.3            reiserfsck -V
+@@ -156,12 +156,6 @@ is not build with ``CONFIG_KALLSYMS`` an
+ reproduce the Oops with that option, then you can still decode that Oops
+ with ksymoops.
+-Module-Init-Tools
+------------------
+-
+-A new module loader is now in the kernel that requires ``module-init-tools``
+-to use.  It is backward compatible with the 2.4.x series kernels.
+-
+ Mkinitrd
+ --------
+@@ -371,16 +365,17 @@ Util-linux
+ - <https://www.kernel.org/pub/linux/utils/util-linux/>
++Kmod
++----
++
++- <https://www.kernel.org/pub/linux/utils/kernel/kmod/>
++- <https://git.kernel.org/pub/scm/utils/kernel/kmod/kmod.git>
++
+ Ksymoops
+ --------
+ - <https://www.kernel.org/pub/linux/utils/kernel/ksymoops/v2.4/>
+-Module-Init-Tools
+------------------
+-
+-- <https://www.kernel.org/pub/linux/utils/kernel/module-init-tools/>
+-
+ Mkinitrd
+ --------
+--- a/scripts/depmod.sh
++++ b/scripts/depmod.sh
+@@ -11,10 +11,16 @@ DEPMOD=$1
+ KERNELRELEASE=$2
+ SYMBOL_PREFIX=$3
+-if ! test -r System.map -a -x "$DEPMOD"; then
++if ! test -r System.map ; then
+       exit 0
+ fi
++if [ -z $(command -v $DEPMOD) ]; then
++      echo "'make modules_install' requires $DEPMOD. Please install it." >&2
++      echo "This is probably in the kmod package." >&2
++      exit 1
++fi
++
+ # older versions of depmod don't support -P <symbol-prefix>
+ # support was added in module-init-tools 3.13
+ if test -n "$SYMBOL_PREFIX"; then
index 0451cfa284f312fa637f668b5d30a20e85da231a..c55373e153e5de024177b848e4c42db0a0ab6046 100644 (file)
@@ -4,3 +4,18 @@ x86-platform-uv-mark-memblock-related-init-code-and-data-correctly.patch
 x86-mm-pti-clear-global-bit-more-aggressively.patch
 xen-pv-call-get_cpu_address_sizes-to-set-x86_virt-phys_bits.patch
 x86-mm-disable-ioremap-free-page-handling-on-x86-pae.patch
+kbuild-verify-that-depmod-is-installed.patch
+crypto-ccree-fix-finup.patch
+crypto-ccree-fix-iv-handling.patch
+crypto-ccp-check-for-null-psp-pointer-at-module-unload.patch
+crypto-ccp-fix-command-completion-detection-race.patch
+crypto-x86-sha256-mb-fix-digest-copy-in-sha256_mb_mgr_get_comp_job_avx2.patch
+crypto-vmac-require-a-block-cipher-with-128-bit-block-size.patch
+crypto-vmac-separate-tfm-and-request-context.patch
+crypto-blkcipher-fix-crash-flushing-dcache-in-error-path.patch
+crypto-ablkcipher-fix-crash-flushing-dcache-in-error-path.patch
+crypto-skcipher-fix-aligning-block-size-in-skcipher_copy_iv.patch
+crypto-skcipher-fix-crash-flushing-dcache-in-error-path.patch
+bluetooth-hidp-buffer-overflow-in-hidp_process_report.patch
+ioremap-update-pgtable-free-interfaces-with-addr.patch
+x86-mm-add-tlb-purge-to-free-pmd-pte-page-interfaces.patch
diff --git a/queue-4.17/x86-mm-add-tlb-purge-to-free-pmd-pte-page-interfaces.patch b/queue-4.17/x86-mm-add-tlb-purge-to-free-pmd-pte-page-interfaces.patch
new file mode 100644 (file)
index 0000000..041de39
--- /dev/null
@@ -0,0 +1,124 @@
+From 5e0fb5df2ee871b841f96f9cb6a7f2784e96aa4e Mon Sep 17 00:00:00 2001
+From: Toshi Kani <toshi.kani@hpe.com>
+Date: Wed, 27 Jun 2018 08:13:48 -0600
+Subject: x86/mm: Add TLB purge to free pmd/pte page interfaces
+
+From: Toshi Kani <toshi.kani@hpe.com>
+
+commit 5e0fb5df2ee871b841f96f9cb6a7f2784e96aa4e upstream.
+
+ioremap() calls pud_free_pmd_page() / pmd_free_pte_page() when it creates
+a pud / pmd map.  The following preconditions are met at their entry.
+ - All pte entries for a target pud/pmd address range have been cleared.
+ - System-wide TLB purges have been performed for a target pud/pmd address
+   range.
+
+The preconditions ensure that there is no stale TLB entry for the range.
+Speculation cannot cache a TLB entry, since that requires all levels of
+page entries, including ptes, to have their P and A bits set for the
+associated address.  However, speculation may cache pud/pmd entries
+(paging-structure caches) when they have the P-bit set.
+
+Add a system-wide TLB purge (INVLPG) to a single page after clearing
+the pud/pmd entry's P-bit.
+
+SDM 4.10.4.1, Operations that Invalidate TLBs and Paging-Structure Caches,
+states that:
+  INVLPG invalidates all paging-structure caches associated with the
+  current PCID regardless of the linear addresses to which they correspond.
+
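+In short, the fix inserts that flush between clearing the entry and
+freeing the page-table page, as the diff below does for the pmd case:
+
+    pte = (pte_t *)pmd_page_vaddr(*pmd);
+    pmd_clear(pmd);
+
+    /* INVLPG to clear all paging-structure caches */
+    flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
+
+    free_page((unsigned long)pte);
+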
+Fixes: 28ee90fe6048 ("x86/mm: implement free pmd/pte page interfaces")
+Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: mhocko@suse.com
+Cc: akpm@linux-foundation.org
+Cc: hpa@zytor.com
+Cc: cpandya@codeaurora.org
+Cc: linux-mm@kvack.org
+Cc: linux-arm-kernel@lists.infradead.org
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: stable@vger.kernel.org
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lkml.kernel.org/r/20180627141348.21777-4-toshi.kani@hpe.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/mm/pgtable.c |   38 +++++++++++++++++++++++++++++++-------
+ 1 file changed, 31 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/mm/pgtable.c
++++ b/arch/x86/mm/pgtable.c
+@@ -721,24 +721,44 @@ int pmd_clear_huge(pmd_t *pmd)
+  * @pud: Pointer to a PUD.
+  * @addr: Virtual address associated with pud.
+  *
+- * Context: The pud range has been unmaped and TLB purged.
++ * Context: The pud range has been unmapped and TLB purged.
+  * Return: 1 if clearing the entry succeeded. 0 otherwise.
++ *
++ * NOTE: Callers must allow a single page allocation.
+  */
+ int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+ {
+-      pmd_t *pmd;
++      pmd_t *pmd, *pmd_sv;
++      pte_t *pte;
+       int i;
+       if (pud_none(*pud))
+               return 1;
+       pmd = (pmd_t *)pud_page_vaddr(*pud);
+-
+-      for (i = 0; i < PTRS_PER_PMD; i++)
+-              if (!pmd_free_pte_page(&pmd[i], addr + (i * PMD_SIZE)))
+-                      return 0;
++      pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
++      if (!pmd_sv)
++              return 0;
++
++      for (i = 0; i < PTRS_PER_PMD; i++) {
++              pmd_sv[i] = pmd[i];
++              if (!pmd_none(pmd[i]))
++                      pmd_clear(&pmd[i]);
++      }
+       pud_clear(pud);
++
++      /* INVLPG to clear all paging-structure caches */
++      flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
++
++      for (i = 0; i < PTRS_PER_PMD; i++) {
++              if (!pmd_none(pmd_sv[i])) {
++                      pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
++                      free_page((unsigned long)pte);
++              }
++      }
++
++      free_page((unsigned long)pmd_sv);
+       free_page((unsigned long)pmd);
+       return 1;
+@@ -749,7 +769,7 @@ int pud_free_pmd_page(pud_t *pud, unsign
+  * @pmd: Pointer to a PMD.
+  * @addr: Virtual address associated with pmd.
+  *
+- * Context: The pmd range has been unmaped and TLB purged.
++ * Context: The pmd range has been unmapped and TLB purged.
+  * Return: 1 if clearing the entry succeeded. 0 otherwise.
+  */
+ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+@@ -761,6 +781,10 @@ int pmd_free_pte_page(pmd_t *pmd, unsign
+       pte = (pte_t *)pmd_page_vaddr(*pmd);
+       pmd_clear(pmd);
++
++      /* INVLPG to clear all paging-structure caches */
++      flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
++
+       free_page((unsigned long)pte);
+       return 1;