--- /dev/null
+From eafc0a02391b7b36617b36c97c4b5d6832cf5e24 Mon Sep 17 00:00:00 2001
+From: Guo Xuenan <guoxuenan@huawei.com>
+Date: Fri, 8 Apr 2022 13:08:58 -0700
+Subject: lz4: fix LZ4_decompress_safe_partial read out of bound
+
+From: Guo Xuenan <guoxuenan@huawei.com>
+
+commit eafc0a02391b7b36617b36c97c4b5d6832cf5e24 upstream.
+
+When partialDecoding, it is EOF if we've either filled the output buffer
+or can't proceed with reading an offset for the following match.
+
+In some extreme corner cases, when the compressed data is suitably
+corrupted, a UAF will occur. As reported by KASAN [1],
+LZ4_decompress_safe_partial may lead to an out-of-bounds read during
+decoding. lz4 upstream has fixed it [2] and this issue has been
+discussed here [3] before.
+
+The current decompression routine was ported from lz4 v1.8.3; bumping
+lib/lz4 to v1.9.+ is certainly a huge amount of work to be done later,
+so we'd better fix this issue first.
+
+[1] https://lore.kernel.org/all/000000000000830d1205cf7f0477@google.com/
+[2] https://github.com/lz4/lz4/commit/c5d6f8a8be3927c0bec91bcc58667a6cfad244ad#
+[3] https://lore.kernel.org/all/CC666AE8-4CA4-4951-B6FB-A2EFDE3AC03B@fb.com/
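+
+For illustration only (this sketch is not part of the original report or
+patch), the affected entry point corresponds to liblz4's
+LZ4_decompress_safe_partial(); a minimal userspace caller exercising
+partial decoding looks roughly like:
+
+  #include <lz4.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+          const char src[] = "the quick brown fox jumps over the lazy dog "
+                             "the quick brown fox jumps over the lazy dog";
+          char comp[256], out[16];
+          int csize = LZ4_compress_default(src, comp, (int)sizeof(src),
+                                           (int)sizeof(comp));
+
+          /* Ask for only the first 8 decompressed bytes; with a corrupted
+           * 'comp' buffer the old decoder could read past the end of the
+           * input while looking for the next match offset.
+           */
+          int got = LZ4_decompress_safe_partial(comp, out, csize, 8,
+                                                (int)sizeof(out));
+
+          printf("partial decode returned %d\n", got);
+          return 0;
+  }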
+
+Link: https://lkml.kernel.org/r/20211111105048.2006070-1-guoxuenan@huawei.com
+Reported-by: syzbot+63d688f1d899c588fb71@syzkaller.appspotmail.com
+Signed-off-by: Guo Xuenan <guoxuenan@huawei.com>
+Reviewed-by: Nick Terrell <terrelln@fb.com>
+Acked-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Cc: Yann Collet <cyan@fb.com>
+Cc: Chengyang Fan <cy.fan@huawei.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/lz4/lz4_decompress.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/lib/lz4/lz4_decompress.c
++++ b/lib/lz4/lz4_decompress.c
+@@ -268,8 +268,12 @@ static FORCE_INLINE int LZ4_decompress_g
+ ip += length;
+ op += length;
+
+- /* Necessarily EOF, due to parsing restrictions */
+- if (!partialDecoding || (cpy == oend))
++ /* Necessarily EOF when !partialDecoding.
++ * When partialDecoding, it is EOF if we've either
++ * filled the output buffer or
++ * can't proceed with reading an offset for following match.
++ */
++ if (!partialDecoding || (cpy == oend) || (ip >= (iend - 2)))
+ break;
+ } else {
+ /* may overwrite up to WILDCOPYLENGTH beyond cpy */
--- /dev/null
+From 4ad099559b00ac01c3726e5c95dc3108ef47d03e Mon Sep 17 00:00:00 2001
+From: Miaohe Lin <linmiaohe@huawei.com>
+Date: Fri, 8 Apr 2022 13:09:07 -0700
+Subject: mm/mempolicy: fix mpol_new leak in shared_policy_replace
+
+From: Miaohe Lin <linmiaohe@huawei.com>
+
+commit 4ad099559b00ac01c3726e5c95dc3108ef47d03e upstream.
+
+If mpol_new is allocated but not used in the restart loop, it will be
+freed via mpol_put() before returning to the caller. But its refcnt has
+not been initialized yet, so mpol_put() cannot do the right thing and
+might leak the unused mpol_new. This can happen if the mempolicy of the
+shared shmem file is updated concurrently while sp->lock has been
+dropped during the memory allocation.
+
+This issue can be triggered easily with the code snippet below if many
+processes do the following work at the same time:
+
+ shmid = shmget((key_t)5566, 1024 * PAGE_SIZE, 0666|IPC_CREAT);
+ shm = shmat(shmid, 0, 0);
+ loop many times {
+ mbind(shm, 1024 * PAGE_SIZE, MPOL_LOCAL, mask, maxnode, 0);
+ mbind(shm + 128 * PAGE_SIZE, 128 * PAGE_SIZE, MPOL_DEFAULT, mask,
+ maxnode, 0);
+ }
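+
+A self-contained userspace variant of that reproducer might look as
+follows (illustrative sketch only: key, sizes and iteration count are
+arbitrary, error handling is omitted, and it is built with -lnuma for
+the mbind() wrapper):
+
+  #include <sys/ipc.h>
+  #include <sys/shm.h>
+  #include <numaif.h>
+  #include <unistd.h>
+
+  #ifndef MPOL_LOCAL
+  #define MPOL_LOCAL 4        /* older numaif.h may lack this */
+  #endif
+
+  int main(void)
+  {
+          long page = sysconf(_SC_PAGESIZE);
+          unsigned long mask = 0;   /* empty nodemask for LOCAL/DEFAULT */
+          int shmid = shmget((key_t)5566, 1024 * page, 0666 | IPC_CREAT);
+          char *shm = shmat(shmid, 0, 0);
+          int i;
+
+          /* mbind() on SysV shm updates the shared mempolicy tree, so
+           * concurrent callers race in shared_policy_replace().
+           */
+          for (i = 0; i < 100000; i++) {
+                  mbind(shm, 1024 * page, MPOL_LOCAL, &mask, 2, 0);
+                  mbind(shm + 128 * page, 128 * page, MPOL_DEFAULT, &mask,
+                        2, 0);
+          }
+          return 0;
+  }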
+
+Link: https://lkml.kernel.org/r/20220329111416.27954-1-linmiaohe@huawei.com
+Fixes: 42288fe366c4 ("mm: mempolicy: Convert shared_policy mutex to spinlock")
+Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: <stable@vger.kernel.org> [3.8]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/mempolicy.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -2559,6 +2559,7 @@ alloc_new:
+ mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
+ if (!mpol_new)
+ goto err_out;
++ atomic_set(&mpol_new->refcnt, 1);
+ goto restart;
+ }
+
--- /dev/null
+From 03e59b1e2f56245163b14c69e0a830c24b1a3a47 Mon Sep 17 00:00:00 2001
+From: Wolfram Sang <wsa+renesas@sang-engineering.com>
+Date: Mon, 4 Apr 2022 13:49:02 +0200
+Subject: mmc: renesas_sdhi: don't overwrite TAP settings when HS400 tuning is complete
+
+From: Wolfram Sang <wsa+renesas@sang-engineering.com>
+
+commit 03e59b1e2f56245163b14c69e0a830c24b1a3a47 upstream.
+
+When HS400 tuning is complete and HS400 is going to be activated, we
+have to keep the current number of TAPs and should not overwrite them
+with a hardcoded value. This was probably a copy&paste mistake when
+porting HS400 support from the BSP.
+
+Fixes: 26eb2607fa28 ("mmc: renesas_sdhi: add eMMC HS400 mode support")
+Reported-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Signed-off-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
+Reviewed-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20220404114902.12175-1-wsa+renesas@sang-engineering.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/host/renesas_sdhi_core.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/mmc/host/renesas_sdhi_core.c
++++ b/drivers/mmc/host/renesas_sdhi_core.c
+@@ -349,10 +349,10 @@ static void renesas_sdhi_hs400_complete(
+ SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) |
+ sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2));
+
+- /* Set the sampling clock selection range of HS400 mode */
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL,
+ SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN |
+- 0x4 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT);
++ sd_scc_read32(host, priv,
++ SH_MOBILE_SDHI_SCC_DTCNTL));
+
+
+ if (host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400)
--- /dev/null
+From 01e67e04c28170c47700c2c226d732bbfedb1ad0 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Fri, 8 Apr 2022 13:09:04 -0700
+Subject: mmmremap.c: avoid pointless invalidate_range_start/end on mremap(old_size=0)
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 01e67e04c28170c47700c2c226d732bbfedb1ad0 upstream.
+
+If an mremap() syscall with old_size=0 ends up in move_page_tables(), it
+will call invalidate_range_start()/invalidate_range_end() unnecessarily,
+i.e. with an empty range.
+
+This causes a WARN in KVM's mmu_notifier. In the past, empty ranges
+have been diagnosed as off-by-one bugs, hence the WARNing. Given the
+low (so far) number of unique reports, the benefits of detecting more
+buggy callers seem to outweigh the cost of having to fix cases such as
+this one, where userspace is doing something silly. In this particular
+case, an early return from move_page_tables() is enough to fix the
+issue.
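+
+For reference, a hypothetical single-process illustration of such a
+silly call (names and sizes are arbitrary, and the WARN itself only
+fires when an mmu_notifier such as KVM's is registered on the mm):
+
+  #define _GNU_SOURCE
+  #include <sys/mman.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+          long page = sysconf(_SC_PAGESIZE);
+          void *old = mmap(NULL, 4 * page, PROT_READ | PROT_WRITE,
+                           MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+
+          /* old_size == 0 asks for a duplicate of the shared mapping at a
+           * new address; before this patch that reached move_page_tables()
+           * and issued an empty-range invalidate_range_start/end pair.
+           */
+          mremap(old, 0, 4 * page, MREMAP_MAYMOVE);
+          return 0;
+  }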
+
+Link: https://lkml.kernel.org/r/20220329173155.172439-1-pbonzini@redhat.com
+Reported-by: syzbot+6bde52d89cfdf9f61425@syzkaller.appspotmail.com
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Sean Christopherson <seanjc@google.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/mremap.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -246,6 +246,9 @@ unsigned long move_page_tables(struct vm
+ struct mmu_notifier_range range;
+ pmd_t *old_pmd, *new_pmd;
+
++ if (!len)
++ return 0;
++
+ old_end = old_addr + len;
+ flush_cache_range(vma, old_addr, old_end);
+
--- /dev/null
+From 7e2646ed47542123168d43916b84b954532e5386 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org>
+Date: Fri, 18 Mar 2022 15:14:41 +0100
+Subject: Revert "mmc: sdhci-xenon: fix annoying 1.8V regulator warning"
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Pali Rohár <pali@kernel.org>
+
+commit 7e2646ed47542123168d43916b84b954532e5386 upstream.
+
+This reverts commit bb32e1987bc55ce1db400faf47d85891da3c9b9f.
+
+Commit 1a3ed0dc3594 ("mmc: sdhci-xenon: fix 1.8v regulator stabilization")
+contains the proper fix for the issue described in commit bb32e1987bc5 ("mmc:
+sdhci-xenon: fix annoying 1.8V regulator warning").
+
+Fixes: 8d876bf472db ("mmc: sdhci-xenon: wait 5ms after set 1.8V signal enable")
+Cc: stable@vger.kernel.org # 1a3ed0dc3594 ("mmc: sdhci-xenon: fix 1.8v regulator stabilization")
+Signed-off-by: Pali Rohár <pali@kernel.org>
+Reviewed-by: Marek Behún <kabel@kernel.org>
+Reviewed-by: Marcin Wojtas <mw@semihalf.com>
+Link: https://lore.kernel.org/r/20220318141441.32329-1-pali@kernel.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/host/sdhci-xenon.c | 10 ----------
+ 1 file changed, 10 deletions(-)
+
+--- a/drivers/mmc/host/sdhci-xenon.c
++++ b/drivers/mmc/host/sdhci-xenon.c
+@@ -240,16 +240,6 @@ static void xenon_voltage_switch(struct
+ {
+ /* Wait for 5ms after set 1.8V signal enable bit */
+ usleep_range(5000, 5500);
+-
+- /*
+- * For some reason the controller's Host Control2 register reports
+- * the bit representing 1.8V signaling as 0 when read after it was
+- * written as 1. Subsequent read reports 1.
+- *
+- * Since this may cause some issues, do an empty read of the Host
+- * Control2 register here to circumvent this.
+- */
+- sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ }
+
+ static const struct sdhci_ops sdhci_xenon_ops = {
sunrpc-handle-low-memory-situations-in-call_status.patch
perf-tools-fix-perf-s-libperf_print-callback.patch
perf-session-remap-buf-if-there-is-no-space-for-even.patch
+revert-mmc-sdhci-xenon-fix-annoying-1.8v-regulator-warning.patch
+mmc-renesas_sdhi-don-t-overwrite-tap-settings-when-hs400-tuning-is-complete.patch
+lz4-fix-lz4_decompress_safe_partial-read-out-of-bound.patch
+mmmremap.c-avoid-pointless-invalidate_range_start-end-on-mremap-old_size-0.patch
+mm-mempolicy-fix-mpol_new-leak-in-shared_policy_replace.patch