--- /dev/null
+From 1ecd5b129252249b9bc03d7645a7bda512747277 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Wed, 21 Apr 2021 17:43:16 +0100
+Subject: ACPI: GTDT: Don't corrupt interrupt mappings on watchdog probe failure
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit 1ecd5b129252249b9bc03d7645a7bda512747277 upstream.
+
+When failing the driver probe because of invalid firmware properties,
+the GTDT driver unmaps the interrupt that it mapped earlier.
+
+However, it never checks whether the mapping of the interrupt actually
+succeeded. Even more, should the firmware report an illegal interrupt
+number that overlaps with the GIC SGI range, this can result in an
+IPI being unmapped, and subsequent fireworks (as reported by Dann
+Frazier).
+
+Rework the driver to have a slightly saner behaviour and actually
+check whether the interrupt has been mapped before unmapping things.
+
+Reported-by: dann frazier <dann.frazier@canonical.com>
+Fixes: ca9ae5ec4ef0 ("acpi/arm64: Add SBSA Generic Watchdog support in GTDT driver")
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/YH87dtTfwYgavusz@xps13.dannf
+Cc: <stable@vger.kernel.org>
+Cc: Fu Wei <wefu@redhat.com>
+Reviewed-by: Sudeep Holla <sudeep.holla@arm.com>
+Tested-by: dann frazier <dann.frazier@canonical.com>
+Tested-by: Hanjun Guo <guohanjun@huawei.com>
+Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
+Reviewed-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Link: https://lore.kernel.org/r/20210421164317.1718831-2-maz@kernel.org
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/acpi/arm64/gtdt.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/drivers/acpi/arm64/gtdt.c
++++ b/drivers/acpi/arm64/gtdt.c
+@@ -329,7 +329,7 @@ static int __init gtdt_import_sbsa_gwdt(
+ int index)
+ {
+ struct platform_device *pdev;
+- int irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags);
++ int irq;
+
+ /*
+ * According to SBSA specification the size of refresh and control
+@@ -338,7 +338,7 @@ static int __init gtdt_import_sbsa_gwdt(
+ struct resource res[] = {
+ DEFINE_RES_MEM(wd->control_frame_address, SZ_4K),
+ DEFINE_RES_MEM(wd->refresh_frame_address, SZ_4K),
+- DEFINE_RES_IRQ(irq),
++ {},
+ };
+ int nr_res = ARRAY_SIZE(res);
+
+@@ -348,10 +348,11 @@ static int __init gtdt_import_sbsa_gwdt(
+
+ if (!(wd->refresh_frame_address && wd->control_frame_address)) {
+ pr_err(FW_BUG "failed to get the Watchdog base address.\n");
+- acpi_unregister_gsi(wd->timer_interrupt);
+ return -EINVAL;
+ }
+
++ irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags);
++ res[2] = (struct resource)DEFINE_RES_IRQ(irq);
+ if (irq <= 0) {
+ pr_warn("failed to map the Watchdog interrupt.\n");
+ nr_res--;
+@@ -364,7 +365,8 @@ static int __init gtdt_import_sbsa_gwdt(
+ */
+ pdev = platform_device_register_simple("sbsa-gwdt", index, res, nr_res);
+ if (IS_ERR(pdev)) {
+- acpi_unregister_gsi(wd->timer_interrupt);
++ if (irq > 0)
++ acpi_unregister_gsi(wd->timer_interrupt);
+ return PTR_ERR(pdev);
+ }
+
--- /dev/null
+From 44200f2d9b8b52389c70e6c7bbe51e0dc6eaf938 Mon Sep 17 00:00:00 2001
+From: Nathan Chancellor <nathan@kernel.org>
+Date: Fri, 9 Apr 2021 15:11:55 -0700
+Subject: crypto: arm/curve25519 - Move '.fpu' after '.arch'
+
+From: Nathan Chancellor <nathan@kernel.org>
+
+commit 44200f2d9b8b52389c70e6c7bbe51e0dc6eaf938 upstream.
+
+Debian's clang carries a patch that makes the default FPU mode
+'vfp3-d16' instead of 'neon' for 'armv7-a' to avoid generating NEON
+instructions on hardware that does not support them:
+
+https://salsa.debian.org/pkg-llvm-team/llvm-toolchain/-/raw/5a61ca6f21b4ad8c6ac4970e5ea5a7b5b4486d22/debian/patches/clang-arm-default-vfp3-on-armv7a.patch
+https://bugs.debian.org/841474
+https://bugs.debian.org/842142
+https://bugs.debian.org/914268
+
+This results in the following build error when clang's integrated
+assembler is used because the '.arch' directive overrides the '.fpu'
+directive:
+
+arch/arm/crypto/curve25519-core.S:25:2: error: instruction requires: NEON
+ vmov.i32 q0, #1
+ ^
+arch/arm/crypto/curve25519-core.S:26:2: error: instruction requires: NEON
+ vshr.u64 q1, q0, #7
+ ^
+arch/arm/crypto/curve25519-core.S:27:2: error: instruction requires: NEON
+ vshr.u64 q0, q0, #8
+ ^
+arch/arm/crypto/curve25519-core.S:28:2: error: instruction requires: NEON
+ vmov.i32 d4, #19
+ ^
+
+Shuffle the order of the '.arch' and '.fpu' directives so that the code
+builds regardless of the default FPU mode. This has been tested against
+both clang with and without Debian's patch and GCC.
+
+Cc: stable@vger.kernel.org
+Fixes: d8f1308a025f ("crypto: arm/curve25519 - wire up NEON implementation")
+Link: https://github.com/ClangBuiltLinux/continuous-integration2/issues/118
+Reported-by: Arnd Bergmann <arnd@arndb.de>
+Suggested-by: Arnd Bergmann <arnd@arndb.de>
+Suggested-by: Jessica Clarke <jrtc27@jrtc27.com>
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Acked-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
+Tested-by: Nick Desaulniers <ndesaulniers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/crypto/curve25519-core.S | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/crypto/curve25519-core.S
++++ b/arch/arm/crypto/curve25519-core.S
+@@ -10,8 +10,8 @@
+ #include <linux/linkage.h>
+
+ .text
+-.fpu neon
+ .arch armv7-a
++.fpu neon
+ .align 4
+
+ ENTRY(curve25519_neon)
--- /dev/null
+From 30d0f6a956fc74bb2e948398daf3278c6b08c7e9 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Sun, 21 Mar 2021 22:07:48 -0700
+Subject: crypto: rng - fix crypto_rng_reset() refcounting when !CRYPTO_STATS
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 30d0f6a956fc74bb2e948398daf3278c6b08c7e9 upstream.
+
+crypto_stats_get() is a no-op when the kernel is compiled without
+CONFIG_CRYPTO_STATS, so pairing it with crypto_alg_put() unconditionally
+(as crypto_rng_reset() does) is wrong.
+
+Fix this by moving the call to crypto_stats_get() to just before the
+actual algorithm operation which might need it. This makes it always
+paired with crypto_stats_rng_seed().
+
+Fixes: eed74b3eba9e ("crypto: rng - Fix a refcounting bug in crypto_rng_reset()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ crypto/rng.c | 10 +++-------
+ 1 file changed, 3 insertions(+), 7 deletions(-)
+
+--- a/crypto/rng.c
++++ b/crypto/rng.c
+@@ -34,22 +34,18 @@ int crypto_rng_reset(struct crypto_rng *
+ u8 *buf = NULL;
+ int err;
+
+- crypto_stats_get(alg);
+ if (!seed && slen) {
+ buf = kmalloc(slen, GFP_KERNEL);
+- if (!buf) {
+- crypto_alg_put(alg);
++ if (!buf)
+ return -ENOMEM;
+- }
+
+ err = get_random_bytes_wait(buf, slen);
+- if (err) {
+- crypto_alg_put(alg);
++ if (err)
+ goto out;
+- }
+ seed = buf;
+ }
+
++ crypto_stats_get(alg);
+ err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
+ crypto_stats_rng_seed(alg, err);
+ out:
--- /dev/null
+From ac05a8a927e5a1027592d8f98510a511dadeed14 Mon Sep 17 00:00:00 2001
+From: Hansem Ro <hansemro@outlook.com>
+Date: Thu, 6 May 2021 13:27:10 -0700
+Subject: Input: ili210x - add missing negation for touch indication on ili210x
+
+From: Hansem Ro <hansemro@outlook.com>
+
+commit ac05a8a927e5a1027592d8f98510a511dadeed14 upstream.
+
+This adds the negation needed for proper finger detection on Ilitek
+ili2107/ili210x. This fixes polling issues (on Amazon Kindle Fire)
+caused by returning false for the corresponding finger on the touchscreen.
+
+Signed-off-by: Hansem Ro <hansemro@outlook.com>
+Fixes: e3559442afd2a ("ili210x - rework the touchscreen sample processing")
+Cc: stable@vger.kernel.org
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/input/touchscreen/ili210x.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/input/touchscreen/ili210x.c
++++ b/drivers/input/touchscreen/ili210x.c
+@@ -87,7 +87,7 @@ static bool ili210x_touchdata_to_coords(
+ unsigned int *x, unsigned int *y,
+ unsigned int *z)
+ {
+- if (touchdata[0] & BIT(finger))
++ if (!(touchdata[0] & BIT(finger)))
+ return false;
+
+ *x = get_unaligned_be16(touchdata + 1 + (finger * 4) + 0);
--- /dev/null
+From 48cb17531b15967d9d3f34c770a25cc6c4ca6ad1 Mon Sep 17 00:00:00 2001
+From: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Date: Wed, 14 Apr 2021 20:12:51 +0300
+Subject: intel_th: pci: Add Alder Lake-M support
+
+From: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+
+commit 48cb17531b15967d9d3f34c770a25cc6c4ca6ad1 upstream.
+
+This adds support for the Trace Hub in Alder Lake-M PCH.
+
+Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Cc: stable@vger.kernel.org # v4.14+
+Link: https://lore.kernel.org/r/20210414171251.14672-8-alexander.shishkin@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hwtracing/intel_th/pci.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/hwtracing/intel_th/pci.c
++++ b/drivers/hwtracing/intel_th/pci.c
+@@ -274,6 +274,11 @@ static const struct pci_device_id intel_
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
++ /* Alder Lake-M */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x54a6),
++ .driver_data = (kernel_ulong_t)&intel_th_2x,
++ },
++ {
+ /* Alder Lake CPU */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
--- /dev/null
+From 38c527aeb41926c71902dd42f788a8b093b21416 Mon Sep 17 00:00:00 2001
+From: "Longpeng(Mike)" <longpeng2@huawei.com>
+Date: Thu, 15 Apr 2021 08:46:28 +0800
+Subject: iommu/vt-d: Force to flush iotlb before creating superpage
+
+From: Longpeng(Mike) <longpeng2@huawei.com>
+
+commit 38c527aeb41926c71902dd42f788a8b093b21416 upstream.
+
+The translation caches may preserve obsolete data when the
+mapping size is changed, suppose the following sequence which
+can reveal the problem with high probability.
+
+1.mmap(4GB,MAP_HUGETLB)
+2.
+ while (1) {
+ (a) DMA MAP 0,0xa0000
+ (b) DMA UNMAP 0,0xa0000
+ (c) DMA MAP 0,0xc0000000
+ * DMA read IOVA 0 may failure here (Not present)
+ * if the problem occurs.
+ (d) DMA UNMAP 0,0xc0000000
+ }
+
+The page table(only focus on IOVA 0) after (a) is:
+ PML4: 0x19db5c1003 entry:0xffff899bdcd2f000
+ PDPE: 0x1a1cacb003 entry:0xffff89b35b5c1000
+ PDE: 0x1a30a72003 entry:0xffff89b39cacb000
+ PTE: 0x21d200803 entry:0xffff89b3b0a72000
+
+The page table after (b) is:
+ PML4: 0x19db5c1003 entry:0xffff899bdcd2f000
+ PDPE: 0x1a1cacb003 entry:0xffff89b35b5c1000
+ PDE: 0x1a30a72003 entry:0xffff89b39cacb000
+ PTE: 0x0 entry:0xffff89b3b0a72000
+
+The page table after (c) is:
+ PML4: 0x19db5c1003 entry:0xffff899bdcd2f000
+ PDPE: 0x1a1cacb003 entry:0xffff89b35b5c1000
+ PDE: 0x21d200883 entry:0xffff89b39cacb000 (*)
+
+Because the PDE entry after (b) is present, it won't be
+flushed even if the iommu driver flush cache when unmap,
+so the obsolete data may be preserved in cache, which
+would cause the wrong translation at end.
+
+However, we can see the PDE entry is finally switch to
+2M-superpage mapping, but it does not transform
+to 0x21d200883 directly:
+
+1. PDE: 0x1a30a72003
+2. __domain_mapping
+ dma_pte_free_pagetable
+ Set the PDE entry to ZERO
+ Set the PDE entry to 0x21d200883
+
+So we must flush the cache after the entry switch to ZERO
+to avoid the obsolete info be preserved.
+
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: Lu Baolu <baolu.lu@linux.intel.com>
+Cc: Nadav Amit <nadav.amit@gmail.com>
+Cc: Alex Williamson <alex.williamson@redhat.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Kevin Tian <kevin.tian@intel.com>
+Cc: Gonglei (Arei) <arei.gonglei@huawei.com>
+
+Fixes: 6491d4d02893 ("intel-iommu: Free old page tables before creating superpage")
+Cc: <stable@vger.kernel.org> # v3.0+
+Link: https://lore.kernel.org/linux-iommu/670baaf8-4ff8-4e84-4be3-030b95ab5a5e@huawei.com/
+Suggested-by: Lu Baolu <baolu.lu@linux.intel.com>
+Signed-off-by: Longpeng(Mike) <longpeng2@huawei.com>
+Acked-by: Lu Baolu <baolu.lu@linux.intel.com>
+Link: https://lore.kernel.org/r/20210415004628.1779-1-longpeng2@huawei.com
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iommu/intel/iommu.c | 52 ++++++++++++++++++++++++++++++++------------
+ 1 file changed, 38 insertions(+), 14 deletions(-)
+
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -2289,6 +2289,41 @@ static inline int hardware_largepage_cap
+ return level;
+ }
+
++/*
++ * Ensure that old small page tables are removed to make room for superpage(s).
++ * We're going to add new large pages, so make sure we don't remove their parent
++ * tables. The IOTLB/devTLBs should be flushed if any PDE/PTEs are cleared.
++ */
++static void switch_to_super_page(struct dmar_domain *domain,
++ unsigned long start_pfn,
++ unsigned long end_pfn, int level)
++{
++ unsigned long lvl_pages = lvl_to_nr_pages(level);
++ struct dma_pte *pte = NULL;
++ int i;
++
++ while (start_pfn <= end_pfn) {
++ if (!pte)
++ pte = pfn_to_dma_pte(domain, start_pfn, &level);
++
++ if (dma_pte_present(pte)) {
++ dma_pte_free_pagetable(domain, start_pfn,
++ start_pfn + lvl_pages - 1,
++ level + 1);
++
++ for_each_domain_iommu(i, domain)
++ iommu_flush_iotlb_psi(g_iommus[i], domain,
++ start_pfn, lvl_pages,
++ 0, 0);
++ }
++
++ pte++;
++ start_pfn += lvl_pages;
++ if (first_pte_in_page(pte))
++ pte = NULL;
++ }
++}
++
+ static int
+ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ unsigned long phys_pfn, unsigned long nr_pages, int prot)
+@@ -2329,22 +2364,11 @@ __domain_mapping(struct dmar_domain *dom
+ return -ENOMEM;
+ /* It is large page*/
+ if (largepage_lvl > 1) {
+- unsigned long nr_superpages, end_pfn;
++ unsigned long end_pfn;
+
+ pteval |= DMA_PTE_LARGE_PAGE;
+- lvl_pages = lvl_to_nr_pages(largepage_lvl);
+-
+- nr_superpages = nr_pages / lvl_pages;
+- end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
+-
+- /*
+- * Ensure that old small page tables are
+- * removed to make room for superpage(s).
+- * We're adding new large pages, so make sure
+- * we don't remove their parent tables.
+- */
+- dma_pte_free_pagetable(domain, iov_pfn, end_pfn,
+- largepage_lvl + 1);
++ end_pfn = ((iov_pfn + nr_pages) & level_mask(largepage_lvl)) - 1;
++ switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
+ } else {
+ pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
+ }
--- /dev/null
+From 960b9a8a7676b9054d8b46a2c7db52a0c8766b56 Mon Sep 17 00:00:00 2001
+From: lizhe <lizhe67@huawei.com>
+Date: Thu, 18 Mar 2021 11:06:57 +0800
+Subject: jffs2: Fix kasan slab-out-of-bounds problem
+
+From: lizhe <lizhe67@huawei.com>
+
+commit 960b9a8a7676b9054d8b46a2c7db52a0c8766b56 upstream.
+
+KASAN report a slab-out-of-bounds problem. The logs are listed below.
+It is because in function jffs2_scan_dirent_node, we alloc "checkedlen+1"
+bytes for fd->name and we check crc with length rd->nsize. If checkedlen
+is less than rd->nsize, it will cause the slab-out-of-bounds problem.
+
+jffs2: Dirent at *** has zeroes in name. Truncating to %d char
+==================================================================
+BUG: KASAN: slab-out-of-bounds in crc32_le+0x1ce/0x260 at addr ffff8800842cf2d1
+Read of size 1 by task test_JFFS2/915
+=============================================================================
+BUG kmalloc-64 (Tainted: G B O ): kasan: bad access detected
+-----------------------------------------------------------------------------
+INFO: Allocated in jffs2_alloc_full_dirent+0x2a/0x40 age=0 cpu=1 pid=915
+ ___slab_alloc+0x580/0x5f0
+ __slab_alloc.isra.24+0x4e/0x64
+ __kmalloc+0x170/0x300
+ jffs2_alloc_full_dirent+0x2a/0x40
+ jffs2_scan_eraseblock+0x1ca4/0x3b64
+ jffs2_scan_medium+0x285/0xfe0
+ jffs2_do_mount_fs+0x5fb/0x1bbc
+ jffs2_do_fill_super+0x245/0x6f0
+ jffs2_fill_super+0x287/0x2e0
+ mount_mtd_aux.isra.0+0x9a/0x144
+ mount_mtd+0x222/0x2f0
+ jffs2_mount+0x41/0x60
+ mount_fs+0x63/0x230
+ vfs_kern_mount.part.6+0x6c/0x1f4
+ do_mount+0xae8/0x1940
+ SyS_mount+0x105/0x1d0
+INFO: Freed in jffs2_free_full_dirent+0x22/0x40 age=27 cpu=1 pid=915
+ __slab_free+0x372/0x4e4
+ kfree+0x1d4/0x20c
+ jffs2_free_full_dirent+0x22/0x40
+ jffs2_build_remove_unlinked_inode+0x17a/0x1e4
+ jffs2_do_mount_fs+0x1646/0x1bbc
+ jffs2_do_fill_super+0x245/0x6f0
+ jffs2_fill_super+0x287/0x2e0
+ mount_mtd_aux.isra.0+0x9a/0x144
+ mount_mtd+0x222/0x2f0
+ jffs2_mount+0x41/0x60
+ mount_fs+0x63/0x230
+ vfs_kern_mount.part.6+0x6c/0x1f4
+ do_mount+0xae8/0x1940
+ SyS_mount+0x105/0x1d0
+ entry_SYSCALL_64_fastpath+0x1e/0x97
+Call Trace:
+ [<ffffffff815befef>] dump_stack+0x59/0x7e
+ [<ffffffff812d1d65>] print_trailer+0x125/0x1b0
+ [<ffffffff812d82c8>] object_err+0x34/0x40
+ [<ffffffff812dadef>] kasan_report.part.1+0x21f/0x534
+ [<ffffffff81132401>] ? vprintk+0x2d/0x40
+ [<ffffffff815f1ee2>] ? crc32_le+0x1ce/0x260
+ [<ffffffff812db41a>] kasan_report+0x26/0x30
+ [<ffffffff812d9fc1>] __asan_load1+0x3d/0x50
+ [<ffffffff815f1ee2>] crc32_le+0x1ce/0x260
+ [<ffffffff814764ae>] ? jffs2_alloc_full_dirent+0x2a/0x40
+ [<ffffffff81485cec>] jffs2_scan_eraseblock+0x1d0c/0x3b64
+ [<ffffffff81488813>] ? jffs2_scan_medium+0xccf/0xfe0
+ [<ffffffff81483fe0>] ? jffs2_scan_make_ino_cache+0x14c/0x14c
+ [<ffffffff812da3e9>] ? kasan_unpoison_shadow+0x35/0x50
+ [<ffffffff812da3e9>] ? kasan_unpoison_shadow+0x35/0x50
+ [<ffffffff812da462>] ? kasan_kmalloc+0x5e/0x70
+ [<ffffffff812d5d90>] ? kmem_cache_alloc_trace+0x10c/0x2cc
+ [<ffffffff818169fb>] ? mtd_point+0xf7/0x130
+ [<ffffffff81487dc9>] jffs2_scan_medium+0x285/0xfe0
+ [<ffffffff81487b44>] ? jffs2_scan_eraseblock+0x3b64/0x3b64
+ [<ffffffff812da3e9>] ? kasan_unpoison_shadow+0x35/0x50
+ [<ffffffff812da3e9>] ? kasan_unpoison_shadow+0x35/0x50
+ [<ffffffff812da462>] ? kasan_kmalloc+0x5e/0x70
+ [<ffffffff812d57df>] ? __kmalloc+0x12b/0x300
+ [<ffffffff812da462>] ? kasan_kmalloc+0x5e/0x70
+ [<ffffffff814a2753>] ? jffs2_sum_init+0x9f/0x240
+ [<ffffffff8148b2ff>] jffs2_do_mount_fs+0x5fb/0x1bbc
+ [<ffffffff8148ad04>] ? jffs2_del_noinode_dirent+0x640/0x640
+ [<ffffffff812da462>] ? kasan_kmalloc+0x5e/0x70
+ [<ffffffff81127c5b>] ? __init_rwsem+0x97/0xac
+ [<ffffffff81492349>] jffs2_do_fill_super+0x245/0x6f0
+ [<ffffffff81493c5b>] jffs2_fill_super+0x287/0x2e0
+ [<ffffffff814939d4>] ? jffs2_parse_options+0x594/0x594
+ [<ffffffff81819bea>] mount_mtd_aux.isra.0+0x9a/0x144
+ [<ffffffff81819eb6>] mount_mtd+0x222/0x2f0
+ [<ffffffff814939d4>] ? jffs2_parse_options+0x594/0x594
+ [<ffffffff81819c94>] ? mount_mtd_aux.isra.0+0x144/0x144
+ [<ffffffff81258757>] ? free_pages+0x13/0x1c
+ [<ffffffff814fa0ac>] ? selinux_sb_copy_data+0x278/0x2e0
+ [<ffffffff81492b35>] jffs2_mount+0x41/0x60
+ [<ffffffff81302fb7>] mount_fs+0x63/0x230
+ [<ffffffff8133755f>] ? alloc_vfsmnt+0x32f/0x3b0
+ [<ffffffff81337f2c>] vfs_kern_mount.part.6+0x6c/0x1f4
+ [<ffffffff8133ceec>] do_mount+0xae8/0x1940
+ [<ffffffff811b94e0>] ? audit_filter_rules.constprop.6+0x1d10/0x1d10
+ [<ffffffff8133c404>] ? copy_mount_string+0x40/0x40
+ [<ffffffff812cbf78>] ? alloc_pages_current+0xa4/0x1bc
+ [<ffffffff81253a89>] ? __get_free_pages+0x25/0x50
+ [<ffffffff81338993>] ? copy_mount_options.part.17+0x183/0x264
+ [<ffffffff8133e3a9>] SyS_mount+0x105/0x1d0
+ [<ffffffff8133e2a4>] ? copy_mnt_ns+0x560/0x560
+ [<ffffffff810e8391>] ? msa_space_switch_handler+0x13d/0x190
+ [<ffffffff81be184a>] entry_SYSCALL_64_fastpath+0x1e/0x97
+ [<ffffffff810e9274>] ? msa_space_switch+0xb0/0xe0
+Memory state around the buggy address:
+ ffff8800842cf180: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+ ffff8800842cf200: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+>ffff8800842cf280: fc fc fc fc fc fc 00 00 00 00 01 fc fc fc fc fc
+ ^
+ ffff8800842cf300: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+ ffff8800842cf380: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+==================================================================
+
+Cc: stable@vger.kernel.org
+Reported-by: Kunkun Xu <xukunkun1@huawei.com>
+Signed-off-by: lizhe <lizhe67@huawei.com>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/jffs2/scan.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/jffs2/scan.c
++++ b/fs/jffs2/scan.c
+@@ -1079,7 +1079,7 @@ static int jffs2_scan_dirent_node(struct
+ memcpy(&fd->name, rd->name, checkedlen);
+ fd->name[checkedlen] = 0;
+
+- crc = crc32(0, fd->name, rd->nsize);
++ crc = crc32(0, fd->name, checkedlen);
+ if (crc != je32_to_cpu(rd->name_crc)) {
+ pr_notice("%s(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+ __func__, ofs, je32_to_cpu(rd->name_crc), crc);
--- /dev/null
+From 42984af09afc414d540fcc8247f42894b0378a91 Mon Sep 17 00:00:00 2001
+From: Joel Stanley <joel@jms.id.au>
+Date: Wed, 31 Mar 2021 00:15:37 +1030
+Subject: jffs2: Hook up splice_write callback
+
+From: Joel Stanley <joel@jms.id.au>
+
+commit 42984af09afc414d540fcc8247f42894b0378a91 upstream.
+
+overlayfs using jffs2 as the upper filesystem would fail in some cases
+since moving to v5.10. The test case used was to run 'touch' on a file
+that exists in the lower fs, causing the modification time to be
+updated. It returns EINVAL when the bug is triggered.
+
+A bisection showed this was introduced in v5.9-rc1, with commit
+36e2c7421f02 ("fs: don't allow splice read/write without explicit ops").
+Reverting that commit restores the expected behaviour.
+
+Some digging showed that this was due to jffs2 lacking an implementation
+of splice_write. (For unknown reasons the warn_unsupported that should
+trigger was not displaying any output).
+
+Adding this patch resolved the issue and the test now passes.
+
+Cc: stable@vger.kernel.org
+Fixes: 36e2c7421f02 ("fs: don't allow splice read/write without explicit ops")
+Signed-off-by: Joel Stanley <joel@jms.id.au>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Tested-by: Lei YU <yulei.sh@bytedance.com>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/jffs2/file.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/jffs2/file.c
++++ b/fs/jffs2/file.c
+@@ -57,6 +57,7 @@ const struct file_operations jffs2_file_
+ .mmap = generic_file_readonly_mmap,
+ .fsync = jffs2_fsync,
+ .splice_read = generic_file_splice_read,
++ .splice_write = iter_file_splice_write,
+ };
+
+ /* jffs2_file_inode_operations */
--- /dev/null
+From cbaf3f6af9c268caf558c8e7ec52bcb35c5455dd Mon Sep 17 00:00:00 2001
+From: Ido Schimmel <idosch@nvidia.com>
+Date: Thu, 6 May 2021 10:23:08 +0300
+Subject: mlxsw: spectrum_mr: Update egress RIF list before route's action
+
+From: Ido Schimmel <idosch@nvidia.com>
+
+commit cbaf3f6af9c268caf558c8e7ec52bcb35c5455dd upstream.
+
+Each multicast route that is forwarding packets (as opposed to trapping
+them) points to a list of egress router interfaces (RIFs) through which
+packets are replicated.
+
+A route's action can transition from trap to forward when a RIF is
+created for one of the route's egress virtual interfaces (eVIF). When
+this happens, the route's action is first updated and only later the
+list of egress RIFs is committed to the device.
+
+This results in the route pointing to an invalid list. In case the list
+pointer is out of range (due to uninitialized memory), the device will
+complain:
+
+mlxsw_spectrum2 0000:06:00.0: EMAD reg access failed (tid=5733bf490000905c,reg_id=300f(pefa),type=write,status=7(bad parameter))
+
+Fix this by first committing the list of egress RIFs to the device and
+only later update the route's action.
+
+Note that a fix is not needed in the reverse function (i.e.,
+mlxsw_sp_mr_route_evif_unresolve()), as there the route's action is
+first updated and only later the RIF is removed from the list.
+
+Cc: stable@vger.kernel.org
+Fixes: c011ec1bbfd6 ("mlxsw: spectrum: Add the multicast routing offloading logic")
+Signed-off-by: Ido Schimmel <idosch@nvidia.com>
+Reviewed-by: Petr Machata <petrm@nvidia.com>
+Link: https://lore.kernel.org/r/20210506072308.3834303-1-idosch@idosch.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c | 30 +++++++++++-----------
+ 1 file changed, 15 insertions(+), 15 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+@@ -535,6 +535,16 @@ mlxsw_sp_mr_route_evif_resolve(struct ml
+ u16 erif_index = 0;
+ int err;
+
++ /* Add the eRIF */
++ if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
++ erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
++ err = mr->mr_ops->route_erif_add(mlxsw_sp,
++ rve->mr_route->route_priv,
++ erif_index);
++ if (err)
++ return err;
++ }
++
+ /* Update the route action, as the new eVIF can be a tunnel or a pimreg
+ * device which will require updating the action.
+ */
+@@ -544,17 +554,7 @@ mlxsw_sp_mr_route_evif_resolve(struct ml
+ rve->mr_route->route_priv,
+ route_action);
+ if (err)
+- return err;
+- }
+-
+- /* Add the eRIF */
+- if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
+- erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
+- err = mr->mr_ops->route_erif_add(mlxsw_sp,
+- rve->mr_route->route_priv,
+- erif_index);
+- if (err)
+- goto err_route_erif_add;
++ goto err_route_action_update;
+ }
+
+ /* Update the minimum MTU */
+@@ -572,14 +572,14 @@ mlxsw_sp_mr_route_evif_resolve(struct ml
+ return 0;
+
+ err_route_min_mtu_update:
+- if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
+- mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
+- erif_index);
+-err_route_erif_add:
+ if (route_action != rve->mr_route->route_action)
+ mr->mr_ops->route_action_update(mlxsw_sp,
+ rve->mr_route->route_priv,
+ rve->mr_route->route_action);
++err_route_action_update:
++ if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
++ mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
++ erif_index);
+ return err;
+ }
+
--- /dev/null
+From 31fe34a0118e0acc958c802e830ad5d37ef6b1d3 Mon Sep 17 00:00:00 2001
+From: Davide Caratti <dcaratti@redhat.com>
+Date: Wed, 28 Apr 2021 15:23:14 +0200
+Subject: net/sched: sch_frag: fix stack OOB read while fragmenting IPv4 packets
+
+From: Davide Caratti <dcaratti@redhat.com>
+
+commit 31fe34a0118e0acc958c802e830ad5d37ef6b1d3 upstream.
+
+when 'act_mirred' tries to fragment IPv4 packets that had been previously
+re-assembled using 'act_ct', splats like the following can be observed on
+kernels built with KASAN:
+
+ BUG: KASAN: stack-out-of-bounds in ip_do_fragment+0x1b03/0x1f60
+ Read of size 1 at addr ffff888147009574 by task ping/947
+
+ CPU: 0 PID: 947 Comm: ping Not tainted 5.12.0-rc6+ #418
+ Hardware name: Red Hat KVM, BIOS 1.11.1-4.module+el8.1.0+4066+0f1aadab 04/01/2014
+ Call Trace:
+ <IRQ>
+ dump_stack+0x92/0xc1
+ print_address_description.constprop.7+0x1a/0x150
+ kasan_report.cold.13+0x7f/0x111
+ ip_do_fragment+0x1b03/0x1f60
+ sch_fragment+0x4bf/0xe40
+ tcf_mirred_act+0xc3d/0x11a0 [act_mirred]
+ tcf_action_exec+0x104/0x3e0
+ fl_classify+0x49a/0x5e0 [cls_flower]
+ tcf_classify_ingress+0x18a/0x820
+ __netif_receive_skb_core+0xae7/0x3340
+ __netif_receive_skb_one_core+0xb6/0x1b0
+ process_backlog+0x1ef/0x6c0
+ __napi_poll+0xaa/0x500
+ net_rx_action+0x702/0xac0
+ __do_softirq+0x1e4/0x97f
+ do_softirq+0x71/0x90
+ </IRQ>
+ __local_bh_enable_ip+0xdb/0xf0
+ ip_finish_output2+0x760/0x2120
+ ip_do_fragment+0x15a5/0x1f60
+ __ip_finish_output+0x4c2/0xea0
+ ip_output+0x1ca/0x4d0
+ ip_send_skb+0x37/0xa0
+ raw_sendmsg+0x1c4b/0x2d00
+ sock_sendmsg+0xdb/0x110
+ __sys_sendto+0x1d7/0x2b0
+ __x64_sys_sendto+0xdd/0x1b0
+ do_syscall_64+0x33/0x40
+ entry_SYSCALL_64_after_hwframe+0x44/0xae
+ RIP: 0033:0x7f82e13853eb
+ Code: 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 f3 0f 1e fa 48 8d 05 75 42 2c 00 41 89 ca 8b 00 85 c0 75 14 b8 2c 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 75 c3 0f 1f 40 00 41 57 4d 89 c7 41 56 41 89
+ RSP: 002b:00007ffe01fad888 EFLAGS: 00000246 ORIG_RAX: 000000000000002c
+ RAX: ffffffffffffffda RBX: 00005571aac13700 RCX: 00007f82e13853eb
+ RDX: 0000000000002330 RSI: 00005571aac13700 RDI: 0000000000000003
+ RBP: 0000000000002330 R08: 00005571aac10500 R09: 0000000000000010
+ R10: 0000000000000000 R11: 0000000000000246 R12: 00007ffe01faefb0
+ R13: 00007ffe01fad890 R14: 00007ffe01fad980 R15: 00005571aac0f0a0
+
+ The buggy address belongs to the page:
+ page:000000001dff2e03 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x147009
+ flags: 0x17ffffc0001000(reserved)
+ raw: 0017ffffc0001000 ffffea00051c0248 ffffea00051c0248 0000000000000000
+ raw: 0000000000000000 0000000000000000 00000001ffffffff 0000000000000000
+ page dumped because: kasan: bad access detected
+
+ Memory state around the buggy address:
+ ffff888147009400: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ ffff888147009480: f1 f1 f1 f1 04 f2 f2 f2 f2 f2 f2 f2 00 00 00 00
+ >ffff888147009500: 00 00 00 00 00 00 00 00 00 00 f2 f2 f2 f2 f2 f2
+ ^
+ ffff888147009580: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ ffff888147009600: 00 00 00 00 00 00 00 00 00 00 00 00 00 f2 f2 f2
+
+for IPv4 packets, sch_fragment() uses a temporary struct dst_entry. Then,
+in the following call graph:
+
+ ip_do_fragment()
+ ip_skb_dst_mtu()
+ ip_dst_mtu_maybe_forward()
+ ip_mtu_locked()
+
+the pointer to struct dst_entry is used as pointer to struct rtable: this
+turns the access to struct members like rt_mtu_locked into an OOB read in
+the stack. Fix this changing the temporary variable used for IPv4 packets
+in sch_fragment(), similarly to what is done for IPv6 few lines below.
+
+Fixes: c129412f74e9 ("net/sched: sch_frag: add generic packet fragment support.")
+Cc: <stable@vger.kernel.org> # 5.11
+Reported-by: Shuang Li <shuali@redhat.com>
+Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Acked-by: Cong Wang <cong.wang@bytedance.com>
+Signed-off-by: Davide Caratti <dcaratti@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/sch_frag.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/net/sched/sch_frag.c
++++ b/net/sched/sch_frag.c
+@@ -90,16 +90,16 @@ static int sch_fragment(struct net *net,
+ }
+
+ if (skb_protocol(skb, true) == htons(ETH_P_IP)) {
+- struct dst_entry sch_frag_dst;
++ struct rtable sch_frag_rt = { 0 };
+ unsigned long orig_dst;
+
+ sch_frag_prepare_frag(skb, xmit);
+- dst_init(&sch_frag_dst, &sch_frag_dst_ops, NULL, 1,
++ dst_init(&sch_frag_rt.dst, &sch_frag_dst_ops, NULL, 1,
+ DST_OBSOLETE_NONE, DST_NOCOUNT);
+- sch_frag_dst.dev = skb->dev;
++ sch_frag_rt.dst.dev = skb->dev;
+
+ orig_dst = skb->_skb_refdst;
+- skb_dst_set_noref(skb, &sch_frag_dst);
++ skb_dst_set_noref(skb, &sch_frag_rt.dst);
+ IPCB(skb)->frag_max_size = mru;
+
+ ret = ip_do_fragment(net, skb->sk, skb, sch_frag_xmit);
--- /dev/null
+From 39fd01863616964f009599e50ca5c6ea9ebf88d6 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+Date: Thu, 15 Apr 2021 15:41:57 -0400
+Subject: NFS: Don't discard pNFS layout segments that are marked for return
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+commit 39fd01863616964f009599e50ca5c6ea9ebf88d6 upstream.
+
+If the pNFS layout segment is marked with the NFS_LSEG_LAYOUTRETURN
+flag, then the assumption is that it has some reporting requirement
+to perform through a layoutreturn (e.g. flexfiles layout stats or error
+information).
+
+Fixes: e0b7d420f72a ("pNFS: Don't discard layout segments that are marked for return")
+Cc: stable@vger.kernel.org
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfs/pnfs.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -2468,6 +2468,9 @@ pnfs_mark_matching_lsegs_return(struct p
+
+ assert_spin_locked(&lo->plh_inode->i_lock);
+
++ if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
++ tmp_list = &lo->plh_return_segs;
++
+ list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
+ if (pnfs_match_lseg_recall(lseg, return_range, seq)) {
+ dprintk("%s: marking lseg %p iomode %d "
+@@ -2475,6 +2478,8 @@ pnfs_mark_matching_lsegs_return(struct p
+ lseg, lseg->pls_range.iomode,
+ lseg->pls_range.offset,
+ lseg->pls_range.length);
++ if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
++ tmp_list = &lo->plh_return_segs;
+ if (mark_lseg_invalid(lseg, tmp_list))
+ continue;
+ remaining++;
--- /dev/null
+From c09f11ef35955785f92369e25819bf0629df2e59 Mon Sep 17 00:00:00 2001
+From: Randy Dunlap <rdunlap@infradead.org>
+Date: Mon, 1 Mar 2021 16:19:30 -0800
+Subject: NFS: fs_context: validate UDP retrans to prevent shift out-of-bounds
+
+From: Randy Dunlap <rdunlap@infradead.org>
+
+commit c09f11ef35955785f92369e25819bf0629df2e59 upstream.
+
+Fix shift out-of-bounds in xprt_calc_majortimeo(). This is caused
+by a garbage timeout (retrans) mount option being passed to nfs mount,
+in this case from syzkaller.
+
+If the protocol is XPRT_TRANSPORT_UDP, then 'retrans' is a shift
+value for a 64-bit long integer, so 'retrans' cannot be >= 64.
+If it is >= 64, fail the mount and return an error.
+
+Fixes: 9954bf92c0cd ("NFS: Move mount parameterisation bits into their own file")
+Reported-by: syzbot+ba2e91df8f74809417fa@syzkaller.appspotmail.com
+Reported-by: syzbot+f3a0fa110fd630ab56c8@syzkaller.appspotmail.com
+Signed-off-by: Randy Dunlap <rdunlap@infradead.org>
+Cc: Trond Myklebust <trond.myklebust@hammerspace.com>
+Cc: Anna Schumaker <anna.schumaker@netapp.com>
+Cc: linux-nfs@vger.kernel.org
+Cc: David Howells <dhowells@redhat.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: stable@vger.kernel.org
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfs/fs_context.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/fs/nfs/fs_context.c
++++ b/fs/nfs/fs_context.c
+@@ -974,6 +974,15 @@ static int nfs23_parse_monolithic(struct
+ sizeof(mntfh->data) - mntfh->size);
+
+ /*
++ * for proto == XPRT_TRANSPORT_UDP, which is what uses
++ * to_exponential, implying shift: limit the shift value
++ * to BITS_PER_LONG (majortimeo is unsigned long)
++ */
++ if (!(data->flags & NFS_MOUNT_TCP)) /* this will be UDP */
++ if (data->retrans >= 64) /* shift value is too large */
++ goto out_invalid_data;
++
++ /*
+ * Translate to nfs_fs_context, which nfs_fill_super
+ * can deal with.
+ */
+@@ -1073,6 +1082,9 @@ out_no_address:
+
+ out_invalid_fh:
+ return nfs_invalf(fc, "NFS: invalid root filehandle");
++
++out_invalid_data:
++ return nfs_invalf(fc, "NFS: invalid binary mount data");
+ }
+
+ #if IS_ENABLED(CONFIG_NFS_V4)
--- /dev/null
+From de144ff4234f935bd2150108019b5d87a90a8a96 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+Date: Sun, 18 Apr 2021 15:00:45 -0400
+Subject: NFSv4: Don't discard segments marked for return in _pnfs_return_layout()
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+commit de144ff4234f935bd2150108019b5d87a90a8a96 upstream.
+
+If the pNFS layout segment is marked with the NFS_LSEG_LAYOUTRETURN
+flag, then the assumption is that it has some reporting requirement
+to perform through a layoutreturn (e.g. flexfiles layout stats or error
+information).
+
+Fixes: 6d597e175012 ("pnfs: only tear down lsegs that precede seqid in LAYOUTRETURN args")
+Cc: stable@vger.kernel.org
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfs/pnfs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -1344,7 +1344,7 @@ _pnfs_return_layout(struct inode *ino)
+ }
+ valid_layout = pnfs_layout_is_valid(lo);
+ pnfs_clear_layoutcommit(ino, &tmp_list);
+- pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL, 0);
++ pnfs_mark_matching_lsegs_return(lo, &tmp_list, NULL, 0);
+
+ if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
+ struct pnfs_layout_range range = {
--- /dev/null
+From 7c0ea5930c1c211931819d83cfb157bff1539a4c Mon Sep 17 00:00:00 2001
+From: Davide Caratti <dcaratti@redhat.com>
+Date: Wed, 28 Apr 2021 15:23:07 +0200
+Subject: openvswitch: fix stack OOB read while fragmenting IPv4 packets
+
+From: Davide Caratti <dcaratti@redhat.com>
+
+commit 7c0ea5930c1c211931819d83cfb157bff1539a4c upstream.
+
+running openvswitch on kernels built with KASAN, it's possible to see the
+following splat while testing fragmentation of IPv4 packets:
+
+ BUG: KASAN: stack-out-of-bounds in ip_do_fragment+0x1b03/0x1f60
+ Read of size 1 at addr ffff888112fc713c by task handler2/1367
+
+ CPU: 0 PID: 1367 Comm: handler2 Not tainted 5.12.0-rc6+ #418
+ Hardware name: Red Hat KVM, BIOS 1.11.1-4.module+el8.1.0+4066+0f1aadab 04/01/2014
+ Call Trace:
+ dump_stack+0x92/0xc1
+ print_address_description.constprop.7+0x1a/0x150
+ kasan_report.cold.13+0x7f/0x111
+ ip_do_fragment+0x1b03/0x1f60
+ ovs_fragment+0x5bf/0x840 [openvswitch]
+ do_execute_actions+0x1bd5/0x2400 [openvswitch]
+ ovs_execute_actions+0xc8/0x3d0 [openvswitch]
+ ovs_packet_cmd_execute+0xa39/0x1150 [openvswitch]
+ genl_family_rcv_msg_doit.isra.15+0x227/0x2d0
+ genl_rcv_msg+0x287/0x490
+ netlink_rcv_skb+0x120/0x380
+ genl_rcv+0x24/0x40
+ netlink_unicast+0x439/0x630
+ netlink_sendmsg+0x719/0xbf0
+ sock_sendmsg+0xe2/0x110
+ ____sys_sendmsg+0x5ba/0x890
+ ___sys_sendmsg+0xe9/0x160
+ __sys_sendmsg+0xd3/0x170
+ do_syscall_64+0x33/0x40
+ entry_SYSCALL_64_after_hwframe+0x44/0xae
+ RIP: 0033:0x7f957079db07
+ Code: c3 66 90 41 54 41 89 d4 55 48 89 f5 53 89 fb 48 83 ec 10 e8 eb ec ff ff 44 89 e2 48 89 ee 89 df 41 89 c0 b8 2e 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 35 44 89 c7 48 89 44 24 08 e8 24 ed ff ff 48
+ RSP: 002b:00007f956ce35a50 EFLAGS: 00000293 ORIG_RAX: 000000000000002e
+ RAX: ffffffffffffffda RBX: 0000000000000019 RCX: 00007f957079db07
+ RDX: 0000000000000000 RSI: 00007f956ce35ae0 RDI: 0000000000000019
+ RBP: 00007f956ce35ae0 R08: 0000000000000000 R09: 00007f9558006730
+ R10: 0000000000000000 R11: 0000000000000293 R12: 0000000000000000
+ R13: 00007f956ce37308 R14: 00007f956ce35f80 R15: 00007f956ce35ae0
+
+ The buggy address belongs to the page:
+ page:00000000af2a1d93 refcount:0 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x112fc7
+ flags: 0x17ffffc0000000()
+ raw: 0017ffffc0000000 0000000000000000 dead000000000122 0000000000000000
+ raw: 0000000000000000 0000000000000000 00000000ffffffff 0000000000000000
+ page dumped because: kasan: bad access detected
+
+ addr ffff888112fc713c is located in stack of task handler2/1367 at offset 180 in frame:
+ ovs_fragment+0x0/0x840 [openvswitch]
+
+ this frame has 2 objects:
+ [32, 144) 'ovs_dst'
+ [192, 424) 'ovs_rt'
+
+ Memory state around the buggy address:
+ ffff888112fc7000: f3 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ ffff888112fc7080: 00 f1 f1 f1 f1 00 00 00 00 00 00 00 00 00 00 00
+ >ffff888112fc7100: 00 00 00 f2 f2 f2 f2 f2 f2 00 00 00 00 00 00 00
+ ^
+ ffff888112fc7180: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ ffff888112fc7200: 00 00 00 00 00 00 f2 f2 f2 00 00 00 00 00 00 00
+
+for IPv4 packets, ovs_fragment() uses a temporary struct dst_entry. Then,
+in the following call graph:
+
+ ip_do_fragment()
+ ip_skb_dst_mtu()
+ ip_dst_mtu_maybe_forward()
+ ip_mtu_locked()
+
+the pointer to struct dst_entry is used as pointer to struct rtable: this
+turns the access to struct members like rt_mtu_locked into an OOB read in
+the stack. Fix this changing the temporary variable used for IPv4 packets
+in ovs_fragment(), similarly to what is done for IPv6 few lines below.
+
+Fixes: d52e5a7e7ca4 ("ipv4: lock mtu in fnhe when received PMTU < net.ipv4.route.min_pmt")
+Cc: <stable@vger.kernel.org>
+Acked-by: Eelco Chaudron <echaudro@redhat.com>
+Signed-off-by: Davide Caratti <dcaratti@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/openvswitch/actions.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/net/openvswitch/actions.c
++++ b/net/openvswitch/actions.c
+@@ -827,17 +827,17 @@ static void ovs_fragment(struct net *net
+ }
+
+ if (key->eth.type == htons(ETH_P_IP)) {
+- struct dst_entry ovs_dst;
++ struct rtable ovs_rt = { 0 };
+ unsigned long orig_dst;
+
+ prepare_frag(vport, skb, orig_network_offset,
+ ovs_key_mac_proto(key));
+- dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
++ dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
+ DST_OBSOLETE_NONE, DST_NOCOUNT);
+- ovs_dst.dev = vport->dev;
++ ovs_rt.dst.dev = vport->dev;
+
+ orig_dst = skb->_skb_refdst;
+- skb_dst_set_noref(skb, &ovs_dst);
++ skb_dst_set_noref(skb, &ovs_rt.dst);
+ IPCB(skb)->frag_max_size = mru;
+
+ ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
--- /dev/null
+From f5668260b872e89b8d3942a8b7d4278aa9c2c981 Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+Date: Thu, 29 Apr 2021 16:52:09 +0000
+Subject: powerpc/32: Fix boot failure with CONFIG_STACKPROTECTOR
+
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+
+commit f5668260b872e89b8d3942a8b7d4278aa9c2c981 upstream.
+
+Commit 7c95d8893fb5 ("powerpc: Change calling convention for
+create_branch() et. al.") complexified the frame of function
+do_feature_fixups(), leading to GCC setting up a stack
+guard when CONFIG_STACKPROTECTOR is selected.
+
+The problem is that do_feature_fixups() is called very early
+while 'current' in r2 is not set up yet and the code is still
+not at the final address used at link time.
+
+So, like other instrumentation, stack protection needs to be
+deactivated for feature-fixups.c and code-patching.c
+
+Fixes: 7c95d8893fb5 ("powerpc: Change calling convention for create_branch() et. al.")
+Cc: stable@vger.kernel.org # v5.8+
+Reported-by: Jonathan Neuschaefer <j.neuschaefer@gmx.net>
+Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Tested-by: Jonathan Neuschaefer <j.neuschaefer@gmx.net>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/b688fe82927b330349d9e44553363fa451ea4d95.1619715114.git.christophe.leroy@csgroup.eu
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/lib/Makefile | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/powerpc/lib/Makefile
++++ b/arch/powerpc/lib/Makefile
+@@ -5,6 +5,9 @@
+
+ ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
+
++CFLAGS_code-patching.o += -fno-stack-protector
++CFLAGS_feature-fixups.o += -fno-stack-protector
++
+ CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE)
+ CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)
+
--- /dev/null
+From 5ae5bc12d0728db60a0aa9b62160ffc038875f1a Mon Sep 17 00:00:00 2001
+From: Mahesh Salgaonkar <mahesh@linux.ibm.com>
+Date: Mon, 12 Apr 2021 13:22:50 +0530
+Subject: powerpc/eeh: Fix EEH handling for hugepages in ioremap space.
+
+From: Mahesh Salgaonkar <mahesh@linux.ibm.com>
+
+commit 5ae5bc12d0728db60a0aa9b62160ffc038875f1a upstream.
+
+During the EEH MMIO error checking, the current implementation fails to map
+the (virtual) MMIO address back to the pci device on radix with hugepage
+mappings for I/O. This results in failure to dispatch EEH event with no
+recovery even when EEH capability has been enabled on the device.
+
+eeh_check_failure(token) # token = virtual MMIO address
+ addr = eeh_token_to_phys(token);
+ edev = eeh_addr_cache_get_dev(addr);
+ if (!edev)
+ return 0;
+ eeh_dev_check_failure(edev); <= Dispatch the EEH event
+
+In case of hugepage mappings, eeh_token_to_phys() has a bug in virt -> phys
+translation that results in wrong physical address, which is then passed to
+eeh_addr_cache_get_dev() to match it against cached pci I/O address ranges
+to get to a PCI device. Hence, it fails to find a match and the EEH event
+never gets dispatched leaving the device in failed state.
+
+The commit 33439620680be ("powerpc/eeh: Handle hugepages in ioremap space")
+introduced following logic to translate virt to phys for hugepage mappings:
+
+eeh_token_to_phys():
++ pa = pte_pfn(*ptep);
++
++ /* On radix we can do hugepage mappings for io, so handle that */
++ if (hugepage_shift) {
++ pa <<= hugepage_shift; <= This is wrong
++ pa |= token & ((1ul << hugepage_shift) - 1);
++ }
+
+This patch fixes the virt -> phys translation in eeh_token_to_phys()
+function.
+
+ $ cat /sys/kernel/debug/powerpc/eeh_address_cache
+ mem addr range [0x0000040080000000-0x00000400807fffff]: 0030:01:00.1
+ mem addr range [0x0000040080800000-0x0000040080ffffff]: 0030:01:00.1
+ mem addr range [0x0000040081000000-0x00000400817fffff]: 0030:01:00.0
+ mem addr range [0x0000040081800000-0x0000040081ffffff]: 0030:01:00.0
+ mem addr range [0x0000040082000000-0x000004008207ffff]: 0030:01:00.1
+ mem addr range [0x0000040082080000-0x00000400820fffff]: 0030:01:00.0
+ mem addr range [0x0000040082100000-0x000004008210ffff]: 0030:01:00.1
+ mem addr range [0x0000040082110000-0x000004008211ffff]: 0030:01:00.0
+
+Above is the list of cached io address ranges of pci 0030:01:00.<fn>.
+
+Before this patch:
+
+Tracing 'arg1' of function eeh_addr_cache_get_dev() during error injection
+clearly shows that 'addr=' contains wrong physical address:
+
+ kworker/u16:0-7 [001] .... 108.883775: eeh_addr_cache_get_dev:
+ (eeh_addr_cache_get_dev+0xc/0xf0) addr=0x80103000a510
+
+dmesg shows no EEH recovery messages:
+
+ [ 108.563768] bnx2x: [bnx2x_timer:5801(eth2)]MFW seems hanged: drv_pulse (0x9ae) != mcp_pulse (0x7fff)
+ [ 108.563788] bnx2x: [bnx2x_hw_stats_update:870(eth2)]NIG timer max (4294967295)
+ [ 108.883788] bnx2x: [bnx2x_acquire_hw_lock:2013(eth1)]lock_status 0xffffffff resource_bit 0x1
+ [ 108.884407] bnx2x 0030:01:00.0 eth1: MDC/MDIO access timeout
+ [ 108.884976] bnx2x 0030:01:00.0 eth1: MDC/MDIO access timeout
+ <..>
+
+After this patch:
+
+eeh_addr_cache_get_dev() trace shows correct physical address:
+
+ <idle>-0 [001] ..s. 1043.123828: eeh_addr_cache_get_dev:
+ (eeh_addr_cache_get_dev+0xc/0xf0) addr=0x40080bc7cd8
+
+dmesg logs show EEH recovery getting triggered:
+
+ [ 964.323980] bnx2x: [bnx2x_timer:5801(eth2)]MFW seems hanged: drv_pulse (0x746f) != mcp_pulse (0x7fff)
+ [ 964.323991] EEH: Recovering PHB#30-PE#10000
+ [ 964.324002] EEH: PE location: N/A, PHB location: N/A
+ [ 964.324006] EEH: Frozen PHB#30-PE#10000 detected
+ <..>
+
+Fixes: 33439620680b ("powerpc/eeh: Handle hugepages in ioremap space")
+Cc: stable@vger.kernel.org # v5.3+
+Reported-by: Dominic DeMarco <ddemarc@us.ibm.com>
+Signed-off-by: Mahesh Salgaonkar <mahesh@linux.ibm.com>
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/161821396263.48361.2796709239866588652.stgit@jupiter
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kernel/eeh.c | 11 ++++-------
+ 1 file changed, 4 insertions(+), 7 deletions(-)
+
+--- a/arch/powerpc/kernel/eeh.c
++++ b/arch/powerpc/kernel/eeh.c
+@@ -362,14 +362,11 @@ static inline unsigned long eeh_token_to
+ pa = pte_pfn(*ptep);
+
+ /* On radix we can do hugepage mappings for io, so handle that */
+- if (hugepage_shift) {
+- pa <<= hugepage_shift;
+- pa |= token & ((1ul << hugepage_shift) - 1);
+- } else {
+- pa <<= PAGE_SHIFT;
+- pa |= token & (PAGE_SIZE - 1);
+- }
++ if (!hugepage_shift)
++ hugepage_shift = PAGE_SHIFT;
+
++ pa <<= PAGE_SHIFT;
++ pa |= token & ((1ul << hugepage_shift) - 1);
+ return pa;
+ }
+
--- /dev/null
+From 7de21e679e6a789f3729e8402bc440b623a28eae Mon Sep 17 00:00:00 2001
+From: Tony Ambardar <tony.ambardar@gmail.com>
+Date: Thu, 17 Sep 2020 06:54:37 -0700
+Subject: powerpc: fix EDEADLOCK redefinition error in uapi/asm/errno.h
+
+From: Tony Ambardar <tony.ambardar@gmail.com>
+
+commit 7de21e679e6a789f3729e8402bc440b623a28eae upstream.
+
+A few archs like powerpc have different errno.h values for macros
+EDEADLOCK and EDEADLK. In code including both libc and linux versions of
+errno.h, this can result in multiple definitions of EDEADLOCK in the
+include chain. Definitions to the same value (e.g. seen with mips) do
+not raise warnings, but on powerpc there are redefinitions changing the
+value, which raise warnings and errors (if using "-Werror").
+
+Guard against these redefinitions to avoid build errors like the following,
+first seen cross-compiling libbpf v5.8.9 for powerpc using GCC 8.4.0 with
+musl 1.1.24:
+
+ In file included from ../../arch/powerpc/include/uapi/asm/errno.h:5,
+ from ../../include/linux/err.h:8,
+ from libbpf.c:29:
+ ../../include/uapi/asm-generic/errno.h:40: error: "EDEADLOCK" redefined [-Werror]
+ #define EDEADLOCK EDEADLK
+
+ In file included from toolchain-powerpc_8540_gcc-8.4.0_musl/include/errno.h:10,
+ from libbpf.c:26:
+ toolchain-powerpc_8540_gcc-8.4.0_musl/include/bits/errno.h:58: note: this is the location of the previous definition
+ #define EDEADLOCK 58
+
+ cc1: all warnings being treated as errors
+
+Cc: Stable <stable@vger.kernel.org>
+Reported-by: Rosen Penev <rosenp@gmail.com>
+Signed-off-by: Tony Ambardar <Tony.Ambardar@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20200917135437.1238787-1-Tony.Ambardar@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/include/uapi/asm/errno.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/powerpc/include/uapi/asm/errno.h
++++ b/arch/powerpc/include/uapi/asm/errno.h
+@@ -2,6 +2,7 @@
+ #ifndef _ASM_POWERPC_ERRNO_H
+ #define _ASM_POWERPC_ERRNO_H
+
++#undef EDEADLOCK
+ #include <asm-generic/errno.h>
+
+ #undef EDEADLOCK
--- /dev/null
+From 40c753993e3aad51a12c21233486e2037417a4d6 Mon Sep 17 00:00:00 2001
+From: Sourabh Jain <sourabhjain@linux.ibm.com>
+Date: Thu, 29 Apr 2021 11:32:56 +0530
+Subject: powerpc/kexec_file: Use current CPU info while setting up FDT
+
+From: Sourabh Jain <sourabhjain@linux.ibm.com>
+
+commit 40c753993e3aad51a12c21233486e2037417a4d6 upstream.
+
+kexec_file_load() uses initial_boot_params in setting up the device tree
+for the kernel to be loaded. Though initial_boot_params holds info about
+CPUs at the time of boot, it doesn't account for hot added CPUs.
+
+So, kexec'ing with kexec_file_load() syscall leaves the kexec'ed kernel
+with inaccurate CPU info.
+
+If kdump kernel is loaded with kexec_file_load() syscall and the system
+crashes on a hot added CPU, the capture kernel hangs failing to identify
+the boot CPU, with no output.
+
+To avoid this from happening, extract current CPU info from of_root
+device node and use it for setting up the fdt in kexec_file_load case.
+
+Fixes: 6ecd0163d360 ("powerpc/kexec_file: Add appropriate regions for memory reserve map")
+Cc: stable@vger.kernel.org # v5.9+
+Signed-off-by: Sourabh Jain <sourabhjain@linux.ibm.com>
+Reviewed-by: Hari Bathini <hbathini@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20210429060256.199714-1-sourabhjain@linux.ibm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kexec/file_load_64.c | 92 ++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 92 insertions(+)
+
+--- a/arch/powerpc/kexec/file_load_64.c
++++ b/arch/powerpc/kexec/file_load_64.c
+@@ -961,6 +961,93 @@ unsigned int kexec_fdt_totalsize_ppc64(s
+ }
+
+ /**
++ * add_node_props - Reads node properties from device node structure and add
++ * them to fdt.
++ * @fdt: Flattened device tree of the kernel
++ * @node_offset: offset of the node to add a property at
++ * @dn: device node pointer
++ *
++ * Returns 0 on success, negative errno on error.
++ */
++static int add_node_props(void *fdt, int node_offset, const struct device_node *dn)
++{
++ int ret = 0;
++ struct property *pp;
++
++ if (!dn)
++ return -EINVAL;
++
++ for_each_property_of_node(dn, pp) {
++ ret = fdt_setprop(fdt, node_offset, pp->name, pp->value, pp->length);
++ if (ret < 0) {
++ pr_err("Unable to add %s property: %s\n", pp->name, fdt_strerror(ret));
++ return ret;
++ }
++ }
++ return ret;
++}
++
++/**
++ * update_cpus_node - Update cpus node of flattened device tree using of_root
++ * device node.
++ * @fdt: Flattened device tree of the kernel.
++ *
++ * Returns 0 on success, negative errno on error.
++ */
++static int update_cpus_node(void *fdt)
++{
++ struct device_node *cpus_node, *dn;
++ int cpus_offset, cpus_subnode_offset, ret = 0;
++
++ cpus_offset = fdt_path_offset(fdt, "/cpus");
++ if (cpus_offset < 0 && cpus_offset != -FDT_ERR_NOTFOUND) {
++ pr_err("Malformed device tree: error reading /cpus node: %s\n",
++ fdt_strerror(cpus_offset));
++ return cpus_offset;
++ }
++
++ if (cpus_offset > 0) {
++ ret = fdt_del_node(fdt, cpus_offset);
++ if (ret < 0) {
++ pr_err("Error deleting /cpus node: %s\n", fdt_strerror(ret));
++ return -EINVAL;
++ }
++ }
++
++ /* Add cpus node to fdt */
++ cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"), "cpus");
++ if (cpus_offset < 0) {
++ pr_err("Error creating /cpus node: %s\n", fdt_strerror(cpus_offset));
++ return -EINVAL;
++ }
++
++ /* Add cpus node properties */
++ cpus_node = of_find_node_by_path("/cpus");
++ ret = add_node_props(fdt, cpus_offset, cpus_node);
++ of_node_put(cpus_node);
++ if (ret < 0)
++ return ret;
++
++ /* Loop through all subnodes of cpus and add them to fdt */
++ for_each_node_by_type(dn, "cpu") {
++ cpus_subnode_offset = fdt_add_subnode(fdt, cpus_offset, dn->full_name);
++ if (cpus_subnode_offset < 0) {
++ pr_err("Unable to add %s subnode: %s\n", dn->full_name,
++ fdt_strerror(cpus_subnode_offset));
++ ret = cpus_subnode_offset;
++ goto out;
++ }
++
++ ret = add_node_props(fdt, cpus_subnode_offset, dn);
++ if (ret < 0)
++ goto out;
++ }
++out:
++ of_node_put(dn);
++ return ret;
++}
++
++/**
+ * setup_new_fdt_ppc64 - Update the flattend device-tree of the kernel
+ * being loaded.
+ * @image: kexec image being loaded.
+@@ -1020,6 +1107,11 @@ int setup_new_fdt_ppc64(const struct kim
+ }
+ }
+
++ /* Update cpus nodes information to account hotplug CPUs. */
++ ret = update_cpus_node(fdt);
++ if (ret < 0)
++ goto out;
++
+ /* Update memory reserve map */
+ ret = get_reserved_memory_ranges(&rmem);
+ if (ret)
--- /dev/null
+From ee1bc694fbaec1a662770703fc34a74abf418938 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sun, 25 Apr 2021 21:58:31 +1000
+Subject: powerpc/kvm: Fix build error when PPC_MEM_KEYS/PPC_PSERIES=n
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit ee1bc694fbaec1a662770703fc34a74abf418938 upstream.
+
+lkp reported a randconfig failure:
+
+ In file included from arch/powerpc/include/asm/book3s/64/pkeys.h:6,
+ from arch/powerpc/kvm/book3s_64_mmu_host.c:15:
+ arch/powerpc/include/asm/book3s/64/hash-pkey.h: In function 'hash__vmflag_to_pte_pkey_bits':
+ >> arch/powerpc/include/asm/book3s/64/hash-pkey.h:10:23: error: 'VM_PKEY_BIT0' undeclared
+ 10 | return (((vm_flags & VM_PKEY_BIT0) ? H_PTE_PKEY_BIT0 : 0x0UL) |
+ | ^~~~~~~~~~~~
+
+We added the include of book3s/64/pkeys.h for pte_to_hpte_pkey_bits(),
+but that header on its own should only be included when PPC_MEM_KEYS=y.
+Instead include linux/pkeys.h, which brings in the right definitions
+when PPC_MEM_KEYS=y and also provides empty stubs when PPC_MEM_KEYS=n.
+
+Fixes: e4e8bc1df691 ("powerpc/kvm: Fix PR KVM with KUAP/MEM_KEYS enabled")
+Cc: stable@vger.kernel.org # v5.11+
+Reported-by: kernel test robot <lkp@intel.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20210425115831.2818434-1-mpe@ellerman.id.au
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kvm/book3s_64_mmu_host.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
++++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
+@@ -8,11 +8,11 @@
+ */
+
+ #include <linux/kvm_host.h>
++#include <linux/pkeys.h>
+
+ #include <asm/kvm_ppc.h>
+ #include <asm/kvm_book3s.h>
+ #include <asm/book3s/64/mmu-hash.h>
+-#include <asm/book3s/64/pkeys.h>
+ #include <asm/machdep.h>
+ #include <asm/mmu_context.h>
+ #include <asm/hw_irq.h>
--- /dev/null
+From e4e8bc1df691ba5ba749d1e2b67acf9827e51a35 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Mon, 19 Apr 2021 22:01:39 +1000
+Subject: powerpc/kvm: Fix PR KVM with KUAP/MEM_KEYS enabled
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit e4e8bc1df691ba5ba749d1e2b67acf9827e51a35 upstream.
+
+The changes to add KUAP support with the hash MMU broke booting of KVM
+PR guests. The symptom is no visible progress of the guest, or possibly
+just "SLOF" being printed to the qemu console.
+
+Host code is still executing, but breaking into xmon might show a stack
+trace such as:
+
+ __might_fault+0x84/0xe0 (unreliable)
+ kvm_read_guest+0x1c8/0x2f0 [kvm]
+ kvmppc_ld+0x1b8/0x2d0 [kvm]
+ kvmppc_load_last_inst+0x50/0xa0 [kvm]
+ kvmppc_exit_pr_progint+0x178/0x220 [kvm_pr]
+ kvmppc_handle_exit_pr+0x31c/0xe30 [kvm_pr]
+ after_sprg3_load+0x80/0x90 [kvm_pr]
+ kvmppc_vcpu_run_pr+0x104/0x260 [kvm_pr]
+ kvmppc_vcpu_run+0x34/0x48 [kvm]
+ kvm_arch_vcpu_ioctl_run+0x340/0x450 [kvm]
+ kvm_vcpu_ioctl+0x2ac/0x8c0 [kvm]
+ sys_ioctl+0x320/0x1060
+ system_call_exception+0x160/0x270
+ system_call_common+0xf0/0x27c
+
+Bisect points to commit b2ff33a10c8b ("powerpc/book3s64/hash/kuap:
+Enable kuap on hash"), but that's just the commit that enabled KUAP with
+hash and made the bug visible.
+
+The root cause seems to be that KVM PR is creating kernel mappings that
+don't use the correct key, since we switched to using key 3.
+
+We have a helper for adding the right key value, however it's designed
+to take a pteflags variable, which the KVM code doesn't have. But we can
+make it work by passing 0 for the pteflags, and tell it explicitly that
+it should use the kernel key.
+
+With that changed guests boot successfully.
+
+Fixes: d94b827e89dc ("powerpc/book3s64/kuap: Use Key 3 for kernel mapping with hash translation")
+Cc: stable@vger.kernel.org # v5.11+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20210419120139.1455937-1-mpe@ellerman.id.au
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kvm/book3s_64_mmu_host.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
++++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
+@@ -12,6 +12,7 @@
+ #include <asm/kvm_ppc.h>
+ #include <asm/kvm_book3s.h>
+ #include <asm/book3s/64/mmu-hash.h>
++#include <asm/book3s/64/pkeys.h>
+ #include <asm/machdep.h>
+ #include <asm/mmu_context.h>
+ #include <asm/hw_irq.h>
+@@ -133,6 +134,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu
+ else
+ kvmppc_mmu_flush_icache(pfn);
+
++ rflags |= pte_to_hpte_pkey_bits(0, HPTE_USE_KERNEL_KEY);
+ rflags = (rflags & ~HPTE_R_WIMG) | orig_pte->wimg;
+
+ /*
--- /dev/null
+From 49c1d07fd04f54eb588c4a1dfcedc8d22c5ffd50 Mon Sep 17 00:00:00 2001
+From: Nicholas Piggin <npiggin@gmail.com>
+Date: Fri, 2 Apr 2021 12:41:24 +1000
+Subject: powerpc/powernv: Enable HAIL (HV AIL) for ISA v3.1 processors
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+commit 49c1d07fd04f54eb588c4a1dfcedc8d22c5ffd50 upstream.
+
+Starting with ISA v3.1, LPCR[AIL] no longer controls the interrupt
+mode for HV=1 interrupts. Instead, a new LPCR[HAIL] bit is defined
+which behaves like AIL=3 for HV interrupts when set.
+
+Set HAIL on bare metal to give us mmu-on interrupts and improve
+performance.
+
+This also fixes an scv bug: we don't implement scv real mode (AIL=0)
+vectors because they are at an inconvenient location, so we just
+disable scv support when AIL can not be set. However powernv assumes
+that LPCR[AIL] will enable AIL mode so it enables scv support despite
+HV interrupts being AIL=0, which causes scv interrupts to go off into
+the weeds.
+
+Fixes: 7fa95f9adaee ("powerpc/64s: system call support for scv/rfscv instructions")
+Cc: stable@vger.kernel.org # v5.9+
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20210402024124.545826-1-npiggin@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/include/asm/reg.h | 1 +
+ arch/powerpc/kernel/setup_64.c | 19 ++++++++++++++++---
+ 2 files changed, 17 insertions(+), 3 deletions(-)
+
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -441,6 +441,7 @@
+ #define LPCR_VRMA_LP1 ASM_CONST(0x0000800000000000)
+ #define LPCR_RMLS 0x1C000000 /* Implementation dependent RMO limit sel */
+ #define LPCR_RMLS_SH 26
++#define LPCR_HAIL ASM_CONST(0x0000000004000000) /* HV AIL (ISAv3.1) */
+ #define LPCR_ILE ASM_CONST(0x0000000002000000) /* !HV irqs set MSR:LE */
+ #define LPCR_AIL ASM_CONST(0x0000000001800000) /* Alternate interrupt location */
+ #define LPCR_AIL_0 ASM_CONST(0x0000000000000000) /* MMU off exception offset 0x0 */
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -232,10 +232,23 @@ static void cpu_ready_for_interrupts(voi
+ * If we are not in hypervisor mode the job is done once for
+ * the whole partition in configure_exceptions().
+ */
+- if (cpu_has_feature(CPU_FTR_HVMODE) &&
+- cpu_has_feature(CPU_FTR_ARCH_207S)) {
++ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ unsigned long lpcr = mfspr(SPRN_LPCR);
+- mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
++ unsigned long new_lpcr = lpcr;
++
++ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
++ /* P10 DD1 does not have HAIL */
++ if (pvr_version_is(PVR_POWER10) &&
++ (mfspr(SPRN_PVR) & 0xf00) == 0x100)
++ new_lpcr |= LPCR_AIL_3;
++ else
++ new_lpcr |= LPCR_HAIL;
++ } else if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
++ new_lpcr |= LPCR_AIL_3;
++ }
++
++ if (new_lpcr != lpcr)
++ mtspr(SPRN_LPCR, new_lpcr);
+ }
+
+ /*
--- /dev/null
+From 1c4bce6753857dc409a0197342d18764e7f4b741 Mon Sep 17 00:00:00 2001
+From: Dmitry Safonov <dima@arista.com>
+Date: Wed, 31 Mar 2021 16:48:46 +0000
+Subject: powerpc/vdso: Separate vvar vma from vdso
+
+From: Dmitry Safonov <dima@arista.com>
+
+commit 1c4bce6753857dc409a0197342d18764e7f4b741 upstream.
+
+Since commit 511157ab641e ("powerpc/vdso: Move vdso datapage up front")
+VVAR page is in front of the VDSO area. In result it breaks CRIU
+(Checkpoint Restore In Userspace) [1], where CRIU expects that "[vdso]"
+from /proc/../maps points at ELF/vdso image, rather than at VVAR data page.
+Laurent made a patch to keep CRIU working (by reading aux vector).
+But I think it still makes sense to separate two mappings into different
+VMAs. It will also make ppc64 less "special" for userspace and as
+a side-bonus will make VVAR page un-writable by debugger (which previously
+would COW page and can be unexpected).
+
+I opportunistically Cc stable on it: I understand that usually such
+stuff isn't a stable material, but that will allow us in CRIU have
+one workaround less that is needed just for one release (v5.11) on
+one platform (ppc64), which we otherwise have to maintain.
+I wouldn't go as far as to say that the commit 511157ab641e is ABI
+regression as no other userspace got broken, but I'd really appreciate
+if it gets backported to v5.11 after v5.12 is released, so as not
+to complicate already non-simple CRIU-vdso code. Thanks!
+
+[1]: https://github.com/checkpoint-restore/criu/issues/1417
+
+Cc: stable@vger.kernel.org # v5.11
+Signed-off-by: Dmitry Safonov <dima@arista.com>
+Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Tested-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Reviewed-by: Vincenzo Frascino <vincenzo.frascino@arm.com> # vDSO parts.
+Acked-by: Andrei Vagin <avagin@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/f401eb1ebc0bfc4d8f0e10dc8e525fd409eb68e2.1617209142.git.christophe.leroy@csgroup.eu
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/include/asm/mmu_context.h | 2 -
+ arch/powerpc/kernel/vdso.c | 54 +++++++++++++++++++++++----------
+ 2 files changed, 40 insertions(+), 16 deletions(-)
+
+--- a/arch/powerpc/include/asm/mmu_context.h
++++ b/arch/powerpc/include/asm/mmu_context.h
+@@ -263,7 +263,7 @@ extern void arch_exit_mmap(struct mm_str
+ static inline void arch_unmap(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+ {
+- unsigned long vdso_base = (unsigned long)mm->context.vdso - PAGE_SIZE;
++ unsigned long vdso_base = (unsigned long)mm->context.vdso;
+
+ if (start <= vdso_base && vdso_base < end)
+ mm->context.vdso = NULL;
+--- a/arch/powerpc/kernel/vdso.c
++++ b/arch/powerpc/kernel/vdso.c
+@@ -55,10 +55,10 @@ static int vdso_mremap(const struct vm_s
+ {
+ unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
+
+- if (new_size != text_size + PAGE_SIZE)
++ if (new_size != text_size)
+ return -EINVAL;
+
+- current->mm->context.vdso = (void __user *)new_vma->vm_start + PAGE_SIZE;
++ current->mm->context.vdso = (void __user *)new_vma->vm_start;
+
+ return 0;
+ }
+@@ -73,6 +73,10 @@ static int vdso64_mremap(const struct vm
+ return vdso_mremap(sm, new_vma, &vdso64_end - &vdso64_start);
+ }
+
++static struct vm_special_mapping vvar_spec __ro_after_init = {
++ .name = "[vvar]",
++};
++
+ static struct vm_special_mapping vdso32_spec __ro_after_init = {
+ .name = "[vdso]",
+ .mremap = vdso32_mremap,
+@@ -89,11 +93,11 @@ static struct vm_special_mapping vdso64_
+ */
+ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ {
+- struct mm_struct *mm = current->mm;
++ unsigned long vdso_size, vdso_base, mappings_size;
+ struct vm_special_mapping *vdso_spec;
++ unsigned long vvar_size = PAGE_SIZE;
++ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+- unsigned long vdso_size;
+- unsigned long vdso_base;
+
+ if (is_32bit_task()) {
+ vdso_spec = &vdso32_spec;
+@@ -110,8 +114,8 @@ static int __arch_setup_additional_pages
+ vdso_base = 0;
+ }
+
+- /* Add a page to the vdso size for the data page */
+- vdso_size += PAGE_SIZE;
++ mappings_size = vdso_size + vvar_size;
++ mappings_size += (VDSO_ALIGNMENT - 1) & PAGE_MASK;
+
+ /*
+ * pick a base address for the vDSO in process space. We try to put it
+@@ -119,9 +123,7 @@ static int __arch_setup_additional_pages
+ * and end up putting it elsewhere.
+ * Add enough to the size so that the result can be aligned.
+ */
+- vdso_base = get_unmapped_area(NULL, vdso_base,
+- vdso_size + ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
+- 0, 0);
++ vdso_base = get_unmapped_area(NULL, vdso_base, mappings_size, 0, 0);
+ if (IS_ERR_VALUE(vdso_base))
+ return vdso_base;
+
+@@ -133,7 +135,13 @@ static int __arch_setup_additional_pages
+ * install_special_mapping or the perf counter mmap tracking code
+ * will fail to recognise it as a vDSO.
+ */
+- mm->context.vdso = (void __user *)vdso_base + PAGE_SIZE;
++ mm->context.vdso = (void __user *)vdso_base + vvar_size;
++
++ vma = _install_special_mapping(mm, vdso_base, vvar_size,
++ VM_READ | VM_MAYREAD | VM_IO |
++ VM_DONTDUMP | VM_PFNMAP, &vvar_spec);
++ if (IS_ERR(vma))
++ return PTR_ERR(vma);
+
+ /*
+ * our vma flags don't have VM_WRITE so by default, the process isn't
+@@ -145,9 +153,12 @@ static int __arch_setup_additional_pages
+ * It's fine to use that for setting breakpoints in the vDSO code
+ * pages though.
+ */
+- vma = _install_special_mapping(mm, vdso_base, vdso_size,
++ vma = _install_special_mapping(mm, vdso_base + vvar_size, vdso_size,
+ VM_READ | VM_EXEC | VM_MAYREAD |
+ VM_MAYWRITE | VM_MAYEXEC, vdso_spec);
++ if (IS_ERR(vma))
++ do_munmap(mm, vdso_base, vvar_size, NULL);
++
+ return PTR_ERR_OR_ZERO(vma);
+ }
+
+@@ -249,11 +260,22 @@ static struct page ** __init vdso_setup_
+ if (!pagelist)
+ panic("%s: Cannot allocate page list for VDSO", __func__);
+
+- pagelist[0] = virt_to_page(vdso_data);
+-
+ for (i = 0; i < pages; i++)
+- pagelist[i + 1] = virt_to_page(start + i * PAGE_SIZE);
++ pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
++
++ return pagelist;
++}
++
++static struct page ** __init vvar_setup_pages(void)
++{
++ struct page **pagelist;
+
++ /* .pages is NULL-terminated */
++ pagelist = kcalloc(2, sizeof(struct page *), GFP_KERNEL);
++ if (!pagelist)
++ panic("%s: Cannot allocate page list for VVAR", __func__);
++
++ pagelist[0] = virt_to_page(vdso_data);
+ return pagelist;
+ }
+
+@@ -295,6 +317,8 @@ static int __init vdso_init(void)
+ if (IS_ENABLED(CONFIG_PPC64))
+ vdso64_spec.pages = vdso_setup_pages(&vdso64_start, &vdso64_end);
+
++ vvar_spec.pages = vvar_setup_pages();
++
+ smp_wmb();
+
+ return 0;
ubifs-only-check-replay-with-inode-type-to-judge-if-inode-linked.patch
f2fs-fix-error-handling-in-f2fs_end_enable_verity.patch
f2fs-fix-to-avoid-out-of-bounds-memory-access.patch
+mlxsw-spectrum_mr-update-egress-rif-list-before-route-s-action.patch
+openvswitch-fix-stack-oob-read-while-fragmenting-ipv4-packets.patch
+net-sched-sch_frag-fix-stack-oob-read-while-fragmenting-ipv4-packets.patch
+acpi-gtdt-don-t-corrupt-interrupt-mappings-on-watchdow-probe-failure.patch
+nfs-fs_context-validate-udp-retrans-to-prevent-shift-out-of-bounds.patch
+nfs-don-t-discard-pnfs-layout-segments-that-are-marked-for-return.patch
+nfsv4-don-t-discard-segments-marked-for-return-in-_pnfs_return_layout.patch
+input-ili210x-add-missing-negation-for-touch-indication-on-ili210x.patch
+jffs2-fix-kasan-slab-out-of-bounds-problem.patch
+jffs2-hook-up-splice_write-callback.patch
+iommu-vt-d-force-to-flush-iotlb-before-creating-superpage.patch
+powerpc-vdso-separate-vvar-vma-from-vdso.patch
+powerpc-powernv-enable-hail-hv-ail-for-isa-v3.1-processors.patch
+powerpc-eeh-fix-eeh-handling-for-hugepages-in-ioremap-space.patch
+powerpc-kexec_file-use-current-cpu-info-while-setting-up-fdt.patch
+powerpc-32-fix-boot-failure-with-config_stackprotector.patch
+powerpc-fix-edeadlock-redefinition-error-in-uapi-asm-errno.h.patch
+powerpc-kvm-fix-pr-kvm-with-kuap-mem_keys-enabled.patch
+powerpc-kvm-fix-build-error-when-ppc_mem_keys-ppc_pseries-n.patch
+intel_th-pci-add-alder-lake-m-support.patch
+tpm-efi-use-local-variable-for-calculating-final-log-size.patch
+tpm-vtpm_proxy-avoid-reading-host-log-when-using-a-virtual-device.patch
+crypto-arm-curve25519-move-.fpu-after-.arch.patch
+crypto-rng-fix-crypto_rng_reset-refcounting-when-crypto_stats.patch
--- /dev/null
+From 48cff270b037022e37835d93361646205ca25101 Mon Sep 17 00:00:00 2001
+From: Stefan Berger <stefanb@linux.ibm.com>
+Date: Wed, 10 Mar 2021 17:19:14 -0500
+Subject: tpm: efi: Use local variable for calculating final log size
+
+From: Stefan Berger <stefanb@linux.ibm.com>
+
+commit 48cff270b037022e37835d93361646205ca25101 upstream.
+
+When tpm_read_log_efi is called multiple times, which happens when
+one loads and unloads a TPM2 driver multiple times, then the global
+variable efi_tpm_final_log_size will at some point become a negative
+number due to the subtraction of final_events_preboot_size occurring
+each time. Use a local variable to avoid this integer underflow.
+
+The following issue is now resolved:
+
+Mar 8 15:35:12 hibinst kernel: Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
+Mar 8 15:35:12 hibinst kernel: Workqueue: tpm-vtpm vtpm_proxy_work [tpm_vtpm_proxy]
+Mar 8 15:35:12 hibinst kernel: RIP: 0010:__memcpy+0x12/0x20
+Mar 8 15:35:12 hibinst kernel: Code: 00 b8 01 00 00 00 85 d2 74 0a c7 05 44 7b ef 00 0f 00 00 00 c3 cc cc cc 66 66 90 66 90 48 89 f8 48 89 d1 48 c1 e9 03 83 e2 07 <f3> 48 a5 89 d1 f3 a4 c3 66 0f 1f 44 00 00 48 89 f8 48 89 d1 f3 a4
+Mar 8 15:35:12 hibinst kernel: RSP: 0018:ffff9ac4c0fcfde0 EFLAGS: 00010206
+Mar 8 15:35:12 hibinst kernel: RAX: ffff88f878cefed5 RBX: ffff88f878ce9000 RCX: 1ffffffffffffe0f
+Mar 8 15:35:12 hibinst kernel: RDX: 0000000000000003 RSI: ffff9ac4c003bff9 RDI: ffff88f878cf0e4d
+Mar 8 15:35:12 hibinst kernel: RBP: ffff9ac4c003b000 R08: 0000000000001000 R09: 000000007e9d6073
+Mar 8 15:35:12 hibinst kernel: R10: ffff9ac4c003b000 R11: ffff88f879ad3500 R12: 0000000000000ed5
+Mar 8 15:35:12 hibinst kernel: R13: ffff88f878ce9760 R14: 0000000000000002 R15: ffff88f77de7f018
+Mar 8 15:35:12 hibinst kernel: FS: 0000000000000000(0000) GS:ffff88f87bd00000(0000) knlGS:0000000000000000
+Mar 8 15:35:12 hibinst kernel: CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+Mar 8 15:35:12 hibinst kernel: CR2: ffff9ac4c003c000 CR3: 00000001785a6004 CR4: 0000000000060ee0
+Mar 8 15:35:12 hibinst kernel: Call Trace:
+Mar 8 15:35:12 hibinst kernel: tpm_read_log_efi+0x152/0x1a7
+Mar 8 15:35:12 hibinst kernel: tpm_bios_log_setup+0xc8/0x1c0
+Mar 8 15:35:12 hibinst kernel: tpm_chip_register+0x8f/0x260
+Mar 8 15:35:12 hibinst kernel: vtpm_proxy_work+0x16/0x60 [tpm_vtpm_proxy]
+Mar 8 15:35:12 hibinst kernel: process_one_work+0x1b4/0x370
+Mar 8 15:35:12 hibinst kernel: worker_thread+0x53/0x3e0
+Mar 8 15:35:12 hibinst kernel: ? process_one_work+0x370/0x370
+
+Cc: stable@vger.kernel.org
+Fixes: 166a2809d65b ("tpm: Don't duplicate events from the final event log in the TCG2 log")
+Signed-off-by: Stefan Berger <stefanb@linux.ibm.com>
+Reviewed-by: Jarkko Sakkinen <jarkko@kernel.org>
+Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/tpm/eventlog/efi.c | 29 +++++++++++++++++++++--------
+ 1 file changed, 21 insertions(+), 8 deletions(-)
+
+--- a/drivers/char/tpm/eventlog/efi.c
++++ b/drivers/char/tpm/eventlog/efi.c
+@@ -17,6 +17,7 @@ int tpm_read_log_efi(struct tpm_chip *ch
+ {
+
+ struct efi_tcg2_final_events_table *final_tbl = NULL;
++ int final_events_log_size = efi_tpm_final_log_size;
+ struct linux_efi_tpm_eventlog *log_tbl;
+ struct tpm_bios_log *log;
+ u32 log_size;
+@@ -66,12 +67,12 @@ int tpm_read_log_efi(struct tpm_chip *ch
+ ret = tpm_log_version;
+
+ if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR ||
+- efi_tpm_final_log_size == 0 ||
++ final_events_log_size == 0 ||
+ tpm_log_version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
+ goto out;
+
+ final_tbl = memremap(efi.tpm_final_log,
+- sizeof(*final_tbl) + efi_tpm_final_log_size,
++ sizeof(*final_tbl) + final_events_log_size,
+ MEMREMAP_WB);
+ if (!final_tbl) {
+ pr_err("Could not map UEFI TPM final log\n");
+@@ -80,10 +81,18 @@ int tpm_read_log_efi(struct tpm_chip *ch
+ goto out;
+ }
+
+- efi_tpm_final_log_size -= log_tbl->final_events_preboot_size;
++ /*
++ * The 'final events log' size excludes the 'final events preboot log'
++ * at its beginning.
++ */
++ final_events_log_size -= log_tbl->final_events_preboot_size;
+
++ /*
++ * Allocate memory for the 'combined log' where we will append the
++ * 'final events log' to.
++ */
+ tmp = krealloc(log->bios_event_log,
+- log_size + efi_tpm_final_log_size,
++ log_size + final_events_log_size,
+ GFP_KERNEL);
+ if (!tmp) {
+ kfree(log->bios_event_log);
+@@ -94,15 +103,19 @@ int tpm_read_log_efi(struct tpm_chip *ch
+ log->bios_event_log = tmp;
+
+ /*
+- * Copy any of the final events log that didn't also end up in the
+- * main log. Events can be logged in both if events are generated
++ * Append any of the 'final events log' that didn't also end up in the
++ * 'main log'. Events can be logged in both if events are generated
+ * between GetEventLog() and ExitBootServices().
+ */
+ memcpy((void *)log->bios_event_log + log_size,
+ final_tbl->events + log_tbl->final_events_preboot_size,
+- efi_tpm_final_log_size);
++ final_events_log_size);
++ /*
++ * The size of the 'combined log' is the size of the 'main log' plus
++ * the size of the 'final events log'.
++ */
+ log->bios_event_log_end = log->bios_event_log +
+- log_size + efi_tpm_final_log_size;
++ log_size + final_events_log_size;
+
+ out:
+ memunmap(final_tbl);
--- /dev/null
+From 9716ac65efc8f780549b03bddf41e60c445d4709 Mon Sep 17 00:00:00 2001
+From: Stefan Berger <stefanb@linux.ibm.com>
+Date: Wed, 10 Mar 2021 17:19:16 -0500
+Subject: tpm: vtpm_proxy: Avoid reading host log when using a virtual device
+
+From: Stefan Berger <stefanb@linux.ibm.com>
+
+commit 9716ac65efc8f780549b03bddf41e60c445d4709 upstream.
+
+Avoid allocating memory and reading the host log when a virtual device
+is used since this log is of no use to that driver. A virtual
+device can be identified through the flag TPM_CHIP_FLAG_VIRTUAL, which
+is only set for the tpm_vtpm_proxy driver.
+
+Cc: stable@vger.kernel.org
+Fixes: 6f99612e2500 ("tpm: Proxy driver for supporting multiple emulated TPMs")
+Signed-off-by: Stefan Berger <stefanb@linux.ibm.com>
+Reviewed-by: Jarkko Sakkinen <jarkko@kernel.org>
+Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/tpm/eventlog/common.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/char/tpm/eventlog/common.c
++++ b/drivers/char/tpm/eventlog/common.c
+@@ -107,6 +107,9 @@ void tpm_bios_log_setup(struct tpm_chip
+ int log_version;
+ int rc = 0;
+
++ if (chip->flags & TPM_CHIP_FLAG_VIRTUAL)
++ return;
++
+ rc = tpm_read_log(chip);
+ if (rc < 0)
+ return;