--- /dev/null
+From 10f985db4ffe320f01a1ba1ef9b1805c91208973 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Mar 2021 10:54:57 +0530
+Subject: arm64/mm: Fix pfn_valid() for ZONE_DEVICE based memory
+
+From: Anshuman Khandual <anshuman.khandual@arm.com>
+
+[ Upstream commit eeb0753ba27b26f609e61f9950b14f1b934fe429 ]
+
+pfn_valid() validates a pfn but basically it checks for a valid struct page
+backing for that pfn. It should always return positive for memory ranges
+backed with struct page mapping. But currently pfn_valid() fails for all
+ZONE_DEVICE based memory types even though they have struct page mapping.
+
+pfn_valid() asserts that there is a memblock entry for a given pfn without
+MEMBLOCK_NOMAP flag being set. The problem with ZONE_DEVICE based memory is
+that they do not have memblock entries. Hence memblock_is_map_memory() will
+invariably fail via memblock_search() for a ZONE_DEVICE based address. This
+eventually fails pfn_valid() which is wrong. memblock_is_map_memory() needs
+to be skipped for such memory ranges. As ZONE_DEVICE memory gets hotplugged
+into the system via memremap_pages() called from a driver, their respective
+memory sections will not have SECTION_IS_EARLY set.
+
+Normal hotplug memory will never have MEMBLOCK_NOMAP set in their memblock
+regions. Because the flag MEMBLOCK_NOMAP was specifically designed and set
+for firmware reserved memory regions. memblock_is_map_memory() can just be
+skipped as it's always going to be positive and that will be an optimization
+for the normal hotplug memory. Like ZONE_DEVICE based memory, all normal
+hotplugged memory too will not have SECTION_IS_EARLY set for their sections
+
+Skipping memblock_is_map_memory() for all non early memory sections would
+fix pfn_valid() problem for ZONE_DEVICE based memory and also improve its
+performance for normal hotplug memory as well.
+
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: Ard Biesheuvel <ardb@kernel.org>
+Cc: Robin Murphy <robin.murphy@arm.com>
+Cc: linux-arm-kernel@lists.infradead.org
+Cc: linux-kernel@vger.kernel.org
+Acked-by: David Hildenbrand <david@redhat.com>
+Fixes: 73b20c84d42d ("arm64: mm: implement pte_devmap support")
+Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Link: https://lore.kernel.org/r/1614921898-4099-2-git-send-email-anshuman.khandual@arm.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/mm/init.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
+index 709d98fea90c..1141075e4d53 100644
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -230,6 +230,18 @@ int pfn_valid(unsigned long pfn)
+
+ if (!valid_section(__pfn_to_section(pfn)))
+ return 0;
++
++ /*
++ * ZONE_DEVICE memory does not have the memblock entries.
++ * memblock_is_map_memory() check for ZONE_DEVICE based
++ * addresses will always fail. Even the normal hotplugged
++ * memory will never have MEMBLOCK_NOMAP flag set in their
++ * memblock entries. Skip memblock search for all non early
++ * memory sections covering all of hotplug memory including
++ * both normal and ZONE_DEVICE based.
++ */
++ if (!early_section(__pfn_to_section(pfn)))
++ return pfn_section_valid(__pfn_to_section(pfn), pfn);
+ #endif
+ return memblock_is_map_memory(addr);
+ }
+--
+2.30.1
+
--- /dev/null
+From 5fb0a6b3ca7648ad7ee30f1ba95c9f9d1123c343 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Mar 2021 18:15:11 +0100
+Subject: arm64: mm: use a 48-bit ID map when possible on 52-bit VA builds
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+[ Upstream commit 7ba8f2b2d652cd8d8a2ab61f4be66973e70f9f88 ]
+
+52-bit VA kernels can run on hardware that is only 48-bit capable, but
+configure the ID map as 52-bit by default. This was not a problem until
+recently, because the special T0SZ value for a 52-bit VA space was never
+programmed into the TCR register anyway, and because a 52-bit ID map
+happens to use the same number of translation levels as a 48-bit one.
+
+This behavior was changed by commit 1401bef703a4 ("arm64: mm: Always update
+TCR_EL1 from __cpu_set_tcr_t0sz()"), which causes the unsupported T0SZ
+value for a 52-bit VA to be programmed into TCR_EL1. While some hardware
+simply ignores this, Mark reports that Amberwing systems choke on this,
+resulting in a broken boot. But even before that commit, the unsupported
+idmap_t0sz value was exposed to KVM and used to program TCR_EL2 incorrectly
+as well.
+
+Given that we already have to deal with address spaces being either 48-bit
+or 52-bit in size, the cleanest approach seems to be to simply default to
+a 48-bit VA ID map, and only switch to a 52-bit one if the placement of the
+kernel in DRAM requires it. This is guaranteed not to happen unless the
+system is actually 52-bit VA capable.
+
+Fixes: 90ec95cda91a ("arm64: mm: Introduce VA_BITS_MIN")
+Reported-by: Mark Salter <msalter@redhat.com>
+Link: http://lore.kernel.org/r/20210310003216.410037-1-msalter@redhat.com
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Link: https://lore.kernel.org/r/20210310171515.416643-2-ardb@kernel.org
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/include/asm/mmu_context.h | 5 +----
+ arch/arm64/kernel/head.S | 2 +-
+ arch/arm64/mm/mmu.c | 2 +-
+ 3 files changed, 3 insertions(+), 6 deletions(-)
+
+diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
+index 0b3079fd28eb..1c364ec0ad31 100644
+--- a/arch/arm64/include/asm/mmu_context.h
++++ b/arch/arm64/include/asm/mmu_context.h
+@@ -65,10 +65,7 @@ extern u64 idmap_ptrs_per_pgd;
+
+ static inline bool __cpu_uses_extended_idmap(void)
+ {
+- if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52))
+- return false;
+-
+- return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
++ return unlikely(idmap_t0sz != TCR_T0SZ(vabits_actual));
+ }
+
+ /*
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index 7ec430e18f95..a0b3bfe67609 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -319,7 +319,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
+ */
+ adrp x5, __idmap_text_end
+ clz x5, x5
+- cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough?
++ cmp x5, TCR_T0SZ(VA_BITS_MIN) // default T0SZ small enough?
+ b.ge 1f // .. then skip VA range extension
+
+ adr_l x6, idmap_t0sz
+diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
+index cb78343181db..6f0648777d34 100644
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -40,7 +40,7 @@
+ #define NO_BLOCK_MAPPINGS BIT(0)
+ #define NO_CONT_MAPPINGS BIT(1)
+
+-u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
++u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
+ u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
+
+ u64 __section(".mmuoff.data.write") vabits_actual;
+--
+2.30.1
+
--- /dev/null
+From 5a40fed9032b69afca679030988a1528849509f5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 9 Mar 2021 19:30:17 -0800
+Subject: block: rsxx: fix error return code of rsxx_pci_probe()
+
+From: Jia-Ju Bai <baijiaju1990@gmail.com>
+
+[ Upstream commit df66617bfe87487190a60783d26175b65d2502ce ]
+
+When create_singlethread_workqueue returns NULL to card->event_wq, no
+error return code of rsxx_pci_probe() is assigned.
+
+To fix this bug, st is assigned with -ENOMEM in this case.
+
+Fixes: 8722ff8cdbfa ("block: IBM RamSan 70/80 device driver")
+Reported-by: TOTE Robot <oslab@tsinghua.edu.cn>
+Signed-off-by: Jia-Ju Bai <baijiaju1990@gmail.com>
+Link: https://lore.kernel.org/r/20210310033017.4023-1-baijiaju1990@gmail.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/rsxx/core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
+index 5ac1881396af..227e1be4c6f9 100644
+--- a/drivers/block/rsxx/core.c
++++ b/drivers/block/rsxx/core.c
+@@ -871,6 +871,7 @@ static int rsxx_pci_probe(struct pci_dev *dev,
+ card->event_wq = create_singlethread_workqueue(DRIVER_NAME"_event");
+ if (!card->event_wq) {
+ dev_err(CARD_TO_DEV(card), "Failed card event setup.\n");
++ st = -ENOMEM;
+ goto failed_event_handler;
+ }
+
+--
+2.30.1
+
--- /dev/null
+From cfd71154d4b89c55fcc0ff4e9f4102448a2fff54 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 1 Mar 2021 14:10:53 +0800
+Subject: configfs: fix a use-after-free in __configfs_open_file
+
+From: Daiyue Zhang <zhangdaiyue1@huawei.com>
+
+[ Upstream commit 14fbbc8297728e880070f7b077b3301a8c698ef9 ]
+
+Commit b0841eefd969 ("configfs: provide exclusion between IO and removals")
+uses ->frag_dead to mark the fragment state, thus no bothering with extra
+refcount on config_item when opening a file. The configfs_get_config_item
+was removed in __configfs_open_file, but not with config_item_put. So the
+refcount on config_item will lose its balance, causing use-after-free
+issues in some occasions like this:
+
+Test:
+1. Mount configfs on /config with read-only items:
+drwxrwx--- 289 root root 0 2021-04-01 11:55 /config
+drwxr-xr-x 2 root root 0 2021-04-01 11:54 /config/a
+--w--w--w- 1 root root 4096 2021-04-01 11:53 /config/a/1.txt
+......
+
+2. Then run:
+for file in /config
+do
+echo $file
+grep -R 'key' $file
+done
+
+3. __configfs_open_file will be called in parallel, the first one
+got called will do:
+if (file->f_mode & FMODE_READ) {
+ if (!(inode->i_mode & S_IRUGO))
+ goto out_put_module;
+ config_item_put(buffer->item);
+ kref_put()
+ package_details_release()
+ kfree()
+
+the other one will run into use-after-free issues like this:
+BUG: KASAN: use-after-free in __configfs_open_file+0x1bc/0x3b0
+Read of size 8 at addr fffffff155f02480 by task grep/13096
+CPU: 0 PID: 13096 Comm: grep VIP: 00 Tainted: G W 4.14.116-kasan #1
+TGID: 13096 Comm: grep
+Call trace:
+dump_stack+0x118/0x160
+kasan_report+0x22c/0x294
+__asan_load8+0x80/0x88
+__configfs_open_file+0x1bc/0x3b0
+configfs_open_file+0x28/0x34
+do_dentry_open+0x2cc/0x5c0
+vfs_open+0x80/0xe0
+path_openat+0xd8c/0x2988
+do_filp_open+0x1c4/0x2fc
+do_sys_open+0x23c/0x404
+SyS_openat+0x38/0x48
+
+Allocated by task 2138:
+kasan_kmalloc+0xe0/0x1ac
+kmem_cache_alloc_trace+0x334/0x394
+packages_make_item+0x4c/0x180
+configfs_mkdir+0x358/0x740
+vfs_mkdir2+0x1bc/0x2e8
+SyS_mkdirat+0x154/0x23c
+el0_svc_naked+0x34/0x38
+
+Freed by task 13096:
+kasan_slab_free+0xb8/0x194
+kfree+0x13c/0x910
+package_details_release+0x524/0x56c
+kref_put+0xc4/0x104
+config_item_put+0x24/0x34
+__configfs_open_file+0x35c/0x3b0
+configfs_open_file+0x28/0x34
+do_dentry_open+0x2cc/0x5c0
+vfs_open+0x80/0xe0
+path_openat+0xd8c/0x2988
+do_filp_open+0x1c4/0x2fc
+do_sys_open+0x23c/0x404
+SyS_openat+0x38/0x48
+el0_svc_naked+0x34/0x38
+
+To fix this issue, remove the config_item_put in
+__configfs_open_file to balance the refcount of config_item.
+
+Fixes: b0841eefd969 ("configfs: provide exclusion between IO and removals")
+Signed-off-by: Daiyue Zhang <zhangdaiyue1@huawei.com>
+Signed-off-by: Yi Chen <chenyi77@huawei.com>
+Signed-off-by: Ge Qiu <qiuge@huawei.com>
+Reviewed-by: Chao Yu <yuchao0@huawei.com>
+Acked-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/configfs/file.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/fs/configfs/file.c b/fs/configfs/file.c
+index 1f0270229d7b..da8351d1e455 100644
+--- a/fs/configfs/file.c
++++ b/fs/configfs/file.c
+@@ -378,7 +378,7 @@ static int __configfs_open_file(struct inode *inode, struct file *file, int type
+
+ attr = to_attr(dentry);
+ if (!attr)
+- goto out_put_item;
++ goto out_free_buffer;
+
+ if (type & CONFIGFS_ITEM_BIN_ATTR) {
+ buffer->bin_attr = to_bin_attr(dentry);
+@@ -391,7 +391,7 @@ static int __configfs_open_file(struct inode *inode, struct file *file, int type
+ /* Grab the module reference for this attribute if we have one */
+ error = -ENODEV;
+ if (!try_module_get(buffer->owner))
+- goto out_put_item;
++ goto out_free_buffer;
+
+ error = -EACCES;
+ if (!buffer->item->ci_type)
+@@ -435,8 +435,6 @@ static int __configfs_open_file(struct inode *inode, struct file *file, int type
+
+ out_put_module:
+ module_put(buffer->owner);
+-out_put_item:
+- config_item_put(buffer->item);
+ out_free_buffer:
+ up_read(&frag->frag_sem);
+ kfree(buffer);
+--
+2.30.1
+
--- /dev/null
+From 73d0d33af7c6a47eadead8a076c9eef57c48ffff Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 28 Feb 2021 09:33:19 +0800
+Subject: cpufreq: qcom-hw: fix dereferencing freed memory 'data'
+
+From: Shawn Guo <shawn.guo@linaro.org>
+
+[ Upstream commit 02fc409540303801994d076fcdb7064bd634dbf3 ]
+
+Commit 67fc209b527d ("cpufreq: qcom-hw: drop devm_xxx() calls from
+init/exit hooks") introduces an issue of dereferencing freed memory
+'data'. Fix it.
+
+Fixes: 67fc209b527d ("cpufreq: qcom-hw: drop devm_xxx() calls from init/exit hooks")
+Reported-by: kernel test robot <lkp@intel.com>
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Shawn Guo <shawn.guo@linaro.org>
+Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/cpufreq/qcom-cpufreq-hw.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
+index 2726e77c9e5a..5cdd20e38771 100644
+--- a/drivers/cpufreq/qcom-cpufreq-hw.c
++++ b/drivers/cpufreq/qcom-cpufreq-hw.c
+@@ -368,7 +368,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
+ error:
+ kfree(data);
+ unmap_base:
+- iounmap(data->base);
++ iounmap(base);
+ release_region:
+ release_mem_region(res->start, resource_size(res));
+ return ret;
+--
+2.30.1
+
--- /dev/null
+From c88e328de6b3add40a150d1cc68bd89594dd73ac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Mar 2021 10:04:23 +0000
+Subject: cpufreq: qcom-hw: Fix return value check in
+ qcom_cpufreq_hw_cpu_init()
+
+From: Wei Yongjun <weiyongjun1@huawei.com>
+
+[ Upstream commit 536eb97abeba857126ad055de5923fa592acef25 ]
+
+In case of error, the function ioremap() returns NULL pointer
+not ERR_PTR(). The IS_ERR() test in the return value check
+should be replaced with NULL test.
+
+Fixes: 67fc209b527d ("cpufreq: qcom-hw: drop devm_xxx() calls from init/exit hooks")
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
+Acked-by: Shawn Guo <shawn.guo@linaro.org>
+Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/cpufreq/qcom-cpufreq-hw.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
+index 5cdd20e38771..6de07556665b 100644
+--- a/drivers/cpufreq/qcom-cpufreq-hw.c
++++ b/drivers/cpufreq/qcom-cpufreq-hw.c
+@@ -317,9 +317,9 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
+ }
+
+ base = ioremap(res->start, resource_size(res));
+- if (IS_ERR(base)) {
++ if (!base) {
+ dev_err(dev, "failed to map resource %pR\n", res);
+- ret = PTR_ERR(base);
++ ret = -ENOMEM;
+ goto release_region;
+ }
+
+--
+2.30.1
+
--- /dev/null
+From d5e6c7e327d95a9457c65f252b4e76801b1b189e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 11 Mar 2021 14:35:27 +1000
+Subject: drm/nouveau: fix dma syncing for loops (v2)
+
+From: Dave Airlie <airlied@redhat.com>
+
+[ Upstream commit 4042160c2e5433e0759782c402292a90b5bf458d ]
+
+The index variable should only be increased in one place.
+
+Noticed this while trying to track down another oops.
+
+v2: use while loop.
+
+Fixes: f295c8cfec83 ("drm/nouveau: fix dma syncing warning with debugging on.")
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20210311043527.5376-1-airlied@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/nouveau/nouveau_bo.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
+index 7ea367a5444d..f1c9a22083be 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
+@@ -556,7 +556,8 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
+ if (nvbo->force_coherent)
+ return;
+
+- for (i = 0; i < ttm_dma->num_pages; ++i) {
++ i = 0;
++ while (i < ttm_dma->num_pages) {
+ struct page *p = ttm_dma->pages[i];
+ size_t num_pages = 1;
+
+@@ -587,7 +588,8 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
+ if (nvbo->force_coherent)
+ return;
+
+- for (i = 0; i < ttm_dma->num_pages; ++i) {
++ i = 0;
++ while (i < ttm_dma->num_pages) {
+ struct page *p = ttm_dma->pages[i];
+ size_t num_pages = 1;
+
+--
+2.30.1
+
--- /dev/null
+From 5b588e6401b7bce933d883a0f1a3cb7e81847b78 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 Mar 2021 17:17:25 -0800
+Subject: drm/ttm: Fix TTM page pool accounting
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Anthony DeRossi <ajderossi@gmail.com>
+
+[ Upstream commit ca63d76fd2319db984f2875992643f900caf2c72 ]
+
+Freed pages are not subtracted from the allocated_pages counter in
+ttm_pool_type_fini(), causing a leak in the count on device removal.
+The next shrinker invocation loops forever trying to free pages that are
+no longer in the pool:
+
+ rcu: INFO: rcu_sched self-detected stall on CPU
+ rcu: 3-....: (9998 ticks this GP) idle=54e/1/0x4000000000000000 softirq=434857/434857 fqs=2237
+ (t=10001 jiffies g=2194533 q=49211)
+ NMI backtrace for cpu 3
+ CPU: 3 PID: 1034 Comm: kswapd0 Tainted: P O 5.11.0-com #1
+ Hardware name: System manufacturer System Product Name/PRIME X570-PRO, BIOS 1405 11/19/2019
+ Call Trace:
+ <IRQ>
+ ...
+ </IRQ>
+ sysvec_apic_timer_interrupt+0x77/0x80
+ asm_sysvec_apic_timer_interrupt+0x12/0x20
+ RIP: 0010:mutex_unlock+0x16/0x20
+ Code: e7 48 8b 70 10 e8 7a 53 77 ff eb aa e8 43 6c ff ff 0f 1f 00 65 48 8b 14 25 00 6d 01 00 31 c9 48 89 d0 f0 48 0f b1 0f 48 39 c2 <74> 05 e9 e3 fe ff ff c3 66 90 48 8b 47 20 48 85 c0 74 0f 8b 50 10
+ RSP: 0018:ffffbdb840797be8 EFLAGS: 00000246
+ RAX: ffff9ff445a41c00 RBX: ffffffffc02a9ef8 RCX: 0000000000000000
+ RDX: ffff9ff445a41c00 RSI: ffffbdb840797c78 RDI: ffffffffc02a9ac0
+ RBP: 0000000000000080 R08: 0000000000000000 R09: ffffbdb840797c80
+ R10: 0000000000000000 R11: fffffffffffffff5 R12: 0000000000000000
+ R13: 0000000000000000 R14: 0000000000000084 R15: ffffffffc02a9a60
+ ttm_pool_shrink+0x7d/0x90 [ttm]
+ ttm_pool_shrinker_scan+0x5/0x20 [ttm]
+ do_shrink_slab+0x13a/0x1a0
+...
+
+debugfs shows the incorrect total:
+
+ $ cat /sys/kernel/debug/dri/0/ttm_page_pool
+ --- 0--- --- 1--- --- 2--- --- 3--- --- 4--- --- 5--- --- 6--- --- 7--- --- 8--- --- 9--- ---10---
+ wc : 0 0 0 0 0 0 0 0 0 0 0
+ uc : 0 0 0 0 0 0 0 0 0 0 0
+ wc 32 : 0 0 0 0 0 0 0 0 0 0 0
+ uc 32 : 0 0 0 0 0 0 0 0 0 0 0
+ DMA uc : 0 0 0 0 0 0 0 0 0 0 0
+ DMA wc : 0 0 0 0 0 0 0 0 0 0 0
+ DMA : 0 0 0 0 0 0 0 0 0 0 0
+
+ total : 3029 of 8244261
+
+Using ttm_pool_type_take() to remove pages from the pool before freeing
+them correctly accounts for the freed pages.
+
+Fixes: d099fc8f540a ("drm/ttm: new TT backend allocation pool v3")
+Signed-off-by: Anthony DeRossi <ajderossi@gmail.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20210303011723.22512-1-ajderossi@gmail.com
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/ttm/ttm_pool.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
+index 6e27cb1bf48b..4eb6efb8b8c0 100644
+--- a/drivers/gpu/drm/ttm/ttm_pool.c
++++ b/drivers/gpu/drm/ttm/ttm_pool.c
+@@ -268,13 +268,13 @@ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
+ /* Remove a pool_type from the global shrinker list and free all pages */
+ static void ttm_pool_type_fini(struct ttm_pool_type *pt)
+ {
+- struct page *p, *tmp;
++ struct page *p;
+
+ mutex_lock(&shrinker_lock);
+ list_del(&pt->shrinker_list);
+ mutex_unlock(&shrinker_lock);
+
+- list_for_each_entry_safe(p, tmp, &pt->pages, lru)
++ while ((p = ttm_pool_type_take(pt)))
+ ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
+ }
+
+--
+2.30.1
+
--- /dev/null
+From d496d2328b2bca20b2e5a7595f357868d3545777 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Feb 2021 17:02:40 +0100
+Subject: hrtimer: Update softirq_expires_next correctly after
+ __hrtimer_get_next_event()
+
+From: Anna-Maria Behnsen <anna-maria@linutronix.de>
+
+[ Upstream commit 46eb1701c046cc18c032fa68f3c8ccbf24483ee4 ]
+
+hrtimer_force_reprogram() and hrtimer_interrupt() invokes
+__hrtimer_get_next_event() to find the earliest expiry time of hrtimer
+bases. __hrtimer_get_next_event() does not update
+cpu_base::[softirq_]_expires_next to preserve reprogramming logic. That
+needs to be done at the callsites.
+
+hrtimer_force_reprogram() updates cpu_base::softirq_expires_next only when
+the first expiring timer is a softirq timer and the soft interrupt is not
+activated. That's wrong because cpu_base::softirq_expires_next is left
+stale when the first expiring timer of all bases is a timer which expires
+in hard interrupt context. hrtimer_interrupt() does never update
+cpu_base::softirq_expires_next which is wrong too.
+
+That becomes a problem when clock_settime() sets CLOCK_REALTIME forward and
+the first soft expiring timer is in the CLOCK_REALTIME_SOFT base. Setting
+CLOCK_REALTIME forward moves the clock MONOTONIC based expiry time of that
+timer before the stale cpu_base::softirq_expires_next.
+
+cpu_base::softirq_expires_next is cached to make the check for raising the
+soft interrupt fast. In the above case the soft interrupt won't be raised
+until clock monotonic reaches the stale cpu_base::softirq_expires_next
+value. That's incorrect, but what's worse it that if the softirq timer
+becomes the first expiring timer of all clock bases after the hard expiry
+timer has been handled the reprogramming of the clockevent from
+hrtimer_interrupt() will result in an interrupt storm. That happens because
+the reprogramming does not use cpu_base::softirq_expires_next, it uses
+__hrtimer_get_next_event() which returns the actual expiry time. Once clock
+MONOTONIC reaches cpu_base::softirq_expires_next the soft interrupt is
+raised and the storm subsides.
+
+Change the logic in hrtimer_force_reprogram() to evaluate the soft and hard
+bases separately, update softirq_expires_next and handle the case when a
+soft expiring timer is the first of all bases by comparing the expiry times
+and updating the required cpu base fields. Split this functionality into a
+separate function to be able to use it in hrtimer_interrupt() as well
+without copy paste.
+
+Fixes: 5da70160462e ("hrtimer: Implement support for softirq based hrtimers")
+Reported-by: Mikael Beckius <mikael.beckius@windriver.com>
+Suggested-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Mikael Beckius <mikael.beckius@windriver.com>
+Signed-off-by: Anna-Maria Behnsen <anna-maria@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Link: https://lore.kernel.org/r/20210223160240.27518-1-anna-maria@linutronix.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/time/hrtimer.c | 60 ++++++++++++++++++++++++++++---------------
+ 1 file changed, 39 insertions(+), 21 deletions(-)
+
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 743c852e10f2..788b9d137de4 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -546,8 +546,11 @@ static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
+ }
+
+ /*
+- * Recomputes cpu_base::*next_timer and returns the earliest expires_next but
+- * does not set cpu_base::*expires_next, that is done by hrtimer_reprogram.
++ * Recomputes cpu_base::*next_timer and returns the earliest expires_next
++ * but does not set cpu_base::*expires_next, that is done by
++ * hrtimer[_force]_reprogram and hrtimer_interrupt only. When updating
++ * cpu_base::*expires_next right away, reprogramming logic would no longer
++ * work.
+ *
+ * When a softirq is pending, we can ignore the HRTIMER_ACTIVE_SOFT bases,
+ * those timers will get run whenever the softirq gets handled, at the end of
+@@ -588,6 +591,37 @@ __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_
+ return expires_next;
+ }
+
++static ktime_t hrtimer_update_next_event(struct hrtimer_cpu_base *cpu_base)
++{
++ ktime_t expires_next, soft = KTIME_MAX;
++
++ /*
++ * If the soft interrupt has already been activated, ignore the
++ * soft bases. They will be handled in the already raised soft
++ * interrupt.
++ */
++ if (!cpu_base->softirq_activated) {
++ soft = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);
++ /*
++ * Update the soft expiry time. clock_settime() might have
++ * affected it.
++ */
++ cpu_base->softirq_expires_next = soft;
++ }
++
++ expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
++ /*
++ * If a softirq timer is expiring first, update cpu_base->next_timer
++ * and program the hardware with the soft expiry time.
++ */
++ if (expires_next > soft) {
++ cpu_base->next_timer = cpu_base->softirq_next_timer;
++ expires_next = soft;
++ }
++
++ return expires_next;
++}
++
+ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
+ {
+ ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
+@@ -628,23 +662,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
+ {
+ ktime_t expires_next;
+
+- /*
+- * Find the current next expiration time.
+- */
+- expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
+-
+- if (cpu_base->next_timer && cpu_base->next_timer->is_soft) {
+- /*
+- * When the softirq is activated, hrtimer has to be
+- * programmed with the first hard hrtimer because soft
+- * timer interrupt could occur too late.
+- */
+- if (cpu_base->softirq_activated)
+- expires_next = __hrtimer_get_next_event(cpu_base,
+- HRTIMER_ACTIVE_HARD);
+- else
+- cpu_base->softirq_expires_next = expires_next;
+- }
++ expires_next = hrtimer_update_next_event(cpu_base);
+
+ if (skip_equal && expires_next == cpu_base->expires_next)
+ return;
+@@ -1644,8 +1662,8 @@ void hrtimer_interrupt(struct clock_event_device *dev)
+
+ __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
+
+- /* Reevaluate the clock bases for the next expiry */
+- expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
++ /* Reevaluate the clock bases for the [soft] next expiry */
++ expires_next = hrtimer_update_next_event(cpu_base);
+ /*
+ * Store the new expiry value so the migration code can verify
+ * against it.
+--
+2.30.1
+
--- /dev/null
+From 16dd190ecf3f2a15f875c7ba42540b7d05e5cee6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Mar 2021 21:08:03 -0800
+Subject: include/linux/sched/mm.h: use rcu_dereference in in_vfork()
+
+From: Matthew Wilcox (Oracle) <willy@infradead.org>
+
+[ Upstream commit 149fc787353f65b7e72e05e7b75d34863266c3e2 ]
+
+Fix a sparse warning by using rcu_dereference(). Technically this is a
+bug and a sufficiently aggressive compiler could reload the `real_parent'
+pointer outside the protection of the rcu lock (and access freed memory),
+but I think it's pretty unlikely to happen.
+
+Link: https://lkml.kernel.org/r/20210221194207.1351703-1-willy@infradead.org
+Fixes: b18dc5f291c0 ("mm, oom: skip vforked tasks from being selected")
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/sched/mm.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
+index 1ae08b8462a4..90b2a0bce11c 100644
+--- a/include/linux/sched/mm.h
++++ b/include/linux/sched/mm.h
+@@ -140,7 +140,8 @@ static inline bool in_vfork(struct task_struct *tsk)
+ * another oom-unkillable task does this it should blame itself.
+ */
+ rcu_read_lock();
+- ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
++ ret = tsk->vfork_done &&
++ rcu_dereference(tsk->real_parent)->mm == tsk->mm;
+ rcu_read_unlock();
+
+ return ret;
+--
+2.30.1
+
--- /dev/null
+From 69d995c93c566a771c0da42497eef13d1ffe71d2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 11 Mar 2021 10:49:20 -0700
+Subject: io_uring: perform IOPOLL reaping if canceler is thread itself
+
+From: Jens Axboe <axboe@kernel.dk>
+
+[ Upstream commit d052d1d685f5125249ab4ff887562c88ba959638 ]
+
+We bypass IOPOLL completion polling (and reaping) for the SQPOLL thread,
+but if it's the thread itself invoking cancelations, then we still need
+to perform it or no one will.
+
+Fixes: 9936c7c2bc76 ("io_uring: deduplicate core cancellations sequence")
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/io_uring.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 241313278e5a..00ef0b90d149 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -8891,7 +8891,8 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
+ }
+
+ /* SQPOLL thread does its own polling */
+- if (!(ctx->flags & IORING_SETUP_SQPOLL) && !files) {
++ if ((!(ctx->flags & IORING_SETUP_SQPOLL) && !files) ||
++ (ctx->sq_data && ctx->sq_data->thread == current)) {
+ while (!list_empty_careful(&ctx->iopoll_list)) {
+ io_iopoll_try_reap_events(ctx);
+ ret = true;
+--
+2.30.1
+
--- /dev/null
+From 43dd9650163eeec38c0a556bbd2d0933a41d757d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Mar 2021 21:07:01 -0800
+Subject: memblock: fix section mismatch warning
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+[ Upstream commit 34dc2efb39a231280fd6696a59bbe712bf3c5c4a ]
+
+The inlining logic in clang-13 is rewritten to often not inline some
+functions that were inlined by all earlier compilers.
+
+In case of the memblock interfaces, this exposed a harmless bug of a
+missing __init annotation:
+
+WARNING: modpost: vmlinux.o(.text+0x507c0a): Section mismatch in reference from the function memblock_bottom_up() to the variable .meminit.data:memblock
+The function memblock_bottom_up() references
+the variable __meminitdata memblock.
+This is often because memblock_bottom_up lacks a __meminitdata
+annotation or the annotation of memblock is wrong.
+
+Interestingly, these annotations were present originally, but got removed
+with the explanation that the __init annotation prevents the function from
+getting inlined. I checked this again and found that while this is the
+case with clang, gcc (version 7 through 10, did not test others) does
+inline the functions regardless.
+
+As the previous change was apparently intended to help the clang builds,
+reverting it to help the newer clang versions seems appropriate as well.
+gcc builds don't seem to care either way.
+
+Link: https://lkml.kernel.org/r/20210225133808.2188581-1-arnd@kernel.org
+Fixes: 5bdba520c1b3 ("mm: memblock: drop __init from memblock functions to make it inline")
+Reference: 2cfb3665e864 ("include/linux/memblock.h: add __init to memblock_set_bottom_up()")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Mike Rapoport <rppt@linux.ibm.com>
+Cc: Nathan Chancellor <nathan@kernel.org>
+Cc: Nick Desaulniers <ndesaulniers@google.com>
+Cc: Faiyaz Mohammed <faiyazm@codeaurora.org>
+Cc: Baoquan He <bhe@redhat.com>
+Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Cc: Aslan Bakirov <aslan@fb.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/memblock.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/memblock.h b/include/linux/memblock.h
+index b93c44b9121e..7643d2dfa959 100644
+--- a/include/linux/memblock.h
++++ b/include/linux/memblock.h
+@@ -460,7 +460,7 @@ static inline void memblock_free_late(phys_addr_t base, phys_addr_t size)
+ /*
+ * Set the allocation direction to bottom-up or top-down.
+ */
+-static inline void memblock_set_bottom_up(bool enable)
++static inline __init void memblock_set_bottom_up(bool enable)
+ {
+ memblock.bottom_up = enable;
+ }
+@@ -470,7 +470,7 @@ static inline void memblock_set_bottom_up(bool enable)
+ * if this is true, that said, memblock will allocate memory
+ * in bottom-up direction.
+ */
+-static inline bool memblock_bottom_up(void)
++static inline __init bool memblock_bottom_up(void)
+ {
+ return memblock.bottom_up;
+ }
+--
+2.30.1
+
--- /dev/null
+From 47f1c38c57114895b898fae57d6eb073822198df Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 25 Feb 2021 18:57:21 +0200
+Subject: mlxsw: spectrum_router: Ignore routes using a deleted nexthop object
+
+From: Ido Schimmel <idosch@nvidia.com>
+
+[ Upstream commit dc860b88ce0a7ed9a048d5042cbb175daf60b657 ]
+
+Routes are currently processed from a workqueue whereas nexthop objects
+are processed in system call context. This can result in the driver not
+finding a suitable nexthop group for a route and issuing a warning [1].
+
+Fix this by ignoring such routes earlier in the process. The subsequent
+deletion notification will be ignored as well.
+
+[1]
+ WARNING: CPU: 2 PID: 7754 at drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c:4853 mlxsw_sp_router_fib_event_work+0x1112/0x1e00 [mlxsw_spectrum]
+ [...]
+ CPU: 2 PID: 7754 Comm: kworker/u8:0 Not tainted 5.11.0-rc6-cq-20210207-1 #16
+ Hardware name: Mellanox Technologies Ltd. MSN2100/SA001390, BIOS 5.6.5 05/24/2018
+ Workqueue: mlxsw_core_ordered mlxsw_sp_router_fib_event_work [mlxsw_spectrum]
+ RIP: 0010:mlxsw_sp_router_fib_event_work+0x1112/0x1e00 [mlxsw_spectrum]
+
+Fixes: cdd6cfc54c64 ("mlxsw: spectrum_router: Allow programming routes with nexthop objects")
+Signed-off-by: Ido Schimmel <idosch@nvidia.com>
+Reported-by: Alex Veber <alexve@nvidia.com>
+Tested-by: Alex Veber <alexve@nvidia.com>
+Reviewed-by: Petr Machata <petrm@nvidia.com>
+Reviewed-by: Jiri Pirko <jiri@nvidia.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+index 41424ee909a0..23d9fe18adba 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+@@ -5861,6 +5861,10 @@ mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
+ if (mlxsw_sp->router->aborted)
+ return 0;
+
++ if (fen_info->fi->nh &&
++ !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, fen_info->fi->nh->id))
++ return 0;
++
+ fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
+ &fen_info->dst, sizeof(fen_info->dst),
+ fen_info->dst_len,
+@@ -6511,6 +6515,9 @@ static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
+ if (mlxsw_sp_fib6_rt_should_ignore(rt))
+ return 0;
+
++ if (rt->nh && !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, rt->nh->id))
++ return 0;
++
+ fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
+ &rt->fib6_dst.addr,
+ sizeof(rt->fib6_dst.addr),
+--
+2.30.1
+
--- /dev/null
+From 620f3a130336df6784b1f6c184cbf7e4b84582aa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Mar 2021 13:32:10 -0800
+Subject: mptcp: fix memory accounting on allocation error
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+[ Upstream commit eaeef1ce55ec9161e0c44ff27017777b1644b421 ]
+
+In case of memory pressure the MPTCP xmit path keeps
+at most a single skb in the tx cache, eventually freeing
+additional ones.
+
+The associated counter for forward memory is not updated
+accordingly, and that causes the following splat:
+
+WARNING: CPU: 0 PID: 12 at net/core/stream.c:208 sk_stream_kill_queues+0x3ca/0x530 net/core/stream.c:208
+Modules linked in:
+CPU: 0 PID: 12 Comm: kworker/0:1 Not tainted 5.11.0-rc2 #59
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014
+Workqueue: events mptcp_worker
+RIP: 0010:sk_stream_kill_queues+0x3ca/0x530 net/core/stream.c:208
+Code: 03 0f b6 04 02 84 c0 74 08 3c 03 0f 8e 63 01 00 00 8b ab 00 01 00 00 e9 60 ff ff ff e8 2f 24 d3 fe 0f 0b eb 97 e8 26 24 d3 fe <0f> 0b eb a0 e8 1d 24 d3 fe 0f 0b e9 a5 fe ff ff 4c 89 e7 e8 0e d0
+RSP: 0018:ffffc900000c7bc8 EFLAGS: 00010293
+RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000
+RDX: ffff88810030ac40 RSI: ffffffff8262ca4a RDI: 0000000000000003
+RBP: 0000000000000d00 R08: 0000000000000000 R09: ffffffff85095aa7
+R10: ffffffff8262c9ea R11: 0000000000000001 R12: ffff888108908100
+R13: ffffffff85095aa0 R14: ffffc900000c7c48 R15: 1ffff92000018f85
+FS: 0000000000000000(0000) GS:ffff88811b200000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007fa7444baef8 CR3: 0000000035ee9005 CR4: 0000000000170ef0
+Call Trace:
+ __mptcp_destroy_sock+0x4a7/0x6c0 net/mptcp/protocol.c:2547
+ mptcp_worker+0x7dd/0x1610 net/mptcp/protocol.c:2272
+ process_one_work+0x896/0x1170 kernel/workqueue.c:2275
+ worker_thread+0x605/0x1350 kernel/workqueue.c:2421
+ kthread+0x344/0x410 kernel/kthread.c:292
+ ret_from_fork+0x22/0x30 arch/x86/entry/entry_64.S:296
+
+At close time, as reported by syzkaller/Christoph.
+
+This change addresses the issue by properly updating the fwd
+allocated memory counter in the error path.
+
+Reported-by: Christoph Paasch <cpaasch@apple.com>
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/136
+Fixes: 724cfd2ee8aa ("mptcp: allocate TX skbs in msk context")
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/protocol.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index de89824a2a36..056846eb2e5b 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -1176,6 +1176,7 @@ static bool mptcp_tx_cache_refill(struct sock *sk, int size,
+ */
+ while (skbs->qlen > 1) {
+ skb = __skb_dequeue_tail(skbs);
++ *total_ts -= skb->truesize;
+ __kfree_skb(skb);
+ }
+ return skbs->qlen > 0;
+--
+2.30.1
+
--- /dev/null
+From f8b51c21c53a41aaadaf04562be6752e64621c70 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Mar 2021 13:32:09 -0800
+Subject: mptcp: put subflow sock on connect error
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit f07157792c633b528de5fc1dbe2e4ea54f8e09d4 ]
+
+mptcp_add_pending_subflow() performs a sock_hold() on the subflow,
+then adds the subflow to the join list.
+
+Without a sock_put the subflow sk won't be freed in case connect() fails.
+
+unreferenced object 0xffff88810c03b100 (size 3000):
+[..]
+ sk_prot_alloc.isra.0+0x2f/0x110
+ sk_alloc+0x5d/0xc20
+ inet6_create+0x2b7/0xd30
+ __sock_create+0x17f/0x410
+ mptcp_subflow_create_socket+0xff/0x9c0
+ __mptcp_subflow_connect+0x1da/0xaf0
+ mptcp_pm_nl_work+0x6e0/0x1120
+ mptcp_worker+0x508/0x9a0
+
+Fixes: 5b950ff4331ddda ("mptcp: link MPC subflow into msk only after accept")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/subflow.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 81b7be67d288..c3090003a17b 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1174,6 +1174,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
+ spin_lock_bh(&msk->join_list_lock);
+ list_del(&subflow->node);
+ spin_unlock_bh(&msk->join_list_lock);
++ sock_put(mptcp_subflow_tcp_sock(subflow));
+
+ failed:
+ subflow->disposable = 1;
+--
+2.30.1
+
--- /dev/null
+From 765e6e4af62ed88ca52fd5eb58aeb363c94d4588 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 7 Mar 2021 19:11:02 -0800
+Subject: net: bonding: fix error return code of bond_neigh_init()
+
+From: Jia-Ju Bai <baijiaju1990@gmail.com>
+
+[ Upstream commit 2055a99da8a253a357bdfd359b3338ef3375a26c ]
+
+When slave is NULL or slave_ops->ndo_neigh_setup is NULL, no error
+return code of bond_neigh_init() is assigned.
+To fix this bug, ret is assigned with -EINVAL in these cases.
+
+Fixes: 9e99bfefdbce ("bonding: fix bond_neigh_init()")
+Reported-by: TOTE Robot <oslab@tsinghua.edu.cn>
+Signed-off-by: Jia-Ju Bai <baijiaju1990@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_main.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 5fe5232cc3f3..fba6b6d1b430 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3917,11 +3917,15 @@ static int bond_neigh_init(struct neighbour *n)
+
+ rcu_read_lock();
+ slave = bond_first_slave_rcu(bond);
+- if (!slave)
++ if (!slave) {
++ ret = -EINVAL;
+ goto out;
++ }
+ slave_ops = slave->dev->netdev_ops;
+- if (!slave_ops->ndo_neigh_setup)
++ if (!slave_ops->ndo_neigh_setup) {
++ ret = -EINVAL;
+ goto out;
++ }
+
+ /* TODO: find another way [1] to implement this.
+ * Passing a zeroed structure is fragile,
+--
+2.30.1
+
--- /dev/null
+From 572af19ff7c330008879f843271e48cebcc88e43 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 1 Mar 2021 15:09:44 +0000
+Subject: net: expand textsearch ts_state to fit skb_seq_state
+
+From: Willem de Bruijn <willemb@google.com>
+
+[ Upstream commit b228c9b058760500fda5edb3134527f629fc2dc3 ]
+
+The referenced commit expands the skb_seq_state used by
+skb_find_text with a 4B frag_off field, growing it to 48B.
+
+This exceeds container ts_state->cb, causing a stack corruption:
+
+[ 73.238353] Kernel panic - not syncing: stack-protector: Kernel stack
+is corrupted in: skb_find_text+0xc5/0xd0
+[ 73.247384] CPU: 1 PID: 376 Comm: nping Not tainted 5.11.0+ #4
+[ 73.252613] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996),
+BIOS 1.14.0-2 04/01/2014
+[ 73.260078] Call Trace:
+[ 73.264677] dump_stack+0x57/0x6a
+[ 73.267866] panic+0xf6/0x2b7
+[ 73.270578] ? skb_find_text+0xc5/0xd0
+[ 73.273964] __stack_chk_fail+0x10/0x10
+[ 73.277491] skb_find_text+0xc5/0xd0
+[ 73.280727] string_mt+0x1f/0x30
+[ 73.283639] ipt_do_table+0x214/0x410
+
+The struct is passed between skb_find_text and its callbacks
+skb_prepare_seq_read, skb_seq_read and skb_abort_seq read through
+the textsearch interface using TS_SKB_CB.
+
+I assumed that this mapped to skb->cb like other .._SKB_CB wrappers.
+skb->cb is 48B. But it maps to ts_state->cb, which is only 40B.
+
+skb->cb was increased from 40B to 48B after ts_state was introduced,
+in commit 3e3850e989c5 ("[NETFILTER]: Fix xfrm lookup in
+ip_route_me_harder/ip6_route_me_harder").
+
+Increase ts_state.cb[] to 48 to fit the struct.
+
+Also add a BUILD_BUG_ON to avoid a repeat.
+
+The alternative is to directly add a dependency from textsearch onto
+linux/skbuff.h, but I think the intent is textsearch to have no such
+dependencies on its callers.
+
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=211911
+Fixes: 97550f6fa592 ("net: compound page support in skb_seq_read")
+Reported-by: Kris Karas <bugs-a17@moonlit-rail.com>
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/textsearch.h | 2 +-
+ net/core/skbuff.c | 2 ++
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/include/linux/textsearch.h b/include/linux/textsearch.h
+index 13770cfe33ad..6673e4d4ac2e 100644
+--- a/include/linux/textsearch.h
++++ b/include/linux/textsearch.h
+@@ -23,7 +23,7 @@ struct ts_config;
+ struct ts_state
+ {
+ unsigned int offset;
+- char cb[40];
++ char cb[48];
+ };
+
+ /**
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 28b8242f18d7..2b784d62a9fe 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3622,6 +3622,8 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
+ struct ts_state state;
+ unsigned int ret;
+
++ BUILD_BUG_ON(sizeof(struct skb_seq_state) > sizeof(state.cb));
++
+ config->get_next_block = skb_ts_get_next_block;
+ config->finish = skb_ts_finish;
+
+--
+2.30.1
+
--- /dev/null
+From 99a39e19c113551b5b6267c97a78b7179ee802fa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 Mar 2021 11:55:49 -0800
+Subject: net: macb: Add default usrio config to default gem config
+
+From: Atish Patra <atish.patra@wdc.com>
+
+[ Upstream commit b12422362ce947098ac420ac3c975fc006af4c02 ]
+
+There is no usrio config defined for default gem config leading to
+a kernel panic devices that don't define a data. This issue can be
+reprdouced with microchip polar fire soc where compatible string
+is defined as "cdns,macb".
+
+Fixes: edac63861db7 ("add userio bits as platform configuration")
+
+Signed-off-by: Atish Patra <atish.patra@wdc.com>
+Acked-by: Nicolas Ferre <nicolas.ferre@microchip.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/cadence/macb_main.c | 15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index 814a5b10141d..07cdb38e7d11 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -3950,6 +3950,13 @@ static int macb_init(struct platform_device *pdev)
+ return 0;
+ }
+
++static const struct macb_usrio_config macb_default_usrio = {
++ .mii = MACB_BIT(MII),
++ .rmii = MACB_BIT(RMII),
++ .rgmii = GEM_BIT(RGMII),
++ .refclk = MACB_BIT(CLKEN),
++};
++
+ #if defined(CONFIG_OF)
+ /* 1518 rounded up */
+ #define AT91ETHER_MAX_RBUFF_SZ 0x600
+@@ -4435,13 +4442,6 @@ static int fu540_c000_init(struct platform_device *pdev)
+ return macb_init(pdev);
+ }
+
+-static const struct macb_usrio_config macb_default_usrio = {
+- .mii = MACB_BIT(MII),
+- .rmii = MACB_BIT(RMII),
+- .rgmii = GEM_BIT(RGMII),
+- .refclk = MACB_BIT(CLKEN),
+-};
+-
+ static const struct macb_usrio_config sama7g5_usrio = {
+ .mii = 0,
+ .rmii = 1,
+@@ -4590,6 +4590,7 @@ static const struct macb_config default_gem_config = {
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
++ .usrio = &macb_default_usrio,
+ .jumbo_max_len = 10240,
+ };
+
+--
+2.30.1
+
--- /dev/null
+From 262027e75ca6165d210e271e2cb4b3352e84b3e2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Feb 2021 17:30:20 +0200
+Subject: net: phy: ti: take into account all possible interrupt sources
+
+From: Ioana Ciornei <ioana.ciornei@nxp.com>
+
+[ Upstream commit 73f476aa1975bae6a792b340f5b26ffcfba869a6 ]
+
+The previous implementation of .handle_interrupt() did not take into
+account the fact that all the interrupt status registers should be
+acknowledged since multiple interrupt sources could be asserted.
+
+Fix this by reading all the status registers before exiting with
+IRQ_NONE or triggering the PHY state machine.
+
+Fixes: 1d1ae3c6ca3f ("net: phy: ti: implement generic .handle_interrupt() callback")
+Reported-by: Sven Schuchmann <schuchmann@schleissheimer.de>
+Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Link: https://lore.kernel.org/r/20210226153020.867852-1-ciorneiioana@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/dp83822.c | 9 +++++----
+ drivers/net/phy/dp83tc811.c | 11 ++++++-----
+ 2 files changed, 11 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
+index fff371ca1086..423952cb9e1c 100644
+--- a/drivers/net/phy/dp83822.c
++++ b/drivers/net/phy/dp83822.c
+@@ -290,6 +290,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
+
+ static irqreturn_t dp83822_handle_interrupt(struct phy_device *phydev)
+ {
++ bool trigger_machine = false;
+ int irq_status;
+
+ /* The MISR1 and MISR2 registers are holding the interrupt status in
+@@ -305,7 +306,7 @@ static irqreturn_t dp83822_handle_interrupt(struct phy_device *phydev)
+ return IRQ_NONE;
+ }
+ if (irq_status & ((irq_status & GENMASK(7, 0)) << 8))
+- goto trigger_machine;
++ trigger_machine = true;
+
+ irq_status = phy_read(phydev, MII_DP83822_MISR2);
+ if (irq_status < 0) {
+@@ -313,11 +314,11 @@ static irqreturn_t dp83822_handle_interrupt(struct phy_device *phydev)
+ return IRQ_NONE;
+ }
+ if (irq_status & ((irq_status & GENMASK(7, 0)) << 8))
+- goto trigger_machine;
++ trigger_machine = true;
+
+- return IRQ_NONE;
++ if (!trigger_machine)
++ return IRQ_NONE;
+
+-trigger_machine:
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c
+index 688fadffb249..7ea32fb77190 100644
+--- a/drivers/net/phy/dp83tc811.c
++++ b/drivers/net/phy/dp83tc811.c
+@@ -264,6 +264,7 @@ static int dp83811_config_intr(struct phy_device *phydev)
+
+ static irqreturn_t dp83811_handle_interrupt(struct phy_device *phydev)
+ {
++ bool trigger_machine = false;
+ int irq_status;
+
+ /* The INT_STAT registers 1, 2 and 3 are holding the interrupt status
+@@ -279,7 +280,7 @@ static irqreturn_t dp83811_handle_interrupt(struct phy_device *phydev)
+ return IRQ_NONE;
+ }
+ if (irq_status & ((irq_status & GENMASK(7, 0)) << 8))
+- goto trigger_machine;
++ trigger_machine = true;
+
+ irq_status = phy_read(phydev, MII_DP83811_INT_STAT2);
+ if (irq_status < 0) {
+@@ -287,7 +288,7 @@ static irqreturn_t dp83811_handle_interrupt(struct phy_device *phydev)
+ return IRQ_NONE;
+ }
+ if (irq_status & ((irq_status & GENMASK(7, 0)) << 8))
+- goto trigger_machine;
++ trigger_machine = true;
+
+ irq_status = phy_read(phydev, MII_DP83811_INT_STAT3);
+ if (irq_status < 0) {
+@@ -295,11 +296,11 @@ static irqreturn_t dp83811_handle_interrupt(struct phy_device *phydev)
+ return IRQ_NONE;
+ }
+ if (irq_status & ((irq_status & GENMASK(7, 0)) << 8))
+- goto trigger_machine;
++ trigger_machine = true;
+
+- return IRQ_NONE;
++ if (!trigger_machine)
++ return IRQ_NONE;
+
+-trigger_machine:
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+--
+2.30.1
+
--- /dev/null
+From 360fe16622f795dbe08c1e33f8de306cd85f2f5a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 8 Mar 2021 14:42:52 -0500
+Subject: NFS: Don't gratuitously clear the inode cache when lookup failed
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+[ Upstream commit 47397915ede0192235474b145ebcd81b37b03624 ]
+
+The fact that the lookup revalidation failed, does not mean that the
+inode contents have changed.
+
+Fixes: 5ceb9d7fdaaf ("NFS: Refactor nfs_lookup_revalidate()")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/dir.c | 20 ++++++++------------
+ 1 file changed, 8 insertions(+), 12 deletions(-)
+
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 7bcc6fcf1096..4db3018776f6 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1444,18 +1444,14 @@ nfs_lookup_revalidate_done(struct inode *dir, struct dentry *dentry,
+ __func__, dentry);
+ return 1;
+ case 0:
+- if (inode && S_ISDIR(inode->i_mode)) {
+- /* Purge readdir caches. */
+- nfs_zap_caches(inode);
+- /*
+- * We can't d_drop the root of a disconnected tree:
+- * its d_hash is on the s_anon list and d_drop() would hide
+- * it from shrink_dcache_for_unmount(), leading to busy
+- * inodes on unmount and further oopses.
+- */
+- if (IS_ROOT(dentry))
+- return 1;
+- }
++ /*
++ * We can't d_drop the root of a disconnected tree:
++ * its d_hash is on the s_anon list and d_drop() would hide
++ * it from shrink_dcache_for_unmount(), leading to busy
++ * inodes on unmount and further oopses.
++ */
++ if (inode && IS_ROOT(dentry))
++ return 1;
+ dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is invalid\n",
+ __func__, dentry);
+ return 0;
+--
+2.30.1
+
--- /dev/null
+From 9d9f6ad45143661b3a7fe02f77a914aebc3b01c0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 8 Mar 2021 14:42:51 -0500
+Subject: NFS: Don't revalidate the directory permissions on a lookup failure
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+[ Upstream commit 82e7ca1334ab16e2e04fafded1cab9dfcdc11b40 ]
+
+There should be no reason to expect the directory permissions to change
+just because the directory contents changed or a negative lookup timed
+out. So let's avoid doing a full call to nfs_mark_for_revalidate() in
+that case.
+Furthermore, if this is a negative dentry, and we haven't actually done
+a new lookup, then we have no reason yet to believe the directory has
+changed at all. So let's remove the gratuitous directory inode
+invalidation altogether when called from
+nfs_lookup_revalidate_negative().
+
+Reported-by: Geert Jansen <gerardu@amazon.com>
+Fixes: 5ceb9d7fdaaf ("NFS: Refactor nfs_lookup_revalidate()")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/dir.c | 20 +++++++++++++++++---
+ 1 file changed, 17 insertions(+), 3 deletions(-)
+
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index ef827ae193d2..7bcc6fcf1096 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1401,6 +1401,15 @@ int nfs_lookup_verify_inode(struct inode *inode, unsigned int flags)
+ goto out;
+ }
+
++static void nfs_mark_dir_for_revalidate(struct inode *inode)
++{
++ struct nfs_inode *nfsi = NFS_I(inode);
++
++ spin_lock(&inode->i_lock);
++ nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE;
++ spin_unlock(&inode->i_lock);
++}
++
+ /*
+ * We judge how long we want to trust negative
+ * dentries by looking at the parent inode mtime.
+@@ -1435,7 +1444,6 @@ nfs_lookup_revalidate_done(struct inode *dir, struct dentry *dentry,
+ __func__, dentry);
+ return 1;
+ case 0:
+- nfs_mark_for_revalidate(dir);
+ if (inode && S_ISDIR(inode->i_mode)) {
+ /* Purge readdir caches. */
+ nfs_zap_caches(inode);
+@@ -1525,6 +1533,13 @@ nfs_lookup_revalidate_dentry(struct inode *dir, struct dentry *dentry,
+ nfs_free_fattr(fattr);
+ nfs_free_fhandle(fhandle);
+ nfs4_label_free(label);
++
++ /*
++ * If the lookup failed despite the dentry change attribute being
++ * a match, then we should revalidate the directory cache.
++ */
++ if (!ret && nfs_verify_change_attribute(dir, dentry->d_time))
++ nfs_mark_dir_for_revalidate(dir);
+ return nfs_lookup_revalidate_done(dir, dentry, inode, ret);
+ }
+
+@@ -1567,7 +1582,7 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
+ error = nfs_lookup_verify_inode(inode, flags);
+ if (error) {
+ if (error == -ESTALE)
+- nfs_zap_caches(dir);
++ nfs_mark_dir_for_revalidate(dir);
+ goto out_bad;
+ }
+ nfs_advise_use_readdirplus(dir);
+@@ -2064,7 +2079,6 @@ nfs_add_or_obtain(struct dentry *dentry, struct nfs_fh *fhandle,
+ dput(parent);
+ return d;
+ out_error:
+- nfs_mark_for_revalidate(dir);
+ d = ERR_PTR(error);
+ goto out;
+ }
+--
+2.30.1
+
--- /dev/null
+From ec3b2681ccbe25be791e79280d574d22bae1c402 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Jan 2021 18:43:56 +0100
+Subject: NFSv4.2: fix return value of _nfs4_get_security_label()
+
+From: Ondrej Mosnacek <omosnace@redhat.com>
+
+[ Upstream commit 53cb245454df5b13d7063162afd7a785aed6ebf2 ]
+
+An xattr 'get' handler is expected to return the length of the value on
+success, yet _nfs4_get_security_label() (and consequently also
+nfs4_xattr_get_nfs4_label(), which is used as an xattr handler) returns
+just 0 on success.
+
+Fix this by returning label.len instead, which contains the length of
+the result.
+
+Fixes: aa9c2669626c ("NFS: Client implementation of Labeled-NFS")
+Signed-off-by: Ondrej Mosnacek <omosnace@redhat.com>
+Reviewed-by: James Morris <jamorris@linux.microsoft.com>
+Reviewed-by: Paul Moore <paul@paul-moore.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/nfs4proc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index fc8bbfd9beb3..7eb44f37558c 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5972,7 +5972,7 @@ static int _nfs4_get_security_label(struct inode *inode, void *buf,
+ return ret;
+ if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
+ return -ENOENT;
+- return 0;
++ return label.len;
+ }
+
+ static int nfs4_get_security_label(struct inode *inode, void *buf,
+--
+2.30.1
+
--- /dev/null
+From ea3a19de46e7c6126e569d5c6e2fbcd8c1a88d1a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 8 Mar 2021 16:51:26 -0800
+Subject: nvme-fc: fix racing controller reset and create association
+
+From: James Smart <jsmart2021@gmail.com>
+
+[ Upstream commit f20ef34d71abc1fc56b322aaa251f90f94320140 ]
+
+Recent patch to prevent calling __nvme_fc_abort_outstanding_ios in
+interrupt context results in a possible race condition. A controller
+reset results in errored io completions, which schedules error
+work. The change of error work to a work element allows it to fire
+after the ctrl state transition to NVME_CTRL_CONNECTING, causing
+any outstanding io (used to initialize the controller) to fail and
+cause problems for connect_work.
+
+Add a state check to only schedule error work if not in the RESETTING
+state.
+
+Fixes: 19fce0470f05 ("nvme-fc: avoid calling _nvme_fc_abort_outstanding_ios from interrupt context")
+Signed-off-by: Nigel Kirkland <nkirkland2304@gmail.com>
+Signed-off-by: James Smart <jsmart2021@gmail.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/fc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index 5f36cfa8136c..7ec6869b3e5b 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -2055,7 +2055,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
+ nvme_fc_complete_rq(rq);
+
+ check_error:
+- if (terminate_assoc)
++ if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING)
+ queue_work(nvme_reset_wq, &ctrl->ioerr_work);
+ }
+
+--
+2.30.1
+
--- /dev/null
+From 1b3b3ba743cae2df7c8920256c1f0f50c6187af6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Mar 2021 08:04:21 +0000
+Subject: perf/arm_dmc620_pmu: Fix error return code in
+ dmc620_pmu_device_probe()
+
+From: Wei Yongjun <weiyongjun1@huawei.com>
+
+[ Upstream commit c8e3866836528a4ba3b0535834f03768d74f7d8e ]
+
+Fix to return negative error code -ENOMEM from the error handling
+case instead of 0, as done elsewhere in this function.
+
+Fixes: 53c218da220c ("driver/perf: Add PMU driver for the ARM DMC-620 memory controller")
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
+Link: https://lore.kernel.org/r/20210312080421.277562-1-weiyongjun1@huawei.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/perf/arm_dmc620_pmu.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c
+index 004930eb4bbb..b50b47f1a0d9 100644
+--- a/drivers/perf/arm_dmc620_pmu.c
++++ b/drivers/perf/arm_dmc620_pmu.c
+@@ -681,6 +681,7 @@ static int dmc620_pmu_device_probe(struct platform_device *pdev)
+ if (!name) {
+ dev_err(&pdev->dev,
+ "Create name failed, PMU @%pa\n", &res->start);
++ ret = -ENOMEM;
+ goto out_teardown_dev;
+ }
+
+--
+2.30.1
+
--- /dev/null
+From 94c2262931c37a34457a576c3104e8f0af2d992b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 Nov 2020 11:38:40 -0800
+Subject: perf/core: Flush PMU internal buffers for per-CPU events
+
+From: Kan Liang <kan.liang@linux.intel.com>
+
+[ Upstream commit a5398bffc01fe044848c5024e5e867e407f239b8 ]
+
+Sometimes the PMU internal buffers have to be flushed for per-CPU events
+during a context switch, e.g., large PEBS. Otherwise, the perf tool may
+report samples in locations that do not belong to the process where the
+samples are processed in, because PEBS does not tag samples with PID/TID.
+
+The current code only flush the buffers for a per-task event. It doesn't
+check a per-CPU event.
+
+Add a new event state flag, PERF_ATTACH_SCHED_CB, to indicate that the
+PMU internal buffers have to be flushed for this event during a context
+switch.
+
+Add sched_cb_entry and perf_sched_cb_usages back to track the PMU/cpuctx
+which is required to be flushed.
+
+Only need to invoke the sched_task() for per-CPU events in this patch.
+The per-task events have been handled in perf_event_context_sched_in/out
+already.
+
+Fixes: 9c964efa4330 ("perf/x86/intel: Drain the PEBS buffer during context switches")
+Reported-by: Gabriel Marin <gmx@google.com>
+Originally-by: Namhyung Kim <namhyung@kernel.org>
+Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Link: https://lkml.kernel.org/r/20201130193842.10569-1-kan.liang@linux.intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/perf_event.h | 2 ++
+ kernel/events/core.c | 42 ++++++++++++++++++++++++++++++++++----
+ 2 files changed, 40 insertions(+), 4 deletions(-)
+
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 9a38f579bc76..419a4d77de00 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -606,6 +606,7 @@ struct swevent_hlist {
+ #define PERF_ATTACH_TASK 0x04
+ #define PERF_ATTACH_TASK_DATA 0x08
+ #define PERF_ATTACH_ITRACE 0x10
++#define PERF_ATTACH_SCHED_CB 0x20
+
+ struct perf_cgroup;
+ struct perf_buffer;
+@@ -872,6 +873,7 @@ struct perf_cpu_context {
+ struct list_head cgrp_cpuctx_entry;
+ #endif
+
++ struct list_head sched_cb_entry;
+ int sched_cb_usage;
+
+ int online;
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 55d18791a72d..8425dbc1d239 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -385,6 +385,7 @@ static DEFINE_MUTEX(perf_sched_mutex);
+ static atomic_t perf_sched_count;
+
+ static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
++static DEFINE_PER_CPU(int, perf_sched_cb_usages);
+ static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);
+
+ static atomic_t nr_mmap_events __read_mostly;
+@@ -3474,11 +3475,16 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
+ }
+ }
+
++static DEFINE_PER_CPU(struct list_head, sched_cb_list);
++
+ void perf_sched_cb_dec(struct pmu *pmu)
+ {
+ struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+
+- --cpuctx->sched_cb_usage;
++ this_cpu_dec(perf_sched_cb_usages);
++
++ if (!--cpuctx->sched_cb_usage)
++ list_del(&cpuctx->sched_cb_entry);
+ }
+
+
+@@ -3486,7 +3492,10 @@ void perf_sched_cb_inc(struct pmu *pmu)
+ {
+ struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+
+- cpuctx->sched_cb_usage++;
++ if (!cpuctx->sched_cb_usage++)
++ list_add(&cpuctx->sched_cb_entry, this_cpu_ptr(&sched_cb_list));
++
++ this_cpu_inc(perf_sched_cb_usages);
+ }
+
+ /*
+@@ -3515,6 +3524,24 @@ static void __perf_pmu_sched_task(struct perf_cpu_context *cpuctx, bool sched_in
+ perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
+ }
+
++static void perf_pmu_sched_task(struct task_struct *prev,
++ struct task_struct *next,
++ bool sched_in)
++{
++ struct perf_cpu_context *cpuctx;
++
++ if (prev == next)
++ return;
++
++ list_for_each_entry(cpuctx, this_cpu_ptr(&sched_cb_list), sched_cb_entry) {
++ /* will be handled in perf_event_context_sched_in/out */
++ if (cpuctx->task_ctx)
++ continue;
++
++ __perf_pmu_sched_task(cpuctx, sched_in);
++ }
++}
++
+ static void perf_event_switch(struct task_struct *task,
+ struct task_struct *next_prev, bool sched_in);
+
+@@ -3537,6 +3564,9 @@ void __perf_event_task_sched_out(struct task_struct *task,
+ {
+ int ctxn;
+
++ if (__this_cpu_read(perf_sched_cb_usages))
++ perf_pmu_sched_task(task, next, false);
++
+ if (atomic_read(&nr_switch_events))
+ perf_event_switch(task, next, false);
+
+@@ -3845,6 +3875,9 @@ void __perf_event_task_sched_in(struct task_struct *prev,
+
+ if (atomic_read(&nr_switch_events))
+ perf_event_switch(task, prev, true);
++
++ if (__this_cpu_read(perf_sched_cb_usages))
++ perf_pmu_sched_task(prev, task, true);
+ }
+
+ static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
+@@ -4669,7 +4702,7 @@ static void unaccount_event(struct perf_event *event)
+ if (event->parent)
+ return;
+
+- if (event->attach_state & PERF_ATTACH_TASK)
++ if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB))
+ dec = true;
+ if (event->attr.mmap || event->attr.mmap_data)
+ atomic_dec(&nr_mmap_events);
+@@ -11168,7 +11201,7 @@ static void account_event(struct perf_event *event)
+ if (event->parent)
+ return;
+
+- if (event->attach_state & PERF_ATTACH_TASK)
++ if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB))
+ inc = true;
+ if (event->attr.mmap || event->attr.mmap_data)
+ atomic_inc(&nr_mmap_events);
+@@ -12960,6 +12993,7 @@ static void __init perf_event_init_all_cpus(void)
+ #ifdef CONFIG_CGROUP_PERF
+ INIT_LIST_HEAD(&per_cpu(cgrp_cpuctx_list, cpu));
+ #endif
++ INIT_LIST_HEAD(&per_cpu(sched_cb_list, cpu));
+ }
+ }
+
+--
+2.30.1
+
--- /dev/null
+From 23149f8d33137605095d5ea5d938054a4354fc6c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 Nov 2020 11:38:41 -0800
+Subject: perf/x86/intel: Set PERF_ATTACH_SCHED_CB for large PEBS and LBR
+
+From: Kan Liang <kan.liang@linux.intel.com>
+
+[ Upstream commit afbef30149587ad46f4780b1e0cc5e219745ce90 ]
+
+To supply a PID/TID for large PEBS, it requires flushing the PEBS buffer
+in a context switch.
+
+For normal LBRs, a context switch can flip the address space and LBR
+entries are not tagged with an identifier, we need to wipe the LBR, even
+for per-cpu events.
+
+For LBR callstack, save/restore the stack is required during a context
+switch.
+
+Set PERF_ATTACH_SCHED_CB for the event with large PEBS & LBR.
+
+Fixes: 9c964efa4330 ("perf/x86/intel: Drain the PEBS buffer during context switches")
+Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Link: https://lkml.kernel.org/r/20201130193842.10569-2-kan.liang@linux.intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/events/intel/core.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 4faaef3a8f6c..d3f5cf70c1a0 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -3578,8 +3578,10 @@ static int intel_pmu_hw_config(struct perf_event *event)
+ if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
+ event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
+ if (!(event->attr.sample_type &
+- ~intel_pmu_large_pebs_flags(event)))
++ ~intel_pmu_large_pebs_flags(event))) {
+ event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
++ event->attach_state |= PERF_ATTACH_SCHED_CB;
++ }
+ }
+ if (x86_pmu.pebs_aliases)
+ x86_pmu.pebs_aliases(event);
+@@ -3592,6 +3594,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
+ ret = intel_pmu_setup_lbr_filter(event);
+ if (ret)
+ return ret;
++ event->attach_state |= PERF_ATTACH_SCHED_CB;
+
+ /*
+ * BTS is set up earlier in this path, so don't account twice
+--
+2.30.1
+
--- /dev/null
+From 7bf5c09d8866beb6c2a5b71c8f2ef92cb9393696 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 25 Feb 2021 14:09:59 +1100
+Subject: powerpc/64s/exception: Clean up a missed SRR specifier
+
+From: Daniel Axtens <dja@axtens.net>
+
+[ Upstream commit c080a173301ffc62cb6c76308c803c7fee05517a ]
+
+Nick's patch cleaning up the SRR specifiers in exception-64s.S missed
+a single instance of EXC_HV_OR_STD. Clean that up.
+
+Caught by clang's integrated assembler.
+
+Fixes: 3f7fbd97d07d ("powerpc/64s/exception: Clean up SRR specifiers")
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Acked-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20210225031006.1204774-2-dja@axtens.net
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kernel/exceptions-64s.S | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index 6e53f7638737..de988770a7e4 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -470,7 +470,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
+
+ ld r10,PACAKMSR(r13) /* get MSR value for kernel */
+ /* MSR[RI] is clear iff using SRR regs */
+- .if IHSRR == EXC_HV_OR_STD
++ .if IHSRR_IF_HVMODE
+ BEGIN_FTR_SECTION
+ xori r10,r10,MSR_RI
+ END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
+--
+2.30.1
+
--- /dev/null
+From 22a7402d7e7b8ac89e752a91505da72ed52d29a1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 25 Feb 2021 14:19:46 +1100
+Subject: powerpc/sstep: Fix VSX instruction emulation
+
+From: Jordan Niethe <jniethe5@gmail.com>
+
+[ Upstream commit 5c88a17e15795226b56d83f579cbb9b7a4864f79 ]
+
+Commit af99da74333b ("powerpc/sstep: Support VSX vector paired storage
+access instructions") added loading and storing 32 word long data into
+adjacent VSRs. However the calculation used to determine if two VSRs
+needed to be loaded/stored inadvertently prevented the load/storing
+taking place for instructions with a data length less than 16 words.
+
+This causes the emulation to not function correctly, which can be seen
+by the alignment_handler selftest:
+
+$ ./alignment_handler
+[snip]
+test: test_alignment_handler_vsx_207
+tags: git_version:powerpc-5.12-1-0-g82d2c16b350f
+VSX: 2.07B
+ Doing lxsspx: PASSED
+ Doing lxsiwax: FAILED: Wrong Data
+ Doing lxsiwzx: PASSED
+ Doing stxsspx: PASSED
+ Doing stxsiwx: PASSED
+failure: test_alignment_handler_vsx_207
+test: test_alignment_handler_vsx_300
+tags: git_version:powerpc-5.12-1-0-g82d2c16b350f
+VSX: 3.00B
+ Doing lxsd: PASSED
+ Doing lxsibzx: PASSED
+ Doing lxsihzx: PASSED
+ Doing lxssp: FAILED: Wrong Data
+ Doing lxv: PASSED
+ Doing lxvb16x: PASSED
+ Doing lxvh8x: PASSED
+ Doing lxvx: PASSED
+ Doing lxvwsx: FAILED: Wrong Data
+ Doing lxvl: PASSED
+ Doing lxvll: PASSED
+ Doing stxsd: PASSED
+ Doing stxsibx: PASSED
+ Doing stxsihx: PASSED
+ Doing stxssp: PASSED
+ Doing stxv: PASSED
+ Doing stxvb16x: PASSED
+ Doing stxvh8x: PASSED
+ Doing stxvx: PASSED
+ Doing stxvl: PASSED
+ Doing stxvll: PASSED
+failure: test_alignment_handler_vsx_300
+[snip]
+
+Fix this by making sure all VSX instruction emulation correctly
+load/store from the VSRs.
+
+Fixes: af99da74333b ("powerpc/sstep: Support VSX vector paired storage access instructions")
+Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
+Reviewed-by: Ravi Bangoria <ravi.bangoria@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20210225031946.1458206-1-jniethe5@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/lib/sstep.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
+index bb5c20d4ca91..c6aebc149d14 100644
+--- a/arch/powerpc/lib/sstep.c
++++ b/arch/powerpc/lib/sstep.c
+@@ -904,7 +904,7 @@ static nokprobe_inline int do_vsx_load(struct instruction_op *op,
+ if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs))
+ return -EFAULT;
+
+- nr_vsx_regs = size / sizeof(__vector128);
++ nr_vsx_regs = max(1ul, size / sizeof(__vector128));
+ emulate_vsx_load(op, buf, mem, cross_endian);
+ preempt_disable();
+ if (reg < 32) {
+@@ -951,7 +951,7 @@ static nokprobe_inline int do_vsx_store(struct instruction_op *op,
+ if (!address_ok(regs, ea, size))
+ return -EFAULT;
+
+- nr_vsx_regs = size / sizeof(__vector128);
++ nr_vsx_regs = max(1ul, size / sizeof(__vector128));
+ preempt_disable();
+ if (reg < 32) {
+ /* FP regs + extensions */
+--
+2.30.1
+
--- /dev/null
+From 4c2dea371361044b4b321e251f65fbaa9bda03ff Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 14 Mar 2021 23:51:14 +0300
+Subject: prctl: fix PR_SET_MM_AUXV kernel stack leak
+
+From: Alexey Dobriyan <adobriyan@gmail.com>
+
+[ Upstream commit c995f12ad8842dbf5cfed113fb52cdd083f5afd1 ]
+
+Doing a
+
+ prctl(PR_SET_MM, PR_SET_MM_AUXV, addr, 1);
+
+will copy 1 byte from userspace to (quite big) on-stack array
+and then stash everything to mm->saved_auxv.
+AT_NULL terminator will be inserted at the very end.
+
+/proc/*/auxv handler will find that AT_NULL terminator
+and copy original stack contents to userspace.
+
+This devious scheme requires CAP_SYS_RESOURCE.
+
+Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sys.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/sys.c b/kernel/sys.c
+index 51f00fe20e4d..7cf21c947649 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -2080,7 +2080,7 @@ static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
+ * up to the caller to provide sane values here, otherwise userspace
+ * tools which use this vector might be unhappy.
+ */
+- unsigned long user_auxv[AT_VECTOR_SIZE];
++ unsigned long user_auxv[AT_VECTOR_SIZE] = {};
+
+ if (len > sizeof(user_auxv))
+ return -EINVAL;
+--
+2.30.1
+
--- /dev/null
+From 14b1d75bc04780a166734b9b6e53beb7e5e00d14 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 9 Mar 2021 15:21:18 +0100
+Subject: seqlock,lockdep: Fix seqcount_latch_init()
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 4817a52b306136c8b2b2271d8770401441e4cf79 ]
+
+seqcount_init() must be a macro in order to preserve the static
+variable that is used for the lockdep key. Don't then wrap it in an
+inline function, which destroys that.
+
+Luckily there aren't many users of this function, but fix it before it
+becomes a problem.
+
+Fixes: 80793c3471d9 ("seqlock: Introduce seqcount_latch_t")
+Reported-by: Eric Dumazet <eric.dumazet@gmail.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/YEeFEbNUVkZaXDp4@hirez.programming.kicks-ass.net
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/seqlock.h | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
+index 2f7bb92b4c9e..f61e34fbaaea 100644
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -664,10 +664,7 @@ typedef struct {
+ * seqcount_latch_init() - runtime initializer for seqcount_latch_t
+ * @s: Pointer to the seqcount_latch_t instance
+ */
+-static inline void seqcount_latch_init(seqcount_latch_t *s)
+-{
+- seqcount_init(&s->seqcount);
+-}
++#define seqcount_latch_init(s) seqcount_init(&(s)->seqcount)
+
+ /**
+ * raw_read_seqcount_latch() - pick even/odd latch data copy
+--
+2.30.1
+
staging-comedi-me4000-fix-endian-problem-for-ai-command-data.patch
staging-comedi-pcl711-fix-endian-problem-for-ai-command-data.patch
staging-comedi-pcl818-fix-endian-problem-for-ai-command-data.patch
+mlxsw-spectrum_router-ignore-routes-using-a-deleted-.patch
+net-phy-ti-take-into-account-all-possible-interrupt-.patch
+sh_eth-fix-trscer-mask-for-r7s72100.patch
+powerpc-sstep-fix-vsx-instruction-emulation.patch
+net-macb-add-default-usrio-config-to-default-gem-con.patch
+cpufreq-qcom-hw-fix-dereferencing-freed-memory-data.patch
+cpufreq-qcom-hw-fix-return-value-check-in-qcom_cpufr.patch
+arm64-mm-fix-pfn_valid-for-zone_device-based-memory.patch
+net-bonding-fix-error-return-code-of-bond_neigh_init.patch
+sunrpc-set-memalloc_nofs_save-for-sync-tasks.patch
+nfs-don-t-revalidate-the-directory-permissions-on-a-.patch
+nfs-don-t-gratuitously-clear-the-inode-cache-when-lo.patch
+nfsv4.2-fix-return-value-of-_nfs4_get_security_label.patch
+block-rsxx-fix-error-return-code-of-rsxx_pci_probe.patch
+drm-ttm-fix-ttm-page-pool-accounting.patch
+nvme-fc-fix-racing-controller-reset-and-create-assoc.patch
+configfs-fix-a-use-after-free-in-__configfs_open_fil.patch
+arm64-mm-use-a-48-bit-id-map-when-possible-on-52-bit.patch
+io_uring-perform-iopoll-reaping-if-canceler-is-threa.patch
+drm-nouveau-fix-dma-syncing-for-loops-v2.patch
+perf-arm_dmc620_pmu-fix-error-return-code-in-dmc620_.patch
+net-expand-textsearch-ts_state-to-fit-skb_seq_state.patch
+mptcp-put-subflow-sock-on-connect-error.patch
+mptcp-fix-memory-accounting-on-allocation-error.patch
+perf-core-flush-pmu-internal-buffers-for-per-cpu-eve.patch
+perf-x86-intel-set-perf_attach_sched_cb-for-large-pe.patch
+hrtimer-update-softirq_expires_next-correctly-after-.patch
+powerpc-64s-exception-clean-up-a-missed-srr-specifie.patch
+seqlock-lockdep-fix-seqcount_latch_init.patch
+memblock-fix-section-mismatch-warning.patch
+stop_machine-mark-helpers-__always_inline.patch
+include-linux-sched-mm.h-use-rcu_dereference-in-in_v.patch
+prctl-fix-pr_set_mm_auxv-kernel-stack-leak.patch
--- /dev/null
+From 4432836b306b7551af582015496fee25bc023618 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 28 Feb 2021 23:26:34 +0300
+Subject: sh_eth: fix TRSCER mask for R7S72100
+
+From: Sergey Shtylyov <s.shtylyov@omprussia.ru>
+
+[ Upstream commit 75be7fb7f978202c4c3a1a713af4485afb2ff5f6 ]
+
+According to the RZ/A1H Group, RZ/A1M Group User's Manual: Hardware,
+Rev. 4.00, the TRSCER register has bit 9 reserved, hence we can't use
+the driver's default TRSCER mask. Add the explicit initializer for
+sh_eth_cpu_data::trscer_err_mask for R7S72100.
+
+Fixes: db893473d313 ("sh_eth: Add support for r7s72100")
+Signed-off-by: Sergey Shtylyov <s.shtylyov@omprussia.ru>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/renesas/sh_eth.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index 1dfecfd938cf..f029c7c03804 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -560,6 +560,8 @@ static struct sh_eth_cpu_data r7s72100_data = {
+ EESR_TDE,
+ .fdr_value = 0x0000070f,
+
++ .trscer_err_mask = DESC_I_RINT8 | DESC_I_RINT5,
++
+ .no_psr = 1,
+ .apr = 1,
+ .mpr = 1,
+--
+2.30.1
+
--- /dev/null
+From 2ae64071e4d641845c0ed45f039e4772d77539a9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Mar 2021 21:07:04 -0800
+Subject: stop_machine: mark helpers __always_inline
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+[ Upstream commit cbf78d85079cee662c45749ef4f744d41be85d48 ]
+
+With clang-13, some functions only get partially inlined, with a
+specialized version referring to a global variable. This triggers a
+harmless build-time check for the intel-rng driver:
+
+WARNING: modpost: drivers/char/hw_random/intel-rng.o(.text+0xe): Section mismatch in reference from the function stop_machine() to the function .init.text:intel_rng_hw_init()
+The function stop_machine() references
+the function __init intel_rng_hw_init().
+This is often because stop_machine lacks a __init
+annotation or the annotation of intel_rng_hw_init is wrong.
+
+In this instance, an easy workaround is to force the stop_machine()
+function to be inline, along with related interfaces that did not show the
+same behavior at the moment, but theoretically could.
+
+The combination of the two patches listed below triggers the behavior in
+clang-13, but individually these commits are correct.
+
+Link: https://lkml.kernel.org/r/20210225130153.1956990-1-arnd@kernel.org
+Fixes: fe5595c07400 ("stop_machine: Provide stop_machine_cpuslocked()")
+Fixes: ee527cd3a20c ("Use stop_machine_run in the Intel RNG driver")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Cc: Nathan Chancellor <nathan@kernel.org>
+Cc: Nick Desaulniers <ndesaulniers@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Cc: "Paul E. McKenney" <paulmck@kernel.org>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Prarit Bhargava <prarit@redhat.com>
+Cc: Daniel Bristot de Oliveira <bristot@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Valentin Schneider <valentin.schneider@arm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/stop_machine.h | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
+index 30577c3aecf8..46fb3ebdd16e 100644
+--- a/include/linux/stop_machine.h
++++ b/include/linux/stop_machine.h
+@@ -128,7 +128,7 @@ int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
+ const struct cpumask *cpus);
+ #else /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
+
+-static inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
++static __always_inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
+ const struct cpumask *cpus)
+ {
+ unsigned long flags;
+@@ -139,14 +139,15 @@ static inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
+ return ret;
+ }
+
+-static inline int stop_machine(cpu_stop_fn_t fn, void *data,
+- const struct cpumask *cpus)
++static __always_inline int
++stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
+ {
+ return stop_machine_cpuslocked(fn, data, cpus);
+ }
+
+-static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
+- const struct cpumask *cpus)
++static __always_inline int
++stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
++ const struct cpumask *cpus)
+ {
+ return stop_machine(fn, data, cpus);
+ }
+--
+2.30.1
+
--- /dev/null
+From d1e7558b23e0ca373b4cdc8c79d0b78fea8b363a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 Mar 2021 08:47:16 -0500
+Subject: SUNRPC: Set memalloc_nofs_save() for sync tasks
+
+From: Benjamin Coddington <bcodding@redhat.com>
+
+[ Upstream commit f0940f4b3284a00f38a5d42e6067c2aaa20e1f2e ]
+
+We could recurse into NFS doing memory reclaim while sending a sync task,
+which might result in a deadlock. Set memalloc_nofs_save for sync task
+execution.
+
+Fixes: a1231fda7e94 ("SUNRPC: Set memalloc_nofs_save() on all rpciod/xprtiod jobs")
+Signed-off-by: Benjamin Coddington <bcodding@redhat.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sunrpc/sched.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index cf702a5f7fe5..39ed0e0afe6d 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -963,8 +963,11 @@ void rpc_execute(struct rpc_task *task)
+
+ rpc_set_active(task);
+ rpc_make_runnable(rpciod_workqueue, task);
+- if (!is_async)
++ if (!is_async) {
++ unsigned int pflags = memalloc_nofs_save();
+ __rpc_execute(task);
++ memalloc_nofs_restore(pflags);
++ }
+ }
+
+ static void rpc_async_schedule(struct work_struct *work)
+--
+2.30.1
+