--- /dev/null
+From 75df529bec9110dad43ab30e2d9490242529e8b8 Mon Sep 17 00:00:00 2001
+From: Andrew Jones <drjones@redhat.com>
+Date: Wed, 16 Sep 2020 17:45:30 +0200
+Subject: arm64: paravirt: Initialize steal time when cpu is online
+
+From: Andrew Jones <drjones@redhat.com>
+
+commit 75df529bec9110dad43ab30e2d9490242529e8b8 upstream.
+
+Steal time initialization requires mapping a memory region which
+invokes a memory allocation. Doing this at CPU starting time results
+in the following trace when CONFIG_DEBUG_ATOMIC_SLEEP is enabled:
+
+BUG: sleeping function called from invalid context at mm/slab.h:498
+in_atomic(): 1, irqs_disabled(): 128, non_block: 0, pid: 0, name: swapper/1
+CPU: 1 PID: 0 Comm: swapper/1 Not tainted 5.9.0-rc5+ #1
+Call trace:
+ dump_backtrace+0x0/0x208
+ show_stack+0x1c/0x28
+ dump_stack+0xc4/0x11c
+ ___might_sleep+0xf8/0x130
+ __might_sleep+0x58/0x90
+ slab_pre_alloc_hook.constprop.101+0xd0/0x118
+ kmem_cache_alloc_node_trace+0x84/0x270
+ __get_vm_area_node+0x88/0x210
+ get_vm_area_caller+0x38/0x40
+ __ioremap_caller+0x70/0xf8
+ ioremap_cache+0x78/0xb0
+ memremap+0x9c/0x1a8
+ init_stolen_time_cpu+0x54/0xf0
+ cpuhp_invoke_callback+0xa8/0x720
+ notify_cpu_starting+0xc8/0xd8
+ secondary_start_kernel+0x114/0x180
+CPU1: Booted secondary processor 0x0000000001 [0x431f0a11]
+
+However, we don't need to initialize steal time at CPU starting time.
+We can simply wait until CPU online time, sacrificing only a bit of
+accuracy by returning zero for steal time until we know better.
+
+While at it, add __init to the functions that are only called by
+pv_time_init(), which is itself __init.
+
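+For illustration only, a minimal sketch of the resulting pattern (all
+names are hypothetical, not part of this patch): ONLINE callbacks run
+in a hotplug thread where sleeping allocations are allowed, unlike
+STARTING callbacks, which run on the new CPU with interrupts disabled:
+
+  #include <linux/cpuhotplug.h>
+
+  /* May sleep: memremap() and other allocations are safe here. */
+  static int example_cpu_online(unsigned int cpu)
+  {
+          return 0;
+  }
+
+  static int example_cpu_down_prepare(unsigned int cpu)
+  {
+          return 0;
+  }
+
+  static int __init example_init(void)
+  {
+          int ret;
+
+          /* CPUHP_AP_ONLINE_DYN reserves a dynamic state slot. */
+          ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
+                                  example_cpu_online,
+                                  example_cpu_down_prepare);
+          return ret < 0 ? ret : 0;
+  }
+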
+Signed-off-by: Andrew Jones <drjones@redhat.com>
+Fixes: e0685fa228fd ("arm64: Retrieve stolen time as paravirtualized guest")
+Cc: stable@vger.kernel.org
+Reviewed-by: Steven Price <steven.price@arm.com>
+Link: https://lore.kernel.org/r/20200916154530.40809-1-drjones@redhat.com
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/paravirt.c | 26 +++++++++++++++-----------
+ include/linux/cpuhotplug.h | 1 -
+ 2 files changed, 15 insertions(+), 12 deletions(-)
+
+--- a/arch/arm64/kernel/paravirt.c
++++ b/arch/arm64/kernel/paravirt.c
+@@ -50,16 +50,19 @@ static u64 pv_steal_clock(int cpu)
+ struct pv_time_stolen_time_region *reg;
+
+ reg = per_cpu_ptr(&stolen_time_region, cpu);
+- if (!reg->kaddr) {
+- pr_warn_once("stolen time enabled but not configured for cpu %d\n",
+- cpu);
++
++ /*
++ * paravirt_steal_clock() may be called before the CPU
++ * online notification callback runs. Until the callback
++ * has run we just return zero.
++ */
++ if (!reg->kaddr)
+ return 0;
+- }
+
+ return le64_to_cpu(READ_ONCE(reg->kaddr->stolen_time));
+ }
+
+-static int stolen_time_dying_cpu(unsigned int cpu)
++static int stolen_time_cpu_down_prepare(unsigned int cpu)
+ {
+ struct pv_time_stolen_time_region *reg;
+
+@@ -73,7 +76,7 @@ static int stolen_time_dying_cpu(unsigne
+ return 0;
+ }
+
+-static int init_stolen_time_cpu(unsigned int cpu)
++static int stolen_time_cpu_online(unsigned int cpu)
+ {
+ struct pv_time_stolen_time_region *reg;
+ struct arm_smccc_res res;
+@@ -103,19 +106,20 @@ static int init_stolen_time_cpu(unsigned
+ return 0;
+ }
+
+-static int pv_time_init_stolen_time(void)
++static int __init pv_time_init_stolen_time(void)
+ {
+ int ret;
+
+- ret = cpuhp_setup_state(CPUHP_AP_ARM_KVMPV_STARTING,
+- "hypervisor/arm/pvtime:starting",
+- init_stolen_time_cpu, stolen_time_dying_cpu);
++ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
++ "hypervisor/arm/pvtime:online",
++ stolen_time_cpu_online,
++ stolen_time_cpu_down_prepare);
+ if (ret < 0)
+ return ret;
+ return 0;
+ }
+
+-static bool has_pv_steal_clock(void)
++static bool __init has_pv_steal_clock(void)
+ {
+ struct arm_smccc_res res;
+
+--- a/include/linux/cpuhotplug.h
++++ b/include/linux/cpuhotplug.h
+@@ -141,7 +141,6 @@ enum cpuhp_state {
+ /* Must be the last timer callback */
+ CPUHP_AP_DUMMY_TIMER_STARTING,
+ CPUHP_AP_ARM_XEN_STARTING,
+- CPUHP_AP_ARM_KVMPV_STARTING,
+ CPUHP_AP_ARM_CORESIGHT_STARTING,
+ CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
+ CPUHP_AP_ARM64_ISNDEP_STARTING,
--- /dev/null
+From 02186d8897d49b0afd3c80b6cf23437d91024065 Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Fri, 18 Sep 2020 12:51:15 -0700
+Subject: dm/dax: Fix table reference counts
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit 02186d8897d49b0afd3c80b6cf23437d91024065 upstream.
+
+A recent fix to the dm_dax_supported() flow uncovered a latent bug. When
+dm_get_live_table() fails, it is still required to drop the
+srcu_read_lock(). Without this change the lvm2 test-suite triggers this
+warning:
+
+ # lvm2-testsuite --only pvmove-abort-all.sh
+
+ WARNING: lock held when returning to user space!
+ 5.9.0-rc5+ #251 Tainted: G OE
+ ------------------------------------------------
+ lvm/1318 is leaving the kernel with locks still held!
+ 1 lock held by lvm/1318:
+ #0: ffff9372abb5a340 (&md->io_barrier){....}-{0:0}, at: dm_get_live_table+0x5/0xb0 [dm_mod]
+
+...and later on this hang signature:
+
+ INFO: task lvm:1344 blocked for more than 122 seconds.
+ Tainted: G OE 5.9.0-rc5+ #251
+ "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+ task:lvm state:D stack: 0 pid: 1344 ppid: 1 flags:0x00004000
+ Call Trace:
+ __schedule+0x45f/0xa80
+ ? finish_task_switch+0x249/0x2c0
+ ? wait_for_completion+0x86/0x110
+ schedule+0x5f/0xd0
+ schedule_timeout+0x212/0x2a0
+ ? __schedule+0x467/0xa80
+ ? wait_for_completion+0x86/0x110
+ wait_for_completion+0xb0/0x110
+ __synchronize_srcu+0xd1/0x160
+ ? __bpf_trace_rcu_utilization+0x10/0x10
+ __dm_suspend+0x6d/0x210 [dm_mod]
+ dm_suspend+0xf6/0x140 [dm_mod]
+
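+For illustration, a minimal sketch of the required pairing (the query
+function is hypothetical, not the patch itself): dm_get_live_table()
+enters an SRCU read-side critical section even when it returns NULL,
+so every call must be balanced by dm_put_live_table(), including on
+the failure path:
+
+  #include <linux/device-mapper.h>
+
+  static bool example_query(struct mapped_device *md)
+  {
+          struct dm_table *map;
+          bool ret = false;
+          int srcu_idx;
+
+          map = dm_get_live_table(md, &srcu_idx);
+          if (!map)
+                  goto out;        /* the SRCU lock is still held */
+
+          ret = true;              /* ... inspect the live table ... */
+  out:
+          dm_put_live_table(md, srcu_idx);  /* always drop the lock */
+          return ret;
+  }
+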
+Fixes: 7bf7eac8d648 ("dax: Arrange for dax_supported check to span multiple devices")
+Cc: <stable@vger.kernel.org>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Alasdair Kergon <agk@redhat.com>
+Cc: Mike Snitzer <snitzer@redhat.com>
+Reported-by: Adrian Huang <ahuang12@lenovo.com>
+Reviewed-by: Ira Weiny <ira.weiny@intel.com>
+Tested-by: Adrian Huang <ahuang12@lenovo.com>
+Link: https://lore.kernel.org/r/160045867590.25663.7548541079217827340.stgit@dwillia2-desk3.amr.corp.intel.com
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1136,15 +1136,16 @@ static bool dm_dax_supported(struct dax_
+ {
+ struct mapped_device *md = dax_get_private(dax_dev);
+ struct dm_table *map;
++ bool ret = false;
+ int srcu_idx;
+- bool ret;
+
+ map = dm_get_live_table(md, &srcu_idx);
+ if (!map)
+- return false;
++ goto out;
+
+ ret = dm_table_supports_dax(map, device_supports_dax, &blocksize);
+
++out:
+ dm_put_live_table(md, srcu_idx);
+
+ return ret;
--- /dev/null
+From 29231826f3bd65500118c473fccf31c0cf14dbc0 Mon Sep 17 00:00:00 2001
+From: Quentin Perret <qperret@google.com>
+Date: Wed, 16 Sep 2020 18:18:25 +0100
+Subject: ehci-hcd: Move include to keep CRC stable
+
+From: Quentin Perret <qperret@google.com>
+
+commit 29231826f3bd65500118c473fccf31c0cf14dbc0 upstream.
+
+The CRC calculation done by genksyms is triggered when the parser hits
+EXPORT_SYMBOL*() macros. At this point, genksyms recursively expands the
+types of the function parameters, and uses that as the input for the CRC
+calculation. In the case of forward-declared structs, the type expands
+to 'UNKNOWN'. Following this, it appears that the result of the
+expansion of each type is cached somewhere, and seems to be re-used
+when/if the same type is seen again for another exported symbol in the
+same C file.
+
+Unfortunately, this can cause CRC 'stability' issues when a struct
+definition becomes visible in the middle of a C file. For example, let's
+assume code with the following pattern:
+
+ struct foo;
+
+ int bar(struct foo *arg)
+ {
+ /* Do work ... */
+ }
+ EXPORT_SYMBOL_GPL(bar);
+
+ /* This contains struct foo's definition */
+ #include "foo.h"
+
+ int baz(struct foo *arg)
+ {
+ /* Do more work ... */
+ }
+ EXPORT_SYMBOL_GPL(baz);
+
+Here, baz's CRC will be computed using the expansion of struct foo that
+was cached after bar's CRC calculation ('UNKNOWN' here). But if
+EXPORT_SYMBOL_GPL(bar) is removed from the file (because of e.g. symbol
+trimming using CONFIG_TRIM_UNUSED_KSYMS), struct foo will be expanded
+late, during baz's CRC calculation, which now has visibility over the
+full struct definition, hence resulting in a different CRC for baz.
+
+The proper fix for this certainly is in genksyms, but that will take me
+some time to get right. In the meantime, we have seen one occurrence of
+this in the ehci-hcd code which hits this problem because of the way it
+includes C files halfway through the code together with an unlucky mix
+of symbol trimming.
+
+In order to workaround this, move the include done in ehci-hub.c early
+in ehci-hcd.c, hence making sure the struct definitions are visible to
+the entire file. This improves CRC stability of the ehci-hcd exports
+even when symbol trimming is enabled.
+
+Acked-by: Alan Stern <stern@rowland.harvard.edu>
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Quentin Perret <qperret@google.com>
+Link: https://lore.kernel.org/r/20200916171825.3228122-1-qperret@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/host/ehci-hcd.c | 1 +
+ drivers/usb/host/ehci-hub.c | 1 -
+ 2 files changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/usb/host/ehci-hcd.c
++++ b/drivers/usb/host/ehci-hcd.c
+@@ -22,6 +22,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/usb.h>
+ #include <linux/usb/hcd.h>
++#include <linux/usb/otg.h>
+ #include <linux/moduleparam.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/debugfs.h>
+--- a/drivers/usb/host/ehci-hub.c
++++ b/drivers/usb/host/ehci-hub.c
+@@ -14,7 +14,6 @@
+ */
+
+ /*-------------------------------------------------------------------------*/
+-#include <linux/usb/otg.h>
+
+ #define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
+
--- /dev/null
+From 9683182612214aa5f5e709fad49444b847cd866a Mon Sep 17 00:00:00 2001
+From: Pavel Tatashin <pasha.tatashin@soleen.com>
+Date: Fri, 18 Sep 2020 21:20:31 -0700
+Subject: mm/memory_hotplug: drain per-cpu pages again during memory offline
+
+From: Pavel Tatashin <pasha.tatashin@soleen.com>
+
+commit 9683182612214aa5f5e709fad49444b847cd866a upstream.
+
+There is a race during page offline that can lead to an infinite loop:
+a page never ends up on a buddy list and __offline_pages() keeps
+retrying infinitely or until a termination signal is received.
+
+Thread#1 - a new process:
+
+load_elf_binary
+ begin_new_exec
+ exec_mmap
+ mmput
+ exit_mmap
+ tlb_finish_mmu
+ tlb_flush_mmu
+ release_pages
+ free_unref_page_list
+ free_unref_page_prepare
+ set_pcppage_migratetype(page, migratetype);
+ // Set page->index migration type below MIGRATE_PCPTYPES
+
+Thread#2 - hot-removes memory
+__offline_pages
+ start_isolate_page_range
+ set_migratetype_isolate
+ set_pageblock_migratetype(page, MIGRATE_ISOLATE);
+ Set migration type to MIGRATE_ISOLATE-> set
+ drain_all_pages(zone);
+ // drain per-cpu page lists to buddy allocator.
+
+Thread#1 - continue
+ free_unref_page_commit
+ migratetype = get_pcppage_migratetype(page);
+ // get old migration type
+ list_add(&page->lru, &pcp->lists[migratetype]);
+ // add new page to already drained pcp list
+
+Thread#2
+Never drains pcp again, and therefore gets stuck in the loop.
+
+The fix is to try to drain per-cpu lists again after
+check_pages_isolated_cb() fails.
+
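+Condensed, the retry loop in __offline_pages() after the fix looks as
+sketched below (abridged from the hunk that follows):
+
+  do {
+          /* check again */
+          ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
+                                      NULL, check_pages_isolated_cb);
+          /*
+           * A racing free may have refilled a pcp list after
+           * start_isolate_page_range() drained it, so drain again
+           * before retrying.
+           */
+          if (ret)
+                  drain_all_pages(zone);
+  } while (ret);
+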
+Fixes: c52e75935f8d ("mm: remove extra drain pages on pcp list")
+Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Acked-by: David Rientjes <rientjes@google.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: Wei Yang <richard.weiyang@gmail.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lkml.kernel.org/r/20200903140032.380431-1-pasha.tatashin@soleen.com
+Link: https://lkml.kernel.org/r/20200904151448.100489-2-pasha.tatashin@soleen.com
+Link: http://lkml.kernel.org/r/20200904070235.GA15277@dhcp22.suse.cz
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/memory_hotplug.c | 14 ++++++++++++++
+ mm/page_isolation.c | 8 ++++++++
+ 2 files changed, 22 insertions(+)
+
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1557,6 +1557,20 @@ static int __ref __offline_pages(unsigne
+ /* check again */
+ ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
+ NULL, check_pages_isolated_cb);
++ /*
++ * per-cpu pages are drained in start_isolate_page_range, but if
++ * there are still pages that are not free, make sure that we
++ * drain again, because when we isolated range we might
++ * have raced with another thread that was adding pages to pcp
++ * list.
++ *
++ * Forward progress should be still guaranteed because
++ * pages on the pcp list can only belong to MOVABLE_ZONE
++ * because has_unmovable_pages explicitly checks for
++ * PageBuddy on freed pages on other zones.
++ */
++ if (ret)
++ drain_all_pages(zone);
+ } while (ret);
+
+ /* Ok, all of our target is isolated.
+--- a/mm/page_isolation.c
++++ b/mm/page_isolation.c
+@@ -170,6 +170,14 @@ __first_valid_page(unsigned long pfn, un
+ * pageblocks we may have modified and return -EBUSY to caller. This
+ * prevents two threads from simultaneously working on overlapping ranges.
+ *
++ * Please note that there is no strong synchronization with the page allocator
++ * either. Pages might be freed while their page blocks are marked ISOLATED.
++ * In some cases pages might still end up on pcp lists and that would allow
++ * for their allocation even when they are in fact isolated already. Depending
++ * on how strong of a guarantee the caller needs drain_all_pages might be needed
++ * (e.g. __offline_pages will need to call it after check for isolated range for
++ * a next retry).
++ *
+ * Return: the number of isolated pageblocks on success and -EBUSY if any part
+ * of range cannot be isolated.
+ */
--- /dev/null
+From 437ef802e0adc9f162a95213a3488e8646e5fc03 Mon Sep 17 00:00:00 2001
+From: Alexey Kardashevskiy <aik@ozlabs.ru>
+Date: Tue, 8 Sep 2020 11:51:06 +1000
+Subject: powerpc/dma: Fix dma_map_ops::get_required_mask
+
+From: Alexey Kardashevskiy <aik@ozlabs.ru>
+
+commit 437ef802e0adc9f162a95213a3488e8646e5fc03 upstream.
+
+There are 2 problems with it:
+ 1. "<" vs expected "<<"
+ 2. the shift number is an IOMMU page number mask, not an address
+ mask as the IOMMU page shift is missing.
+
+This did not hit us before f1565c24b596 ("powerpc: use the generic
+dma_ops_bypass mode") because we had additional code to handle the
+bypass mask, so this chunk (almost?) never executed. However, there
+were reports that aacraid does not work with "iommu=nobypass".
+
+After f1565c24b596, aacraid (and probably others which call
+dma_get_required_mask() before setting the mask) was unable to enable
+64-bit DMA and fell back to using the IOMMU, which was known not to
+work; one of the problems is a double free of an IOMMU page.
+
+This fixes DMA for aacraid, both with and without "iommu=nobypass" in
+the kernel command line. Verified with "stress-ng -d 4".
+
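+As a worked example with hypothetical numbers: if
+fls_long(tbl->it_offset + tbl->it_size) is 21 and tbl->it_page_shift
+is 12, the highest addressable bit is 21 + 12 - 1 = 32:
+
+  mask = 1ULL << (21 + 12 - 1);   /* 0x100000000 */
+  mask += mask - 1;               /* 0x1ffffffff, all bits below 2^33 */
+
+With the typo, "1ULL < (...)" evaluated to the boolean 1 for any
+realistic table size, so the returned mask degenerated to 1.
+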
+Fixes: 6a5c7be5e484 ("powerpc: Override dma_get_required_mask by platform hook and ops")
+Cc: stable@vger.kernel.org # v3.2+
+Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20200908015106.79661-1-aik@ozlabs.ru
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/dma-iommu.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/kernel/dma-iommu.c
++++ b/arch/powerpc/kernel/dma-iommu.c
+@@ -160,7 +160,8 @@ u64 dma_iommu_get_required_mask(struct d
+ return bypass_mask;
+ }
+
+- mask = 1ULL < (fls_long(tbl->it_offset + tbl->it_size) - 1);
++ mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) +
++ tbl->it_page_shift - 1);
+ mask += mask - 1;
+
+ return mask;
--- /dev/null
+From cd4d3d5f21ddbfae3f686ac0ff405f21f7847ad3 Mon Sep 17 00:00:00 2001
+From: Janosch Frank <frankja@linux.ibm.com>
+Date: Tue, 8 Sep 2020 09:05:04 -0400
+Subject: s390: add 3f program exception handler
+
+From: Janosch Frank <frankja@linux.ibm.com>
+
+commit cd4d3d5f21ddbfae3f686ac0ff405f21f7847ad3 upstream.
+
+Program exception 3f (secure storage violation) can only be detected
+when the CPU is running in SIE with a format 4 state description,
+e.g. running a protected guest. Because of this and because user
+space partly controls the guest memory mapping and can trigger this
+exception, we want to send a SIGSEGV to the process running the guest
+and not panic the kernel.
+
+Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
+Cc: <stable@vger.kernel.org> # 5.7
+Fixes: 084ea4d611a3 ("s390/mm: add (non)secure page access exceptions handlers")
+Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
+Reviewed-by: Cornelia Huck <cohuck@redhat.com>
+Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kernel/entry.h | 1 +
+ arch/s390/kernel/pgm_check.S | 2 +-
+ arch/s390/mm/fault.c | 20 ++++++++++++++++++++
+ 3 files changed, 22 insertions(+), 1 deletion(-)
+
+--- a/arch/s390/kernel/entry.h
++++ b/arch/s390/kernel/entry.h
+@@ -26,6 +26,7 @@ void do_protection_exception(struct pt_r
+ void do_dat_exception(struct pt_regs *regs);
+ void do_secure_storage_access(struct pt_regs *regs);
+ void do_non_secure_storage_access(struct pt_regs *regs);
++void do_secure_storage_violation(struct pt_regs *regs);
+
+ void addressing_exception(struct pt_regs *regs);
+ void data_exception(struct pt_regs *regs);
+--- a/arch/s390/kernel/pgm_check.S
++++ b/arch/s390/kernel/pgm_check.S
+@@ -80,7 +80,7 @@ PGM_CHECK(do_dat_exception) /* 3b */
+ PGM_CHECK_DEFAULT /* 3c */
+ PGM_CHECK(do_secure_storage_access) /* 3d */
+ PGM_CHECK(do_non_secure_storage_access) /* 3e */
+-PGM_CHECK_DEFAULT /* 3f */
++PGM_CHECK(do_secure_storage_violation) /* 3f */
+ PGM_CHECK(monitor_event_exception) /* 40 */
+ PGM_CHECK_DEFAULT /* 41 */
+ PGM_CHECK_DEFAULT /* 42 */
+--- a/arch/s390/mm/fault.c
++++ b/arch/s390/mm/fault.c
+@@ -875,6 +875,21 @@ void do_non_secure_storage_access(struct
+ }
+ NOKPROBE_SYMBOL(do_non_secure_storage_access);
+
++void do_secure_storage_violation(struct pt_regs *regs)
++{
++ /*
++ * Either KVM messed up the secure guest mapping or the same
++ * page is mapped into multiple secure guests.
++ *
++ * This exception is only triggered when a guest 2 is running
++ * and can therefore never occur in kernel context.
++ */
++ printk_ratelimited(KERN_WARNING
++ "Secure storage violation in task: %s, pid %d\n",
++ current->comm, current->pid);
++ send_sig(SIGSEGV, current, 0);
++}
++
+ #else
+ void do_secure_storage_access(struct pt_regs *regs)
+ {
+@@ -885,4 +900,9 @@ void do_non_secure_storage_access(struct
+ {
+ default_trap_handler(regs);
+ }
++
++void do_secure_storage_violation(struct pt_regs *regs)
++{
++ default_trap_handler(regs);
++}
+ #endif
--- /dev/null
+From afdf9550e54627fcf4dd609bdc1153059378cdf5 Mon Sep 17 00:00:00 2001
+From: Niklas Schnelle <schnelle@linux.ibm.com>
+Date: Thu, 3 Sep 2020 13:42:57 +0200
+Subject: s390/pci: fix leak of DMA tables on hard unplug
+
+From: Niklas Schnelle <schnelle@linux.ibm.com>
+
+commit afdf9550e54627fcf4dd609bdc1153059378cdf5 upstream.
+
+commit f606b3ef47c9 ("s390/pci: adapt events for zbus") removed the
+zpci_disable_device() call for a zPCI event with PEC 0x0304 because
+the device is already deconfigured by the platform.
+This, however, skips the Linux side of the disable; in particular, it
+leads to leaking the DMA tables and bitmaps because
+zpci_dma_exit_device() is never called on the device.
+
+If the device transitions to the Reserved state we call zpci_zdev_put()
+but zpci_release_device() will not call zpci_disable_device() because
+the state of the zPCI function is already ZPCI_FN_STATE_STANDBY.
+
+If the device is put into the Standby state, zpci_disable_device() is
+not called and the device is assumed to have been put in Standby through
+platform action.
+At this point the device may be removed by a subsequent event with PEC
+0x0308 or 0x0306, which calls zpci_zdev_put() with the same problem
+as above, or the device may be configured again, in which case
+zpci_disable_device() is also not called.
+
+Fix this by calling zpci_disable_device() explicitly for PEC 0x0304 as
+before. To make it more clear that zpci_disable_device() may be called,
+even if the lower level device has already been disabled by the
+platform, add a comment to zpci_disable_device().
+
+Cc: <stable@vger.kernel.org> # 5.8
+Fixes: f606b3ef47c9 ("s390/pci: adapt events for zbus")
+Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/pci/pci.c | 4 ++++
+ arch/s390/pci/pci_event.c | 2 ++
+ 2 files changed, 6 insertions(+)
+
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -668,6 +668,10 @@ EXPORT_SYMBOL_GPL(zpci_enable_device);
+ int zpci_disable_device(struct zpci_dev *zdev)
+ {
+ zpci_dma_exit_device(zdev);
++ /*
++ * The zPCI function may already be disabled by the platform, this is
++ * detected in clp_disable_fh() which becomes a no-op.
++ */
+ return clp_disable_fh(zdev);
+ }
+ EXPORT_SYMBOL_GPL(zpci_disable_device);
+--- a/arch/s390/pci/pci_event.c
++++ b/arch/s390/pci/pci_event.c
+@@ -143,6 +143,8 @@ static void __zpci_event_availability(st
+ zpci_remove_device(zdev);
+ }
+
++ zdev->fh = ccdf->fh;
++ zpci_disable_device(zdev);
+ zdev->state = ZPCI_FN_STATE_STANDBY;
+ if (!clp_get_state(ccdf->fid, &state) &&
+ state == ZPCI_FN_STATE_RESERVED) {
--- /dev/null
+From b6186d7fb53349efd274263a45f0b08749ccaa2d Mon Sep 17 00:00:00 2001
+From: Harald Freudenberger <freude@linux.ibm.com>
+Date: Wed, 9 Sep 2020 11:59:43 +0200
+Subject: s390/zcrypt: fix kmalloc 256k failure
+
+From: Harald Freudenberger <freude@linux.ibm.com>
+
+commit b6186d7fb53349efd274263a45f0b08749ccaa2d upstream.
+
+Tests showed that under stress conditions the kernel may
+temporarily fail to allocate 256k with kmalloc. This fix
+therefore reworks the related code in the cca_findcard2()
+function to use kvmalloc instead.
+
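+For illustration, the general pattern (the function and sizes are
+hypothetical, not the patch itself): kvmalloc_array() tries kmalloc
+first and transparently falls back to vmalloc, and such buffers must
+be freed with kvfree():
+
+  #include <linux/mm.h>
+
+  static int example_big_buffer(size_t nmemb, size_t size)
+  {
+          void *buf = kvmalloc_array(nmemb, size, GFP_KERNEL);
+
+          if (!buf)
+                  return -ENOMEM;
+          /* ... fill and consume buf ... */
+          kvfree(buf);    /* correct for kmalloc and vmalloc backing */
+          return 0;
+  }
+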
+Signed-off-by: Harald Freudenberger <freude@linux.ibm.com>
+Reviewed-by: Ingo Franzki <ifranzki@linux.ibm.com>
+Cc: Stable <stable@vger.kernel.org>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/s390/crypto/zcrypt_ccamisc.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/s390/crypto/zcrypt_ccamisc.c
++++ b/drivers/s390/crypto/zcrypt_ccamisc.c
+@@ -1685,9 +1685,9 @@ int cca_findcard2(u32 **apqns, u32 *nr_a
+ *nr_apqns = 0;
+
+ /* fetch status of all crypto cards */
+- device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT,
+- sizeof(struct zcrypt_device_status_ext),
+- GFP_KERNEL);
++ device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT,
++ sizeof(struct zcrypt_device_status_ext),
++ GFP_KERNEL);
+ if (!device_status)
+ return -ENOMEM;
+ zcrypt_device_status_mask_ext(device_status);
+@@ -1755,7 +1755,7 @@ int cca_findcard2(u32 **apqns, u32 *nr_a
+ verify = 0;
+ }
+
+- kfree(device_status);
++ kvfree(device_status);
+ return rc;
+ }
+ EXPORT_SYMBOL(cca_findcard2);
--- /dev/null
+From 1ec882fc81e3177faf055877310dbdb0c68eb7db Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+Date: Fri, 18 Sep 2020 21:20:28 -0700
+Subject: selftests/vm: fix display of page size in map_hugetlb
+
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+
+commit 1ec882fc81e3177faf055877310dbdb0c68eb7db upstream.
+
+The displayed size is in bytes while the text says it is in kB.
+
+Shift it right by 10 to actually display kilobytes.
+
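+As a worked example, for the common 2 MB hugepages shift is 21:
+
+  printf("%u kB hugepages\n", 1 << 21);         /* 2097152 - bytes */
+  printf("%u kB hugepages\n", 1 << (21 - 10));  /* 2048 - correct kB */
+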
+Fixes: fa7b9a805c79 ("tools/selftest/vm: allow choosing mem size and page size in map_hugetlb")
+Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: <stable@vger.kernel.org>
+Link: https://lkml.kernel.org/r/e27481224564a93d14106e750de31189deaa8bc8.1598861977.git.christophe.leroy@csgroup.eu
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/testing/selftests/vm/map_hugetlb.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/tools/testing/selftests/vm/map_hugetlb.c
++++ b/tools/testing/selftests/vm/map_hugetlb.c
+@@ -83,7 +83,7 @@ int main(int argc, char **argv)
+ }
+
+ if (shift)
+- printf("%u kB hugepages\n", 1 << shift);
++ printf("%u kB hugepages\n", 1 << (shift - 10));
+ else
+ printf("Default size hugepages\n");
+ printf("Mapping %lu Mbytes\n", (unsigned long)length >> 20);
ksm-reinstate-memcg-charge-on-copied-pages.patch
kprobes-fix-kill-kprobe-which-has-been-marked-as-gone.patch
mm-thp-fix-__split_huge_pmd_locked-for-migration-pmd.patch
+s390-add-3f-program-exception-handler.patch
+s390-pci-fix-leak-of-dma-tables-on-hard-unplug.patch
+s390-zcrypt-fix-kmalloc-256k-failure.patch
+ehci-hcd-move-include-to-keep-crc-stable.patch
+arm64-paravirt-initialize-steal-time-when-cpu-is-online.patch
+powerpc-dma-fix-dma_map_ops-get_required_mask.patch
+selftests-vm-fix-display-of-page-size-in-map_hugetlb.patch
+dm-dax-fix-table-reference-counts.patch
+mm-memory_hotplug-drain-per-cpu-pages-again-during-memory-offline.patch