--- /dev/null
+From b6ae256afd32f96bec0117175b329d0dd617655e Mon Sep 17 00:00:00 2001
+From: Christoffer Dall <christoffer.dall@arm.com>
+Date: Thu, 12 Dec 2019 20:50:55 +0100
+Subject: KVM: arm64: Only sign-extend MMIO up to register width
+
+From: Christoffer Dall <christoffer.dall@arm.com>
+
+commit b6ae256afd32f96bec0117175b329d0dd617655e upstream.
+
+On AArch64 you can do a sign-extended load to either a 32-bit or 64-bit
+register, and we should only sign extend the register up to the width of
+the register as specified in the operation (by using the 32-bit Wn or
+64-bit Xn register specifier).
+
+As it turns out, the architecture provides this decoding information in
+the SF ("Sixty-Four" -- how cute...) bit.
+
+Let's take advantage of this with the usual 32-bit/64-bit header file
+dance and do the right thing on AArch64 hosts.
+
+Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20191212195055.5541-1-christoffer.dall@arm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/kvm_emulate.h | 5 +++++
+ arch/arm/include/asm/kvm_mmio.h | 2 ++
+ arch/arm64/include/asm/kvm_emulate.h | 5 +++++
+ arch/arm64/include/asm/kvm_mmio.h | 6 ++----
+ virt/kvm/arm/mmio.c | 6 ++++++
+ 5 files changed, 20 insertions(+), 4 deletions(-)
+
+--- a/arch/arm/include/asm/kvm_emulate.h
++++ b/arch/arm/include/asm/kvm_emulate.h
+@@ -144,6 +144,11 @@ static inline bool kvm_vcpu_dabt_issext(
+ return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
+ }
+
++static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
++{
++ return false;
++}
++
+ static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
+ {
+ return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
+--- a/arch/arm/include/asm/kvm_mmio.h
++++ b/arch/arm/include/asm/kvm_mmio.h
+@@ -26,6 +26,8 @@
+ struct kvm_decode {
+ unsigned long rt;
+ bool sign_extend;
++ /* Not used on 32-bit arm */
++ bool sixty_four;
+ };
+
+ void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
+--- a/arch/arm64/include/asm/kvm_emulate.h
++++ b/arch/arm64/include/asm/kvm_emulate.h
+@@ -188,6 +188,11 @@ static inline bool kvm_vcpu_dabt_issext(
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
+ }
+
++static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
++{
++ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
++}
++
+ static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
+ {
+ return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
+--- a/arch/arm64/include/asm/kvm_mmio.h
++++ b/arch/arm64/include/asm/kvm_mmio.h
+@@ -21,13 +21,11 @@
+ #include <linux/kvm_host.h>
+ #include <asm/kvm_arm.h>
+
+-/*
+- * This is annoying. The mmio code requires this, even if we don't
+- * need any decoding. To be fixed.
+- */
+ struct kvm_decode {
+ unsigned long rt;
+ bool sign_extend;
++ /* Width of the register accessed by the faulting instruction is 64 bits */
++ bool sixty_four;
+ };
+
+ void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
+--- a/virt/kvm/arm/mmio.c
++++ b/virt/kvm/arm/mmio.c
+@@ -117,6 +117,9 @@ int kvm_handle_mmio_return(struct kvm_vc
+ data = (data ^ mask) - mask;
+ }
+
++ if (!vcpu->arch.mmio_decode.sixty_four)
++ data = data & 0xffffffff;
++
+ trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
+ &data);
+ data = vcpu_data_host_to_guest(vcpu, data, len);
+@@ -137,6 +140,7 @@ static int decode_hsr(struct kvm_vcpu *v
+ unsigned long rt;
+ int access_size;
+ bool sign_extend;
++ bool sixty_four;
+
+ if (kvm_vcpu_dabt_iss1tw(vcpu)) {
+ /* page table accesses IO mem: tell guest to fix its TTBR */
+@@ -150,11 +154,13 @@ static int decode_hsr(struct kvm_vcpu *v
+
+ *is_write = kvm_vcpu_dabt_iswrite(vcpu);
+ sign_extend = kvm_vcpu_dabt_issext(vcpu);
++ sixty_four = kvm_vcpu_dabt_issf(vcpu);
+ rt = kvm_vcpu_dabt_get_rd(vcpu);
+
+ *len = access_size;
+ vcpu->arch.mmio_decode.sign_extend = sign_extend;
+ vcpu->arch.mmio_decode.rt = rt;
++ vcpu->arch.mmio_decode.sixty_four = sixty_four;
+
+ return 0;
+ }
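
The effect of the mask-based sign extension in kvm_handle_mmio_return() together with the new SF-based truncation can be seen in a small standalone sketch (ordinary userspace C, not kernel code; the helper name and the sample value are made up for illustration). A sign-extending byte load that reads 0x80 from MMIO should yield 0xffffffffffffff80 when the destination is a 64-bit Xn register (ESR_ELx.SF set), but 0x00000000ffffff80 when the destination is a 32-bit Wn register (SF clear):

/*
 * Standalone illustration of the MMIO read path above: "len" is the access
 * size in bytes, "sign_extend" mirrors ESR_ELx.SSE and "sixty_four" mirrors
 * ESR_ELx.SF.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t mmio_read_extend(uint64_t data, unsigned int len,
				 int sign_extend, int sixty_four)
{
	if (sign_extend && len < 8) {
		uint64_t mask = 1ULL << (len * 8 - 1);

		data = (data ^ mask) - mask;	/* sign-extend to 64 bits */
	}
	if (!sixty_four)
		data &= 0xffffffff;	/* Wn destination: upper 32 bits are zero */
	return data;
}

int main(void)
{
	/* ldrsb x0, [...]: a byte read of 0x80 into a 64-bit register */
	printf("SF=1: %016" PRIx64 "\n", mmio_read_extend(0x80, 1, 1, 1));
	/* ldrsb w0, [...]: the same read into a 32-bit register */
	printf("SF=0: %016" PRIx64 "\n", mmio_read_extend(0x80, 1, 1, 0));
	return 0;
}

The first line prints ffffffffffffff80 and the second 00000000ffffff80, which is exactly the distinction the new sixty_four masking enforces.
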
--- /dev/null
+From a53998802e178451701d59d38e36f551422977ba Mon Sep 17 00:00:00 2001
+From: Alexander Lobakin <alobakin@dlink.ru>
+Date: Fri, 17 Jan 2020 17:02:07 +0300
+Subject: MIPS: fix indentation of the 'RELOCS' message
+
+From: Alexander Lobakin <alobakin@dlink.ru>
+
+commit a53998802e178451701d59d38e36f551422977ba upstream.
+
+quiet_cmd_relocs lacks a whitespace which results in:
+
+  LD      vmlinux
+  SORTEX  vmlinux
+  SYSMAP  System.map
+  RELOCS vmlinux
+  Building modules, stage 2.
+  MODPOST 64 modules
+
+After this patch:
+
+  LD      vmlinux
+  SORTEX  vmlinux
+  SYSMAP  System.map
+  RELOCS  vmlinux
+  Building modules, stage 2.
+  MODPOST 64 modules
+
+The typo has been present in the kernel tree since the introduction of
+relocatable kernel support in commit e818fac595ab ("MIPS: Generate
+relocation table when CONFIG_RELOCATABLE"), but the relocation scripts
+were moved to Makefile.postlink later with commit 44079d3509ae ("MIPS:
+Use Makefile.postlink to insert relocations into vmlinux").
+
+Fixes: 44079d3509ae ("MIPS: Use Makefile.postlink to insert relocations into vmlinux")
+Cc: <stable@vger.kernel.org> # v4.11+
+Signed-off-by: Alexander Lobakin <alobakin@dlink.ru>
+[paulburton@kernel.org: Fixup commit references in commit message.]
+Signed-off-by: Paul Burton <paulburton@kernel.org>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: James Hogan <jhogan@kernel.org>
+Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
+Cc: Rob Herring <robh@kernel.org>
+Cc: linux-mips@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/Makefile.postlink | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/Makefile.postlink
++++ b/arch/mips/Makefile.postlink
+@@ -12,7 +12,7 @@ __archpost:
+ include scripts/Kbuild.include
+
+ CMD_RELOCS = arch/mips/boot/tools/relocs
+-quiet_cmd_relocs = RELOCS $@
++quiet_cmd_relocs = RELOCS  $@
+ cmd_relocs = $(CMD_RELOCS) $@
+
+ # `@true` prevents complaint when there is nothing to be done
--- /dev/null
+From fbee6ba2dca30d302efe6bddb3a886f5e964a257 Mon Sep 17 00:00:00 2001
+From: Pingfan Liu <kernelfans@gmail.com>
+Date: Fri, 10 Jan 2020 12:54:02 +0800
+Subject: powerpc/pseries: Advance pfn if section is not present in lmb_is_removable()
+
+From: Pingfan Liu <kernelfans@gmail.com>
+
+commit fbee6ba2dca30d302efe6bddb3a886f5e964a257 upstream.
+
+In lmb_is_removable(), if a section is not present, it should continue
+to test the rest of the sections in the block. But the current code
+fails to do so.
+
+Fixes: 51925fb3c5c9 ("powerpc/pseries: Implement memory hotplug remove in the kernel")
+Cc: stable@vger.kernel.org # v4.1+
+Signed-off-by: Pingfan Liu <kernelfans@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/1578632042-12415-1-git-send-email-kernelfans@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/platforms/pseries/hotplug-memory.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
+@@ -452,8 +452,10 @@ static bool lmb_is_removable(struct of_d
+
+ for (i = 0; i < scns_per_block; i++) {
+ pfn = PFN_DOWN(phys_addr);
+- if (!pfn_present(pfn))
++ if (!pfn_present(pfn)) {
++ phys_addr += MIN_MEMORY_BLOCK_SIZE;
+ continue;
++ }
+
+ rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
+ phys_addr += MIN_MEMORY_BLOCK_SIZE;
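
The fix is easier to see in a reduced standalone sketch of the loop (plain C; SECTION_STRIDE, section_present() and section_removable() are stand-ins for MIN_MEMORY_BLOCK_SIZE, pfn_present() and is_mem_section_removable(), not the real kernel interfaces). The point is that the branch which skips a non-present section must still advance the physical address, otherwise every remaining iteration re-tests the same address and the rest of the block is never examined:

#include <stdbool.h>
#include <stdio.h>

#define SECTION_STRIDE	(16UL << 20)	/* arbitrary stand-in value */

static bool section_present(unsigned long pa)
{
	return pa != 1 * SECTION_STRIDE;	/* pretend section 1 is absent */
}

static bool section_removable(unsigned long pa)
{
	return pa != 12 * SECTION_STRIDE;	/* pretend section 12 is pinned */
}

static bool block_is_removable(unsigned long phys_addr, int scns_per_block)
{
	bool rc = true;

	for (int i = 0; i < scns_per_block; i++) {
		if (!section_present(phys_addr)) {
			phys_addr += SECTION_STRIDE;	/* the advance the fix adds */
			continue;
		}
		rc &= section_removable(phys_addr);
		phys_addr += SECTION_STRIDE;
	}
	return rc;
}

int main(void)
{
	/*
	 * Section 1 is absent and section 12 is not removable, so the right
	 * answer is 0.  Without the advance in the !present branch, every
	 * iteration after the first skip would re-test section 1's address
	 * and section 12 would never be examined.
	 */
	printf("removable: %d\n", block_is_removable(0, 16));
	return 0;
}
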
--- /dev/null
+From c2a20711fc181e7f22ee5c16c28cb9578af84729 Mon Sep 17 00:00:00 2001
+From: Sukadev Bhattiprolu <sukadev@linux.ibm.com>
+Date: Mon, 6 Jan 2020 13:50:02 -0600
+Subject: powerpc/xmon: don't access ASDR in VMs
+
+From: Sukadev Bhattiprolu <sukadev@linux.ibm.com>
+
+commit c2a20711fc181e7f22ee5c16c28cb9578af84729 upstream.
+
+ASDR is HV-privileged and must only be accessed in HV-mode.
+Fixes a Program Check (0x700) when xmon in a VM dumps SPRs.
+
+Fixes: d1e1b351f50f ("powerpc/xmon: Add ISA v3.0 SPRs to SPR dump")
+Cc: stable@vger.kernel.org # v4.14+
+Signed-off-by: Sukadev Bhattiprolu <sukadev@linux.ibm.com>
+Reviewed-by: Andrew Donnellan <ajd@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20200107021633.GB29843@us.ibm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/xmon/xmon.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -1830,15 +1830,14 @@ static void dump_300_sprs(void)
+
+ printf("pidr = %.16lx tidr = %.16lx\n",
+ mfspr(SPRN_PID), mfspr(SPRN_TIDR));
+- printf("asdr = %.16lx psscr = %.16lx\n",
+- mfspr(SPRN_ASDR), hv ? mfspr(SPRN_PSSCR)
+- : mfspr(SPRN_PSSCR_PR));
++ printf("psscr = %.16lx\n",
++ hv ? mfspr(SPRN_PSSCR) : mfspr(SPRN_PSSCR_PR));
+
+ if (!hv)
+ return;
+
+- printf("ptcr = %.16lx\n",
+- mfspr(SPRN_PTCR));
++ printf("ptcr = %.16lx asdr = %.16lx\n",
++ mfspr(SPRN_PTCR), mfspr(SPRN_ASDR));
+ #endif
+ }
+
--- /dev/null
+From 5f490a520bcb393389a4d44bec90afcb332eb112 Mon Sep 17 00:00:00 2001
+From: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+Date: Thu, 16 Jan 2020 19:59:04 +0100
+Subject: s390/mm: fix dynamic pagetable upgrade for hugetlbfs
+
+From: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+
+commit 5f490a520bcb393389a4d44bec90afcb332eb112 upstream.
+
+Commit ee71d16d22bb ("s390/mm: make TASK_SIZE independent from the number
+of page table levels") changed the logic of TASK_SIZE and also removed the
+arch_mmap_check() implementation for s390. This combination has a subtle
+effect on how get_unmapped_area() for hugetlbfs pages works. It is now
+possible that a user process establishes a hugetlbfs mapping at an address
+above 4 TB, without triggering a dynamic pagetable upgrade from 3 to 4
+levels.
+
+This is because hugetlbfs mappings will not use mm->get_unmapped_area, but
+rather file->f_op->get_unmapped_area, which currently is the generic
+implementation of hugetlb_get_unmapped_area(). That implementation does not
+know about s390 dynamic pagetable upgrades, and with the new definition of
+TASK_SIZE it will now allow mappings above 4 TB.
+
+Subsequent access to such a mapped address above 4 TB will result in a page
+fault loop, because the CPU cannot translate such a large address with 3
+pagetable levels. The fault handler will try to map in a hugepage at the
+address, but due to the folded pagetable logic it will end up creating
+entries in the 3-level pagetable, possibly overwriting existing mappings,
+and then it all repeats when the access is retried.
+
+Apart from the page fault loop, this can have various nasty effects, e.g.
+kernel panic from one of the BUG_ON() checks in memory management code,
+or even data loss if an existing mapping gets overwritten.
+
+Fix this by implementing HAVE_ARCH_HUGETLB_UNMAPPED_AREA support for s390,
+providing an s390 version for hugetlb_get_unmapped_area() with pagetable
+upgrade support similar to arch_get_unmapped_area(), which will then be
+used instead of the generic version.
+
+Fixes: ee71d16d22bb ("s390/mm: make TASK_SIZE independent from the number of page table levels")
+Cc: <stable@vger.kernel.org> # 4.12+
+Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/include/asm/page.h | 2
+ arch/s390/mm/hugetlbpage.c | 100 ++++++++++++++++++++++++++++++++++++++++++-
+ 2 files changed, 101 insertions(+), 1 deletion(-)
+
+--- a/arch/s390/include/asm/page.h
++++ b/arch/s390/include/asm/page.h
+@@ -33,6 +33,8 @@
+ #define ARCH_HAS_PREPARE_HUGEPAGE
+ #define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH
+
++#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
++
+ #include <asm/setup.h>
+ #ifndef __ASSEMBLY__
+
+--- a/arch/s390/mm/hugetlbpage.c
++++ b/arch/s390/mm/hugetlbpage.c
+@@ -2,7 +2,7 @@
+ /*
+ * IBM System z Huge TLB Page Support for Kernel.
+ *
+- * Copyright IBM Corp. 2007,2016
++ * Copyright IBM Corp. 2007,2020
+ * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
+ */
+
+@@ -11,6 +11,9 @@
+
+ #include <linux/mm.h>
+ #include <linux/hugetlb.h>
++#include <linux/mman.h>
++#include <linux/sched/mm.h>
++#include <linux/security.h>
+
+ /*
+ * If the bit selected by single-bit bitmask "a" is set within "x", move
+@@ -243,3 +246,98 @@ static __init int setup_hugepagesz(char
+ return 1;
+ }
+ __setup("hugepagesz=", setup_hugepagesz);
++
++static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
++ unsigned long addr, unsigned long len,
++ unsigned long pgoff, unsigned long flags)
++{
++ struct hstate *h = hstate_file(file);
++ struct vm_unmapped_area_info info;
++
++ info.flags = 0;
++ info.length = len;
++ info.low_limit = current->mm->mmap_base;
++ info.high_limit = TASK_SIZE;
++ info.align_mask = PAGE_MASK & ~huge_page_mask(h);
++ info.align_offset = 0;
++ return vm_unmapped_area(&info);
++}
++
++static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
++ unsigned long addr0, unsigned long len,
++ unsigned long pgoff, unsigned long flags)
++{
++ struct hstate *h = hstate_file(file);
++ struct vm_unmapped_area_info info;
++ unsigned long addr;
++
++ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
++ info.length = len;
++ info.low_limit = max(PAGE_SIZE, mmap_min_addr);
++ info.high_limit = current->mm->mmap_base;
++ info.align_mask = PAGE_MASK & ~huge_page_mask(h);
++ info.align_offset = 0;
++ addr = vm_unmapped_area(&info);
++
++ /*
++ * A failed mmap() very likely causes application failure,
++ * so fall back to the bottom-up function here. This scenario
++ * can happen with large stack limits and large mmap()
++ * allocations.
++ */
++ if (addr & ~PAGE_MASK) {
++ VM_BUG_ON(addr != -ENOMEM);
++ info.flags = 0;
++ info.low_limit = TASK_UNMAPPED_BASE;
++ info.high_limit = TASK_SIZE;
++ addr = vm_unmapped_area(&info);
++ }
++
++ return addr;
++}
++
++unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
++ unsigned long len, unsigned long pgoff, unsigned long flags)
++{
++ struct hstate *h = hstate_file(file);
++ struct mm_struct *mm = current->mm;
++ struct vm_area_struct *vma;
++ int rc;
++
++ if (len & ~huge_page_mask(h))
++ return -EINVAL;
++ if (len > TASK_SIZE - mmap_min_addr)
++ return -ENOMEM;
++
++ if (flags & MAP_FIXED) {
++ if (prepare_hugepage_range(file, addr, len))
++ return -EINVAL;
++ goto check_asce_limit;
++ }
++
++ if (addr) {
++ addr = ALIGN(addr, huge_page_size(h));
++ vma = find_vma(mm, addr);
++ if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
++ (!vma || addr + len <= vm_start_gap(vma)))
++ goto check_asce_limit;
++ }
++
++ if (mm->get_unmapped_area == arch_get_unmapped_area)
++ addr = hugetlb_get_unmapped_area_bottomup(file, addr, len,
++ pgoff, flags);
++ else
++ addr = hugetlb_get_unmapped_area_topdown(file, addr, len,
++ pgoff, flags);
++ if (addr & ~PAGE_MASK)
++ return addr;
++
++check_asce_limit:
++ if (addr + len > current->mm->context.asce_limit &&
++ addr + len <= TASK_SIZE) {
++ rc = crst_table_upgrade(mm, addr + len);
++ if (rc)
++ return (unsigned long) rc;
++ }
++ return addr;
++}
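
The failure mode described in the changelog can be sketched from userspace (illustrative only: the hint address, the use of an anonymous MAP_HUGETLB mapping instead of an explicit hugetlbfs file, and the assumption of reserved 1 MB hugepages are choices made for the example, not taken from the patch). Asking for a huge page mapping above 4 TB and storing to it is exactly the access that loops in the fault handler on a kernel without this fix; with the fix, hugetlb_get_unmapped_area() upgrades to 4 pagetable levels first and the store succeeds:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	void *hint = (void *)(8UL << 40);	/* 8 TB: needs 4 pagetable levels */
	size_t len = 1UL << 20;			/* one 1 MB hugepage (s390 default) */
	char *p;

	p = mmap(hint, len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");	/* e.g. no hugepages reserved */
		return 1;
	}

	p[0] = 1;	/* on an unfixed kernel, faults forever if p lies above 4 TB */
	printf("huge page mapped and written at %p\n", p);
	return 0;
}

Run with a few hugepages reserved (e.g. vm.nr_hugepages=4): on a fixed kernel the program prints the mapped address, on an unfixed one the store never completes.
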
lib-test_kasan.c-fix-memory-leak-in-kmalloc_oob_krealloc_more.patch
irqdomain-fix-a-memory-leak-in-irq_domain_push_irq.patch
platform-x86-intel_scu_ipc-fix-interrupt-support.patch
+kvm-arm64-only-sign-extend-mmio-up-to-register-width.patch
+mips-fix-indentation-of-the-relocs-message.patch
+s390-mm-fix-dynamic-pagetable-upgrade-for-hugetlbfs.patch
+powerpc-xmon-don-t-access-asdr-in-vms.patch
+powerpc-pseries-advance-pfn-if-section-is-not-present-in-lmb_is_removable.patch