From: Greg Kroah-Hartman Date: Thu, 6 Feb 2020 16:49:54 +0000 (+0100) Subject: 4.14-stable patches X-Git-Tag: v4.19.103~125 X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=82ce534b5f2f07bd4e46dd6f143e8c56b245b644;p=thirdparty%2Fkernel%2Fstable-queue.git 4.14-stable patches added patches: kvm-arm64-only-sign-extend-mmio-up-to-register-width.patch mips-fix-indentation-of-the-relocs-message.patch powerpc-pseries-advance-pfn-if-section-is-not-present-in-lmb_is_removable.patch powerpc-xmon-don-t-access-asdr-in-vms.patch s390-mm-fix-dynamic-pagetable-upgrade-for-hugetlbfs.patch --- diff --git a/queue-4.14/kvm-arm64-only-sign-extend-mmio-up-to-register-width.patch b/queue-4.14/kvm-arm64-only-sign-extend-mmio-up-to-register-width.patch new file mode 100644 index 00000000000..2852ad99e91 --- /dev/null +++ b/queue-4.14/kvm-arm64-only-sign-extend-mmio-up-to-register-width.patch @@ -0,0 +1,125 @@ +From b6ae256afd32f96bec0117175b329d0dd617655e Mon Sep 17 00:00:00 2001 +From: Christoffer Dall +Date: Thu, 12 Dec 2019 20:50:55 +0100 +Subject: KVM: arm64: Only sign-extend MMIO up to register width + +From: Christoffer Dall + +commit b6ae256afd32f96bec0117175b329d0dd617655e upstream. + +On AArch64 you can do a sign-extended load to either a 32-bit or 64-bit +register, and we should only sign extend the register up to the width of +the register as specified in the operation (by using the 32-bit Wn or +64-bit Xn register specifier). + +As it turns out, the architecture provides this decoding information in +the SF ("Sixty-Four" -- how cute...) bit. + +Let's take advantage of this with the usual 32-bit/64-bit header file +dance and do the right thing on AArch64 hosts. + +Signed-off-by: Christoffer Dall +Signed-off-by: Marc Zyngier +Cc: stable@vger.kernel.org +Link: https://lore.kernel.org/r/20191212195055.5541-1-christoffer.dall@arm.com +Signed-off-by: Greg Kroah-Hartman + +--- + arch/arm/include/asm/kvm_emulate.h | 5 +++++ + arch/arm/include/asm/kvm_mmio.h | 2 ++ + arch/arm64/include/asm/kvm_emulate.h | 5 +++++ + arch/arm64/include/asm/kvm_mmio.h | 6 ++---- + virt/kvm/arm/mmio.c | 6 ++++++ + 5 files changed, 20 insertions(+), 4 deletions(-) + +--- a/arch/arm/include/asm/kvm_emulate.h ++++ b/arch/arm/include/asm/kvm_emulate.h +@@ -144,6 +144,11 @@ static inline bool kvm_vcpu_dabt_issext( + return kvm_vcpu_get_hsr(vcpu) & HSR_SSE; + } + ++static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu) ++{ ++ return false; ++} ++ + static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu) + { + return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT; +--- a/arch/arm/include/asm/kvm_mmio.h ++++ b/arch/arm/include/asm/kvm_mmio.h +@@ -26,6 +26,8 @@ + struct kvm_decode { + unsigned long rt; + bool sign_extend; ++ /* Not used on 32-bit arm */ ++ bool sixty_four; + }; + + void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data); +--- a/arch/arm64/include/asm/kvm_emulate.h ++++ b/arch/arm64/include/asm/kvm_emulate.h +@@ -188,6 +188,11 @@ static inline bool kvm_vcpu_dabt_issext( + return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE); + } + ++static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu) ++{ ++ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF); ++} ++ + static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu) + { + return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT; +--- a/arch/arm64/include/asm/kvm_mmio.h ++++ b/arch/arm64/include/asm/kvm_mmio.h +@@ -21,13 +21,11 @@ + #include + #include + +-/* +- * This is annoying. 
The mmio code requires this, even if we don't +- * need any decoding. To be fixed. +- */ + struct kvm_decode { + unsigned long rt; + bool sign_extend; ++ /* Witdth of the register accessed by the faulting instruction is 64-bits */ ++ bool sixty_four; + }; + + void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data); +--- a/virt/kvm/arm/mmio.c ++++ b/virt/kvm/arm/mmio.c +@@ -117,6 +117,9 @@ int kvm_handle_mmio_return(struct kvm_vc + data = (data ^ mask) - mask; + } + ++ if (!vcpu->arch.mmio_decode.sixty_four) ++ data = data & 0xffffffff; ++ + trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr, + &data); + data = vcpu_data_host_to_guest(vcpu, data, len); +@@ -137,6 +140,7 @@ static int decode_hsr(struct kvm_vcpu *v + unsigned long rt; + int access_size; + bool sign_extend; ++ bool sixty_four; + + if (kvm_vcpu_dabt_iss1tw(vcpu)) { + /* page table accesses IO mem: tell guest to fix its TTBR */ +@@ -150,11 +154,13 @@ static int decode_hsr(struct kvm_vcpu *v + + *is_write = kvm_vcpu_dabt_iswrite(vcpu); + sign_extend = kvm_vcpu_dabt_issext(vcpu); ++ sixty_four = kvm_vcpu_dabt_issf(vcpu); + rt = kvm_vcpu_dabt_get_rd(vcpu); + + *len = access_size; + vcpu->arch.mmio_decode.sign_extend = sign_extend; + vcpu->arch.mmio_decode.rt = rt; ++ vcpu->arch.mmio_decode.sixty_four = sixty_four; + + return 0; + } diff --git a/queue-4.14/mips-fix-indentation-of-the-relocs-message.patch b/queue-4.14/mips-fix-indentation-of-the-relocs-message.patch new file mode 100644 index 00000000000..98cafed30c5 --- /dev/null +++ b/queue-4.14/mips-fix-indentation-of-the-relocs-message.patch @@ -0,0 +1,61 @@ +From a53998802e178451701d59d38e36f551422977ba Mon Sep 17 00:00:00 2001 +From: Alexander Lobakin +Date: Fri, 17 Jan 2020 17:02:07 +0300 +Subject: MIPS: fix indentation of the 'RELOCS' message + +From: Alexander Lobakin + +commit a53998802e178451701d59d38e36f551422977ba upstream. + +quiet_cmd_relocs lacks a whitespace which results in: + + LD vmlinux + SORTEX vmlinux + SYSMAP System.map + RELOCS vmlinux + Building modules, stage 2. + MODPOST 64 modules + +After this patch: + + LD vmlinux + SORTEX vmlinux + SYSMAP System.map + RELOCS vmlinux + Building modules, stage 2. + MODPOST 64 modules + +Typo is present in kernel tree since the introduction of relocatable +kernel support in commit e818fac595ab ("MIPS: Generate relocation table +when CONFIG_RELOCATABLE"), but the relocation scripts were moved to +Makefile.postlink later with commit 44079d3509ae ("MIPS: Use +Makefile.postlink to insert relocations into vmlinux"). + +Fixes: 44079d3509ae ("MIPS: Use Makefile.postlink to insert relocations into vmlinux") +Cc: # v4.11+ +Signed-off-by: Alexander Lobakin +[paulburton@kernel.org: Fixup commit references in commit message.] 
+Signed-off-by: Paul Burton +Cc: Ralf Baechle +Cc: James Hogan +Cc: Masahiro Yamada +Cc: Rob Herring +Cc: linux-mips@vger.kernel.org +Cc: linux-kernel@vger.kernel.org +Signed-off-by: Greg Kroah-Hartman + +--- + arch/mips/Makefile.postlink | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/arch/mips/Makefile.postlink ++++ b/arch/mips/Makefile.postlink +@@ -12,7 +12,7 @@ __archpost: + include scripts/Kbuild.include + + CMD_RELOCS = arch/mips/boot/tools/relocs +-quiet_cmd_relocs = RELOCS $@ ++quiet_cmd_relocs = RELOCS $@ + cmd_relocs = $(CMD_RELOCS) $@ + + # `@true` prevents complaint when there is nothing to be done diff --git a/queue-4.14/powerpc-pseries-advance-pfn-if-section-is-not-present-in-lmb_is_removable.patch b/queue-4.14/powerpc-pseries-advance-pfn-if-section-is-not-present-in-lmb_is_removable.patch new file mode 100644 index 00000000000..3ab5963cc97 --- /dev/null +++ b/queue-4.14/powerpc-pseries-advance-pfn-if-section-is-not-present-in-lmb_is_removable.patch @@ -0,0 +1,38 @@ +From fbee6ba2dca30d302efe6bddb3a886f5e964a257 Mon Sep 17 00:00:00 2001 +From: Pingfan Liu +Date: Fri, 10 Jan 2020 12:54:02 +0800 +Subject: powerpc/pseries: Advance pfn if section is not present in lmb_is_removable() + +From: Pingfan Liu + +commit fbee6ba2dca30d302efe6bddb3a886f5e964a257 upstream. + +In lmb_is_removable(), if a section is not present, it should continue +to test the rest of the sections in the block. But the current code +fails to do so. + +Fixes: 51925fb3c5c9 ("powerpc/pseries: Implement memory hotplug remove in the kernel") +Cc: stable@vger.kernel.org # v4.1+ +Signed-off-by: Pingfan Liu +Signed-off-by: Michael Ellerman +Link: https://lore.kernel.org/r/1578632042-12415-1-git-send-email-kernelfans@gmail.com +Signed-off-by: Greg Kroah-Hartman + +--- + arch/powerpc/platforms/pseries/hotplug-memory.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +--- a/arch/powerpc/platforms/pseries/hotplug-memory.c ++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c +@@ -452,8 +452,10 @@ static bool lmb_is_removable(struct of_d + + for (i = 0; i < scns_per_block; i++) { + pfn = PFN_DOWN(phys_addr); +- if (!pfn_present(pfn)) ++ if (!pfn_present(pfn)) { ++ phys_addr += MIN_MEMORY_BLOCK_SIZE; + continue; ++ } + + rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION); + phys_addr += MIN_MEMORY_BLOCK_SIZE; diff --git a/queue-4.14/powerpc-xmon-don-t-access-asdr-in-vms.patch b/queue-4.14/powerpc-xmon-don-t-access-asdr-in-vms.patch new file mode 100644 index 00000000000..6f0ef478c55 --- /dev/null +++ b/queue-4.14/powerpc-xmon-don-t-access-asdr-in-vms.patch @@ -0,0 +1,46 @@ +From c2a20711fc181e7f22ee5c16c28cb9578af84729 Mon Sep 17 00:00:00 2001 +From: Sukadev Bhattiprolu +Date: Mon, 6 Jan 2020 13:50:02 -0600 +Subject: powerpc/xmon: don't access ASDR in VMs + +From: Sukadev Bhattiprolu + +commit c2a20711fc181e7f22ee5c16c28cb9578af84729 upstream. + +ASDR is HV-privileged and must only be accessed in HV-mode. +Fixes a Program Check (0x700) when xmon in a VM dumps SPRs. 
+ +Fixes: d1e1b351f50f ("powerpc/xmon: Add ISA v3.0 SPRs to SPR dump") +Cc: stable@vger.kernel.org # v4.14+ +Signed-off-by: Sukadev Bhattiprolu +Reviewed-by: Andrew Donnellan +Signed-off-by: Michael Ellerman +Link: https://lore.kernel.org/r/20200107021633.GB29843@us.ibm.com +Signed-off-by: Greg Kroah-Hartman + +--- + arch/powerpc/xmon/xmon.c | 9 ++++----- + 1 file changed, 4 insertions(+), 5 deletions(-) + +--- a/arch/powerpc/xmon/xmon.c ++++ b/arch/powerpc/xmon/xmon.c +@@ -1830,15 +1830,14 @@ static void dump_300_sprs(void) + + printf("pidr = %.16lx tidr = %.16lx\n", + mfspr(SPRN_PID), mfspr(SPRN_TIDR)); +- printf("asdr = %.16lx psscr = %.16lx\n", +- mfspr(SPRN_ASDR), hv ? mfspr(SPRN_PSSCR) +- : mfspr(SPRN_PSSCR_PR)); ++ printf("psscr = %.16lx\n", ++ hv ? mfspr(SPRN_PSSCR) : mfspr(SPRN_PSSCR_PR)); + + if (!hv) + return; + +- printf("ptcr = %.16lx\n", +- mfspr(SPRN_PTCR)); ++ printf("ptcr = %.16lx asdr = %.16lx\n", ++ mfspr(SPRN_PTCR), mfspr(SPRN_ASDR)); + #endif + } + diff --git a/queue-4.14/s390-mm-fix-dynamic-pagetable-upgrade-for-hugetlbfs.patch b/queue-4.14/s390-mm-fix-dynamic-pagetable-upgrade-for-hugetlbfs.patch new file mode 100644 index 00000000000..87f2376cd90 --- /dev/null +++ b/queue-4.14/s390-mm-fix-dynamic-pagetable-upgrade-for-hugetlbfs.patch @@ -0,0 +1,181 @@ +From 5f490a520bcb393389a4d44bec90afcb332eb112 Mon Sep 17 00:00:00 2001 +From: Gerald Schaefer +Date: Thu, 16 Jan 2020 19:59:04 +0100 +Subject: s390/mm: fix dynamic pagetable upgrade for hugetlbfs + +From: Gerald Schaefer + +commit 5f490a520bcb393389a4d44bec90afcb332eb112 upstream. + +Commit ee71d16d22bb ("s390/mm: make TASK_SIZE independent from the number +of page table levels") changed the logic of TASK_SIZE and also removed the +arch_mmap_check() implementation for s390. This combination has a subtle +effect on how get_unmapped_area() for hugetlbfs pages works. It is now +possible that a user process establishes a hugetlbfs mapping at an address +above 4 TB, without triggering a dynamic pagetable upgrade from 3 to 4 +levels. + +This is because hugetlbfs mappings will not use mm->get_unmapped_area, but +rather file->f_op->get_unmapped_area, which currently is the generic +implementation of hugetlb_get_unmapped_area() that does not know about s390 +dynamic pagetable upgrades, but with the new definition of TASK_SIZE, it +will now allow mappings above 4 TB. + +Subsequent access to such a mapped address above 4 TB will result in a page +fault loop, because the CPU cannot translate such a large address with 3 +pagetable levels. The fault handler will try to map in a hugepage at the +address, but due to the folded pagetable logic it will end up with creating +entries in the 3 level pagetable, possibly overwriting existing mappings, +and then it all repeats when the access is retried. + +Apart from the page fault loop, this can have various nasty effects, e.g. +kernel panic from one of the BUG_ON() checks in memory management code, +or even data loss if an existing mapping gets overwritten. + +Fix this by implementing HAVE_ARCH_HUGETLB_UNMAPPED_AREA support for s390, +providing an s390 version for hugetlb_get_unmapped_area() with pagetable +upgrade support similar to arch_get_unmapped_area(), which will then be +used instead of the generic version. 
+ +Fixes: ee71d16d22bb ("s390/mm: make TASK_SIZE independent from the number of page table levels") +Cc: # 4.12+ +Signed-off-by: Gerald Schaefer +Signed-off-by: Vasily Gorbik +Signed-off-by: Greg Kroah-Hartman + +--- + arch/s390/include/asm/page.h | 2 + arch/s390/mm/hugetlbpage.c | 100 ++++++++++++++++++++++++++++++++++++++++++- + 2 files changed, 101 insertions(+), 1 deletion(-) + +--- a/arch/s390/include/asm/page.h ++++ b/arch/s390/include/asm/page.h +@@ -33,6 +33,8 @@ + #define ARCH_HAS_PREPARE_HUGEPAGE + #define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH + ++#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA ++ + #include + #ifndef __ASSEMBLY__ + +--- a/arch/s390/mm/hugetlbpage.c ++++ b/arch/s390/mm/hugetlbpage.c +@@ -2,7 +2,7 @@ + /* + * IBM System z Huge TLB Page Support for Kernel. + * +- * Copyright IBM Corp. 2007,2016 ++ * Copyright IBM Corp. 2007,2020 + * Author(s): Gerald Schaefer + */ + +@@ -11,6 +11,9 @@ + + #include + #include ++#include ++#include ++#include + + /* + * If the bit selected by single-bit bitmask "a" is set within "x", move +@@ -243,3 +246,98 @@ static __init int setup_hugepagesz(char + return 1; + } + __setup("hugepagesz=", setup_hugepagesz); ++ ++static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, ++ unsigned long addr, unsigned long len, ++ unsigned long pgoff, unsigned long flags) ++{ ++ struct hstate *h = hstate_file(file); ++ struct vm_unmapped_area_info info; ++ ++ info.flags = 0; ++ info.length = len; ++ info.low_limit = current->mm->mmap_base; ++ info.high_limit = TASK_SIZE; ++ info.align_mask = PAGE_MASK & ~huge_page_mask(h); ++ info.align_offset = 0; ++ return vm_unmapped_area(&info); ++} ++ ++static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, ++ unsigned long addr0, unsigned long len, ++ unsigned long pgoff, unsigned long flags) ++{ ++ struct hstate *h = hstate_file(file); ++ struct vm_unmapped_area_info info; ++ unsigned long addr; ++ ++ info.flags = VM_UNMAPPED_AREA_TOPDOWN; ++ info.length = len; ++ info.low_limit = max(PAGE_SIZE, mmap_min_addr); ++ info.high_limit = current->mm->mmap_base; ++ info.align_mask = PAGE_MASK & ~huge_page_mask(h); ++ info.align_offset = 0; ++ addr = vm_unmapped_area(&info); ++ ++ /* ++ * A failed mmap() very likely causes application failure, ++ * so fall back to the bottom-up function here. This scenario ++ * can happen with large stack limits and large mmap() ++ * allocations. 
++ */ ++ if (addr & ~PAGE_MASK) { ++ VM_BUG_ON(addr != -ENOMEM); ++ info.flags = 0; ++ info.low_limit = TASK_UNMAPPED_BASE; ++ info.high_limit = TASK_SIZE; ++ addr = vm_unmapped_area(&info); ++ } ++ ++ return addr; ++} ++ ++unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, ++ unsigned long len, unsigned long pgoff, unsigned long flags) ++{ ++ struct hstate *h = hstate_file(file); ++ struct mm_struct *mm = current->mm; ++ struct vm_area_struct *vma; ++ int rc; ++ ++ if (len & ~huge_page_mask(h)) ++ return -EINVAL; ++ if (len > TASK_SIZE - mmap_min_addr) ++ return -ENOMEM; ++ ++ if (flags & MAP_FIXED) { ++ if (prepare_hugepage_range(file, addr, len)) ++ return -EINVAL; ++ goto check_asce_limit; ++ } ++ ++ if (addr) { ++ addr = ALIGN(addr, huge_page_size(h)); ++ vma = find_vma(mm, addr); ++ if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && ++ (!vma || addr + len <= vm_start_gap(vma))) ++ goto check_asce_limit; ++ } ++ ++ if (mm->get_unmapped_area == arch_get_unmapped_area) ++ addr = hugetlb_get_unmapped_area_bottomup(file, addr, len, ++ pgoff, flags); ++ else ++ addr = hugetlb_get_unmapped_area_topdown(file, addr, len, ++ pgoff, flags); ++ if (addr & ~PAGE_MASK) ++ return addr; ++ ++check_asce_limit: ++ if (addr + len > current->mm->context.asce_limit && ++ addr + len <= TASK_SIZE) { ++ rc = crst_table_upgrade(mm, addr + len); ++ if (rc) ++ return (unsigned long) rc; ++ } ++ return addr; ++} diff --git a/queue-4.14/series b/queue-4.14/series index 7cfa0a9b9cc..7c571e61359 100644 --- a/queue-4.14/series +++ b/queue-4.14/series @@ -28,3 +28,8 @@ media-v4l2-rect.h-fix-v4l2_rect_map_inside-top-left-adjustments.patch lib-test_kasan.c-fix-memory-leak-in-kmalloc_oob_krealloc_more.patch irqdomain-fix-a-memory-leak-in-irq_domain_push_irq.patch platform-x86-intel_scu_ipc-fix-interrupt-support.patch +kvm-arm64-only-sign-extend-mmio-up-to-register-width.patch +mips-fix-indentation-of-the-relocs-message.patch +s390-mm-fix-dynamic-pagetable-upgrade-for-hugetlbfs.patch +powerpc-xmon-don-t-access-asdr-in-vms.patch +powerpc-pseries-advance-pfn-if-section-is-not-present-in-lmb_is_removable.patch
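
For reference, a minimal standalone sketch (plain user-space C, not the kernel code itself) of the arithmetic that the "KVM: arm64: Only sign-extend MMIO up to register width" patch above adds to kvm_handle_mmio_return(): sign-extend the MMIO read result according to the access width, then truncate to 32 bits when the destination is a 32-bit Wn register (ISS SF bit clear). The function and variable names below are illustrative only.

#include <stdint.h>
#include <stdio.h>

/*
 * Mimics the post-patch behaviour: 'len' is the access width in bytes,
 * 'sign_extend' mirrors the ISS SSE bit, 'sixty_four' mirrors the ISS SF bit.
 */
static uint64_t mmio_read_result(uint64_t data, unsigned int len,
                                 int sign_extend, int sixty_four)
{
        if (sign_extend && len < sizeof(uint64_t)) {
                uint64_t mask = 1ULL << (len * 8 - 1);
                data = (data ^ mask) - mask;    /* sign-extend to 64 bits */
        }
        if (!sixty_four)
                data &= 0xffffffffULL;          /* Wn destination: keep low 32 bits */
        return data;
}

int main(void)
{
        /* LDRSB w0, [x1] reading the byte 0x80 from MMIO */
        printf("%#llx\n", (unsigned long long)mmio_read_result(0x80, 1, 1, 0));
        /* -> 0xffffff80; before the patch KVM computed 0xffffffffffffff80
         * and wrote it to the full 64-bit guest register even for a Wn target */

        /* LDRSB x0, [x1], same byte but a 64-bit destination */
        printf("%#llx\n", (unsigned long long)mmio_read_result(0x80, 1, 1, 1));
        /* -> 0xffffffffffffff80, unchanged by the patch */
        return 0;
}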