git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.9-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 31 Aug 2017 05:58:52 +0000 (07:58 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 31 Aug 2017 05:58:52 +0000 (07:58 +0200)
added patches:
arm64-mm-abort-uaccess-retries-upon-fatal-signal.patch
kvm-arm-arm64-fix-race-in-resetting-stage2-pgd.patch
x86-io-add-memory-clobber-to-insb-insw-insl-outsb-outsw-outsl.patch

queue-4.9/arm64-mm-abort-uaccess-retries-upon-fatal-signal.patch [new file with mode: 0644]
queue-4.9/kvm-arm-arm64-fix-race-in-resetting-stage2-pgd.patch [new file with mode: 0644]
queue-4.9/series
queue-4.9/x86-io-add-memory-clobber-to-insb-insw-insl-outsb-outsw-outsl.patch [new file with mode: 0644]

diff --git a/queue-4.9/arm64-mm-abort-uaccess-retries-upon-fatal-signal.patch b/queue-4.9/arm64-mm-abort-uaccess-retries-upon-fatal-signal.patch
new file mode 100644 (file)
index 0000000..312ab3f
--- /dev/null
@@ -0,0 +1,54 @@
+From 289d07a2dc6c6b6f3e4b8a62669320d99dbe6c3d Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Tue, 11 Jul 2017 15:19:22 +0100
+Subject: arm64: mm: abort uaccess retries upon fatal signal
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 289d07a2dc6c6b6f3e4b8a62669320d99dbe6c3d upstream.
+
+When there's a fatal signal pending, arm64's do_page_fault()
+implementation returns 0. The intent is that we'll return to the
+faulting userspace instruction, delivering the signal on the way.
+
+However, if we take a fatal signal during fixing up a uaccess, this
+results in a return to the faulting kernel instruction, which will be
+instantly retried, resulting in the same fault being taken forever. As
+the task never reaches userspace, the signal is not delivered, and the
+task is left unkillable. While the task is stuck in this state, it can
+inhibit the forward progress of the system.
+
+To avoid this, we must ensure that when a fatal signal is pending, we
+apply any necessary fixup for a faulting kernel instruction. Thus we
+will return to an error path, and it is up to that code to make forward
+progress towards delivering the fatal signal.
+
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Laura Abbott <labbott@redhat.com>
+Reviewed-by: Steve Capper <steve.capper@arm.com>
+Tested-by: Steve Capper <steve.capper@arm.com>
+Reviewed-by: James Morse <james.morse@arm.com>
+Tested-by: James Morse <james.morse@arm.com>
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/mm/fault.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -373,8 +373,11 @@ retry:
+        * signal first. We do not need to release the mmap_sem because it
+        * would already be released in __lock_page_or_retry in mm/filemap.c.
+        */
+-      if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
++      if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
++              if (!user_mode(regs))
++                      goto no_context;
+               return 0;
++      }
+       /*
+        * Major/minor page fault accounting is only done on the initial
diff --git a/queue-4.9/kvm-arm-arm64-fix-race-in-resetting-stage2-pgd.patch b/queue-4.9/kvm-arm-arm64-fix-race-in-resetting-stage2-pgd.patch
new file mode 100644 (file)
index 0000000..f69cad0
--- /dev/null
@@ -0,0 +1,107 @@
+From 6c0d706b563af732adb094c5bf807437e8963e84 Mon Sep 17 00:00:00 2001
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+Date: Wed, 3 May 2017 15:17:51 +0100
+Subject: kvm: arm/arm64: Fix race in resetting stage2 PGD
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+commit 6c0d706b563af732adb094c5bf807437e8963e84 upstream.
+
+In kvm_free_stage2_pgd() we check the stage2 PGD before holding
+the lock and proceed to take the lock if it is valid. And we unmap
+the page tables, followed by releasing the lock. We reset the PGD
+only after dropping this lock, which could cause a race condition
+where another thread waiting on or even holding the lock, could
+potentially see that the PGD is still valid and proceed to perform
+a stage2 operation and later encounter a NULL PGD.
+
+[223090.242280] Unable to handle kernel NULL pointer dereference at
+virtual address 00000040
+[223090.262330] PC is at unmap_stage2_range+0x8c/0x428
+[223090.262332] LR is at kvm_unmap_hva_handler+0x2c/0x3c
+[223090.262531] Call trace:
+[223090.262533] [<ffff0000080adb78>] unmap_stage2_range+0x8c/0x428
+[223090.262535] [<ffff0000080adf40>] kvm_unmap_hva_handler+0x2c/0x3c
+[223090.262537] [<ffff0000080ace2c>] handle_hva_to_gpa+0xb0/0x104
+[223090.262539] [<ffff0000080af988>] kvm_unmap_hva+0x5c/0xbc
+[223090.262543] [<ffff0000080a2478>]
+kvm_mmu_notifier_invalidate_page+0x50/0x8c
+[223090.262547] [<ffff0000082274f8>]
+__mmu_notifier_invalidate_page+0x5c/0x84
+[223090.262551] [<ffff00000820b700>] try_to_unmap_one+0x1d0/0x4a0
+[223090.262553] [<ffff00000820c5c8>] rmap_walk+0x1cc/0x2e0
+[223090.262555] [<ffff00000820c90c>] try_to_unmap+0x74/0xa4
+[223090.262557] [<ffff000008230ce4>] migrate_pages+0x31c/0x5ac
+[223090.262561] [<ffff0000081f869c>] compact_zone+0x3fc/0x7ac
+[223090.262563] [<ffff0000081f8ae0>] compact_zone_order+0x94/0xb0
+[223090.262564] [<ffff0000081f91c0>] try_to_compact_pages+0x108/0x290
+[223090.262569] [<ffff0000081d5108>] __alloc_pages_direct_compact+0x70/0x1ac
+[223090.262571] [<ffff0000081d64a0>] __alloc_pages_nodemask+0x434/0x9f4
+[223090.262572] [<ffff0000082256f0>] alloc_pages_vma+0x230/0x254
+[223090.262574] [<ffff000008235e5c>] do_huge_pmd_anonymous_page+0x114/0x538
+[223090.262576] [<ffff000008201bec>] handle_mm_fault+0xd40/0x17a4
+[223090.262577] [<ffff0000081fb324>] __get_user_pages+0x12c/0x36c
+[223090.262578] [<ffff0000081fb804>] get_user_pages_unlocked+0xa4/0x1b8
+[223090.262579] [<ffff0000080a3ce8>] __gfn_to_pfn_memslot+0x280/0x31c
+[223090.262580] [<ffff0000080a3dd0>] gfn_to_pfn_prot+0x4c/0x5c
+[223090.262582] [<ffff0000080af3f8>] kvm_handle_guest_abort+0x240/0x774
+[223090.262584] [<ffff0000080b2bac>] handle_exit+0x11c/0x1ac
+[223090.262586] [<ffff0000080ab99c>] kvm_arch_vcpu_ioctl_run+0x31c/0x648
+[223090.262587] [<ffff0000080a1d78>] kvm_vcpu_ioctl+0x378/0x768
+[223090.262590] [<ffff00000825df5c>] do_vfs_ioctl+0x324/0x5a4
+[223090.262591] [<ffff00000825e26c>] SyS_ioctl+0x90/0xa4
+[223090.262595] [<ffff000008085d84>] el0_svc_naked+0x38/0x3c
+
+This patch moves the stage2 PGD manipulation under the lock.
+
+Reported-by: Alexander Graf <agraf@suse.de>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Marc Zyngier <marc.zyngier@arm.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Reviewed-by: Christoffer Dall <cdall@linaro.org>
+Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kvm/mmu.c |   16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -829,22 +829,22 @@ void stage2_unmap_vm(struct kvm *kvm)
+  * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
+  * underlying level-2 and level-3 tables before freeing the actual level-1 table
+  * and setting the struct pointer to NULL.
+- *
+- * Note we don't need locking here as this is only called when the VM is
+- * destroyed, which can only be done once.
+  */
+ void kvm_free_stage2_pgd(struct kvm *kvm)
+ {
+-      if (kvm->arch.pgd == NULL)
+-              return;
++      void *pgd = NULL;
+       spin_lock(&kvm->mmu_lock);
+-      unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
++      if (kvm->arch.pgd) {
++              unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
++              pgd = kvm->arch.pgd;
++              kvm->arch.pgd = NULL;
++      }
+       spin_unlock(&kvm->mmu_lock);
+       /* Free the HW pgd, one page at a time */
+-      free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
+-      kvm->arch.pgd = NULL;
++      if (pgd)
++              free_pages_exact(pgd, S2_PGD_SIZE);
+ }
+ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
index cbeb312cffc295cd294e3bc663d66d7114f3ae9d..af2b70af6e301bf604f8b0f6075b3fe8286d96b6 100644 (file)
@@ -2,3 +2,6 @@ p54-memset-0-whole-array.patch
 scsi-isci-avoid-array-subscript-warning.patch
 staging-wilc1000-simplify-vif-ndev-accesses.patch
 gcov-support-gcc-7.1.patch
+kvm-arm-arm64-fix-race-in-resetting-stage2-pgd.patch
+arm64-mm-abort-uaccess-retries-upon-fatal-signal.patch
+x86-io-add-memory-clobber-to-insb-insw-insl-outsb-outsw-outsl.patch
diff --git a/queue-4.9/x86-io-add-memory-clobber-to-insb-insw-insl-outsb-outsw-outsl.patch b/queue-4.9/x86-io-add-memory-clobber-to-insb-insw-insl-outsb-outsw-outsl.patch
new file mode 100644 (file)
index 0000000..6c5773b
--- /dev/null
@@ -0,0 +1,66 @@
+From 7206f9bf108eb9513d170c73f151367a1bdf3dbf Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Wed, 19 Jul 2017 14:53:02 +0200
+Subject: x86/io: Add "memory" clobber to insb/insw/insl/outsb/outsw/outsl
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit 7206f9bf108eb9513d170c73f151367a1bdf3dbf upstream.
+
+The x86 version of insb/insw/insl uses an inline assembly that does
+not have the target buffer listed as an output. This can confuse
+the compiler, leading it to think that a subsequent access of the
+buffer is uninitialized:
+
+  drivers/net/wireless/wl3501_cs.c: In function ‘wl3501_mgmt_scan_confirm’:
+  drivers/net/wireless/wl3501_cs.c:665:9: error: ‘sig.status’ is used uninitialized in this function [-Werror=uninitialized]
+  drivers/net/wireless/wl3501_cs.c:668:12: error: ‘sig.cap_info’ may be used uninitialized in this function [-Werror=maybe-uninitialized]
+  drivers/net/sb1000.c: In function 'sb1000_rx':
+  drivers/net/sb1000.c:775:9: error: 'st[0]' is used uninitialized in this function [-Werror=uninitialized]
+  drivers/net/sb1000.c:776:10: error: 'st[1]' may be used uninitialized in this function [-Werror=maybe-uninitialized]
+  drivers/net/sb1000.c:784:11: error: 'st[1]' may be used uninitialized in this function [-Werror=maybe-uninitialized]
+
+I tried to mark the exact input buffer as an output here, but couldn't
+figure it out. As suggested by Linus, marking all memory as clobbered
+however is good enough too. For the outs operations, I also add the
+memory clobber, to force the input to be written to local variables.
+This is probably already guaranteed by the "asm volatile", but it can't
+hurt to do this for symmetry.
+
+Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Link: http://lkml.kernel.org/r/20170719125310.2487451-5-arnd@arndb.de
+Link: https://lkml.org/lkml/2017/7/12/605
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/io.h |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/io.h
++++ b/arch/x86/include/asm/io.h
+@@ -304,13 +304,13 @@ static inline unsigned type in##bwl##_p(
+ static inline void outs##bwl(int port, const void *addr, unsigned long count) \
+ {                                                                     \
+       asm volatile("rep; outs" #bwl                                   \
+-                   : "+S"(addr), "+c"(count) : "d"(port));            \
++                   : "+S"(addr), "+c"(count) : "d"(port) : "memory"); \
+ }                                                                     \
+                                                                       \
+ static inline void ins##bwl(int port, void *addr, unsigned long count)        \
+ {                                                                     \
+       asm volatile("rep; ins" #bwl                                    \
+-                   : "+D"(addr), "+c"(count) : "d"(port));            \
++                   : "+D"(addr), "+c"(count) : "d"(port) : "memory"); \
+ }
+ BUILDIO(b, b, char)