git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.20-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 5 Jan 2019 15:32:06 +0000 (16:32 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 5 Jan 2019 15:32:06 +0000 (16:32 +0100)
added patches:
arm64-kvm-make-vhe-stage-2-tlb-invalidation-operations-non-interruptible.patch
input-atmel_mxt_ts-don-t-try-to-free-unallocated-kernel-memory.patch
input-elan_i2c-add-acpi-id-for-touchpad-in-asus-aspire-f5-573g.patch
kvm-nvmx-free-the-vmread-vmwrite-bitmaps-if-alloc_kvm_area-fails.patch
kvm-ppc-book3s-hv-fix-race-between-kvm_unmap_hva_range-and-mmu-mode-switch.patch
kvm-x86-use-jmp-to-invoke-kvm_spurious_fault-from-.fixup.patch
s390-pci-fix-sleeping-in-atomic-during-hotplug.patch
x86-mm-drop-usage-of-__flush_tlb_all-in-kernel_physical_mapping_init.patch
x86-speculation-l1tf-drop-the-swap-storage-limit-restriction-when-l1tf-off.patch

queue-4.20/arm64-kvm-make-vhe-stage-2-tlb-invalidation-operations-non-interruptible.patch [new file with mode: 0644]
queue-4.20/input-atmel_mxt_ts-don-t-try-to-free-unallocated-kernel-memory.patch [new file with mode: 0644]
queue-4.20/input-elan_i2c-add-acpi-id-for-touchpad-in-asus-aspire-f5-573g.patch [new file with mode: 0644]
queue-4.20/kvm-nvmx-free-the-vmread-vmwrite-bitmaps-if-alloc_kvm_area-fails.patch [new file with mode: 0644]
queue-4.20/kvm-ppc-book3s-hv-fix-race-between-kvm_unmap_hva_range-and-mmu-mode-switch.patch [new file with mode: 0644]
queue-4.20/kvm-x86-use-jmp-to-invoke-kvm_spurious_fault-from-.fixup.patch [new file with mode: 0644]
queue-4.20/s390-pci-fix-sleeping-in-atomic-during-hotplug.patch [new file with mode: 0644]
queue-4.20/series
queue-4.20/x86-mm-drop-usage-of-__flush_tlb_all-in-kernel_physical_mapping_init.patch [new file with mode: 0644]
queue-4.20/x86-speculation-l1tf-drop-the-swap-storage-limit-restriction-when-l1tf-off.patch [new file with mode: 0644]

diff --git a/queue-4.20/arm64-kvm-make-vhe-stage-2-tlb-invalidation-operations-non-interruptible.patch b/queue-4.20/arm64-kvm-make-vhe-stage-2-tlb-invalidation-operations-non-interruptible.patch
new file mode 100644 (file)
index 0000000..da20e68
--- /dev/null
@@ -0,0 +1,149 @@
+From c987876a80e7bcb98a839f10dca9ce7fda4feced Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Thu, 6 Dec 2018 17:31:19 +0000
+Subject: arm64: KVM: Make VHE Stage-2 TLB invalidation operations non-interruptible
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit c987876a80e7bcb98a839f10dca9ce7fda4feced upstream.
+
+Contrary to the non-VHE version of the TLB invalidation helpers, the VHE
+code  has interrupts enabled, meaning that we can take an interrupt in
+the middle of such a sequence, and start running something else with
+HCR_EL2.TGE cleared.
+
+That's really not a good idea.
+
+Take the heavy-handed option and disable interrupts in
+__tlb_switch_to_guest_vhe, restoring them in __tlb_switch_to_host_vhe.
+The latter also gain an ISB in order to make sure that TGE really has
+taken effect.
+
+Cc: stable@vger.kernel.org
+Acked-by: Christoffer Dall <christoffer.dall@arm.com>
+Reviewed-by: James Morse <james.morse@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kvm/hyp/tlb.c |   35 +++++++++++++++++++++++++----------
+ 1 file changed, 25 insertions(+), 10 deletions(-)
+
+--- a/arch/arm64/kvm/hyp/tlb.c
++++ b/arch/arm64/kvm/hyp/tlb.c
+@@ -15,14 +15,19 @@
+  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+  */
++#include <linux/irqflags.h>
++
+ #include <asm/kvm_hyp.h>
+ #include <asm/kvm_mmu.h>
+ #include <asm/tlbflush.h>
+-static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
++static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
++                                               unsigned long *flags)
+ {
+       u64 val;
++      local_irq_save(*flags);
++
+       /*
+        * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
+        * most TLB operations target EL2/EL0. In order to affect the
+@@ -37,7 +42,8 @@ static void __hyp_text __tlb_switch_to_g
+       isb();
+ }
+-static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
++static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
++                                                unsigned long *flags)
+ {
+       __load_guest_stage2(kvm);
+       isb();
+@@ -48,7 +54,8 @@ static hyp_alternate_select(__tlb_switch
+                           __tlb_switch_to_guest_vhe,
+                           ARM64_HAS_VIRT_HOST_EXTN);
+-static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
++static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
++                                              unsigned long flags)
+ {
+       /*
+        * We're done with the TLB operation, let's restore the host's
+@@ -56,9 +63,12 @@ static void __hyp_text __tlb_switch_to_h
+        */
+       write_sysreg(0, vttbr_el2);
+       write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
++      isb();
++      local_irq_restore(flags);
+ }
+-static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
++static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
++                                               unsigned long flags)
+ {
+       write_sysreg(0, vttbr_el2);
+ }
+@@ -70,11 +80,13 @@ static hyp_alternate_select(__tlb_switch
+ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
+ {
++      unsigned long flags;
++
+       dsb(ishst);
+       /* Switch to requested VMID */
+       kvm = kern_hyp_va(kvm);
+-      __tlb_switch_to_guest()(kvm);
++      __tlb_switch_to_guest()(kvm, &flags);
+       /*
+        * We could do so much better if we had the VA as well.
+@@ -117,36 +129,39 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa
+       if (!has_vhe() && icache_is_vpipt())
+               __flush_icache_all();
+-      __tlb_switch_to_host()(kvm);
++      __tlb_switch_to_host()(kvm, flags);
+ }
+ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
+ {
++      unsigned long flags;
++
+       dsb(ishst);
+       /* Switch to requested VMID */
+       kvm = kern_hyp_va(kvm);
+-      __tlb_switch_to_guest()(kvm);
++      __tlb_switch_to_guest()(kvm, &flags);
+       __tlbi(vmalls12e1is);
+       dsb(ish);
+       isb();
+-      __tlb_switch_to_host()(kvm);
++      __tlb_switch_to_host()(kvm, flags);
+ }
+ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+ {
+       struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
++      unsigned long flags;
+       /* Switch to requested VMID */
+-      __tlb_switch_to_guest()(kvm);
++      __tlb_switch_to_guest()(kvm, &flags);
+       __tlbi(vmalle1);
+       dsb(nsh);
+       isb();
+-      __tlb_switch_to_host()(kvm);
++      __tlb_switch_to_host()(kvm, flags);
+ }
+ void __hyp_text __kvm_flush_vm_context(void)
diff --git a/queue-4.20/input-atmel_mxt_ts-don-t-try-to-free-unallocated-kernel-memory.patch b/queue-4.20/input-atmel_mxt_ts-don-t-try-to-free-unallocated-kernel-memory.patch
new file mode 100644 (file)
index 0000000..e1f2f99
--- /dev/null
@@ -0,0 +1,40 @@
+From 1e3c336ad8f40f88a8961c434640920fe35cc08b Mon Sep 17 00:00:00 2001
+From: Sanjeev Chugh <sanjeev_chugh@mentor.com>
+Date: Fri, 28 Dec 2018 17:04:31 -0800
+Subject: Input: atmel_mxt_ts - don't try to free unallocated kernel memory
+
+From: Sanjeev Chugh <sanjeev_chugh@mentor.com>
+
+commit 1e3c336ad8f40f88a8961c434640920fe35cc08b upstream.
+
+If the user attempts to update Atmel device with an invalid configuration
+cfg file, error handling code is trying to free cfg file memory which is
+not allocated yet hence results into kernel crash.
+
+This patch fixes the order of memory free operations.
+
+Signed-off-by: Sanjeev Chugh <sanjeev_chugh@mentor.com>
+Fixes: a4891f105837 ("Input: atmel_mxt_ts - zero terminate config firmware file")
+Cc: stable@vger.kernel.org
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/touchscreen/atmel_mxt_ts.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/input/touchscreen/atmel_mxt_ts.c
++++ b/drivers/input/touchscreen/atmel_mxt_ts.c
+@@ -1585,10 +1585,10 @@ static int mxt_update_cfg(struct mxt_dat
+       /* T7 config may have changed */
+       mxt_init_t7_power_cfg(data);
+-release_raw:
+-      kfree(cfg.raw);
+ release_mem:
+       kfree(cfg.mem);
++release_raw:
++      kfree(cfg.raw);
+       return ret;
+ }
diff --git a/queue-4.20/input-elan_i2c-add-acpi-id-for-touchpad-in-asus-aspire-f5-573g.patch b/queue-4.20/input-elan_i2c-add-acpi-id-for-touchpad-in-asus-aspire-f5-573g.patch
new file mode 100644 (file)
index 0000000..2a92d87
--- /dev/null
@@ -0,0 +1,31 @@
+From 7db54c89f0b30a101584e09d3729144e6170059d Mon Sep 17 00:00:00 2001
+From: Patrick Dreyer <Patrick@Dreyer.name>
+Date: Sun, 23 Dec 2018 10:06:35 -0800
+Subject: Input: elan_i2c - add ACPI ID for touchpad in ASUS Aspire F5-573G
+
+From: Patrick Dreyer <Patrick@Dreyer.name>
+
+commit 7db54c89f0b30a101584e09d3729144e6170059d upstream.
+
+This adds ELAN0501 to the ACPI table to support Elan touchpad found in ASUS
+Aspire F5-573G.
+
+Signed-off-by: Patrick Dreyer <Patrick.Dreyer@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/mouse/elan_i2c_core.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -1336,6 +1336,7 @@ MODULE_DEVICE_TABLE(i2c, elan_id);
+ static const struct acpi_device_id elan_acpi_id[] = {
+       { "ELAN0000", 0 },
+       { "ELAN0100", 0 },
++      { "ELAN0501", 0 },
+       { "ELAN0600", 0 },
+       { "ELAN0602", 0 },
+       { "ELAN0605", 0 },
diff --git a/queue-4.20/kvm-nvmx-free-the-vmread-vmwrite-bitmaps-if-alloc_kvm_area-fails.patch b/queue-4.20/kvm-nvmx-free-the-vmread-vmwrite-bitmaps-if-alloc_kvm_area-fails.patch
new file mode 100644 (file)
index 0000000..21820ee
--- /dev/null
@@ -0,0 +1,40 @@
+From 1b3ab5ad1b8ad99bae76ec583809c5f5a31c707c Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Mon, 3 Dec 2018 13:52:51 -0800
+Subject: KVM: nVMX: Free the VMREAD/VMWRITE bitmaps if alloc_kvm_area() fails
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit 1b3ab5ad1b8ad99bae76ec583809c5f5a31c707c upstream.
+
+Fixes: 34a1cd60d17f ("kvm: x86: vmx: move some vmx setting from vmx_init() to hardware_setup()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx.c |    7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -8031,13 +8031,16 @@ static __init int hardware_setup(void)
+       kvm_mce_cap_supported |= MCG_LMCE_P;
+-      return alloc_kvm_area();
++      r = alloc_kvm_area();
++      if (r)
++              goto out;
++      return 0;
+ out:
+       for (i = 0; i < VMX_BITMAP_NR; i++)
+               free_page((unsigned long)vmx_bitmap[i]);
+-    return r;
++      return r;
+ }
+ static __exit void hardware_unsetup(void)
diff --git a/queue-4.20/kvm-ppc-book3s-hv-fix-race-between-kvm_unmap_hva_range-and-mmu-mode-switch.patch b/queue-4.20/kvm-ppc-book3s-hv-fix-race-between-kvm_unmap_hva_range-and-mmu-mode-switch.patch
new file mode 100644 (file)
index 0000000..9c32470
--- /dev/null
@@ -0,0 +1,98 @@
+From 234ff0b729ad882d20f7996591a964965647addf Mon Sep 17 00:00:00 2001
+From: Paul Mackerras <paulus@ozlabs.org>
+Date: Fri, 16 Nov 2018 21:28:18 +1100
+Subject: KVM: PPC: Book3S HV: Fix race between kvm_unmap_hva_range and MMU mode switch
+
+From: Paul Mackerras <paulus@ozlabs.org>
+
+commit 234ff0b729ad882d20f7996591a964965647addf upstream.
+
+Testing has revealed an occasional crash which appears to be caused
+by a race between kvmppc_switch_mmu_to_hpt and kvm_unmap_hva_range_hv.
+The symptom is a NULL pointer dereference in __find_linux_pte() called
+from kvm_unmap_radix() with kvm->arch.pgtable == NULL.
+
+Looking at kvmppc_switch_mmu_to_hpt(), it does indeed clear
+kvm->arch.pgtable (via kvmppc_free_radix()) before setting
+kvm->arch.radix to NULL, and there is nothing to prevent
+kvm_unmap_hva_range_hv() or the other MMU callback functions from
+being called concurrently with kvmppc_switch_mmu_to_hpt() or
+kvmppc_switch_mmu_to_radix().
+
+This patch therefore adds calls to spin_lock/unlock on the kvm->mmu_lock
+around the assignments to kvm->arch.radix, and makes sure that the
+partition-scoped radix tree or HPT is only freed after changing
+kvm->arch.radix.
+
+This also takes the kvm->mmu_lock in kvmppc_rmap_reset() to make sure
+that the clearing of each rmap array (one per memslot) doesn't happen
+concurrently with use of the array in the kvm_unmap_hva_range_hv()
+or the other MMU callbacks.
+
+Fixes: 18c3640cefc7 ("KVM: PPC: Book3S HV: Add infrastructure for running HPT guests on radix host")
+Cc: stable@vger.kernel.org # v4.15+
+Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kvm/book3s_64_mmu_hv.c |    3 +++
+ arch/powerpc/kvm/book3s_hv.c        |   17 +++++++++++------
+ 2 files changed, 14 insertions(+), 6 deletions(-)
+
+--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
++++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
+@@ -743,12 +743,15 @@ void kvmppc_rmap_reset(struct kvm *kvm)
+       srcu_idx = srcu_read_lock(&kvm->srcu);
+       slots = kvm_memslots(kvm);
+       kvm_for_each_memslot(memslot, slots) {
++              /* Mutual exclusion with kvm_unmap_hva_range etc. */
++              spin_lock(&kvm->mmu_lock);
+               /*
+                * This assumes it is acceptable to lose reference and
+                * change bits across a reset.
+                */
+               memset(memslot->arch.rmap, 0,
+                      memslot->npages * sizeof(*memslot->arch.rmap));
++              spin_unlock(&kvm->mmu_lock);
+       }
+       srcu_read_unlock(&kvm->srcu, srcu_idx);
+ }
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -4532,12 +4532,15 @@ int kvmppc_switch_mmu_to_hpt(struct kvm
+ {
+       if (nesting_enabled(kvm))
+               kvmhv_release_all_nested(kvm);
++      kvmppc_rmap_reset(kvm);
++      kvm->arch.process_table = 0;
++      /* Mutual exclusion with kvm_unmap_hva_range etc. */
++      spin_lock(&kvm->mmu_lock);
++      kvm->arch.radix = 0;
++      spin_unlock(&kvm->mmu_lock);
+       kvmppc_free_radix(kvm);
+       kvmppc_update_lpcr(kvm, LPCR_VPM1,
+                          LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
+-      kvmppc_rmap_reset(kvm);
+-      kvm->arch.radix = 0;
+-      kvm->arch.process_table = 0;
+       return 0;
+ }
+@@ -4549,12 +4552,14 @@ int kvmppc_switch_mmu_to_radix(struct kv
+       err = kvmppc_init_vm_radix(kvm);
+       if (err)
+               return err;
+-
++      kvmppc_rmap_reset(kvm);
++      /* Mutual exclusion with kvm_unmap_hva_range etc. */
++      spin_lock(&kvm->mmu_lock);
++      kvm->arch.radix = 1;
++      spin_unlock(&kvm->mmu_lock);
+       kvmppc_free_hpt(&kvm->arch.hpt);
+       kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR,
+                          LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
+-      kvmppc_rmap_reset(kvm);
+-      kvm->arch.radix = 1;
+       return 0;
+ }
diff --git a/queue-4.20/kvm-x86-use-jmp-to-invoke-kvm_spurious_fault-from-.fixup.patch b/queue-4.20/kvm-x86-use-jmp-to-invoke-kvm_spurious_fault-from-.fixup.patch
new file mode 100644 (file)
index 0000000..042db8e
--- /dev/null
@@ -0,0 +1,136 @@
+From e81434995081fd7efb755fd75576b35dbb0850b1 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Thu, 20 Dec 2018 14:21:08 -0800
+Subject: KVM: x86: Use jmp to invoke kvm_spurious_fault() from .fixup
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit e81434995081fd7efb755fd75576b35dbb0850b1 upstream.
+
+____kvm_handle_fault_on_reboot() provides a generic exception fixup
+handler that is used to cleanly handle faults on VMX/SVM instructions
+during reboot (or at least try to).  If there isn't a reboot in
+progress, ____kvm_handle_fault_on_reboot() treats any exception as
+fatal to KVM and invokes kvm_spurious_fault(), which in turn generates
+a BUG() to get a stack trace and die.
+
+When it was originally added by commit 4ecac3fd6dc2 ("KVM: Handle
+virtualization instruction #UD faults during reboot"), the "call" to
+kvm_spurious_fault() was handcoded as PUSH+JMP, where the PUSH'd value
+is the RIP of the faulting instructing.
+
+The PUSH+JMP trickery is necessary because the exception fixup handler
+code lies outside of its associated function, e.g. right after the
+function.  An actual CALL from the .fixup code would show a slightly
+bogus stack trace, e.g. an extra "random" function would be inserted
+into the trace, as the return RIP on the stack would point to no known
+function (and the unwinder will likely try to guess who owns the RIP).
+
+Unfortunately, the JMP was replaced with a CALL when the macro was
+reworked to not spin indefinitely during reboot (commit b7c4145ba2eb
+"KVM: Don't spin on virt instruction faults during reboot").  This
+causes the aforementioned behavior where a bogus function is inserted
+into the stack trace, e.g. my builds like to blame free_kvm_area().
+
+Revert the CALL back to a JMP.  The changelog for commit b7c4145ba2eb
+("KVM: Don't spin on virt instruction faults during reboot") contains
+nothing that indicates the switch to CALL was deliberate.  This is
+backed up by the fact that the PUSH <insn RIP> was left intact.
+
+Note that an alternative to the PUSH+JMP magic would be to JMP back
+to the "real" code and CALL from there, but that would require adding
+a JMP in the non-faulting path to avoid calling kvm_spurious_fault()
+and would add no value, i.e. the stack trace would be the same.
+
+Using CALL:
+
+------------[ cut here ]------------
+kernel BUG at /home/sean/go/src/kernel.org/linux/arch/x86/kvm/x86.c:356!
+invalid opcode: 0000 [#1] SMP
+CPU: 4 PID: 1057 Comm: qemu-system-x86 Not tainted 4.20.0-rc6+ #75
+Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
+RIP: 0010:kvm_spurious_fault+0x5/0x10 [kvm]
+Code: <0f> 0b 66 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 41 55 49 89 fd 41
+RSP: 0018:ffffc900004bbcc8 EFLAGS: 00010046
+RAX: 0000000000000000 RBX: 0000000000000000 RCX: ffffffffffffffff
+RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000
+RBP: ffff888273fd8000 R08: 00000000000003e8 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000784 R12: ffffc90000371fb0
+R13: 0000000000000000 R14: 000000026d763cf4 R15: ffff888273fd8000
+FS:  00007f3d69691700(0000) GS:ffff888277800000(0000) knlGS:0000000000000000
+CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 000055f89bc56fe0 CR3: 0000000271a5a001 CR4: 0000000000362ee0
+Call Trace:
+ free_kvm_area+0x1044/0x43ea [kvm_intel]
+ ? vmx_vcpu_run+0x156/0x630 [kvm_intel]
+ ? kvm_arch_vcpu_ioctl_run+0x447/0x1a40 [kvm]
+ ? kvm_vcpu_ioctl+0x368/0x5c0 [kvm]
+ ? kvm_vcpu_ioctl+0x368/0x5c0 [kvm]
+ ? __set_task_blocked+0x38/0x90
+ ? __set_current_blocked+0x50/0x60
+ ? __fpu__restore_sig+0x97/0x490
+ ? do_vfs_ioctl+0xa1/0x620
+ ? __x64_sys_futex+0x89/0x180
+ ? ksys_ioctl+0x66/0x70
+ ? __x64_sys_ioctl+0x16/0x20
+ ? do_syscall_64+0x4f/0x100
+ ? entry_SYSCALL_64_after_hwframe+0x44/0xa9
+Modules linked in: vhost_net vhost tap kvm_intel kvm irqbypass bridge stp llc
+---[ end trace 9775b14b123b1713 ]---
+
+Using JMP:
+
+------------[ cut here ]------------
+kernel BUG at /home/sean/go/src/kernel.org/linux/arch/x86/kvm/x86.c:356!
+invalid opcode: 0000 [#1] SMP
+CPU: 6 PID: 1067 Comm: qemu-system-x86 Not tainted 4.20.0-rc6+ #75
+Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
+RIP: 0010:kvm_spurious_fault+0x5/0x10 [kvm]
+Code: <0f> 0b 66 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 41 55 49 89 fd 41
+RSP: 0018:ffffc90000497cd0 EFLAGS: 00010046
+RAX: 0000000000000000 RBX: 0000000000000000 RCX: ffffffffffffffff
+RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000
+RBP: ffff88827058bd40 R08: 00000000000003e8 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000784 R12: ffffc90000369fb0
+R13: 0000000000000000 R14: 00000003c8fc6642 R15: ffff88827058bd40
+FS:  00007f3d7219e700(0000) GS:ffff888277900000(0000) knlGS:0000000000000000
+CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007f3d64001000 CR3: 0000000271c6b004 CR4: 0000000000362ee0
+Call Trace:
+ vmx_vcpu_run+0x156/0x630 [kvm_intel]
+ ? kvm_arch_vcpu_ioctl_run+0x447/0x1a40 [kvm]
+ ? kvm_vcpu_ioctl+0x368/0x5c0 [kvm]
+ ? kvm_vcpu_ioctl+0x368/0x5c0 [kvm]
+ ? __set_task_blocked+0x38/0x90
+ ? __set_current_blocked+0x50/0x60
+ ? __fpu__restore_sig+0x97/0x490
+ ? do_vfs_ioctl+0xa1/0x620
+ ? __x64_sys_futex+0x89/0x180
+ ? ksys_ioctl+0x66/0x70
+ ? __x64_sys_ioctl+0x16/0x20
+ ? do_syscall_64+0x4f/0x100
+ ? entry_SYSCALL_64_after_hwframe+0x44/0xa9
+Modules linked in: vhost_net vhost tap kvm_intel kvm irqbypass bridge stp llc
+---[ end trace f9daedb85ab3ddba ]---
+
+Fixes: b7c4145ba2eb ("KVM: Don't spin on virt instruction faults during reboot")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/kvm_host.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1492,7 +1492,7 @@ asmlinkage void kvm_spurious_fault(void)
+       "cmpb $0, kvm_rebooting \n\t"         \
+       "jne 668b \n\t"                       \
+       __ASM_SIZE(push) " $666b \n\t"        \
+-      "call kvm_spurious_fault \n\t"        \
++      "jmp kvm_spurious_fault \n\t"         \
+       ".popsection \n\t" \
+       _ASM_EXTABLE(666b, 667b)
diff --git a/queue-4.20/s390-pci-fix-sleeping-in-atomic-during-hotplug.patch b/queue-4.20/s390-pci-fix-sleeping-in-atomic-during-hotplug.patch
new file mode 100644 (file)
index 0000000..9f05707
--- /dev/null
@@ -0,0 +1,39 @@
+From 98dfd32620e970eb576ebce5ea39d905cb005e72 Mon Sep 17 00:00:00 2001
+From: Sebastian Ott <sebott@linux.ibm.com>
+Date: Thu, 18 Oct 2018 11:11:08 +0200
+Subject: s390/pci: fix sleeping in atomic during hotplug
+
+From: Sebastian Ott <sebott@linux.ibm.com>
+
+commit 98dfd32620e970eb576ebce5ea39d905cb005e72 upstream.
+
+When triggered by pci hotplug (PEC 0x306) clp_get_state is called
+with spinlocks held resulting in the following warning:
+
+zpci: n/a: Event 0x306 reconfigured PCI function 0x0
+BUG: sleeping function called from invalid context at mm/page_alloc.c:4324
+in_atomic(): 1, irqs_disabled(): 0, pid: 98, name: kmcheck
+2 locks held by kmcheck/98:
+
+Change the allocation to use GFP_ATOMIC.
+
+Cc: stable@vger.kernel.org # 4.13+
+Signed-off-by: Sebastian Ott <sebott@linux.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/pci/pci_clp.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/s390/pci/pci_clp.c
++++ b/arch/s390/pci/pci_clp.c
+@@ -436,7 +436,7 @@ int clp_get_state(u32 fid, enum zpci_sta
+       struct clp_state_data sd = {fid, ZPCI_FN_STATE_RESERVED};
+       int rc;
+-      rrb = clp_alloc_block(GFP_KERNEL);
++      rrb = clp_alloc_block(GFP_ATOMIC);
+       if (!rrb)
+               return -ENOMEM;
index 4cdec5b69c85324200d5f4962eb82296f0ddadac..252a785403cf0db3a104734ee184de0f80526710 100644 (file)
@@ -48,3 +48,12 @@ staging-wilc1000-fix-missing-read_write-setting-when-reading-data.patch
 staging-bcm2835-audio-double-free-in-init-error-path.patch
 asoc-intel-cht_bsw_max98090_ti-add-pmc_plt_clk_0-quirk-for-chromebook-clapper.patch
 asoc-intel-cht_bsw_max98090_ti-add-pmc_plt_clk_0-quirk-for-chromebook-gnawty.patch
+s390-pci-fix-sleeping-in-atomic-during-hotplug.patch
+input-atmel_mxt_ts-don-t-try-to-free-unallocated-kernel-memory.patch
+input-elan_i2c-add-acpi-id-for-touchpad-in-asus-aspire-f5-573g.patch
+x86-speculation-l1tf-drop-the-swap-storage-limit-restriction-when-l1tf-off.patch
+x86-mm-drop-usage-of-__flush_tlb_all-in-kernel_physical_mapping_init.patch
+kvm-x86-use-jmp-to-invoke-kvm_spurious_fault-from-.fixup.patch
+arm64-kvm-make-vhe-stage-2-tlb-invalidation-operations-non-interruptible.patch
+kvm-ppc-book3s-hv-fix-race-between-kvm_unmap_hva_range-and-mmu-mode-switch.patch
+kvm-nvmx-free-the-vmread-vmwrite-bitmaps-if-alloc_kvm_area-fails.patch
diff --git a/queue-4.20/x86-mm-drop-usage-of-__flush_tlb_all-in-kernel_physical_mapping_init.patch b/queue-4.20/x86-mm-drop-usage-of-__flush_tlb_all-in-kernel_physical_mapping_init.patch
new file mode 100644 (file)
index 0000000..b248063
--- /dev/null
@@ -0,0 +1,110 @@
+From ba6f508d0ec4adb09f0a939af6d5e19cdfa8667d Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Tue, 4 Dec 2018 13:37:27 -0800
+Subject: x86/mm: Drop usage of __flush_tlb_all() in kernel_physical_mapping_init()
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit ba6f508d0ec4adb09f0a939af6d5e19cdfa8667d upstream.
+
+Commit:
+
+  f77084d96355 "x86/mm/pat: Disable preemption around __flush_tlb_all()"
+
+addressed a case where __flush_tlb_all() is called without preemption
+being disabled. It also left a warning to catch other cases where
+preemption is not disabled.
+
+That warning triggers for the memory hotplug path which is also used for
+persistent memory enabling:
+
+ WARNING: CPU: 35 PID: 911 at ./arch/x86/include/asm/tlbflush.h:460
+ RIP: 0010:__flush_tlb_all+0x1b/0x3a
+ [..]
+ Call Trace:
+  phys_pud_init+0x29c/0x2bb
+  kernel_physical_mapping_init+0xfc/0x219
+  init_memory_mapping+0x1a5/0x3b0
+  arch_add_memory+0x2c/0x50
+  devm_memremap_pages+0x3aa/0x610
+  pmem_attach_disk+0x585/0x700 [nd_pmem]
+
+Andy wondered why a path that can sleep was using __flush_tlb_all() [1]
+and Dave confirmed the expectation for TLB flush is for modifying /
+invalidating existing PTE entries, but not initial population [2]. Drop
+the usage of __flush_tlb_all() in phys_{p4d,pud,pmd}_init() on the
+expectation that this path is only ever populating empty entries for the
+linear map. Note, at linear map teardown time there is a call to the
+all-cpu flush_tlb_all() to invalidate the removed mappings.
+
+[1]: https://lkml.kernel.org/r/9DFD717D-857D-493D-A606-B635D72BAC21@amacapital.net
+[2]: https://lkml.kernel.org/r/749919a4-cdb1-48a3-adb4-adb81a5fa0b5@intel.com
+
+[ mingo: Minor readability edits. ]
+
+Suggested-by: Dave Hansen <dave.hansen@linux.intel.com>
+Reported-by: Andy Lutomirski <luto@kernel.org>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: <stable@vger.kernel.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Rik van Riel <riel@surriel.com>
+Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: dave.hansen@intel.com
+Fixes: f77084d96355 ("x86/mm/pat: Disable preemption around __flush_tlb_all()")
+Link: http://lkml.kernel.org/r/154395944713.32119.15611079023837132638.stgit@dwillia2-desk3.amr.corp.intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/mm/init_64.c |    6 ------
+ 1 file changed, 6 deletions(-)
+
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -584,7 +584,6 @@ phys_pud_init(pud_t *pud_page, unsigned
+                                                          paddr_end,
+                                                          page_size_mask,
+                                                          prot);
+-                              __flush_tlb_all();
+                               continue;
+                       }
+                       /*
+@@ -627,7 +626,6 @@ phys_pud_init(pud_t *pud_page, unsigned
+               pud_populate(&init_mm, pud, pmd);
+               spin_unlock(&init_mm.page_table_lock);
+       }
+-      __flush_tlb_all();
+       update_page_count(PG_LEVEL_1G, pages);
+@@ -668,7 +666,6 @@ phys_p4d_init(p4d_t *p4d_page, unsigned
+                       paddr_last = phys_pud_init(pud, paddr,
+                                       paddr_end,
+                                       page_size_mask);
+-                      __flush_tlb_all();
+                       continue;
+               }
+@@ -680,7 +677,6 @@ phys_p4d_init(p4d_t *p4d_page, unsigned
+               p4d_populate(&init_mm, p4d, pud);
+               spin_unlock(&init_mm.page_table_lock);
+       }
+-      __flush_tlb_all();
+       return paddr_last;
+ }
+@@ -733,8 +729,6 @@ kernel_physical_mapping_init(unsigned lo
+       if (pgd_changed)
+               sync_global_pgds(vaddr_start, vaddr_end - 1);
+-      __flush_tlb_all();
+-
+       return paddr_last;
+ }
diff --git a/queue-4.20/x86-speculation-l1tf-drop-the-swap-storage-limit-restriction-when-l1tf-off.patch b/queue-4.20/x86-speculation-l1tf-drop-the-swap-storage-limit-restriction-when-l1tf-off.patch
new file mode 100644 (file)
index 0000000..398b711
--- /dev/null
@@ -0,0 +1,102 @@
+From 5b5e4d623ec8a34689df98e42d038a3b594d2ff9 Mon Sep 17 00:00:00 2001
+From: Michal Hocko <mhocko@suse.com>
+Date: Tue, 13 Nov 2018 19:49:10 +0100
+Subject: x86/speculation/l1tf: Drop the swap storage limit restriction when l1tf=off
+
+From: Michal Hocko <mhocko@suse.com>
+
+commit 5b5e4d623ec8a34689df98e42d038a3b594d2ff9 upstream.
+
+Swap storage is restricted to max_swapfile_size (~16TB on x86_64) whenever
+the system is deemed affected by L1TF vulnerability. Even though the limit
+is quite high for most deployments it seems to be too restrictive for
+deployments which are willing to live with the mitigation disabled.
+
+We have a customer to deploy 8x 6,4TB PCIe/NVMe SSD swap devices which is
+clearly out of the limit.
+
+Drop the swap restriction when l1tf=off is specified. It also doesn't make
+much sense to warn about too much memory for the l1tf mitigation when it is
+forcefully disabled by the administrator.
+
+[ tglx: Folded the documentation delta change ]
+
+Fixes: 377eeaa8e11f ("x86/speculation/l1tf: Limit swap file size to MAX_PA/2")
+Signed-off-by: Michal Hocko <mhocko@suse.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Pavel Tatashin <pasha.tatashin@soleen.com>
+Reviewed-by: Andi Kleen <ak@linux.intel.com>
+Acked-by: Jiri Kosina <jkosina@suse.cz>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: <linux-mm@kvack.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181113184910.26697-1-mhocko@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ Documentation/admin-guide/kernel-parameters.txt |    3 +++
+ Documentation/admin-guide/l1tf.rst              |    6 +++++-
+ arch/x86/kernel/cpu/bugs.c                      |    3 ++-
+ arch/x86/mm/init.c                              |    2 +-
+ 4 files changed, 11 insertions(+), 3 deletions(-)
+
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2096,6 +2096,9 @@
+                       off
+                               Disables hypervisor mitigations and doesn't
+                               emit any warnings.
++                              It also drops the swap size and available
++                              RAM limit restriction on both hypervisor and
++                              bare metal.
+                       Default is 'flush'.
+--- a/Documentation/admin-guide/l1tf.rst
++++ b/Documentation/admin-guide/l1tf.rst
+@@ -405,6 +405,9 @@ time with the option "l1tf=". The valid
+   off         Disables hypervisor mitigations and doesn't emit any
+               warnings.
++              It also drops the swap size and available RAM limit restrictions
++              on both hypervisor and bare metal.
++
+   ============  =============================================================
+ The default is 'flush'. For details about L1D flushing see :ref:`l1d_flush`.
+@@ -576,7 +579,8 @@ Default mitigations
+   The kernel default mitigations for vulnerable processors are:
+   - PTE inversion to protect against malicious user space. This is done
+-    unconditionally and cannot be controlled.
++    unconditionally and cannot be controlled. The swap storage is limited
++    to ~16TB.
+   - L1D conditional flushing on VMENTER when EPT is enabled for
+     a guest.
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1002,7 +1002,8 @@ static void __init l1tf_select_mitigatio
+ #endif
+       half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
+-      if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
++      if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
++                      e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
+               pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
+               pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
+                               half_pa);
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -931,7 +931,7 @@ unsigned long max_swapfile_size(void)
+       pages = generic_max_swapfile_size();
+-      if (boot_cpu_has_bug(X86_BUG_L1TF)) {
++      if (boot_cpu_has_bug(X86_BUG_L1TF) && l1tf_mitigation != L1TF_MITIGATION_OFF) {
+               /* Limit the swap file size to MAX_PA/2 for L1TF workaround */
+               unsigned long long l1tf_limit = l1tf_pfn_limit();
+               /*