git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.11-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 12 Jun 2017 08:11:37 +0000 (10:11 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 12 Jun 2017 08:11:37 +0000 (10:11 +0200)
added patches:
arm-kvm-allow-unaligned-accesses-at-hyp.patch
arm64-kvm-allow-unaligned-accesses-at-el2.patch
arm64-kvm-preserve-res1-bits-in-sctlr_el2.patch
kvm-async_pf-avoid-async-pf-injection-when-in-guest-mode.patch
kvm-async_pf-fix-rcu_irq_enter-with-irqs-enabled.patch
kvm-cpuid-fix-read-write-out-of-bounds-vulnerability-in-cpuid-emulation.patch

queue-4.11/arm-kvm-allow-unaligned-accesses-at-hyp.patch [new file with mode: 0644]
queue-4.11/arm64-kvm-allow-unaligned-accesses-at-el2.patch [new file with mode: 0644]
queue-4.11/arm64-kvm-preserve-res1-bits-in-sctlr_el2.patch [new file with mode: 0644]
queue-4.11/kvm-async_pf-avoid-async-pf-injection-when-in-guest-mode.patch [new file with mode: 0644]
queue-4.11/kvm-async_pf-fix-rcu_irq_enter-with-irqs-enabled.patch [new file with mode: 0644]
queue-4.11/kvm-cpuid-fix-read-write-out-of-bounds-vulnerability-in-cpuid-emulation.patch [new file with mode: 0644]
queue-4.11/series

diff --git a/queue-4.11/arm-kvm-allow-unaligned-accesses-at-hyp.patch b/queue-4.11/arm-kvm-allow-unaligned-accesses-at-hyp.patch
new file mode 100644 (file)
index 0000000..d347ccd
--- /dev/null
@@ -0,0 +1,44 @@
+From 33b5c38852b29736f3b472dd095c9a18ec22746f Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Jun 2017 19:08:35 +0100
+Subject: arm: KVM: Allow unaligned accesses at HYP
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 33b5c38852b29736f3b472dd095c9a18ec22746f upstream.
+
+We currently have the HSCTLR.A bit set, trapping unaligned accesses
+at HYP, but we're not really prepared to deal with it.
+
+Since the rest of the kernel is pretty happy about that, let's follow
+its example and set HSCTLR.A to zero. Modern CPUs don't really care.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kvm/init.S |    5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/arch/arm/kvm/init.S
++++ b/arch/arm/kvm/init.S
+@@ -95,7 +95,6 @@ __do_hyp_init:
+       @  - Write permission implies XN: disabled
+       @  - Instruction cache: enabled
+       @  - Data/Unified cache: enabled
+-      @  - Memory alignment checks: enabled
+       @  - MMU: enabled (this code must be run from an identity mapping)
+       mrc     p15, 4, r0, c1, c0, 0   @ HSCR
+       ldr     r2, =HSCTLR_MASK
+@@ -103,8 +102,8 @@ __do_hyp_init:
+       mrc     p15, 0, r1, c1, c0, 0   @ SCTLR
+       ldr     r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
+       and     r1, r1, r2
+- ARM( ldr     r2, =(HSCTLR_M | HSCTLR_A)                      )
+- THUMB(       ldr     r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE)          )
++ ARM( ldr     r2, =(HSCTLR_M)                                 )
++ THUMB(       ldr     r2, =(HSCTLR_M | HSCTLR_TE)                     )
+       orr     r1, r1, r2
+       orr     r0, r0, r1
+       mcr     p15, 4, r0, c1, c0, 0   @ HSCR
diff --git a/queue-4.11/arm64-kvm-allow-unaligned-accesses-at-el2.patch b/queue-4.11/arm64-kvm-allow-unaligned-accesses-at-el2.patch
new file mode 100644 (file)
index 0000000..35d0d31
--- /dev/null
@@ -0,0 +1,42 @@
+From 78fd6dcf11468a5a131b8365580d0c613bcc02cb Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Jun 2017 19:08:34 +0100
+Subject: arm64: KVM: Allow unaligned accesses at EL2
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 78fd6dcf11468a5a131b8365580d0c613bcc02cb upstream.
+
+We currently have the SCTLR_EL2.A bit set, trapping unaligned accesses
+at EL2, but we're not really prepared to deal with it. So far, this
+has been unnoticed, until GCC 7 started emitting those (in particular
+64bit writes on a 32bit boundary).
+
+Since the rest of the kernel is pretty happy about that, let's follow
+its example and set SCTLR_EL2.A to zero. Modern CPUs don't really
+care.
+
+Reported-by: Alexander Graf <agraf@suse.de>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kvm/hyp-init.S |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/kvm/hyp-init.S
++++ b/arch/arm64/kvm/hyp-init.S
+@@ -104,9 +104,10 @@ __do_hyp_init:
+       /*
+        * Preserve all the RES1 bits while setting the default flags,
+-       * as well as the EE bit on BE.
++       * as well as the EE bit on BE. Drop the A flag since the compiler
++       * is allowed to generate unaligned accesses.
+        */
+-      ldr     x4, =(SCTLR_EL2_RES1 | SCTLR_ELx_FLAGS)
++      ldr     x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
+ CPU_BE(       orr     x4, x4, #SCTLR_ELx_EE)
+       msr     sctlr_el2, x4
+       isb
diff --git a/queue-4.11/arm64-kvm-preserve-res1-bits-in-sctlr_el2.patch b/queue-4.11/arm64-kvm-preserve-res1-bits-in-sctlr_el2.patch
new file mode 100644 (file)
index 0000000..bb3e54f
--- /dev/null
@@ -0,0 +1,57 @@
+From d68c1f7fd1b7148dab5fe658321d511998969f2d Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Jun 2017 19:08:33 +0100
+Subject: arm64: KVM: Preserve RES1 bits in SCTLR_EL2
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit d68c1f7fd1b7148dab5fe658321d511998969f2d upstream.
+
+__do_hyp_init has the rather bad habit of ignoring RES1 bits and
+writing them back as zero. On a v8.0-8.2 CPU, this doesn't do anything
+bad, but may end-up being pretty nasty on future revisions of the
+architecture.
+
+Let's preserve those bits so that we don't have to fix this later on.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/sysreg.h |    4 ++++
+ arch/arm64/kvm/hyp-init.S       |   10 ++++++----
+ 2 files changed, 10 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/include/asm/sysreg.h
++++ b/arch/arm64/include/asm/sysreg.h
+@@ -138,6 +138,10 @@
+ #define SCTLR_ELx_A   (1 << 1)
+ #define SCTLR_ELx_M   1
++#define SCTLR_EL2_RES1        ((1 << 4)  | (1 << 5)  | (1 << 11) | (1 << 16) | \
++                       (1 << 16) | (1 << 18) | (1 << 22) | (1 << 23) | \
++                       (1 << 28) | (1 << 29))
++
+ #define SCTLR_ELx_FLAGS       (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
+                        SCTLR_ELx_SA | SCTLR_ELx_I)
+--- a/arch/arm64/kvm/hyp-init.S
++++ b/arch/arm64/kvm/hyp-init.S
+@@ -102,10 +102,12 @@ __do_hyp_init:
+       tlbi    alle2
+       dsb     sy
+-      mrs     x4, sctlr_el2
+-      and     x4, x4, #SCTLR_ELx_EE   // preserve endianness of EL2
+-      ldr     x5, =SCTLR_ELx_FLAGS
+-      orr     x4, x4, x5
++      /*
++       * Preserve all the RES1 bits while setting the default flags,
++       * as well as the EE bit on BE.
++       */
++      ldr     x4, =(SCTLR_EL2_RES1 | SCTLR_ELx_FLAGS)
++CPU_BE(       orr     x4, x4, #SCTLR_ELx_EE)
+       msr     sctlr_el2, x4
+       isb
diff --git a/queue-4.11/kvm-async_pf-avoid-async-pf-injection-when-in-guest-mode.patch b/queue-4.11/kvm-async_pf-avoid-async-pf-injection-when-in-guest-mode.patch
new file mode 100644 (file)
index 0000000..6dc4468
--- /dev/null
@@ -0,0 +1,99 @@
+From 9bc1f09f6fa76fdf31eb7d6a4a4df43574725f93 Mon Sep 17 00:00:00 2001
+From: Wanpeng Li <wanpeng.li@hotmail.com>
+Date: Thu, 8 Jun 2017 20:13:40 -0700
+Subject: KVM: async_pf: avoid async pf injection when in guest mode
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Wanpeng Li <wanpeng.li@hotmail.com>
+
+commit 9bc1f09f6fa76fdf31eb7d6a4a4df43574725f93 upstream.
+
+ INFO: task gnome-terminal-:1734 blocked for more than 120 seconds.
+       Not tainted 4.12.0-rc4+ #8
+ "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+ gnome-terminal- D    0  1734   1015 0x00000000
+ Call Trace:
+  __schedule+0x3cd/0xb30
+  schedule+0x40/0x90
+  kvm_async_pf_task_wait+0x1cc/0x270
+  ? __vfs_read+0x37/0x150
+  ? prepare_to_swait+0x22/0x70
+  do_async_page_fault+0x77/0xb0
+  ? do_async_page_fault+0x77/0xb0
+  async_page_fault+0x28/0x30
+
+This is triggered by running both win7 and win2016 on L1 KVM simultaneously,
+and then gives stress to memory on L1, I can observed this hang on L1 when
+at least ~70% swap area is occupied on L0.
+
+This is due to async pf was injected to L2 which should be injected to L1,
+L2 guest starts receiving pagefault w/ bogus %cr2(apf token from the host
+actually), and L1 guest starts accumulating tasks stuck in D state in
+kvm_async_pf_task_wait() since missing PAGE_READY async_pfs.
+
+This patch fixes the hang by doing async pf when executing L1 guest.
+
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/mmu.c |    7 +++++--
+ arch/x86/kvm/mmu.h |    1 +
+ arch/x86/kvm/x86.c |    3 +--
+ 3 files changed, 7 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -3683,12 +3683,15 @@ static int kvm_arch_setup_async_pf(struc
+       return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
+ }
+-static bool can_do_async_pf(struct kvm_vcpu *vcpu)
++bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
+ {
+       if (unlikely(!lapic_in_kernel(vcpu) ||
+                    kvm_event_needs_reinjection(vcpu)))
+               return false;
++      if (is_guest_mode(vcpu))
++              return false;
++
+       return kvm_x86_ops->interrupt_allowed(vcpu);
+ }
+@@ -3704,7 +3707,7 @@ static bool try_async_pf(struct kvm_vcpu
+       if (!async)
+               return false; /* *pfn has correct page already */
+-      if (!prefault && can_do_async_pf(vcpu)) {
++      if (!prefault && kvm_can_do_async_pf(vcpu)) {
+               trace_kvm_try_async_get_page(gva, gfn);
+               if (kvm_find_async_pf_gfn(vcpu, gfn)) {
+                       trace_kvm_async_pf_doublefault(gva, gfn);
+--- a/arch/x86/kvm/mmu.h
++++ b/arch/x86/kvm/mmu.h
+@@ -75,6 +75,7 @@ enum {
+ int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
+ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
+ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
++bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
+ static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
+ {
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -8623,8 +8623,7 @@ bool kvm_arch_can_inject_async_page_pres
+       if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
+               return true;
+       else
+-              return !kvm_event_needs_reinjection(vcpu) &&
+-                      kvm_x86_ops->interrupt_allowed(vcpu);
++              return kvm_can_do_async_pf(vcpu);
+ }
+ void kvm_arch_start_assignment(struct kvm *kvm)
diff --git a/queue-4.11/kvm-async_pf-fix-rcu_irq_enter-with-irqs-enabled.patch b/queue-4.11/kvm-async_pf-fix-rcu_irq_enter-with-irqs-enabled.patch
new file mode 100644 (file)
index 0000000..c5cc5a0
--- /dev/null
@@ -0,0 +1,40 @@
+From bbaf0e2b1c1b4f88abd6ef49576f0efb1734eae5 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Wed, 26 Apr 2017 16:56:26 +0200
+Subject: kvm: async_pf: fix rcu_irq_enter() with irqs enabled
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit bbaf0e2b1c1b4f88abd6ef49576f0efb1734eae5 upstream.
+
+native_safe_halt enables interrupts, and you just shouldn't
+call rcu_irq_enter() with interrupts enabled.  Reorder the
+call with the following local_irq_disable() to respect the
+invariant.
+
+Reported-by: Ross Zwisler <ross.zwisler@linux.intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Tested-by: Wanpeng Li <wanpeng.li@hotmail.com>
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/kvm.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -161,8 +161,8 @@ void kvm_async_pf_task_wait(u32 token)
+                        */
+                       rcu_irq_exit();
+                       native_safe_halt();
+-                      rcu_irq_enter();
+                       local_irq_disable();
++                      rcu_irq_enter();
+               }
+       }
+       if (!n.halted)
diff --git a/queue-4.11/kvm-cpuid-fix-read-write-out-of-bounds-vulnerability-in-cpuid-emulation.patch b/queue-4.11/kvm-cpuid-fix-read-write-out-of-bounds-vulnerability-in-cpuid-emulation.patch
new file mode 100644 (file)
index 0000000..fd86ac8
--- /dev/null
@@ -0,0 +1,88 @@
+From a3641631d14571242eec0d30c9faa786cbf52d44 Mon Sep 17 00:00:00 2001
+From: Wanpeng Li <wanpeng.li@hotmail.com>
+Date: Thu, 8 Jun 2017 01:22:07 -0700
+Subject: KVM: cpuid: Fix read/write out-of-bounds vulnerability in cpuid emulation
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Wanpeng Li <wanpeng.li@hotmail.com>
+
+commit a3641631d14571242eec0d30c9faa786cbf52d44 upstream.
+
+If "i" is the last element in the vcpu->arch.cpuid_entries[] array, it
+potentially can be exploited the vulnerability. this will out-of-bounds
+read and write.  Luckily, the effect is small:
+
+       /* when no next entry is found, the current entry[i] is reselected */
+       for (j = i + 1; ; j = (j + 1) % nent) {
+               struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
+               if (ej->function == e->function) {
+
+It reads ej->maxphyaddr, which is user controlled.  However...
+
+                       ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
+
+After cpuid_entries there is
+
+       int maxphyaddr;
+       struct x86_emulate_ctxt emulate_ctxt;  /* 16-byte aligned */
+
+So we have:
+
+- cpuid_entries at offset 1B50 (6992)
+- maxphyaddr at offset 27D0 (6992 + 3200 = 10192)
+- padding at 27D4...27DF
+- emulate_ctxt at 27E0
+
+And it writes in the padding.  Pfew, writing the ops field of emulate_ctxt
+would have been much worse.
+
+This patch fixes it by modding the index to avoid the out-of-bounds
+access. Worst case, i == j and ej->function == e->function,
+the loop can bail out.
+
+Reported-by: Moguofang <moguofang@huawei.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: Guofang Mo <moguofang@huawei.com>
+Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/cpuid.c |   20 +++++++++++---------
+ 1 file changed, 11 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -780,18 +780,20 @@ out:
+ static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
+ {
+       struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
+-      int j, nent = vcpu->arch.cpuid_nent;
++      struct kvm_cpuid_entry2 *ej;
++      int j = i;
++      int nent = vcpu->arch.cpuid_nent;
+       e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
+       /* when no next entry is found, the current entry[i] is reselected */
+-      for (j = i + 1; ; j = (j + 1) % nent) {
+-              struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
+-              if (ej->function == e->function) {
+-                      ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
+-                      return j;
+-              }
+-      }
+-      return 0; /* silence gcc, even though control never reaches here */
++      do {
++              j = (j + 1) % nent;
++              ej = &vcpu->arch.cpuid_entries[j];
++      } while (ej->function != e->function);
++
++      ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
++
++      return j;
+ }
+ /* find an entry with matching function, matching index (if needed), and that
index e0448909b8c576ae3c007940067bddff0cafb0f4..e205f309cb31f89e2bea21b9c66c03be868841ad 100644 (file)
@@ -47,3 +47,9 @@ nfsd4-fix-null-dereference-on-replay.patch
 gfs2-make-flush-bios-explicitely-sync.patch
 efi-don-t-issue-error-message-when-booted-under-xen.patch
 efi-bgrt-skip-efi_bgrt_init-in-case-of-non-efi-boot.patch
+kvm-async_pf-fix-rcu_irq_enter-with-irqs-enabled.patch
+kvm-cpuid-fix-read-write-out-of-bounds-vulnerability-in-cpuid-emulation.patch
+arm64-kvm-preserve-res1-bits-in-sctlr_el2.patch
+arm64-kvm-allow-unaligned-accesses-at-el2.patch
+arm-kvm-allow-unaligned-accesses-at-hyp.patch
+kvm-async_pf-avoid-async-pf-injection-when-in-guest-mode.patch