3.10-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sun, 8 Feb 2015 02:34:43 +0000 (10:34 +0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sun, 8 Feb 2015 02:34:43 +0000 (10:34 +0800)
added patches:
kvm-vmx-handle-invvpid-vm-exit-gracefully.patch
x86-kvm-vmx-preserve-cr4-across-vm-entry.patch

queue-3.10/kvm-vmx-handle-invvpid-vm-exit-gracefully.patch [new file with mode: 0644]
queue-3.10/series
queue-3.10/x86-kvm-vmx-preserve-cr4-across-vm-entry.patch [new file with mode: 0644]

diff --git a/queue-3.10/kvm-vmx-handle-invvpid-vm-exit-gracefully.patch b/queue-3.10/kvm-vmx-handle-invvpid-vm-exit-gracefully.patch
new file mode 100644
index 0000000..8515191
--- /dev/null
+++ b/queue-3.10/kvm-vmx-handle-invvpid-vm-exit-gracefully.patch
@@ -0,0 +1,79 @@
+From a642fc305053cc1c6e47e4f4df327895747ab485 Mon Sep 17 00:00:00 2001
+From: Petr Matousek <pmatouse@redhat.com>
+Date: Tue, 23 Sep 2014 20:22:30 +0200
+Subject: kvm: vmx: handle invvpid vm exit gracefully
+
+From: Petr Matousek <pmatouse@redhat.com>
+
+commit a642fc305053cc1c6e47e4f4df327895747ab485 upstream.
+
+On systems with invvpid instruction support (corresponding bit in
+IA32_VMX_EPT_VPID_CAP MSR is set) guest invocation of invvpid
+causes vm exit, which is currently not handled and results in
+propagation of unknown exit to userspace.
+
+Fix this by installing an invvpid vm exit handler.
+
+This is CVE-2014-3646.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Petr Matousek <pmatouse@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+[wangkai: Backport to 3.10: adjust context]
+Signed-off-by: Wang Kai <morgan.wang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/uapi/asm/vmx.h |    2 ++
+ arch/x86/kvm/vmx.c              |    9 ++++++++-
+ 2 files changed, 10 insertions(+), 1 deletion(-)
+
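Not part of the queued patch: a standalone C sketch of the failure mode the message above describes. vmx_handle_exit() dispatches through the kvm_vmx_exit_handlers table, and an exit reason with no entry falls through to KVM_EXIT_UNKNOWN in userspace, which is what an L1 guest's INVVPID hit before this fix. The struct vcpu, main() driver and printf() reporting below are illustrative assumptions; only the table-lookup shape mirrors the patched code.

#include <stdio.h>

#define EXIT_REASON_INVEPT   50
#define EXIT_REASON_INVVPID  53
#define MAX_EXIT_HANDLERS    64

struct vcpu { int pending_ud; };

/* Both nested-VMX stubs queue #UD for the guest and ask to resume it. */
static int handle_invept(struct vcpu *v)  { v->pending_ud = 1; return 1; }
static int handle_invvpid(struct vcpu *v) { v->pending_ud = 1; return 1; }

static int (*const exit_handlers[MAX_EXIT_HANDLERS])(struct vcpu *) = {
	[EXIT_REASON_INVEPT]  = handle_invept,
	[EXIT_REASON_INVVPID] = handle_invvpid,	/* the slot this patch adds */
};

/* Returns 1 to resume the guest, 0 to bounce the exit to userspace. */
static int handle_exit(struct vcpu *v, unsigned int reason)
{
	if (reason < MAX_EXIT_HANDLERS && exit_handlers[reason])
		return exit_handlers[reason](v);
	printf("exit reason %u unhandled -> KVM_EXIT_UNKNOWN\n", reason);
	return 0;
}

int main(void)
{
	struct vcpu v = { 0 };
	int resume = handle_exit(&v, EXIT_REASON_INVVPID);

	/* Before the patch this slot was empty and the guest was killed. */
	printf("resume=%d, pending #UD=%d\n", resume, v.pending_ud);
	return 0;
}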
+--- a/arch/x86/include/uapi/asm/vmx.h
++++ b/arch/x86/include/uapi/asm/vmx.h
+@@ -67,6 +67,7 @@
+ #define EXIT_REASON_EPT_MISCONFIG       49
+ #define EXIT_REASON_INVEPT              50
+ #define EXIT_REASON_PREEMPTION_TIMER    52
++#define EXIT_REASON_INVVPID             53
+ #define EXIT_REASON_WBINVD              54
+ #define EXIT_REASON_XSETBV              55
+ #define EXIT_REASON_APIC_WRITE          56
+@@ -112,6 +113,7 @@
+       { EXIT_REASON_EOI_INDUCED,           "EOI_INDUCED" }, \
+       { EXIT_REASON_INVALID_STATE,         "INVALID_STATE" }, \
+       { EXIT_REASON_INVD,                  "INVD" }, \
++      { EXIT_REASON_INVVPID,               "INVVPID" }, \
+       { EXIT_REASON_INVPCID,               "INVPCID" }, \
+      { EXIT_REASON_PREEMPTION_TIMER,      "PREEMPTION_TIMER" }
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -6248,6 +6248,12 @@ static int handle_invept(struct kvm_vcpu
+       return 1;
+ }
+
++static int handle_invvpid(struct kvm_vcpu *vcpu)
++{
++      kvm_queue_exception(vcpu, UD_VECTOR);
++      return 1;
++}
++
+ /*
+  * The exit handlers return 1 if the exit was handled fully and guest execution
+  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
+@@ -6293,6 +6299,7 @@ static int (*const kvm_vmx_exit_handlers
+       [EXIT_REASON_MWAIT_INSTRUCTION]       = handle_invalid_op,
+       [EXIT_REASON_MONITOR_INSTRUCTION]     = handle_invalid_op,
+       [EXIT_REASON_INVEPT]                  = handle_invept,
++      [EXIT_REASON_INVVPID]                 = handle_invvpid,
+ };
+
+ static const int kvm_vmx_max_exit_handlers =
+@@ -6519,7 +6526,7 @@ static bool nested_vmx_exit_handled(stru
+       case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
+       case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
+       case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
+-      case EXIT_REASON_INVEPT:
++      case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
+               /*
+                * VMX instructions trap unconditionally. This allows L1 to
+                * emulate them for its L2 guest, i.e., allows 3-level nesting!
diff --git a/queue-3.10/series b/queue-3.10/series
index 24da9d5d74addada84b9b10d7a561218c2bbd590..0a4d20455ba15556c82aa52f7e5b63b6afafc514 100644
--- a/queue-3.10/series
+++ b/queue-3.10/series
@@ -14,3 +14,5 @@ asoc-atmel_ssc_dai-fix-start-event-for-i2s-mode.patch
 asoc-sgtl5000-add-delay-before-first-i2c-access.patch
 alsa-ak411x-fix-stall-in-work-callback.patch
 smpboot-add-missing-get_online_cpus-in-smpboot_register_percpu_thread.patch
+kvm-vmx-handle-invvpid-vm-exit-gracefully.patch
+x86-kvm-vmx-preserve-cr4-across-vm-entry.patch
diff --git a/queue-3.10/x86-kvm-vmx-preserve-cr4-across-vm-entry.patch b/queue-3.10/x86-kvm-vmx-preserve-cr4-across-vm-entry.patch
new file mode 100644
index 0000000..6171b84
--- /dev/null
+++ b/queue-3.10/x86-kvm-vmx-preserve-cr4-across-vm-entry.patch
@@ -0,0 +1,83 @@
+From d974baa398f34393db76be45f7d4d04fbdbb4a0a Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@amacapital.net>
+Date: Wed, 8 Oct 2014 09:02:13 -0700
+Subject: x86,kvm,vmx: Preserve CR4 across VM entry
+
+From: Andy Lutomirski <luto@amacapital.net>
+
+commit d974baa398f34393db76be45f7d4d04fbdbb4a0a upstream.
+
+CR4 isn't constant; at least the TSD and PCE bits can vary.
+
+TBH, treating CR0 and CR3 as constant scares me a bit, too, but it looks
+like it's correct.
+
+This adds a branch and a read from cr4 to each vm entry.  Because it is
+extremely likely that consecutive entries into the same vcpu will have
+the same host cr4 value, this fixes up the vmcs instead of restoring cr4
+after the fact.  A subsequent patch will add a kernel-wide cr4 shadow,
+reducing the overhead in the common case to just two memory reads and a
+branch.
+
+Signed-off-by: Andy Lutomirski <luto@amacapital.net>
+Acked-by: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Petr Matousek <pmatouse@redhat.com>
+Cc: Gleb Natapov <gleb@kernel.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[wangkai: Backport to 3.10: adjust context]
+Signed-off-by: Wang Kai <morgan.wang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx.c |   16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
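Not part of the queued patch: a minimal userspace sketch of one way host CR4 drifts, backing the "at least the TSD and PCE bits can vary" claim above. prctl(PR_SET_TSC, PR_TSC_SIGSEGV) makes the kernel run the calling task with CR4.TSD set, so the CR4 live on a CPU at one VM entry can differ from the value captured when the constant host state was written; with a stale HOST_CR4 in the VMCS, the next VM exit would reload the old value and quietly drop TSD again. The program below only flips the task attribute; treating it as a vcpu thread is the assumption.

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	/*
	 * From here on the kernel schedules this task with CR4.TSD set
	 * (a userspace RDTSC now raises SIGSEGV).  If this were a KVM
	 * vcpu thread, the host CR4 seen at the next VM entry would no
	 * longer match the value cached in the VMCS host-state area.
	 */
	if (prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0))
		perror("prctl(PR_SET_TSC)");
	return 0;
}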
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -438,6 +438,7 @@ struct vcpu_vmx {
+ #endif
+               int           gs_ldt_reload_needed;
+               int           fs_reload_needed;
++              unsigned long vmcs_host_cr4;    /* May not match real cr4 */
+       } host_state;
+       struct {
+               int vm86_active;
+@@ -4076,11 +4077,16 @@ static void vmx_set_constant_host_state(
+       u32 low32, high32;
+       unsigned long tmpl;
+       struct desc_ptr dt;
++      unsigned long cr4;
+
+       vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS);  /* 22.2.3 */
+-      vmcs_writel(HOST_CR4, read_cr4());  /* 22.2.3, 22.2.5 */
+       vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
+
++      /* Save the most likely value for this task's CR4 in the VMCS. */
++      cr4 = read_cr4();
++      vmcs_writel(HOST_CR4, cr4);                     /* 22.2.3, 22.2.5 */
++      vmx->host_state.vmcs_host_cr4 = cr4;
++
+       vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
+ #ifdef CONFIG_X86_64
+       /*
+@@ -6971,7 +6977,7 @@ static void atomic_switch_perf_msrs(stru
+ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ {
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+-      unsigned long debugctlmsr;
++      unsigned long debugctlmsr, cr4;
+
+       /* Record the guest's net vcpu time for enforced NMI injections. */
+       if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
+@@ -6992,6 +6998,12 @@ static void __noclone vmx_vcpu_run(struc
+       if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
+               vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
+
++      cr4 = read_cr4();
++      if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
++              vmcs_writel(HOST_CR4, cr4);
++              vmx->host_state.vmcs_host_cr4 = cr4;
++      }
++
+       /* When single-stepping over STI and MOV SS, we must clear the
+        * corresponding interruptibility bits in the guest state. Otherwise
+        * vmentry fails as it then expects bit 14 (BS) in pending debug