git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.14-stable patches
author: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 2 Jun 2015 07:24:16 +0000 (16:24 +0900)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 2 Jun 2015 07:24:16 +0000 (16:24 +0900)
added patches:
kvm-mmu-fix-cr4.smep-1-cr0.wp-0-with-shadow-pages.patch
x86-fpu-disable-xsaves-support-for-now.patch

queue-3.14/kvm-mmu-fix-cr4.smep-1-cr0.wp-0-with-shadow-pages.patch [new file with mode: 0644]
queue-3.14/series
queue-3.14/x86-fpu-disable-xsaves-support-for-now.patch [new file with mode: 0644]

diff --git a/queue-3.14/kvm-mmu-fix-cr4.smep-1-cr0.wp-0-with-shadow-pages.patch b/queue-3.14/kvm-mmu-fix-cr4.smep-1-cr0.wp-0-with-shadow-pages.patch
new file mode 100644 (file)
index 0000000..4777bd5
--- /dev/null
@@ -0,0 +1,32 @@
+From 898761158be7682082955e3efa4ad24725305fc7 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Thu, 2 Apr 2015 11:04:05 +0200
+Subject: KVM: MMU: fix CR4.SMEP=1, CR0.WP=0 with shadow pages
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 898761158be7682082955e3efa4ad24725305fc7 upstream.
+
+smep_andnot_wp is initialized in kvm_init_shadow_mmu and shadow pages
+should not be reused for different values of it.  Thus, it has to be
+added to the mask in kvm_mmu_pte_write.
+
+Reviewed-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/mmu.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -4078,7 +4078,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
+       ++vcpu->kvm->stat.mmu_pte_write;
+       kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
+-      mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
++      mask.cr0_wp = mask.cr4_pae = mask.nxe = mask.smep_andnot_wp = 1;
+       for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
+               if (detect_write_misaligned(sp, gpa, bytes) ||
+                     detect_write_flooding(sp)) {
index d46fd919aabf1485c65da4619547fa129c5e4085..dff17c84b7ae5cdd7d54e54bdf5f8837592a9f4b 100644 (file)
@@ -4,3 +4,5 @@ staging-rtl8712-rtl8712-avoid-lots-of-build-warnings.patch
 staging-rtl8192e-llvmlinux-remove-unused-inline-prototype.patch
 kernel-use-the-gnu89-standard-explicitly.patch
 qla2xxx-remove-redundant-declaration-in-qla_gbl.h.patch
+x86-fpu-disable-xsaves-support-for-now.patch
+kvm-mmu-fix-cr4.smep-1-cr0.wp-0-with-shadow-pages.patch
diff --git a/queue-3.14/x86-fpu-disable-xsaves-support-for-now.patch b/queue-3.14/x86-fpu-disable-xsaves-support-for-now.patch
new file mode 100644 (file)
index 0000000..430a04d
--- /dev/null
@@ -0,0 +1,78 @@
+From e88221c50cadade0eb4f7f149f4967d760212695 Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo@kernel.org>
+Date: Wed, 20 May 2015 11:45:30 +0200
+Subject: x86/fpu: Disable XSAVES* support for now
+
+From: Ingo Molnar <mingo@kernel.org>
+
+commit e88221c50cadade0eb4f7f149f4967d760212695 upstream.
+
+The kernel's handling of 'compacted' xsave state layout is buggy:
+
+    http://marc.info/?l=linux-kernel&m=142967852317199
+
+I don't have such a system, and the description there is vague, but
+from extrapolation I guess that there were two kinds of bugs
+observed:
+
+  - boot crashes, due to size calculations being wrong and the dynamic
+    allocation allocating a too small xstate area. (This is now fixed
+    in the new FPU code - but still present in stable kernels.)
+
+  - FPU state corruption and ABI breakage: if signal handlers try to
+    change the FPU state in standard format, which then the kernel
+    tries to restore in the compacted format.
+
+These breakages are scary, but they only occur on a small number of
+systems that have XSAVES* CPU support. Yet we have had XSAVES support
+in the upstream kernel for a large number of stable kernel releases,
+and the fixes are involved and unproven.
+
+So do the safe resolution first: disable XSAVES* support and only
+use the standard xstate format. This makes the code work and is
+easy to backport.
+
+On top of this we can work on enabling (and testing!) proper
+compacted format support, without backporting pressure, on top of the
+new, cleaned up FPU code.
+
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/i387.c |   15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+--- a/arch/x86/kernel/i387.c
++++ b/arch/x86/kernel/i387.c
+@@ -155,6 +155,21 @@ static void init_thread_xstate(void)
+               xstate_size = sizeof(struct i387_fxsave_struct);
+       else
+               xstate_size = sizeof(struct i387_fsave_struct);
++
++      /*
++       * Quirk: we don't yet handle the XSAVES* instructions
++       * correctly, as we don't correctly convert between
++       * standard and compacted format when interfacing
++       * with user-space - so disable it for now.
++       *
++       * The difference is small: with recent CPUs the
++       * compacted format is only marginally smaller than
++       * the standard FPU state format.
++       *
++       * ( This is easy to backport while we are fixing
++       *   XSAVES* support. )
++       */
++      setup_clear_cpu_cap(X86_FEATURE_XSAVES);
+ }
+ /*