git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 26 Feb 2021 15:38:56 +0000 (16:38 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 26 Feb 2021 15:38:56 +0000 (16:38 +0100)
added patches:
kvm-x86-replace-kvm_spec_ctrl_test_value-with-runtime-test-on-the-host.patch

queue-5.4/kvm-x86-replace-kvm_spec_ctrl_test_value-with-runtime-test-on-the-host.patch [new file with mode: 0644]
queue-5.4/series

diff --git a/queue-5.4/kvm-x86-replace-kvm_spec_ctrl_test_value-with-runtime-test-on-the-host.patch b/queue-5.4/kvm-x86-replace-kvm_spec_ctrl_test_value-with-runtime-test-on-the-host.patch
new file mode 100644
index 0000000..870ba30
--- /dev/null
+++ b/queue-5.4/kvm-x86-replace-kvm_spec_ctrl_test_value-with-runtime-test-on-the-host.patch
@@ -0,0 +1,126 @@
+From 841c2be09fe4f495fe5224952a419bd8c7e5b455 Mon Sep 17 00:00:00 2001
+From: Maxim Levitsky <mlevitsk@redhat.com>
+Date: Wed, 8 Jul 2020 14:57:31 +0300
+Subject: kvm: x86: replace kvm_spec_ctrl_test_value with runtime test on the host
+
+From: Maxim Levitsky <mlevitsk@redhat.com>
+
+commit 841c2be09fe4f495fe5224952a419bd8c7e5b455 upstream.
+
+To avoid complex and in some cases incorrect logic in
+kvm_spec_ctrl_test_value, just try the guest's given value on the host
+processor instead, and if it doesn't #GP, allow the guest to set it.
+
+One such case is when the host CPU supports the STIBP mitigation
+but doesn't support IBRS (as is the case with some Zen2 AMD CPUs);
+in this case we were giving the guest a #GP when it tried to use STIBP.
+
+The reason we can do the host test is that the IA32_SPEC_CTRL MSR is
+passed through to the guest after the guest sets it to a non-zero value
+for the first time (for performance reasons),
+and as a result of this, it is pointless to emulate the #GP condition on
+this first access in a different way than what the host CPU does.
+
+This is based on a patch from Sean Christopherson, who suggested this idea.
+
+Fixes: 6441fa6178f5 ("KVM: x86: avoid incorrect writes to host MSR_IA32_SPEC_CTRL")
+Cc: stable@vger.kernel.org
+Suggested-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
+Message-Id: <20200708115731.180097-1-mlevitsk@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/svm.c     |    2 +-
+ arch/x86/kvm/vmx/vmx.c |    2 +-
+ arch/x86/kvm/x86.c     |   40 ++++++++++++++++++++++------------------
+ arch/x86/kvm/x86.h     |    2 +-
+ 4 files changed, 25 insertions(+), 21 deletions(-)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -4327,7 +4327,7 @@ static int svm_set_msr(struct kvm_vcpu *
+                   !guest_has_spec_ctrl_msr(vcpu))
+                       return 1;
+-              if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
++              if (kvm_spec_ctrl_test_value(data))
+                       return 1;
+               svm->spec_ctrl = data;
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -1974,7 +1974,7 @@ static int vmx_set_msr(struct kvm_vcpu *
+                   !guest_has_spec_ctrl_msr(vcpu))
+                       return 1;
+-              if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
++              if (kvm_spec_ctrl_test_value(data))
+                       return 1;
+               vmx->spec_ctrl = data;
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -10374,28 +10374,32 @@ bool kvm_arch_no_poll(struct kvm_vcpu *v
+ }
+ EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
+-u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu)
++
++int kvm_spec_ctrl_test_value(u64 value)
+ {
+-      uint64_t bits = SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD;
++      /*
++       * test that setting IA32_SPEC_CTRL to given value
++       * is allowed by the host processor
++       */
++
++      u64 saved_value;
++      unsigned long flags;
++      int ret = 0;
++
++      local_irq_save(flags);
++
++      if (rdmsrl_safe(MSR_IA32_SPEC_CTRL, &saved_value))
++              ret = 1;
++      else if (wrmsrl_safe(MSR_IA32_SPEC_CTRL, value))
++              ret = 1;
++      else
++              wrmsrl(MSR_IA32_SPEC_CTRL, saved_value);
+-      /* The STIBP bit doesn't fault even if it's not advertised */
+-      if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
+-          !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
+-              bits &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP);
+-      if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL) &&
+-          !boot_cpu_has(X86_FEATURE_AMD_IBRS))
+-              bits &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP);
+-
+-      if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL_SSBD) &&
+-          !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
+-              bits &= ~SPEC_CTRL_SSBD;
+-      if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
+-          !boot_cpu_has(X86_FEATURE_AMD_SSBD))
+-              bits &= ~SPEC_CTRL_SSBD;
++      local_irq_restore(flags);
+-      return bits;
++      return ret;
+ }
+-EXPORT_SYMBOL_GPL(kvm_spec_ctrl_valid_bits);
++EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value);
+ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
+ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
+--- a/arch/x86/kvm/x86.h
++++ b/arch/x86/kvm/x86.h
+@@ -368,6 +368,6 @@ static inline bool kvm_pat_valid(u64 dat
+ void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
+ void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
+-u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu);
++int kvm_spec_ctrl_test_value(u64 value);
+ #endif
diff --git a/queue-5.4/series b/queue-5.4/series
index 16eb79446facf879fb733947292c4855d18eb3f5..ba25511936e3900719642ad7f7206836b280747b 100644
--- a/queue-5.4/series
+++ b/queue-5.4/series
@@ -1 +1,2 @@
 vmlinux.lds.h-add-dwarf-v5-sections.patch
+kvm-x86-replace-kvm_spec_ctrl_test_value-with-runtime-test-on-the-host.patch
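
The core of the patch queued above is a runtime probe of the host MSR in place of a computed mask of valid bits. Below is a minimal standalone sketch of that pattern for kernel context; the helper name probe_spec_ctrl_value() is illustrative only (the function the patch actually adds is kvm_spec_ctrl_test_value() in arch/x86/kvm/x86.c), while local_irq_save(), rdmsrl_safe(), wrmsrl_safe() and wrmsrl() are the same kernel primitives the patch uses.

/*
 * Sketch of the probe pattern introduced by the patch: instead of
 * computing which SPEC_CTRL bits should be valid for the guest, try
 * the write on the host MSR and let the CPU (#GP or not) decide.
 * Kernel context only; probe_spec_ctrl_value() is an illustrative name.
 */
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/msr.h>
#include <asm/msr-index.h>

static int probe_spec_ctrl_value(u64 value)
{
	u64 saved_value;
	unsigned long flags;
	int ret = 0;

	/* Keep the save/probe/restore sequence atomic on this CPU. */
	local_irq_save(flags);

	if (rdmsrl_safe(MSR_IA32_SPEC_CTRL, &saved_value))
		ret = 1;	/* MSR not readable on this host */
	else if (wrmsrl_safe(MSR_IA32_SPEC_CTRL, value))
		ret = 1;	/* host CPU rejected the value with #GP */
	else
		wrmsrl(MSR_IA32_SPEC_CTRL, saved_value);	/* undo the probe */

	local_irq_restore(flags);

	return ret;
}

As in the hunks above, svm_set_msr() and vmx_set_msr() propagate a non-zero result as "return 1", which KVM turns into a #GP for the guest's WRMSR.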