--- /dev/null
+From a87036add09283e6c4f4103a15c596c67b86ab86 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Tue, 8 Mar 2016 09:52:13 +0100
+Subject: KVM: x86: disable MPX if host did not enable MPX XSAVE features
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit a87036add09283e6c4f4103a15c596c67b86ab86 upstream.
+
+When eager FPU is disabled, KVM will still see the MPX bit in CPUID and
+presumably the MPX vmentry and vmexit controls. However, it will not
+be able to expose the MPX XSAVE features to the guest, because the guest's
+accessible XSAVE features are always a subset of host_xcr0.
+
+In this case, we should disable the MPX CPUID bit, the BNDCFGS MSR,
+and the MPX vmentry and vmexit controls for nested virtualization.
+It is then unnecessary to enable guest eager FPU if the guest has the
+MPX CPUID bit set.
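+
+To illustrate the invariant this relies on (guest-visible XSAVE
+features are always clamped to host_xcr0), here is a condensed sketch
+of the gating logic as it stands after this patch; it mirrors the
+hunks below and is not additional code:
+
+	bool kvm_mpx_supported(void)
+	{
+		/* The host must have enabled both MPX XSAVE components
+		 * (bound registers and BNDCSR) *and* the CPU must report
+		 * MPX support through the VMX controls. */
+		return (host_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
+			&& kvm_x86_ops->mpx_supported();
+	}
+
+	u64 kvm_supported_xcr0(void)
+	{
+		u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;
+
+		/* Hide the MPX state components when MPX cannot be
+		 * exposed, keeping CPUID, BNDCFGS and the nested VMX
+		 * controls consistent with the guest's XCR0. */
+		if (!kvm_mpx_supported())
+			xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
+		return xcr0;
+	}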
+
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/cpuid.c | 13 ++++++++++---
+ arch/x86/kvm/cpuid.h | 9 +--------
+ arch/x86/kvm/vmx.c | 13 ++++++-------
+ 3 files changed, 17 insertions(+), 18 deletions(-)
+
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -46,11 +46,18 @@ static u32 xstate_required_size(u64 xsta
+ return ret;
+ }
+
++bool kvm_mpx_supported(void)
++{
++ return ((host_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
++ && kvm_x86_ops->mpx_supported());
++}
++EXPORT_SYMBOL_GPL(kvm_mpx_supported);
++
+ u64 kvm_supported_xcr0(void)
+ {
+ u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;
+
+- if (!kvm_x86_ops->mpx_supported())
++ if (!kvm_mpx_supported())
+ xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
+
+ return xcr0;
+@@ -97,7 +104,7 @@ int kvm_update_cpuid(struct kvm_vcpu *vc
+ if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
+ best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
+
+- vcpu->arch.eager_fpu = use_eager_fpu() || guest_cpuid_has_mpx(vcpu);
++ vcpu->arch.eager_fpu = use_eager_fpu();
+ if (vcpu->arch.eager_fpu)
+ kvm_x86_ops->fpu_activate(vcpu);
+
+@@ -295,7 +302,7 @@ static inline int __do_cpuid_ent(struct
+ #endif
+ unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
+ unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
+- unsigned f_mpx = kvm_x86_ops->mpx_supported() ? F(MPX) : 0;
++ unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
+ unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
+
+ /* cpuid 1.edx */
+--- a/arch/x86/kvm/cpuid.h
++++ b/arch/x86/kvm/cpuid.h
+@@ -4,6 +4,7 @@
+ #include "x86.h"
+
+ int kvm_update_cpuid(struct kvm_vcpu *vcpu);
++bool kvm_mpx_supported(void);
+ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
+ u32 function, u32 index);
+ int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
+@@ -134,14 +135,6 @@ static inline bool guest_cpuid_has_rtm(s
+ return best && (best->ebx & bit(X86_FEATURE_RTM));
+ }
+
+-static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
+-{
+- struct kvm_cpuid_entry2 *best;
+-
+- best = kvm_find_cpuid_entry(vcpu, 7, 0);
+- return best && (best->ebx & bit(X86_FEATURE_MPX));
+-}
+-
+ static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu)
+ {
+ struct kvm_cpuid_entry2 *best;
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -863,7 +863,6 @@ static unsigned long nested_ept_get_cr3(
+ static u64 construct_eptp(unsigned long root_hpa);
+ static void kvm_cpu_vmxon(u64 addr);
+ static void kvm_cpu_vmxoff(void);
+-static bool vmx_mpx_supported(void);
+ static bool vmx_xsaves_supported(void);
+ static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu);
+ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
+@@ -2541,7 +2540,7 @@ static void nested_vmx_setup_ctls_msrs(s
+ VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
+ VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
+
+- if (vmx_mpx_supported())
++ if (kvm_mpx_supported())
+ vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
+
+ /* We support free control of debug control saving. */
+@@ -2562,7 +2561,7 @@ static void nested_vmx_setup_ctls_msrs(s
+ VM_ENTRY_LOAD_IA32_PAT;
+ vmx->nested.nested_vmx_entry_ctls_high |=
+ (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
+- if (vmx_mpx_supported())
++ if (kvm_mpx_supported())
+ vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
+
+ /* We support free control of debug control loading. */
+@@ -2813,7 +2812,7 @@ static int vmx_get_msr(struct kvm_vcpu *
+ msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
+ break;
+ case MSR_IA32_BNDCFGS:
+- if (!vmx_mpx_supported())
++ if (!kvm_mpx_supported())
+ return 1;
+ msr_info->data = vmcs_read64(GUEST_BNDCFGS);
+ break;
+@@ -2890,7 +2889,7 @@ static int vmx_set_msr(struct kvm_vcpu *
+ vmcs_writel(GUEST_SYSENTER_ESP, data);
+ break;
+ case MSR_IA32_BNDCFGS:
+- if (!vmx_mpx_supported())
++ if (!kvm_mpx_supported())
+ return 1;
+ vmcs_write64(GUEST_BNDCFGS, data);
+ break;
+@@ -3363,7 +3362,7 @@ static void init_vmcs_shadow_fields(void
+ for (i = j = 0; i < max_shadow_read_write_fields; i++) {
+ switch (shadow_read_write_fields[i]) {
+ case GUEST_BNDCFGS:
+- if (!vmx_mpx_supported())
++ if (!kvm_mpx_supported())
+ continue;
+ break;
+ default:
+@@ -10265,7 +10264,7 @@ static void prepare_vmcs12(struct kvm_vc
+ vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
+ vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
+ vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
+- if (vmx_mpx_supported())
++ if (kvm_mpx_supported())
+ vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
+ if (nested_cpu_has_xsaves(vmcs12))
+ vmcs12->xss_exit_bitmap = vmcs_read64(XSS_EXIT_BITMAP);