KVM: x86: Add SBPB support
author    Josh Poimboeuf <jpoimboe@kernel.org>
          Fri, 25 Aug 2023 07:01:36 +0000 (00:01 -0700)
committer Sean Christopherson <seanjc@google.com>
          Wed, 4 Oct 2023 22:19:32 +0000 (15:19 -0700)
Add support for the AMD Selective Branch Predictor Barrier (SBPB) by
advertising the CPUID bit and handling PRED_CMD writes accordingly.
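
For reference, the MSR and command bits involved (as defined in
arch/x86/include/asm/msr-index.h; shown here for context, not part of
this diff):

    #define MSR_IA32_PRED_CMD   0x00000049  /* Prediction Command MSR */
    #define PRED_CMD_IBPB       BIT(0)      /* Indirect Branch Prediction Barrier */
    #define PRED_CMD_SBPB       BIT(7)      /* Selective Branch Prediction Barrier */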

Note, like SRSO_NO and IBPB_BRTYPE before it, advertise support for SBPB
even if it's not enumerated in the raw CPUID.  Some CPUs that gained
support via a uCode patch don't report SBPB via CPUID (the kernel forces
the flag).
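
This works because kvm_cpu_cap_check_and_set() keys off the kernel's
synthesized feature flags rather than raw CPUID; roughly (a simplified
sketch of the helper in arch/x86/kvm/cpuid.c):

    static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
    {
            /* Advertise the cap to guests only if the host kernel has it. */
            if (boot_cpu_has(x86_feature))
                    kvm_cpu_cap_set(x86_feature);
    }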

Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
Link: https://lore.kernel.org/r/a4ab1e7fe50096d50fde33e739ed2da40b41ea6a.1692919072.git.jpoimboe@kernel.org
Co-developed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/cpuid.c
arch/x86/kvm/cpuid.h
arch/x86/kvm/x86.c

diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 8f26a929d510cdebca111916cce8c9d9715b839b..552dc4f3899b12c2c2c61281a8f5acdd40bec761 100644
@@ -764,6 +764,7 @@ void kvm_set_cpu_caps(void)
                F(NULL_SEL_CLR_BASE) | F(AUTOIBRS) | 0 /* PrefetchCtlMsr */
        );
 
+       kvm_cpu_cap_check_and_set(X86_FEATURE_SBPB);
        kvm_cpu_cap_check_and_set(X86_FEATURE_IBPB_BRTYPE);
        kvm_cpu_cap_check_and_set(X86_FEATURE_SRSO_NO);
 
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 284fa4704553da1345731927225084e7daeeba4b..0b90532b6e261430c7997e933f59f5531312d627 100644
@@ -174,7 +174,8 @@ static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
 static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
 {
        return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
-               guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB));
+               guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB) ||
+               guest_cpuid_has(vcpu, X86_FEATURE_SBPB));
 }
 
 static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ec728aef9acccfd428c690937b48dee0d97c3a36..2b5b325e19f2d819fd96cb600fef0c1a59c88f8b 100644
@@ -3670,17 +3670,36 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                vcpu->arch.perf_capabilities = data;
                kvm_pmu_refresh(vcpu);
                break;
-       case MSR_IA32_PRED_CMD:
-               if (!msr_info->host_initiated && !guest_has_pred_cmd_msr(vcpu))
-                       return 1;
+       case MSR_IA32_PRED_CMD: {
+               u64 reserved_bits = ~(PRED_CMD_IBPB | PRED_CMD_SBPB);
+
+               if (!msr_info->host_initiated) {
+                       if (!guest_has_pred_cmd_msr(vcpu))
+                               return 1;
+
+                       if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
+                           !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB))
+                               reserved_bits |= PRED_CMD_IBPB;
 
-               if (!boot_cpu_has(X86_FEATURE_IBPB) || (data & ~PRED_CMD_IBPB))
+                       if (!guest_cpuid_has(vcpu, X86_FEATURE_SBPB))
+                               reserved_bits |= PRED_CMD_SBPB;
+               }
+
+               if (!boot_cpu_has(X86_FEATURE_IBPB))
+                       reserved_bits |= PRED_CMD_IBPB;
+
+               if (!boot_cpu_has(X86_FEATURE_SBPB))
+                       reserved_bits |= PRED_CMD_SBPB;
+
+               if (data & reserved_bits)
                        return 1;
+
                if (!data)
                        break;
 
-               wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
+               wrmsrl(MSR_IA32_PRED_CMD, data);
                break;
+       }
        case MSR_IA32_FLUSH_CMD:
                if (!msr_info->host_initiated &&
                    !guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D))