KVM: SVM: Set/clear SRSO's BP_SPEC_REDUCE on 0 <=> 1 VM count transitions
author     Sean Christopherson <seanjc@google.com>
           Mon, 5 May 2025 18:03:00 +0000 (11:03 -0700)
committer  Sean Christopherson <seanjc@google.com>
           Thu, 8 May 2025 14:17:10 +0000 (07:17 -0700)
Set the magic BP_SPEC_REDUCE bit to mitigate SRSO when running VMs if and
only if KVM has at least one active VM.  Leaving the bit set at all times
unfortunately degrades performance by a wee bit more than expected.

Use a dedicated spinlock and counter instead of hooking virtualization
enablement, as changing the behavior of kvm.enable_virt_at_load based on
SRSO_BP_SPEC_REDUCE is painful, and has its own drawbacks, e.g. could
result in performance issues for flows that are sensitive to VM creation
latency.
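
As a rough illustration of that pattern — a userspace sketch using C11
atomics and a pthread mutex as stand-ins for the kernel primitives, with
hypothetical names vm_create(), vm_destroy(), and
disable_mitigation_on_all_cpus() — the lock is taken only on 0 => 1 and
1 => 0 transitions, so creating or destroying a VM while other VMs exist
stays lock-free:

  #include <pthread.h>
  #include <stdatomic.h>

  static pthread_mutex_t srso_lock = PTHREAD_MUTEX_INITIALIZER;
  static atomic_int srso_nr_vms;

  /* Stand-in for on_each_cpu(svm_srso_clear_bp_spec_reduce, NULL, 1). */
  static void disable_mitigation_on_all_cpus(void) { }

  static void vm_create(void)
  {
          /* Fast path: some VM already exists, just bump the count. */
          int old = atomic_load(&srso_nr_vms);

          while (old && !atomic_compare_exchange_weak(&srso_nr_vms, &old, old + 1))
                  ;
          if (old)
                  return;

          /*
           * 0 => 1 transition: take the lock so a concurrent "last VM
           * destroyed" teardown fully completes before this VM is
           * counted (mirrors atomic_inc_not_zero() + guard(spinlock)).
           */
          pthread_mutex_lock(&srso_lock);
          atomic_fetch_add(&srso_nr_vms, 1);
          pthread_mutex_unlock(&srso_lock);
  }

  static void vm_destroy(void)
  {
          /* Not the last VM?  Nothing to do. */
          if (atomic_fetch_sub(&srso_nr_vms, 1) != 1)
                  return;

          pthread_mutex_lock(&srso_lock);
          /* Re-check: a new VM may have raced in before the lock was taken. */
          if (!atomic_load(&srso_nr_vms))
                  disable_mitigation_on_all_cpus();
          pthread_mutex_unlock(&srso_lock);
  }

  int main(void)
  {
          vm_create();
          vm_create();
          vm_destroy();
          vm_destroy();   /* last destroy clears the mitigation everywhere */
          return 0;
  }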

Defer setting BP_SPEC_REDUCE until VMRUN is imminent to avoid impacting
performance on CPUs that aren't running VMs, e.g. if a setup is using
housekeeping CPUs.  Setting BP_SPEC_REDUCE in task context, i.e. without
blasting IPIs to all CPUs, also helps avoid serializing 1=>N transitions
without incurring a gross amount of complexity (see the Link for details
on how ugly coordinating via IPIs gets).
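
Condensed from the hunks below, the deferred set and the IPI-driven clear
pair up via a per-CPU flag (prepare_to_run_guest() here is a hypothetical
stand-in for the svm_prepare_switch_to_guest() hunk):

  /* Task context, just before VMRUN; each CPU sets the bit at most once. */
  static void prepare_to_run_guest(struct svm_cpu_data *sd)
  {
          if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE) &&
              !sd->bp_spec_reduce_set) {
                  sd->bp_spec_reduce_set = true;
                  msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
          }
  }

  /*
   * IPI callback on every CPU when the last VM is destroyed; the per-CPU
   * flag keeps CPUs that never ran a vCPU from touching the MSR.
   */
  static void svm_srso_clear_bp_spec_reduce(void *ign)
  {
          struct svm_cpu_data *sd = this_cpu_ptr(&svm_data);

          if (!sd->bp_spec_reduce_set)
                  return;

          msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
          sd->bp_spec_reduce_set = false;
  }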

Link: https://lore.kernel.org/all/aBOnzNCngyS_pQIW@google.com
Fixes: 8442df2b49ed ("x86/bugs: KVM: Add support for SRSO_MSR_FIX")
Reported-by: Michael Larabel <Michael@michaellarabel.com>
Closes: https://www.phoronix.com/review/linux-615-amd-regression
Cc: Borislav Petkov <bp@alien8.de>
Tested-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20250505180300.973137-1-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h

arch/x86/kvm/svm/svm.c
index c5470d842aed436f0f7095ee5976ecbb2b4426f2..a89c271a1951f457912b7d5c4c025c3344032dcc 100644
@@ -607,9 +607,6 @@ static void svm_disable_virtualization_cpu(void)
        kvm_cpu_svm_disable();
 
        amd_pmu_disable_virt();
-
-       if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
-               msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
 }
 
 static int svm_enable_virtualization_cpu(void)
@@ -687,9 +684,6 @@ static int svm_enable_virtualization_cpu(void)
                rdmsr(MSR_TSC_AUX, sev_es_host_save_area(sd)->tsc_aux, msr_hi);
        }
 
-       if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
-               msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
-
        return 0;
 }
 
@@ -1518,6 +1512,63 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu)
        __free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
 }
 
+#ifdef CONFIG_CPU_MITIGATIONS
+static DEFINE_SPINLOCK(srso_lock);
+static atomic_t srso_nr_vms;
+
+static void svm_srso_clear_bp_spec_reduce(void *ign)
+{
+       struct svm_cpu_data *sd = this_cpu_ptr(&svm_data);
+
+       if (!sd->bp_spec_reduce_set)
+               return;
+
+       msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
+       sd->bp_spec_reduce_set = false;
+}
+
+static void svm_srso_vm_destroy(void)
+{
+       if (!cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
+               return;
+
+       if (atomic_dec_return(&srso_nr_vms))
+               return;
+
+       guard(spinlock)(&srso_lock);
+
+       /*
+        * Verify a new VM didn't come along, acquire the lock, and increment
+        * the count before this task acquired the lock.
+        */
+       if (atomic_read(&srso_nr_vms))
+               return;
+
+       on_each_cpu(svm_srso_clear_bp_spec_reduce, NULL, 1);
+}
+
+static void svm_srso_vm_init(void)
+{
+       if (!cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
+               return;
+
+       /*
+        * Acquire the lock on 0 => 1 transitions to ensure a potential 1 => 0
+        * transition, i.e. destroying the last VM, is fully complete, e.g. so
+        * that a delayed IPI doesn't clear BP_SPEC_REDUCE after a vCPU runs.
+        */
+       if (atomic_inc_not_zero(&srso_nr_vms))
+               return;
+
+       guard(spinlock)(&srso_lock);
+
+       atomic_inc(&srso_nr_vms);
+}
+#else
+static void svm_srso_vm_init(void) { }
+static void svm_srso_vm_destroy(void) { }
+#endif
+
 static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -1550,6 +1601,11 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
            (!boot_cpu_has(X86_FEATURE_V_TSC_AUX) || !sev_es_guest(vcpu->kvm)))
                kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);
 
+       if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE) &&
+           !sd->bp_spec_reduce_set) {
+               sd->bp_spec_reduce_set = true;
+               msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
+       }
        svm->guest_state_loaded = true;
 }
 
@@ -5040,6 +5096,8 @@ static void svm_vm_destroy(struct kvm *kvm)
 {
        avic_vm_destroy(kvm);
        sev_vm_destroy(kvm);
+
+       svm_srso_vm_destroy();
 }
 
 static int svm_vm_init(struct kvm *kvm)
@@ -5065,6 +5123,7 @@ static int svm_vm_init(struct kvm *kvm)
                        return ret;
        }
 
+       svm_srso_vm_init();
        return 0;
 }
 
arch/x86/kvm/svm/svm.h
index d4490eaed55dd42130552e517d899c5f16d090cb..f16b068c4228b80145863dab188c417a43b95fb7 100644
@@ -335,6 +335,8 @@ struct svm_cpu_data {
        u32 next_asid;
        u32 min_asid;
 
+       bool bp_spec_reduce_set;
+
        struct vmcb *save_area;
        unsigned long save_area_pa;