KVM: SVM: Merge "after set CPUID" intercept recalc helpers
author     Sean Christopherson <seanjc@google.com>
Tue, 10 Jun 2025 22:57:28 +0000 (15:57 -0700)
committer  Sean Christopherson <seanjc@google.com>
Fri, 20 Jun 2025 20:07:32 +0000 (13:07 -0700)
Merge svm_recalc_intercepts_after_set_cpuid() and
svm_recalc_instruction_intercepts() such that the "after set CPUID" helper
simply invokes the type-specific helpers (MSRs vs. instructions), i.e.
make svm_recalc_intercepts_after_set_cpuid() a single entry point for all
intercept updates that need to be performed after a CPUID change.

No functional change intended.

Link: https://lore.kernel.org/r/20250610225737.156318-24-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
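
For readers skimming the diff below, here is a minimal, self-contained C sketch (not the kernel source) of the call structure the patch produces: svm_recalc_intercepts_after_set_cpuid() becomes the single entry point after a CPUID change and fans out to the type-specific helpers. The struct kvm_vcpu stand-in and the helper bodies are placeholders for illustration; only the dispatch shape mirrors the patch.

/*
 * Sketch of the post-patch call structure.  Types and helper bodies are
 * stubs; in the kernel the helpers update the VMCB intercept bits and the
 * MSR intercept bitmaps respectively.
 */
#include <stdio.h>

struct kvm_vcpu { int dummy; };	/* stand-in for the real vCPU struct */

static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu)
{
	/* In the kernel: INVPCID, RDTSCP, and VMLOAD/VMSAVE intercepts. */
	(void)vcpu;
	puts("recalc instruction intercepts");
}

static void svm_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
{
	/* In the kernel: per-MSR intercept bitmap updates. */
	(void)vcpu;
	puts("recalc MSR intercepts");
}

/* Single entry point for all intercept updates after a CPUID change. */
static void svm_recalc_intercepts_after_set_cpuid(struct kvm_vcpu *vcpu)
{
	svm_recalc_instruction_intercepts(vcpu);
	svm_recalc_msr_intercepts(vcpu);
}

int main(void)
{
	struct kvm_vcpu vcpu = { 0 };

	svm_recalc_intercepts_after_set_cpuid(&vcpu);
	return 0;
}

Because the entry point now covers both intercept types, the separate svm_recalc_instruction_intercepts() calls in init_vmcb() and svm_vcpu_after_set_cpuid() can be dropped, as the hunks below show.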
arch/x86/kvm/svm/svm.c

index c17eeeea8e643744e6bd6afaaf3519c61124952d..a6d6d3973a6b6adc4174c63f2c569d9b6b0511d5 100644
@@ -1069,9 +1069,10 @@ void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu)
 }
 
 /* Evaluate instruction intercepts that depend on guest CPUID features. */
-static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu,
-                                             struct vcpu_svm *svm)
+static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu)
 {
+       struct vcpu_svm *svm = to_svm(vcpu);
+
        /*
         * Intercept INVPCID if shadow paging is enabled to sync/free shadow
         * roots, or if INVPCID is disabled in the guest to inject #UD.
@@ -1090,11 +1091,6 @@ static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu,
                else
                        svm_set_intercept(svm, INTERCEPT_RDTSCP);
        }
-}
-
-static void svm_recalc_intercepts_after_set_cpuid(struct kvm_vcpu *vcpu)
-{
-       struct vcpu_svm *svm = to_svm(vcpu);
 
        if (guest_cpuid_is_intel_compatible(vcpu)) {
                svm_set_intercept(svm, INTERCEPT_VMLOAD);
@@ -1111,7 +1107,11 @@ static void svm_recalc_intercepts_after_set_cpuid(struct kvm_vcpu *vcpu)
                        svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
                }
        }
+}
 
+static void svm_recalc_intercepts_after_set_cpuid(struct kvm_vcpu *vcpu)
+{
+       svm_recalc_instruction_intercepts(vcpu);
        svm_recalc_msr_intercepts(vcpu);
 }
 
@@ -1237,8 +1237,6 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
                svm_clr_intercept(svm, INTERCEPT_PAUSE);
        }
 
-       svm_recalc_instruction_intercepts(vcpu, svm);
-
        if (kvm_vcpu_apicv_active(vcpu))
                avic_init_vmcb(svm, vmcb);
 
@@ -4499,8 +4497,6 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
        if (guest_cpuid_is_intel_compatible(vcpu))
                guest_cpu_cap_clear(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
 
-       svm_recalc_instruction_intercepts(vcpu, svm);
-
        if (sev_guest(vcpu->kvm))
                sev_vcpu_after_set_cpuid(svm);