KVM: x86: Rename msr_filter_changed() => recalc_msr_intercepts()
author    Sean Christopherson <seanjc@google.com>
          Tue, 10 Jun 2025 22:57:25 +0000 (15:57 -0700)
committer Sean Christopherson <seanjc@google.com>
          Fri, 20 Jun 2025 20:07:30 +0000 (13:07 -0700)
Rename msr_filter_changed() to recalc_msr_intercepts() and drop the
trampoline wrappers now that both SVM and VMX use a filter-agnostic
recalc helper to react to a new userspace MSR filter.

No functional change intended.

Reviewed-by: Xin Li (Intel) <xin@zytor.com>
Reviewed-by: Binbin Wu <binbin.wu@linux.intel.com>
Link: https://lore.kernel.org/r/20250610225737.156318-21-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
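
As a side note, a minimal, illustrative-only C sketch of the trampoline
removal pattern (stand-in names and types, not the kernel's): instead of
routing the ops-table callback through a one-line forwarding wrapper, the
table entry now points directly at the filter-agnostic helper.

  #include <stdio.h>

  struct ops {
          void (*recalc_msr_intercepts)(int vcpu_id);
  };

  static void svm_recalc_msr_intercepts(int vcpu_id)
  {
          printf("recalc MSR intercepts for vCPU %d\n", vcpu_id);
  }

  /*
   * Before: the table pointed at a trampoline whose only job was to
   * forward the call:
   *
   *   static void svm_msr_filter_changed(int vcpu_id)
   *   {
   *           svm_recalc_msr_intercepts(vcpu_id);
   *   }
   *
   * After: reference the helper directly.
   */
  static struct ops svm_ops = {
          .recalc_msr_intercepts = svm_recalc_msr_intercepts,
  };

  int main(void)
  {
          svm_ops.recalc_msr_intercepts(0);
          return 0;
  }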
arch/x86/include/asm/kvm-x86-ops.h
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/main.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/x86_ops.h
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 9e0c37ea267e055a417b973eefbebe874b084342..6a24f0a01ff631a04e1db4433f7ce4ce6a62d67a 100644
@@ -138,7 +138,7 @@ KVM_X86_OP(check_emulate_instruction)
 KVM_X86_OP(apic_init_signal_blocked)
 KVM_X86_OP_OPTIONAL(enable_l2_tlb_flush)
 KVM_X86_OP_OPTIONAL(migrate_timers)
-KVM_X86_OP(msr_filter_changed)
+KVM_X86_OP(recalc_msr_intercepts)
 KVM_X86_OP(complete_emulated_msr)
 KVM_X86_OP(vcpu_deliver_sipi_vector)
 KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 5e0415a8ee3f2a48cfe47b6bb7d2e59407e65b5e..8f38c24cce63b85aef7928170442825cbca6feab 100644
@@ -1897,7 +1897,7 @@ struct kvm_x86_ops {
        int (*enable_l2_tlb_flush)(struct kvm_vcpu *vcpu);
 
        void (*migrate_timers)(struct kvm_vcpu *vcpu);
-       void (*msr_filter_changed)(struct kvm_vcpu *vcpu);
+       void (*recalc_msr_intercepts)(struct kvm_vcpu *vcpu);
        int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err);
 
        void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *vcpu, u8 vector);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 290823e945578a7916c4d2ed009cfebd2aa925e6..20f566ca1b377d083d8e7961bf332770a01667c6 100644
@@ -890,11 +890,6 @@ static void svm_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
         */
 }
 
-static void svm_msr_filter_changed(struct kvm_vcpu *vcpu)
-{
-       svm_recalc_msr_intercepts(vcpu);
-}
-
 void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
 {
        to_vmcb->save.dbgctl            = from_vmcb->save.dbgctl;
@@ -923,7 +918,6 @@ static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
        struct vcpu_svm *svm = to_svm(vcpu);
 
        KVM_BUG_ON(sev_es_guest(vcpu->kvm), vcpu->kvm);
-
        svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
        svm_recalc_lbr_msr_intercepts(vcpu);
 
@@ -5216,7 +5210,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 
        .apic_init_signal_blocked = svm_apic_init_signal_blocked,
 
-       .msr_filter_changed = svm_msr_filter_changed,
+       .recalc_msr_intercepts = svm_recalc_msr_intercepts,
        .complete_emulated_msr = svm_complete_emulated_msr,
 
        .vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index 4a6d4460f947158abec958a500c255014b38571d..047d314fa4e46d834358b7f593fe226a00a04fea 100644
@@ -220,7 +220,7 @@ static int vt_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        return vmx_get_msr(vcpu, msr_info);
 }
 
-static void vt_msr_filter_changed(struct kvm_vcpu *vcpu)
+static void vt_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
 {
        /*
         * TDX doesn't allow VMM to configure interception of MSR accesses.
@@ -231,7 +231,7 @@ static void vt_msr_filter_changed(struct kvm_vcpu *vcpu)
        if (is_td_vcpu(vcpu))
                return;
 
-       vmx_msr_filter_changed(vcpu);
+       vmx_recalc_msr_intercepts(vcpu);
 }
 
 static int vt_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
@@ -1027,7 +1027,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
        .apic_init_signal_blocked = vt_op(apic_init_signal_blocked),
        .migrate_timers = vmx_migrate_timers,
 
-       .msr_filter_changed = vt_op(msr_filter_changed),
+       .recalc_msr_intercepts = vt_op(recalc_msr_intercepts),
        .complete_emulated_msr = vt_op(complete_emulated_msr),
 
        .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 0ca9f5de37cefaf92bd966c0e869a82c8fb92dc7..e38f4648e61292ca860b7ef92b5264416508657c 100644
@@ -4085,7 +4085,7 @@ void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu)
        }
 }
 
-static void vmx_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
+void vmx_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
 {
        if (!cpu_has_vmx_msr_bitmap())
                return;
@@ -4134,11 +4134,6 @@ static void vmx_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
         */
 }
 
-void vmx_msr_filter_changed(struct kvm_vcpu *vcpu)
-{
-       vmx_recalc_msr_intercepts(vcpu);
-}
-
 static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
                                                int vector)
 {
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index 0b4f5c5558d0dac61cd599066d53ee7166da3d35..ffd72f036213f32dd4c6ab757d281ac1b77a663d 100644
@@ -52,7 +52,7 @@ void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
                           int trig_mode, int vector);
 void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu);
 bool vmx_has_emulated_msr(struct kvm *kvm, u32 index);
-void vmx_msr_filter_changed(struct kvm_vcpu *vcpu);
+void vmx_recalc_msr_intercepts(struct kvm_vcpu *vcpu);
 void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
 void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
 int vmx_get_feature_msr(u32 msr, u64 *data);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 179c2c550c8d900951266c3287ca52cadf148444..7227696c75e695d4fecd14787de04624c9d208f3 100644
@@ -10927,8 +10927,14 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        kvm_vcpu_update_apicv(vcpu);
                if (kvm_check_request(KVM_REQ_APF_READY, vcpu))
                        kvm_check_async_pf_completion(vcpu);
+
+               /*
+                * Recalc MSR intercepts as userspace may want to intercept
+                * accesses to MSRs that KVM would otherwise pass through to
+                * the guest.
+                */
                if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu))
-                       kvm_x86_call(msr_filter_changed)(vcpu);
+                       kvm_x86_call(recalc_msr_intercepts)(vcpu);
 
                if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu))
                        kvm_x86_call(update_cpu_dirty_logging)(vcpu);
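
For context on the path above: userspace installs a filter with the
KVM_X86_SET_MSR_FILTER VM ioctl, KVM raises KVM_REQ_MSR_FILTER_CHANGED on
each vCPU, and vcpu_enter_guest() then invokes the renamed
recalc_msr_intercepts() hook. A minimal userspace sketch of that trigger,
assuming vm_fd is an open KVM VM file descriptor and omitting error
handling:

  #include <linux/kvm.h>
  #include <string.h>
  #include <sys/ioctl.h>

  /*
   * Deny guest RDMSR of IA32_TSC (MSR 0x10).  Depending on whether
   * KVM_CAP_X86_USER_SPACE_MSR is enabled with
   * KVM_MSR_EXIT_REASON_FILTER, a filtered access either exits to
   * userspace or injects #GP.  Installing the filter is what makes KVM
   * raise KVM_REQ_MSR_FILTER_CHANGED and recalc the MSR intercepts.
   */
  static int deny_tsc_reads(int vm_fd)
  {
          __u8 bitmap = 0;                /* one MSR, one bit: 0 = deny */
          struct kvm_msr_filter filter;

          memset(&filter, 0, sizeof(filter));
          filter.flags = KVM_MSR_FILTER_DEFAULT_ALLOW;
          filter.ranges[0].flags = KVM_MSR_FILTER_READ;
          filter.ranges[0].base = 0x10;   /* IA32_TIME_STAMP_COUNTER */
          filter.ranges[0].nmsrs = 1;
          filter.ranges[0].bitmap = &bitmap;

          return ioctl(vm_fd, KVM_X86_SET_MSR_FILTER, &filter);
  }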