KVM: x86: Rework KVM_REQ_MSR_FILTER_CHANGED into a generic RECALC_INTERCEPTS
author Sean Christopherson <seanjc@google.com>
Wed, 6 Aug 2025 19:56:46 +0000 (12:56 -0700)
committer Sean Christopherson <seanjc@google.com>
Thu, 18 Sep 2025 19:57:18 +0000 (12:57 -0700)
Rework the MSR_FILTER_CHANGED request into a more generic RECALC_INTERCEPTS
request, and expand the responsibilities of vendor code to recalculate all
intercepts that vary based on userspace input, e.g. instruction intercepts
that are tied to guest CPUID.
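
For instance, on SVM the INVPCID intercept depends on the guest CPUID that
userspace configures.  A simplified sketch of that dependency, modeled on
svm_recalc_instruction_intercepts() (the helper name below is made up, and
the real code also consults kvm_cpu_cap_has() and npt_enabled):

  static void sketch_recalc_invpcid_intercept(struct kvm_vcpu *vcpu)
  {
          struct vcpu_svm *svm = to_svm(vcpu);

          /*
           * If userspace's CPUID for the guest doesn't enumerate INVPCID,
           * intercept the instruction so KVM can inject #UD; otherwise let
           * the guest execute it directly.
           */
          if (guest_cpuid_has(vcpu, X86_FEATURE_INVPCID))
                  svm_clr_intercept(svm, INTERCEPT_INVPCID);
          else
                  svm_set_intercept(svm, INTERCEPT_INVPCID);
  }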

Providing a generic recalc request will allow the upcoming mediated PMU
support to trigger a recalc when PMU features, e.g. PERF_CAPABILITIES, are
set by userspace, without having to make multiple calls to/from PMU code.
As a bonus, using a request will effectively coalesce recalcs, e.g. will
reduce the number of recalcs for normal usage from 3+ to 1 (vCPU create,
set CPUID, set PERF_CAPABILITIES (Intel only), set filter).
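
The coalescing falls out of how vCPU requests work: a request is a single
bit in vcpu->requests, so raising it repeatedly before the next guest entry
is idempotent, and the test-and-clear in vcpu_enter_guest() services all
pending triggers with one recalc.  A standalone toy model of that behavior
(not the kernel implementation, which uses atomic bitops in
include/linux/kvm_host.h):

  #include <stdbool.h>
  #include <stdio.h>

  /* Toy stand-in for KVM's per-vCPU request bitmap. */
  struct toy_vcpu {
          unsigned long requests;
          int recalcs;
  };

  #define TOY_REQ_RECALC_INTERCEPTS 0

  static void toy_make_request(int req, struct toy_vcpu *vcpu)
  {
          vcpu->requests |= 1UL << req;   /* idempotent: repeats coalesce */
  }

  static bool toy_check_request(int req, struct toy_vcpu *vcpu)
  {
          if (!(vcpu->requests & (1UL << req)))
                  return false;
          vcpu->requests &= ~(1UL << req);        /* test-and-clear */
          return true;
  }

  int main(void)
  {
          struct toy_vcpu vcpu = { 0 };

          /* vCPU create, set CPUID, and set filter each raise the same
           * request before the first guest entry... */
          toy_make_request(TOY_REQ_RECALC_INTERCEPTS, &vcpu);
          toy_make_request(TOY_REQ_RECALC_INTERCEPTS, &vcpu);
          toy_make_request(TOY_REQ_RECALC_INTERCEPTS, &vcpu);

          /* ...but the entry-time check services them with one recalc. */
          if (toy_check_request(TOY_REQ_RECALC_INTERCEPTS, &vcpu))
                  vcpu.recalcs++;

          printf("recalcs = %d\n", vcpu.recalcs); /* prints 1 */
          return 0;
  }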

The downside is that MSR filter changes that are done in isolation will do
a small amount of unnecessary work, but that's already a relatively slow
path, and the cost of recalculating instruction intercepts is negligible.

Tested-by: Xudong Hao <xudong.hao@intel.com>
Link: https://lore.kernel.org/r/20250806195706.1650976-25-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/include/asm/kvm-x86-ops.h
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/main.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/x86_ops.h
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 18a5c3119e1a84b339a34929da0ff8bb73f5a354..7c240e23bd5217b82ce78012ec07ce2ea2bd75e2 100644
@@ -138,7 +138,7 @@ KVM_X86_OP(check_emulate_instruction)
 KVM_X86_OP(apic_init_signal_blocked)
 KVM_X86_OP_OPTIONAL(enable_l2_tlb_flush)
 KVM_X86_OP_OPTIONAL(migrate_timers)
-KVM_X86_OP(recalc_msr_intercepts)
+KVM_X86_OP(recalc_intercepts)
 KVM_X86_OP(complete_emulated_msr)
 KVM_X86_OP(vcpu_deliver_sipi_vector)
 KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 464a636b2dc603c0a8b557b6cde23fb82be7380b..7e1e41f04752ba1b817fe3849597b75184f0a7c0 100644
 #define KVM_REQ_TLB_FLUSH_GUEST \
        KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_APF_READY              KVM_ARCH_REQ(28)
-#define KVM_REQ_MSR_FILTER_CHANGED     KVM_ARCH_REQ(29)
+#define KVM_REQ_RECALC_INTERCEPTS      KVM_ARCH_REQ(29)
 #define KVM_REQ_UPDATE_CPU_DIRTY_LOGGING \
        KVM_ARCH_REQ_FLAGS(30, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_MMU_FREE_OBSOLETE_ROOTS \
@@ -1914,7 +1914,7 @@ struct kvm_x86_ops {
        int (*enable_l2_tlb_flush)(struct kvm_vcpu *vcpu);
 
        void (*migrate_timers)(struct kvm_vcpu *vcpu);
-       void (*recalc_msr_intercepts)(struct kvm_vcpu *vcpu);
+       void (*recalc_intercepts)(struct kvm_vcpu *vcpu);
        int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err);
 
        void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *vcpu, u8 vector);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 7e7821ee8ee187f16f89394cda7171353af1ff31..711f160113d118f5e5e70a1b1be62bec5c822512 100644
@@ -1077,7 +1077,7 @@ static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu)
        }
 }
 
-static void svm_recalc_intercepts_after_set_cpuid(struct kvm_vcpu *vcpu)
+static void svm_recalc_intercepts(struct kvm_vcpu *vcpu)
 {
        svm_recalc_instruction_intercepts(vcpu);
        svm_recalc_msr_intercepts(vcpu);
@@ -1225,7 +1225,7 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
 
        svm_hv_init_vmcb(vmcb);
 
-       svm_recalc_intercepts_after_set_cpuid(vcpu);
+       svm_recalc_intercepts(vcpu);
 
        vmcb_mark_all_dirty(vmcb);
 
@@ -4479,7 +4479,7 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
        if (sev_guest(vcpu->kvm))
                sev_vcpu_after_set_cpuid(svm);
 
-       svm_recalc_intercepts_after_set_cpuid(vcpu);
+       svm_recalc_intercepts(vcpu);
 }
 
 static bool svm_has_wbinvd_exit(void)
@@ -5181,7 +5181,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 
        .apic_init_signal_blocked = svm_apic_init_signal_blocked,
 
-       .recalc_msr_intercepts = svm_recalc_msr_intercepts,
+       .recalc_intercepts = svm_recalc_intercepts,
        .complete_emulated_msr = svm_complete_emulated_msr,
 
        .vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index dbab1c15b0cdfd82a5958c7055e14f4a5b20073f..68dcafd177a878f580fda9e057488aa3bf105b4e 100644
@@ -188,18 +188,18 @@ static int vt_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        return vmx_get_msr(vcpu, msr_info);
 }
 
-static void vt_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
+static void vt_recalc_intercepts(struct kvm_vcpu *vcpu)
 {
        /*
-        * TDX doesn't allow VMM to configure interception of MSR accesses.
-        * TDX guest requests MSR accesses by calling TDVMCALL.  The MSR
-        * filters will be applied when handling the TDVMCALL for RDMSR/WRMSR
-        * if the userspace has set any.
+        * TDX doesn't allow VMM to configure interception of instructions or
+        * MSR accesses.  TDX guest requests MSR accesses by calling TDVMCALL.
+        * The MSR filters will be applied when handling the TDVMCALL for
+        * RDMSR/WRMSR if the userspace has set any.
         */
        if (is_td_vcpu(vcpu))
                return;
 
-       vmx_recalc_msr_intercepts(vcpu);
+       vmx_recalc_intercepts(vcpu);
 }
 
 static int vt_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
@@ -995,7 +995,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
        .apic_init_signal_blocked = vt_op(apic_init_signal_blocked),
        .migrate_timers = vmx_migrate_timers,
 
-       .recalc_msr_intercepts = vt_op(recalc_msr_intercepts),
+       .recalc_intercepts = vt_op(recalc_intercepts),
        .complete_emulated_msr = vt_op(complete_emulated_msr),
 
        .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 5d3a50547c3e39fd2bdfa70799d83d028ed7c42d..68bec421f3fc661896a14a12f4169298d2162bc1 100644
@@ -4068,7 +4068,7 @@ void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu)
        }
 }
 
-void vmx_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
+static void vmx_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
 {
        if (!cpu_has_vmx_msr_bitmap())
                return;
@@ -4121,6 +4121,11 @@ void vmx_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
         */
 }
 
+void vmx_recalc_intercepts(struct kvm_vcpu *vcpu)
+{
+       vmx_recalc_msr_intercepts(vcpu);
+}
+
 static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
                                                int vector)
 {
@@ -7802,7 +7807,7 @@ void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
                        ~FEAT_CTL_SGX_LC_ENABLED;
 
        /* Recalc MSR interception to account for feature changes. */
-       vmx_recalc_msr_intercepts(vcpu);
+       vmx_recalc_intercepts(vcpu);
 
        /* Refresh #PF interception to account for MAXPHYADDR changes. */
        vmx_update_exception_bitmap(vcpu);
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index 2b3424f638dbbca24f943bc58539162b69b6d012..2c590ff44ced0133b51383f357610726ba11f53c 100644
@@ -52,7 +52,7 @@ void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
                           int trig_mode, int vector);
 void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu);
 bool vmx_has_emulated_msr(struct kvm *kvm, u32 index);
-void vmx_recalc_msr_intercepts(struct kvm_vcpu *vcpu);
+void vmx_recalc_intercepts(struct kvm_vcpu *vcpu);
 void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
 void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
 int vmx_get_feature_msr(u32 msr, u64 *data);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 99f2a150ca78dfb618186729b9b664208d980e59..64e08148909bfc8db8f8185e37c34067894c933e 100644
@@ -6794,7 +6794,11 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm,
 
        kvm_free_msr_filter(old_filter);
 
-       kvm_make_all_cpus_request(kvm, KVM_REQ_MSR_FILTER_CHANGED);
+       /*
+        * Recalc MSR intercepts as userspace may want to intercept accesses to
+        * MSRs that KVM would otherwise pass through to the guest.
+        */
+       kvm_make_all_cpus_request(kvm, KVM_REQ_RECALC_INTERCEPTS);
 
        return 0;
 }
@@ -10827,13 +10831,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                if (kvm_check_request(KVM_REQ_APF_READY, vcpu))
                        kvm_check_async_pf_completion(vcpu);
 
-               /*
-                * Recalc MSR intercepts as userspace may want to intercept
-                * accesses to MSRs that KVM would otherwise pass through to
-                * the guest.
-                */
-               if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu))
-                       kvm_x86_call(recalc_msr_intercepts)(vcpu);
+               if (kvm_check_request(KVM_REQ_RECALC_INTERCEPTS, vcpu))
+                       kvm_x86_call(recalc_intercepts)(vcpu);
 
                if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu))
                        kvm_x86_call(update_cpu_dirty_logging)(vcpu);