KVM: x86/pmu: Add wrappers for counting emulated instructions/branches
author    Sean Christopherson <seanjc@google.com>
          Tue, 5 Aug 2025 19:05:18 +0000 (12:05 -0700)
committer Sean Christopherson <seanjc@google.com>
          Tue, 19 Aug 2025 18:59:37 +0000 (11:59 -0700)
Add wrappers for triggering instruction retired and branch retired PMU
events in anticipation of reworking the internal mechanisms to track
which PMCs need to be evaluated, e.g. to avoid having to walk and check
every PMC.

Opportunistically bury "struct kvm_pmu_emulated_event_selectors" in pmu.c.

No functional change intended.

Link: https://lore.kernel.org/r/20250805190526.1453366-11-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
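
In caller terms, the change swaps raw event-selector plumbing for
self-describing helpers. A minimal before/after sketch, condensed from the
hunks below:

  /* Before: callers reached into the exported selector table. */
  kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.INSTRUCTIONS_RETIRED);

  /* After: the selector lookup is private to pmu.c. */
  kvm_pmu_instruction_retired(vcpu);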
arch/x86/kvm/pmu.c
arch/x86/kvm/pmu.h
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index adb0fb8f6bb7e31e2021fb1e61c34d3866abdafd..9040bf20e8510c9e3d3b1b7a39bb0eeeb069cb45 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
 struct x86_pmu_capability __read_mostly kvm_pmu_cap;
 EXPORT_SYMBOL_GPL(kvm_pmu_cap);
 
-struct kvm_pmu_emulated_event_selectors __read_mostly kvm_pmu_eventsel;
-EXPORT_SYMBOL_GPL(kvm_pmu_eventsel);
+struct kvm_pmu_emulated_event_selectors {
+       u64 INSTRUCTIONS_RETIRED;
+       u64 BRANCH_INSTRUCTIONS_RETIRED;
+};
+static struct kvm_pmu_emulated_event_selectors __read_mostly kvm_pmu_eventsel;
 
 /* Precise Distribution of Instructions Retired (PDIR) */
 static const struct x86_cpu_id vmx_pebs_pdir_cpu[] = {
@@ -912,7 +915,7 @@ static inline bool cpl_is_matched(struct kvm_pmc *pmc)
                                                         select_user;
 }
 
-void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel)
+static void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel)
 {
        DECLARE_BITMAP(bitmap, X86_PMC_IDX_MAX);
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -949,7 +952,18 @@ void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel)
                kvm_pmu_incr_counter(pmc);
        }
 }
-EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event);
+
+void kvm_pmu_instruction_retired(struct kvm_vcpu *vcpu)
+{
+       kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.INSTRUCTIONS_RETIRED);
+}
+EXPORT_SYMBOL_GPL(kvm_pmu_instruction_retired);
+
+void kvm_pmu_branch_retired(struct kvm_vcpu *vcpu)
+{
+       kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED);
+}
+EXPORT_SYMBOL_GPL(kvm_pmu_branch_retired);
 
 static bool is_masked_filter_valid(const struct kvm_x86_pmu_event_filter *filter)
 {
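
For context on the cost the commit message alludes to ("walk and check every
PMC"), a condensed sketch of the shape of kvm_pmu_trigger_event(); the bitmap
setup is omitted, and everything beyond the identifiers visible in this diff
is an approximation of the surrounding upstream code, not part of this patch:

  static void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel)
  {
          DECLARE_BITMAP(bitmap, X86_PMC_IDX_MAX);
          struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
          struct kvm_pmc *pmc;
          int i;

          /* (bitmap is populated with the set of globally enabled PMCs) */

          /*
           * Every valid PMC is visited on every emulated instruction or
           * branch, even though most PMCs aren't counting this event.
           */
          kvm_for_each_pmc(pmu, pmc, i, bitmap) {
                  if ((pmc->eventsel ^ eventsel) & AMD64_RAW_EVENT_MASK_NB)
                          continue;

                  if (!pmc_event_is_allowed(pmc) || !cpl_is_matched(pmc))
                          continue;

                  kvm_pmu_incr_counter(pmc);
          }
  }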
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 13477066eb4028f79aad4607be04b57a71d1f86a..740af816af372a4f0413bacace62a547facb62be 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
 
 #define KVM_FIXED_PMC_BASE_IDX INTEL_PMC_IDX_FIXED
 
-struct kvm_pmu_emulated_event_selectors {
-       u64 INSTRUCTIONS_RETIRED;
-       u64 BRANCH_INSTRUCTIONS_RETIRED;
-};
-
 struct kvm_pmu_ops {
        struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
                unsigned int idx, u64 *mask);
@@ -178,7 +173,6 @@ static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
 }
 
 extern struct x86_pmu_capability kvm_pmu_cap;
-extern struct kvm_pmu_emulated_event_selectors kvm_pmu_eventsel;
 
 void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops);
 
@@ -227,7 +221,8 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu);
 void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
 void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
-void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel);
+void kvm_pmu_instruction_retired(struct kvm_vcpu *vcpu);
+void kvm_pmu_branch_retired(struct kvm_vcpu *vcpu);
 
 bool is_vmware_backdoor_pmc(u32 pmc_idx);
 
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index b8ea1969113df0e4bb8c67cd740be7e9659574ed..db2fd4eedc9010d5de1753e38635e5dbd266a5a8 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -3690,7 +3690,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
                return 1;
        }
 
-       kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED);
+       kvm_pmu_branch_retired(vcpu);
 
        if (CC(evmptrld_status == EVMPTRLD_VMFAIL))
                return nested_vmx_failInvalid(vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f5e933f0e21ac618cfdf5f1b6759d1778e92eb80..c44d3d64270b099545737849e66f5df65d014081 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8820,7 +8820,7 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
        if (unlikely(!r))
                return 0;
 
-       kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.INSTRUCTIONS_RETIRED);
+       kvm_pmu_instruction_retired(vcpu);
 
        /*
         * rflags is the old, "raw" value of the flags.  The new value has
@@ -9161,9 +9161,9 @@ writeback:
                 */
                if (!ctxt->have_exception ||
                    exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
-                       kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.INSTRUCTIONS_RETIRED);
+                       kvm_pmu_instruction_retired(vcpu);
                        if (ctxt->is_branch)
-                               kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED);
+                               kvm_pmu_branch_retired(vcpu);
                        kvm_rip_write(vcpu, ctxt->eip);
                        if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
                                r = kvm_vcpu_do_singlestep(vcpu);