KVM: x86/pmu: Rename pmc_speculative_in_use() to pmc_is_locally_enabled()
author     Sean Christopherson <seanjc@google.com>
           Tue, 5 Aug 2025 19:05:20 +0000 (12:05 -0700)
committer  Sean Christopherson <seanjc@google.com>
           Tue, 19 Aug 2025 18:59:39 +0000 (11:59 -0700)
Rename pmc_speculative_in_use() to pmc_is_locally_enabled() to better
capture what it actually tracks, and to show its relationship to
pmc_is_globally_enabled().  While neither AMD nor Intel refer to event
selectors or the fixed counter control MSR as "local", it's the obvious
name to pair with "global".

As for "speculative", there's absolutely nothing speculative about the
checks.  E.g. for PMUs without PERF_GLOBAL_CTRL, from the guest's
perspective, the counters are "in use" without any qualifications.

No functional change intended.

Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Link: https://lore.kernel.org/r/20250805190526.1453366-13-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
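
For reference, a minimal sketch of what "locally enabled" means for a PMC, based on the existing helper in arch/x86/kvm/pmu.h and the upstream ARCH_PERFMON/fixed-counter definitions. The helper and constant names (pmc_is_fixed(), fixed_ctrl_field(), KVM_FIXED_PMC_BASE_IDX, ARCH_PERFMON_EVENTSEL_ENABLE) are assumed from the upstream tree and may differ by kernel version; this is not the verbatim patch:

/*
 * "Locally" enabled == the enable bit(s) in the PMC's own control: the
 * enable bit in the event selector for GP counters, or the OS/USR enable
 * field in the fixed counter control MSR for fixed counters.  This is
 * distinct from pmc_is_globally_enabled(), which checks the counter's bit
 * in PERF_GLOBAL_CTRL (when the PMU has one).
 */
static inline bool pmc_is_locally_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (pmc_is_fixed(pmc))
		return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
					pmc->idx - KVM_FIXED_PMC_BASE_IDX) & 0x3;

	return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}

As the hunks below show, callers pair this check with pmc_is_globally_enabled() and, where relevant, the PMU event filter.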
arch/x86/kvm/pmu.c
arch/x86/kvm/pmu.h
arch/x86/kvm/vmx/pmu_intel.c

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 8070097c5c4a893bfb2627e003fcfbaa95a86513..55b995c3ed08bdbcda32073850607a08219bbe92 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -493,7 +493,7 @@ static bool check_pmu_event_filter(struct kvm_pmc *pmc)
 
 static bool pmc_event_is_allowed(struct kvm_pmc *pmc)
 {
-       return pmc_is_globally_enabled(pmc) && pmc_speculative_in_use(pmc) &&
+       return pmc_is_globally_enabled(pmc) && pmc_is_locally_enabled(pmc) &&
               check_pmu_event_filter(pmc);
 }
 
@@ -572,7 +572,7 @@ void kvm_pmu_recalc_pmc_emulation(struct kvm_pmu *pmu, struct kvm_pmc *pmc)
         * omitting a PMC from a bitmap could result in a missed event if the
         * filter is changed to allow counting the event.
         */
-       if (!pmc_speculative_in_use(pmc))
+       if (!pmc_is_locally_enabled(pmc))
                return;
 
        if (pmc_is_event_match(pmc, kvm_pmu_eventsel.INSTRUCTIONS_RETIRED))
@@ -912,7 +912,7 @@ void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
                      pmu->pmc_in_use, X86_PMC_IDX_MAX);
 
        kvm_for_each_pmc(pmu, pmc, i, bitmask) {
-               if (pmc->perf_event && !pmc_speculative_in_use(pmc))
+               if (pmc->perf_event && !pmc_is_locally_enabled(pmc))
                        pmc_stop_counter(pmc);
        }
 
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index cb93a936a177f02574f4fe429f93a907b33bc66b..08ae644db00e0dae6575108b0571b621e8a23204 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -160,7 +160,7 @@ static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
        return NULL;
 }
 
-static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
+static inline bool pmc_is_locally_enabled(struct kvm_pmc *pmc)
 {
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);
 
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 0b173602821ba37f483e85e0d4daa8bec991e03d..07baff96300f981bbfc3a8a3e42b78e66b3a92ae 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -762,7 +762,7 @@ void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
        int bit, hw_idx;
 
        kvm_for_each_pmc(pmu, pmc, bit, (unsigned long *)&pmu->global_ctrl) {
-               if (!pmc_speculative_in_use(pmc) ||
+               if (!pmc_is_locally_enabled(pmc) ||
                    !pmc_is_globally_enabled(pmc) || !pmc->perf_event)
                        continue;