From: Sean Christopherson
Date: Tue, 5 Aug 2025 19:05:20 +0000 (-0700)
Subject: KVM: x86/pmu: Rename pmc_speculative_in_use() to pmc_is_locally_enabled()
X-Git-Tag: v6.18-rc1~55^2~6^2~38
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=6b6f1adc43320494197ddbefbe933faea21e2d10;p=thirdparty%2Fkernel%2Flinux.git

KVM: x86/pmu: Rename pmc_speculative_in_use() to pmc_is_locally_enabled()

Rename pmc_speculative_in_use() to pmc_is_locally_enabled() to better
capture what it actually tracks, and to show its relationship to
pmc_is_globally_enabled().  While neither AMD nor Intel refer to event
selectors or the fixed counter control MSR as "local", it's the obvious
name to pair with "global".

As for "speculative", there's absolutely nothing speculative about the
checks.  E.g. for PMUs without PERF_GLOBAL_CTRL, from the guest's
perspective, the counters are "in use" without any qualifications.

No functional change intended.

Reviewed-by: Dapeng Mi
Link: https://lore.kernel.org/r/20250805190526.1453366-13-seanjc@google.com
Signed-off-by: Sean Christopherson
---

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 8070097c5c4a8..55b995c3ed08b 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -493,7 +493,7 @@ static bool check_pmu_event_filter(struct kvm_pmc *pmc)
 
 static bool pmc_event_is_allowed(struct kvm_pmc *pmc)
 {
-	return pmc_is_globally_enabled(pmc) && pmc_speculative_in_use(pmc) &&
+	return pmc_is_globally_enabled(pmc) && pmc_is_locally_enabled(pmc) &&
 	       check_pmu_event_filter(pmc);
 }
 
@@ -572,7 +572,7 @@ void kvm_pmu_recalc_pmc_emulation(struct kvm_pmu *pmu, struct kvm_pmc *pmc)
 	 * omitting a PMC from a bitmap could result in a missed event if the
 	 * filter is changed to allow counting the event.
 	 */
-	if (!pmc_speculative_in_use(pmc))
+	if (!pmc_is_locally_enabled(pmc))
 		return;
 
 	if (pmc_is_event_match(pmc, kvm_pmu_eventsel.INSTRUCTIONS_RETIRED))
@@ -912,7 +912,7 @@ void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
 		      pmu->pmc_in_use, X86_PMC_IDX_MAX);
 
 	kvm_for_each_pmc(pmu, pmc, i, bitmask) {
-		if (pmc->perf_event && !pmc_speculative_in_use(pmc))
+		if (pmc->perf_event && !pmc_is_locally_enabled(pmc))
 			pmc_stop_counter(pmc);
 	}
 
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index cb93a936a177f..08ae644db00e0 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -160,7 +160,7 @@ static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
 	return NULL;
 }
 
-static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
+static inline bool pmc_is_locally_enabled(struct kvm_pmc *pmc)
 {
 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
 
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 0b173602821ba..07baff96300f9 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -762,7 +762,7 @@ void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
 	int bit, hw_idx;
 
 	kvm_for_each_pmc(pmu, pmc, bit, (unsigned long *)&pmu->global_ctrl) {
-		if (!pmc_speculative_in_use(pmc) ||
+		if (!pmc_is_locally_enabled(pmc) ||
 		    !pmc_is_globally_enabled(pmc) || !pmc->perf_event)
 			continue;
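
For context on the naming, the two helpers check different layers of guest PMU
enablement: pmc_is_globally_enabled() looks at the counter's bit in the guest's
PERF_GLOBAL_CTRL, while pmc_is_locally_enabled() looks at the per-counter
control the guest programmed (the enable bit in a general-purpose counter's
event selector, or the OS/USR field of the fixed counter control MSR).  The
standalone C sketch below models that split with simplified structures and
constants; it is illustrative only and does not reproduce KVM's actual
definitions.

/*
 * Standalone model of the "local" vs. "global" enable checks.  Not kernel
 * code; struct layout and names are simplified for illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Architectural "EN" bit (bit 22) in a general-purpose event selector. */
#define EVENTSEL_ENABLE (1ULL << 22)

struct pmc_model {
        int idx;                   /* counter index, also its bit in global_ctrl */
        bool is_fixed;             /* fixed counter vs. general-purpose counter */
        uint64_t eventsel;         /* guest-written event selector (GP counters) */
        uint64_t fixed_ctrl_field; /* guest-written OS/USR bits (fixed counters) */
        const uint64_t *global_ctrl; /* the PMU's PERF_GLOBAL_CTRL value */
};

/* "Locally" enabled: the guest turned the counter on via its own control. */
static bool pmc_is_locally_enabled(const struct pmc_model *pmc)
{
        if (pmc->is_fixed)
                return pmc->fixed_ctrl_field != 0;
        return pmc->eventsel & EVENTSEL_ENABLE;
}

/* "Globally" enabled: the counter's bit is set in PERF_GLOBAL_CTRL. */
static bool pmc_is_globally_enabled(const struct pmc_model *pmc)
{
        return *pmc->global_ctrl & (1ULL << pmc->idx);
}

int main(void)
{
        uint64_t global_ctrl = 0x1;     /* only counter 0 enabled globally */
        struct pmc_model gp1 = {
                .idx = 1,
                .is_fixed = false,
                .eventsel = EVENTSEL_ENABLE | 0xc0, /* enabled locally... */
                .global_ctrl = &global_ctrl,        /* ...but not globally */
        };

        printf("local=%d global=%d\n",
               pmc_is_locally_enabled(&gp1), pmc_is_globally_enabled(&gp1));
        return 0;
}

In this model, a counter that has been programmed but is masked off in
PERF_GLOBAL_CTRL is locally enabled yet globally disabled, which is exactly
the distinction the rename is meant to surface; on PMUs without
PERF_GLOBAL_CTRL, the local check alone decides whether the counter is in use.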