git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: x86/pmu: Process only enabled PMCs when emulating events in software
author: Sean Christopherson <seanjc@google.com>
Fri, 10 Nov 2023 02:28:53 +0000 (18:28 -0800)
committer: Sean Christopherson <seanjc@google.com>
Thu, 1 Feb 2024 17:35:48 +0000 (09:35 -0800)
Mask off disabled counters based on PERF_GLOBAL_CTRL *before* iterating
over PMCs to emulate (branch) instruction required events in software.  In
the common case where the guest isn't utilizing the PMU, pre-checking for
enabled counters turns a relatively expensive search into a few AND uops
and a Jcc.

Sadly, PMUs without PERF_GLOBAL_CTRL, e.g. most existing AMD CPUs, are out
of luck as there is no way to check that a PMC isn't being used without
checking the PMC's event selector.

Cc: Konstantin Khorenko <khorenko@virtuozzo.com>
Link: https://lore.kernel.org/r/20231110022857.1273836-7-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/pmu.c

index 7b5563ff259cec7e56c10f965f09c61003431f69..c04c3f37a1b8cb98251613702684c003f761c826 100644 (file)
@@ -847,11 +847,20 @@ static inline bool cpl_is_matched(struct kvm_pmc *pmc)
 
 void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
 {
+       DECLARE_BITMAP(bitmap, X86_PMC_IDX_MAX);
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        int i;
 
-       kvm_for_each_pmc(pmu, pmc, i, pmu->all_valid_pmc_idx) {
+       BUILD_BUG_ON(sizeof(pmu->global_ctrl) * BITS_PER_BYTE != X86_PMC_IDX_MAX);
+
+       if (!kvm_pmu_has_perf_global_ctrl(pmu))
+               bitmap_copy(bitmap, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX);
+       else if (!bitmap_and(bitmap, pmu->all_valid_pmc_idx,
+                            (unsigned long *)&pmu->global_ctrl, X86_PMC_IDX_MAX))
+               return;
+
+       kvm_for_each_pmc(pmu, pmc, i, bitmap) {
                if (!pmc_event_is_allowed(pmc))
                        continue;