git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: x86/pmu: Move initialization of valid PMCs bitmask to common x86
author: Sean Christopherson <seanjc@google.com>
Wed, 6 Aug 2025 19:56:52 +0000 (12:56 -0700)
committer: Sean Christopherson <seanjc@google.com>
Thu, 18 Sep 2025 19:58:13 +0000 (12:58 -0700)
Move all initialization of all_valid_pmc_idx to common code, as the logic
is 100% common to Intel and AMD, and KVM heavily relies on Intel and AMD
having the same semantics.  E.g. the fact that AMD doesn't support fixed
counters doesn't allow KVM to use all_valid_pmc_idx[63:32] for other
purposes.

Tested-by: Xudong Hao <xudong.hao@intel.com>
Link: https://lore.kernel.org/r/20250806195706.1650976-31-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/pmu.c
arch/x86/kvm/svm/pmu.c
arch/x86/kvm/vmx/pmu_intel.c

index 5205f0d9ced9b8f142635bb9c67281ee221404f4..b7dc5bd981baeca6fff93c158690b2bd7f3ee376 100644 (file)
@@ -888,6 +888,10 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
         */
        if (kvm_pmu_has_perf_global_ctrl(pmu) && pmu->nr_arch_gp_counters)
                pmu->global_ctrl = GENMASK_ULL(pmu->nr_arch_gp_counters - 1, 0);
+
+       bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
+       bitmap_set(pmu->all_valid_pmc_idx, KVM_FIXED_PMC_BASE_IDX,
+                  pmu->nr_arch_fixed_counters);
 }
 
 void kvm_pmu_init(struct kvm_vcpu *vcpu)
index 25ccd6a998386ff5486e2c1ff9b0e1a109541b56..bc062285fbf53435ec794cb449d1258125915755 100644 (file)
@@ -210,7 +210,6 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
        /* not applicable to AMD; but clean them to prevent any fall out */
        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
        pmu->nr_arch_fixed_counters = 0;
-       bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
 }
 
 static void amd_pmu_init(struct kvm_vcpu *vcpu)
index 096f091980f0de62b6c9d2cde0bcf9a963ff33f6..edf0db8e53ad33a69fdd258256fbba848bc60055 100644 (file)
@@ -579,11 +579,6 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
                pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
        }
 
-       bitmap_set(pmu->all_valid_pmc_idx,
-               0, pmu->nr_arch_gp_counters);
-       bitmap_set(pmu->all_valid_pmc_idx,
-               INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
-
        perf_capabilities = vcpu_get_perf_capabilities(vcpu);
        if (intel_pmu_lbr_is_compatible(vcpu) &&
            (perf_capabilities & PERF_CAP_LBR_FMT))