git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: x86/pmu: Restrict GLOBAL_{CTRL,STATUS}, fixed PMCs, and PEBS to PMU v2+
author: Sean Christopherson <seanjc@google.com>
Wed, 6 Aug 2025 19:56:53 +0000 (12:56 -0700)
committer: Sean Christopherson <seanjc@google.com>
Thu, 18 Sep 2025 19:58:15 +0000 (12:58 -0700)
Restrict support for GLOBAL_CTRL, GLOBAL_STATUS, fixed PMCs, and PEBS to
v2 or later vPMUs.  The SDM explicitly states that GLOBAL_{CTRL,STATUS} and
fixed counters were introduced with PMU v2, and PEBS has hard dependencies
on fixed counters and the bitmap MSR layouts established by PMU v2.

Reported-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Tested-by: Xudong Hao <xudong.hao@intel.com>
Link: https://lore.kernel.org/r/20250806195706.1650976-32-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/vmx/pmu_intel.c

index edf0db8e53ad33a69fdd258256fbba848bc60055..de1d9785c01ff9359b36b9f83b624e21e98f75c0 100644 (file)
@@ -541,16 +541,33 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
                                      kvm_pmu_cap.events_mask_len);
        pmu->available_event_types = ~entry->ebx & (BIT_ULL(eax.split.mask_length) - 1);
 
-       if (pmu->version == 1) {
-               pmu->nr_arch_fixed_counters = 0;
-       } else {
-               pmu->nr_arch_fixed_counters = min_t(int, edx.split.num_counters_fixed,
-                                                   kvm_pmu_cap.num_counters_fixed);
-               edx.split.bit_width_fixed = min_t(int, edx.split.bit_width_fixed,
-                                                 kvm_pmu_cap.bit_width_fixed);
-               pmu->counter_bitmask[KVM_PMC_FIXED] = BIT_ULL(edx.split.bit_width_fixed) - 1;
+       entry = kvm_find_cpuid_entry_index(vcpu, 7, 0);
+       if (entry &&
+           (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
+           (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) {
+               pmu->reserved_bits ^= HSW_IN_TX;
+               pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
        }
 
+       perf_capabilities = vcpu_get_perf_capabilities(vcpu);
+       if (intel_pmu_lbr_is_compatible(vcpu) &&
+           (perf_capabilities & PERF_CAP_LBR_FMT))
+               memcpy(&lbr_desc->records, &vmx_lbr_caps, sizeof(vmx_lbr_caps));
+       else
+               lbr_desc->records.nr = 0;
+
+       if (lbr_desc->records.nr)
+               bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);
+
+       if (pmu->version == 1)
+               return;
+
+       pmu->nr_arch_fixed_counters = min_t(int, edx.split.num_counters_fixed,
+                                           kvm_pmu_cap.num_counters_fixed);
+       edx.split.bit_width_fixed = min_t(int, edx.split.bit_width_fixed,
+                                         kvm_pmu_cap.bit_width_fixed);
+       pmu->counter_bitmask[KVM_PMC_FIXED] = BIT_ULL(edx.split.bit_width_fixed) - 1;
+
        intel_pmu_enable_fixed_counter_bits(pmu, INTEL_FIXED_0_KERNEL |
                                                 INTEL_FIXED_0_USER |
                                                 INTEL_FIXED_0_ENABLE_PMI);
@@ -571,24 +588,6 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
                pmu->global_status_rsvd &=
                                ~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;
 
-       entry = kvm_find_cpuid_entry_index(vcpu, 7, 0);
-       if (entry &&
-           (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
-           (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) {
-               pmu->reserved_bits ^= HSW_IN_TX;
-               pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
-       }
-
-       perf_capabilities = vcpu_get_perf_capabilities(vcpu);
-       if (intel_pmu_lbr_is_compatible(vcpu) &&
-           (perf_capabilities & PERF_CAP_LBR_FMT))
-               memcpy(&lbr_desc->records, &vmx_lbr_caps, sizeof(vmx_lbr_caps));
-       else
-               lbr_desc->records.nr = 0;
-
-       if (lbr_desc->records.nr)
-               bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);
-
        if (perf_capabilities & PERF_CAP_PEBS_FORMAT) {
                if (perf_capabilities & PERF_CAP_PEBS_BASELINE) {
                        pmu->pebs_enable_rsvd = counter_rsvd;