git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: selftests: Track unavailable_mask for PMU events as 32-bit value
author: Sean Christopherson <seanjc@google.com>
Fri, 19 Sep 2025 21:46:45 +0000 (14:46 -0700)
committer: Sean Christopherson <seanjc@google.com>
Tue, 23 Sep 2025 15:38:59 +0000 (08:38 -0700)
Track the mask of "unavailable" PMU events as a 32-bit value.  While bits
31:9 are currently reserved, silently truncating those bits is unnecessary
and asking for missed coverage.  To avoid running afoul of the sanity check
in vcpu_set_cpuid_property(), explicitly adjust the mask based on the
non-reserved bits as reported by KVM's supported CPUID.

Opportunistically update the "all ones" testcase to pass -1u instead of
0xff.

Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Tested-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Link: https://lore.kernel.org/r/20250919214648.1585683-3-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
tools/testing/selftests/kvm/x86/pmu_counters_test.c

index 8aaaf25b6111d21be654ce125a5d470f68d00d2a..1ef038c4c73ffd6ade7365422bc428dfb8f3740d 100644 (file)
@@ -311,7 +311,7 @@ static void guest_test_arch_events(void)
 }
 
 static void test_arch_events(uint8_t pmu_version, uint64_t perf_capabilities,
-                            uint8_t length, uint8_t unavailable_mask)
+                            uint8_t length, uint32_t unavailable_mask)
 {
        struct kvm_vcpu *vcpu;
        struct kvm_vm *vm;
@@ -320,6 +320,9 @@ static void test_arch_events(uint8_t pmu_version, uint64_t perf_capabilities,
        if (!pmu_version)
                return;
 
+       unavailable_mask &= GENMASK(X86_PROPERTY_PMU_EVENTS_MASK.hi_bit,
+                                   X86_PROPERTY_PMU_EVENTS_MASK.lo_bit);
+
        vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_test_arch_events,
                                         pmu_version, perf_capabilities);
 
@@ -630,7 +633,7 @@ static void test_intel_counters(void)
                         */
                        for (j = 0; j <= NR_INTEL_ARCH_EVENTS + 1; j++) {
                                test_arch_events(v, perf_caps[i], j, 0);
-                               test_arch_events(v, perf_caps[i], j, 0xff);
+                               test_arch_events(v, perf_caps[i], j, -1u);
 
                                for (k = 0; k < NR_INTEL_ARCH_EVENTS; k++)
                                        test_arch_events(v, perf_caps[i], j, BIT(k));