git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: selftests: Validate more arch-events in pmu_counters_test
author: Dapeng Mi <dapeng1.mi@linux.intel.com>
Fri, 19 Sep 2025 21:46:47 +0000 (14:46 -0700)
committer: Sean Christopherson <seanjc@google.com>
Tue, 23 Sep 2025 15:38:59 +0000 (08:38 -0700)
Add support for 5 new architectural events (4 topdown level 1 metrics
events and LBR inserts event) that will first show up in Intel's
Clearwater Forest CPUs.  Detailed info about the new events can be found
in SDM section 21.2.7 "Pre-defined Architectural Performance Events".

Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Tested-by: Yi Lai <yi1.lai@intel.com>
[sean: drop "unavailable_mask" changes]
Tested-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Link: https://lore.kernel.org/r/20250919214648.1585683-5-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
tools/testing/selftests/kvm/include/x86/pmu.h
tools/testing/selftests/kvm/include/x86/processor.h
tools/testing/selftests/kvm/lib/x86/pmu.c
tools/testing/selftests/kvm/x86/pmu_counters_test.c

index 3c10c4dc0ae8f4c6101db5405e1ac55f68effd18..2aabda2da00256fb3509f7b9d85a5d181d1a5ab6 100644 (file)
 #define        INTEL_ARCH_BRANCHES_RETIRED             RAW_EVENT(0xc4, 0x00)
 #define        INTEL_ARCH_BRANCHES_MISPREDICTED        RAW_EVENT(0xc5, 0x00)
 #define        INTEL_ARCH_TOPDOWN_SLOTS                RAW_EVENT(0xa4, 0x01)
+#define        INTEL_ARCH_TOPDOWN_BE_BOUND             RAW_EVENT(0xa4, 0x02)
+#define        INTEL_ARCH_TOPDOWN_BAD_SPEC             RAW_EVENT(0x73, 0x00)
+#define        INTEL_ARCH_TOPDOWN_FE_BOUND             RAW_EVENT(0x9c, 0x01)
+#define        INTEL_ARCH_TOPDOWN_RETIRING             RAW_EVENT(0xc2, 0x02)
+#define        INTEL_ARCH_LBR_INSERTS                  RAW_EVENT(0xe4, 0x01)
 
 #define        AMD_ZEN_CORE_CYCLES                     RAW_EVENT(0x76, 0x00)
 #define        AMD_ZEN_INSTRUCTIONS_RETIRED            RAW_EVENT(0xc0, 0x00)
@@ -80,6 +85,11 @@ enum intel_pmu_architectural_events {
        INTEL_ARCH_BRANCHES_RETIRED_INDEX,
        INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX,
        INTEL_ARCH_TOPDOWN_SLOTS_INDEX,
+       INTEL_ARCH_TOPDOWN_BE_BOUND_INDEX,
+       INTEL_ARCH_TOPDOWN_BAD_SPEC_INDEX,
+       INTEL_ARCH_TOPDOWN_FE_BOUND_INDEX,
+       INTEL_ARCH_TOPDOWN_RETIRING_INDEX,
+       INTEL_ARCH_LBR_INSERTS_INDEX,
        NR_INTEL_ARCH_EVENTS,
 };
 
index efcc4b1de523a05c50453b6a2a76fd147ff2ec77..e8bad89fbb7f6ce4f2f48cdd06afb61350ed3010 100644 (file)
@@ -265,7 +265,7 @@ struct kvm_x86_cpu_property {
 #define X86_PROPERTY_PMU_NR_GP_COUNTERS                KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 8, 15)
 #define X86_PROPERTY_PMU_GP_COUNTERS_BIT_WIDTH KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 16, 23)
 #define X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 24, 31)
-#define X86_PROPERTY_PMU_EVENTS_MASK           KVM_X86_CPU_PROPERTY(0xa, 0, EBX, 0, 7)
+#define X86_PROPERTY_PMU_EVENTS_MASK           KVM_X86_CPU_PROPERTY(0xa, 0, EBX, 0, 12)
 #define X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK        KVM_X86_CPU_PROPERTY(0xa, 0, ECX, 0, 31)
 #define X86_PROPERTY_PMU_NR_FIXED_COUNTERS     KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 0, 4)
 #define X86_PROPERTY_PMU_FIXED_COUNTERS_BIT_WIDTH      KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 5, 12)
@@ -332,6 +332,11 @@ struct kvm_x86_pmu_feature {
 #define X86_PMU_FEATURE_BRANCH_INSNS_RETIRED           KVM_X86_PMU_FEATURE(EBX, 5)
 #define X86_PMU_FEATURE_BRANCHES_MISPREDICTED          KVM_X86_PMU_FEATURE(EBX, 6)
 #define X86_PMU_FEATURE_TOPDOWN_SLOTS                  KVM_X86_PMU_FEATURE(EBX, 7)
+#define X86_PMU_FEATURE_TOPDOWN_BE_BOUND               KVM_X86_PMU_FEATURE(EBX, 8)
+#define X86_PMU_FEATURE_TOPDOWN_BAD_SPEC               KVM_X86_PMU_FEATURE(EBX, 9)
+#define X86_PMU_FEATURE_TOPDOWN_FE_BOUND               KVM_X86_PMU_FEATURE(EBX, 10)
+#define X86_PMU_FEATURE_TOPDOWN_RETIRING               KVM_X86_PMU_FEATURE(EBX, 11)
+#define X86_PMU_FEATURE_LBR_INSERTS                    KVM_X86_PMU_FEATURE(EBX, 12)
 
 #define X86_PMU_FEATURE_INSNS_RETIRED_FIXED            KVM_X86_PMU_FEATURE(ECX, 0)
 #define X86_PMU_FEATURE_CPU_CYCLES_FIXED               KVM_X86_PMU_FEATURE(ECX, 1)
index f31f0427c17cb293a0f1bcf4982eb90fab4e28fc..5ab44bf54773722ec66a8691000c6b84d2f5a435 100644 (file)
@@ -19,6 +19,11 @@ const uint64_t intel_pmu_arch_events[] = {
        INTEL_ARCH_BRANCHES_RETIRED,
        INTEL_ARCH_BRANCHES_MISPREDICTED,
        INTEL_ARCH_TOPDOWN_SLOTS,
+       INTEL_ARCH_TOPDOWN_BE_BOUND,
+       INTEL_ARCH_TOPDOWN_BAD_SPEC,
+       INTEL_ARCH_TOPDOWN_FE_BOUND,
+       INTEL_ARCH_TOPDOWN_RETIRING,
+       INTEL_ARCH_LBR_INSERTS,
 };
 kvm_static_assert(ARRAY_SIZE(intel_pmu_arch_events) == NR_INTEL_ARCH_EVENTS);
 
index c6987a9b65bf594004a9d6851cd1a2f0360d50b2..24599d98f898d18e7b0cd2a8fdb9b00f65eeec21 100644 (file)
@@ -75,6 +75,11 @@ static struct kvm_intel_pmu_event intel_event_to_feature(uint8_t idx)
                [INTEL_ARCH_BRANCHES_RETIRED_INDEX]      = { X86_PMU_FEATURE_BRANCH_INSNS_RETIRED, X86_PMU_FEATURE_NULL },
                [INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX] = { X86_PMU_FEATURE_BRANCHES_MISPREDICTED, X86_PMU_FEATURE_NULL },
                [INTEL_ARCH_TOPDOWN_SLOTS_INDEX]         = { X86_PMU_FEATURE_TOPDOWN_SLOTS, X86_PMU_FEATURE_TOPDOWN_SLOTS_FIXED },
+               [INTEL_ARCH_TOPDOWN_BE_BOUND_INDEX]      = { X86_PMU_FEATURE_TOPDOWN_BE_BOUND, X86_PMU_FEATURE_NULL },
+               [INTEL_ARCH_TOPDOWN_BAD_SPEC_INDEX]      = { X86_PMU_FEATURE_TOPDOWN_BAD_SPEC, X86_PMU_FEATURE_NULL },
+               [INTEL_ARCH_TOPDOWN_FE_BOUND_INDEX]      = { X86_PMU_FEATURE_TOPDOWN_FE_BOUND, X86_PMU_FEATURE_NULL },
+               [INTEL_ARCH_TOPDOWN_RETIRING_INDEX]      = { X86_PMU_FEATURE_TOPDOWN_RETIRING, X86_PMU_FEATURE_NULL },
+               [INTEL_ARCH_LBR_INSERTS_INDEX]           = { X86_PMU_FEATURE_LBR_INSERTS, X86_PMU_FEATURE_NULL },
        };
 
        kvm_static_assert(ARRAY_SIZE(__intel_event_to_feature) == NR_INTEL_ARCH_EVENTS);
@@ -171,9 +176,12 @@ static void guest_assert_event_count(uint8_t idx, uint32_t pmc, uint32_t pmc_msr
                fallthrough;
        case INTEL_ARCH_CPU_CYCLES_INDEX:
        case INTEL_ARCH_REFERENCE_CYCLES_INDEX:
+       case INTEL_ARCH_TOPDOWN_BE_BOUND_INDEX:
+       case INTEL_ARCH_TOPDOWN_FE_BOUND_INDEX:
                GUEST_ASSERT_NE(count, 0);
                break;
        case INTEL_ARCH_TOPDOWN_SLOTS_INDEX:
+       case INTEL_ARCH_TOPDOWN_RETIRING_INDEX:
                __GUEST_ASSERT(count >= NUM_INSNS_RETIRED,
                               "Expected top-down slots >= %u, got count = %lu",
                               NUM_INSNS_RETIRED, count);