perf/x86: Add config_mask to represent EVENTSEL bitmask
author Kan Liang <kan.liang@linux.intel.com>
Wed, 26 Jun 2024 14:35:38 +0000 (07:35 -0700)
committer Peter Zijlstra <peterz@infradead.org>
Thu, 4 Jul 2024 14:00:39 +0000 (16:00 +0200)
Different vendors may support different fields in the EVENTSEL MSR. For
example, Intel introduces the new umask2 and eq fields in the EVENTSEL
MSR starting from Perfmon version 6. However, a fixed mask,
X86_RAW_EVENT_MASK, is currently used to filter attr.config.

Introduce a new config_mask to record the actually supported EVENTSEL
bitmask. For now, apply it only to the existing code. No functional
change.
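
The idea, sketched as a small standalone C program (an illustration only,
not kernel code; struct fake_pmu and the FAKE_* mask values are made-up
placeholders): each PMU records the EVENTSEL bits it actually supports in
config_mask, and attr.config is filtered with that per-PMU mask instead of
one fixed X86_RAW_EVENT_MASK.

/* Standalone illustration of per-PMU EVENTSEL filtering (not kernel code). */
#include <stdint.h>
#include <stdio.h>

/* Illustrative masks only; the real bit layout is vendor/architecture defined. */
#define FAKE_RAW_EVENT_MASK   0x000000ffffULL   /* assumed legacy bitmask    */
#define FAKE_EXTENDED_MASK    0x0000ffffffULL   /* assumed wider vendor mask */

struct fake_pmu {
	const char *name;
	uint64_t    config_mask;   /* EVENTSEL bits supported by this PMU */
};

/* Mirrors the intent of x86_pmu_get_event_config(): keep only supported bits. */
static uint64_t get_event_config(const struct fake_pmu *pmu, uint64_t attr_config)
{
	return attr_config & pmu->config_mask;
}

int main(void)
{
	struct fake_pmu legacy = { "legacy",  FAKE_RAW_EVENT_MASK };
	struct fake_pmu wide   = { "vendor6", FAKE_EXTENDED_MASK };
	uint64_t config = 0x0000123456ULL;     /* raw user-supplied attr.config */

	printf("%s: %#llx\n", legacy.name,
	       (unsigned long long)get_event_config(&legacy, config));
	printf("%s: %#llx\n", wide.name,
	       (unsigned long long)get_event_config(&wide, config));
	return 0;
}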

Co-developed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Reviewed-by: Ian Rogers <irogers@google.com>
Link: https://lkml.kernel.org/r/20240626143545.480761-7-kan.liang@linux.intel.com
arch/x86/events/core.c
arch/x86/events/intel/core.c
arch/x86/events/perf_event.h

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 848dbe9cbd0eb9d4ae3cc5b4d802618e14cd3b45..8ea1c988e19ba7b3aa6f8a4c3d84b578aa11c4e6 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -624,7 +624,7 @@ int x86_pmu_hw_config(struct perf_event *event)
                event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;
 
        if (event->attr.type == event->pmu->type)
-               event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
+               event->hw.config |= x86_pmu_get_event_config(event);
 
        if (event->attr.sample_period && x86_pmu.limit_period) {
                s64 left = event->attr.sample_period;
@@ -2098,6 +2098,9 @@ static int __init init_hw_perf_events(void)
        if (!x86_pmu.intel_ctrl)
                x86_pmu.intel_ctrl = x86_pmu.cntr_mask64;
 
+       if (!x86_pmu.config_mask)
+               x86_pmu.config_mask = X86_RAW_EVENT_MASK;
+
        perf_events_lapic_init();
        register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
 
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 6a6f1f4b92055d15c3de5f0455f7e533603ac7a9..6e42ba0e6b727d36df81ffe306befe52f72a6e08 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -6144,6 +6144,7 @@ static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus)
                pmu->cntr_mask64 = x86_pmu.cntr_mask64;
                pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64;
                pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64);
+               pmu->config_mask = X86_RAW_EVENT_MASK;
                pmu->unconstrained = (struct event_constraint)
                                     __EVENT_CONSTRAINT(0, pmu->cntr_mask64,
                                                        0, x86_pmu_num_counters(&pmu->pmu), 0, 0);
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 493bc9f70ffe1c2b9dbd8258e235a60d72790c18..55468ea89d23dd7618f88b180990020dd306352c 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -695,6 +695,7 @@ struct x86_hybrid_pmu {
        union perf_capabilities         intel_cap;
        u64                             intel_ctrl;
        u64                             pebs_events_mask;
+       u64                             config_mask;
        union {
                        u64             cntr_mask64;
                        unsigned long   cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
@@ -790,6 +791,7 @@ struct x86_pmu {
        int             (*rdpmc_index)(int index);
        u64             (*event_map)(int);
        int             max_events;
+       u64             config_mask;
        union {
                        u64             cntr_mask64;
                        unsigned long   cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
@@ -1241,6 +1243,11 @@ static inline int x86_pmu_max_num_counters_fixed(struct pmu *pmu)
        return fls64(hybrid(pmu, fixed_cntr_mask64));
 }
 
+static inline u64 x86_pmu_get_event_config(struct perf_event *event)
+{
+       return event->attr.config & hybrid(event->pmu, config_mask);
+}
+
 extern struct event_constraint emptyconstraint;
 
 extern struct event_constraint unconstrained;
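
As a follow-up illustration of the init-time fallback added above in
init_hw_perf_events() (again a standalone sketch with made-up FAKE_*
values, not kernel code): a vendor init path that supports extra EVENTSEL
bits sets config_mask before the generic init runs, and the default
X86_RAW_EVENT_MASK is only filled in when nothing was set.

/* Standalone sketch of the init-time fallback (not kernel code). */
#include <stdint.h>
#include <stdio.h>

#define FAKE_RAW_EVENT_MASK  0x000000ffffULL  /* assumed default bitmask        */
#define FAKE_UMASK2_BITS     0x00ff000000ULL  /* hypothetical extra vendor bits */

struct fake_x86_pmu {
	uint64_t config_mask;  /* 0 until a vendor init path sets it */
};

/* Mirrors the hunk in init_hw_perf_events(): default the mask if unset. */
static void init_hw_perf_events_sketch(struct fake_x86_pmu *pmu)
{
	if (!pmu->config_mask)
		pmu->config_mask = FAKE_RAW_EVENT_MASK;
}

int main(void)
{
	struct fake_x86_pmu plain  = { 0 };
	struct fake_x86_pmu vendor = { FAKE_RAW_EVENT_MASK | FAKE_UMASK2_BITS };

	init_hw_perf_events_sketch(&plain);   /* gets the default mask        */
	init_hw_perf_events_sketch(&vendor);  /* keeps the vendor-chosen mask */

	printf("plain:  %#llx\n", (unsigned long long)plain.config_mask);
	printf("vendor: %#llx\n", (unsigned long long)vendor.config_mask);
	return 0;
}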