perf/x86/intel: Introduce pairs of PEBS static calls
author    Dapeng Mi <dapeng1.mi@linux.intel.com>
          Tue, 15 Apr 2025 11:44:12 +0000 (11:44 +0000)
committer Ingo Molnar <mingo@kernel.org>
          Thu, 17 Apr 2025 12:21:24 +0000 (14:21 +0200)
Arch-PEBS retires the IA32_PEBS_ENABLE and MSR_PEBS_DATA_CFG MSRs, so
intel_pmu_pebs_enable/disable() and intel_pmu_pebs_enable/disable_all()
no longer need to be called for arch-PEBS.

To make the code cleaner, introduce the static calls
x86_pmu_pebs_enable/disable() and x86_pmu_pebs_enable/disable_all()
instead of adding an "x86_pmu.arch_pebs" check directly in these helpers.
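
Condensed from the hunks below, the pattern for each of the four new
callbacks looks like this (abridged; only the enable_all member shown):

    /* arch/x86/events/perf_event.h */
    DECLARE_STATIC_CALL(x86_pmu_pebs_enable_all, *x86_pmu.pebs_enable_all);

    /* arch/x86/events/core.c */
    DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_enable_all, *x86_pmu.pebs_enable_all);

    static void x86_pmu_static_call_update(void)
    {
            ...
            static_call_update(x86_pmu_pebs_enable_all, x86_pmu.pebs_enable_all);
    }

    /* arch/x86/events/intel/core.c, call site */
    static_call_cond(x86_pmu_pebs_enable_all)();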

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lkml.kernel.org/r/20250415114428.341182-7-dapeng1.mi@linux.intel.com
arch/x86/events/core.c
arch/x86/events/intel/core.c
arch/x86/events/intel/ds.c
arch/x86/events/perf_event.h

index cae213296a6327e15218587f0dad5365e90ea78c..995df8f392b679e3208f69f6adfb0b8682bdf20d 100644 (file)
@@ -95,6 +95,11 @@ DEFINE_STATIC_CALL_NULL(x86_pmu_filter, *x86_pmu.filter);
 
 DEFINE_STATIC_CALL_NULL(x86_pmu_late_setup, *x86_pmu.late_setup);
 
+DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_enable, *x86_pmu.pebs_enable);
+DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_disable, *x86_pmu.pebs_disable);
+DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_enable_all, *x86_pmu.pebs_enable_all);
+DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_disable_all, *x86_pmu.pebs_disable_all);
+
 /*
  * This one is magic, it will get called even when PMU init fails (because
  * there is no PMU), in which case it should simply return NULL.
@@ -2049,6 +2054,11 @@ static void x86_pmu_static_call_update(void)
        static_call_update(x86_pmu_filter, x86_pmu.filter);
 
        static_call_update(x86_pmu_late_setup, x86_pmu.late_setup);
+
+       static_call_update(x86_pmu_pebs_enable, x86_pmu.pebs_enable);
+       static_call_update(x86_pmu_pebs_disable, x86_pmu.pebs_disable);
+       static_call_update(x86_pmu_pebs_enable_all, x86_pmu.pebs_enable_all);
+       static_call_update(x86_pmu_pebs_disable_all, x86_pmu.pebs_disable_all);
 }
 
 static void _x86_pmu_read(struct perf_event *event)
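
A note on the update step above (this is how the kernel's static-call
API behaves in general, not something introduced by this patch):

    /*
     * static_call_update() re-patches the call site to the new target.
     * A NULL target is legal for void-returning calls defined with
     * DEFINE_STATIC_CALL_NULL(): invoking such a call through
     * static_call_cond() degenerates to a no-op instead of an indirect
     * call through a NULL pointer.
     */
    static_call_update(x86_pmu_pebs_enable_all, x86_pmu.pebs_enable_all);
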
index 7bbc7a7402422e4d7f5992f396923e2b32ac2122..cd632920731139ca2c0df9d6cce7259f6c3f6e43 100644 (file)
@@ -2306,7 +2306,7 @@ static __always_inline void __intel_pmu_disable_all(bool bts)
 static __always_inline void intel_pmu_disable_all(void)
 {
        __intel_pmu_disable_all(true);
-       intel_pmu_pebs_disable_all();
+       static_call_cond(x86_pmu_pebs_disable_all)();
        intel_pmu_lbr_disable_all();
 }
 
@@ -2338,7 +2338,7 @@ static void __intel_pmu_enable_all(int added, bool pmi)
 
 static void intel_pmu_enable_all(int added)
 {
-       intel_pmu_pebs_enable_all();
+       static_call_cond(x86_pmu_pebs_enable_all)();
        __intel_pmu_enable_all(added, false);
 }
 
@@ -2595,7 +2595,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
         * so we don't trigger the event without PEBS bit set.
         */
        if (unlikely(event->attr.precise_ip))
-               intel_pmu_pebs_disable(event);
+               static_call(x86_pmu_pebs_disable)(event);
 }
 
 static void intel_pmu_assign_event(struct perf_event *event, int idx)
@@ -2948,7 +2948,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
        int idx = hwc->idx;
 
        if (unlikely(event->attr.precise_ip))
-               intel_pmu_pebs_enable(event);
+               static_call(x86_pmu_pebs_enable)(event);
 
        switch (idx) {
        case 0 ... INTEL_PMC_IDX_FIXED - 1:
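
Note the two call flavours used above: the per-event paths use
static_call() while the PMU-wide paths use static_call_cond(). A
plausible reading, not spelled out in the changelog: precise_ip events
are only created when PEBS is supported, i.e. when the pebs_enable/
pebs_disable pointers have been installed, so an unconditional
static_call() is safe there, whereas intel_pmu_enable_all() and
intel_pmu_disable_all() run on every PMU transition and must tolerate
a NULL target:

    /* Sketch of the two flavours (illustration, not part of the diff): */
    if (unlikely(event->attr.precise_ip))
            static_call(x86_pmu_pebs_enable)(event); /* target known non-NULL */

    static_call_cond(x86_pmu_pebs_disable_all)();    /* NULL target -> no-op */
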
index 1d6b3fa6a8eb788df5e3ccfc79572908d7531a23..e216622b94dc24d79098baa9fb07cbaf223cd9c6 100644 (file)
@@ -2679,6 +2679,11 @@ void __init intel_pebs_init(void)
                if (format < 4)
                        x86_pmu.intel_cap.pebs_baseline = 0;
 
+               x86_pmu.pebs_enable = intel_pmu_pebs_enable;
+               x86_pmu.pebs_disable = intel_pmu_pebs_disable;
+               x86_pmu.pebs_enable_all = intel_pmu_pebs_enable_all;
+               x86_pmu.pebs_disable_all = intel_pmu_pebs_disable_all;
+
                switch (format) {
                case 0:
                        pr_cont("PEBS fmt0%c, ", pebs_type);
index 2ef407d0a7e216a295c0a31a16e6912f131409a8..d201e6ac2edeeb6b20dac39a8a404f3cfef8ea04 100644 (file)
@@ -808,6 +808,10 @@ struct x86_pmu {
        int             (*hw_config)(struct perf_event *event);
        int             (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
        void            (*late_setup)(void);
+       void            (*pebs_enable)(struct perf_event *event);
+       void            (*pebs_disable)(struct perf_event *event);
+       void            (*pebs_enable_all)(void);
+       void            (*pebs_disable_all)(void);
        unsigned        eventsel;
        unsigned        perfctr;
        unsigned        fixedctr;
@@ -1120,6 +1124,10 @@ DECLARE_STATIC_CALL(x86_pmu_set_period, *x86_pmu.set_period);
 DECLARE_STATIC_CALL(x86_pmu_update,     *x86_pmu.update);
 DECLARE_STATIC_CALL(x86_pmu_drain_pebs,        *x86_pmu.drain_pebs);
 DECLARE_STATIC_CALL(x86_pmu_late_setup,        *x86_pmu.late_setup);
+DECLARE_STATIC_CALL(x86_pmu_pebs_enable, *x86_pmu.pebs_enable);
+DECLARE_STATIC_CALL(x86_pmu_pebs_disable, *x86_pmu.pebs_disable);
+DECLARE_STATIC_CALL(x86_pmu_pebs_enable_all, *x86_pmu.pebs_enable_all);
+DECLARE_STATIC_CALL(x86_pmu_pebs_disable_all, *x86_pmu.pebs_disable_all);
 
 static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)
 {