perf/x86/intel: Avoid disable PMU if !cpuc->enabled in sample read
author     Kan Liang <kan.liang@linux.intel.com>
           Tue, 21 Jan 2025 15:23:01 +0000 (07:23 -0800)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Thu, 10 Apr 2025 12:37:42 +0000 (14:37 +0200)
commit f9bdf1f953392c9edd69a7f884f78c0390127029 upstream.

The WARN_ON(this_cpu_read(cpu_hw_events.enabled)) in
intel_pmu_save_and_restart_reload() is triggered when doing a sampling
read of topdown events.

In an NMI handler, cpu_hw_events.enabled is set directly and used to
indicate the status of the core PMU. The generic pmu->pmu_disable_count,
which is updated by the perf_pmu_disable()/perf_pmu_enable() pair, is
not touched. However, that pair is invoked when a sampling read happens
inside an NMI handler, so cpuc->enabled is mistakenly set back by
perf_pmu_enable().
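
To make the failure mode concrete, here is a condensed sketch of the
sequence (an illustrative paraphrase, not the exact kernel code; the
call chain that reaches the read may vary):

	intel_pmu_handle_irq()                  /* NMI entry */
	    cpuc->enabled = 0;                  /* core PMU state tracked directly; */
	    __intel_pmu_disable_all();          /* pmu->pmu_disable_count untouched  */
	    ...
	    /* sampling read of a topdown event, still inside the NMI: */
	    perf_pmu_disable(pmu);              /* count 0 -> 1; the x86 disable
	                                           callback sees cpuc->enabled == 0
	                                           and returns early */
	    static_call(intel_pmu_update_topdown_event)(event);
	    perf_pmu_enable(pmu);               /* count 1 -> 0; re-enables the PMU
	                                           and sets cpuc->enabled = 1 */
	    ...
	    intel_pmu_save_and_restart_reload()
	        WARN_ON(this_cpu_read(cpu_hw_events.enabled));  /* fires */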

Avoid disabling the PMU if the core PMU is already disabled, and merge
the topdown and auto-reload read logic into a single path.
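
The shape of the merged path, condensed from the
arch/x86/events/intel/core.c hunk below: the core PMU state is saved
and restored by hand instead of going through the generic
perf_pmu_disable()/perf_pmu_enable() pair, so a PMU that is already
disabled (as in the NMI handler) is left alone:

	bool pmu_enabled = cpuc->enabled;

	cpuc->enabled = 0;
	if (pmu_enabled)                /* skip if already disabled */
		intel_pmu_disable_all();

	/* update topdown counts or drain the PEBS buffer */

	cpuc->enabled = pmu_enabled;    /* restore the saved state */
	if (pmu_enabled)
		intel_pmu_enable_all(0);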

Fixes: 7b2c05a15d29 ("perf/x86/intel: Generic support for hardware TopDown metrics")
Suggested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20250121152303.3128733-2-kan.liang@linux.intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/events/intel/core.c
arch/x86/events/intel/ds.c
arch/x86/events/perf_event.h

index 1fcc8fd50083c6c6d5dd7fe1e15b9184deb0d73d..66d5782df18f8c26353e97bfa9c2642b842d1631 100644 (file)
@@ -2720,28 +2720,33 @@ static u64 adl_update_topdown_event(struct perf_event *event)
 
 DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, x86_perf_event_update);
 
-static void intel_pmu_read_topdown_event(struct perf_event *event)
+static void intel_pmu_read_event(struct perf_event *event)
 {
-       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       if (event->hw.flags & (PERF_X86_EVENT_AUTO_RELOAD | PERF_X86_EVENT_TOPDOWN)) {
+               struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+               bool pmu_enabled = cpuc->enabled;
 
-       /* Only need to call update_topdown_event() once for group read. */
-       if ((cpuc->txn_flags & PERF_PMU_TXN_READ) &&
-           !is_slots_event(event))
-               return;
+               /* Only need to call update_topdown_event() once for group read. */
+               if (is_metric_event(event) && (cpuc->txn_flags & PERF_PMU_TXN_READ))
+                       return;
 
-       perf_pmu_disable(event->pmu);
-       static_call(intel_pmu_update_topdown_event)(event);
-       perf_pmu_enable(event->pmu);
-}
+               cpuc->enabled = 0;
+               if (pmu_enabled)
+                       intel_pmu_disable_all();
 
-static void intel_pmu_read_event(struct perf_event *event)
-{
-       if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
-               intel_pmu_auto_reload_read(event);
-       else if (is_topdown_count(event))
-               intel_pmu_read_topdown_event(event);
-       else
-               x86_perf_event_update(event);
+               if (is_topdown_event(event))
+                       static_call(intel_pmu_update_topdown_event)(event);
+               else
+                       intel_pmu_drain_pebs_buffer();
+
+               cpuc->enabled = pmu_enabled;
+               if (pmu_enabled)
+                       intel_pmu_enable_all(0);
+
+               return;
+       }
+
+       x86_perf_event_update(event);
 }
 
 static void intel_pmu_enable_fixed(struct perf_event *event)
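
With the read paths merged above, the auto-reload case now calls
intel_pmu_drain_pebs_buffer() directly, which is why the ds.c hunk
below drops the intel_pmu_auto_reload_read() wrapper and makes the
drain function non-static, with the matching declaration change in
perf_event.h.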
index 7bba8630065db9f4b6047d714fb56f8840092da0..dcb1e9b8d86629f3beb48109e34dabdfcb94dc84 100644 (file)
@@ -843,7 +843,7 @@ unlock:
        return 1;
 }
 
-static inline void intel_pmu_drain_pebs_buffer(void)
+void intel_pmu_drain_pebs_buffer(void)
 {
        struct perf_sample_data data;
 
@@ -1965,15 +1965,6 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit)
        return NULL;
 }
 
-void intel_pmu_auto_reload_read(struct perf_event *event)
-{
-       WARN_ON(!(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD));
-
-       perf_pmu_disable(event->pmu);
-       intel_pmu_drain_pebs_buffer();
-       perf_pmu_enable(event->pmu);
-}
-
 /*
  * Special variant of intel_pmu_save_and_restart() for auto-reload.
  */
index ff646c5d4642ef2816b45c926c92d40c5eab6526..4564521296acb3dd043a66ecb3747942673a7931 100644 (file)
@@ -1540,7 +1540,7 @@ void intel_pmu_pebs_disable_all(void);
 
 void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
 
-void intel_pmu_auto_reload_read(struct perf_event *event);
+void intel_pmu_drain_pebs_buffer(void);
 
 void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr);