perf/x86/intel: Always reprogram ACR events to prevent stale masks
author    Dapeng Mi <dapeng1.mi@linux.intel.com>    Thu, 30 Apr 2026 00:25:55 +0000 (08:25 +0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    Thu, 14 May 2026 13:31:15 +0000 (15:31 +0200)
commit 8ba0b706a485b1e607594cf4210786d517ad1611 upstream.

Members of an ACR group are logically linked via a bitmask of their
hardware counter indices. If some members of the group are assigned new
hardware counters during rescheduling, even events that keep their
original counter index must be updated with a new mask.

Without this, an event will continue to use a stale acr_mask that
references the old indices of its group peers. Ensure all ACR events are
reprogrammed during the scheduling path to maintain consistency across
the group.

Fixes: ec980e4facef ("perf/x86/intel: Support auto counter reload")
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: stable@vger.kernel.org
Link: https://patch.msgid.link/20260430002558.712334-3-dapeng1.mi@linux.intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 810ab21ffd991322600ece5c299bd5287a0286bc..4b9e105309c6a92448bfa3c2a8dfe84b07475356 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1294,13 +1294,16 @@ int x86_perf_rdpmc_index(struct perf_event *event)
        return event->hw.event_base_rdpmc;
 }
 
-static inline int match_prev_assignment(struct hw_perf_event *hwc,
+static inline int match_prev_assignment(struct perf_event *event,
                                        struct cpu_hw_events *cpuc,
                                        int i)
 {
+       struct hw_perf_event *hwc = &event->hw;
+
        return hwc->idx == cpuc->assign[i] &&
-               hwc->last_cpu == smp_processor_id() &&
-               hwc->last_tag == cpuc->tags[i];
+              hwc->last_cpu == smp_processor_id() &&
+              hwc->last_tag == cpuc->tags[i] &&
+              !is_acr_event_group(event);
 }
 
 static void x86_pmu_start(struct perf_event *event, int flags);
@@ -1346,7 +1349,7 @@ static void x86_pmu_enable(struct pmu *pmu)
                         * - no other event has used the counter since
                         */
                        if (hwc->idx == -1 ||
-                           match_prev_assignment(hwc, cpuc, i))
+                           match_prev_assignment(event, cpuc, i))
                                continue;
 
                        /*
@@ -1367,7 +1370,7 @@ static void x86_pmu_enable(struct pmu *pmu)
                        event = cpuc->event_list[i];
                        hwc = &event->hw;
 
-                       if (!match_prev_assignment(hwc, cpuc, i))
+                       if (!match_prev_assignment(event, cpuc, i))
                                x86_assign_hw_event(event, cpuc, i);
                        else if (i < n_running)
                                continue;
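
With the added !is_acr_event_group(event) check, match_prev_assignment()
can never return true for a member of an ACR group, so the scheduling
loops above no longer skip such events as "unchanged" and always fall
through to x86_assign_hw_event(), reprogramming each member so that its
mask reflects the group's current counter assignments.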