perf/core: Fix broken throttling when max_samples_per_tick=1
author    Qing Wang <wangqing7171@gmail.com>
          Sat, 5 Apr 2025 14:16:35 +0000 (22:16 +0800)
committer Peter Zijlstra <peterz@infradead.org>
          Fri, 25 Apr 2025 12:55:22 +0000 (14:55 +0200)
According to the throttling mechanism, the number of PMU interrupts must
not exceed max_samples_per_tick within one tick. But this mechanism is
ineffective when max_samples_per_tick=1, because the throttle check is
skipped on the first interrupt of a tick and is only performed when the
second interrupt arrives.
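
As a minimal user-space sketch of the OLD control flow (not the kernel
function itself; the helper name and the first_in_tick flag are made up
for illustration):

#include <stdio.h>

#define MAX_INTERRUPTS	(~0U)		/* simplified stand-in */

static unsigned int max_samples_per_tick = 1;

/* Returns 1 when the event gets throttled, 0 otherwise. */
static int account_interrupt_old(unsigned int *interrupts, int first_in_tick)
{
	if (first_in_tick) {
		*interrupts = 1;	/* throttle check skipped here */
	} else {
		(*interrupts)++;
		if (*interrupts > max_samples_per_tick) {
			*interrupts = MAX_INTERRUPTS;
			return 1;	/* throttled */
		}
	}
	return 0;
}

int main(void)
{
	unsigned int interrupts = 0;

	printf("irq 1 throttled: %d\n", account_interrupt_old(&interrupts, 1));
	printf("irq 2 throttled: %d\n", account_interrupt_old(&interrupts, 0));
	return 0;
}

With max_samples_per_tick = 1 this prints "throttled: 0" for the first
interrupt and only throttles the second one, i.e. two interrupts are
accounted per tick instead of one.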

Within a single tick the impact of this bug may be small, but over a
longer time scale it should not be underestimated.

When max_samples_per_tick = 1:
Allowed-interrupts-per-second max-samples-per-second  default-HZ  ARCH
200                           100                     100         X86
500                           250                     250         ARM64
...
Obviously, the PMU interrupt rate far exceeds what the user expects.
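
The table follows directly from the arithmetic: the old code admits one
extra, unchecked interrupt per tick. A throwaway check (the cfg[] array
is made up here; only the HZ values come from the table above):

#include <stdio.h>

int main(void)
{
	static const struct { const char *arch; int hz; } cfg[] = {
		{ "X86",   100 },
		{ "ARM64", 250 },
	};
	const int max_samples_per_tick = 1;
	unsigned int i;

	for (i = 0; i < sizeof(cfg) / sizeof(cfg[0]); i++) {
		/* old code: (max + 1) interrupts slip through per tick */
		int allowed  = (max_samples_per_tick + 1) * cfg[i].hz;
		int expected =  max_samples_per_tick      * cfg[i].hz;

		printf("%-5s HZ=%d allowed=%d/s expected=%d/s\n",
		       cfg[i].arch, cfg[i].hz, allowed, expected);
	}
	return 0;
}

This reproduces the 200-vs-100 (X86) and 500-vs-250 (ARM64) rates shown
above.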

Fixes: e050e3f0a71b ("perf: Fix broken interrupt rate throttling")
Signed-off-by: Qing Wang <wangqing7171@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20250405141635.243786-3-wangqing7171@gmail.com
kernel/events/core.c

index 3c69a1a3f41c2e96a760b28213d3fdfc56001d97..05136e835042f0c63d67b4f974a6206bf66df91e 100644 (file)
@@ -10065,14 +10065,14 @@ __perf_event_account_interrupt(struct perf_event *event, int throttle)
                hwc->interrupts = 1;
        } else {
                hwc->interrupts++;
-               if (unlikely(throttle &&
-                            hwc->interrupts > max_samples_per_tick)) {
-                       __this_cpu_inc(perf_throttled_count);
-                       tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
-                       hwc->interrupts = MAX_INTERRUPTS;
-                       perf_log_throttle(event, 0);
-                       ret = 1;
-               }
+       }
+
+       if (unlikely(throttle && hwc->interrupts >= max_samples_per_tick)) {
+               __this_cpu_inc(perf_throttled_count);
+               tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
+               hwc->interrupts = MAX_INTERRUPTS;
+               perf_log_throttle(event, 0);
+               ret = 1;
        }
 
        if (event->attr.freq) {