perf/core: Add the is_event_in_freq_mode() helper to simplify the code
author     Kan Liang <kan.liang@linux.intel.com>
           Fri, 16 May 2025 18:28:38 +0000 (11:28 -0700)
committer  Ingo Molnar <mingo@kernel.org>
           Sat, 17 May 2025 08:02:27 +0000 (10:02 +0200)
Add a helper to check if an event is in freq mode to improve readability.

No functional changes.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20250516182853.2610284-2-kan.liang@linux.intel.com
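
For background: an event is in frequency mode when it is opened with attr.freq = 1 and a non-zero attr.sample_freq, which asks the kernel to auto-adjust the sampling period toward a target sample rate instead of using a fixed attr.sample_period; this is exactly the condition the new helper tests. A minimal userspace sketch of opening such an event follows, assuming the standard perf_event_open(2) interface; the hardware event and the 4000 Hz target below are arbitrary example values.

#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.freq = 1;                  /* interpret sample_freq, not sample_period */
        attr.sample_freq = 4000;        /* ask for ~4000 samples/sec (example value) */
        attr.disabled = 1;
        attr.exclude_kernel = 1;
        attr.exclude_hv = 1;

        /* Monitor the calling thread on any CPU. */
        int fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }
        printf("frequency-mode event opened, fd=%d\n", fd);
        close(fd);
        return 0;
}
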
kernel/events/core.c

diff --git a/kernel/events/core.c b/kernel/events/core.c
index b8461074600bd4a4f08c0f2ea339b77c7441d5de..952340f1df9de88250955d98e0b4251105942036 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2351,6 +2351,11 @@ event_filter_match(struct perf_event *event)
               perf_cgroup_match(event);
 }
 
+static inline bool is_event_in_freq_mode(struct perf_event *event)
+{
+       return event->attr.freq && event->attr.sample_freq;
+}
+
 static void
 event_sched_out(struct perf_event *event, struct perf_event_context *ctx)
 {
@@ -2388,7 +2393,7 @@ event_sched_out(struct perf_event *event, struct perf_event_context *ctx)
 
        if (!is_software_event(event))
                cpc->active_oncpu--;
-       if (event->attr.freq && event->attr.sample_freq) {
+       if (is_event_in_freq_mode(event)) {
                ctx->nr_freq--;
                epc->nr_freq--;
        }
@@ -2686,7 +2691,7 @@ event_sched_in(struct perf_event *event, struct perf_event_context *ctx)
 
        if (!is_software_event(event))
                cpc->active_oncpu++;
-       if (event->attr.freq && event->attr.sample_freq) {
+       if (is_event_in_freq_mode(event)) {
                ctx->nr_freq++;
                epc->nr_freq++;
        }
@@ -4252,11 +4257,11 @@ static void perf_adjust_freq_unthr_events(struct list_head *event_list)
                if (hwc->interrupts == MAX_INTERRUPTS) {
                        hwc->interrupts = 0;
                        perf_log_throttle(event, 1);
-                       if (!event->attr.freq || !event->attr.sample_freq)
+                       if (!is_event_in_freq_mode(event))
                                event->pmu->start(event, 0);
                }
 
-               if (!event->attr.freq || !event->attr.sample_freq)
+               if (!is_event_in_freq_mode(event))
                        continue;
 
                /*
@@ -12848,7 +12853,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
        hwc = &event->hw;
        hwc->sample_period = attr->sample_period;
-       if (attr->freq && attr->sample_freq)
+       if (is_event_in_freq_mode(event))
                hwc->sample_period = 1;
        hwc->last_period = hwc->sample_period;