perf: Extract a few helpers
author     Peter Zijlstra <peterz@infradead.org>
           Wed, 7 Aug 2024 11:29:26 +0000 (13:29 +0200)
committer  Peter Zijlstra <peterz@infradead.org>
           Thu, 8 Aug 2024 10:27:31 +0000 (12:27 +0200)
The context time update code is repeated verbatim a few times; extract it into a
pair of helpers, ctx_time_update() and ctx_time_update_event().

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Reviewed-by: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20240807115550.031212518@infradead.org
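
For illustration only, the shape of the refactoring can be reproduced as a small,
self-contained userspace C sketch. The struct fields, the EVENT_TIME value, the stub
update_* bodies and main() below are simplified stand-ins and assumptions, not the
kernel's real definitions; only the helper's structure mirrors what the patch adds.

/*
 * Sketch of the pattern: the EVENT_TIME-guarded update sequence that was
 * open-coded at several call sites is folded into one inline helper.
 * Types, flag value and update_* bodies are stand-ins, not kernel code.
 */
#include <stdio.h>

#define EVENT_TIME 0x1          /* stand-in for the kernel's EVENT_TIME bit */

struct perf_event_context { int is_active; unsigned long time; };
struct perf_cpu_context   { unsigned long cgrp_time; };

/* Stubs standing in for the real time-accounting routines. */
static void update_context_time(struct perf_event_context *ctx)
{
        ctx->time++;
}

static void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx, int final)
{
        (void)final;
        cpuctx->cgrp_time++;
}

/*
 * The extracted helper: callers no longer repeat the is_active check
 * and the two update calls verbatim.
 */
static inline void
ctx_time_update(struct perf_cpu_context *cpuctx, struct perf_event_context *ctx)
{
        if (ctx->is_active & EVENT_TIME) {
                update_context_time(ctx);
                update_cgrp_time_from_cpuctx(cpuctx, 0);
        }
}

int main(void)
{
        struct perf_event_context ctx = { .is_active = EVENT_TIME };
        struct perf_cpu_context cpuctx = { 0 };

        /* A former open-coded call site collapses to a single line. */
        ctx_time_update(&cpuctx, &ctx);

        printf("ctx time %lu, cgrp time %lu\n", ctx.time, cpuctx.cgrp_time);
        return 0;
}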
diff --git a/kernel/events/core.c b/kernel/events/core.c
index dad2b9ac42c0b7a7b9778db0714ca20c1f68672f..eb03c9ab167003d060b5dd972f4756419df0e426 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2330,6 +2330,24 @@ group_sched_out(struct perf_event *group_event, struct perf_event_context *ctx)
                event_sched_out(event, ctx);
 }
 
+static inline void
+ctx_time_update(struct perf_cpu_context *cpuctx, struct perf_event_context *ctx)
+{
+       if (ctx->is_active & EVENT_TIME) {
+               update_context_time(ctx);
+               update_cgrp_time_from_cpuctx(cpuctx, false);
+       }
+}
+
+static inline void
+ctx_time_update_event(struct perf_event_context *ctx, struct perf_event *event)
+{
+       if (ctx->is_active & EVENT_TIME) {
+               update_context_time(ctx);
+               update_cgrp_time_from_event(event);
+       }
+}
+
 #define DETACH_GROUP   0x01UL
 #define DETACH_CHILD   0x02UL
 #define DETACH_DEAD    0x04UL
@@ -2349,10 +2367,7 @@ __perf_remove_from_context(struct perf_event *event,
        struct perf_event_pmu_context *pmu_ctx = event->pmu_ctx;
        unsigned long flags = (unsigned long)info;
 
-       if (ctx->is_active & EVENT_TIME) {
-               update_context_time(ctx);
-               update_cgrp_time_from_cpuctx(cpuctx, false);
-       }
+       ctx_time_update(cpuctx, ctx);
 
        /*
         * Ensure event_sched_out() switches to OFF, at the very least
@@ -2437,12 +2452,8 @@ static void __perf_event_disable(struct perf_event *event,
        if (event->state < PERF_EVENT_STATE_INACTIVE)
                return;
 
-       if (ctx->is_active & EVENT_TIME) {
-               update_context_time(ctx);
-               update_cgrp_time_from_event(event);
-       }
-
        perf_pmu_disable(event->pmu_ctx->pmu);
+       ctx_time_update_event(ctx, event);
 
        if (event == event->group_leader)
                group_sched_out(event, ctx);
@@ -4529,10 +4540,7 @@ static void __perf_event_read(void *info)
                return;
 
        raw_spin_lock(&ctx->lock);
-       if (ctx->is_active & EVENT_TIME) {
-               update_context_time(ctx);
-               update_cgrp_time_from_event(event);
-       }
+       ctx_time_update_event(ctx, event);
 
        perf_event_update_time(event);
        if (data->group)
@@ -4732,10 +4740,7 @@ again:
                 * May read while context is not active (e.g., thread is
                 * blocked), in that case we cannot update context time
                 */
-               if (ctx->is_active & EVENT_TIME) {
-                       update_context_time(ctx);
-                       update_cgrp_time_from_event(event);
-               }
+               ctx_time_update_event(ctx, event);
 
                perf_event_update_time(event);
                if (group)