git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
perf: Fix event_function_call() locking
Author: Peter Zijlstra <peterz@infradead.org>
Wed, 7 Aug 2024 11:29:27 +0000 (13:29 +0200)
Committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 10 Oct 2024 10:00:36 +0000 (12:00 +0200)
[ Upstream commit 558abc7e3f895049faa46b08656be4c60dc6e9fd ]

All the event_function/@func call context already uses perf_ctx_lock()
except for the !ctx->is_active case. Make it all consistent.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Reviewed-by: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20240807115550.138301094@infradead.org
Signed-off-by: Sasha Levin <sashal@kernel.org>
kernel/events/core.c

index 081d9692ce7476581c5458129e1d52d6bbb106d5..e18a07de9920a41b6680daa407aa325c07d350da 100644 (file)
@@ -263,6 +263,7 @@ unlock:
 static void event_function_call(struct perf_event *event, event_f func, void *data)
 {
        struct perf_event_context *ctx = event->ctx;
+       struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
        struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
        struct event_function_struct efs = {
                .event = event,
@@ -291,22 +292,22 @@ again:
        if (!task_function_call(task, event_function, &efs))
                return;
 
-       raw_spin_lock_irq(&ctx->lock);
+       perf_ctx_lock(cpuctx, ctx);
        /*
         * Reload the task pointer, it might have been changed by
         * a concurrent perf_event_context_sched_out().
         */
        task = ctx->task;
        if (task == TASK_TOMBSTONE) {
-               raw_spin_unlock_irq(&ctx->lock);
+               perf_ctx_unlock(cpuctx, ctx);
                return;
        }
        if (ctx->is_active) {
-               raw_spin_unlock_irq(&ctx->lock);
+               perf_ctx_unlock(cpuctx, ctx);
                goto again;
        }
        func(event, NULL, ctx, data);
-       raw_spin_unlock_irq(&ctx->lock);
+       perf_ctx_unlock(cpuctx, ctx);
 }
 
 /*