perf: Don't disable preemption in perf_pending_task().
author    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
          Thu, 4 Jul 2024 17:03:40 +0000 (19:03 +0200)
committer Peter Zijlstra <peterz@infradead.org>
          Tue, 9 Jul 2024 11:26:36 +0000 (13:26 +0200)
perf_pending_task() is invoked in task context and disables preemption
because perf_swevent_get_recursion_context() used to access per-CPU
variables. The other reason is to create an RCU read section while
accessing the perf_event.

The recursion counter is no longer a per-CPU counter, so disabling
preemption is no longer required. The RCU section is still needed and
must be created explicitly.

Replace the preemption-disable section with an explicit RCU-read section.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Marco Elver <elver@google.com>
Link: https://lore.kernel.org/r/20240704170424.1466941-7-bigeasy@linutronix.de
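
To make the lifetime argument concrete, below is a minimal sketch (not
the kernel code itself) of the pattern the patch relies on: the reader
runs inside rcu_read_lock()/rcu_read_unlock(), and teardown frees the
event only after an RCU grace period, so the reader can never touch a
freed event. The struct name, its reduced field set, and the kfree_rcu()
call are illustrative assumptions, not the actual perf_event layout.

#include <linux/rcupdate.h>
#include <linux/rcuwait.h>
#include <linux/sched.h>
#include <linux/slab.h>

/* Reduced, hypothetical stand-in for struct perf_event. */
struct fake_event {
	unsigned int	pending_work;
	struct rcuwait	pending_work_wait;
	struct rcu_head	rcu_head;
};

/* Reader side: what perf_pending_task() now does, in outline. */
static void fake_pending_task(struct fake_event *event)
{
	rcu_read_lock();		/* was: preempt_disable_notrace() */
	if (event->pending_work) {
		event->pending_work = 0;
		/* ... handle the pending work ... */
		rcuwait_wake_up(&event->pending_work_wait);
	}
	rcu_read_unlock();		/* was: preempt_enable_notrace() */
}

/* Teardown side: wait for the work, free only after a grace period. */
static void fake_teardown(struct fake_event *event)
{
	rcuwait_wait_event(&event->pending_work_wait,
			   !event->pending_work, TASK_UNINTERRUPTIBLE);
	kfree_rcu(event, rcu_head);	/* grace period covers the reader */
}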
kernel/events/core.c

index b5232257bc83a3b2f2ce6aeaa700ef4a6562a23e..96e03d6b52d182fc0486d304977425cdc4d0055a 100644
@@ -5208,10 +5208,9 @@ static void perf_pending_task_sync(struct perf_event *event)
        }
 
        /*
-        * All accesses related to the event are within the same
-        * non-preemptible section in perf_pending_task(). The RCU
-        * grace period before the event is freed will make sure all
-        * those accesses are complete by then.
+        * All accesses related to the event are within the same RCU section in
+        * perf_pending_task(). The RCU grace period before the event is freed
+        * will make sure all those accesses are complete by then.
         */
        rcuwait_wait_event(&event->pending_work_wait, !event->pending_work, TASK_UNINTERRUPTIBLE);
 }
@@ -6831,7 +6830,7 @@ static void perf_pending_task(struct callback_head *head)
         * critical section as the ->pending_work reset. See comment in
         * perf_pending_task_sync().
         */
-       preempt_disable_notrace();
+       rcu_read_lock();
        /*
         * If we 'fail' here, that's OK, it means recursion is already disabled
         * and we won't recurse 'further'.
@@ -6844,10 +6843,10 @@ static void perf_pending_task(struct callback_head *head)
                local_dec(&event->ctx->nr_pending);
                rcuwait_wake_up(&event->pending_work_wait);
        }
+       rcu_read_unlock();
 
        if (rctx >= 0)
                perf_swevent_put_recursion_context(rctx);
-       preempt_enable_notrace();
 }
 
 #ifdef CONFIG_GUEST_PERF_EVENTS
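
A usage note on the recursion guard visible in the hunk above:
perf_swevent_get_recursion_context() returns a context index, or a
negative value when this context is already inside a software-event
section, which is why the code treats failure as benign. A minimal
sketch of that calling convention (the surrounding work is
illustrative, not the actual perf code):

#include <linux/perf_event.h>

static void guarded_work(void)
{
	int rctx;

	/*
	 * A negative return means recursion is already blocked for this
	 * context; the work still proceeds, it just cannot recurse
	 * further.
	 */
	rctx = perf_swevent_get_recursion_context();

	/* ... do the pending work ... */

	if (rctx >= 0)
		perf_swevent_put_recursion_context(rctx);
}

After this patch the put runs outside the RCU section, which is fine
because, per the commit message, the recursion counter is no longer
per-CPU, so it does not depend on staying on one CPU or in one critical
section.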