]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
perf: Rename perf_event_context.nr_pending to nr_no_switch_fast.
authorBen Gainey <ben.gainey@arm.com>
Tue, 30 Jul 2024 08:44:14 +0000 (09:44 +0100)
committerPeter Zijlstra <peterz@infradead.org>
Fri, 2 Aug 2024 09:30:29 +0000 (11:30 +0200)
nr_pending counts the number of events in the context that
have either pending_sigtrap or pending_work set, but it is used
to prevent taking the fast path in perf_event_context_sched_out.

The field is renamed to reflect what it is used for, rather than
what it counts. This change allows using the field to track other
event properties that also require skipping the fast path,
without possible confusion over the name.

Signed-off-by: Ben Gainey <ben.gainey@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20240730084417.7693-2-ben.gainey@arm.com
include/linux/perf_event.h
kernel/events/core.c

index 6bb0c21d6335b8108af39e481c0910c8020f741d..655f66b18418a8aa58bc5b1754d613dd96e3f76c 100644 (file)
@@ -966,12 +966,13 @@ struct perf_event_context {
        struct rcu_head                 rcu_head;
 
        /*
-        * Sum (event->pending_work + event->pending_work)
+        * The count of events for which using the switch-out fast path
+        * should be avoided.
         *
         * The SIGTRAP is targeted at ctx->task, as such it won't do changing
         * that until the signal is delivered.
         */
-       local_t                         nr_pending;
+       local_t                         nr_no_switch_fast;
 };
 
 struct perf_cpu_pmu_context {
index aa3450bdc2276cd47d749ac9631600f7963ecfa9..e6cc354a3ceefb5902979c79fe244ab2282c162d 100644 (file)
@@ -3516,9 +3516,9 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
 
                        perf_ctx_disable(ctx, false);
 
-                       /* PMIs are disabled; ctx->nr_pending is stable. */
-                       if (local_read(&ctx->nr_pending) ||
-                           local_read(&next_ctx->nr_pending)) {
+                       /* PMIs are disabled; ctx->nr_no_switch_fast is stable. */
+                       if (local_read(&ctx->nr_no_switch_fast) ||
+                           local_read(&next_ctx->nr_no_switch_fast)) {
                                /*
                                 * Must not swap out ctx when there's pending
                                 * events that rely on the ctx->task relation.
@@ -5204,7 +5204,7 @@ static void perf_pending_task_sync(struct perf_event *event)
         */
        if (task_work_cancel(current, head)) {
                event->pending_work = 0;
-               local_dec(&event->ctx->nr_pending);
+               local_dec(&event->ctx->nr_no_switch_fast);
                return;
        }
 
@@ -6868,7 +6868,7 @@ static void perf_pending_task(struct callback_head *head)
        if (event->pending_work) {
                event->pending_work = 0;
                perf_sigtrap(event);
-               local_dec(&event->ctx->nr_pending);
+               local_dec(&event->ctx->nr_no_switch_fast);
                rcuwait_wake_up(&event->pending_work_wait);
        }
        rcu_read_unlock();
@@ -9740,7 +9740,7 @@ static int __perf_event_overflow(struct perf_event *event,
                if (!event->pending_work &&
                    !task_work_add(current, &event->pending_task, notify_mode)) {
                        event->pending_work = pending_id;
-                       local_inc(&event->ctx->nr_pending);
+                       local_inc(&event->ctx->nr_no_switch_fast);
 
                        event->pending_addr = 0;
                        if (valid_sample && (data->sample_flags & PERF_SAMPLE_ADDR))