sched_ext: Put event_stats_cpu in struct scx_sched_pcpu
author		Tejun Heo <tj@kernel.org>
		Wed, 3 Sep 2025 21:33:28 +0000 (11:33 -1000)
committer	Tejun Heo <tj@kernel.org>
		Wed, 3 Sep 2025 21:33:28 +0000 (11:33 -1000)
scx_sched.event_stats_cpu holds the percpu counters that are used to track
stats. Introduce struct scx_sched_pcpu and move the counters inside. This
will ease adding more per-CPU fields. No functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Andrea Righi <arighi@nvidia.com>
kernel/sched/ext.c
kernel/sched/ext_internal.h
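
For readers unfamiliar with the idiom being refactored, below is a minimal,
self-contained sketch (not part of the commit; all demo_* names are
hypothetical) of a per-CPU wrapper struct: one alloc_percpu() covers every
per-CPU field, the fast path updates the local CPU's copy, and a reader
aggregates across CPUs, mirroring what scx_add_event() and scx_read_events()
do in the diff that follows.

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/types.h>

struct demo_stats {
	s64	nr_events;
};

struct demo_pcpu {
	struct demo_stats	stats;	/* more per-CPU fields can follow */
};

struct demo_sched {
	struct demo_pcpu __percpu *pcpu;
};

static int demo_init(struct demo_sched *sch)
{
	/* one allocation covers every field in the per-CPU wrapper */
	sch->pcpu = alloc_percpu(struct demo_pcpu);
	if (!sch->pcpu)
		return -ENOMEM;
	return 0;
}

static void demo_count(struct demo_sched *sch)
{
	/* fast path: bump this CPU's counter, as scx_add_event() does */
	this_cpu_add(sch->pcpu->stats.nr_events, 1);
}

static s64 demo_read(struct demo_sched *sch)
{
	s64 sum = 0;
	int cpu;

	/* slow path: sum all CPUs' copies, as scx_read_events() does */
	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(sch->pcpu, cpu)->stats.nr_events;
	return sum;
}

static void demo_exit(struct demo_sched *sch)
{
	free_percpu(sch->pcpu);
}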

diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 7e15e852370cd2bf3a9414b362c7d241f81e1589..701ca239ad003a184926ef93ec7fd23098c8b84f 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -635,7 +635,7 @@ static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
  * This can be used when preemption is not disabled.
  */
 #define scx_add_event(sch, name, cnt) do {                                     \
-       this_cpu_add((sch)->event_stats_cpu->name, (cnt));                      \
+       this_cpu_add((sch)->pcpu->event_stats.name, (cnt));                     \
        trace_sched_ext_event(#name, (cnt));                                    \
 } while(0)
 
@@ -648,7 +648,7 @@ static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
  * This should be used only when preemption is disabled.
  */
 #define __scx_add_event(sch, name, cnt) do {                                   \
-       __this_cpu_add((sch)->event_stats_cpu->name, (cnt));                    \
+       __this_cpu_add((sch)->pcpu->event_stats.name, (cnt));                   \
        trace_sched_ext_event(#name, cnt);                                      \
 } while(0)
 
@@ -3543,7 +3543,7 @@ static void scx_sched_free_rcu_work(struct work_struct *work)
        int node;
 
        kthread_stop(sch->helper->task);
-       free_percpu(sch->event_stats_cpu);
+       free_percpu(sch->pcpu);
 
        for_each_node_state(node, N_POSSIBLE)
                kfree(sch->global_dsqs[node]);
@@ -4444,13 +4444,13 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops)
                sch->global_dsqs[node] = dsq;
        }
 
-       sch->event_stats_cpu = alloc_percpu(struct scx_event_stats);
-       if (!sch->event_stats_cpu)
+       sch->pcpu = alloc_percpu(struct scx_sched_pcpu);
+       if (!sch->pcpu)
                goto err_free_gdsqs;
 
        sch->helper = kthread_run_worker(0, "sched_ext_helper");
        if (!sch->helper)
-               goto err_free_event_stats;
+               goto err_free_pcpu;
        sched_set_fifo(sch->helper->task);
 
        atomic_set(&sch->exit_kind, SCX_EXIT_NONE);
@@ -4468,8 +4468,8 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops)
 
 err_stop_helper:
        kthread_stop(sch->helper->task);
-err_free_event_stats:
-       free_percpu(sch->event_stats_cpu);
+err_free_pcpu:
+       free_percpu(sch->pcpu);
 err_free_gdsqs:
        for_each_node_state(node, N_POSSIBLE)
                kfree(sch->global_dsqs[node]);
@@ -6493,7 +6493,7 @@ static void scx_read_events(struct scx_sched *sch, struct scx_event_stats *event
        /* Aggregate per-CPU event counters into @events. */
        memset(events, 0, sizeof(*events));
        for_each_possible_cpu(cpu) {
-               e_cpu = per_cpu_ptr(sch->event_stats_cpu, cpu);
+               e_cpu = &per_cpu_ptr(sch->pcpu, cpu)->event_stats;
                scx_agg_event(events, e_cpu, SCX_EV_SELECT_CPU_FALLBACK);
                scx_agg_event(events, e_cpu, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
                scx_agg_event(events, e_cpu, SCX_EV_DISPATCH_KEEP_LAST);
diff --git a/kernel/sched/ext_internal.h b/kernel/sched/ext_internal.h
index 76690ede8700ffa3921e4e6db902888790e01e71..af4c054fb6f852fc19d734d445e35adb67ffc4c4 100644
--- a/kernel/sched/ext_internal.h
+++ b/kernel/sched/ext_internal.h
@@ -846,6 +846,15 @@ struct scx_event_stats {
        s64             SCX_EV_BYPASS_ACTIVATE;
 };
 
+struct scx_sched_pcpu {
+       /*
+        * The event counters are in a per-CPU variable to minimize the
+        * accounting overhead. A system-wide view on the event counter is
+        * constructed when requested by scx_bpf_events().
+        */
+       struct scx_event_stats  event_stats;
+};
+
 struct scx_sched {
        struct sched_ext_ops    ops;
        DECLARE_BITMAP(has_op, SCX_OPI_END);
@@ -860,13 +869,7 @@ struct scx_sched {
         */
        struct rhashtable       dsq_hash;
        struct scx_dispatch_q   **global_dsqs;
-
-       /*
-        * The event counters are in a per-CPU variable to minimize the
-        * accounting overhead. A system-wide view on the event counter is
-        * constructed when requested by scx_bpf_events().
-        */
-       struct scx_event_stats __percpu *event_stats_cpu;
+       struct scx_sched_pcpu __percpu *pcpu;
 
        bool                    warned_zero_slice;
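
As a closing note on the stated motivation (easing the addition of more
per-CPU fields): with this layout, a future change only has to extend the
wrapper struct, while the alloc_percpu()/free_percpu() call sites above stay
untouched. A hypothetical illustration (the added field is invented, not
from this commit):

struct scx_sched_pcpu {
	struct scx_event_stats	event_stats;
	u64			nr_some_new_stat;	/* hypothetical future field */
};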