git.ipfire.org Git - thirdparty/linux.git/commitdiff
sched_ext: Wrap deferred_reenq_local_node into a struct
author: Tejun Heo <tj@kernel.org>
Sat, 7 Mar 2026 15:29:49 +0000 (05:29 -1000)
committer: Tejun Heo <tj@kernel.org>
Sat, 7 Mar 2026 15:29:49 +0000 (05:29 -1000)
Wrap the deferred_reenq_local_node list_head into struct
scx_deferred_reenq_local. More fields will be added and this allows using a
shorthand pointer to access them.

No functional change.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Andrea Righi <arighi@nvidia.com>
kernel/sched/ext.c
kernel/sched/ext_internal.h

index 9c3129a45103b8f4716e0c66003e7e15461de183..3548cf61477ac384cf107ec2af888f1afdeeb720 100644 (file)
@@ -3647,15 +3647,19 @@ static void process_deferred_reenq_locals(struct rq *rq)
                struct scx_sched *sch;
 
                scoped_guard (raw_spinlock, &rq->scx.deferred_reenq_lock) {
-                       struct scx_sched_pcpu *sch_pcpu =
+                       struct scx_deferred_reenq_local *drl =
                                list_first_entry_or_null(&rq->scx.deferred_reenq_locals,
-                                                        struct scx_sched_pcpu,
-                                                        deferred_reenq_local_node);
-                       if (!sch_pcpu)
+                                                        struct scx_deferred_reenq_local,
+                                                        node);
+                       struct scx_sched_pcpu *sch_pcpu;
+
+                       if (!drl)
                                return;
 
+                       sch_pcpu = container_of(drl, struct scx_sched_pcpu,
+                                               deferred_reenq_local);
                        sch = sch_pcpu->sch;
-                       list_del_init(&sch_pcpu->deferred_reenq_local_node);
+                       list_del_init(&drl->node);
                }
 
                reenq_local(sch, rq);
@@ -4199,7 +4203,7 @@ static void scx_sched_free_rcu_work(struct work_struct *work)
        for_each_possible_cpu(cpu) {
                struct scx_sched_pcpu *pcpu = per_cpu_ptr(sch->pcpu, cpu);
 
-               WARN_ON_ONCE(!list_empty(&pcpu->deferred_reenq_local_node));
+               WARN_ON_ONCE(!list_empty(&pcpu->deferred_reenq_local.node));
        }
 
        free_percpu(sch->pcpu);
@@ -5812,7 +5816,7 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops,
                struct scx_sched_pcpu *pcpu = per_cpu_ptr(sch->pcpu, cpu);
 
                pcpu->sch = sch;
-               INIT_LIST_HEAD(&pcpu->deferred_reenq_local_node);
+               INIT_LIST_HEAD(&pcpu->deferred_reenq_local.node);
        }
 
        sch->helper = kthread_run_worker(0, "sched_ext_helper");
@@ -8390,8 +8394,8 @@ __bpf_kfunc void scx_bpf_reenqueue_local___v2(const struct bpf_prog_aux *aux)
        scoped_guard (raw_spinlock, &rq->scx.deferred_reenq_lock) {
                struct scx_sched_pcpu *pcpu = this_cpu_ptr(sch->pcpu);
 
-               if (list_empty(&pcpu->deferred_reenq_local_node))
-                       list_move_tail(&pcpu->deferred_reenq_local_node,
+               if (list_empty(&pcpu->deferred_reenq_local.node))
+                       list_move_tail(&pcpu->deferred_reenq_local.node,
                                       &rq->scx.deferred_reenq_locals);
        }
 
index 80d40a9c5ad9327c72fde3c819b4954fc17e5529..1a8d61097cab5a0f2e6745854bc5eb53a9baa04f 100644 (file)
@@ -954,6 +954,10 @@ struct scx_dsp_ctx {
        struct scx_dsp_buf_ent  buf[];
 };
 
+struct scx_deferred_reenq_local {
+       struct list_head        node;
+};
+
 struct scx_sched_pcpu {
        struct scx_sched        *sch;
        u64                     flags;  /* protected by rq lock */
@@ -965,7 +969,7 @@ struct scx_sched_pcpu {
         */
        struct scx_event_stats  event_stats;
 
-       struct list_head        deferred_reenq_local_node;
+       struct scx_deferred_reenq_local deferred_reenq_local;
        struct scx_dispatch_q   bypass_dsq;
 #ifdef CONFIG_EXT_SUB_SCHED
        u32                     bypass_host_seq;