git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
sched_ext: Add rq parameter to dispatch_enqueue()
Author: Andrea Righi <arighi@nvidia.com>
Wed, 18 Feb 2026 08:32:16 +0000 (09:32 +0100)
Committer: Tejun Heo <tj@kernel.org>
Mon, 23 Feb 2026 20:01:00 +0000 (10:01 -1000)
This prepares for a later commit fixing the ops.dequeue() semantics.
No functional change intended.

Signed-off-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
kernel/sched/ext.c

index 87397781c1bf1c60f05bdb3a18275cd34b60aab7..044bb2168dd072beede7749ebe7f1ff8f66dfb7d 100644 (file)
@@ -1010,8 +1010,9 @@ static void local_dsq_post_enq(struct scx_dispatch_q *dsq, struct task_struct *p
                resched_curr(rq);
 }
 
-static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq,
-                            struct task_struct *p, u64 enq_flags)
+static void dispatch_enqueue(struct scx_sched *sch, struct rq *rq,
+                            struct scx_dispatch_q *dsq, struct task_struct *p,
+                            u64 enq_flags)
 {
        bool is_local = dsq->id == SCX_DSQ_LOCAL;
 
@@ -1325,7 +1326,7 @@ static void direct_dispatch(struct scx_sched *sch, struct task_struct *p,
                return;
        }
 
-       dispatch_enqueue(sch, dsq, p,
+       dispatch_enqueue(sch, rq, dsq, p,
                         p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
 }
 
@@ -1415,7 +1416,7 @@ direct:
        direct_dispatch(sch, p, enq_flags);
        return;
 local_norefill:
-       dispatch_enqueue(sch, &rq->scx.local_dsq, p, enq_flags);
+       dispatch_enqueue(sch, rq, &rq->scx.local_dsq, p, enq_flags);
        return;
 local:
        dsq = &rq->scx.local_dsq;
@@ -1435,7 +1436,7 @@ enqueue:
         */
        touch_core_sched(rq, p);
        refill_task_slice_dfl(sch, p);
-       dispatch_enqueue(sch, dsq, p, enq_flags);
+       dispatch_enqueue(sch, rq, dsq, p, enq_flags);
 }
 
 static bool task_runnable(const struct task_struct *p)
@@ -1888,7 +1889,7 @@ static struct rq *move_task_between_dsqs(struct scx_sched *sch,
                dispatch_dequeue_locked(p, src_dsq);
                raw_spin_unlock(&src_dsq->lock);
 
-               dispatch_enqueue(sch, dst_dsq, p, enq_flags);
+               dispatch_enqueue(sch, dst_rq, dst_dsq, p, enq_flags);
        }
 
        return dst_rq;
@@ -1978,14 +1979,14 @@ static void dispatch_to_local_dsq(struct scx_sched *sch, struct rq *rq,
         * If dispatching to @rq that @p is already on, no lock dancing needed.
         */
        if (rq == src_rq && rq == dst_rq) {
-               dispatch_enqueue(sch, dst_dsq, p,
+               dispatch_enqueue(sch, rq, dst_dsq, p,
                                 enq_flags | SCX_ENQ_CLEAR_OPSS);
                return;
        }
 
        if (src_rq != dst_rq &&
            unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) {
-               dispatch_enqueue(sch, find_global_dsq(sch, p), p,
+               dispatch_enqueue(sch, rq, find_global_dsq(sch, p), p,
                                 enq_flags | SCX_ENQ_CLEAR_OPSS);
                return;
        }
@@ -2023,7 +2024,7 @@ static void dispatch_to_local_dsq(struct scx_sched *sch, struct rq *rq,
                 */
                if (src_rq == dst_rq) {
                        p->scx.holding_cpu = -1;
-                       dispatch_enqueue(sch, &dst_rq->scx.local_dsq, p,
+                       dispatch_enqueue(sch, dst_rq, &dst_rq->scx.local_dsq, p,
                                         enq_flags);
                } else {
                        move_remote_task_to_local_dsq(p, enq_flags,
@@ -2122,7 +2123,7 @@ retry:
        if (dsq->id == SCX_DSQ_LOCAL)
                dispatch_to_local_dsq(sch, rq, dsq, p, enq_flags);
        else
-               dispatch_enqueue(sch, dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
+               dispatch_enqueue(sch, rq, dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
 }
 
 static void flush_dispatch_buf(struct scx_sched *sch, struct rq *rq)
@@ -2423,7 +2424,7 @@ static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
                 * DSQ.
                 */
                if (p->scx.slice && !scx_rq_bypassing(rq)) {
-                       dispatch_enqueue(sch, &rq->scx.local_dsq, p,
+                       dispatch_enqueue(sch, rq, &rq->scx.local_dsq, p,
                                         SCX_ENQ_HEAD);
                        goto switch_class;
                }
@@ -3954,7 +3955,7 @@ resume:
                 * between bypass DSQs.
                 */
                dispatch_dequeue_locked(p, donor_dsq);
-               dispatch_enqueue(sch, donee_dsq, p, SCX_ENQ_NESTED);
+               dispatch_enqueue(sch, donee_rq, donee_dsq, p, SCX_ENQ_NESTED);
 
                /*
                 * $donee might have been idle and need to be woken up. No need