]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
sched: Add support to pick functions to take rf
author: Joel Fernandes <joelagnelf@nvidia.com>
Sat, 9 Aug 2025 18:47:50 +0000 (14:47 -0400)
committer: Peter Zijlstra <peterz@infradead.org>
Thu, 16 Oct 2025 09:13:55 +0000 (11:13 +0200)
Some pick functions like the internal pick_next_task_fair() already take
rf but some others don't. We need this for scx's server pick function.
Prepare for this by having pick functions accept it.

[peterz: - added RETRY_TASK handling
         - removed pick_next_task_fair indirection]
Signed-off-by: Joel Fernandes <joelagnelf@nvidia.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Tejun Heo <tj@kernel.org>
include/linux/sched.h
kernel/sched/core.c
kernel/sched/deadline.c
kernel/sched/ext.c
kernel/sched/fair.c
kernel/sched/idle.c
kernel/sched/rt.c
kernel/sched/sched.h
kernel/sched/stop_task.c

index 77426c347cff54d82eaec49bc5e21a82bca15ca7..07576479c0edc2e4b1967fd9783809a78d207522 100644 (file)
@@ -637,8 +637,8 @@ struct sched_rt_entity {
 #endif
 } __randomize_layout;
 
-typedef bool (*dl_server_has_tasks_f)(struct sched_dl_entity *);
-typedef struct task_struct *(*dl_server_pick_f)(struct sched_dl_entity *);
+struct rq_flags;
+typedef struct task_struct *(*dl_server_pick_f)(struct sched_dl_entity *, struct rq_flags *rf);
 
 struct sched_dl_entity {
        struct rb_node                  rb_node;
@@ -730,9 +730,6 @@ struct sched_dl_entity {
         * dl_server_update().
         *
         * @rq the runqueue this server is for
-        *
-        * @server_has_tasks() returns true if @server_pick return a
-        * runnable task.
         */
        struct rq                       *rq;
        dl_server_pick_f                server_pick_task;
index 9fc990ff68454103eaa4aec82bf5b40f55e0fe51..a75d45680f9cea1cafede19768a1cf1660f4c0bb 100644 (file)
@@ -5901,7 +5901,7 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
                /* Assume the next prioritized class is idle_sched_class */
                if (!p) {
-                       p = pick_task_idle(rq);
+                       p = pick_task_idle(rq, rf);
                        put_prev_set_next_task(rq, prev, p);
                }
 
@@ -5913,11 +5913,15 @@ restart:
 
        for_each_active_class(class) {
                if (class->pick_next_task) {
-                       p = class->pick_next_task(rq, prev);
+                       p = class->pick_next_task(rq, prev, rf);
+                       if (unlikely(p == RETRY_TASK))
+                               goto restart;
                        if (p)
                                return p;
                } else {
-                       p = class->pick_task(rq);
+                       p = class->pick_task(rq, rf);
+                       if (unlikely(p == RETRY_TASK))
+                               goto restart;
                        if (p) {
                                put_prev_set_next_task(rq, prev, p);
                                return p;
@@ -5947,7 +5951,11 @@ static inline bool cookie_match(struct task_struct *a, struct task_struct *b)
        return a->core_cookie == b->core_cookie;
 }
 
-static inline struct task_struct *pick_task(struct rq *rq)
+/*
+ * Careful; this can return RETRY_TASK, it does not include the retry-loop
+ * itself due to the whole SMT pick retry thing below.
+ */
+static inline struct task_struct *pick_task(struct rq *rq, struct rq_flags *rf)
 {
        const struct sched_class *class;
        struct task_struct *p;
@@ -5955,7 +5963,7 @@ static inline struct task_struct *pick_task(struct rq *rq)
        rq->dl_server = NULL;
 
        for_each_active_class(class) {
-               p = class->pick_task(rq);
+               p = class->pick_task(rq, rf);
                if (p)
                        return p;
        }
@@ -5970,7 +5978,7 @@ static void queue_core_balance(struct rq *rq);
 static struct task_struct *
 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
-       struct task_struct *next, *p, *max = NULL;
+       struct task_struct *next, *p, *max;
        const struct cpumask *smt_mask;
        bool fi_before = false;
        bool core_clock_updated = (rq == rq->core);
@@ -6055,7 +6063,10 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
         * and there are no cookied tasks running on siblings.
         */
        if (!need_sync) {
-               next = pick_task(rq);
+restart_single:
+               next = pick_task(rq, rf);
+               if (unlikely(next == RETRY_TASK))
+                       goto restart_single;
                if (!next->core_cookie) {
                        rq->core_pick = NULL;
                        rq->core_dl_server = NULL;
@@ -6075,6 +6086,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
         *
         * Tie-break prio towards the current CPU
         */
+restart_multi:
+       max = NULL;
        for_each_cpu_wrap(i, smt_mask, cpu) {
                rq_i = cpu_rq(i);
 
@@ -6086,7 +6099,11 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
                if (i != cpu && (rq_i != rq->core || !core_clock_updated))
                        update_rq_clock(rq_i);
 
-               rq_i->core_pick = p = pick_task(rq_i);
+               p = pick_task(rq_i, rf);
+               if (unlikely(p == RETRY_TASK))
+                       goto restart_multi;
+
+               rq_i->core_pick = p;
                rq_i->core_dl_server = rq_i->dl_server;
 
                if (!max || prio_less(max, p, fi_before))
@@ -6108,7 +6125,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
                        if (cookie)
                                p = sched_core_find(rq_i, cookie);
                        if (!p)
-                               p = idle_sched_class.pick_task(rq_i);
+                               p = idle_sched_class.pick_task(rq_i, rf);
                }
 
                rq_i->core_pick = p;
index 83e6175d79f5f7c8d6437616cd0456d03c7386e9..48357d4609bf9f42dae7f707076b3bc2ebbce4ff 100644 (file)
@@ -2352,7 +2352,7 @@ static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
  * __pick_next_task_dl - Helper to pick the next -deadline task to run.
  * @rq: The runqueue to pick the next task from.
  */
-static struct task_struct *__pick_task_dl(struct rq *rq)
+static struct task_struct *__pick_task_dl(struct rq *rq, struct rq_flags *rf)
 {
        struct sched_dl_entity *dl_se;
        struct dl_rq *dl_rq = &rq->dl;
@@ -2366,7 +2366,7 @@ again:
        WARN_ON_ONCE(!dl_se);
 
        if (dl_server(dl_se)) {
-               p = dl_se->server_pick_task(dl_se);
+               p = dl_se->server_pick_task(dl_se, rf);
                if (!p) {
                        dl_server_stop(dl_se);
                        goto again;
@@ -2379,9 +2379,9 @@ again:
        return p;
 }
 
-static struct task_struct *pick_task_dl(struct rq *rq)
+static struct task_struct *pick_task_dl(struct rq *rq, struct rq_flags *rf)
 {
-       return __pick_task_dl(rq);
+       return __pick_task_dl(rq, rf);
 }
 
 static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct task_struct *next)
index 949c3a6e24d4bc875820986ff4190837780cc480..dc743cac59cb0a3a79d49305ca3a66a4f51f7a2d 100644 (file)
@@ -2332,7 +2332,7 @@ static struct task_struct *first_local_task(struct rq *rq)
                                        struct task_struct, scx.dsq_list.node);
 }
 
-static struct task_struct *pick_task_scx(struct rq *rq)
+static struct task_struct *pick_task_scx(struct rq *rq, struct rq_flags *rf)
 {
        struct task_struct *prev = rq->curr;
        struct task_struct *p;
index 23ac05cca4a49e8368ccc286c332abb91a1df51c..2554055c1ba13d3e9171f588ef551ad23bb637c8 100644 (file)
@@ -8705,15 +8705,6 @@ static void set_cpus_allowed_fair(struct task_struct *p, struct affinity_context
        set_task_max_allowed_capacity(p);
 }
 
-static int
-balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
-{
-       if (sched_fair_runnable(rq))
-               return 1;
-
-       return sched_balance_newidle(rq, rf) != 0;
-}
-
 static void set_next_buddy(struct sched_entity *se)
 {
        for_each_sched_entity(se) {
@@ -8822,7 +8813,7 @@ preempt:
        resched_curr_lazy(rq);
 }
 
-static struct task_struct *pick_task_fair(struct rq *rq)
+static struct task_struct *pick_task_fair(struct rq *rq, struct rq_flags *rf)
 {
        struct sched_entity *se;
        struct cfs_rq *cfs_rq;
@@ -8866,7 +8857,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
        int new_tasks;
 
 again:
-       p = pick_task_fair(rq);
+       p = pick_task_fair(rq, rf);
        if (!p)
                goto idle;
        se = &p->se;
@@ -8945,14 +8936,10 @@ idle:
        return NULL;
 }
 
-static struct task_struct *__pick_next_task_fair(struct rq *rq, struct task_struct *prev)
-{
-       return pick_next_task_fair(rq, prev, NULL);
-}
-
-static struct task_struct *fair_server_pick_task(struct sched_dl_entity *dl_se)
+static struct task_struct *
+fair_server_pick_task(struct sched_dl_entity *dl_se, struct rq_flags *rf)
 {
-       return pick_task_fair(dl_se->rq);
+       return pick_task_fair(dl_se->rq, rf);
 }
 
 void fair_server_init(struct rq *rq)
@@ -13644,11 +13631,10 @@ DEFINE_SCHED_CLASS(fair) = {
        .wakeup_preempt         = check_preempt_wakeup_fair,
 
        .pick_task              = pick_task_fair,
-       .pick_next_task         = __pick_next_task_fair,
+       .pick_next_task         = pick_next_task_fair,
        .put_prev_task          = put_prev_task_fair,
        .set_next_task          = set_next_task_fair,
 
-       .balance                = balance_fair,
        .select_task_rq         = select_task_rq_fair,
        .migrate_task_rq        = migrate_task_rq_fair,
 
index 055b0ddbcd54dfb1727d7925c2a2a2bb7e0e5bcd..7fa0b593bcff7359314f7a2fe58df0296d3e51e6 100644 (file)
@@ -466,7 +466,7 @@ static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool fir
        next->se.exec_start = rq_clock_task(rq);
 }
 
-struct task_struct *pick_task_idle(struct rq *rq)
+struct task_struct *pick_task_idle(struct rq *rq, struct rq_flags *rf)
 {
        scx_update_idle(rq, true, false);
        return rq->idle;
index 9bc828d59121a3165c462dacc80a8094fbc2db5d..1fd97f2d7ec628d78dd82989176117d92c4c8490 100644 (file)
@@ -1695,7 +1695,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
        return rt_task_of(rt_se);
 }
 
-static struct task_struct *pick_task_rt(struct rq *rq)
+static struct task_struct *pick_task_rt(struct rq *rq, struct rq_flags *rf)
 {
        struct task_struct *p;
 
index f4a323007dced65942244eeda33791686615da23..8946294929a4280b0fb9f1de3eeda05914fdeae9 100644 (file)
@@ -2470,7 +2470,7 @@ struct sched_class {
        /*
         * schedule/pick_next_task: rq->lock
         */
-       struct task_struct *(*pick_task)(struct rq *rq);
+       struct task_struct *(*pick_task)(struct rq *rq, struct rq_flags *rf);
        /*
         * Optional! When implemented pick_next_task() should be equivalent to:
         *
@@ -2480,7 +2480,8 @@ struct sched_class {
         *       set_next_task_first(next);
         *   }
         */
-       struct task_struct *(*pick_next_task)(struct rq *rq, struct task_struct *prev);
+       struct task_struct *(*pick_next_task)(struct rq *rq, struct task_struct *prev,
+                                             struct rq_flags *rf);
 
        /*
         * sched_change:
@@ -2707,8 +2708,9 @@ static inline bool sched_fair_runnable(struct rq *rq)
        return rq->cfs.nr_queued > 0;
 }
 
-extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
-extern struct task_struct *pick_task_idle(struct rq *rq);
+extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev,
+                                              struct rq_flags *rf);
+extern struct task_struct *pick_task_idle(struct rq *rq, struct rq_flags *rf);
 
 #define SCA_CHECK              0x01
 #define SCA_MIGRATE_DISABLE    0x02
index d98c453c9b4eb50cb66d8452ee191af659bf1b5a..4f9192be4b5b0a2a81072c4349c25023a736be1d 100644 (file)
@@ -32,7 +32,7 @@ static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool fir
        stop->se.exec_start = rq_clock_task(rq);
 }
 
-static struct task_struct *pick_task_stop(struct rq *rq)
+static struct task_struct *pick_task_stop(struct rq *rq, struct rq_flags *rf)
 {
        if (!sched_stop_runnable(rq))
                return NULL;