sched: Combine the last put_prev_task() and the first set_next_task()
author    Peter Zijlstra <peterz@infradead.org>
          Tue, 13 Aug 2024 22:25:54 +0000 (00:25 +0200)
committer Peter Zijlstra <peterz@infradead.org>
          Tue, 3 Sep 2024 13:26:31 +0000 (15:26 +0200)

Ensure the last put_prev_task() and the first set_next_task() always
go together.
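
As a rough sketch of the calling-pattern change (simplified, not the
exact kernel code), every pick path that previously paired the two
calls by hand now goes through the combined helper:

	/* old pattern: two calls every caller had to keep together */
	put_prev_task(rq, prev);
	set_next_task_first(rq, next);

	/* new pattern: one helper, which is also a no-op for next == prev */
	put_prev_set_next_task(rq, prev, next);

Because the helper short-circuits when next == prev, the core-scheduling
path in pick_next_task() no longer needs its own "next != prev" check.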

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20240813224016.158454756@infradead.org
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/sched.h

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b9429eb5dbbef5922934bb248c53d2ba1eac4a1b..8a1cf93da20327e8a9120c2c4543369b605f06de 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5894,8 +5894,7 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
                /* Assume the next prioritized class is idle_sched_class */
                if (!p) {
                        p = pick_task_idle(rq);
-                       put_prev_task(rq, prev);
-                       set_next_task_first(rq, p);
+                       put_prev_set_next_task(rq, prev, p);
                }
 
                /*
@@ -5926,8 +5925,7 @@ restart:
                } else {
                        p = class->pick_task(rq);
                        if (p) {
-                               put_prev_task(rq, prev);
-                               set_next_task_first(rq, p);
+                               put_prev_set_next_task(rq, prev, p);
                                return p;
                        }
                }
@@ -6016,13 +6014,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
                WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
 
                next = rq->core_pick;
-               if (next != prev) {
-                       put_prev_task(rq, prev);
-                       set_next_task_first(rq, next);
-               }
-
                rq->core_pick = NULL;
-               goto out;
+               goto out_set_next;
        }
 
        prev_balance(rq, prev, rf);
@@ -6192,9 +6185,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
        }
 
 out_set_next:
-       put_prev_task(rq, prev);
-       set_next_task_first(rq, next);
-out:
+       put_prev_set_next_task(rq, prev, next);
        if (rq->core->core_forceidle_count && next == rq->idle)
                queue_core_balance(rq);
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 53556b08edef5d705ee34a4676f9ea64fd89dd2a..c5b7873dcc301235f86e7ca2c986f87d1977300e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8819,8 +8819,7 @@ again:
 
 simple:
 #endif
-       put_prev_task(rq, prev);
-       set_next_task_fair(rq, p, true);
+       put_prev_set_next_task(rq, prev, p);
        return p;
 
 idle:
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 64a4ed758ba1c1cc3ba347393bc9bd656f386c2d..aae35818cca44fc7b463fbd18e866ad95bc4f082 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2370,8 +2370,16 @@ static inline void set_next_task(struct rq *rq, struct task_struct *next)
        next->sched_class->set_next_task(rq, next, false);
 }
 
-static inline void set_next_task_first(struct rq *rq, struct task_struct *next)
+static inline void put_prev_set_next_task(struct rq *rq,
+                                         struct task_struct *prev,
+                                         struct task_struct *next)
 {
+       WARN_ON_ONCE(rq->curr != prev);
+
+       if (next == prev)
+               return;
+
+       prev->sched_class->put_prev_task(rq, prev);
        next->sched_class->set_next_task(rq, next, true);
 }