From 436f3eed5c69c1048a5754df6e3dbb291e5cccbd Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 14 Aug 2024 00:25:54 +0200
Subject: [PATCH] sched: Combine the last put_prev_task() and the first
 set_next_task()

Ensure the last put_prev_task() and the first set_next_task() always
go together.

Signed-off-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20240813224016.158454756@infradead.org
---
 kernel/sched/core.c  | 17 ++++-------------
 kernel/sched/fair.c  |  3 +--
 kernel/sched/sched.h | 10 +++++++++-
 3 files changed, 14 insertions(+), 16 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b9429eb5dbbef..8a1cf93da2032 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5894,8 +5894,7 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		/* Assume the next prioritized class is idle_sched_class */
 		if (!p) {
 			p = pick_task_idle(rq);
-			put_prev_task(rq, prev);
-			set_next_task_first(rq, p);
+			put_prev_set_next_task(rq, prev, p);
 		}
 
 		/*
@@ -5926,8 +5925,7 @@ restart:
 		} else {
 			p = class->pick_task(rq);
 			if (p) {
-				put_prev_task(rq, prev);
-				set_next_task_first(rq, p);
+				put_prev_set_next_task(rq, prev, p);
 				return p;
 			}
 		}
@@ -6016,13 +6014,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
 
 		next = rq->core_pick;
-		if (next != prev) {
-			put_prev_task(rq, prev);
-			set_next_task_first(rq, next);
-		}
-
 		rq->core_pick = NULL;
-		goto out;
+		goto out_set_next;
 	}
 
 	prev_balance(rq, prev, rf);
@@ -6192,9 +6185,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	}
 
 out_set_next:
-	put_prev_task(rq, prev);
-	set_next_task_first(rq, next);
-out:
+	put_prev_set_next_task(rq, prev, next);
 	if (rq->core->core_forceidle_count && next == rq->idle)
 		queue_core_balance(rq);
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 53556b08edef5..c5b7873dcc301 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8819,8 +8819,7 @@ again:
 simple:
 #endif
 
-	put_prev_task(rq, prev);
-	set_next_task_fair(rq, p, true);
+	put_prev_set_next_task(rq, prev, p);
 	return p;
 
 idle:
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 64a4ed758ba1c..aae35818cca44 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2370,8 +2370,16 @@ static inline void set_next_task(struct rq *rq, struct task_struct *next)
 	next->sched_class->set_next_task(rq, next, false);
 }
 
-static inline void set_next_task_first(struct rq *rq, struct task_struct *next)
+static inline void put_prev_set_next_task(struct rq *rq,
+					  struct task_struct *prev,
+					  struct task_struct *next)
 {
+	WARN_ON_ONCE(rq->curr != prev);
+
+	if (next == prev)
+		return;
+
+	prev->sched_class->put_prev_task(rq, prev);
 	next->sched_class->set_next_task(rq, next, true);
 }
 
-- 
2.39.5
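
For reference, below is a minimal user-space sketch (not kernel code) of the
pairing the new helper enforces: put_prev_task() on the outgoing task and
set_next_task() on the incoming task are either both issued or both skipped
(when next == prev). The struct definitions and the fair_class callbacks are
simplified stand-ins for struct rq / sched_class, but the control flow mirrors
the helper added to sched.h above.

	/* pairing_sketch.c: simplified model of put_prev_set_next_task() */
	#include <stdio.h>

	struct task;

	struct sched_class {
		void (*put_prev_task)(struct task *p);
		void (*set_next_task)(struct task *p, int first);
	};

	struct task {
		const char *name;
		const struct sched_class *sched_class;
	};

	struct rq {
		struct task *curr;
	};

	static void fair_put_prev(struct task *p)
	{
		printf("put_prev_task(%s)\n", p->name);
	}

	static void fair_set_next(struct task *p, int first)
	{
		printf("set_next_task(%s, first=%d)\n", p->name, first);
	}

	static const struct sched_class fair_class = {
		.put_prev_task = fair_put_prev,
		.set_next_task = fair_set_next,
	};

	/* Same shape as the sched.h helper, minus the real types. */
	static void put_prev_set_next_task(struct rq *rq, struct task *prev,
					   struct task *next)
	{
		if (rq->curr != prev)
			fprintf(stderr, "WARN: prev is not rq->curr\n");

		if (next == prev)
			return;

		prev->sched_class->put_prev_task(prev);
		next->sched_class->set_next_task(next, 1);
	}

	int main(void)
	{
		struct task a = { "A", &fair_class };
		struct task b = { "B", &fair_class };
		struct rq rq = { .curr = &a };

		put_prev_set_next_task(&rq, &a, &b);	/* put A, then set B */
		rq.curr = &b;	/* in the kernel, __schedule() updates rq->curr separately */
		put_prev_set_next_task(&rq, &b, &b);	/* next == prev: both calls skipped */
		return 0;
	}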