From 41e39d762d25d03aa742f821c6875cf63eadd6b7 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Thu, 21 Aug 2025 07:22:26 +0200
Subject: [PATCH] drop 6.15 pending as that's eol

---
 ...line-initialize-dl_servers-after-smp.patch | 140 ---------------
 ...ne-less-agressive-dl_server-handling.patch | 163 ------------------
 2 files changed, 303 deletions(-)
 delete mode 100644 next/6.15/sched-deadline-initialize-dl_servers-after-smp.patch
 delete mode 100644 next/6.15/sched-deadline-less-agressive-dl_server-handling.patch

diff --git a/next/6.15/sched-deadline-initialize-dl_servers-after-smp.patch b/next/6.15/sched-deadline-initialize-dl_servers-after-smp.patch
deleted file mode 100644
index 67b52d2e61..0000000000
--- a/next/6.15/sched-deadline-initialize-dl_servers-after-smp.patch
+++ /dev/null
@@ -1,140 +0,0 @@
-From 3444b7294c4455310b55a9f0b60d3de6480c71cb Mon Sep 17 00:00:00 2001
-From: Sasha Levin
-Date: Fri, 27 Jun 2025 13:51:14 +0200
-Subject: sched/deadline: Initialize dl_servers after SMP
-
-From: Juri Lelli
-
-[ Upstream commit 9f239df55546ee1d28f0976130136ffd1cad0fd7 ]
-
-dl-servers are currently initialized too early at boot when CPUs are not
-fully up (only boot CPU is). This results in miscalculation of per
-runqueue DEADLINE variables like extra_bw (which needs a stable CPU
-count).
-
-Move initialization of dl-servers later on after SMP has been
-initialized and CPUs are all online, so that CPU count is stable and
-DEADLINE variables can be computed correctly.
-
-Fixes: d741f297bceaf ("sched/fair: Fair server interface")
-Reported-by: Marcel Ziswiler
-Signed-off-by: Juri Lelli
-Signed-off-by: Peter Zijlstra (Intel)
-Acked-by: Waiman Long
-Tested-by: Marcel Ziswiler # nuc & rock5b
-Link: https://lore.kernel.org/r/20250627115118.438797-2-juri.lelli@redhat.com
-Signed-off-by: Sasha Levin
----
- kernel/sched/core.c     |  2 ++
- kernel/sched/deadline.c | 48 +++++++++++++++++++++++++----------------
- kernel/sched/sched.h    |  1 +
- 3 files changed, 33 insertions(+), 18 deletions(-)
-
-diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 7d5f51e2f761..333743f143aa 100644
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -8501,6 +8501,8 @@ void __init sched_init_smp(void)
- 	init_sched_rt_class();
- 	init_sched_dl_class();
- 
-+	sched_init_dl_servers();
-+
- 	sched_smp_initialized = true;
- }
- 
-diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
-index 094134c9b135..ef5b5c045769 100644
---- a/kernel/sched/deadline.c
-+++ b/kernel/sched/deadline.c
-@@ -824,6 +824,8 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
- 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
- 	struct rq *rq = rq_of_dl_rq(dl_rq);
- 
-+	update_rq_clock(rq);
-+
- 	WARN_ON(is_dl_boosted(dl_se));
- 	WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
- 
-@@ -1652,23 +1654,7 @@ void dl_server_start(struct sched_dl_entity *dl_se)
- {
- 	struct rq *rq = dl_se->rq;
- 
--	/*
--	 * XXX: the apply do not work fine at the init phase for the
--	 * fair server because things are not yet set. We need to improve
--	 * this before getting generic.
--	 */
--	if (!dl_server(dl_se)) {
--		u64 runtime = 50 * NSEC_PER_MSEC;
--		u64 period = 1000 * NSEC_PER_MSEC;
--
--		dl_server_apply_params(dl_se, runtime, period, 1);
--
--		dl_se->dl_server = 1;
--		dl_se->dl_defer = 1;
--		setup_new_dl_entity(dl_se);
--	}
--
--	if (!dl_se->dl_runtime || dl_se->dl_server_active)
-+	if (!dl_server(dl_se) || dl_se->dl_server_active)
- 		return;
- 
- 	dl_se->dl_server_active = 1;
-@@ -1679,7 +1665,7 @@ void dl_server_start(struct sched_dl_entity *dl_se)
- 
- void dl_server_stop(struct sched_dl_entity *dl_se)
- {
--	if (!dl_se->dl_runtime)
-+	if (!dl_server(dl_se) || !dl_server_active(dl_se))
- 		return;
- 
- 	dequeue_dl_entity(dl_se, DEQUEUE_SLEEP);
-@@ -1712,6 +1698,32 @@ void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
- 	dl_se->server_pick_task = pick_task;
- }
- 
-+void sched_init_dl_servers(void)
-+{
-+	int cpu;
-+	struct rq *rq;
-+	struct sched_dl_entity *dl_se;
-+
-+	for_each_online_cpu(cpu) {
-+		u64 runtime = 50 * NSEC_PER_MSEC;
-+		u64 period = 1000 * NSEC_PER_MSEC;
-+
-+		rq = cpu_rq(cpu);
-+
-+		guard(rq_lock_irq)(rq);
-+
-+		dl_se = &rq->fair_server;
-+
-+		WARN_ON(dl_server(dl_se));
-+
-+		dl_server_apply_params(dl_se, runtime, period, 1);
-+
-+		dl_se->dl_server = 1;
-+		dl_se->dl_defer = 1;
-+		setup_new_dl_entity(dl_se);
-+	}
-+}
-+
- void __dl_server_attach_root(struct sched_dl_entity *dl_se, struct rq *rq)
- {
- 	u64 new_bw = dl_se->dl_bw;
-diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index d6f82833f652..063f29a228ad 100644
---- a/kernel/sched/sched.h
-+++ b/kernel/sched/sched.h
-@@ -384,6 +384,7 @@ extern void dl_server_stop(struct sched_dl_entity *dl_se);
- extern void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
- 		    dl_server_has_tasks_f has_tasks,
- 		    dl_server_pick_f pick_task);
-+extern void sched_init_dl_servers(void);
- 
- extern void dl_server_update_idle_time(struct rq *rq,
- 		    struct task_struct *p);
--- 
-2.39.5
-
diff --git a/next/6.15/sched-deadline-less-agressive-dl_server-handling.patch b/next/6.15/sched-deadline-less-agressive-dl_server-handling.patch
deleted file mode 100644
index 0441721fab..0000000000
--- a/next/6.15/sched-deadline-less-agressive-dl_server-handling.patch
+++ /dev/null
@@ -1,163 +0,0 @@
-From c0a825f92d92fdccff156fd030efa068df138577 Mon Sep 17 00:00:00 2001
-From: Sasha Levin
-Date: Tue, 20 May 2025 11:19:30 +0200
-Subject: sched/deadline: Less agressive dl_server handling
-
-From: Peter Zijlstra
-
-[ Upstream commit cccb45d7c4295bbfeba616582d0249f2d21e6df5 ]
-
-Chris reported that commit 5f6bd380c7bd ("sched/rt: Remove default
-bandwidth control") caused a significant dip in his favourite
-benchmark of the day. Simply disabling dl_server cured things.
-
-His workload hammers the 0->1, 1->0 transitions, and the
-dl_server_{start,stop}() overhead kills it -- fairly obviously a bad
-idea in hind sight and all that.
-
-Change things around to only disable the dl_server when there has not
-been a fair task around for a whole period. Since the default period
-is 1 second, this ensures the benchmark never trips this, overhead
-gone.
-
-Fixes: 557a6bfc662c ("sched/fair: Add trivial fair server")
-Reported-by: Chris Mason
-Signed-off-by: Peter Zijlstra (Intel)
-Reviewed-by: Juri Lelli
-Acked-by: Juri Lelli
-Link: https://lkml.kernel.org/r/20250702121158.465086194@infradead.org
-Signed-off-by: Sasha Levin
----
- include/linux/sched.h   |  1 +
- kernel/sched/deadline.c | 25 ++++++++++++++++++++++---
- kernel/sched/fair.c     |  9 ---------
- 3 files changed, 23 insertions(+), 12 deletions(-)
-
-diff --git a/include/linux/sched.h b/include/linux/sched.h
-index f96ac1982893..1f92572b20c0 100644
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -702,6 +702,7 @@ struct sched_dl_entity {
- 	unsigned int	dl_defer	  : 1;
- 	unsigned int	dl_defer_armed	  : 1;
- 	unsigned int	dl_defer_running  : 1;
-+	unsigned int	dl_server_idle    : 1;
- 
- 	/*
- 	 * Bandwidth enforcement timer. Each -deadline task has its
-diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
-index 89019a140826..094134c9b135 100644
---- a/kernel/sched/deadline.c
-+++ b/kernel/sched/deadline.c
-@@ -1215,6 +1215,8 @@ static void __push_dl_task(struct rq *rq, struct rq_flags *rf)
- /* a defer timer will not be reset if the runtime consumed was < dl_server_min_res */
- static const u64 dl_server_min_res = 1 * NSEC_PER_MSEC;
- 
-+static bool dl_server_stopped(struct sched_dl_entity *dl_se);
-+
- static enum hrtimer_restart dl_server_timer(struct hrtimer *timer, struct sched_dl_entity *dl_se)
- {
- 	struct rq *rq = rq_of_dl_se(dl_se);
-@@ -1234,6 +1236,7 @@ static enum hrtimer_restart dl_server_timer(struct hrtimer *timer, struct sched_
- 
- 		if (!dl_se->server_has_tasks(dl_se)) {
- 			replenish_dl_entity(dl_se);
-+			dl_server_stopped(dl_se);
- 			return HRTIMER_NORESTART;
- 		}
- 
-@@ -1639,8 +1642,10 @@ void dl_server_update_idle_time(struct rq *rq, struct task_struct *p)
- void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec)
- {
- 	/* 0 runtime = fair server disabled */
--	if (dl_se->dl_runtime)
-+	if (dl_se->dl_runtime) {
-+		dl_se->dl_server_idle = 0;
- 		update_curr_dl_se(dl_se->rq, dl_se, delta_exec);
-+	}
- }
- 
- void dl_server_start(struct sched_dl_entity *dl_se)
-@@ -1663,7 +1668,7 @@ void dl_server_start(struct sched_dl_entity *dl_se)
- 		setup_new_dl_entity(dl_se);
- 	}
- 
--	if (!dl_se->dl_runtime)
-+	if (!dl_se->dl_runtime || dl_se->dl_server_active)
- 		return;
- 
- 	dl_se->dl_server_active = 1;
-@@ -1684,6 +1689,20 @@ void dl_server_stop(struct sched_dl_entity *dl_se)
- 	dl_se->dl_server_active = 0;
- }
- 
-+static bool dl_server_stopped(struct sched_dl_entity *dl_se)
-+{
-+	if (!dl_se->dl_server_active)
-+		return false;
-+
-+	if (dl_se->dl_server_idle) {
-+		dl_server_stop(dl_se);
-+		return true;
-+	}
-+
-+	dl_se->dl_server_idle = 1;
-+	return false;
-+}
-+
- void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
- 		    dl_server_has_tasks_f has_tasks,
- 		    dl_server_pick_f pick_task)
-@@ -2435,7 +2454,7 @@ static struct task_struct *__pick_task_dl(struct rq *rq)
- 		if (dl_server(dl_se)) {
- 			p = dl_se->server_pick_task(dl_se);
- 			if (!p) {
--				if (dl_server_active(dl_se)) {
-+				if (!dl_server_stopped(dl_se)) {
- 					dl_se->dl_yielded = 1;
- 					update_curr_dl_se(rq, dl_se, 0);
- 				}
-diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index 138d9f4658d5..9746eff2eff7 100644
---- a/kernel/sched/fair.c
-+++ b/kernel/sched/fair.c
-@@ -5886,7 +5886,6 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
- 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
- 	struct sched_entity *se;
- 	long queued_delta, runnable_delta, idle_delta, dequeue = 1;
--	long rq_h_nr_queued = rq->cfs.h_nr_queued;
- 
- 	raw_spin_lock(&cfs_b->lock);
- 	/* This will start the period timer if necessary */
-@@ -5970,10 +5969,6 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
- 
- 	/* At this point se is NULL and we are at root level*/
- 	sub_nr_running(rq, queued_delta);
--
--	/* Stop the fair server if throttling resulted in no runnable tasks */
--	if (rq_h_nr_queued && !rq->cfs.h_nr_queued)
--		dl_server_stop(&rq->fair_server);
- done:
- 	/*
- 	 * Note: distribution will already see us throttled via the
-@@ -7067,7 +7062,6 @@ static void set_next_buddy(struct sched_entity *se);
- static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
- {
- 	bool was_sched_idle = sched_idle_rq(rq);
--	int rq_h_nr_queued = rq->cfs.h_nr_queued;
- 	bool task_sleep = flags & DEQUEUE_SLEEP;
- 	bool task_delayed = flags & DEQUEUE_DELAYED;
- 	struct task_struct *p = NULL;
-@@ -7151,9 +7145,6 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
- 
- 	sub_nr_running(rq, h_nr_queued);
- 
--	if (rq_h_nr_queued && !rq->cfs.h_nr_queued)
--		dl_server_stop(&rq->fair_server);
--
- 	/* balance early to pull high priority tasks */
- 	if (unlikely(!was_sched_idle && sched_idle_rq(rq)))
- 		rq->next_balance = jiffies;
--- 
-2.39.5
-
-- 
2.47.3