--- /dev/null
+From b23decf8ac9102fc52c4de5196f4dc0a5f3eb80b Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 28 Oct 2024 11:43:42 +0100
+Subject: sched: Initialize idle tasks only once
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit b23decf8ac9102fc52c4de5196f4dc0a5f3eb80b upstream.
+
+Idle tasks are initialized via __sched_fork() twice:
+
+ fork_idle()
+ copy_process()
+ sched_fork()
+ __sched_fork()
+ init_idle()
+ __sched_fork()
+
+Instead of cleaning this up, sched_ext hacked around it. Even when analysis
+and a solution were provided in a discussion, nobody cared to clean this up.
+
+init_idle() is also invoked from sched_init() to initialize the boot CPU's
+idle task, which requires the __sched_fork() invocation. But this can be
+trivially solved by invoking __sched_fork() before init_idle() in
+sched_init() and removing the __sched_fork() invocation from init_idle().
+
+Do so and clean up the comments explaining this historical leftover.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lore.kernel.org/r/20241028103142.359584747@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/core.c | 12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -4424,7 +4424,8 @@ int wake_up_state(struct task_struct *p,
+ * Perform scheduler related setup for a newly forked process p.
+ * p is forked by current.
+ *
+- * __sched_fork() is basic setup used by init_idle() too:
++ * __sched_fork() is basic setup which is also used by sched_init() to
++ * initialize the boot CPU's idle task.
+ */
+ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
+ {
+@@ -7683,8 +7684,6 @@ void __init init_idle(struct task_struct
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long flags;
+
+- __sched_fork(0, idle);
+-
+ raw_spin_lock_irqsave(&idle->pi_lock, flags);
+ raw_spin_rq_lock(rq);
+
+@@ -7699,10 +7698,8 @@ void __init init_idle(struct task_struct
+
+ #ifdef CONFIG_SMP
+ /*
+- * It's possible that init_idle() gets called multiple times on a task,
+- * in that case do_set_cpus_allowed() will not do the right thing.
+- *
+- * And since this is boot we can forgo the serialization.
++ * No validation and serialization required at boot time and for
++ * setting up the idle tasks of not yet online CPUs.
+ */
+ set_cpus_allowed_common(idle, &ac);
+ #endif
+@@ -8546,6 +8543,7 @@ void __init sched_init(void)
+ * but because we are the idle thread, we just pick up running again
+ * when this runqueue becomes "idle".
+ */
++ __sched_fork(0, current);
+ init_idle(current, smp_processor_id());
+
+ calc_load_update = jiffies + LOAD_FREQ;