--- /dev/null
+From 55ed11b181c43d81ce03b50209e4e7c4a14ba099 Mon Sep 17 00:00:00 2001
+From: Andrea Righi <arighi@nvidia.com>
+Date: Sat, 20 Sep 2025 15:26:21 +0200
+Subject: sched_ext: idle: Handle migration-disabled tasks in BPF code
+
+From: Andrea Righi <arighi@nvidia.com>
+
+commit 55ed11b181c43d81ce03b50209e4e7c4a14ba099 upstream.
+
+When the scx_bpf_select_cpu_dfl() and scx_bpf_select_cpu_and() kfuncs
+are invoked outside of ops.select_cpu(), we can't rely on
+@p->migration_disabled to determine whether migration is disabled for
+the task @p.
+
+In fact, migration is always disabled for the current task while running
+BPF code: __bpf_prog_enter() disables migration and __bpf_prog_exit()
+re-enables it.
+
+To handle this, when @p->migration_disabled == 1, check whether @p is
+the current task. If so, migration was not disabled before entering the
+callback; otherwise, migration was already disabled.
+
+This ensures correct idle CPU selection in all cases. The behavior of
+ops.select_cpu() remains unchanged, because this callback is never
+invoked for the current task and migration-disabled tasks are always
+excluded.
+
+Example: without this change, scx_bpf_select_cpu_and() called from
+ops.enqueue() always returns -EBUSY; with this change applied, it
+correctly returns an idle CPU when one is available.
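+
+As an illustration only (not part of this change), here is a minimal
+sketch of a BPF scheduler doing idle CPU selection from ops.enqueue().
+The op name and dispatch policy are assumptions and rely on the scx
+common BPF headers (BPF_STRUCT_OPS, scx_bpf_dsq_insert(), etc.); only
+the scx_bpf_select_cpu_and() call is what this fix affects:
+
+  void BPF_STRUCT_OPS(sketch_enqueue, struct task_struct *p, u64 enq_flags)
+  {
+          /* Pick an idle CPU allowed for @p, preferring its current CPU */
+          s32 cpu = scx_bpf_select_cpu_and(p, scx_bpf_task_cpu(p), 0,
+                                           p->cpus_ptr, 0);
+
+          if (cpu >= 0) {
+                  /* Queue @p on the idle CPU's local DSQ and wake it up */
+                  scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | cpu,
+                                     SCX_SLICE_DFL, enq_flags);
+                  scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
+                  return;
+          }
+
+          /* No idle CPU found (negative return): fall back to the global DSQ */
+          scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+  }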
+
+Fixes: 06efc9fe0b8de ("sched_ext: idle: Handle migration-disabled tasks in idle selection")
+Cc: stable@vger.kernel.org # v6.16+
+Signed-off-by: Andrea Righi <arighi@nvidia.com>
+Acked-by: Changwoo Min <changwoo@igalia.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/ext_idle.c | 28 +++++++++++++++++++++++++++-
+ 1 file changed, 27 insertions(+), 1 deletion(-)
+
+--- a/kernel/sched/ext_idle.c
++++ b/kernel/sched/ext_idle.c
+@@ -870,6 +870,32 @@ static bool check_builtin_idle_enabled(v
+ return false;
+ }
+
++/*
++ * Determine whether @p is a migration-disabled task in the context of BPF
++ * code.
++ *
++ * We can't simply check whether @p->migration_disabled is set in a
++ * sched_ext callback, because migration is always disabled for the current
++ * task while running BPF code.
++ *
++ * The prolog (__bpf_prog_enter) and epilog (__bpf_prog_exit) respectively
++ * disable and re-enable migration. For this reason, the current task
++ * inside a sched_ext callback is always a migration-disabled task.
++ *
++ * Therefore, when @p->migration_disabled == 1, check whether @p is the
++ * current task or not: if it is, then migration was not disabled before
++ * entering the callback, otherwise migration was disabled.
++ *
++ * Returns true if @p is migration-disabled, false otherwise.
++ */
++static bool is_bpf_migration_disabled(const struct task_struct *p)
++{
++ if (p->migration_disabled == 1)
++ return p != current;
++ else
++ return p->migration_disabled;
++}
++
+ static s32 select_cpu_from_kfunc(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
+ const struct cpumask *allowed, u64 flags)
+ {
+@@ -913,7 +939,7 @@ static s32 select_cpu_from_kfunc(struct
+ * selection optimizations and simply check whether the previously
+ * used CPU is idle and within the allowed cpumask.
+ */
+- if (p->nr_cpus_allowed == 1 || is_migration_disabled(p)) {
++ if (p->nr_cpus_allowed == 1 || is_bpf_migration_disabled(p)) {
+ if (cpumask_test_cpu(prev_cpu, allowed ?: p->cpus_ptr) &&
+ scx_idle_test_and_clear_cpu(prev_cpu))
+ cpu = prev_cpu;
--- /dev/null
+From 353656eb84fef8ffece3b1be4345cbacbbb5267f Mon Sep 17 00:00:00 2001
+From: Andrea Righi <arighi@nvidia.com>
+Date: Wed, 4 Jun 2025 16:33:12 +0200
+Subject: sched_ext: idle: Make local functions static in ext_idle.c
+
+From: Andrea Righi <arighi@nvidia.com>
+
+commit 353656eb84fef8ffece3b1be4345cbacbbb5267f upstream.
+
+Functions that are only used within ext_idle.c can be marked as static
+to limit their scope.
+
+No functional changes.
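+
+As an illustration only (not part of this change), marking a definition
+static gives it internal linkage, so the prototype can be dropped from
+ext_idle.h and any (hypothetical) caller outside ext_idle.c would no
+longer build:
+
+  /* ext_idle.c: visible only within this translation unit */
+  static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed,
+                               int node, u64 flags);
+
+  /* any other file: the shared prototype is gone, so this call fails to build */
+  cpu = scx_pick_idle_cpu(p->cpus_ptr, NUMA_NO_NODE, 0);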
+
+Signed-off-by: Andrea Righi <arighi@nvidia.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/ext_idle.c | 24 +++++++++++++++++-------
+ kernel/sched/ext_idle.h | 7 -------
+ 2 files changed, 17 insertions(+), 14 deletions(-)
+
+--- a/kernel/sched/ext_idle.c
++++ b/kernel/sched/ext_idle.c
+@@ -75,7 +75,7 @@ static int scx_cpu_node_if_enabled(int c
+ return cpu_to_node(cpu);
+ }
+
+-bool scx_idle_test_and_clear_cpu(int cpu)
++static bool scx_idle_test_and_clear_cpu(int cpu)
+ {
+ int node = scx_cpu_node_if_enabled(cpu);
+ struct cpumask *idle_cpus = idle_cpumask(node)->cpu;
+@@ -198,7 +198,7 @@ pick_idle_cpu_from_online_nodes(const st
+ /*
+ * Find an idle CPU in the system, starting from @node.
+ */
+-s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, int node, u64 flags)
++static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, int node, u64 flags)
+ {
+ s32 cpu;
+
+@@ -794,6 +794,16 @@ static void reset_idle_masks(struct sche
+ cpumask_and(idle_cpumask(node)->smt, cpu_online_mask, node_mask);
+ }
+ }
++#else /* !CONFIG_SMP */
++static bool scx_idle_test_and_clear_cpu(int cpu)
++{
++ return -EBUSY;
++}
++
++static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, int node, u64 flags)
++{
++ return -EBUSY;
++}
+ #endif /* CONFIG_SMP */
+
+ void scx_idle_enable(struct sched_ext_ops *ops)
+@@ -860,8 +870,8 @@ static bool check_builtin_idle_enabled(v
+ return false;
+ }
+
+-s32 select_cpu_from_kfunc(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
+- const struct cpumask *allowed, u64 flags)
++static s32 select_cpu_from_kfunc(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
++ const struct cpumask *allowed, u64 flags)
+ {
+ struct rq *rq;
+ struct rq_flags rf;
+@@ -1125,10 +1135,10 @@ __bpf_kfunc bool scx_bpf_test_and_clear_
+ if (!check_builtin_idle_enabled())
+ return false;
+
+- if (kf_cpu_valid(cpu, NULL))
+- return scx_idle_test_and_clear_cpu(cpu);
+- else
++ if (!kf_cpu_valid(cpu, NULL))
+ return false;
++
++ return scx_idle_test_and_clear_cpu(cpu);
+ }
+
+ /**
+--- a/kernel/sched/ext_idle.h
++++ b/kernel/sched/ext_idle.h
+@@ -15,16 +15,9 @@ struct sched_ext_ops;
+ #ifdef CONFIG_SMP
+ void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops);
+ void scx_idle_init_masks(void);
+-bool scx_idle_test_and_clear_cpu(int cpu);
+-s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, int node, u64 flags);
+ #else /* !CONFIG_SMP */
+ static inline void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops) {}
+ static inline void scx_idle_init_masks(void) {}
+-static inline bool scx_idle_test_and_clear_cpu(int cpu) { return false; }
+-static inline s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, int node, u64 flags)
+-{
+- return -EBUSY;
+-}
+ #endif /* CONFIG_SMP */
+
+ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,