sched_ext: Replace rq_lock() with raw_spin_rq_lock() in scx_ops_bypass()
author    Changwoo Min <changwoo@igalia.com>
          Wed, 8 Jan 2025 15:08:06 +0000 (00:08 +0900)
committer Tejun Heo <tj@kernel.org>
          Wed, 8 Jan 2025 16:48:53 +0000 (06:48 -1000)
scx_ops_bypass() iterates over all CPUs to re-enqueue all the scx
tasks. For each CPU, it acquires the rq lock using rq_lock(),
regardless of whether the CPU is offline or is currently running a
task in a higher scheduler class (e.g., deadline). However, rq_lock()
is meant to be used on online CPUs, and using it here can trigger a
spurious warning in rq_pin_lock(). Therefore, replace rq_lock() with
raw_spin_rq_lock() in scx_ops_bypass().
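
For reference, the relevant difference between the two locking
helpers, condensed from kernel/sched/sched.h (an illustrative sketch,
not a verbatim copy): rq_lock() takes the rq's raw spinlock and then
pins the rq via rq_pin_lock(), whose CONFIG_SCHED_DEBUG sanity check
on rq->balance_callback is what produces the warning quoted below;
raw_spin_rq_lock() only takes the raw spinlock and skips the pinning.

	/* Condensed from kernel/sched/sched.h -- illustrative sketch. */
	static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
	{
		rf->cookie = lockdep_pin_lock(__rq_lockp(rq));
	#ifdef CONFIG_SCHED_DEBUG
		/* clock_update_flags handling elided */
		/* The check that fires in the log below: */
		SCHED_WARN_ON(rq->balance_callback &&
			      rq->balance_callback != &balance_push_callback);
	#endif
	}

	static inline void rq_lock(struct rq *rq, struct rq_flags *rf)
		__acquires(rq->lock)
	{
		raw_spin_rq_lock(rq);	/* take the raw spinlock ... */
		rq_pin_lock(rq, rf);	/* ... and pin the rq */
	}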

Without this change, we observe the following warning:

===== START =====
[    6.615205] rq->balance_callback && rq->balance_callback != &balance_push_callback
[    6.615208] WARNING: CPU: 2 PID: 0 at kernel/sched/sched.h:1730 __schedule+0x1130/0x1c90
=====  END  =====

Fixes: 0e7ffff1b811 ("scx: Fix raciness in scx_ops_bypass()")
Signed-off-by: Changwoo Min <changwoo@igalia.com>
Acked-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 335371cc2cbd1cda0af69ee540a0b2184a10da15..11a0e1a9d86ef680211167ef62042b7b747ad305 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -4747,10 +4747,9 @@ static void scx_ops_bypass(bool bypass)
         */
        for_each_possible_cpu(cpu) {
                struct rq *rq = cpu_rq(cpu);
-               struct rq_flags rf;
                struct task_struct *p, *n;
 
-               rq_lock(rq, &rf);
+               raw_spin_rq_lock(rq);
 
                if (bypass) {
                        WARN_ON_ONCE(rq->scx.flags & SCX_RQ_BYPASSING);
@@ -4766,7 +4765,7 @@ static void scx_ops_bypass(bool bypass)
                 * sees scx_rq_bypassing() before moving tasks to SCX.
                 */
                if (!scx_enabled()) {
-                       rq_unlock(rq, &rf);
+                       raw_spin_rq_unlock(rq);
                        continue;
                }
 
@@ -4786,10 +4785,11 @@ static void scx_ops_bypass(bool bypass)
                        sched_enq_and_set_task(&ctx);
                }
 
-               rq_unlock(rq, &rf);
-
                /* resched to restore ticks and idle state */
-               resched_cpu(cpu);
+               if (cpu_online(cpu) || cpu == smp_processor_id())
+                       resched_curr(rq);
+
+               raw_spin_rq_unlock(rq);
        }
 
        atomic_dec(&scx_ops_breather_depth);
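
A note on the last hunk above: resched_cpu() acquires the rq lock
itself, so it can no longer be called once the unlock is moved after
the resched; the patch therefore open-codes its body (the
cpu_online() guard plus resched_curr()) under the already-held lock.
Condensed from kernel/sched/core.c (illustrative; details may vary
between kernel versions):

	/* Condensed from kernel/sched/core.c -- illustrative sketch. */
	void resched_cpu(int cpu)
	{
		struct rq *rq = cpu_rq(cpu);
		unsigned long flags;

		/* Takes the rq lock itself; calling this with the same
		 * rq lock already held would self-deadlock. */
		raw_spin_rq_lock_irqsave(rq, flags);
		if (cpu_online(cpu) || cpu == smp_processor_id())
			resched_curr(rq);
		raw_spin_rq_unlock_irqrestore(rq, flags);
	}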