sched_ext: Use resched_cpu() instead of resched_curr() in bypass_lb_node()
Author:     Zqiang <qiang.zhang@linux.dev>
AuthorDate: Mon, 22 Dec 2025 11:53:17 +0000 (19:53 +0800)
Commit:     Tejun Heo <tj@kernel.org>
CommitDate: Tue, 23 Dec 2025 03:51:51 +0000 (17:51 -1000)
On PREEMPT_RT kernels, scx_bypass_lb_timerfn() runs in the preemptible
per-CPU ktimer kthread context, which means the following scenario can
occur (on x86):

       cpu1                              cpu2
                                    ktimer kthread:
                                    ->scx_bypass_lb_timerfn()
                                      ->bypass_lb_node()
                                        ->for_each_cpu(cpu, resched_mask)

  migration/1:                      preempted by migration/2:
  multi_cpu_stop()                    multi_cpu_stop()
  ->take_cpu_down()
    ->__cpu_disable()
      ->set cpu1 offline

                                        ->rq1 = cpu_rq(cpu1)
                                        ->resched_curr(rq1)
                                          ->smp_send_reschedule(cpu1)
                                            ->native_smp_send_reschedule(cpu1)
                                              ->if (unlikely(cpu_is_offline(cpu))) {
                                                        WARN(1, "sched: Unexpected reschedule
                                                             of offline CPU#%d!\n", cpu);
                                                        return;
                                                }
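
For context, resched_curr() must be called with the rq lock held and, when
the target CPU is remote, sends the reschedule IPI without checking whether
that CPU is still online. A simplified sketch of resched_curr() from
kernel/sched/core.c follows; the exact code differs across kernel versions:

	/* Simplified sketch; exact code differs across kernel versions. */
	void resched_curr(struct rq *rq)
	{
		struct task_struct *curr = rq->curr;
		int cpu = cpu_of(rq);

		lockdep_assert_rq_held(rq);

		if (test_tsk_need_resched(curr))
			return;

		if (cpu == smp_processor_id()) {
			set_tsk_need_resched(curr);
			set_preempt_need_resched();
			return;
		}

		/* No cpu_online() check: the IPI is sent unconditionally. */
		if (set_nr_and_not_polling(curr))
			smp_send_reschedule(cpu);
	}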

This commit therefore uses resched_cpu() in place of resched_curr() in
bypass_lb_node() to avoid sending IPIs to offline CPUs.
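
resched_cpu() avoids the problem because it takes the rq lock itself and
only forwards to resched_curr() when the target CPU is online. Paraphrased
from kernel/sched/core.c; the exact form may differ by kernel version:

	/* Paraphrased sketch; exact form may differ by kernel version. */
	void resched_cpu(int cpu)
	{
		struct rq *rq = cpu_rq(cpu);
		unsigned long flags;

		raw_spin_rq_lock_irqsave(rq, flags);
		if (cpu_online(cpu) || cpu == smp_processor_id())
			resched_curr(rq);
		raw_spin_rq_unlock_irqrestore(rq, flags);
	}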

Signed-off-by: Zqiang <qiang.zhang@linux.dev>
Reviewed-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
kernel/sched/ext.c

index 5ebf8a7408478fd28e1f1dc5228236277451af7f..8f6d8d7f895ccc494414e971d370229da7c7ffe1 100644
@@ -3956,13 +3956,8 @@ static void bypass_lb_node(struct scx_sched *sch, int node)
                                             nr_donor_target, nr_target);
        }
 
-       for_each_cpu(cpu, resched_mask) {
-               struct rq *rq = cpu_rq(cpu);
-
-               raw_spin_rq_lock_irq(rq);
-               resched_curr(rq);
-               raw_spin_rq_unlock_irq(rq);
-       }
+       for_each_cpu(cpu, resched_mask)
+               resched_cpu(cpu);
 
        for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
                u32 nr = READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr);