git.ipfire.org Git - thirdparty/linux.git/commitdiff
Merge branch 'for-7.0-fixes' into for-7.1
author Tejun Heo <tj@kernel.org>
Mon, 30 Mar 2026 18:52:33 +0000 (08:52 -1000)
committer Tejun Heo <tj@kernel.org>
Mon, 30 Mar 2026 19:02:05 +0000 (09:02 -1000)
Conflict in kernel/sched/ext.c init_sched_ext_class() between:

  415cb193bb97 ("sched_ext: Fix SCX_KICK_WAIT deadlock by deferring wait
  to balance callback")

which adds cpus_to_sync cpumask allocation, and:

  84b1a0ea0b7c ("sched_ext: Implement scx_bpf_dsq_reenq() for user DSQs")
  8c1b9453fde6 ("sched_ext: Convert deferred_reenq_locals from llist to
  regular list")

which add deferred_reenq init code at the same location. Both are
independent additions. Include both.

Signed-off-by: Tejun Heo <tj@kernel.org>
1  2 
kernel/sched/ext.c
kernel/sched/ext_idle.c
kernel/sched/sched.h
tools/testing/selftests/sched_ext/Makefile

index bfe923b7ffe09c4bbf8f3728b71b564ce6d90f41,d5bdcdb3f70041fc77e0c0a2be98767a237485dc..9628c64e5592ae8285aece5bbd977738b0ae57c8
@@@ -3016,9 -2402,9 +3016,9 @@@ static void switch_class(struct rq *rq
  static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
                              struct task_struct *next)
  {
 -      struct scx_sched *sch = scx_root;
 +      struct scx_sched *sch = scx_task_sched(p);
  
-       /* see kick_cpus_irq_workfn() */
+       /* see kick_sync_wait_bal_cb() */
        smp_store_release(&rq->scx.kick_sync, rq->scx.kick_sync + 1);
  
        update_curr_scx(rq);
@@@ -7780,9 -5838,7 +7824,10 @@@ void __init init_sched_ext_class(void
                BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL, n));
                BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_preempt, GFP_KERNEL, n));
                BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_wait, GFP_KERNEL, n));
+               BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_sync, GFP_KERNEL, n));
 +              raw_spin_lock_init(&rq->scx.deferred_reenq_lock);
 +              INIT_LIST_HEAD(&rq->scx.deferred_reenq_locals);
 +              INIT_LIST_HEAD(&rq->scx.deferred_reenq_users);
                rq->scx.deferred_irq_work = IRQ_WORK_INIT_HARD(deferred_irq_workfn);
                rq->scx.kick_cpus_irq_work = IRQ_WORK_INIT_HARD(kick_cpus_irq_workfn);
  
Simple merge
index 5b93f6190d313c29ab60793532be38b1c6b5f0e6,1ef9ba480f51d1224c21da7a6f93b16f809b2069..ae0783e27c1ece4b6670329185255a45bfa45e8e
@@@ -806,17 -805,15 +806,20 @@@ struct scx_rq 
        cpumask_var_t           cpus_to_kick_if_idle;
        cpumask_var_t           cpus_to_preempt;
        cpumask_var_t           cpus_to_wait;
+       cpumask_var_t           cpus_to_sync;
+       bool                    kick_sync_pending;
        unsigned long           kick_sync;
 -      local_t                 reenq_local_deferred;
 +
 +      struct task_struct      *sub_dispatch_prev;
 +
 +      raw_spinlock_t          deferred_reenq_lock;
 +      u64                     deferred_reenq_locals_seq;
 +      struct list_head        deferred_reenq_locals;  /* scheds requesting reenq of local DSQ */
 +      struct list_head        deferred_reenq_users;   /* user DSQs requesting reenq */
        struct balance_callback deferred_bal_cb;
+       struct balance_callback kick_sync_bal_cb;
        struct irq_work         deferred_irq_work;
        struct irq_work         kick_cpus_irq_work;
 -      struct scx_dispatch_q   bypass_dsq;
  };
  #endif /* CONFIG_SCHED_CLASS_EXT */