sched: Provide and use set_need_resched_current()
author	Peter Zijlstra <peterz@infradead.org>
	Sun, 16 Nov 2025 20:51:07 +0000 (21:51 +0100)
committer	Thomas Gleixner <tglx@linutronix.de>
	Thu, 20 Nov 2025 21:26:09 +0000 (22:26 +0100)
set_tsk_need_resched(current) requires set_preempt_need_resched() to
work correctly outside of the scheduler.

Provide set_need_resched_current(), which wraps this correctly, and replace
all the open-coded instances.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://patch.msgid.link/20251116174750.665769842@linutronix.de
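
The underlying issue: set_tsk_need_resched() only sets TIF_NEED_RESCHED on the
task, while set_preempt_need_resched() folds the request into the current
CPU's preempt_count so that preempt_enable() and interrupt-return paths
actually act on it. The new helper pairs the two under a lockdep assertion; an
annotated sketch of the helper added to include/linux/sched.h below (the
comments are editorial, the code matches the hunk):

	static inline void set_need_resched_current(void)
	{
		/* Caller must have interrupts disabled: the preempt_count
		 * being updated is the current CPU's, so current must not
		 * migrate in between the two operations. */
		lockdep_assert_irqs_disabled();
		/* Mark the running task: sets TIF_NEED_RESCHED. */
		set_tsk_need_resched(current);
		/* Fold the request into this CPU's preempt_count so that
		 * preempt_enable() / IRQ-exit checks observe it. */
		set_preempt_need_resched();
	}
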
arch/s390/mm/pfault.c
include/linux/sched.h
kernel/rcu/tiny.c
kernel/rcu/tree.c
kernel/rcu/tree_exp.h
kernel/rcu/tree_plugin.h
kernel/rcu/tree_stall.h

index e6175d75e4b0790a456ab826d89382cccc57aeec..2f829448c719e3ada1fd9b1e7cd391cb61bac869 100644 (file)
@@ -199,8 +199,7 @@ block:
                         * return to userspace schedule() to block.
                         */
                        __set_current_state(TASK_UNINTERRUPTIBLE);
-                       set_tsk_need_resched(tsk);
-                       set_preempt_need_resched();
+                       set_need_resched_current();
                }
        }
 out:
index bb436ee1942d30387a0f44586bb8b440b45ddfb0..021d05aa941a3832dced893e910f55e99650e599 100644 (file)
@@ -2058,6 +2058,13 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
        return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
 }
 
+static inline void set_need_resched_current(void)
+{
+       lockdep_assert_irqs_disabled();
+       set_tsk_need_resched(current);
+       set_preempt_need_resched();
+}
+
 /*
  * cond_resched() and cond_resched_lock(): latency reduction via
  * explicit rescheduling in places that are safe. The return
index c1ebfd51768ba4fe8a150fd9d807c8e49abfa705..585cade21010e75c011313557fb8d5f8239bbc91 100644 (file)
@@ -70,12 +70,10 @@ void rcu_qs(void)
  */
 void rcu_sched_clock_irq(int user)
 {
-       if (user) {
+       if (user)
                rcu_qs();
-       } else if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
-               set_tsk_need_resched(current);
-               set_preempt_need_resched();
-       }
+       else if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail)
+               set_need_resched_current();
 }
 
 /*
index 8293bae1dec169013b917a04a88fef1c7554daa9..85b82a7007b9c050907085041f8b08f2d2c636d4 100644 (file)
@@ -2696,10 +2696,8 @@ void rcu_sched_clock_irq(int user)
        /* The load-acquire pairs with the store-release setting to true. */
        if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
                /* Idle and userspace execution already are quiescent states. */
-               if (!rcu_is_cpu_rrupt_from_idle() && !user) {
-                       set_tsk_need_resched(current);
-                       set_preempt_need_resched();
-               }
+               if (!rcu_is_cpu_rrupt_from_idle() && !user)
+                       set_need_resched_current();
                __this_cpu_write(rcu_data.rcu_urgent_qs, false);
        }
        rcu_flavor_sched_clock_irq(user);
@@ -2824,7 +2822,6 @@ static void strict_work_handler(struct work_struct *work)
 /* Perform RCU core processing work for the current CPU.  */
 static __latent_entropy void rcu_core(void)
 {
-       unsigned long flags;
        struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
        struct rcu_node *rnp = rdp->mynode;
 
@@ -2837,8 +2834,8 @@ static __latent_entropy void rcu_core(void)
        if (IS_ENABLED(CONFIG_PREEMPT_COUNT) && (!(preempt_count() & PREEMPT_MASK))) {
                rcu_preempt_deferred_qs(current);
        } else if (rcu_preempt_need_deferred_qs(current)) {
-               set_tsk_need_resched(current);
-               set_preempt_need_resched();
+               guard(irqsave)();
+               set_need_resched_current();
        }
 
        /* Update RCU state based on any recent quiescent states. */
@@ -2847,10 +2844,9 @@ static __latent_entropy void rcu_core(void)
        /* No grace period and unregistered callbacks? */
        if (!rcu_gp_in_progress() &&
            rcu_segcblist_is_enabled(&rdp->cblist) && !rcu_rdp_is_offloaded(rdp)) {
-               local_irq_save(flags);
+               guard(irqsave)();
                if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
                        rcu_accelerate_cbs_unlocked(rnp, rdp);
-               local_irq_restore(flags);
        }
 
        rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
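
The rcu_core() conversion above is the one place where the helper's lockdep
assertion changes the surrounding code: the deferred-QS branch previously
called the two functions without disabling interrupts, so it now takes
guard(irqsave)() before calling the helper, and the existing
local_irq_save()/local_irq_restore() pair around the callback-acceleration
check becomes a scoped guard as well, which is why the flags local goes away.
Conceptually (an editorial sketch, not part of the patch), the scoped guard
behaves like:

	{
		guard(irqsave)();	/* local_irq_save() here ...             */
		/* ... statements run with interrupts disabled ... */
	}				/* ... local_irq_restore() at scope exit */
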
index 6058a734090c1d9bdda1a10714ad28b1b7a8c101..96c49c56fc14a44638d3d67a0d9c382942700e0a 100644 (file)
@@ -729,8 +729,7 @@ static void rcu_exp_need_qs(void)
        __this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
        /* Store .exp before .rcu_urgent_qs. */
        smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
-       set_tsk_need_resched(current);
-       set_preempt_need_resched();
+       set_need_resched_current();
 }
 
 #ifdef CONFIG_PREEMPT_RCU
index d85763336b3c0f1563f29df136fae1f403651bd1..dbe2d02be824a02997648056528534ea3c9083c5 100644 (file)
@@ -753,8 +753,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
                        // Also if no expediting and no possible deboosting,
                        // slow is OK.  Plus nohz_full CPUs eventually get
                        // tick enabled.
-                       set_tsk_need_resched(current);
-                       set_preempt_need_resched();
+                       set_need_resched_current();
                        if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled &&
                            needs_exp && rdp->defer_qs_iw_pending != DEFER_QS_PENDING &&
                            cpu_online(rdp->cpu)) {
@@ -813,10 +812,8 @@ static void rcu_flavor_sched_clock_irq(int user)
        if (rcu_preempt_depth() > 0 ||
            (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
                /* No QS, force context switch if deferred. */
-               if (rcu_preempt_need_deferred_qs(t)) {
-                       set_tsk_need_resched(t);
-                       set_preempt_need_resched();
-               }
+               if (rcu_preempt_need_deferred_qs(t))
+                       set_need_resched_current();
        } else if (rcu_preempt_need_deferred_qs(t)) {
                rcu_preempt_deferred_qs(t); /* Report deferred QS. */
                return;
index d16afeb1150624ca238fc46cb3672143ec2b5ca0..b67532cb877050a882f2c1c6c198002d7a3c7916 100644 (file)
@@ -763,8 +763,7 @@ static void print_cpu_stall(unsigned long gp_seq, unsigned long gps)
         * progress and it could be we're stuck in kernel space without context
         * switches for an entirely unreasonable amount of time.
         */
-       set_tsk_need_resched(current);
-       set_preempt_need_resched();
+       set_need_resched_current();
 }
 
 static bool csd_lock_suppress_rcu_stall;