sched/wake_q: Add helper to call wake_up_q after unlock with preemption disabled
author    John Stultz <jstultz@google.com>
          Tue, 17 Dec 2024 04:07:35 +0000 (20:07 -0800)
committer Peter Zijlstra <peterz@infradead.org>
          Fri, 20 Dec 2024 14:31:21 +0000 (15:31 +0100)
A common pattern seen when wake_qs are used to defer a wakeup
until after a lock is released is something like:
  preempt_disable();
  raw_spin_unlock(lock);
  wake_up_q(wake_q);
  preempt_enable();

So create some raw_spin_unlock*_wake() helper functions to clean
this up.
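
For example, the futex PI conversion in this patch goes from:

  preempt_disable();
  raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
  wake_up_q(&wake_q);
  preempt_enable();

to the single call:

  raw_spin_unlock_irq_wake(&q.pi_state->pi_mutex.wait_lock, &wake_q);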

Applies on top of the fix I submitted here:
 https://lore.kernel.org/lkml/20241212222138.2400498-1-jstultz@google.com/

NOTE: I recognise the unlock()/unlock_irq()/unlock_irqrestore()
variants create some duplication of their own, which we could avoid
with a macro that generates the three near-identical functions (see
the sketch below), but I often dislike how such generation macros
make finding the actual implementation harder, so I left the three
functions as is. If folks would prefer otherwise, let me know and
I'll switch it.
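
For comparison, a generator-macro version might look something like
the sketch below; the DEFINE_RAW_SPIN_UNLOCK_WAKE() name and shape
are illustrative only and not part of this patch:

  /* Hypothetical generator macro -- not part of this patch */
  #define DEFINE_RAW_SPIN_UNLOCK_WAKE(name, params, unlock_call) \
  static inline void name params \
  { \
          guard(preempt)(); \
          unlock_call; \
          if (wake_q) { \
                  wake_up_q(wake_q); \
                  wake_q_init(wake_q); \
          } \
  }

  DEFINE_RAW_SPIN_UNLOCK_WAKE(raw_spin_unlock_wake,
          (raw_spinlock_t *lock, struct wake_q_head *wake_q),
          raw_spin_unlock(lock))
  DEFINE_RAW_SPIN_UNLOCK_WAKE(raw_spin_unlock_irq_wake,
          (raw_spinlock_t *lock, struct wake_q_head *wake_q),
          raw_spin_unlock_irq(lock))
  DEFINE_RAW_SPIN_UNLOCK_WAKE(raw_spin_unlock_irqrestore_wake,
          (raw_spinlock_t *lock, unsigned long flags, struct wake_q_head *wake_q),
          raw_spin_unlock_irqrestore(lock, flags))

Grepping for raw_spin_unlock_irq_wake() would then only turn up the
macro invocations rather than a readable definition, which is the
concern above.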

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: John Stultz <jstultz@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20241217040803.243420-1-jstultz@google.com
include/linux/sched/wake_q.h
kernel/futex/pi.c
kernel/locking/mutex.c
kernel/locking/rtmutex.c

diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h
index 06cd8fb2f40982397ac9fb42a43128bde534dcee..0f28b4623ad45b90d91475d3729ca231bc416304 100644
--- a/include/linux/sched/wake_q.h
+++ b/include/linux/sched/wake_q.h
@@ -63,4 +63,38 @@ extern void wake_q_add(struct wake_q_head *head, struct task_struct *task);
 extern void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task);
 extern void wake_up_q(struct wake_q_head *head);
 
+/* Spin unlock helpers to unlock and call wake_up_q with preempt disabled */
+static inline
+void raw_spin_unlock_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
+{
+       guard(preempt)();
+       raw_spin_unlock(lock);
+       if (wake_q) {
+               wake_up_q(wake_q);
+               wake_q_init(wake_q);
+       }
+}
+
+static inline
+void raw_spin_unlock_irq_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
+{
+       guard(preempt)();
+       raw_spin_unlock_irq(lock);
+       if (wake_q) {
+               wake_up_q(wake_q);
+               wake_q_init(wake_q);
+       }
+}
+
+static inline
+void raw_spin_unlock_irqrestore_wake(raw_spinlock_t *lock, unsigned long flags,
+                                    struct wake_q_head *wake_q)
+{
+       guard(preempt)();
+       raw_spin_unlock_irqrestore(lock, flags);
+       if (wake_q) {
+               wake_up_q(wake_q);
+               wake_q_init(wake_q);
+       }
+}
 #endif /* _LINUX_SCHED_WAKE_Q_H */
diff --git a/kernel/futex/pi.c b/kernel/futex/pi.c
index d62cca5ed8f4c8d2084b6e553bddfefd48ca2c1b..daea650b16f51be890f22f7d243845bd0b972c54 100644
--- a/kernel/futex/pi.c
+++ b/kernel/futex/pi.c
@@ -1020,10 +1020,7 @@ retry_private:
         * it sees the futex_q::pi_state.
         */
        ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current, &wake_q);
-       preempt_disable();
-       raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
-       wake_up_q(&wake_q);
-       preempt_enable();
+       raw_spin_unlock_irq_wake(&q.pi_state->pi_mutex.wait_lock, &wake_q);
 
        if (ret) {
                if (ret == 1)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 3302e52f0c96727391025528a1022a24649f377b..b36f23de48f1b544e9950a3ed20e0ba6b89b9fc5 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -657,10 +657,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
                                goto err;
                }
 
-               raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-               /* Make sure we do wakeups before calling schedule */
-               wake_up_q(&wake_q);
-               wake_q_init(&wake_q);
+               raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
 
                schedule_preempt_disabled();
 
@@ -710,8 +707,7 @@ skip_wait:
        if (ww_ctx)
                ww_mutex_lock_acquired(ww, ww_ctx);
 
-       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-       wake_up_q(&wake_q);
+       raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
        preempt_enable();
        return 0;
 
@@ -720,10 +716,9 @@ err:
        __mutex_remove_waiter(lock, &waiter);
 err_early_kill:
        trace_contention_end(lock, ret);
-       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+       raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
        debug_mutex_free_waiter(&waiter);
        mutex_release(&lock->dep_map, ip);
-       wake_up_q(&wake_q);
        preempt_enable();
        return ret;
 }
@@ -935,10 +930,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
        if (owner & MUTEX_FLAG_HANDOFF)
                __mutex_handoff(lock, next);
 
-       preempt_disable();
-       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-       wake_up_q(&wake_q);
-       preempt_enable();
+       raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
 }
 
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 697a56d3d949b9e5d9d17979f511bcb335895623..4a8df1800cbbd1aef0a1ac96c426d57533f7fbe7 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1292,13 +1292,7 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
         */
        get_task_struct(owner);
 
-       preempt_disable();
-       raw_spin_unlock_irq(&lock->wait_lock);
-       /* wake up any tasks on the wake_q before calling rt_mutex_adjust_prio_chain */
-       wake_up_q(wake_q);
-       wake_q_init(wake_q);
-       preempt_enable();
-
+       raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q);
 
        res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
                                         next_lock, waiter, task);
@@ -1642,13 +1636,7 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
                        owner = rt_mutex_owner(lock);
                else
                        owner = NULL;
-               preempt_disable();
-               raw_spin_unlock_irq(&lock->wait_lock);
-               if (wake_q) {
-                       wake_up_q(wake_q);
-                       wake_q_init(wake_q);
-               }
-               preempt_enable();
+               raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q);
 
                if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
                        rt_mutex_schedule();
@@ -1799,10 +1787,7 @@ static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
         */
        raw_spin_lock_irqsave(&lock->wait_lock, flags);
        ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state, &wake_q);
-       preempt_disable();
-       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-       wake_up_q(&wake_q);
-       preempt_enable();
+       raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
        rt_mutex_post_schedule();
 
        return ret;
@@ -1860,11 +1845,7 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock,
                        owner = rt_mutex_owner(lock);
                else
                        owner = NULL;
-               preempt_disable();
-               raw_spin_unlock_irq(&lock->wait_lock);
-               wake_up_q(wake_q);
-               wake_q_init(wake_q);
-               preempt_enable();
+               raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q);
 
                if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner))
                        schedule_rtlock();
@@ -1893,10 +1874,7 @@ static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock)
 
        raw_spin_lock_irqsave(&lock->wait_lock, flags);
        rtlock_slowlock_locked(lock, &wake_q);
-       preempt_disable();
-       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-       wake_up_q(&wake_q);
-       preempt_enable();
+       raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
 }
 
 #endif /* RT_MUTEX_BUILD_SPINLOCKS */