sched: Match __task_rq_{,un}lock()
author		Peter Zijlstra <peterz@infradead.org>
		Thu, 25 Sep 2025 09:26:22 +0000 (11:26 +0200)
committer	Peter Zijlstra <peterz@infradead.org>
		Thu, 16 Oct 2025 09:13:54 +0000 (11:13 +0200)
In preparation for adding more rules to __task_rq_lock(), such that
__task_rq_unlock() will no longer be equivalent to rq_unlock(),
make sure every __task_rq_lock() is matched by a __task_rq_unlock()
and vice versa.
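
A minimal sketch of the pairing this enforces (the caller is
illustrative; the signatures are the ones introduced below):

	struct rq_flags rf;
	struct rq *rq;

	rq = __task_rq_lock(p, &rf);	/* takes and pins rq->lock */
	/* ... operate on p's runqueue ... */
	__task_rq_unlock(rq, p, &rf);	/* @p is now part of the unlock */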

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Juri Lelli <juri.lelli@redhat.com>
Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: Vincent Guittot <vincent.guittot@linaro.org>
kernel/sched/core.c
kernel/sched/sched.h
kernel/sched/stats.h

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8c55740b1e062e1ec3a6c2217b880821b2c134ae..e715147c31b2508ccaa3ae7f2da966337d6b47d3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2582,7 +2582,8 @@ static int migration_cpu_stop(void *data)
                 */
                WARN_ON_ONCE(!pending->stop_pending);
                preempt_disable();
-               task_rq_unlock(rq, p, &rf);
+               rq_unlock(rq, &rf);
+               raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
                stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
                                    &pending->arg, &pending->stop_work);
                preempt_enable();
@@ -2591,7 +2592,8 @@ static int migration_cpu_stop(void *data)
 out:
        if (pending)
                pending->stop_pending = false;
-       task_rq_unlock(rq, p, &rf);
+       rq_unlock(rq, &rf);
+       raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
 
        if (complete)
                complete_all(&pending->done);
@@ -3708,7 +3710,7 @@ static int ttwu_runnable(struct task_struct *p, int wake_flags)
                ttwu_do_wakeup(p);
                ret = 1;
        }
-       __task_rq_unlock(rq, &rf);
+       __task_rq_unlock(rq, p, &rf);
 
        return ret;
 }
@@ -4301,7 +4303,7 @@ int task_call_func(struct task_struct *p, task_call_f func, void *arg)
        ret = func(p, arg);
 
        if (rq)
-               rq_unlock(rq, &rf);
+               __task_rq_unlock(rq, p, &rf);
 
        raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
        return ret;
@@ -7362,7 +7364,8 @@ out_unlock:
 
        rq_unpin_lock(rq, &rf);
        __balance_callbacks(rq);
-       raw_spin_rq_unlock(rq);
+       rq_repin_lock(rq, &rf);
+       __task_rq_unlock(rq, p, &rf);
 
        preempt_enable();
 }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 346214503452c26597d5954b97521418f3500e27..e3d271013c8b0f00f68fae21603b0739dc53e105 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1825,7 +1825,8 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
        __acquires(p->pi_lock)
        __acquires(rq->lock);
 
-static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
+static inline void
+__task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
        __releases(rq->lock)
 {
        rq_unpin_lock(rq, rf);
@@ -1837,8 +1838,7 @@ task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
        __releases(rq->lock)
        __releases(p->pi_lock)
 {
-       rq_unpin_lock(rq, rf);
-       raw_spin_rq_unlock(rq);
+       __task_rq_unlock(rq, p, rf);
        raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
 }
 
@@ -1849,7 +1849,7 @@ DEFINE_LOCK_GUARD_1(task_rq_lock, struct task_struct,
 
 DEFINE_LOCK_GUARD_1(__task_rq_lock, struct task_struct,
                    _T->rq = __task_rq_lock(_T->lock, &_T->rf),
-                   __task_rq_unlock(_T->rq, &_T->rf),
+                   __task_rq_unlock(_T->rq, _T->lock, &_T->rf),
                    struct rq *rq; struct rq_flags rf)
 
 static inline void rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 26f3fd4d34cead0733386f929d2abbb69e3defe7..cbf7206b3f9d7f9085e4cc14f7f0a50fd68bc60d 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -206,7 +206,7 @@ static inline void psi_ttwu_dequeue(struct task_struct *p)
 
                rq = __task_rq_lock(p, &rf);
                psi_task_change(p, p->psi_flags, 0);
-               __task_rq_unlock(rq, &rf);
+               __task_rq_unlock(rq, p, &rf);
        }
 }
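
For completeness, a hedged usage sketch of the updated __task_rq_lock
guard: the guard definition comes from the sched.h hunk above, while
frob_task() is a hypothetical caller added here for illustration only.

	static void frob_task(struct task_struct *p)
	{
		/*
		 * Per the DEFINE_LOCK_GUARD_1() change above, scope exit
		 * now runs __task_rq_unlock(rq_guard.rq, p, &rq_guard.rf)
		 * instead of the old two-argument form.
		 */
		CLASS(__task_rq_lock, rq_guard)(p);
		struct rq *rq = rq_guard.rq;

		lockdep_assert_rq_held(rq);	/* rq->lock held and pinned */
		/* ... operate on p's runqueue ... */
	}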