git.ipfire.org Git - thirdparty/linux.git/commitdiff
sched/headers: Inline raw_spin_rq_unlock()
author: Xie Yuanbin <qq570070308@gmail.com>
Mon, 16 Feb 2026 16:49:49 +0000 (00:49 +0800)
committer: Ingo Molnar <mingo@kernel.org>
Fri, 6 Mar 2026 05:21:48 +0000 (06:21 +0100)
raw_spin_rq_unlock() is short, and is called in some hot code paths
such as finish_lock_switch().

Inline raw_spin_rq_unlock() to micro-optimize performance a bit.

Signed-off-by: Xie Yuanbin <qq570070308@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://patch.msgid.link/20260216164950.147617-3-qq570070308@gmail.com
kernel/sched/core.c
kernel/sched/sched.h

index bfd280ec0f97bbee4915aedc2680561c105fb087..b59bab255e57e4e40932b7b49a94cd12c37e295f 100644 (file)
@@ -687,11 +687,6 @@ bool raw_spin_rq_trylock(struct rq *rq)
        }
 }
 
-void raw_spin_rq_unlock(struct rq *rq)
-{
-       raw_spin_unlock(rq_lockp(rq));
-}
-
 /*
  * double_rq_lock - safely lock two runqueues
  */
index fa2237e89beea4aabba2fd761425fc4a4a59a61c..953d89d718042e3125783b79af1246692484b2bf 100644 (file)
@@ -1607,15 +1607,18 @@ extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
 extern bool raw_spin_rq_trylock(struct rq *rq)
        __cond_acquires(true, __rq_lockp(rq));
 
-extern void raw_spin_rq_unlock(struct rq *rq)
-       __releases(__rq_lockp(rq));
-
 static inline void raw_spin_rq_lock(struct rq *rq)
        __acquires(__rq_lockp(rq))
 {
        raw_spin_rq_lock_nested(rq, 0);
 }
 
+static inline void raw_spin_rq_unlock(struct rq *rq)
+       __releases(__rq_lockp(rq))
+{
+       raw_spin_unlock(rq_lockp(rq));
+}
+
 static inline void raw_spin_rq_lock_irq(struct rq *rq)
        __acquires(__rq_lockp(rq))
 {