rqspinlock: Fix TAS fallback lock entry creation
author Kumar Kartikeya Dwivedi <memxor@gmail.com>
Thu, 22 Jan 2026 11:59:11 +0000 (03:59 -0800)
committer Alexei Starovoitov <ast@kernel.org>
Fri, 23 Jan 2026 18:03:49 +0000 (10:03 -0800)
The TAS fallback can be invoked directly when queued spin locks are
disabled, and through the slow path when paravirt is enabled for queued
spin locks. In the latter case, the res_spin_lock macro will attempt the
fast path and already hold the entry when entering the slow path. This
will lead to the creation of extraneous entries that are not released,
which may cause false positives in deadlock detection.

Fix this by preceding every invocation of the TAS fallback with the
grabbing of the held lock entry, and add a comment to note this.

Fixes: c9102a68c070 ("rqspinlock: Add a test-and-set fallback")
Reported-by: Amery Hung <ameryhung@gmail.com>
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Tested-by: Amery Hung <ameryhung@gmail.com>
Link: https://lore.kernel.org/r/20260122115911.3668985-1-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
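
For illustration only (not part of the commit), here is a minimal user-space
C sketch of the contract the patch establishes: the held lock entry is always
grabbed by the caller before the TAS fallback runs, so the fallback itself
must not grab another one. Everything here is invented for the sketch: the
toy_* helpers, the held_entries counter, and the C11 atomic_flag standing in
for rqspinlock_t; the real code lives in the two files changed below.

#include <stdatomic.h>
#include <stdio.h>

/* Toy stand-in for the per-CPU held locks table: just a counter. */
static int held_entries;

static void toy_grab_held_lock_entry(atomic_flag *lock)
{
	(void)lock;
	held_entries++;		/* record the lock for deadlock detection */
}

static void toy_release_held_lock_entry(void)
{
	held_entries--;
}

/*
 * Toy TAS fallback: like the fixed resilient_tas_spin_lock(), it assumes
 * the caller already grabbed the held lock entry and does not grab one
 * itself (the real code also enforces a timeout and runs deadlock checks
 * while spinning).
 */
static int toy_tas_spin_lock(atomic_flag *lock)
{
	while (atomic_flag_test_and_set_explicit(lock, memory_order_acquire))
		;
	return 0;
}

/* Mirrors the !CONFIG_QUEUED_SPINLOCKS res_spin_lock(): grab, then spin. */
static int toy_res_spin_lock(atomic_flag *lock)
{
	toy_grab_held_lock_entry(lock);
	return toy_tas_spin_lock(lock);
}

static void toy_res_spin_unlock(atomic_flag *lock)
{
	atomic_flag_clear_explicit(lock, memory_order_release);
	toy_release_held_lock_entry();
}

int main(void)
{
	atomic_flag lock = ATOMIC_FLAG_INIT;

	toy_res_spin_lock(&lock);
	printf("entries while held: %d\n", held_entries);	/* 1: one entry per acquisition */
	toy_res_spin_unlock(&lock);
	printf("entries after unlock: %d\n", held_entries);	/* 0 */
	return 0;
}

Built with cc -std=c11, the first printf reports a single held entry for the
acquisition; the bug described above produced a second, unreleased entry when
the paravirt slow path fell back to TAS after the entry had already been
grabbed.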
include/asm-generic/rqspinlock.h
kernel/bpf/rqspinlock.c

index 0f2dcbbfee2f0aa94a1e78ac4fec75d8231ae10d..5c5cf2f7fc395a3bc95ae10962b09820c2d7b96c 100644
--- a/include/asm-generic/rqspinlock.h
+++ b/include/asm-generic/rqspinlock.h
@@ -191,7 +191,7 @@ static __always_inline int res_spin_lock(rqspinlock_t *lock)
 
 #else
 
-#define res_spin_lock(lock) resilient_tas_spin_lock(lock)
+#define res_spin_lock(lock) ({ grab_held_lock_entry(lock); resilient_tas_spin_lock(lock); })
 
 #endif /* CONFIG_QUEUED_SPINLOCKS */
 
index f7d0c8d4644edb970e3ed634faa7315352e6c6a6..2fdfa828e3d35ac318b88ff20965f72ff773bb30 100644
--- a/kernel/bpf/rqspinlock.c
+++ b/kernel/bpf/rqspinlock.c
@@ -265,10 +265,11 @@ int __lockfunc resilient_tas_spin_lock(rqspinlock_t *lock)
 
        RES_INIT_TIMEOUT(ts);
        /*
-        * The fast path is not invoked for the TAS fallback, so we must grab
-        * the deadlock detection entry here.
+        * We are either called directly from res_spin_lock after grabbing the
+        * deadlock detection entry when queued spinlocks are disabled, or from
+        * resilient_queued_spin_lock_slowpath after grabbing the deadlock
+        * detection entry. No need to obtain it here.
         */
-       grab_held_lock_entry(lock);
 
        /*
         * Since the waiting loop's time is dependent on the amount of