git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
locking/local_lock: Introduce local_lock_is_locked().
author		Alexei Starovoitov <ast@kernel.org>
		Tue, 9 Sep 2025 01:00:02 +0000 (18:00 -0700)
committer	Vlastimil Babka <vbabka@suse.cz>
		Mon, 29 Sep 2025 07:42:35 +0000 (09:42 +0200)
Introduce local_lock_is_locked() that returns true when the given
local_lock is locked by the current CPU (in !PREEMPT_RT) or by the
current task (in PREEMPT_RT).
The goal is to let the caller detect an impending deadlock.
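
For illustration, a minimal sketch of the intended caller pattern; the
pool structure, the function names, and the re-entrant context below are
hypothetical, not part of this patch:

    #include <linux/local_lock.h>
    #include <linux/percpu.h>

    struct pool {
    	local_trylock_t lock;
    	/* per-CPU state protected by @lock */
    };

    static DEFINE_PER_CPU(struct pool, pools) = {
    	.lock = INIT_LOCAL_TRYLOCK(lock),
    };

    static void pool_fill(void)
    {
    	local_lock(&pools.lock);
    	/* ... refill this_cpu_ptr(&pools) ... */
    	local_unlock(&pools.lock);
    }

    /*
     * Re-entrant path, e.g. invoked from a context that may have
     * interrupted pool_fill() on this CPU. Preemption (or, on
     * PREEMPT_RT, migration) is already disabled here, as
     * __local_lock_is_locked() requires.
     */
    static bool pool_fill_safe(void)
    {
    	/*
    	 * Lock already held by this CPU (!PREEMPT_RT) or by this
    	 * task (PREEMPT_RT): taking it again would deadlock, so
    	 * back off and let the caller defer the work.
    	 */
    	if (local_lock_is_locked(&pools.lock))
    		return false;

    	pool_fill();
    	return true;
    }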

Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
include/linux/local_lock.h
include/linux/local_lock_internal.h
include/linux/rtmutex.h
kernel/locking/rtmutex_common.h

include/linux/local_lock.h
index 2ba84641952440d6cb383f3834577ab5ceb067a8..0d91d060e3e90c075f63d0149ec118485771e035 100644 (file)
@@ -66,6 +66,8 @@
  */
 #define local_trylock(lock)            __local_trylock(this_cpu_ptr(lock))
 
+#define local_lock_is_locked(lock)     __local_lock_is_locked(lock)
+
 /**
  * local_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
  *                        interrupts if acquired
include/linux/local_lock_internal.h
index 949de37700dbc10feafc06d0b52382cf2e00c694..a4dc479157b5cd63a7e57463b9aba1381e52869e 100644 (file)
@@ -165,6 +165,9 @@ do {                                                                \
                !!tl;                                           \
        })
 
+/* preemption or migration must be disabled before calling __local_lock_is_locked */
+#define __local_lock_is_locked(lock) READ_ONCE(this_cpu_ptr(lock)->acquired)
+
 #define __local_lock_release(lock)                                     \
        do {                                                            \
                local_trylock_t *tl;                                    \
@@ -285,4 +288,8 @@ do {                                                                \
                __local_trylock(lock);                          \
        })
 
+/* migration must be disabled before calling __local_lock_is_locked */
+#define __local_lock_is_locked(__lock)                                 \
+       (rt_mutex_owner(&this_cpu_ptr(__lock)->lock) == current)
+
 #endif /* CONFIG_PREEMPT_RT */
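
Note the asymmetry between the two variants above: with !PREEMPT_RT the
per-CPU ->acquired flag identifies the owner implicitly (only code running
on this CPU can have set it, hence the preemption/migration requirement),
while with PREEMPT_RT a local lock is a per-CPU rt_mutex that can be held
across sleeps, so ownership is checked by comparing rt_mutex_owner()
against current.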
include/linux/rtmutex.h
index fa9f1021541eac3d56ff01a8b883c19052d0b3d2..ede4c6bf6f22666ade6e6597215b693b727c4c9f 100644 (file)
@@ -44,6 +44,16 @@ static inline bool rt_mutex_base_is_locked(struct rt_mutex_base *lock)
        return READ_ONCE(lock->owner) != NULL;
 }
 
+#ifdef CONFIG_RT_MUTEXES
+#define RT_MUTEX_HAS_WAITERS   1UL
+
+static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
+{
+       unsigned long owner = (unsigned long) READ_ONCE(lock->owner);
+
+       return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
+}
+#endif
 extern void rt_mutex_base_init(struct rt_mutex_base *rtb);
 
 /**
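
rt_mutex_owner() was previously private to the locking core; it is moved
here from kernel/locking/rtmutex_common.h (see the removal below) so that
the PREEMPT_RT variant of __local_lock_is_locked() in
local_lock_internal.h can use it. The low bit of ->owner doubles as the
RT_MUTEX_HAS_WAITERS flag, so it must be masked off to recover the task
pointer.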
kernel/locking/rtmutex_common.h
index 78dd3d8c65544e2c29d4a6a7d3e59fe7d735e803..cf6ddd1b23a25108db3e93f88ec28e2f64fd7c53 100644 (file)
@@ -153,15 +153,6 @@ static inline struct rt_mutex_waiter *task_top_pi_waiter(struct task_struct *p)
                        pi_tree.entry);
 }
 
-#define RT_MUTEX_HAS_WAITERS   1UL
-
-static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
-{
-       unsigned long owner = (unsigned long) READ_ONCE(lock->owner);
-
-       return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
-}
-
 /*
  * Constants for rt mutex functions which have a selectable deadlock
  * detection.