struct rt_mutex_base {
raw_spinlock_t wait_lock;
- struct rb_root_cached waiters;
- struct task_struct *owner;
+ struct rb_root_cached waiters __guarded_by(&wait_lock);
+ struct task_struct *owner __guarded_by(&wait_lock);
};
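With these annotations in place, every access to waiters and owner is checked against wait_lock by the compiler. A minimal sketch of the plumbing, assuming a config symbol along the lines of CONFIG_WARN_CONTEXT_ANALYSIS (the attribute is Clang's thread-safety guarded_by; the fallback keeps the macro a no-op for other compilers):

	#if defined(CONFIG_WARN_CONTEXT_ANALYSIS)
	#define __guarded_by(x)	__attribute__((guarded_by(x)))
	#else
	#define __guarded_by(x)	/* no-op without analysis support */
	#endif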
#define __RT_MUTEX_BASE_INITIALIZER(rtbasename) \
*/
static inline bool rt_mutex_base_is_locked(struct rt_mutex_base *lock)
{
- return READ_ONCE(lock->owner) != NULL;
+ return data_race(READ_ONCE(lock->owner) != NULL);
}
#ifdef CONFIG_RT_MUTEXES
static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
{
- unsigned long owner = (unsigned long) READ_ONCE(lock->owner);
+ unsigned long owner = (unsigned long) data_race(READ_ONCE(lock->owner));
return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
}
KCOV_INSTRUMENT := n
CONTEXT_ANALYSIS_mutex.o := y
+CONTEXT_ANALYSIS_rtmutex_api.o := y
+CONTEXT_ANALYSIS_ww_rt_mutex.o := y
obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o
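Opting rtmutex_api.o and ww_rt_mutex.o in means every access to a __guarded_by() member in those translation units is now verified. For illustration (hypothetical function; diagnostic text approximate), an unprotected write would be flagged:

	static void bad_example(struct rt_mutex_base *lock)
	{
		/* warning: writing variable 'owner' requires holding
		 * 'wait_lock' exclusively */
		lock->owner = NULL;
	}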
static __always_inline struct task_struct *
rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner)
+ __must_hold(&lock->wait_lock)
{
unsigned long val = (unsigned long)owner;
static __always_inline void
rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
+ __must_hold(&lock->wait_lock)
{
/*
* lock->wait_lock is held but explicit acquire semantics are needed
}
static __always_inline void rt_mutex_clear_owner(struct rt_mutex_base *lock)
+ __must_hold(&lock->wait_lock)
{
/* lock->wait_lock is held so the unlock provides release semantics. */
WRITE_ONCE(lock->owner, rt_mutex_owner_encode(lock, NULL));
}
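As the two comments spell out, a new owner needs explicit acquire ordering even under wait_lock, whereas clearing the owner can piggyback on the release of the subsequent wait_lock unlock. The store elided from rt_mutex_set_owner() above is, consistent with its comment, an acquire exchange rather than a plain WRITE_ONCE():

	xchg_acquire(&lock->owner, rt_mutex_owner_encode(lock, owner));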
static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
+ __must_hold(&lock->wait_lock)
{
lock->owner = (struct task_struct *)
((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
static __always_inline void
fixup_rt_mutex_waiters(struct rt_mutex_base *lock, bool acquire_lock)
+ __must_hold(&lock->wait_lock)
{
unsigned long owner, *p = (unsigned long *) &lock->owner;
}
static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
+ __must_hold(&lock->wait_lock)
{
lock->owner = (struct task_struct *)
((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
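Each of these helpers mutates guarded state, so the definitions now carry __must_hold(&lock->wait_lock): the analysis verifies that every caller provably holds wait_lock at the call site, whether via explicit lock/unlock calls or a guard. A hedged sketch of a conforming caller (hypothetical function):

	static void example_caller(struct rt_mutex_base *lock)
	{
		guard(raw_spinlock_irqsave)(&lock->wait_lock);
		mark_rt_mutex_waiters(lock);	/* OK: wait_lock is held */
	}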
struct ww_acquire_ctx *ww_ctx,
enum rtmutex_chainwalk chwalk,
struct wake_q_head *wake_q)
+ __must_hold(&lock->wait_lock)
{
struct task_struct *owner = rt_mutex_owner(lock);
struct rt_mutex_waiter *top_waiter = waiter;
/* Check whether the waiter should back out immediately */
rtm = container_of(lock, struct rt_mutex, rtmutex);
+ __assume_ctx_lock(&rtm->rtmutex.wait_lock);
res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx, wake_q);
if (res) {
raw_spin_lock(&task->pi_lock);
}
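The __assume_ctx_lock() deals with aliasing: the analysis tracks locks by syntactic expression, so it cannot know that &rtm->rtmutex.wait_lock, reached via container_of(), names the same raw_spinlock_t as the &lock->wait_lock already held per __must_hold(). The assertion records the alias with no runtime effect, letting the __ww_mutex_add_waiter() call (annotated against the rtmutex.wait_lock spelling) pass the check. Distilled:

	/* held on entry: &lock->wait_lock */
	rtm = container_of(lock, struct rt_mutex, rtmutex);
	__assume_ctx_lock(&rtm->rtmutex.wait_lock);	/* same lock, new spelling */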
static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock)
+ __must_hold(&lock->wait_lock)
{
int ret = try_to_take_rt_mutex(lock, current, NULL);
* - the VCPU on which owner runs is preempted
*/
if (!owner_on_cpu(owner) || need_resched() ||
- !rt_mutex_waiter_is_top_waiter(lock, waiter)) {
+ !data_race(rt_mutex_waiter_is_top_waiter(lock, waiter))) {
res = false;
break;
}
*/
static void __sched remove_waiter(struct rt_mutex_base *lock,
struct rt_mutex_waiter *waiter)
+ __must_hold(&lock->wait_lock)
{
bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
struct task_struct *owner = rt_mutex_owner(lock);
struct task_struct *owner;
int ret = 0;
+ __assume_ctx_lock(&rtm->rtmutex.wait_lock);
+
lockevent_inc(rtmutex_slow_block);
for (;;) {
/* Try to acquire the lock: */
static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
struct rt_mutex_base *lock,
struct rt_mutex_waiter *w)
+ __must_hold(&lock->wait_lock)
{
/*
* If the result is not -EDEADLOCK or the caller requested
enum rtmutex_chainwalk chwalk,
struct rt_mutex_waiter *waiter,
struct wake_q_head *wake_q)
+ __must_hold(&lock->wait_lock)
{
struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
struct ww_mutex *ww = ww_container_of(rtm);
int ret;
+ __assume_ctx_lock(&rtm->rtmutex.wait_lock);
lockdep_assert_held(&lock->wait_lock);
lockevent_inc(rtmutex_slowlock);
struct ww_acquire_ctx *ww_ctx,
unsigned int state,
struct wake_q_head *wake_q)
+ __must_hold(&lock->wait_lock)
{
struct rt_mutex_waiter waiter;
int ret;
unsigned int subclass,
struct lockdep_map *nest_lock,
unsigned long ip)
+ __acquires(lock) __no_context_analysis
{
int ret;
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
void __sched mutex_unlock(struct mutex *lock)
+ __releases(lock) __no_context_analysis
{
mutex_release(&lock->dep_map, _RET_IP_);
__rt_mutex_unlock(&lock->rtmutex);
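The top-level entry points keep the sparse-style __acquires()/__releases() contract but add __no_context_analysis: the actual lock-state transition happens in callees (a fast-path cmpxchg or the slow path) that the intra-procedural analysis cannot see through, so the body is exempted while callers are still checked against the contract. The shape, on a hypothetical wrapper:

	void my_unlock(struct mutex *lock)
		__releases(lock) __no_context_analysis
	{
		/* state change happens in a callee the analysis can't track */
	}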
* PI-futex support (proxy locking functions, etc.):
*/
extern void rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
- struct task_struct *proxy_owner);
-extern void rt_mutex_proxy_unlock(struct rt_mutex_base *lock);
+ struct task_struct *proxy_owner)
+ __must_hold(&lock->wait_lock);
+
+extern void rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
+ __must_hold(&lock->wait_lock);
+
extern int __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
struct rt_mutex_waiter *waiter,
struct task_struct *task,
- struct wake_q_head *);
+ struct wake_q_head *)
+ __must_hold(&lock->wait_lock);
+
extern int rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
struct rt_mutex_waiter *waiter,
struct task_struct *task);
extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
struct rt_mutex_waiter *waiter);
-extern int rt_mutex_futex_trylock(struct rt_mutex_base *l);
-extern int __rt_mutex_futex_trylock(struct rt_mutex_base *l);
+extern int rt_mutex_futex_trylock(struct rt_mutex_base *lock);
+extern int __rt_mutex_futex_trylock(struct rt_mutex_base *lock)
+ __must_hold(&lock->wait_lock);
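Two things are visible in the futex trylock pair. First, the rename from l to lock is required because annotation arguments refer to parameters by name, so __must_hold(&lock->wait_lock) must be able to resolve lock in the declaration. Second, the outer/inner convention: the outer rt_mutex_futex_trylock() takes wait_lock itself and therefore carries no __must_hold(), while the double-underscore variant asserts the lock is already held. A hedged sketch of the outer body:

	int rt_mutex_futex_trylock(struct rt_mutex_base *lock)
	{
		unsigned long flags;
		int ret;

		raw_spin_lock_irqsave(&lock->wait_lock, flags);
		ret = __rt_mutex_futex_trylock(lock);	/* __must_hold() satisfied */
		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
		return ret;
	}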
extern void rt_mutex_futex_unlock(struct rt_mutex_base *lock);
extern bool __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
*/
#ifdef CONFIG_RT_MUTEXES
static inline int rt_mutex_has_waiters(struct rt_mutex_base *lock)
+ __must_hold(&lock->wait_lock)
{
return !RB_EMPTY_ROOT(&lock->waiters.rb_root);
}
*/
static inline bool rt_mutex_waiter_is_top_waiter(struct rt_mutex_base *lock,
struct rt_mutex_waiter *waiter)
+ __must_hold(&lock->wait_lock)
{
struct rb_node *leftmost = rb_first_cached(&lock->waiters);
}
static inline struct rt_mutex_waiter *rt_mutex_top_waiter(struct rt_mutex_base *lock)
+ __must_hold(&lock->wait_lock)
{
struct rb_node *leftmost = rb_first_cached(&lock->waiters);
struct rt_mutex_waiter *w = NULL;
static inline void __rt_mutex_base_init(struct rt_mutex_base *lock)
{
- raw_spin_lock_init(&lock->wait_lock);
- lock->waiters = RB_ROOT_CACHED;
- lock->owner = NULL;
+ raw_spin_lock_init(&lock->wait_lock);
+ scoped_guard (raw_spinlock, &lock->wait_lock) {
+ lock->waiters = RB_ROOT_CACHED;
+ lock->owner = NULL;
+ }
}
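Note there is no raw_spinlock_init guard class, and a lock must be initialized before it can be acquired; hence init first, then hold the real raw_spinlock guard across the two guarded stores. That brief acquisition is cheap, since the object cannot yet be contended, and keeps the writes visibly protected to the analysis. The alternative would be an explicit opt-out around the stores, assuming this series provides such an escape hatch (macro name assumed here, hypothetical):

	/* hypothetical opt-out, if preferred over the transient lock: */
	context_unsafe(
		lock->waiters = RB_ROOT_CACHED;
		lock->owner = NULL;
	);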
/* Debug functions */
#define MUTEX mutex
#define MUTEX_WAITER mutex_waiter
+#define WAIT_LOCK wait_lock
static inline struct mutex_waiter *
__ww_waiter_first(struct mutex *lock)
#define MUTEX rt_mutex
#define MUTEX_WAITER rt_mutex_waiter
+#define WAIT_LOCK rtmutex.wait_lock
static inline struct rt_mutex_waiter *
__ww_waiter_first(struct rt_mutex *lock)
+ __must_hold(&lock->rtmutex.wait_lock)
{
struct rb_node *n = rb_first(&lock->rtmutex.waiters.rb_root);
if (!n)
static inline struct rt_mutex_waiter *
__ww_waiter_last(struct rt_mutex *lock)
+ __must_hold(&lock->rtmutex.wait_lock)
{
struct rb_node *n = rb_last(&lock->rtmutex.waiters.rb_root);
if (!n)
static inline bool
__ww_mutex_has_waiters(struct rt_mutex *lock)
+ __must_hold(&lock->rtmutex.wait_lock)
{
return rt_mutex_has_waiters(&lock->rtmutex);
}
static inline void lock_wait_lock(struct rt_mutex *lock, unsigned long *flags)
+ __acquires(&lock->rtmutex.wait_lock)
{
raw_spin_lock_irqsave(&lock->rtmutex.wait_lock, *flags);
}
static inline void unlock_wait_lock(struct rt_mutex *lock, unsigned long *flags)
+ __releases(&lock->rtmutex.wait_lock)
{
raw_spin_unlock_irqrestore(&lock->rtmutex.wait_lock, *flags);
}
static inline void lockdep_assert_wait_lock_held(struct rt_mutex *lock)
+ __must_hold(&lock->rtmutex.wait_lock)
{
lockdep_assert_held(&lock->rtmutex.wait_lock);
}
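With WAIT_LOCK defined per backend, the shared ww_mutex template code below can annotate against whichever wait_lock the preemption model provides; the single spelling __must_hold(&lock->WAIT_LOCK) expands as:

	/* mutex backend:    __must_hold(&lock->wait_lock)          */
	/* rt_mutex backend: __must_hold(&lock->rtmutex.wait_lock)  */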
struct ww_acquire_ctx *ww_ctx,
struct ww_acquire_ctx *hold_ctx,
struct wake_q_head *wake_q)
- __must_hold(&lock->wait_lock)
+ __must_hold(&lock->WAIT_LOCK)
{
struct task_struct *owner = __ww_mutex_owner(lock);
static void
__ww_mutex_check_waiters(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx,
struct wake_q_head *wake_q)
- __must_hold(&lock->wait_lock)
+ __must_hold(&lock->WAIT_LOCK)
{
struct MUTEX_WAITER *cur;
{
DEFINE_WAKE_Q(wake_q);
unsigned long flags;
+ bool has_waiters;
ww_mutex_lock_acquired(lock, ctx);
* __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
* and/or !empty list.
*/
- if (likely(!__ww_mutex_has_waiters(&lock->base)))
+ has_waiters = data_race(__ww_mutex_has_waiters(&lock->base));
+ if (likely(!has_waiters))
return;
/*
static inline int
__ww_mutex_check_kill(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
struct ww_acquire_ctx *ctx)
- __must_hold(&lock->wait_lock)
+ __must_hold(&lock->WAIT_LOCK)
{
struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
struct MUTEX *lock,
struct ww_acquire_ctx *ww_ctx,
struct wake_q_head *wake_q)
- __must_hold(&lock->wait_lock)
+ __must_hold(&lock->WAIT_LOCK)
{
struct MUTEX_WAITER *cur, *pos = NULL;
bool is_wait_die;
EXPORT_SYMBOL(ww_mutex_lock_interruptible);
void __sched ww_mutex_unlock(struct ww_mutex *lock)
+ __no_context_analysis
{
struct rt_mutex *rtm = &lock->base;
src:*include/linux/rcupdate.h=emit
src:*include/linux/refcount.h=emit
src:*include/linux/rhashtable.h=emit
+src:*include/linux/rtmutex*.h=emit
src:*include/linux/rwlock*.h=emit
src:*include/linux/rwsem.h=emit
src:*include/linux/sched*=emit