static void __mutex_init_generic(struct mutex *lock)
{
atomic_long_set(&lock->owner, 0);
- raw_spin_lock_init(&lock->wait_lock);
- lock->first_waiter = NULL;
+ scoped_guard (raw_spinlock_init, &lock->wait_lock) {
+ lock->first_waiter = NULL;
+ }
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
osq_lock_init(&lock->osq);
#endif
* follow with a __mutex_trylock() before failing.
*/
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
+ __cond_acquires(true, lock)
{
unsigned long curr = (unsigned long)current;
unsigned long zero = 0UL;
}
static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
+ __cond_releases(true, lock)
{
unsigned long curr = (unsigned long)current;
static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct mutex_waiter *first)
+ __must_hold(&lock->wait_lock)
{
hung_task_set_blocker(lock, BLOCKER_TYPE_MUTEX);
debug_mutex_add_waiter(lock, waiter, current);
static void
__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
+ __must_hold(&lock->wait_lock)
{
if (list_empty(&waiter->list)) {
__mutex_clear_flag(lock, MUTEX_FLAGS);
* We also put the fastpath first in the kernel image, to make sure the
* branch is predicted by the CPU as default-untaken.
*/
-static void __sched __mutex_lock_slowpath(struct mutex *lock);
+static void __sched __mutex_lock_slowpath(struct mutex *lock)
+ __acquires(lock);
/**
* mutex_lock - acquire the mutex
* Similarly, stop spinning if we are no longer the
* first waiter.
*/
- if (waiter && lock->first_waiter != waiter)
+ if (waiter && data_race(lock->first_waiter != waiter))
return false;
return true;
}
#endif
-static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
+static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
+ __releases(lock);
/**
* mutex_unlock - release the mutex
 * of an unlocked mutex is not allowed.
*/
void __sched ww_mutex_unlock(struct ww_mutex *lock)
+ __no_context_analysis
{
__ww_mutex_unlock(lock);
mutex_unlock(&lock->base);
__mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
struct lockdep_map *nest_lock, unsigned long ip,
struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
+ __cond_acquires(0, lock)
{
DEFINE_WAKE_Q(wake_q);
struct mutex_waiter waiter;
static int __sched
__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
struct lockdep_map *nest_lock, unsigned long ip)
+ __cond_acquires(0, lock)
{
return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}
static int __sched
__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
unsigned long ip, struct ww_acquire_ctx *ww_ctx)
+ __cond_acquires(0, lock)
{
return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
}
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
+ __acquire(lock);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
+ __acquire(lock);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
token = io_schedule_prepare();
__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
subclass, NULL, _RET_IP_, NULL, 0);
+ __acquire(lock);
io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+ __cond_releases(nonzero, lock)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
unsigned tmp;
* Release the lock, slowpath:
*/
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
+ __releases(lock)
{
struct task_struct *next = NULL;
struct mutex_waiter *waiter;
unsigned long flags;
mutex_release(&lock->dep_map, ip);
+ __release(lock);
/*
* Release the lock before (potentially) taking the spinlock such that
static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
+ __acquires(lock)
{
__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
+ __acquire(lock);
}
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
+ __cond_acquires(0, lock)
{
return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}
static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
+ __cond_acquires(0, lock)
{
return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+ __cond_acquires(0, lock)
{
return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
_RET_IP_, ctx);
static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx)
+ __cond_acquires(0, lock)
{
return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
_RET_IP_, ctx);
static inline struct mutex_waiter *
__ww_waiter_first(struct mutex *lock)
+ __must_hold(&lock->wait_lock)
{
return lock->first_waiter;
}
static inline struct mutex_waiter *
__ww_waiter_next(struct mutex *lock, struct mutex_waiter *w)
+ __must_hold(&lock->wait_lock)
{
w = list_next_entry(w, list);
if (lock->first_waiter == w)
static inline struct mutex_waiter *
__ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)
+ __must_hold(&lock->wait_lock)
{
w = list_prev_entry(w, list);
if (lock->first_waiter == w)
static inline struct mutex_waiter *
__ww_waiter_last(struct mutex *lock)
+ __must_hold(&lock->wait_lock)
{
struct mutex_waiter *w = lock->first_waiter;
static inline void
__ww_waiter_add(struct mutex *lock, struct mutex_waiter *waiter, struct mutex_waiter *pos)
+ __must_hold(&lock->wait_lock)
{
__mutex_add_waiter(lock, waiter, pos);
}
}
static inline void lock_wait_lock(struct mutex *lock, unsigned long *flags)
+ __acquires(&lock->wait_lock)
{
raw_spin_lock_irqsave(&lock->wait_lock, *flags);
}
static inline void unlock_wait_lock(struct mutex *lock, unsigned long *flags)
+ __releases(&lock->wait_lock)
{
raw_spin_unlock_irqrestore(&lock->wait_lock, *flags);
}
static inline void lockdep_assert_wait_lock_held(struct mutex *lock)
+ __must_hold(&lock->wait_lock)
{
lockdep_assert_held(&lock->wait_lock);
}
struct ww_acquire_ctx *ww_ctx,
struct ww_acquire_ctx *hold_ctx,
struct wake_q_head *wake_q)
+ __must_hold(&lock->wait_lock)
{
struct task_struct *owner = __ww_mutex_owner(lock);
static void
__ww_mutex_check_waiters(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx,
struct wake_q_head *wake_q)
+ __must_hold(&lock->wait_lock)
{
struct MUTEX_WAITER *cur;
static inline int
__ww_mutex_check_kill(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
struct ww_acquire_ctx *ctx)
+ __must_hold(&lock->wait_lock)
{
struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
struct MUTEX *lock,
struct ww_acquire_ctx *ww_ctx,
struct wake_q_head *wake_q)
+ __must_hold(&lock->wait_lock)
{
struct MUTEX_WAITER *cur, *pos = NULL;
bool is_wait_die;