Currently the following synchronization primitives are supported:
`raw_spinlock_t`, `spinlock_t`, `rwlock_t`, `mutex`, `seqlock_t`,
-`bit_spinlock`, RCU, SRCU (`srcu_struct`), `rw_semaphore`, `local_lock_t`.
+`bit_spinlock`, RCU, SRCU (`srcu_struct`), `rw_semaphore`, `local_lock_t`,
+`ww_mutex`.
For context locks with an initialization function (e.g., `spin_lock_init()`),
calling this function before initializing any guarded members or globals
avoids false-positive warnings: the initializer is annotated to assume the
lock, so the plain initializing writes that follow are accepted by the
analysis.
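As a minimal illustration of that ordering (hypothetical `struct foo` and
`foo_init()`; only `__guarded_by()` from this series and the regular
`spinlock_t` API are assumed):

	#include <linux/spinlock.h>

	struct foo {
		spinlock_t lock;
		int counter __guarded_by(&lock);
	};

	static void foo_init(struct foo *f)
	{
		spin_lock_init(&f->lock);	/* initialize the context lock first ... */
		f->counter = 0;			/* ... then the members it guards */
	}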
unsigned int is_wait_die;
};
-struct ww_mutex {
+context_lock_struct(ww_mutex) {
struct WW_MUTEX_BASE base;
struct ww_acquire_ctx *ctx;
#ifdef DEBUG_WW_MUTEXES
#endif
};
-struct ww_acquire_ctx {
+context_lock_struct(ww_acquire_ctx) {
struct task_struct *task;
unsigned long stamp;
unsigned int acquired;
*/
static inline void ww_mutex_init(struct ww_mutex *lock,
struct ww_class *ww_class)
+ __assumes_ctx_lock(lock)
{
ww_mutex_base_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key);
lock->ctx = NULL;
*/
static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
struct ww_class *ww_class)
+ __acquires(ctx) __no_context_analysis
{
ctx->task = current;
ctx->stamp = atomic_long_inc_return_relaxed(&ww_class->stamp);
* data structures.
*/
static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
+ __releases(ctx) __acquires_shared(ctx) __no_context_analysis
{
#ifdef DEBUG_WW_MUTEXES
lockdep_assert_held(ctx);
* mutexes have been released with ww_mutex_unlock.
*/
static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
+ __releases_shared(ctx) __no_context_analysis
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
mutex_release(&ctx->first_lock_dep_map, _THIS_IP_);
*
* A mutex acquired with this function must be released with ww_mutex_unlock.
*/
-extern int /* __must_check */ ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx);
+extern int /* __must_check */ ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+ __cond_acquires(0, lock) __must_hold(ctx);
/**
* ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible
* A mutex acquired with this function must be released with ww_mutex_unlock.
*/
extern int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
- struct ww_acquire_ctx *ctx);
+ struct ww_acquire_ctx *ctx)
+ __cond_acquires(0, lock) __must_hold(ctx);
/**
* ww_mutex_lock_slow - slowpath acquiring of the w/w mutex
*/
static inline void
ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+ __acquires(lock) __must_hold(ctx) __no_context_analysis
{
int ret;
#ifdef DEBUG_WW_MUTEXES
static inline int __must_check
ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx)
+ __cond_acquires(0, lock) __must_hold(ctx)
{
#ifdef DEBUG_WW_MUTEXES
DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
return ww_mutex_lock_interruptible(lock, ctx);
}
-extern void ww_mutex_unlock(struct ww_mutex *lock);
+extern void ww_mutex_unlock(struct ww_mutex *lock) __releases(lock);
extern int __must_check ww_mutex_trylock(struct ww_mutex *lock,
- struct ww_acquire_ctx *ctx);
+ struct ww_acquire_ctx *ctx)
+ __cond_acquires(true, lock) __must_hold(ctx);
/***
* ww_mutex_destroy - mark a w/w mutex unusable
* this function is called.
*/
static inline void ww_mutex_destroy(struct ww_mutex *lock)
+ __must_not_hold(lock)
{
#ifndef CONFIG_PREEMPT_RT
mutex_destroy(&lock->base);
#include <linux/seqlock.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
+#include <linux/ww_mutex.h>
/*
* Test that helper macros work as expected.
local_unlock(&test_local_trylock_data.lock);
}
}
+
+static DEFINE_WD_CLASS(ww_class);
+
+struct test_ww_mutex_data {
+	struct ww_mutex mtx;
+	int counter __guarded_by(&mtx);
+};
+
+static void __used test_ww_mutex_init(struct test_ww_mutex_data *d)
+{
+	ww_mutex_init(&d->mtx, &ww_class);
+	d->counter = 0;
+}
+
+static void __used test_ww_mutex_lock_noctx(struct test_ww_mutex_data *d)
+{
+	if (!ww_mutex_lock(&d->mtx, NULL)) {
+		d->counter++;
+		ww_mutex_unlock(&d->mtx);
+	}
+
+	if (!ww_mutex_lock_interruptible(&d->mtx, NULL)) {
+		d->counter++;
+		ww_mutex_unlock(&d->mtx);
+	}
+
+	if (ww_mutex_trylock(&d->mtx, NULL)) {
+		d->counter++;
+		ww_mutex_unlock(&d->mtx);
+	}
+
+	ww_mutex_lock_slow(&d->mtx, NULL);
+	d->counter++;
+	ww_mutex_unlock(&d->mtx);
+
+	ww_mutex_destroy(&d->mtx);
+}
+
+static void __used test_ww_mutex_lock_ctx(struct test_ww_mutex_data *d)
+{
+	struct ww_acquire_ctx ctx;
+
+	ww_acquire_init(&ctx, &ww_class);
+
+	if (!ww_mutex_lock(&d->mtx, &ctx)) {
+		d->counter++;
+		ww_mutex_unlock(&d->mtx);
+	}
+
+	if (!ww_mutex_lock_interruptible(&d->mtx, &ctx)) {
+		d->counter++;
+		ww_mutex_unlock(&d->mtx);
+	}
+
+	if (ww_mutex_trylock(&d->mtx, &ctx)) {
+		d->counter++;
+		ww_mutex_unlock(&d->mtx);
+	}
+
+	ww_mutex_lock_slow(&d->mtx, &ctx);
+	d->counter++;
+	ww_mutex_unlock(&d->mtx);
+
+	ww_acquire_done(&ctx);
+	ww_acquire_fini(&ctx);
+
+	ww_mutex_destroy(&d->mtx);
+}
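The test functions above only exercise correct usage. For contrast, a minimal
sketch of the kind of misuse the new annotations should let the analysis flag
(hypothetical `broken_ww_mutex_user()`, not part of this patch): the guarded
counter is written without holding the mutex, and the success path of
`ww_mutex_lock()` (return value 0, per `__cond_acquires(0, lock)`) returns
without unlocking.

	/* Hypothetical example only: expected to produce context-analysis warnings. */
	static void broken_ww_mutex_user(struct test_ww_mutex_data *d)
	{
		d->counter++;			/* d->mtx not held here */

		if (!ww_mutex_lock(&d->mtx, NULL))
			return;			/* returns with d->mtx still held */
	}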