`bit_spinlock`, RCU, SRCU (`srcu_struct`), `rw_semaphore`, `local_lock_t`,
`ww_mutex`.
-For context locks with an initialization function (e.g., `spin_lock_init()`),
-calling this function before initializing any guarded members or globals
-prevents the compiler from issuing warnings about unguarded initialization.
+To initialize variables guarded by a context lock that has an initialization
+function (``type_init(&lock)``), prefer ``guard(type_init)(&lock)`` or
+``scoped_guard(type_init, &lock) { ... }``, and initialize the guarded
+members or globals within the enclosing scope. This initializes the context
+lock and treats the context as active within the initialization scope
+(initialization implies exclusive access to the underlying object).
+
+For example::
+
+ struct my_data {
+ spinlock_t lock;
+ int counter __guarded_by(&lock);
+ };
+
+ void init_my_data(struct my_data *d)
+ {
+ ...
+ guard(spinlock_init)(&d->lock);
+ d->counter = 0;
+ ...
+ }
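+
+Note that the ``spinlock_init`` guard (like the other ``type_init`` guards)
+has no unlock operation: leaving the scope does nothing at runtime and merely
+ends the scope in which the analysis assumes exclusive access.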
+
+Alternatively, guarded variables can be initialized with context analysis
+disabled, preferably in the smallest possible scope (since such code receives
+no other checking): either with a ``context_unsafe(var = init)`` expression,
+or by marking small initialization functions with the
+``__context_unsafe(init)`` attribute.
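+
+For example, a minimal sketch reusing ``struct my_data`` from above (the
+function name is only illustrative)::
+
+	void init_my_data_unchecked(struct my_data *d)
+	{
+		spin_lock_init(&d->lock);
+		/* Analysis is disabled for this one expression only. */
+		context_unsafe(d->counter = 0);
+	}
+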
Lockdep assertions, such as `lockdep_assert_held()`, inform the compiler's
context analysis that the associated synchronization primitive is held after
/*
* The "assert_capability" attribute is a bit confusingly named. It does not
* generate a check. Instead, it tells the analysis to *assume* the capability
- * is held. This is used for:
- *
- * 1. Augmenting runtime assertions, that can then help with patterns beyond the
- * compiler's static reasoning abilities.
- *
- * 2. Initialization of context locks, so we can access guarded variables right
- * after initialization (nothing else should access the same object yet).
+ * is held. This is used to augment runtime assertions, which can then help
+ * with patterns beyond the compiler's static reasoning abilities.
*/
# define __assumes_ctx_lock(...) __attribute__((assert_capability(__VA_ARGS__)))
# define __assumes_shared_ctx_lock(...) __attribute__((assert_shared_capability(__VA_ARGS__)))
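+
+/*
+ * For example (a sketch; my_assert_holding() is not an existing kernel API):
+ * an assertion helper whose declaration carries the attribute, so that after
+ * a call to it the analysis assumes @lock is held:
+ *
+ *	void my_assert_holding(struct mutex *lock) __assumes_ctx_lock(lock);
+ *
+ * Such a helper should perform a real runtime check (and WARN on failure),
+ * since callers will access __guarded_by(lock) data unchecked afterwards.
+ */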
local_lock_nested_bh(_T->lock),
local_unlock_nested_bh(_T->lock))
+DEFINE_LOCK_GUARD_1(local_lock_init, local_lock_t, local_lock_init(_T->lock), /* */)
+
DECLARE_LOCK_GUARD_1_ATTRS(local_lock, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
#define class_local_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock, _T)
DECLARE_LOCK_GUARD_1_ATTRS(local_lock_irq, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
#define class_local_lock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_irqsave, _T)
DECLARE_LOCK_GUARD_1_ATTRS(local_lock_nested_bh, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
#define class_local_lock_nested_bh_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_nested_bh, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock_init, __acquires(_T), __releases(*(local_lock_t **)_T))
+#define class_local_lock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_init, _T)
+
+DEFINE_LOCK_GUARD_1(local_trylock_init, local_trylock_t, local_trylock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(local_trylock_init, __acquires(_T), __releases(*(local_trylock_t **)_T))
+#define class_local_trylock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_trylock_init, _T)
#endif
#include <linux/percpu-defs.h>
#include <linux/irqflags.h>
#include <linux/lockdep.h>
+#include <linux/debug_locks.h>
#include <asm/current.h>
#ifndef CONFIG_PREEMPT_RT
DEFINE_LOCK_GUARD_1(mutex, struct mutex, mutex_lock(_T->lock), mutex_unlock(_T->lock))
DEFINE_LOCK_GUARD_1_COND(mutex, _try, mutex_trylock(_T->lock))
DEFINE_LOCK_GUARD_1_COND(mutex, _intr, mutex_lock_interruptible(_T->lock), _RET == 0)
+DEFINE_LOCK_GUARD_1(mutex_init, struct mutex, mutex_init(_T->lock), /* */)
DECLARE_LOCK_GUARD_1_ATTRS(mutex, __acquires(_T), __releases(*(struct mutex **)_T))
#define class_mutex_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex, _T)
#define class_mutex_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_try, _T)
DECLARE_LOCK_GUARD_1_ATTRS(mutex_intr, __acquires(_T), __releases(*(struct mutex **)_T))
#define class_mutex_intr_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_intr, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(mutex_init, __acquires(_T), __releases(*(struct mutex **)_T))
+#define class_mutex_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_init, _T)
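+/*
+ * Example (sketch): guard(mutex_init)(&obj->lock) initializes the mutex and
+ * treats it as held for the remainder of the scope, so that
+ * __guarded_by(&obj->lock) members can be initialized without warnings;
+ * leaving the scope performs no unlock.
+ */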
extern unsigned long mutex_get_owner(struct mutex *lock);
DECLARE_LOCK_GUARD_1_ATTRS(rwsem_write_kill, __acquires(_T), __releases(*(struct rw_semaphore **)_T))
#define class_rwsem_write_kill_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_write_kill, _T)
+DEFINE_LOCK_GUARD_1(rwsem_init, struct rw_semaphore, init_rwsem(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_init, __acquires(_T), __releases(*(struct rw_semaphore **)_T))
+#define class_rwsem_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_init, _T)
+
/*
* downgrade write lock to read lock
*/
*/
#include <linux/compiler.h>
+#include <linux/cleanup.h>
#include <linux/kcsan-checks.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#define scoped_seqlock_read(_seqlock, _target) \
__scoped_seqlock_read(_seqlock, _target, __UNIQUE_ID(seqlock))
+DEFINE_LOCK_GUARD_1(seqlock_init, seqlock_t, seqlock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(seqlock_init, __acquires(_T), __releases(*(seqlock_t **)_T))
+#define class_seqlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(seqlock_init, _T)
+
#endif /* __LINUX_SEQLOCK_H */
DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
#define class_raw_spinlock_irqsave_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave_try, _T)
+DEFINE_LOCK_GUARD_1(raw_spinlock_init, raw_spinlock_t, raw_spin_lock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_init, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_init, _T)
+
DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
spin_lock(_T->lock),
spin_unlock(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irqsave_try, __acquires(_T), __releases(*(spinlock_t **)_T))
#define class_spinlock_irqsave_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_irqsave_try, _T)
+DEFINE_LOCK_GUARD_1(spinlock_init, spinlock_t, spin_lock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_init, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_init, _T)
+
DEFINE_LOCK_GUARD_1(read_lock, rwlock_t,
read_lock(_T->lock),
read_unlock(_T->lock))
DECLARE_LOCK_GUARD_1_ATTRS(write_lock_irqsave, __acquires(_T), __releases(*(rwlock_t **)_T))
#define class_write_lock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(write_lock_irqsave, _T)
+DEFINE_LOCK_GUARD_1(rwlock_init, rwlock_t, rwlock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(rwlock_init, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_rwlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwlock_init, _T)
+
#undef __LINUX_INSIDE_SPINLOCK_H
#endif /* __LINUX_SPINLOCK_H */
}; \
static void __used test_##class##_init(struct test_##class##_data *d) \
{ \
- type_init(&d->lock); \
+ guard(type_init)(&d->lock); \
d->counter = 0; \
} \
static void __used test_##class(struct test_##class##_data *d) \
TEST_SPINLOCK_COMMON(raw_spinlock,
raw_spinlock_t,
- raw_spin_lock_init,
+ raw_spinlock_init,
raw_spin_lock,
raw_spin_unlock,
raw_spin_trylock,
TEST_SPINLOCK_COMMON(spinlock,
spinlock_t,
- spin_lock_init,
+ spinlock_init,
spin_lock,
spin_unlock,
spin_trylock,
static void __used test_mutex_init(struct test_mutex_data *d)
{
- mutex_init(&d->mtx);
+ guard(mutex_init)(&d->mtx);
d->counter = 0;
}
static void __used test_seqlock_init(struct test_seqlock_data *d)
{
- seqlock_init(&d->sl);
+ guard(seqlock_init)(&d->sl);
d->counter = 0;
}
static void __used test_rwsem_init(struct test_rwsem_data *d)
{
- init_rwsem(&d->sem);
+ guard(rwsem_init)(&d->sem);
d->counter = 0;
}
static void __used test_local_lock_init(struct test_local_lock_data *d)
{
- local_lock_init(&d->lock);
+ guard(local_lock_init)(&d->lock);
d->counter = 0;
}
static void __used test_local_trylock_init(struct test_local_trylock_data *d)
{
- local_trylock_init(&d->lock);
+ guard(local_trylock_init)(&d->lock);
d->counter = 0;
}