~~~~~~~~~~~~~~~~~~~~~~~~~~~
Currently the following synchronization primitives are supported:
-`raw_spinlock_t`, `spinlock_t`, `rwlock_t`, `mutex`, `seqlock_t`.
+`raw_spinlock_t`, `spinlock_t`, `rwlock_t`, `mutex`, `seqlock_t`,
+`bit_spinlock`.
For context locks with an initialization function (e.g., `spin_lock_init()`),
calling this function before initializing any guarded members or globals
prevents the compiler from issuing warnings about unguarded initialization.
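
As a hedged sketch of that initialization pattern (the struct and function
names here are illustrative, not from the patch):

  struct foo {
          spinlock_t lock;
          int data __guarded_by(lock);
  };

  static void foo_init(struct foo *f)
  {
          spin_lock_init(&f->lock);
          f->data = 0;    /* no warning: the lock was initialized first */
  }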
#include <asm/processor.h> /* for cpu_relax() */
+/*
+ * For static context analysis, we need a unique token for each possible bit
+ * that can be used as a bit_spinlock. The easiest way to do that is to create a
+ * fake context that we can cast to with the __bitlock(bitnum, addr) macro
+ * below, which will give us unique instances for each (bit, addr) pair that the
+ * static analysis can use.
+ */
+context_lock_struct(__context_bitlock) { };
+#define __bitlock(bitnum, addr) (struct __context_bitlock *)(bitnum + (addr))
+
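
To illustrate what the fake token buys us (a hedged sketch, not from the
patch): the struct is never dereferenced, and only the identity of the
resulting pointer matters, so distinct (bit, addr) pairs act as distinct
locks to the analysis:

  unsigned long word;
  int a __guarded_by(__bitlock(0, &word));  /* guarded by bit 0 */
  int b __guarded_by(__bitlock(1, &word));  /* guarded by bit 1 of the same word */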
/*
* bit-based spin_lock()
*
 * Don't use this unless you really need to: spin_lock() and spin_unlock()
 * are significantly faster.
*/
static __always_inline void bit_spin_lock(int bitnum, unsigned long *addr)
+ __acquires(__bitlock(bitnum, addr))
{
	/*
	 * Assuming the lock is uncontended, this never enters
	 * the body of the outer loop. If it is contended, then
	 * within the inner loop a non-atomic test is used to
	 * busywait with less bus contention for a good time to
	 * attempt to acquire the lock bit.
	 */
	preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
		preempt_enable();
		do {
			cpu_relax();
		} while (test_bit(bitnum, addr));
		preempt_disable();
	}
#endif
- __acquire(bitlock);
+ __acquire(__bitlock(bitnum, addr));
}
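
The `__acquires()` annotation on the declaration means callers are known to
hold the bit lock once the function returns. A hedged sketch of a wrapper
that relies on this (the function name is hypothetical):

  static void lock_bit3(unsigned long *bits)
          __acquires(__bitlock(3, bits))
  {
          bit_spin_lock(3, bits);
          /* returns with the bit lock held; the caller must release it */
  }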
/*
* Return true if it was acquired
*/
static __always_inline int bit_spin_trylock(int bitnum, unsigned long *addr)
+ __cond_acquires(true, __bitlock(bitnum, addr))
{
preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	if (unlikely(test_and_set_bit_lock(bitnum, addr))) {
		preempt_enable();
		return 0;
	}
#endif
- __acquire(bitlock);
+ __acquire(__bitlock(bitnum, addr));
return 1;
}
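
Here `__cond_acquires(true, ...)` tells the analysis that the lock is held
only when the function returns true, so guarded accesses are permitted only
on that branch. A hedged caller sketch (mirroring the test at the end of
this patch):

  if (bit_spin_trylock(3, &d->bits)) {
          d->counter++;                    /* ok: lock held on this branch */
          bit_spin_unlock(3, &d->bits);
  }
  /* accessing d->counter here would be diagnosed: lock not held */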
/*
 * bit-based spin_unlock()
*/
static __always_inline void bit_spin_unlock(int bitnum, unsigned long *addr)
+ __releases(__bitlock(bitnum, addr))
{
#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(!test_bit(bitnum, addr));
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	clear_bit_unlock(bitnum, addr);
#endif
preempt_enable();
- __release(bitlock);
+ __release(__bitlock(bitnum, addr));
}
/*
 * bit-based spin_unlock()
 * non-atomic version, which can be used e.g. if the bit lock itself is
 * protecting the rest of the flags in the word.
*/
static __always_inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
+ __releases(__bitlock(bitnum, addr))
{
#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(!test_bit(bitnum, addr));
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	__clear_bit_unlock(bitnum, addr);
#endif
preempt_enable();
- __release(bitlock);
+ __release(__bitlock(bitnum, addr));
}
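
To make the comment's use case concrete, a hedged sketch with hypothetical
names: the lock bit serializes all writers of the word, so non-atomic bit
ops on the remaining flags, and the non-atomic unlock, are safe:

  #define SLOT_LOCK   0    /* hypothetical lock bit */
  #define SLOT_DIRTY  1    /* hypothetical flag in the same word */

  static void slot_mark_dirty(unsigned long *flags)
  {
          bit_spin_lock(SLOT_LOCK, flags);
          __set_bit(SLOT_DIRTY, flags);    /* non-atomic: writers are serialized */
          __bit_spin_unlock(SLOT_LOCK, flags);
  }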
/*
 * Compile-only tests for common patterns that should not generate false
 * positive errors when compiled with Clang's context analysis.
*/
+#include <linux/bit_spinlock.h>
#include <linux/build_bug.h>
#include <linux/mutex.h>
#include <linux/seqlock.h>
(void)d->counter;
}
}
+
+struct test_bit_spinlock_data {
+ unsigned long bits;
+ int counter __guarded_by(__bitlock(3, &bits));
+};
+
+static void __used test_bit_spin_lock(struct test_bit_spinlock_data *d)
+{
+	/*
+	 * Note: the analysis may produce false negatives here, because it
+	 * cannot precisely track which bit the fake __bitlock() token
+	 * refers to.
+	 */
+ bit_spin_lock(3, &d->bits);
+ d->counter++;
+ bit_spin_unlock(3, &d->bits);
+
+ bit_spin_lock(3, &d->bits);
+ d->counter++;
+ __bit_spin_unlock(3, &d->bits);
+
+ if (bit_spin_trylock(3, &d->bits)) {
+ d->counter++;
+ bit_spin_unlock(3, &d->bits);
+ }
+}
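
For contrast, a hedged sketch (not part of the patch) of the kind of misuse
the analysis is meant to flag:

  static void broken_access(struct test_bit_spinlock_data *d)
  {
          d->counter++;    /* warning: requires __bitlock(3, &d->bits) */
  }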