From: Maria Matejka
Date: Wed, 5 Jun 2024 11:12:12 +0000 (+0200)
Subject: Read-write spinlocks
X-Git-Tag: v3.0.0~181
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=4104d668d9922c2a5ebadfe73ffd34612925d550;p=thirdparty%2Fbird.git

Read-write spinlocks
---

diff --git a/lib/birdlib.h b/lib/birdlib.h
index 9a1a3e6d1..a53204d5b 100644
--- a/lib/birdlib.h
+++ b/lib/birdlib.h
@@ -291,4 +291,9 @@ static inline u64 u64_hash0(u64 v, u32 p, u64 acc)
 static inline u32 u64_hash(u64 v)
 { return hash_value(u64_hash0(v, HASH_PARAM, 0)); }
 
+
+/* Yield for a little while. Use only in special cases. */
+void birdloop_yield(void);
+
+
 #endif
diff --git a/lib/io-loop.h b/lib/io-loop.h
index 6c9465314..aee0566f0 100644
--- a/lib/io-loop.h
+++ b/lib/io-loop.h
@@ -72,7 +72,4 @@ void birdloop_remove_socket(struct birdloop *, struct birdsock *);
 
 void birdloop_init(void);
 
-/* Yield for a little while. Use only in special cases. */
-void birdloop_yield(void);
-
 #endif /* _BIRD_IO_LOOP_H_ */
diff --git a/lib/locking.h b/lib/locking.h
index 7a59d4709..7741a62fc 100644
--- a/lib/locking.h
+++ b/lib/locking.h
@@ -9,6 +9,7 @@
 #ifndef _BIRD_LOCKING_H_
 #define _BIRD_LOCKING_H_
 
+#include "lib/birdlib.h"
 #include "lib/macro.h"
 #include "lib/rcu.h"
 
@@ -83,7 +84,150 @@ extern DOMAIN(the_bird) the_bird_domain;
 
 #define ASSERT_THE_BIRD_LOCKED ({ if (!the_bird_locked()) bug("The BIRD lock must be locked here: %s:%d", __FILE__, __LINE__); })
 
-/* Unwind stored lock state helpers */
+/*
+ * RW spinlocks
+ */
+
+#define RWS_READ_PENDING_POS	0
+#define RWS_READ_ACTIVE_POS	20
+#define RWS_WRITE_PENDING_POS	40
+#define RWS_WRITE_ACTIVE_POS	56
+
+#define RWS_READ_PENDING	(1ULL << RWS_READ_PENDING_POS)
+#define RWS_READ_ACTIVE		(1ULL << RWS_READ_ACTIVE_POS)
+#define RWS_WRITE_PENDING	(1ULL << RWS_WRITE_PENDING_POS)
+#define RWS_WRITE_ACTIVE	(1ULL << RWS_WRITE_ACTIVE_POS)
+
+#define RWS_READ_PENDING_MASK	(RWS_READ_ACTIVE - 1)
+#define RWS_READ_ACTIVE_MASK	((RWS_WRITE_PENDING - 1) & ~(RWS_READ_ACTIVE - 1))
+#define RWS_WRITE_PENDING_MASK	((RWS_WRITE_ACTIVE - 1) & ~(RWS_WRITE_PENDING - 1))
+#define RWS_WRITE_ACTIVE_MASK	(~(RWS_WRITE_ACTIVE - 1))
+
+typedef struct {
+  u64 _Atomic spin;
+} rw_spinlock;
+
+#define MAX_RWS_AT_ONCE		32
+extern _Thread_local rw_spinlock *rw_spinlocks_taken[MAX_RWS_AT_ONCE];
+extern _Thread_local btime rw_spinlocks_time[MAX_RWS_AT_ONCE];
+extern _Thread_local u32 rw_spinlocks_taken_cnt;
+extern _Thread_local u32 rw_spinlocks_taken_write;
+
+/* Borrowed from lib/timer.h */
+btime current_time_now(void);
+
+#ifdef DEBUGGING
+static inline void rws_mark(rw_spinlock *p, _Bool write, _Bool lock)
+{
+  if (lock) {
+    ASSERT_DIE(rw_spinlocks_taken_cnt < MAX_RWS_AT_ONCE);
+    if (write)
+      rw_spinlocks_taken_write |= (1 << rw_spinlocks_taken_cnt);
+    else
+      rw_spinlocks_taken_write &= ~(1 << rw_spinlocks_taken_cnt);
+    rw_spinlocks_time[rw_spinlocks_taken_cnt] = current_time_now();
+    rw_spinlocks_taken[rw_spinlocks_taken_cnt++] = p;
+
+  }
+  else {
+    ASSERT_DIE(rw_spinlocks_taken_cnt > 0);
+    ASSERT_DIE(rw_spinlocks_taken[--rw_spinlocks_taken_cnt] == p);
+    ASSERT_DIE(!(rw_spinlocks_taken_write & (1 << rw_spinlocks_taken_cnt)) == !write);
+    btime tdif = current_time_now() - rw_spinlocks_time[rw_spinlocks_taken_cnt];
+    if (tdif > 1 S_)
+      log(L_WARN "Spent an alarming time %t s in spinlock %p (%s); "
+          "if this happens often to you, please contact the developers.",
+          tdif, p, write ? "write" : "read");
+  }
+}
+#else
+#define rws_mark(...)
+#endif
+
+static inline void rws_init(rw_spinlock *p)
+{
+  atomic_store_explicit(&p->spin, 0, memory_order_relaxed);
+}
+
+static inline void rws_read_lock(rw_spinlock *p)
+{
+  u64 old = atomic_fetch_add_explicit(&p->spin, RWS_READ_PENDING, memory_order_acquire);
+
+  while (1)
+  {
+    /* Wait until all writers end */
+    while (old & (RWS_WRITE_PENDING_MASK | RWS_WRITE_ACTIVE_MASK))
+    {
+      birdloop_yield();
+      old = atomic_load_explicit(&p->spin, memory_order_acquire);
+    }
+
+    /* Convert to active */
+    old = atomic_fetch_add_explicit(&p->spin, RWS_READ_ACTIVE - RWS_READ_PENDING, memory_order_acq_rel);
+
+    if (old & RWS_WRITE_ACTIVE_MASK)
+      /* Oh but some writer was faster */
+      old = atomic_fetch_sub_explicit(&p->spin, RWS_READ_ACTIVE - RWS_READ_PENDING, memory_order_acq_rel);
+    else
+      /* No writers, approved */
+      break;
+  }
+
+  rws_mark(p, 0, 1);
+}
+
+static inline void rws_read_unlock(rw_spinlock *p)
+{
+  rws_mark(p, 0, 0);
+  u64 old = atomic_fetch_sub_explicit(&p->spin, RWS_READ_ACTIVE, memory_order_release);
+  ASSERT_DIE(old & RWS_READ_ACTIVE_MASK);
+}
+
+static inline void rws_write_lock(rw_spinlock *p)
+{
+  u64 old = atomic_fetch_add_explicit(&p->spin, RWS_WRITE_PENDING, memory_order_acquire);
+
+  /* Wait until all active readers end */
+  while (1)
+  {
+    while (old & (RWS_READ_ACTIVE_MASK | RWS_WRITE_ACTIVE_MASK))
+    {
+      birdloop_yield();
+      old = atomic_load_explicit(&p->spin, memory_order_acquire);
+    }
+
+    /* Mark self as active */
+    u64 updated = atomic_fetch_or_explicit(&p->spin, RWS_WRITE_ACTIVE, memory_order_acquire);
+
+    /* And it's us */
+    if (!(updated & RWS_WRITE_ACTIVE))
+    {
+      if (updated & RWS_READ_ACTIVE_MASK)
+        /* But some reader was faster */
+        atomic_fetch_and_explicit(&p->spin, ~RWS_WRITE_ACTIVE, memory_order_release);
+      else
+        /* No readers, approved */
+        break;
+    }
+  }
+
+  /* It's us, then we aren't actually pending */
+  u64 updated = atomic_fetch_sub_explicit(&p->spin, RWS_WRITE_PENDING, memory_order_acquire);
+  ASSERT_DIE(updated & RWS_WRITE_PENDING_MASK);
+  rws_mark(p, 1, 1);
+}
+
+static inline void rws_write_unlock(rw_spinlock *p)
+{
+  rws_mark(p, 1, 0);
+  u64 old = atomic_fetch_and_explicit(&p->spin, ~RWS_WRITE_ACTIVE, memory_order_release);
+  ASSERT_DIE(old & RWS_WRITE_ACTIVE);
+}
+
+
+/*
+ * Unwind stored lock state helpers
+ */
 struct locking_unwind_status {
   struct lock_order *desired;
   enum {
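The whole lock state lives in a single 64-bit atomic word: bits 0-19 count pending readers, bits 20-39 active readers, bits 40-55 pending writers and bits 56-63 active writers, so both lock paths get by with a few fetch-and-add/or operations and spin via birdloop_yield() while waiting. A minimal usage sketch of the API added above; struct cache and its fields are illustrative only, not part of the patch:

/* Sketch only: a hypothetical shared value guarded by the new lock. */
#include "lib/locking.h"

struct cache {
  rw_spinlock lock;
  u32 value;
};

static void cache_init(struct cache *c)
{
  rws_init(&c->lock);
  c->value = 0;
}

static u32 cache_get(struct cache *c)
{
  rws_read_lock(&c->lock);      /* any number of readers may hold this at once */
  u32 v = c->value;
  rws_read_unlock(&c->lock);
  return v;
}

static void cache_set(struct cache *c, u32 v)
{
  rws_write_lock(&c->lock);     /* excludes readers and other writers */
  c->value = v;
  rws_write_unlock(&c->lock);
}

Because waiters spin rather than sleep, sections guarded by these locks are meant to stay short; the DEBUGGING build above even logs a warning when a single lock is held for more than a second.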
diff --git a/lib/locking_test.c b/lib/locking_test.c
index 6c5de0cb8..38faed61b 100644
--- a/lib/locking_test.c
+++ b/lib/locking_test.c
@@ -77,6 +77,96 @@ t_locking(void)
   return 1;
 }
 
+#define RWS_DATASIZE	333
+#define RWS_THREADS	128
+
+struct rws_test_data {
+  int data[RWS_DATASIZE];
+  rw_spinlock rws[RWS_DATASIZE];
+};
+
+static void *
+rwspin_thread_run(void *_rtd)
+{
+  struct rws_test_data *d = _rtd;
+
+  for (_Bool sorted = 0; !sorted++; )
+  {
+    for (int i=0; (i < RWS_DATASIZE-1); i++)
+    {
+      rws_read_lock(&d->rws[i]);
+      rws_read_lock(&d->rws[i+1]);
+
+      ASSERT_DIE(d->data[i] >= 0);
+      ASSERT_DIE(d->data[i+1] >= 0);
+      if (d->data[i] > d->data[i+1])
+        sorted = 0;
+
+      rws_read_unlock(&d->rws[i+1]);
+      rws_read_unlock(&d->rws[i]);
+    }
+
+    for (int i=0; (i < RWS_DATASIZE-1); i++)
+    {
+      rws_write_lock(&d->rws[i]);
+      rws_write_lock(&d->rws[i+1]);
+
+      int first = d->data[i];
+      int second = d->data[i+1];
+
+      ASSERT_DIE(first >= 0);
+      ASSERT_DIE(second >= 0);
+
+      d->data[i] = d->data[i+1] = -1;
+
+      if (first > second)
+      {
+        d->data[i] = second;
+        d->data[i+1] = first;
+      }
+      else
+      {
+        d->data[i] = first;
+        d->data[i+1] = second;
+      }
+
+      rws_write_unlock(&d->rws[i+1]);
+      rws_write_unlock(&d->rws[i]);
+    }
+  }
+
+  return NULL;
+}
+
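Both passes above acquire the two per-slot locks in ascending index order (slot i before slot i+1), so the RWS_THREADS concurrent threads can only delay each other, never deadlock. The same ordering discipline could be factored into helpers; a hypothetical sketch, not part of the patch:

/* Hypothetical helpers, not part of the patch: always acquire the pair
 * (i, i+1) in ascending order, release in the reverse order. */
static inline void rws_write_lock_pair(struct rws_test_data *d, int i)
{
  rws_write_lock(&d->rws[i]);
  rws_write_lock(&d->rws[i+1]);
}

static inline void rws_write_unlock_pair(struct rws_test_data *d, int i)
{
  rws_write_unlock(&d->rws[i+1]);
  rws_write_unlock(&d->rws[i]);
}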
+static int
+t_rwspin(void)
+{
+  struct rws_test_data d;
+
+  /* Setup an array to sort */
+  for (int i=0; i