/* get the current qp index */
for (;;) {
+ qp_idx = ATOMIC_LOAD_N(uint32_t, &lock->reader_idx, __ATOMIC_RELAXED);
+
/*
* Notes on use of __ATOMIC_ACQUIRE
* We need to ensure the following:
* that any prior update to reader_idx is visible, i.e. that the cacheline
* of the lock is flushed from a local cpu cache so that we see any
* updates prior to the load. This is a non-issue on cache coherent
* systems like x86, but is relevant on other arches
- * Note: This applies to the reload below as well
*/
- qp_idx = ATOMIC_LOAD_N(uint32_t, &lock->reader_idx, __ATOMIC_ACQUIRE);
-
ATOMIC_ADD_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
__ATOMIC_ACQUIRE);
/*
* We need to use __ATOMIC_ACQUIRE here to ensure that the
* prior __ATOMIC_RELEASE write operation in ossl_rcu_read_unlock
* is visible prior to our read
* however this is likely just necessary to silence a tsan warning
+ * because the read side should not do any write operation
+ * outside the atomic itself
*/
do {
count = ATOMIC_LOAD_N(uint64_t, &qp->users, __ATOMIC_ACQUIRE);
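These hunks pair up across two functions: the read-side entry above takes the current qp with a relaxed index load followed by an acquire increment of its users count, while the do-loop here appears to be the writer-side drain that must observe each reader's release decrement from ossl_rcu_read_unlock. To make that pairing concrete, here is a minimal standalone sketch in plain C11 atomics rather than OpenSSL's ATOMIC_* macros; the type and function names are illustrative, and the undo-and-retry step is an assumption about the reload that the first hunk elides.

    #include <stdatomic.h>
    #include <stdint.h>

    /* One quiescence point: a reader count for one generation of the lock. */
    struct qp {
        _Atomic uint64_t users;
    };

    struct rcu_lock {
        _Atomic uint32_t reader_idx;  /* qp that new readers should pin */
        struct qp qp_group[2];        /* the minimum of 2 qps */
    };

    /* Read-side entry: pin the current qp by bumping its reader count. */
    static struct qp *read_lock(struct rcu_lock *lock)
    {
        uint32_t idx;

        for (;;) {
            /* The index load itself can be relaxed... */
            idx = atomic_load_explicit(&lock->reader_idx, memory_order_relaxed);

            /* ...because the acquire here orders all later reads after the
             * increment, including the recheck of reader_idx below. */
            atomic_fetch_add_explicit(&lock->qp_group[idx].users, 1,
                                      memory_order_acquire);

            /* If the writer swapped qps in between, undo and retry
             * (an assumed step; the hunk above elides the reload). */
            if (atomic_load_explicit(&lock->reader_idx,
                                     memory_order_relaxed) == idx)
                return &lock->qp_group[idx];

            atomic_fetch_sub_explicit(&lock->qp_group[idx].users, 1,
                                      memory_order_release);
        }
    }

    /* Read-side exit: the release pairs with the writer's acquire load. */
    static void read_unlock(struct qp *qp)
    {
        atomic_fetch_sub_explicit(&qp->users, 1, memory_order_release);
    }

    /* Writer-side drain: the acquire load sees every release decrement, so
     * once users reads 0 all readers of this qp have fully exited. */
    static void wait_for_readers(struct qp *qp)
    {
        while (atomic_load_explicit(&qp->users, memory_order_acquire) != 0)
            ;
    }

The point of relaxing the index load is that the acquire on the increment already orders everything that follows it; the index value itself needs no ordering, since a stale value is caught by the recheck and retried.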
struct rcu_lock_st *new;
/*
- * We need a minimum of 3 qp's
+ * We need a minimum of 2 qp's
*/
- if (num_writers < 3)
- num_writers = 3;
+ if (num_writers < 2)
+ num_writers = 2;
ctx = ossl_lib_ctx_get_concrete(ctx);
if (ctx == NULL)
return NULL;
pthread_mutex_init(&new->alloc_lock, NULL);
pthread_cond_init(&new->prior_signal, NULL);
pthread_cond_init(&new->alloc_signal, NULL);
- /* By default our first writer is already alloced */
- new->writers_alloced = 1;
new->qp_group = allocate_new_qp_group(new, num_writers);
if (new->qp_group == NULL) {
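The drop from 3 to 2 on the minimum qp count reflects how the two-phase scheme actually uses the group: at any instant one qp is accepting new readers while the writer retires and drains the other, so two suffice. The hunk below is the near-identical change in what appears to be the Windows implementation of the same constructor (note ossl_crypto_mutex_new in place of pthread_mutex_init). Continuing the illustrative C11 sketch above, and with the caveat that the real code indexes the group rather than doing a simple flip, the writer side of the two-qp scheme looks roughly like:

    /* Writer-side swap-and-drain over the two-qp minimum (illustrative,
     * continuing the sketch above). */
    static void synchronize(struct rcu_lock *lock)
    {
        uint32_t old_idx = atomic_load_explicit(&lock->reader_idx,
                                                memory_order_relaxed);
        uint32_t new_idx = (old_idx + 1) % 2;

        /* Publish the new index; release makes prior data writes visible
         * to any reader that subsequently pins the new qp. */
        atomic_store_explicit(&lock->reader_idx, new_idx,
                              memory_order_release);

        /* Drain readers still holding the retired qp. */
        wait_for_readers(&lock->qp_group[old_idx]);
    }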
struct rcu_lock_st *new;
/*
- * We need a minimum of 3 qps
+ * We need a minimum of 2 qps
*/
- if (num_writers < 3)
- num_writers = 3;
+ if (num_writers < 2)
+ num_writers = 2;
ctx = ossl_lib_ctx_get_concrete(ctx);
if (ctx == NULL)
return NULL;
new->alloc_lock = ossl_crypto_mutex_new();
new->prior_lock = ossl_crypto_mutex_new();
new->qp_group = allocate_new_qp_group(new, num_writers);
- /* By default the first qp is already alloced */
- new->writers_alloced = 1;
if (new->qp_group == NULL
|| new->alloc_signal == NULL
|| new->prior_signal == NULL
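For orientation, a hypothetical caller of the constructor these hunks modify. The API names here (ossl_rcu_lock_new taking a writer count and an OSSL_LIB_CTX, ossl_rcu_lock_free, and the read-side calls referenced in the first hunk) are assumptions drawn from the code above rather than a documented interface, and error handling is elided:

    #include "internal/rcu.h"  /* assumed location of the declarations */

    static void example(OSSL_LIB_CTX *libctx)
    {
        CRYPTO_RCU_LOCK *lock;

        /* Ask for a single writer; per the hunks above, the constructor
         * clamps the qp group to a minimum of 2 regardless. */
        lock = ossl_rcu_lock_new(1, libctx);
        if (lock == NULL)
            return;

        ossl_rcu_read_lock(lock);
        /* ... read RCU-protected data here ... */
        ossl_rcu_read_unlock(lock);

        ossl_rcu_lock_free(lock);
    }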