"write_granted=%u, write_quota=%u\n",
rwl, isc_thread_self(), operation,
(type == isc_rwlocktype_read ? "read" : "write"),
- atomic_load_explicit(&rwl->write_requests, memory_order_relaxed),
- atomic_load_explicit(&rwl->write_completions, memory_order_relaxed),
- atomic_load_explicit(&rwl->cnt_and_flag, memory_order_relaxed),
+ atomic_load_relaxed(&rwl->write_requests),
+ atomic_load_relaxed(&rwl->write_completions),
+ atomic_load_relaxed(&rwl->cnt_and_flag),
rwl->readers_waiting,
- rwl->write_granted, rwl->write_quota);
+ atomic_load_relaxed(&rwl->write_granted), rwl->write_quota);
}
#endif /* ISC_RWLOCK_TRACE */
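/*
 * The atomic_*_relaxed() helpers used throughout this change are
 * assumed to be thin wrappers over the C11 _explicit forms, roughly:
 *
 *	#define atomic_load_relaxed(p) \
 *		atomic_load_explicit((p), memory_order_relaxed)
 *	#define atomic_store_relaxed(p, v) \
 *		atomic_store_explicit((p), (v), memory_order_relaxed)
 *	#define atomic_fetch_add_relaxed(p, v) \
 *		atomic_fetch_add_explicit((p), (v), memory_order_relaxed)
 *	#define atomic_fetch_sub_relaxed(p, v) \
 *		atomic_fetch_sub_explicit((p), (v), memory_order_relaxed)
 *	#define atomic_compare_exchange_strong_relaxed(p, e, d)        \
 *		atomic_compare_exchange_strong_explicit((p), (e), (d), \
 *			memory_order_relaxed, memory_order_relaxed)
 *
 * Relaxed operations are individually atomic but establish no
 * inter-thread ordering by themselves; the mutex and condition
 * variables on the slow paths below provide the happens-before edges
 * wherever a thread actually blocks.
 */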
*/
rwl->magic = 0;
- rwl->spins = 0;
+ atomic_init(&rwl->spins, 0);
atomic_init(&rwl->write_requests, 0);
atomic_init(&rwl->write_completions, 0);
atomic_init(&rwl->cnt_and_flag, 0);
rwl->readers_waiting = 0;
- rwl->write_granted = 0;
+ atomic_init(&rwl->write_granted, 0);
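/*
 * atomic_init() gives each object its first value without an atomic
 * operation or any ordering, which is sufficient here because init
 * runs before the lock is visible to other threads. A plain
 * "rwl->spins = 0" on an atomic object would instead compile to a
 * sequentially consistent store, paying for synchronization that
 * nothing can observe yet.
 */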
if (read_quota != 0) {
UNEXPECTED_ERROR(__FILE__, __LINE__,
"read quota is not supported");
isc_rwlock_destroy(isc_rwlock_t *rwl) {
REQUIRE(VALID_RWLOCK(rwl));
- REQUIRE(atomic_load_explicit(&rwl->write_requests, memory_order_relaxed) ==
- atomic_load_explicit(&rwl->write_completions, memory_order_relaxed) &&
- atomic_load_explicit(&rwl->cnt_and_flag, memory_order_relaxed) == 0 && rwl->readers_waiting == 0);
+ REQUIRE(atomic_load_relaxed(&rwl->write_requests) ==
+ atomic_load_relaxed(&rwl->write_completions) &&
+ atomic_load_relaxed(&rwl->cnt_and_flag) == 0 &&
+ rwl->readers_waiting == 0);
rwl->magic = 0;
(void)isc_condition_destroy(&rwl->readable);
#endif
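/*
 * The REQUIRE above spells out the idle-lock invariant. cnt_and_flag
 * packs the fast-path state into one word, presumably with
 * WRITER_ACTIVE as a low flag bit and the active-reader count held in
 * multiples of READER_INCR:
 *
 *	cnt_and_flag = active_readers * READER_INCR
 *		       | (WRITER_ACTIVE iff a writer holds the lock)
 *
 * write_requests is a ticket counter bumped by every arriving writer
 * and write_completions is bumped as each writer finishes, so equality
 * means no writer is active or queued. Destroy is therefore legal only
 * when the lock is completely idle.
 */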
if (type == isc_rwlocktype_read) {
- if (atomic_load_explicit(&rwl->write_requests, memory_order_relaxed) !=
- atomic_load_explicit(&rwl->write_completions, memory_order_relaxed))
+ if (atomic_load_relaxed(&rwl->write_requests) !=
+ atomic_load_relaxed(&rwl->write_completions))
{
/* there is a waiting or active writer */
LOCK(&rwl->lock);
- if (atomic_load_explicit(&rwl->write_requests, memory_order_relaxed) !=
- atomic_load_explicit(&rwl->write_completions, memory_order_relaxed)) {
+ if (atomic_load_relaxed(&rwl->write_requests) !=
+ atomic_load_relaxed(&rwl->write_completions))
+ {
rwl->readers_waiting++;
WAIT(&rwl->readable, &rwl->lock);
rwl->readers_waiting--;
}
UNLOCK(&rwl->lock);
}
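/*
 * The read-lock slow path above is a double-checked wait: test the
 * writer counters locklessly, and only if a writer might be queued
 * take rwl->lock and re-test before sleeping on the condition
 * variable. The second test under the mutex is what makes the WAIT
 * race-free; the first, relaxed test just keeps the uncontended case
 * off the mutex entirely.
 */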
- cntflag = atomic_fetch_add_explicit(&rwl->cnt_and_flag,
- READER_INCR,
- memory_order_relaxed);
+ cntflag = atomic_fetch_add_relaxed(&rwl->cnt_and_flag,
+ READER_INCR);
POST(cntflag);
while (1) {
- if ((atomic_load_explicit(&rwl->cnt_and_flag, memory_order_relaxed) & WRITER_ACTIVE) == 0)
+ if ((atomic_load_relaxed(&rwl->cnt_and_flag)
+ & WRITER_ACTIVE) == 0)
+ {
break;
+ }
/* A writer is still working */
LOCK(&rwl->lock);
rwl->readers_waiting++;
- if ((atomic_load_explicit(&rwl->cnt_and_flag, memory_order_relaxed) & WRITER_ACTIVE) != 0) {
+ if ((atomic_load_relaxed(&rwl->cnt_and_flag)
+ & WRITER_ACTIVE) != 0)
+ {
WAIT(&rwl->readable, &rwl->lock);
}
rwl->readers_waiting--;
* quota, reset the condition (race among readers doesn't
* matter).
*/
- rwl->write_granted = 0;
+ atomic_store_relaxed(&rwl->write_granted, 0);
} else {
int32_t prev_writer;
/* enter the waiting queue, and wait for our turn */
- prev_writer = atomic_fetch_add_explicit(&rwl->write_requests, 1,
- memory_order_relaxed);
- while (atomic_load_explicit(&rwl->write_completions, memory_order_relaxed) != prev_writer) {
+ prev_writer = atomic_fetch_add_relaxed(&rwl->write_requests, 1);
+ while (atomic_load_relaxed(&rwl->write_completions)
+ != prev_writer)
+ {
LOCK(&rwl->lock);
- if (atomic_load_explicit(&rwl->write_completions, memory_order_relaxed) != prev_writer) {
+ if (atomic_load_relaxed(&rwl->write_completions)
+ != prev_writer)
+ {
WAIT(&rwl->writeable, &rwl->lock);
UNLOCK(&rwl->lock);
continue;
while (1) {
int_fast32_t zero = 0;
- if (atomic_compare_exchange_strong_explicit
- (&rwl->cnt_and_flag, &zero, WRITER_ACTIVE,
- memory_order_relaxed, memory_order_relaxed))
+ if (atomic_compare_exchange_strong_relaxed(
+ &rwl->cnt_and_flag, &zero, WRITER_ACTIVE))
{
break;
}
/* Another active reader or writer is working. */
LOCK(&rwl->lock);
- if (atomic_load_explicit(&rwl->cnt_and_flag, memory_order_relaxed) != 0) {
+ if (atomic_load_relaxed(&rwl->cnt_and_flag) != 0) {
WAIT(&rwl->writeable, &rwl->lock);
}
UNLOCK(&rwl->lock);
}
- INSIST((atomic_load_explicit(&rwl->cnt_and_flag, memory_order_relaxed) & WRITER_ACTIVE));
- rwl->write_granted++;
+ INSIST((atomic_load_relaxed(&rwl->cnt_and_flag)
+ & WRITER_ACTIVE));
+ atomic_fetch_add_relaxed(&rwl->write_granted, 1);
}
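/*
 * Write acquisition is a two-stage protocol. First take a ticket (the
 * fetch_add on write_requests) and wait until write_completions
 * catches up, which serializes writers in FIFO order. Then loop until
 * cnt_and_flag can be CAS'd from 0 to WRITER_ACTIVE, i.e. until the
 * last reader drains. write_granted counts consecutive write grants so
 * the unlock path can weigh it against write_quota and eventually give
 * readers a turn.
 */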
#ifdef ISC_RWLOCK_TRACE
isc_result_t
isc_rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
int32_t cnt = 0;
- int32_t max_cnt = rwl->spins * 2 + 10;
+ int32_t spins = atomic_load_relaxed(&rwl->spins) * 2 + 10;
+ int32_t max_cnt = ISC_MAX(spins, RWLOCK_MAX_ADAPTIVE_COUNT);
isc_result_t result = ISC_R_SUCCESS;
- if (max_cnt > RWLOCK_MAX_ADAPTIVE_COUNT)
- max_cnt = RWLOCK_MAX_ADAPTIVE_COUNT;
-
do {
if (cnt++ >= max_cnt) {
result = isc__rwlock_lock(rwl, type);
break;
}
isc_rwlock_pause();
} while (isc_rwlock_trylock(rwl, type) != ISC_R_SUCCESS);
- rwl->spins += (cnt - rwl->spins) / 8;
+ atomic_fetch_add_relaxed(&rwl->spins, (cnt - spins) / 8);
return (result);
}
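/*
 * This is an adaptive spin: retry the lockless trylock up to max_cnt
 * times, pausing between attempts, and only then fall back to the
 * blocking isc__rwlock_lock(). The fetch_add nudges the stored spin
 * budget toward the recently observed acquisition cost, EWMA-style.
 * Worked example, assuming the stored counter is 20: the local spins
 * becomes 20 * 2 + 10 = 50; if trylock succeeds on attempt cnt == 12,
 * the counter is adjusted by (12 - 50) / 8 == -4, down to 16. Two
 * behavior changes ride along with this refactor: ISC_MAX makes
 * RWLOCK_MAX_ADAPTIVE_COUNT a floor on max_cnt where the deleted code
 * used it as a ceiling, and the delta is now computed against the
 * scaled value (spins * 2 + 10) rather than the raw stored counter.
 */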
if (type == isc_rwlocktype_read) {
/* If a writer is waiting or working, we fail. */
- if (atomic_load_explicit(&rwl->write_requests, memory_order_relaxed) !=
- atomic_load_explicit(&rwl->write_completions, memory_order_relaxed))
+ if (atomic_load_relaxed(&rwl->write_requests) !=
+ atomic_load_relaxed(&rwl->write_completions))
return (ISC_R_LOCKBUSY);
/* Otherwise, be ready for reading. */
- cntflag = atomic_fetch_add_explicit(&rwl->cnt_and_flag,
- READER_INCR,
- memory_order_relaxed);
+ cntflag = atomic_fetch_add_relaxed(&rwl->cnt_and_flag,
+ READER_INCR);
if ((cntflag & WRITER_ACTIVE) != 0) {
/*
* A writer is working. We lose, and cancel the read
* request.
*/
- cntflag = atomic_fetch_sub_explicit
- (&rwl->cnt_and_flag, READER_INCR,
- memory_order_relaxed);
+ cntflag = atomic_fetch_sub_relaxed(
+ &rwl->cnt_and_flag, READER_INCR);
/*
* If no other readers are waiting and we've suspended
* new writers in this short period, wake them up.
*/
if (cntflag == READER_INCR &&
- atomic_load_explicit(&rwl->write_completions, memory_order_relaxed) !=
- atomic_load_explicit(&rwl->write_requests, memory_order_relaxed)) {
+ atomic_load_relaxed(&rwl->write_completions) !=
+ atomic_load_relaxed(&rwl->write_requests))
+ {
LOCK(&rwl->lock);
BROADCAST(&rwl->writeable);
UNLOCK(&rwl->lock);
} else {
/* Try locking without entering the waiting queue. */
int_fast32_t zero = 0;
- if (!atomic_compare_exchange_strong_explicit
- (&rwl->cnt_and_flag, &zero, WRITER_ACTIVE,
- memory_order_relaxed, memory_order_relaxed))
+ if (!atomic_compare_exchange_strong_relaxed(
+ &rwl->cnt_and_flag, &zero, WRITER_ACTIVE))
{
return (ISC_R_LOCKBUSY);
}
/*
* XXXJT: jump into the queue, possibly breaking the writer
* order.
*/
- atomic_fetch_sub_explicit(&rwl->write_completions, 1,
- memory_order_relaxed);
-
- rwl->write_granted++;
+ atomic_fetch_sub_relaxed(&rwl->write_completions, 1);
+ atomic_fetch_add_relaxed(&rwl->write_granted, 1);
}
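/*
 * trylock never blocks. A reader optimistically bumps the count and,
 * if a writer turned out to be active, rolls it back with fetch_sub;
 * if that rollback makes it the last departing reader while writers
 * are queued, it must broadcast, since its transient increment may
 * have made a writer's acquisition CAS fail. A successful write
 * trylock bypasses the ticket queue, so the fetch_sub on
 * write_completions spends a completion in advance, as if a ticket
 * had been taken; hence the XXXJT caveat about writer FIFO order.
 */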
#ifdef ISC_RWLOCK_TRACE
int_fast32_t reader_incr = READER_INCR;
/* Try to acquire write access. */
- atomic_compare_exchange_strong_explicit
- (&rwl->cnt_and_flag, &reader_incr, WRITER_ACTIVE,
- memory_order_relaxed, memory_order_relaxed);
+ atomic_compare_exchange_strong_relaxed(
+ &rwl->cnt_and_flag, &reader_incr, WRITER_ACTIVE);
/*
* There must have been no writer, and there must have
* been at least one reader.
*/
/*
* We are the only reader and have been upgraded.
* Now jump into the head of the writer waiting queue.
*/
- atomic_fetch_sub_explicit(&rwl->write_completions, 1,
- memory_order_relaxed);
+ atomic_fetch_sub_relaxed(&rwl->write_completions, 1);
} else
return (ISC_R_LOCKBUSY);
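/*
 * The upgrade CAS succeeds only when cnt_and_flag is exactly
 * READER_INCR, i.e. the caller is the sole reader and no writer is
 * active, and the swap to WRITER_ACTIVE is atomic, so nothing can
 * slip in between. On failure, C11 semantics store the observed value
 * back into reader_incr, letting the caller assert there was no
 * writer and at least one reader. As in trylock, decrementing
 * write_completions slots the upgraded writer ahead of any ticketed
 * waiters.
 */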
{
/* Become an active reader. */
- prev_readers = atomic_fetch_add_explicit(&rwl->cnt_and_flag,
- READER_INCR,
- memory_order_relaxed);
+ prev_readers = atomic_fetch_add_relaxed(&rwl->cnt_and_flag,
+ READER_INCR);
/* We must have been a writer. */
INSIST((prev_readers & WRITER_ACTIVE) != 0);
/* Complete write */
- atomic_fetch_sub_explicit(&rwl->cnt_and_flag, WRITER_ACTIVE,
- memory_order_relaxed);
- atomic_fetch_add_explicit(&rwl->write_completions, 1,
- memory_order_relaxed);
+ atomic_fetch_sub_relaxed(&rwl->cnt_and_flag, WRITER_ACTIVE);
+ atomic_fetch_add_relaxed(&rwl->write_completions, 1);
}
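/*
 * Downgrade order matters: the reader reference is added first, and
 * only then are WRITER_ACTIVE cleared and the write completion
 * counted. The lock is never observably free during the transition,
 * so no queued writer can grab it between the two steps and the
 * caller's access stays continuous.
 */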
/* Resume other readers */
#endif
if (type == isc_rwlocktype_read) {
- prev_cnt = atomic_fetch_sub_explicit(&rwl->cnt_and_flag,
- READER_INCR,
- memory_order_relaxed);
+ prev_cnt = atomic_fetch_sub_relaxed(&rwl->cnt_and_flag,
+ READER_INCR);
/*
* If we're the last reader and any writers are waiting, wake
* them up. We need to wake up all of them to ensure the
* FIFO order.
*/
if (prev_cnt == READER_INCR &&
- atomic_load_explicit(&rwl->write_completions, memory_order_relaxed) !=
- atomic_load_explicit(&rwl->write_requests, memory_order_relaxed)) {
+ atomic_load_relaxed(&rwl->write_completions) !=
+ atomic_load_relaxed(&rwl->write_requests))
+ {
LOCK(&rwl->lock);
BROADCAST(&rwl->writeable);
UNLOCK(&rwl->lock);
/*
* Reset the flag, and (implicitly) tell other writers
* we are done.
*/
- atomic_fetch_sub_explicit(&rwl->cnt_and_flag, WRITER_ACTIVE,
- memory_order_relaxed);
- atomic_fetch_add_explicit(&rwl->write_completions, 1,
- memory_order_relaxed);
-
- if (rwl->write_granted >= rwl->write_quota ||
- (atomic_load_explicit(&rwl->write_requests, memory_order_relaxed) ==
- atomic_load_explicit(&rwl->write_completions, memory_order_relaxed)) ||
- (atomic_load_explicit(&rwl->cnt_and_flag, memory_order_relaxed) & ~WRITER_ACTIVE)) {
+ atomic_fetch_sub_relaxed(&rwl->cnt_and_flag, WRITER_ACTIVE);
+ atomic_fetch_add_relaxed(&rwl->write_completions, 1);
+
+ if ((atomic_load_relaxed(&rwl->write_granted) >=
+ rwl->write_quota) ||
+ (atomic_load_relaxed(&rwl->write_requests) ==
+ atomic_load_relaxed(&rwl->write_completions)) ||
+ (atomic_load_relaxed(&rwl->cnt_and_flag)
+ & ~WRITER_ACTIVE))
+ {
/*
* We have passed the write quota, no writer is
* waiting, or some readers are almost ready, pending
UNLOCK(&rwl->lock);
}
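/*
 * The three-way test above decides when writers must yield: readers
 * get a turn once write_quota consecutive grants have been used up,
 * when no further writer is queued, or when readers are already
 * spinning on cnt_and_flag (its ~WRITER_ACTIVE bits are the reader
 * count). Only then is rwl->readable broadcast; otherwise the lock
 * keeps ping-ponging among queued writers for throughput.
 */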
- if ((atomic_load_explicit(&rwl->write_requests, memory_order_relaxed) !=
- atomic_load_explicit(&rwl->write_completions, memory_order_relaxed)) &&
+ if ((atomic_load_relaxed(&rwl->write_requests) !=
+ atomic_load_relaxed(&rwl->write_completions)) &&
wakeup_writers) {
LOCK(&rwl->lock);
BROADCAST(&rwl->writeable);