HA_ATOMIC_OR(&st->wait_writers, tbit);
- start_time = now_mono_time();
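+ /* start_time is negated here; adding the exit date after the lock call leaves the elapsed wait time in it */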
+ start_time = -now_mono_time();
__RWLOCK_WRLOCK(&l->lock);
- HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (now_mono_time() - start_time));
+ start_time += now_mono_time();
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, start_time);
HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);
/* We set the waiting writer bit because trywrlock could wait for readers to quit */
HA_ATOMIC_OR(&st->wait_writers, tbit);
- start_time = now_mono_time();
+ start_time = -now_mono_time();
r = __RWLOCK_TRYWRLOCK(&l->lock);
- HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (now_mono_time() - start_time));
+ start_time += now_mono_time();
if (unlikely(r)) {
HA_ATOMIC_AND(&st->wait_writers, ~tbit);
return r;
}
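+ /* the lock is now held: account the time spent waiting for it */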
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, start_time);
HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);
st->cur_writer = tbit;
HA_ATOMIC_OR(&st->wait_readers, tbit);
- start_time = now_mono_time();
+ start_time = -now_mono_time();
__RWLOCK_RDLOCK(&l->lock);
- HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (now_mono_time() - start_time));
+ start_time += now_mono_time();
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, start_time);
HA_ATOMIC_INC(&lock_stats[lbl].num_read_locked);
HA_ATOMIC_OR(&st->cur_readers, tbit);
{
ulong tbit = (ti && ti->ltid_bit) ? ti->ltid_bit : 1;
struct ha_rwlock_state *st = &l->info.st[tgid-1];
+ uint64_t start_time;
int r;
if ((st->cur_readers | st->cur_seeker | st->cur_writer) & tbit)
abort();
/* try read should never wait */
+ start_time = -now_mono_time();
r = __RWLOCK_TRYRDLOCK(&l->lock);
+ start_time += now_mono_time();
+
if (unlikely(r))
return r;
+
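+ /* the attempt succeeded: account the time it took */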
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, start_time);
HA_ATOMIC_INC(&lock_stats[lbl].num_read_locked);
HA_ATOMIC_OR(&st->cur_readers, tbit);
HA_ATOMIC_OR(&st->wait_readers, tbit);
- start_time = now_mono_time();
+ start_time = -now_mono_time();
__RWLOCK_WRTORD(&l->lock);
- HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (now_mono_time() - start_time));
+ start_time += now_mono_time();
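+ /* the downgrade is accounted as a read lock acquisition */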
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, start_time);
HA_ATOMIC_INC(&lock_stats[lbl].num_read_locked);
HA_ATOMIC_OR(&st->wait_seekers, tbit);
- start_time = now_mono_time();
+ start_time = -now_mono_time();
__RWLOCK_WRTOSK(&l->lock);
- HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (now_mono_time() - start_time));
+ start_time += now_mono_time();
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, start_time);
HA_ATOMIC_INC(&lock_stats[lbl].num_seek_locked);
HA_ATOMIC_OR(&st->wait_seekers, tbit);
- start_time = now_mono_time();
+ start_time = -now_mono_time();
__RWLOCK_SKLOCK(&l->lock);
- HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (now_mono_time() - start_time));
+ start_time += now_mono_time();
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, start_time);
HA_ATOMIC_INC(&lock_stats[lbl].num_seek_locked);
HA_ATOMIC_OR(&st->wait_writers, tbit);
- start_time = now_mono_time();
+ start_time = -now_mono_time();
__RWLOCK_SKTOWR(&l->lock);
- HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (now_mono_time() - start_time));
+ start_time += now_mono_time();
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, start_time);
HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);
HA_ATOMIC_OR(&st->wait_readers, tbit);
- start_time = now_mono_time();
+ start_time = -now_mono_time();
__RWLOCK_SKTORD(&l->lock);
- HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (now_mono_time() - start_time));
+ start_time += now_mono_time();
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, start_time);
HA_ATOMIC_INC(&lock_stats[lbl].num_read_locked);
HA_ATOMIC_OR(&st->wait_seekers, tbit);
- start_time = now_mono_time();
+ start_time = -now_mono_time();
r = __RWLOCK_TRYSKLOCK(&l->lock);
- HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (now_mono_time() - start_time));
+ start_time += now_mono_time();
if (likely(!r)) {
/* got the lock! */
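+ /* stats are only updated in the success path */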
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, start_time);
HA_ATOMIC_INC(&lock_stats[lbl].num_seek_locked);
HA_ATOMIC_OR(&st->cur_seeker, tbit);
l->info.last_location.function = func;
HA_ATOMIC_OR(&st->wait_seekers, tbit);
- start_time = now_mono_time();
+ start_time = -now_mono_time();
r = __RWLOCK_TRYRDTOSK(&l->lock);
- HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (now_mono_time() - start_time));
+ start_time += now_mono_time();
if (likely(!r)) {
/* got the lock! */
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, start_time);
HA_ATOMIC_INC(&lock_stats[lbl].num_seek_locked);
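+ /* the owner's bit moves from the readers set to the seekers set */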
HA_ATOMIC_OR(&st->cur_seeker, tbit);
HA_ATOMIC_AND(&st->cur_readers, ~tbit);
HA_ATOMIC_OR(&st->waiters, tbit);
- start_time = now_mono_time();
+ start_time = -now_mono_time();
__SPIN_LOCK(&l->lock);
- HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (now_mono_time() - start_time));
+ start_time += now_mono_time();
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, start_time);
HA_ATOMIC_INC(&lock_stats[lbl].num_seek_locked);
{
ulong tbit = (ti && ti->ltid_bit) ? ti->ltid_bit : 1;
struct ha_spinlock_state *st = &l->info.st[tgid-1];
+ uint64_t start_time;
int r;
if (unlikely(st->owner & tbit)) {
/* the thread already owns the lock */
abort();
}
/* a trylock should never wait */
+ start_time = -now_mono_time();
r = __SPIN_TRYLOCK(&l->lock);
+ start_time += now_mono_time();
+
if (unlikely(r))
return r;
+
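+ /* spinlock acquisition is accounted under the seek counters */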
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, start_time);
HA_ATOMIC_INC(&lock_stats[lbl].num_seek_locked);
st->owner = tbit;