#include <haproxy/global.h>
#include <haproxy/log.h>
#include <haproxy/thread.h>
+#include <haproxy/time.h>
#include <haproxy/tools.h>
struct thread_info ha_thread_info[MAX_THREADS] = { };
-static uint64_t nsec_now(void)
-{
-	struct timespec ts;
-
-	clock_gettime(CLOCK_MONOTONIC, &ts);
-	return ((uint64_t) ts.tv_sec * 1000000000ULL +
-	        (uint64_t) ts.tv_nsec);
-}
-
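For reference, now_mono_time() comes from haproxy/time.h (newly included above) and is used here as a drop-in replacement for the removed nsec_now(): the call sites still accumulate nanosecond differences into the nsec_wait_for_* counters, so it is expected to return the CLOCK_MONOTONIC time as a 64-bit nanosecond count. A minimal illustrative sketch of such a helper (hypothetical name, not the actual haproxy/time.h implementation):

	#include <stdint.h>
	#include <time.h>

	/* Illustrative equivalent of the removed nsec_now(): uses the monotonic
	 * clock so measured lock wait times are immune to wall-clock adjustments.
	 */
	static inline uint64_t mono_time_ns_sketch(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
	}

Each call site below samples the time before the blocking lock operation and, once the lock is acquired, adds the elapsed nanoseconds to the matching nsec_wait_for_read/_write/_seek counter.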
void __ha_rwlock_init(struct ha_rwlock *l)
{
memset(l, 0, sizeof(struct ha_rwlock));
HA_ATOMIC_OR(&l->info.wait_writers, tid_bit);
- start_time = nsec_now();
+ start_time = now_mono_time();
__RWLOCK_WRLOCK(&l->lock);
- HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (now_mono_time() - start_time));
HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);
/* We set waiting writer because trywrlock could wait for readers to quit */
HA_ATOMIC_OR(&l->info.wait_writers, tid_bit);
- start_time = nsec_now();
+ start_time = now_mono_time();
r = __RWLOCK_TRYWRLOCK(&l->lock);
- HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (now_mono_time() - start_time));
if (unlikely(r)) {
HA_ATOMIC_AND(&l->info.wait_writers, ~tid_bit);
return r;
HA_ATOMIC_OR(&l->info.wait_readers, tid_bit);
- start_time = nsec_now();
+ start_time = now_mono_time();
__RWLOCK_RDLOCK(&l->lock);
- HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (nsec_now() - start_time));
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (now_mono_time() - start_time));
HA_ATOMIC_INC(&lock_stats[lbl].num_read_locked);
HA_ATOMIC_OR(&l->info.cur_readers, tid_bit);
HA_ATOMIC_OR(&l->info.wait_readers, tid_bit);
- start_time = nsec_now();
+ start_time = now_mono_time();
__RWLOCK_WRTORD(&l->lock);
- HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (nsec_now() - start_time));
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (now_mono_time() - start_time));
HA_ATOMIC_INC(&lock_stats[lbl].num_read_locked);
HA_ATOMIC_OR(&l->info.wait_seekers, tid_bit);
- start_time = nsec_now();
+ start_time = now_mono_time();
__RWLOCK_WRTOSK(&l->lock);
- HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (nsec_now() - start_time));
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (now_mono_time() - start_time));
HA_ATOMIC_INC(&lock_stats[lbl].num_seek_locked);
HA_ATOMIC_OR(&l->info.wait_seekers, tid_bit);
- start_time = nsec_now();
+ start_time = now_mono_time();
__RWLOCK_SKLOCK(&l->lock);
- HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (nsec_now() - start_time));
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (now_mono_time() - start_time));
HA_ATOMIC_INC(&lock_stats[lbl].num_seek_locked);
HA_ATOMIC_OR(&l->info.wait_writers, tid_bit);
- start_time = nsec_now();
+ start_time = now_mono_time();
__RWLOCK_SKTOWR(&l->lock);
- HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (now_mono_time() - start_time));
HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);
HA_ATOMIC_OR(&l->info.wait_readers, tid_bit);
- start_time = nsec_now();
+ start_time = now_mono_time();
__RWLOCK_SKTORD(&l->lock);
- HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (nsec_now() - start_time));
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (now_mono_time() - start_time));
HA_ATOMIC_INC(&lock_stats[lbl].num_read_locked);
HA_ATOMIC_OR(&l->info.wait_seekers, tid_bit);
- start_time = nsec_now();
+ start_time = now_mono_time();
r = __RWLOCK_TRYSKLOCK(&l->lock);
- HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (nsec_now() - start_time));
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (now_mono_time() - start_time));
if (likely(!r)) {
/* got the lock ! */
HA_ATOMIC_OR(&l->info.wait_seekers, tid_bit);
- start_time = nsec_now();
+ start_time = now_mono_time();
r = __RWLOCK_TRYRDTOSK(&l->lock);
- HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (nsec_now() - start_time));
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (now_mono_time() - start_time));
if (likely(!r)) {
/* got the lock ! */
HA_ATOMIC_OR(&l->info.waiters, tid_bit);
- start_time = nsec_now();
+ start_time = now_mono_time();
__SPIN_LOCK(&l->lock);
- HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (now_mono_time() - start_time));
HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);