struct timeval ready_date; /* date when the process was considered ready */
ullong start_time_ns; /* the process's start date in internal monotonic time (ns) */
volatile ullong global_now_ns; /* common monotonic date between all threads, in ns (wraps every 585 yr) */
-volatile uint global_now_ms; /* common monotonic date in milliseconds (may wrap) */
+volatile uint _global_now_ms; /* locally stored common monotonic date in milliseconds (may wrap) */
+volatile uint *global_now_ms; /* common monotonic date in milliseconds (may wrap), may point to _global_now_ms or shared memory */
/* when CLOCK_MONOTONIC is supported, the offset is applied from th_ctx->prev_mono_time instead */
THREAD_ALIGNED(64) static llong now_offset; /* global offset between system time and global time in ns */
* otherwise catch up.
*/
old_now_ns = _HA_ATOMIC_LOAD(&global_now_ns);
- old_now_ms = _HA_ATOMIC_LOAD(&global_now_ms);
+ old_now_ms = _HA_ATOMIC_LOAD(global_now_ms);
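/* The loop below CAS-updates global_now_ns first and then, only when the
 * millisecond value actually changed, the copy reached through the
 * global_now_ms pointer; a failed CAS refreshes the observed old_now_*
 * value and the attempt is retried after a cpu_relax().
 */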
do {
if (now_ns < old_now_ns)
* and ms forms) or loop again.
*/
} while ((!_HA_ATOMIC_CAS(&global_now_ns, &old_now_ns, now_ns) ||
- (now_ms != old_now_ms && !_HA_ATOMIC_CAS(&global_now_ms, &old_now_ms, now_ms))) &&
+ (now_ms != old_now_ms && !_HA_ATOMIC_CAS(global_now_ms, &old_now_ms, now_ms))) &&
__ha_cpu_relax());
if (!th_ctx->curr_mono_time) {
if (!global_now_ns) // CLOCK_MONOTONIC not supported
global_now_ns = tv_to_ns(&date);
now_ns = global_now_ns;
- global_now_ms = ns_to_ms(now_ns);
+
+ _global_now_ms = ns_to_ms(now_ns);
/* force time to wrap 20s after boot: we first compute the time offset
 * that, once applied to the wall-clock date, will make the local time
 * wrap 20s after boot. This offset is then applied to the global time
 * and will be used to recompute the local time, both of which will
 * match and continue from this shifted date.
 */
- now_offset = sec_to_ns((uint)((uint)(-global_now_ms) / 1000U - BOOT_TIME_WRAP_SEC));
+ now_offset = sec_to_ns((uint)((uint)(-_global_now_ms) / 1000U - BOOT_TIME_WRAP_SEC));
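/* Illustration with a hypothetical boot date: if _global_now_ms were
 * 0x12345678 (305419896 ms), then (uint)(-_global_now_ms) is 3989547400,
 * i.e. the milliseconds left before the 32-bit counter wraps, or 3989547
 * seconds; subtracting BOOT_TIME_WRAP_SEC (20s) and converting to ns
 * yields an offset that, once added below, places the ms clock roughly
 * 20s before its wrap point.
 */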
global_now_ns += now_offset;
now_ns = global_now_ns;
now_ms = ns_to_ms(now_ns);
/* correct for TICK_ETERNITY (0) */
if (now_ms == TICK_ETERNITY)
now_ms++;
- global_now_ms = now_ms;
+ _global_now_ms = now_ms;
+
+ /* for now global_now_ms points to the process-local _global_now_ms */
+ global_now_ms = &_global_now_ms;
th_ctx->idle_pct = 100;
clock_update_date(0, 1);
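/* Illustrative usage sketch, assuming only the declarations above: readers
 * dereference the pointer the same way whether it still targets the local
 * _global_now_ms or a shared-memory copy:
 *
 *     uint now_ms_snapshot = HA_ATOMIC_LOAD(global_now_ms);
 */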
*/
for (;; __ha_cpu_relax()) {
curr_tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
- now_ms_tmp = HA_ATOMIC_LOAD(&global_now_ms);
+ now_ms_tmp = HA_ATOMIC_LOAD(global_now_ms);
if (now_ms_tmp - curr_tick < period)
return HA_ATOMIC_ADD_FETCH(&ctr->curr_ctr, inc);
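/* Fast path above: as long as the stored period start (curr_tick) is still
 * within <period> of the ms clock read through global_now_ms, the current
 * counter is simply bumped atomically; otherwise the loop presumably has to
 * rotate the period first (not shown here).
 */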
{
int remain;
- remain = tick + period - HA_ATOMIC_LOAD(&global_now_ms);
+ remain = tick + period - HA_ATOMIC_LOAD(global_now_ms);
if (unlikely(remain < 0)) {
/* We're past the first period, check if we can still report a
* part of last period or if we're too far away.
return 0;
}
- elapsed = HA_ATOMIC_LOAD(&global_now_ms) - tick;
+ elapsed = HA_ATOMIC_LOAD(global_now_ms) - tick;
if (unlikely(elapsed < 0 || elapsed > period)) {
/* The counter is in the future or the elapsed time is higher than the period, there is no overshoot */
return 0;
int fd;
if (tick_isset(last_check) &&
- !tick_is_expired(tick_add(last_check, INET6_CONNECTIVITY_CACHE_TIME), HA_ATOMIC_LOAD(&global_now_ms)))
+ !tick_is_expired(tick_add(last_check, INET6_CONNECTIVITY_CACHE_TIME), HA_ATOMIC_LOAD(global_now_ms)))
return HA_ATOMIC_LOAD(&sock_inet6_seems_reachable);
/* update the test date to ensure nobody else does it in parallel */
- HA_ATOMIC_STORE(&last_inet6_check, HA_ATOMIC_LOAD(&global_now_ms));
+ HA_ATOMIC_STORE(&last_inet6_check, HA_ATOMIC_LOAD(global_now_ms));
fd = socket(AF_INET6, SOCK_DGRAM, 0);
if (fd >= 0) {