On some systems time_t is int32_t.
However, on wire timeouts are generally uint32_t.
As such, we need to avoid using timespec internally except to
record when the timeout was created. Instead, record the timeout
as unsigned int seconds and long nanoseconds.
On long running systems using time_t as int32_t, monotonic time
would wrap after approximately 68 years. It's highly unlikely dhcpcd
would be running for so long, but just in case it does, the code
should now cope.
lease->renewaltime = lease->rebindtime = lease->leasetime;
else {
eloop_timeout_add_sec(ctx->eloop,
- (time_t)lease->renewaltime, dhcp_startrenew, ifp);
+ lease->renewaltime, dhcp_startrenew, ifp);
eloop_timeout_add_sec(ctx->eloop,
- (time_t)lease->rebindtime, dhcp_rebind, ifp);
+ lease->rebindtime, dhcp_rebind, ifp);
eloop_timeout_add_sec(ctx->eloop,
- (time_t)lease->leasetime, dhcp_expire, ifp);
+ lease->leasetime, dhcp_expire, ifp);
logdebugx("%s: renew in %"PRIu32" seconds, rebind in %"PRIu32
" seconds",
ifp->name, lease->renewaltime, lease->rebindtime);
size_t old_len;
struct dhcp_lease lease;
const char *reason;
- time_t interval;
- time_t nakoff;
+ int interval;
+ unsigned int nakoff;
uint32_t xid;
int socket;
if (state->renew && state->renew != ND6_INFINITE_LIFETIME)
eloop_timeout_add_sec(ifp->ctx->eloop,
- (time_t)state->renew,
+ state->renew,
state->state == DH6S_INFORMED ?
dhcp6_startinform : dhcp6_startrenew, ifp);
if (state->rebind && state->rebind != ND6_INFINITE_LIFETIME)
eloop_timeout_add_sec(ifp->ctx->eloop,
- (time_t)state->rebind, dhcp6_startrebind, ifp);
+ state->rebind, dhcp6_startrebind, ifp);
if (state->expire != ND6_INFINITE_LIFETIME)
eloop_timeout_add_sec(ifp->ctx->eloop,
- (time_t)state->expire, dhcp6_startexpire, ifp);
+ state->expire, dhcp6_startexpire, ifp);
else if (timed_out)
eloop_timeout_add_sec(ifp->ctx->eloop,
- (time_t)state->expire, dhcp6_startdiscover, ifp);
+ state->expire, dhcp6_startdiscover, ifp);
ipv6_addaddrs(&state->addrs);
dhcp6_deprecateaddrs(&state->addrs);
struct interface *ifp;
uint16_t family = 0;
int opt, oi = 0, i;
- unsigned int logopts;
- time_t t;
+ unsigned int logopts, t;
ssize_t len;
#if defined(USE_SIGNALS) || !defined(THERE_IS_NO_FORK)
pid_t pid;
#ifndef MSEC_PER_SEC
#define MSEC_PER_SEC 1000L
#define NSEC_PER_MSEC 1000000L
+#define NSEC_PER_SEC 1000000000L
#endif
#if defined(HAVE_KQUEUE)
struct eloop_timeout {
TAILQ_ENTRY(eloop_timeout) next;
- struct timespec when;
+ struct timespec created;
+ unsigned int seconds;
+ long nseconds;
void (*callback)(void *);
void *arg;
int queue;
int exitcode;
};
+/*
+ * time_t is a signed integer of an unspecified
+ * size. To adjust for time_t wrapping, we need
+ * to work the maximum signed value and use that
+ * as a maximum.
+ */
+#ifndef TIME_MAX
+#define TIME_MAX (time_t)((1ULL << (sizeof(time_t) * NBBY - 1)) - 1)
+#endif
+
#ifdef HAVE_REALLOCARRAY
#define eloop_realloca reallocarray
#else
return 1;
}
-int
-eloop_q_timeout_add_tv(struct eloop *eloop, int queue,
- const struct timespec *when, void (*callback)(void *), void *arg)
+/*
+ * This implementation should cope with UINT_MAX seconds on a system
+ * where time_t is INT32_MAX. It should also cope with the monotonic timer
+ * wrapping, although this is highly unlikely.
+ * unsigned int should match or be greater than any on wire specified timeout.
+ */
+static int
+eloop_q_timeout_add(struct eloop *eloop, int queue,
+ unsigned int seconds, long nseconds, void (*callback)(void *), void *arg)
{
- struct timespec now, w;
+ struct timespec diff;
struct eloop_timeout *t, *tt = NULL;
+ unsigned int cseconds;
+ long cnseconds;
assert(eloop != NULL);
- assert(when != NULL);
assert(callback != NULL);
- clock_gettime(CLOCK_MONOTONIC, &now);
- timespecadd(&now, when, &w);
- /* Check for time_t overflow. */
- if (timespeccmp(&w, &now, <)) {
- errno = ERANGE;
- return -1;
- }
-
/* Remove existing timeout if present. */
TAILQ_FOREACH(t, &eloop->timeouts, next) {
if (t->callback == callback && t->arg == arg) {
}
}
- t->when = w;
+ clock_gettime(CLOCK_MONOTONIC, &t->created);
+ t->seconds = seconds;
+ t->nseconds = nseconds;
t->callback = callback;
t->arg = arg;
t->queue = queue;
/* The timeout list should be in chronological order,
* soonest first. */
TAILQ_FOREACH(tt, &eloop->timeouts, next) {
- if (timespeccmp(&t->when, &tt->when, <)) {
+ /* Check for a wrapped timer. */
+ if (timespeccmp(&t->created, &tt->created, >))
+ timespecsub(&t->created, &tt->created, &diff);
+ else {
+ diff.tv_sec = (TIME_MAX - tt->created.tv_sec)
+ + t->created.tv_sec;
+ diff.tv_nsec = t->created.tv_nsec - tt->created.tv_nsec;
+ if (diff.tv_nsec < 0) {
+ diff.tv_sec--;
+ diff.tv_nsec += NSEC_PER_SEC;
+ }
+ }
+ cseconds = (unsigned int)(tt->seconds - diff.tv_sec);
+ cnseconds = tt->nseconds - diff.tv_nsec;
+ if (seconds < cseconds ||
+ (seconds == cseconds && nseconds < cnseconds))
+ {
TAILQ_INSERT_BEFORE(tt, t, next);
return 0;
}
}
int
-eloop_q_timeout_add_sec(struct eloop *eloop, int queue, time_t when,
+eloop_q_timeout_add_tv(struct eloop *eloop, int queue,
+ const struct timespec *when, void (*callback)(void *), void *arg)
+{
+
+ if (when->tv_sec > UINT_MAX) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ return eloop_q_timeout_add(eloop, queue,
+ (unsigned int)when->tv_sec, when->tv_nsec, callback, arg);
+}
+
+int
+eloop_q_timeout_add_sec(struct eloop *eloop, int queue, unsigned int seconds,
void (*callback)(void *), void *arg)
{
- struct timespec tv;
- tv.tv_sec = when;
- tv.tv_nsec = 0;
- return eloop_q_timeout_add_tv(eloop, queue, &tv, callback, arg);
+ return eloop_q_timeout_add(eloop, queue, seconds, 0, callback, arg);
}
int
eloop_q_timeout_add_msec(struct eloop *eloop, int queue, long when,
void (*callback)(void *), void *arg)
{
- struct timespec tv;
+ long seconds, nseconds;
- tv.tv_sec = when / MSEC_PER_SEC;
- tv.tv_nsec = (when % MSEC_PER_SEC) * NSEC_PER_MSEC;
- return eloop_q_timeout_add_tv(eloop, queue, &tv, callback, arg);
+ seconds = when / MSEC_PER_SEC;
+ if (seconds > UINT_MAX) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ nseconds = (when % MSEC_PER_SEC) * NSEC_PER_MSEC;
+ return eloop_q_timeout_add(eloop, queue,
+ (unsigned int)seconds, nseconds, callback, arg);
}
#if !defined(HAVE_KQUEUE)
t0(eloop->timeout0_arg);
continue;
}
- if ((t = TAILQ_FIRST(&eloop->timeouts))) {
+
+ t = TAILQ_FIRST(&eloop->timeouts);
+ if (t == NULL && eloop->events_len == 0)
+ break;
+
+ if (t != NULL) {
+ unsigned int seconds;
+ long nseconds;
+
clock_gettime(CLOCK_MONOTONIC, &now);
- if (timespeccmp(&now, &t->when, >)) {
+ if (timespeccmp(&now, &t->created, >))
+ timespecsub(&now, &t->created, &ts);
+ else {
+ ts.tv_sec = (TIME_MAX - t->created.tv_sec)
+ + now.tv_sec;
+ ts.tv_nsec = now.tv_nsec - t->created.tv_nsec;
+ if (ts.tv_nsec < 0) {
+ ts.tv_sec--;
+ ts.tv_nsec += NSEC_PER_SEC;
+ }
+ }
+ if (ts.tv_sec > t->seconds ||
+ (ts.tv_sec == t->seconds &&
+ ts.tv_nsec > t->nseconds))
+ {
TAILQ_REMOVE(&eloop->timeouts, t, next);
t->callback(t->arg);
- TAILQ_INSERT_TAIL(&eloop->free_timeouts, t, next);
+ TAILQ_INSERT_TAIL(&eloop->free_timeouts,
+ t, next);
continue;
}
- timespecsub(&t->when, &now, &ts);
- tsp = &ts;
- } else
- /* No timeouts, so wait forever. */
- tsp = NULL;
- if (tsp == NULL && eloop->events_len == 0)
- break;
+ seconds = (unsigned int)(t->seconds - ts.tv_sec);
+ nseconds = t->nseconds - ts.tv_nsec;
+ if (nseconds < 0) {
+ seconds--;
+ nseconds += NSEC_PER_SEC;
+ }
+
+ /* If t->seconds is greater than time_t we need
* to reduce it AND adjust t->created to compensate.
+ * This does reduce the accuracy of the timer slightly,
+ * but we have little choice. */
+ if (t->seconds > (unsigned int)TIME_MAX) {
+ t->created = now;
+ t->seconds -= (unsigned int)ts.tv_sec;
+ t->nseconds -= ts.tv_nsec;
+ if (t->nseconds < 0) {
+ t->seconds--;
+ t->nseconds += NSEC_PER_SEC;
+ }
+ }
+
+ if (seconds > INT_MAX) {
+ ts.tv_sec = (time_t)INT_MAX;
+ ts.tv_nsec = 0;
+ } else {
+ ts.tv_sec = (time_t)seconds;
+ ts.tv_nsec = nseconds;
+ }
+ tsp = &ts;
#ifndef HAVE_KQUEUE
- if (tsp == NULL)
+ if (seconds > INT_MAX / 1000 ||
+ (seconds == INT_MAX / 1000 &&
+ (nseconds + 999999) / 1000000 > INT_MAX % 1000))
+ timeout = INT_MAX;
+ else
+ timeout = (int)(seconds * 1000 +
+ (nseconds + 999999) / 1000000);
+#endif
+ } else {
+ tsp = NULL;
+#ifndef HAVE_KQUEUE
timeout = -1;
- else if (tsp->tv_sec > INT_MAX / 1000 ||
- (tsp->tv_sec == INT_MAX / 1000 &&
- (tsp->tv_nsec + 999999) / 1000000 > INT_MAX % 1000000))
- timeout = INT_MAX;
- else
- timeout = (int)(tsp->tv_sec * 1000 +
- (tsp->tv_nsec + 999999) / 1000000);
#endif
+ }
#if defined(HAVE_KQUEUE)
n = kevent(eloop->poll_fd, NULL, 0, &ke, 1, tsp);
continue;
return -errno;
}
+ if (n == 0)
+ continue;
/* Process any triggered events.
* We go back to the start after calling each callback incase
* the current event or next event is removed. */
#if defined(HAVE_KQUEUE)
- if (n) {
- if (ke.filter == EVFILT_SIGNAL) {
- eloop->signal_cb((int)ke.ident,
- eloop->signal_cb_ctx);
- continue;
- }
+ if (ke.filter == EVFILT_SIGNAL) {
+ eloop->signal_cb((int)ke.ident,
+ eloop->signal_cb_ctx);
+ } else {
e = (struct eloop_event *)ke.udata;
- if (ke.filter == EVFILT_WRITE) {
+ if (ke.filter == EVFILT_WRITE && e->write_cb != NULL)
e->write_cb(e->write_cb_arg);
- continue;
- } else if (ke.filter == EVFILT_READ) {
+ else if (ke.filter == EVFILT_READ && e->read_cb != NULL)
e->read_cb(e->read_cb_arg);
- continue;
- }
}
#elif defined(HAVE_EPOLL)
- if (n) {
- e = (struct eloop_event *)epe.data.ptr;
- if (epe.events & EPOLLOUT && e->write_cb != NULL) {
- e->write_cb(e->write_cb_arg);
- continue;
- }
- if (epe.events &
- (EPOLLIN | EPOLLERR | EPOLLHUP) &&
- e->read_cb != NULL)
- {
- e->read_cb(e->read_cb_arg);
- continue;
- }
- }
+ e = (struct eloop_event *)epe.data.ptr;
+ if (epe.events & EPOLLOUT && e->write_cb != NULL)
+ e->write_cb(e->write_cb_arg);
+ else if (epe.events & (EPOLLIN | EPOLLERR | EPOLLHUP) &&
+ e->read_cb != NULL)
+ e->read_cb(e->read_cb_arg);
#elif defined(HAVE_POLL)
- if (n > 0) {
- size_t i;
-
- for (i = 0; i < eloop->events_len; i++) {
- if (eloop->fds[i].revents & POLLOUT) {
- e = eloop->event_fds[eloop->fds[i].fd];
- if (e->write_cb != NULL) {
- e->write_cb(e->write_cb_arg);
- break;
- }
+ size_t i;
+
+ for (i = 0; i < eloop->events_len; i++) {
+ if (eloop->fds[i].revents & POLLOUT) {
+ e = eloop->event_fds[eloop->fds[i].fd];
+ if (e->write_cb != NULL) {
+ e->write_cb(e->write_cb_arg);
+ break;
}
- if (eloop->fds[i].revents) {
- e = eloop->event_fds[eloop->fds[i].fd];
- if (e->read_cb != NULL) {
- e->read_cb(e->read_cb_arg);
- break;
- }
+ }
+ if (eloop->fds[i].revents) {
+ e = eloop->event_fds[eloop->fds[i].fd];
+ if (e->read_cb != NULL) {
+ e->read_cb(e->read_cb_arg);
+ break;
}
}
}
int eloop_q_timeout_add_tv(struct eloop *, int,
const struct timespec *, void (*)(void *), void *);
int eloop_q_timeout_add_sec(struct eloop *, int,
- time_t, void (*)(void *), void *);
+ unsigned int, void (*)(void *), void *);
int eloop_q_timeout_add_msec(struct eloop *, int,
long, void (*)(void *), void *);
int eloop_q_timeout_delete(struct eloop *, int, void (*)(void *), void *);
break;
case 't':
ARG_REQUIRED;
- ifo->timeout = (time_t)strtoi(arg, NULL, 0, 0, INT32_MAX, &e);
+ ifo->timeout = (uint32_t)strtou(arg, NULL, 0, 0, UINT32_MAX, &e);
if (e) {
logerrx("failed to convert timeout %s", arg);
return -1;
break;
case 'y':
ARG_REQUIRED;
- ifo->reboot = (time_t)strtoi(arg, NULL, 0, 0, UINT32_MAX, &e);
+ ifo->reboot = (uint32_t)strtou(arg, NULL, 0, 0, UINT32_MAX, &e);
if (e) {
logerr("failed to convert reboot %s", arg);
return -1;
uint8_t nomask6[(UINT16_MAX + 1) / NBBY];
uint8_t rejectmask6[(UINT16_MAX + 1) / NBBY];
uint32_t leasetime;
- time_t timeout;
- time_t reboot;
+ uint32_t timeout;
+ uint32_t reboot;
unsigned long long options;
struct in_addr req_addr;
ia->prefix_vltime &&
ip6_use_tempaddr(ifp->name))
eloop_timeout_add_sec(ifp->ctx->eloop,
- (time_t)ia->prefix_pltime - REGEN_ADVANCE,
+ ia->prefix_pltime - REGEN_ADVANCE,
ipv6_regentempaddr, ia);
#endif
ipv6_regen_desync(struct interface *ifp, int force)
{
struct ipv6_state *state;
- time_t max;
+ unsigned int max, pref;
state = IPV6_STATE(ifp);
* greater than TEMP_VALID_LIFETIME - REGEN_ADVANCE.
* I believe this is an error and it should be never be greateter than
* TEMP_PREFERRED_LIFETIME - REGEN_ADVANCE. */
- max = ip6_temp_preferred_lifetime(ifp->name) - REGEN_ADVANCE;
+ pref = (unsigned int)ip6_temp_preferred_lifetime(ifp->name);
+ max = pref - REGEN_ADVANCE;
if (state->desync_factor && !force && state->desync_factor < max)
return;
if (state->desync_factor == 0)
state->desync_factor =
- (time_t)arc4random_uniform(MIN(MAX_DESYNC_FACTOR,
- (uint32_t)max));
- max = ip6_temp_preferred_lifetime(ifp->name) -
- state->desync_factor - REGEN_ADVANCE;
+ arc4random_uniform(MIN(MAX_DESYNC_FACTOR, max));
+ max = pref - state->desync_factor - REGEN_ADVANCE;
eloop_timeout_add_sec(ifp->ctx->eloop, max, ipv6_regentempifid, ifp);
}
ipv6_regen_desync(ia->iface, 0);
/* RFC4941 Section 3.3.4 */
- i = (uint32_t)(ip6_temp_preferred_lifetime(ia0->iface->name) -
- state->desync_factor);
+ i = (uint32_t)ip6_temp_preferred_lifetime(ia0->iface->name) -
+ state->desync_factor;
ia->prefix_pltime = MIN(ia0->prefix_pltime, i);
i = (uint32_t)ip6_temp_valid_lifetime(ia0->iface->name);
ia->prefix_vltime = MIN(ia0->prefix_vltime, i);
ap->prefix_pltime &&
IN6_ARE_ADDR_EQUAL(&ia->prefix, &ap->prefix))
{
- time_t max, ext;
+ unsigned int max, ext;
if (flags == 0) {
if (ap->prefix_pltime -
/* RFC4941 Section 3.3.2
* Extend temporary times, but ensure that they
* never last beyond the system limit. */
- ext = ia->acquired.tv_sec + (time_t)ia->prefix_pltime;
- max = ap->created.tv_sec +
+ ext = (unsigned int)ia->acquired.tv_sec
+ + ia->prefix_pltime;
+ max = (unsigned int)(ap->created.tv_sec +
ip6_temp_preferred_lifetime(ap->iface->name) -
- state->desync_factor;
+ state->desync_factor);
if (ext < max)
ap->prefix_pltime = ia->prefix_pltime;
else
(uint32_t)(max - ia->acquired.tv_sec);
valid:
- ext = ia->acquired.tv_sec + (time_t)ia->prefix_vltime;
- max = ap->created.tv_sec +
- ip6_temp_valid_lifetime(ap->iface->name);
+ ext = (unsigned int)ia->acquired.tv_sec +
+ ia->prefix_vltime;
+ max = (unsigned int)(ap->created.tv_sec +
+ ip6_temp_valid_lifetime(ap->iface->name));
if (ext < max)
ap->prefix_vltime = ia->prefix_vltime;
else
struct ll_callback_head ll_callbacks;
#ifdef IPV6_MANAGETEMPADDR
- time_t desync_factor;
+ uint32_t desync_factor;
uint8_t randomseed0[8]; /* upper 64 bits of MD5 digest */
uint8_t randomseed1[8]; /* lower 64 bits */
uint8_t randomid[8];
{
struct interface *ifp;
struct ra *rap, *ran;
- struct timespec now, lt, expire, next;
+ struct timespec now, expire;
bool expired, valid;
struct ipv6_addr *ia;
size_t len, olen;
#endif
struct nd_opt_dnssl dnssl;
struct nd_opt_rdnss rdnss;
- uint32_t ltime;
+ unsigned int next = 0, ltime;
size_t nexpired = 0;
ifp = arg;
clock_gettime(CLOCK_MONOTONIC, &now);
expired = false;
- timespecclear(&next);
TAILQ_FOREACH_SAFE(rap, ifp->ctx->ra_routers, next, ran) {
if (rap->iface != ifp || rap->expired)
continue;
valid = false;
if (rap->lifetime) {
- lt.tv_sec = (time_t)rap->lifetime;
- lt.tv_nsec = 0;
- timespecadd(&rap->acquired, &lt, &expire);
- if (timespeccmp(&now, &expire, >)) {
+ timespecsub(&now, &rap->acquired, &expire);
+ if (expire.tv_sec > rap->lifetime) {
if (!rap->expired) {
logwarnx("%s: %s: router expired",
ifp->name, rap->sfrom);
}
} else {
valid = true;
- timespecsub(&expire, &now, &lt);
- if (!timespecisset(&next) ||
- timespeccmp(&next, &lt, >))
- next = lt;
+ ltime = (unsigned int)
+ (rap->lifetime - expire.tv_sec);
+ if (next == 0 || ltime < next)
+ next = ltime;
}
}
valid = true;
continue;
}
- lt.tv_sec = (time_t)ia->prefix_vltime;
- lt.tv_nsec = 0;
- timespecadd(&ia->acquired, &lt, &expire);
- if (timespeccmp(&now, &expire, >)) {
+ timespecsub(&now, &ia->acquired, &expire);
+ if (expire.tv_sec > ia->prefix_vltime) {
if (ia->flags & IPV6_AF_ADDED) {
logwarnx("%s: expired address %s",
ia->iface->name, ia->saddr);
~(IPV6_AF_ADDED | IPV6_AF_DADCOMPLETED);
expired = true;
} else {
- timespecsub(&expire, &now, &lt);
- if (!timespecisset(&next) ||
- timespeccmp(&next, &lt, >))
- next = lt;
valid = true;
+ ltime = (unsigned int)
+ (ia->prefix_vltime - expire.tv_sec);
+ if (next == 0 || ltime < next)
+ next = ltime;
}
}
/* Work out expiry for ND options */
+ timespecsub(&now, &rap->acquired, &expire);
len = rap->data_len - sizeof(struct nd_router_advert);
for (p = rap->data + sizeof(struct nd_router_advert);
len >= sizeof(ndo);
continue;
}
- lt.tv_sec = (time_t)ntohl(ltime);
- lt.tv_nsec = 0;
- timespecadd(&rap->acquired, &lt, &expire);
- if (timespeccmp(&now, &expire, >)) {
+ ltime = ntohl(ltime);
+ if (expire.tv_sec > ltime) {
expired = true;
continue;
}
- timespecsub(&expire, &now, &lt);
- if (!timespecisset(&next) ||
- timespeccmp(&next, &lt, >)
- {
- next = lt;
- valid = true;
- }
+ valid = true;
+ ltime = (unsigned int)(ltime - expire.tv_sec);
+ if (next == 0 || ltime < next)
+ next = ltime;
}
if (valid)
ipv6nd_free_ra(rap);
}
- if (timespecisset(&next))
- eloop_timeout_add_tv(ifp->ctx->eloop,
- &next, ipv6nd_expirera, ifp);
+ if (next != 0)
+ eloop_timeout_add_sec(ifp->ctx->eloop,
+ next, ipv6nd_expirera, ifp);
if (expired) {
logwarnx("%s: part of Router Advertisement expired", ifp->name);
rt_build(ifp->ctx, AF_INET6);