request->resend++;
} else { /* request->packet->id >= 0 */
- fr_time_delta_t now = fr_time();
+ fr_time_t now = fr_time();
/*
 * FIXME: Accounting packets are never retried!
 */
/*
 * Not yet time for a retry; update the sleep time and return.
 */
- if ((now - request->timestamp) < timeout) {
+ if (fr_time_sub(now, request->timestamp) < timeout) {
/*
* When we walk over the tree sending
* packets, we update the minimum time
* required to sleep.
*/
if ((sleep_time == -1) ||
- (sleep_time > (now - request->timestamp))) {
- sleep_time = now - request->timestamp;
+ (sleep_time > fr_time_sub(now, request->timestamp))) {
+ sleep_time = fr_time_sub(now, request->timestamp);
}
return 0;
}
/*
* Receive one packet, maybe.
*/
-static int recv_one_packet(fr_time_t wait_time)
+static int recv_one_packet(fr_time_delta_t wait_time)
{
fd_set set;
fr_time_delta_t our_wait_time;
max_fd = fr_packet_list_fd_set(packet_list, &set);
if (max_fd < 0) fr_exit_now(1); /* no sockets to listen on! */
- our_wait_time = (wait_time <= 0) ? 0 : wait_time;
+ our_wait_time = wait_time <= 0 ? fr_time_delta_from_sec(0) : wait_time;
/*
* No packet was received.
rc_file_pair_t *files; //!< Request and response file names.
fr_pair_t *password; //!< Password.Cleartext
- fr_time_delta_t timestamp;
+ fr_time_t timestamp;
fr_radius_packet_t *packet; //!< The outgoing request.
fr_radius_packet_t *reply; //!< The incoming response.
fr_time_delta_t exit_after = *(fr_time_delta_t *)uctx;
- if (now == 0) {
+ if (fr_time_eq(now, fr_time_wrap(0))) {
if (fr_event_timer_in(el, el, &ev, exit_after, fr_exit_after, uctx) < 0) {
PERROR("Failed inserting exit event");
}
fr_time_sync_event(main_loop_event_list(), fr_time(), NULL);
#ifndef NDEBUG
- if (exit_after > 0) fr_exit_after(main_loop_event_list(), 0, &exit_after);
+ if (exit_after > 0) fr_exit_after(main_loop_event_list(), fr_time_wrap(0), &exit_after);
#endif
/*
* Process requests until HUP or exit.
INFO("\tUser : %s (%u)", uid_str, info.sem_perm.uid);
INFO("\tGroup : %s (%u)", gid_str, info.sem_perm.gid);
INFO("\tTime : %s",
- fr_asprintf(autofree, "%pV", fr_box_date(fr_time_from_sec(info.sem_otime))));
+ fr_asprintf(autofree, "%pV", fr_box_time(fr_time_from_sec(info.sem_otime))));
INFO("Created:");
INFO("\tUser : %s (%u)", cuid_str, info.sem_perm.cuid);
INFO("\tGroup : %s (%u)", cgid_str, info.sem_perm.cgid);
INFO("\tTime : %s",
- fr_asprintf(autofree, "%pV", fr_box_date(fr_time_from_sec(info.sem_ctime))));
+ fr_asprintf(autofree, "%pV", fr_box_time(fr_time_from_sec(info.sem_ctime))));
}
EXIT_WITH_SUCCESS;
/** radmin functions, tables, and callbacks
*
*/
-static fr_time_delta_t start_time;
+static fr_time_t start_time;
static int cmd_exit(UNUSED FILE *fp, UNUSED FILE *fp_err, UNUSED void *ctx, UNUSED fr_cmd_info_t const *info)
{
{
fr_time_delta_t uptime;
- uptime = fr_time() - start_time;
+ uptime = fr_time_sub(fr_time(), start_time);
fr_fprintf(fp, "Uptime: %pVs seconds\n", fr_box_time_delta(uptime));
* It's a linked response
*/
if (original && original->linked) {
- latency = fr_time_delta_to_timeval(packet->timestamp - original->packet->timestamp);
+ latency = fr_time_delta_to_timeval(fr_time_sub(packet->timestamp, original->packet->timestamp));
/*
* Update stats for both the request and response types.
static void rs_got_packet(fr_event_list_t *el, int fd, UNUSED int flags, void *ctx)
{
static uint64_t count = 0; /* Packets seen */
- static fr_time_t last_sync = 0;
+ static fr_time_t last_sync = fr_time_wrap(0);
fr_time_t now_real;
rs_event_t *event = talloc_get_type(ctx, rs_event_t);
pcap_t *handle = event->in->handle;
* event ourselves.
*/
now_real = fr_time();
- if ((now_real - last_sync) > fr_time_delta_from_sec(1)) {
+ if (fr_time_sub(now_real, last_sync) > fr_time_delta_from_sec(1)) {
fr_time_sync();
last_sync = now_real;
}
}
}
-static int _rs_event_status(fr_time_delta_t wake_t, UNUSED void *uctx)
+static int _rs_event_status(UNUSED fr_time_t now, fr_time_delta_t wake_t, UNUSED void *uctx)
{
struct timeval wake;
ERROR("Will attempt to re-establish connection in %i ms", RS_SOCKET_REOPEN_DELAY);
if (fr_event_timer_at(NULL, el, &event,
- now + fr_time_delta_from_msec(RS_SOCKET_REOPEN_DELAY), rs_collectd_reopen, el) < 0) {
+ fr_time_add(now, fr_time_delta_from_msec(RS_SOCKET_REOPEN_DELAY)),
+ rs_collectd_reopen, el) < 0) {
ERROR("Failed inserting re-open event");
RS_ASSERT(0);
}
uint16_t server_port; //!< Port to send requests to.
unsigned int retries; //!< Number of retries.
- fr_time_t timeout;
+ fr_time_delta_t timeout;
char *secret; //!< Shared secret.
} radsnmp_conf_t;
* retransmit which nukes our ID, and therefore our state.
*/
if (((request && RDEBUG_ENABLED) || (!request && DEBUG_ENABLED)) &&
- (eap_session->tls && !eap_session->finished && ((fr_time() - eap_session->updated) > (((fr_time_t) 3) * NSEC)))) {
+ (eap_session->tls && !eap_session->finished && (fr_time_sub(fr_time(), eap_session->updated) > fr_time_delta_from_sec(3)))) {
ROPTIONAL(RWDEBUG, WARN, "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!");
ROPTIONAL(RWDEBUG, WARN, "!! EAP session %016" PRIxPTR " did not finish! !!",
(uintptr_t)eap_session);
* and an atomic queue in each direction to allow for bidirectional communication.
*/
struct fr_channel_s {
- fr_time_t cpu_time; //!< Total time used by the responder for this channel.
- fr_time_t processing_time; //!< Time spent by the responder processing requests.
+ fr_time_delta_t cpu_time; //!< Total time used by the responder for this channel.
+ fr_time_delta_t processing_time; //!< Time spent by the responder processing requests.
bool same_thread; //!< are both ends in the same thread?
fr_table_num_sorted_t const channel_signals[] = {
{ L("error"), FR_CHANNEL_ERROR },
- { L("data-to-responder"), FR_CHANNEL_SIGNAL_DATA_TO_RESPONDER },
- { L("data-to-requestor"), FR_CHANNEL_DATA_READY_REQUESTOR },
+ { L("data-to-responder"), FR_CHANNEL_SIGNAL_DATA_TO_RESPONDER },
+ { L("data-to-requestor"), FR_CHANNEL_DATA_READY_REQUESTOR },
{ L("open"), FR_CHANNEL_OPEN },
{ L("close"), FR_CHANNEL_CLOSE },
{ L("data-done-responder"), FR_CHANNEL_SIGNAL_DATA_DONE_RESPONDER },
- { L("responder-sleeping"), FR_CHANNEL_SIGNAL_RESPONDER_SLEEPING },
+ { L("responder-sleeping"), FR_CHANNEL_SIGNAL_RESPONDER_SLEEPING },
};
size_t channel_signals_len = NUM_ELEMENTS(channel_signals);
int fr_channel_send_request(fr_channel_t *ch, fr_channel_data_t *cd)
{
uint64_t sequence;
- fr_time_t when, message_interval;
+ fr_time_t when;
+ fr_time_delta_t message_interval;
fr_channel_end_t *requestor;
if (!fr_cond_assert_msg(atomic_load(&ch->end[TO_RESPONDER].active), "Channel not active")) return -1;
}
requestor->sequence = sequence;
- message_interval = when - requestor->stats.last_write;
+ message_interval = fr_time_sub(when, requestor->stats.last_write);
if (!requestor->stats.message_interval) {
requestor->stats.message_interval = message_interval;
requestor->stats.message_interval = RTT(requestor->stats.message_interval, message_interval);
}
- fr_assert_msg(requestor->stats.last_write <= when,
+ fr_assert_msg(fr_time_lteq(requestor->stats.last_write, when),
"Channel data timestamp (%" PRId64") older than last channel data sent (%" PRId64 ")",
- when, requestor->stats.last_write);
+ fr_time_unwrap(when), fr_time_unwrap(requestor->stats.last_write));
requestor->stats.last_write = when;
requestor->stats.outstanding++;
requestor->ack = cd->live.sequence;
requestor->their_view_of_my_sequence = cd->live.ack;
- fr_assert(requestor->stats.last_read_other <= cd->m.when);
+ fr_assert(fr_time_lteq(requestor->stats.last_read_other, cd->m.when));
requestor->stats.last_read_other = cd->m.when;
ch->end[TO_RESPONDER].recv(ch->end[TO_RESPONDER].recv_uctx, ch, cd);
responder->ack = cd->live.sequence;
responder->their_view_of_my_sequence = cd->live.ack;
- fr_assert(responder->stats.last_read_other <= cd->m.when);
+ fr_assert(fr_time_lteq(responder->stats.last_read_other, cd->m.when));
responder->stats.last_read_other = cd->m.when;
ch->end[TO_REQUESTOR].recv(ch->end[TO_REQUESTOR].recv_uctx, ch, cd);
int fr_channel_send_reply(fr_channel_t *ch, fr_channel_data_t *cd)
{
uint64_t sequence;
- fr_time_t when, message_interval;
+ fr_time_t when;
+ fr_time_delta_t message_interval;
fr_channel_end_t *responder;
if (!fr_cond_assert_msg(atomic_load(&ch->end[TO_REQUESTOR].active), "Channel not active")) return -1;
MPRINT("\tRESPONDER replies %"PRIu64", num_outstanding %"PRIu64"\n", responder->stats.packets, responder->stats.outstanding);
responder->sequence = sequence;
- message_interval = when - responder->stats.last_write;
+ message_interval = fr_time_sub(when, responder->stats.last_write);
responder->stats.message_interval = RTT(responder->stats.message_interval, message_interval);
- fr_assert_msg(responder->stats.last_write <= when,
+ fr_assert_msg(fr_time_lteq(responder->stats.last_write, when),
"Channel data timestamp (%" PRId64") older than last channel data sent (%" PRId64 ")",
- when, responder->stats.last_write);
+ fr_time_unwrap(when), fr_time_unwrap(responder->stats.last_write));
responder->stats.last_write = when;
/*
fr_log(log, L_INFO, file, line, "\toutstanding = %" PRIu64 "\n", ch->end[TO_RESPONDER].stats.outstanding);
fr_log(log, L_INFO, file, line, "\tpackets processed = %" PRIu64 "\n", ch->end[TO_RESPONDER].stats.packets);
fr_log(log, L_INFO, file, line, "\tmessage interval (RTT) = %" PRIu64 "\n", ch->end[TO_RESPONDER].stats.message_interval);
- fr_log(log, L_INFO, file, line, "\tlast write = %" PRIu64 "\n", ch->end[TO_RESPONDER].stats.last_read_other);
- fr_log(log, L_INFO, file, line, "\tlast read other end = %" PRIu64 "\n", ch->end[TO_RESPONDER].stats.last_read_other);
- fr_log(log, L_INFO, file, line, "\tlast signal other = %" PRIu64 "\n", ch->end[TO_RESPONDER].stats.last_sent_signal);
+ fr_log(log, L_INFO, file, line, "\tlast write = %" PRIu64 "\n", fr_time_unwrap(ch->end[TO_RESPONDER].stats.last_read_other));
+ fr_log(log, L_INFO, file, line, "\tlast read other end = %" PRIu64 "\n", fr_time_unwrap(ch->end[TO_RESPONDER].stats.last_read_other));
+ fr_log(log, L_INFO, file, line, "\tlast signal other = %" PRIu64 "\n", fr_time_unwrap(ch->end[TO_RESPONDER].stats.last_sent_signal));
fr_log(log, L_INFO, file, line, "responder\n");
fr_log(log, L_INFO, file, line, "\tsignals sent = %" PRIu64"\n", ch->end[TO_REQUESTOR].stats.signals);
fr_log(log, L_INFO, file, line, "\tkevents checked = %" PRIu64 "\n", ch->end[TO_REQUESTOR].stats.kevents);
fr_log(log, L_INFO, file, line, "\tpackets processed = %" PRIu64 "\n", ch->end[TO_REQUESTOR].stats.packets);
fr_log(log, L_INFO, file, line, "\tmessage interval (RTT) = %" PRIu64 "\n", ch->end[TO_REQUESTOR].stats.message_interval);
- fr_log(log, L_INFO, file, line, "\tlast write = %" PRIu64 "\n", ch->end[TO_REQUESTOR].stats.last_read_other);
- fr_log(log, L_INFO, file, line, "\tlast read other end = %" PRIu64 "\n", ch->end[TO_REQUESTOR].stats.last_read_other);
- fr_log(log, L_INFO, file, line, "\tlast signal other = %" PRIu64 "\n", ch->end[TO_REQUESTOR].stats.last_sent_signal);
+ fr_log(log, L_INFO, file, line, "\tlast write = %" PRIu64 "\n", fr_time_unwrap(ch->end[TO_REQUESTOR].stats.last_read_other));
+ fr_log(log, L_INFO, file, line, "\tlast read other end = %" PRIu64 "\n", fr_time_unwrap(ch->end[TO_REQUESTOR].stats.last_read_other));
+ fr_log(log, L_INFO, file, line, "\tlast signal other = %" PRIu64 "\n", fr_time_unwrap(ch->end[TO_REQUESTOR].stats.last_sent_signal));
}
} request;
struct {
- fr_time_delta_t cpu_time; //!< total CPU time, including predicted work, (only worker -> network)
- fr_time_delta_t processing_time; //!< actual processing time for this packet (only worker -> network)
- fr_time_t request_time; //!< timestamp of the request packet
+ fr_time_delta_t cpu_time; //!< Total CPU time, including predicted work, (only worker -> network).
+ fr_time_delta_t processing_time; //!< Actual processing time for this packet (only worker -> network).
+ fr_time_t request_time; //!< Timestamp of the request packet.
} reply;
};
* it more likely that the next timer fires on time.
*/
for (i = 0; i < count; i++) {
- l->callback(now + i, l->uctx);
+ l->callback(fr_time_add(now, fr_time_delta_from_nsec(i)), l->uctx);
}
}
static void load_timer(fr_event_list_t *el, fr_time_t now, void *uctx)
{
fr_load_t *l = uctx;
- fr_time_t delta;
+ fr_time_delta_t delta;
int count;
/*
/*
* If we're done this step, go to the next one.
*/
- if (l->next >= l->step_end) {
+ if (fr_time_gteq(l->next, l->step_end)) {
l->step_start = l->next;
- l->step_end = l->next + ((uint64_t) l->config->duration) * NSEC;
+ l->step_end = fr_time_add(l->next, l->config->duration);
l->step_received = l->stats.received;
l->pps += l->config->step;
l->stats.pps = l->pps;
l->stats.skipped = 0;
- l->delta = (NSEC * ((uint64_t) l->config->parallel)) / l->pps;
+ l->delta = fr_time_delta_from_sec(l->config->parallel) / l->pps;
/*
* Stop at max PPS, if it's set. Otherwise
/*
* Skip timers if we're too busy.
*/
- l->next += l->delta;
- if (l->next < now) {
- while ((l->next + l->delta) < now) {
+ l->next = fr_time_add(l->next, l->delta);
+ if (fr_time_lt(l->next, now)) {
+ while (fr_time_lt(fr_time_add(l->next, l->delta), now)) {
// l->stats.skipped += l->count;
- l->next += l->delta;
+ l->next = fr_time_add(l->next, l->delta);
}
}
- delta = l->next - now;
+ delta = fr_time_sub(l->next, now);
/*
* Set the timer for the next packet.
{
l->stats.start = fr_time();
l->step_start = l->stats.start;
- l->step_end = l->step_start + ((uint64_t) l->config->duration) * NSEC;
+ l->step_end = fr_time_add(l->step_start, l->config->duration);
l->pps = l->config->start_pps;
l->stats.pps = l->pps;
l->count = l->config->parallel;
l->delta = (NSEC * ((uint64_t) l->config->parallel)) / l->pps;
- l->next = l->step_start + l->delta;
+ l->next = fr_time_add(l->step_start, l->delta);
load_timer(l->el, l->step_start, l);
return 0;
* for any kind of timing.
*/
now = fr_time();
- t = now - request_time;
+ t = fr_time_sub(now, request_time);
l->stats.rttvar = RTTVAR(l->stats.rtt, l->stats.rttvar, t);
l->stats.rtt = RTT(l->stats.rtt, t);
}
- now_f = now - l->stats.start;
+ now_f = fr_time_sub(now, l->stats.start);
now_f /= NSEC;
- last_send_f = l->stats.last_send - l->stats.start;
+ last_send_f = fr_time_sub(l->stats.last_send, l->stats.start);
last_send_f /= NSEC;
/*
* is 1B, the calculations have to be done via 64-bit
* numbers, and then converted to a final 32-bit counter.
*/
- if (now > l->step_start) {
- l->stats.pps_accepted = (((uint64_t) (l->stats.received - l->step_received)) * NSEC) / (now - l->step_start);
+ if (fr_time_gt(now, l->step_start)) {
+ l->stats.pps_accepted = fr_time_delta_from_sec(l->stats.received - l->step_received) / fr_time_sub(now, l->step_start);
}
return snprintf(buffer, buflen,
typedef struct {
uint32_t start_pps; //!< start PPS
uint32_t max_pps; //!< max PPS, 0 for "no limit".
- uint32_t duration; //!< duration of each step
+ fr_time_delta_t duration; //!< duration of each step
uint32_t step; //!< how much to increase each load test by
uint32_t parallel; //!< how many packets in parallel to send
uint32_t milliseconds; //!< how many milliseconds of backlog to top out at
* packets go in. Since we'll never have two identical
* "recv_time" values, the code should never get here.
*/
- return CMP_PREFER_SMALLER(a->recv_time, b->recv_time);
+ return CMP_PREFER_SMALLER(fr_time_unwrap(a->recv_time), fr_time_unwrap(b->recv_time));
}
/*
static ssize_t mod_read(fr_listen_t *li, void **packet_ctx, fr_time_t *recv_time_p,
uint8_t *buffer, size_t buffer_len, size_t *leftover, uint32_t *priority, bool *is_dup)
{
- fr_io_instance_t const *inst;
- fr_io_thread_t *thread;
- ssize_t packet_len = -1;
- fr_time_t recv_time = 0;
- fr_io_client_t *client;
- fr_io_address_t address;
- fr_io_connection_t my_connection, *connection;
- fr_io_pending_packet_t *pending;
- fr_io_track_t *track, *new_track;
- fr_listen_t *child;
- int value, accept_fd = -1;
+ fr_io_instance_t const *inst;
+ fr_io_thread_t *thread;
+ ssize_t packet_len = -1;
+ fr_time_t recv_time = fr_time_wrap(0);
+ fr_io_client_t *client;
+ fr_io_address_t address;
+ fr_io_connection_t my_connection, *connection;
+ fr_io_pending_packet_t *pending;
+ fr_io_track_t *track, *new_track;
+ fr_listen_t *child;
+ int value, accept_fd = -1;
fr_assert(is_dup != NULL);
*is_dup = false;
* it's fast, but also that it's hard to look up
* random packets in the pending heap.
*/
- if (pending->recv_time != track->timestamp) {
+ if (fr_time_neq(pending->recv_time, track->timestamp)) {
DEBUG3("Discarding old packet");
talloc_free(pending);
goto redo;
* Called from the read or write functions with
* now==0, to signal that we have to *set* the timer.
*/
- if (!now) {
+ if (fr_time_eq(now, fr_time_wrap(0))) {
switch (client->state) {
case PR_CLIENT_CONNECTED:
fr_assert(connection != NULL);
*
* On duplicates this also extends the expiry timer.
*/
- if (!now && !track->discard && inst->app_io->track_duplicates) {
+ if (fr_time_eq(now, fr_time_wrap(0)) && !track->discard && inst->app_io->track_duplicates) {
fr_assert(inst->cleanup_delay > 0);
fr_assert(track->do_not_respond || track->reply_len);
- track->expires = fr_time() + inst->cleanup_delay;
+ track->expires = fr_time_add(fr_time(), inst->cleanup_delay);
/*
* if the timer succeeds, then "track"
* So that all cleanup paths can come here, not just the
* timeout ones.
*/
- if (now) {
+ if (fr_time_neq(now, fr_time_wrap(0))) {
DEBUG2("TIMER - proto_%s - cleanup delay", inst->app_io->name);
} else {
DEBUG2("proto_%s - cleaning up", inst->app_io->name);
* The request later received a conflicting
* packet, so we discard this one.
*/
- if ((track->timestamp != request_time) || track->discard) {
+ if (fr_time_neq(track->timestamp, request_time) || track->discard) {
fr_assert(track->packets > 0);
track->packets--;
buffer, buffer_len, written);
if (packet_len <= 0) {
track->discard = true;
- packet_expiry_timer(el, 0, track);
+ packet_expiry_timer(el, fr_time_wrap(0), track);
return packet_len;
}
* On dedup this also extends the timer.
*/
setup_timer:
- packet_expiry_timer(el, 0, track);
+ packet_expiry_timer(el, fr_time_wrap(0), track);
return buffer_len;
}
*/
if (connection && (inst->ipproto == IPPROTO_UDP)) {
connection = fr_io_connection_alloc(inst, thread, client, -1, connection->address, connection);
- client_expiry_timer(el, 0, connection->client);
+ client_expiry_timer(el, fr_time_wrap(0), connection->client);
errno = ECONNREFUSED;
return -1;
* expiry timer, which will close and free the
* connection.
*/
- client_expiry_timer(el, 0, client);
+ client_expiry_timer(el, fr_time_wrap(0), client);
return buffer_len;
}
* timed out, so there's nothing more to do. In that case, set up the expiry timers.
*/
if (client->packets == 0) {
- client_expiry_timer(el, 0, client);
+ client_expiry_timer(el, fr_time_wrap(0), client);
}
reread:
*/
typedef struct {
fr_heap_index_t heap_id; //!< workers are in a heap
- fr_time_t cpu_time; //!< how much CPU time this worker has spent
- fr_time_t predicted; //!< predicted processing time for one packet
+ fr_time_delta_t cpu_time; //!< how much CPU time this worker has spent
+ fr_time_delta_t predicted; //!< predicted processing time for one packet
bool blocked; //!< is this worker blocked?
};
static void fr_network_post_event(fr_event_list_t *el, fr_time_t now, void *uctx);
-static int fr_network_pre_event(fr_time_t wake, void *uctx);
+static int fr_network_pre_event(fr_time_t now, fr_time_delta_t wake, void *uctx);
static void fr_network_socket_dead(fr_network_t *nr, fr_network_socket_t *s);
static void fr_network_read(UNUSED fr_event_list_t *el, int sockfd, UNUSED int flags, void *ctx);
fr_channel_data_t const *a = one, *b = two;
int ret;
- ret = (a->priority > b->priority) - (a->priority < b->priority);
+ ret = CMP(a->priority, b->priority);
if (ret != 0) return ret;
- return (a->m.when > b->m.when) - (a->m.when < b->m.when);
+ return fr_time_cmp(a->m.when, b->m.when);
}
static int8_t waiting_cmp(void const *one, void const *two)
fr_channel_data_t const *a = one, *b = two;
int ret;
- ret = (a->priority > b->priority) - (a->priority < b->priority);
+ ret = CMP(a->priority, b->priority);
if (ret != 0) return ret;
- return (a->reply.request_time > b->reply.request_time) - (a->reply.request_time < b->reply.request_time);
+ return fr_time_cmp(a->reply.request_time, b->reply.request_time);
}
static int8_t socket_listen_cmp(void const *one, void const *two)
worker = fr_channel_requestor_uctx_get(ch);
worker->stats.out++;
worker->cpu_time = cd->reply.cpu_time;
- if (!worker->predicted) {
+ if (worker->predicted == 0) {
worker->predicted = cd->reply.processing_time;
} else {
worker->predicted = RTT(worker->predicted, cd->reply.processing_time);
}
} else {
int i;
- fr_time_t cpu_time = ~((fr_time_t) 0);
+ fr_time_delta_t cpu_time = ~((fr_time_delta_t) 0);
fr_network_worker_t *found = NULL;
/*
* Ensure this hasn't been somehow corrupted during
* ring buffer allocation.
*/
- fr_assert(cd->m.when == now);
+ fr_assert(fr_time_eq(cd->m.when, now));
if (fr_network_send_request(nr, cd) < 0) {
talloc_free(cd->packet_ctx); /* not sure what else to do here */
* work, and tell the event code to return to the main loop if
* there's work to do.
*
- * @param[in] wake the time when the event loop will wake up.
- * @param[in] uctx the network
+ * @param[in] now the current time.
+ * @param[in] wake the time when the event loop will wake up.
+ * @param[in] uctx the network
*/
-static int fr_network_pre_event(UNUSED fr_time_t wake, void *uctx)
+static int fr_network_pre_event(UNUSED fr_time_t now, UNUSED fr_time_delta_t wake, void *uctx)
{
fr_network_t *nr = talloc_get_type_abort(uctx, fr_network_t);
return;
}
- fr_assert(buff = 1);
+ fr_assert(buff == 1);
/*
* fr_network_stop() will signal the workers
fr_network_stats_log(sn->nr, sn->sc->log);
- (void) fr_event_timer_at(sn, el, &sn->ev, now + sn->sc->config->stats_interval, stats_timer, sn);
+ (void) fr_event_timer_at(sn, el, &sn->ev, fr_time_add(now, sn->sc->config->stats_interval), stats_timer, sn);
}
/** Initialize and run the network thread.
*/
#define ASSERT_ON_TIME_TRAVEL(_tt, _now) \
do { \
- fr_assert((_tt)->last_changed <= (_now)); \
- fr_assert((_tt)->started <= (_now)); \
- fr_assert((_tt)->ended <= (_now)); \
- fr_assert((_tt)->last_yielded <= (_now)); \
- fr_assert((_tt)->last_resumed <= (_now)); \
+ fr_assert(fr_time_lteq((_tt)->last_changed, (_now))); \
+ fr_assert(fr_time_lteq((_tt)->started, (_now))); \
+ fr_assert(fr_time_lteq((_tt)->ended, (_now))); \
+ fr_assert(fr_time_lteq((_tt)->last_yielded, (_now))); \
+ fr_assert(fr_time_lteq((_tt)->last_resumed, (_now))); \
} while(0);
/** Set the last time a tracked entity started in its list of parents
{
fr_time_delta_t run_time;
- fr_assert(parent->parent = tt->parent);
+ fr_assert(parent->parent == tt->parent);
fr_assert_msg(tt->state == FR_TIME_TRACKING_RUNNING, "Unexpected time tracking state state %i", tt->state);
- run_time = now - tt->last_changed;
+ run_time = fr_time_sub(now, tt->last_changed);
tt->last_changed = parent->started = now;
UPDATE_PARENT_RUN_TIME(tt, run_time, last_changed, now);
fr_time_delta_t run_time;
fr_assert_msg(tt->state == FR_TIME_TRACKING_RUNNING, "Unexpected time tracking state state %i", tt->state);
- run_time = now - tt->last_changed;
+ run_time = fr_time_sub(now, tt->last_changed);
tt->last_changed = tt->parent->ended = now;
tt->running_total += run_time;
tt->state = FR_TIME_TRACKING_YIELDED;
tt->last_yielded = tt->last_changed = now;
- run_time = now - tt->last_resumed;
+ run_time = fr_time_sub(now, tt->last_resumed);
tt->running_total += run_time;
UPDATE_PARENT_RUN_TIME(tt, run_time, last_yielded, now);
}
tt->state = FR_TIME_TRACKING_RUNNING;
tt->last_resumed = tt->last_changed = now;
- wait_time = now - tt->last_yielded;
+ wait_time = fr_time_sub(now, tt->last_yielded);
tt->waiting_total += wait_time;
UPDATE_PARENT_WAIT_TIME(tt, wait_time, last_resumed, now);
}
tt->state = FR_TIME_TRACKING_STOPPED;
tt->ended = tt->last_changed = now;
- run_time = now - tt->last_resumed;
+ run_time = fr_time_sub(now, tt->last_resumed);
tt->running_total += run_time;
UPDATE_PARENT_RUN_TIME(tt, run_time, ended, now);
*/
static inline CC_HINT(nonnull) void fr_time_tracking_debug(fr_time_tracking_t *tt, FILE *fp)
{
+#define DPRINT_TIME(_x) fprintf(fp, "\t" #_x " = %"PRIu64"\n", fr_time_unwrap(tt->_x));
#define DPRINT(_x) fprintf(fp, "\t" #_x " = %"PRIu64"\n", tt->_x);
- DPRINT(started);
- DPRINT(ended);
- DPRINT(last_changed);
+ DPRINT_TIME(started);
+ DPRINT_TIME(ended);
+ DPRINT_TIME(last_changed);
- DPRINT(last_yielded);
- DPRINT(last_resumed);
+ DPRINT_TIME(last_yielded);
+ DPRINT_TIME(last_resumed);
DPRINT(running_total);
DPRINT(waiting_total);
REQUEST_VERIFY(request);
- cleanup = request->async->recv_time;
- cleanup += worker->config.max_request_time;
- if (cleanup > now) break;
+ cleanup = fr_time_add(request->async->recv_time, worker->config.max_request_time);
+ if (fr_time_gt(cleanup, now)) break;
/*
* Waiting too long, delete it.
request = fr_heap_peek_tail(worker->time_order);
if (!request) return;
- cleanup = request->async->recv_time;
- cleanup += worker->config.max_request_time;
+ cleanup = fr_time_add(request->async->recv_time, worker->config.max_request_time);
DEBUG2("Resetting cleanup timer to +%pV", fr_box_time_delta(worker->config.max_request_time));
if (fr_event_timer_at(worker, worker->el, &worker->ev_cleanup,
/*
* Update the various timers.
*/
- fr_time_elapsed_update(&worker->cpu_time, now, now + reply->reply.processing_time);
+ fr_time_elapsed_update(&worker->cpu_time, now, fr_time_add(now, reply->reply.processing_time));
fr_time_elapsed_update(&worker->wall_clock, reply->reply.request_time, now);
RDEBUG("Finished request");
* @todo - fix the channel code to do queue
* depth, and not sequence / ack.
*/
- if (old->async->recv_time == request->async->recv_time) {
+ if (fr_time_eq(old->async->recv_time, request->async->recv_time)) {
RWARN("Discarding duplicate of request (%"PRIu64")", old->number);
fr_channel_null_reply(request->async->channel);
ret = CMP(a->async->sequence, b->async->sequence);
if (ret != 0) return ret;
- return CMP(a->async->recv_time, b->async->recv_time);
+ return fr_time_cmp(a->async->recv_time, b->async->recv_time);
}
/**
{
request_t const *a = one, *b = two;
- return CMP(a->async->recv_time, b->async->recv_time);
+ return fr_time_cmp(a->async->recv_time, b->async->recv_time);
}
/**
* event loop fewer times per second, instead of after
* every request.
*/
- while (((now - start) < (NSEC / 100000)) &&
+ while ((fr_time_sub(now, start) < (NSEC / 100000)) &&
((request = fr_heap_pop(worker->runnable)) != NULL)) {
REQUEST_VERIFY(request);
*
* This should be run ONLY in single-threaded mode!
*/
-int fr_worker_pre_event(UNUSED fr_time_t wake, void *uctx)
+int fr_worker_pre_event(UNUSED fr_time_t now, UNUSED fr_time_delta_t wake, void *uctx)
{
fr_worker_t *worker = talloc_get_type_abort(uctx, fr_worker_t);
request_t *request;
static int cmd_stats_worker(FILE *fp, UNUSED FILE *fp_err, void *ctx, fr_cmd_info_t const *info)
{
fr_worker_t const *worker = ctx;
- fr_time_t when;
+ fr_time_delta_t when;
if ((info->argc == 0) || (strcmp(info->argv[0], "count") == 0)) {
fprintf(fp, "count.in\t\t\t%" PRIu64 "\n", worker->stats.in);
void fr_worker_debug(fr_worker_t *worker, FILE *fp) CC_HINT(nonnull);
-int fr_worker_pre_event(fr_time_t wake, void *uctx);
+int fr_worker_pre_event(fr_time_t now, fr_time_delta_t wake, void *uctx);
void fr_worker_post_event(fr_event_list_t *el, fr_time_t now, void *uctx);
* The remap times are _our_ times, not the _request_ time.
*/
now = fr_time();
- if (now == cluster->last_updated) {
+ if (fr_time_eq(now, cluster->last_updated)) {
too_soon:
ROPTIONAL(RWARN, WARN, "Cluster was updated less than a second ago, ignoring remap request");
return FR_REDIS_CLUSTER_RCODE_IGNORED;
fr_redis_reply_free(&map); /* Free the map */
goto in_progress;
}
- if (now == cluster->last_updated) {
+ if (fr_time_eq(now, cluster->last_updated)) {
pthread_mutex_unlock(&cluster->mutex);
fr_redis_reply_free(&map); /* Free the map */
goto too_soon;
/*
* Failed spawn recently, probably bad
*/
- if (fr_time_delta_to_msec(now - state->last_failed) < FAILED_PERIOD) return FAILED_WEIGHT;
+ if (fr_time_delta_to_msec(fr_time_sub(now, state->last_failed)) < FAILED_PERIOD) return FAILED_WEIGHT;
/*
* Closed recently, probably bad
*/
- if (fr_time_delta_to_msec(now - state->last_closed) < CLOSED_PERIOD) return CLOSED_WEIGHT;
+ if (fr_time_delta_to_msec(fr_time_sub(now, state->last_closed)) < CLOSED_PERIOD) return CLOSED_WEIGHT;
/*
* Released too long ago, don't know
*/
- if (fr_time_delta_to_msec(now - state->last_released) > RELEASED_PERIOD) return RELEASED_MIN_WEIGHT;
+ if (fr_time_delta_to_msec(fr_time_sub(now, state->last_released)) > RELEASED_PERIOD) return RELEASED_MIN_WEIGHT;
/*
* Released not long ago, might be ok.
*/
- return RELEASED_MIN_WEIGHT + (RELEASED_PERIOD - fr_time_delta_to_msec(now - state->last_released));
+ return RELEASED_MIN_WEIGHT + (RELEASED_PERIOD - fr_time_delta_to_msec(fr_time_sub(now, state->last_released)));
}
/** Issue a ping request against a cluster node
*/
void *fr_redis_cluster_conn_create(TALLOC_CTX *ctx, void *instance, fr_time_delta_t timeout)
{
- fr_redis_cluster_node_t *node = instance;
+ fr_redis_cluster_node_t *node = instance;
fr_redis_conn_t *conn = NULL;
redisContext *handle;
redisReply *reply = NULL;
/*
* Callback for the connection pool to create a new connection
*/
-void *fr_redis_cluster_conn_create(TALLOC_CTX *ctx, void *instance, fr_time_t timeout);
+void *fr_redis_cluster_conn_create(TALLOC_CTX *ctx, void *instance, fr_time_delta_t timeout);
/*
* Functions to resolve a key to a cluster node
FD_ZERO(&fds);
FD_SET(fd, &fds);
- elapsed = fr_time() - start;
+ elapsed = fr_time_sub(fr_time(), start);
if (elapsed >= timeout) goto too_long;
rcode = select(fd + 1, &fds, NULL, NULL, &fr_time_delta_to_timeval(timeout - elapsed));
struct exfile_s {
uint32_t max_entries; //!< How many file descriptors we keep track of.
- uint32_t max_idle; //!< Maximum idle time for a descriptor.
+ fr_time_delta_t max_idle; //!< Maximum idle time for a descriptor.
fr_time_t last_cleaned;
pthread_mutex_t mutex;
exfile_entry_t *entries;
* - new context.
* - NULL on error.
*/
-exfile_t *exfile_init(TALLOC_CTX *ctx, uint32_t max_entries, uint32_t max_idle, bool locking)
+exfile_t *exfile_init(TALLOC_CTX *ctx, uint32_t max_entries, fr_time_delta_t max_idle, bool locking)
{
exfile_t *ef;
pthread_mutex_lock(&ef->mutex);
- if (now > (ef->last_cleaned + 1)) do_cleanup = true;
+ if (fr_time_gt(now, fr_time_add(ef->last_cleaned, fr_time_delta_from_sec(1)))) do_cleanup = true;
/*
* Find the matching entry, or an unused one.
}
if ((oldest < 0) ||
- (ef->entries[i].last_used < ef->entries[oldest].last_used)) {
+ (fr_time_lt(ef->entries[i].last_used, ef->entries[oldest].last_used))) {
oldest = i;
}
* do so now.
*/
} else if (do_cleanup) {
- if ((ef->entries[i].last_used + ef->max_idle) >= now) continue;
+ if (fr_time_gteq(fr_time_add(ef->entries[i].last_used, ef->max_idle), now)) continue;
exfile_cleanup_entry(ef, &ef->entries[i]);
}
*/
typedef struct exfile_s exfile_t;
-exfile_t *exfile_init(TALLOC_CTX *ctx, uint32_t entries, uint32_t idle, bool locking);
+exfile_t *exfile_init(TALLOC_CTX *ctx, uint32_t entries, fr_time_delta_t idle, bool locking);
void exfile_enable_triggers(exfile_t *ef, CONF_SECTION *cs, char const *trigger_prefix,
fr_pair_list_t *trigger_args);
do {\
if (fr_rate_limit_enabled()) {\
(_entry)->now = fr_time();\
- if ((_entry)->now != (_entry)->last_complained) {\
+ if (fr_time_to_sec((_entry)->now) != fr_time_to_sec((_entry)->last_complained)) {\
(_entry)->last_complained = (_entry)->now;\
- if (((_entry)->repeated > 0) && (((_entry)->now - (_entry)->last_complained) == 1)) { \
+ if (((_entry)->repeated > 0) && (fr_time_delta_to_sec(fr_time_sub((_entry)->now, (_entry)->last_complained)) == 1)) { \
_log(_fmt " - repeated %u time(s)", ##__VA_ARGS__, (_entry)->repeated); \
} else { \
_log(_fmt, ##__VA_ARGS__); \
do {\
if (fr_rate_limit_enabled()) {\
(_entry)->now = fr_time();\
- if ((_entry)->now != (_entry)->last_complained) {\
+ if (fr_time_to_sec((_entry)->now) != fr_time_to_sec((_entry)->last_complained)) {\
(_entry)->last_complained = (_entry)->now;\
- if (((_entry)->repeated > 0) && (((_entry)->now - (_entry)->last_complained) == 1)) { \
+ if (((_entry)->repeated > 0) && (fr_time_delta_to_sec(fr_time_sub((_entry)->now, (_entry)->last_complained)) == 1)) { \
ROPTIONAL(_l_request, _l_global, _fmt " - repeated %u time(s)", ##__VA_ARGS__, (_entry)->repeated); \
} else { \
ROPTIONAL(_l_request, _l_global, _fmt, ##__VA_ARGS__); \
{
fr_time_t when;
- static fr_time_t last_hup = 0;
+ static fr_time_t last_hup = fr_time_wrap(0);
/*
* Re-open the log file. If we can't, then keep logging
* Only check the config files every few seconds.
*/
when = fr_time();
- if ((last_hup + fr_time_delta_from_sec(2)) >= when) {
+ if (fr_time_gteq(fr_time_add(last_hup, fr_time_delta_from_sec(2)), when)) {
INFO("HUP - Last HUP was too recent. Ignoring");
return;
}
*/
if ((flag & RADIUS_SIGNAL_SELF_HUP) != 0) {
fr_time_t when;
- static fr_time_t last_hup = 0;
+ static fr_time_t last_hup = fr_time_wrap(0);
when = fr_time();
- if (when - last_hup < fr_time_delta_from_sec(5)) {
+ if (fr_time_sub(when, last_hup) < fr_time_delta_from_sec(5)) {
INFO("Ignoring HUP (less than 5s since last one)");
return;
}
return ret;
}
-static int _loop_status(fr_time_t wake, UNUSED void *ctx)
+static int _loop_status(UNUSED fr_time_t now, fr_time_delta_t wake, UNUSED void *ctx)
{
if (wake > (NSEC / 10)) DEBUG3("Main loop waking up in %pV seconds", fr_box_time_delta(wake));
exfile_t *module_exfile_init(TALLOC_CTX *ctx,
CONF_SECTION *module,
uint32_t max_entries,
- uint32_t max_idle,
+ fr_time_delta_t max_idle,
bool locking,
char const *trigger_prefix,
fr_pair_list_t *trigger_args)
exfile_t *module_exfile_init(TALLOC_CTX *ctx,
CONF_SECTION *module,
uint32_t max_entries,
- uint32_t max_idle,
+ fr_time_delta_t max_idle,
bool locking,
char const *trigger_prefix,
fr_pair_list_t *trigger_args);
fr_pool_connection_t *next; //!< Next connection in list.
fr_heap_index_t heap_id; //!< For the next connection heap.
- time_t created; //!< Time connection was created.
+ fr_time_t created; //!< Time connection was created.
fr_time_t last_reserved; //!< Last time the connection was reserved.
fr_time_t last_released; //!< Time the connection was released.
* opening connections, don't open multiple connections until
* we successfully open at least one.
*/
- if ((pool->state.num == 0) && pool->state.pending && pool->state.last_failed) return NULL;
+ if ((pool->state.num == 0) &&
+ pool->state.pending &&
+ fr_time_gt(pool->state.last_failed, fr_time_wrap(0))) return NULL;
pthread_mutex_lock(&pool->mutex);
fr_assert(pool->state.num <= pool->max);
* If the last attempt failed, wait a bit before
* retrying.
*/
- if (pool->state.last_failed && ((pool->state.last_failed + pool->retry_delay) > now)) {
+ if (fr_time_gt(pool->state.last_failed, fr_time_wrap(0)) &&
+ fr_time_gt(fr_time_add(pool->state.last_failed, pool->retry_delay), now)) {
bool complain = false;
- if ((now - pool->state.last_throttled) >= NSEC) {
+ if (fr_time_sub(now, pool->state.last_throttled) >= fr_time_delta_from_sec(1)) {
complain = true;
pool->state.last_throttled = now;
if (!fr_rate_limit_enabled() || complain) {
ERROR("Last connection attempt failed, waiting %pV seconds before retrying",
- fr_box_time_delta(pool->state.last_failed + pool->retry_delay - now));
+ fr_box_time_delta(fr_time_sub(fr_time_add(pool->state.last_failed, pool->retry_delay), now)));
}
return NULL;
pool->state.last_spawned = fr_time();
pool->delay_interval = pool->cleanup_interval;
pool->state.next_delay = pool->cleanup_interval;
- pool->state.last_failed = 0;
+ pool->state.last_failed = fr_time_wrap(0);
/*
* Must be done inside the mutex, reconnect callback
* - 0 if connection was closed.
* - 1 if connection handle was left open.
*/
-static int connection_manage(fr_pool_t *pool, request_t *request, fr_pool_connection_t *this, time_t now)
+static int connection_manage(fr_pool_t *pool, request_t *request, fr_pool_connection_t *this, fr_time_t now)
{
fr_assert(pool != NULL);
fr_assert(this != NULL);
}
if ((pool->lifetime > 0) &&
- ((this->created + pool->lifetime) < now)) {
+ (fr_time_lt(fr_time_add(this->created, pool->lifetime), now))) {
ROPTIONAL(RDEBUG2, DEBUG2, "Closing expired connection (%" PRIu64 "): Hit lifetime limit",
this->number);
goto do_delete;
}
if ((pool->idle_timeout > 0) &&
- ((this->last_released + pool->idle_timeout) < now)) {
+ (fr_time_lt(fr_time_add(this->last_released, pool->idle_timeout), now))) {
ROPTIONAL(RINFO, INFO, "Closing connection (%" PRIu64 "): Hit idle_timeout, was idle for %pVs",
- this->number, fr_box_time_delta(now - this->last_released));
+ this->number, fr_box_time_delta(fr_time_sub(now, this->last_released)));
goto do_delete;
}
fr_time_t now = fr_time();
fr_pool_connection_t *this, *next;
- if ((now - pool->state.last_checked) < NSEC) {
+ if (fr_time_sub(now, pool->state.last_checked) < fr_time_delta_from_sec(1)) {
pthread_mutex_unlock(&pool->mutex);
return 1;
}
* Don't close connections too often, in order to
* prevent flapping.
*/
- if (now < (pool->state.last_spawned + pool->delay_interval)) goto manage_connections;
+ if (fr_time_lt(now, fr_time_add(pool->state.last_spawned, pool->delay_interval))) goto manage_connections;
/*
* Find a connection to close.
for (this = pool->tail; this != NULL; this = this->prev) {
if (this->in_use) continue;
- if (!found || (this->last_reserved < found->last_reserved)) found = this;
+ if (!found || (fr_time_lt(this->last_reserved, found->last_reserved))) found = this;
}
if (!fr_cond_assert(found)) goto done;
/*
* Rate-limit complaints.
*/
- if ((now - pool->state.last_at_max) > NSEC) {
+ if (fr_time_sub(now, pool->state.last_at_max) > fr_time_delta_from_sec(1)) {
complain = true;
pool->state.last_at_max = now;
}
* This is done inside the mutex to ensure
* updates are atomic.
*/
- held = this->last_released - this->last_reserved;
+ held = fr_time_sub(this->last_released, this->last_reserved);
/*
* Check we've not exceeded out trigger limits
*/
if (pool->held_trigger_min &&
(held < pool->held_trigger_min) &&
- ((this->last_released - pool->state.last_held_min) >= NSEC)) {
+ (fr_time_sub(this->last_released, pool->state.last_held_min) >= fr_time_delta_from_sec(1))) {
trigger_min = true;
pool->state.last_held_min = this->last_released;
}
if (pool->held_trigger_min &&
(held > pool->held_trigger_max) &&
- ((this->last_released - pool->state.last_held_max) >= NSEC)) {
+ (fr_time_sub(this->last_released, pool->state.last_held_max) >= fr_time_delta_from_sec(1))) {
trigger_max = true;
pool->state.last_held_max = this->last_released;
}
fr_assert(map->da->type == FR_TYPE_UINT32);
now = fr_time();
- delta = now - start_time;
+ delta = fr_time_sub(now, start_time);
/*
* ticks are in 1/100's of seconds.
*/
- out->vb_uint32 += delta / 10000000;
+ out->vb_uint32 += fr_time_delta_to_csec(delta);
return 0;
}
fr_assert(map->da->type == FR_TYPE_UINT32);
now = fr_time();
- delta = now - reset_time;
+ delta = fr_time_sub(now, reset_time);
/*
* ticks are in 1/100's of seconds.
*/
- out->vb_uint32 += delta / 10000000;
+ out->vb_uint32 += fr_time_delta_to_csec(delta);
return 0;
}
/*
* Too old, we can delete it.
*/
- if (entry->cleanup < now) {
+ if (fr_time_lt(entry->cleanup, now)) {
state_entry_unlink(state, entry);
fr_dlist_insert_tail(&to_free, entry);
timed_out++;
* isn't perfect, but it's reasonable, and it's one less
* thing for an administrator to configure.
*/
- entry->cleanup = now + state->timeout;
+ entry->cleanup = fr_time_add(now, state->timeout);
/*
* Some modules create their own magic
fr_pair_append(reply_list, vp);
}
- DEBUG4("State ID %" PRIu64 " created, value 0x%pH, expires %" PRIu64 "s",
- entry->id, fr_box_octets(entry->state, sizeof(entry->state)), (uint64_t)entry->cleanup - now);
+ DEBUG4("State ID %" PRIu64 " created, value 0x%pH, expires %pV",
+ entry->id, fr_box_octets(entry->state, sizeof(entry->state)),
+ fr_box_time_delta(fr_time_sub(entry->cleanup, now)));
PTHREAD_MUTEX_LOCK(&state->mutex);
void radius_stats_ema(fr_stats_ema_t *ema, fr_time_t start, fr_time_t end)
{
- uint64_t tdiff;
+ fr_time_delta_t tdiff;
#ifdef WITH_STATS_DEBUG
static int n = 0;
#endif
if (ema->window == 0) return;
- fr_assert(start <= end);
+ fr_assert(fr_time_lteq(start, end));
/*
* Initialize it.
ema->f10 = (2 * F_EMA_SCALE) / ((10 * ema->window) + 1);
}
- tdiff = fr_time_delta_to_usec(start);
- tdiff -= fr_time_delta_to_usec(end);
+ tdiff = fr_time_delta_to_usec(fr_time_sub(start, end));
tdiff *= EMA_SCALE;
if (ema->ema1 == 0) {
*/
void fr_stats_bins(fr_stats_t *stats, fr_time_t start, fr_time_t end)
{
- fr_time_t diff;
+ fr_time_delta_t diff;
uint32_t delay;
- if (end < start) return; /* bad data */
- diff = end - start;
+ if (fr_time_lt(end, start)) return; /* bad data */
+ diff = fr_time_sub(end, start);
if (diff >= fr_time_delta_from_sec(10)) {
stats->elapsed[7]++;
if (!found) {
MEM(found = talloc(NULL, trigger_last_fired_t));
found->ci = ci;
- found->last_fired = 0;
+ found->last_fired = fr_time_wrap(0);
fr_rb_insert(trigger_last_fired_tree, found);
}
/*
* Send the rate_limited traps at most once per second.
*/
- if (found->last_fired == now) return -1;
+ if (fr_time_to_sec(found->last_fired) == fr_time_to_sec(now)) return -1;
found->last_fired = now;
}
static atomic_uint_fast64_t request_counter = ATOMIC_VAR_INIT(1);
#ifdef TESTING_TRUNK
-static fr_time_t test_time_base = 1;
+static fr_time_t test_time_base = fr_time_wrap(1);
static fr_time_t test_time(void)
{
* one or more connections comes online.
*/
if (!trunk->conf.backlog_on_failed_conn &&
- trunk->pub.last_failed && (trunk->pub.last_connected < trunk->pub.last_failed)) {
+ fr_time_gt(trunk->pub.last_failed, fr_time_wrap(0)) &&
+ fr_time_lt(trunk->pub.last_connected, trunk->pub.last_failed)) {
RATE_LIMIT_LOCAL_ROPTIONAL(&trunk->limit_last_failure_log,
RWARN, WARN, "Refusing to enqueue requests - "
"No active connections and last event was a connection failure");
fr_assert(treq->pub.trunk == trunk);
fr_assert(treq->pub.tconn == NULL);
fr_assert(treq->cancel_reason == FR_TRUNK_CANCEL_REASON_NONE);
- fr_assert(treq->last_freed > 0);
+ fr_assert(fr_time_gt(treq->last_freed, fr_time_wrap(0)));
trunk->pub.req_alloc_reused++;
} else {
MEM(treq = talloc_pooled_object(trunk, fr_trunk_request_t,
* have been idle for too long.
*/
while ((treq = fr_dlist_tail(&trunk->free_requests)) &&
- ((treq->last_freed + trunk->conf.req_cleanup_delay) <= now)) talloc_free(treq);
+ fr_time_lteq(fr_time_add(treq->last_freed, trunk->conf.req_cleanup_delay), now)) talloc_free(treq);
/*
* Free any connections which have drained
* We're above the target requests per connection
* spawn more connections!
*/
- if ((trunk->pub.last_above_target >= trunk->pub.last_below_target)) {
+ if (fr_time_gteq(trunk->pub.last_above_target, trunk->pub.last_below_target)) {
/*
* If connecting is provided, check we
* wouldn't have too many connections in
* Only apply hysteresis if we have at least
* one available connection.
*/
- if (conn_count && ((trunk->pub.last_above_target + trunk->conf.open_delay) > now)) {
+ if (conn_count && fr_time_gt(fr_time_add(trunk->pub.last_above_target, trunk->conf.open_delay), now)) {
DEBUG3("Not opening connection - Need to be above target for %pVs. It's been %pVs",
fr_box_time_delta(trunk->conf.open_delay),
- fr_box_time_delta(now - trunk->pub.last_above_target));
+ fr_box_time_delta(fr_time_sub(now, trunk->pub.last_above_target)));
return; /* too soon */
}
* Implement delay if there's no connections that
* could be immediately re-activated.
*/
- if ((trunk->pub.last_open + trunk->conf.open_delay) > now) {
+ if (fr_time_gt(fr_time_add(trunk->pub.last_open, trunk->conf.open_delay), now)) {
DEBUG3("Not opening connection - Need to wait %pVs before opening another connection. "
"It's been %pVs",
fr_box_time_delta(trunk->conf.open_delay),
- fr_box_time_delta(now - trunk->pub.last_open));
+ fr_box_time_delta(fr_time_sub(now, trunk->pub.last_open)));
return;
}
* We're below the target requests per connection.
* Free some connections...
*/
- else if (trunk->pub.last_below_target > trunk->pub.last_above_target) {
- if ((trunk->pub.last_below_target + trunk->conf.close_delay) > now) {
+ else if (fr_time_gt(trunk->pub.last_below_target, trunk->pub.last_above_target)) {
+ if (fr_time_gt(fr_time_add(trunk->pub.last_below_target, trunk->conf.close_delay), now)) {
DEBUG3("Not closing connection - Need to be below target for %pVs. It's been %pVs",
fr_box_time_delta(trunk->conf.close_delay),
- fr_box_time_delta(now - trunk->pub.last_below_target));
+ fr_box_time_delta(fr_time_sub(now, trunk->pub.last_below_target)));
return; /* too soon */
}
ROUND_UP_DIV(req_count, conn_count), trunk->conf.target_req_per_conn);
close:
- if ((trunk->pub.last_closed + trunk->conf.close_delay) > now) {
+ if (fr_time_gt(fr_time_add(trunk->pub.last_closed, trunk->conf.close_delay), now)) {
DEBUG3("Not closing connection - Need to wait %pVs before closing another connection. "
"It's been %pVs",
fr_box_time_delta(trunk->conf.close_delay),
- fr_box_time_delta(now - trunk->pub.last_closed));
+ fr_box_time_delta(fr_time_sub(now, trunk->pub.last_closed)));
return;
}
uint16_t conn_count = 0;
uint64_t req_per_conn = 0;
- fr_assert(now > 0);
+ fr_assert(fr_time_gt(now, fr_time_wrap(0)));
/*
* No need to update these as the trunk is being freed
*
* The equality check is correct here as both values start at 0.
*/
- if (trunk->pub.last_above_target <= trunk->pub.last_below_target) trunk->pub.last_above_target = now;
+ if (fr_time_lteq(trunk->pub.last_above_target, trunk->pub.last_below_target)) trunk->pub.last_above_target = now;
} else if (req_per_conn < trunk->conf.target_req_per_conn) {
below_target:
/*
*
* The equality check is correct here as both values start at 0.
*/
- if (trunk->pub.last_below_target <= trunk->pub.last_above_target) trunk->pub.last_below_target = now;
+ if (fr_time_lteq(trunk->pub.last_below_target, trunk->pub.last_above_target)) trunk->pub.last_below_target = now;
}
done:
mi->module = (module_t *)server->process_module;
mi->number = 0; /* Hacky hack hack */
- if (unlikely(track && track->dynamic && server->dynamic_client_module)) {
+ if (unlikely(track && fr_time_gt(track->dynamic, fr_time_wrap(0)) && server->dynamic_client_module)) {
process = (fr_process_module_t const *) server->dynamic_client_module->module->common;
mi->dl_inst = server->dynamic_client_module;
fr_assert(tls_cache->store.sess);
fr_assert(tls_cache->store.state == FR_TLS_CACHE_STORE_REQUESTED);
- if (expires <= now) {
+ if (fr_time_lteq(expires, now)) {
RWDEBUG("Session has already expired, not storing");
return UNLANG_ACTION_CALCULATE_RESULT;
}
* How long the session has to live
*/
MEM(pair_update_request(&vp, attr_tls_session_ttl) >= 0);
- vp->vp_time_delta = fr_time_delta_from_nsec(expires - now);
+ vp->vp_time_delta = fr_time_sub(expires, now);
/*
* Serialize the session
/*
* Check the cert hasn't expired
*/
- if (fr_time_from_sec(not_after) < now) {
+ if (fr_time_lt(fr_time_from_sec(not_after), now)) {
fr_strerror_printf("Certificate has expired. "
"Validity period (notAfter) ends %pV, current time is %pV",
fr_box_date(fr_unix_time_from_time(not_before)), fr_box_date(fr_time_to_unix_time(now)));
* Check the cert's validity period
* has started.
*/
- if (fr_time_from_sec(not_before) > now) {
+ if (fr_time_gt(fr_time_from_sec(not_before), now)) {
fr_strerror_printf("Certificate is not yet valid. "
"Validity period (notBefore) starts %pV, current time is %pV",
fr_box_date(fr_unix_time_from_time(not_before)), fr_box_date(fr_time_to_unix_time(now)));
t = &unlang_thread_array[instruction->number];
- t->cpu_time += (fr_time() - t->enter);
+ t->cpu_time += fr_time_sub(fr_time(), t->enter);
}
* frame is cleaned up.
*/
if (instruction->actions.retry.mrd) {
- retry->timeout = fr_time() + instruction->actions.retry.mrd;
+ retry->timeout = fr_time_add(fr_time(), instruction->actions.retry.mrd);
if (fr_event_timer_at(retry, unlang_interpret_event_list(request), &retry->ev, retry->timeout,
instruction_timeout_handler, request) < 0) {
case FR_RETRY_MRD:
REDEBUG("Reached max_rtx_duration (%pVs > %pVs) - sending timeout signal",
- fr_box_time_delta(now - state->retry.start), fr_box_time_delta(state->retry.config->mrd));
+ fr_box_time_delta(fr_time_sub(now, state->retry.start)), fr_box_time_delta(state->retry.config->mrd));
break;
case FR_RETRY_MRC:
unlang_frame_state_module_t *state = talloc_get_type_abort(frame->state, unlang_frame_state_module_t);
char const *caller;
unlang_action_t ua;
- fr_time_t now = 0;
+ fr_time_t now = fr_time_wrap(0);
*p_result = state->rcode = RLM_MODULE_NOOP;
state->set_rcode = true;
* If we have retry timers, then start the retries.
*/
if (frame->instruction->actions.retry.irt) {
- fr_assert(now != 0);
+ fr_assert(fr_time_gt(now, fr_time_wrap(0)));
(void) fr_retry_init(&state->retry, now, &frame->instruction->actions.retry); /* can't fail */
TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_timer_t const **ev_p,
fr_time_delta_t delta, fr_event_timer_cb_t callback, void const *uctx)
{
- fr_time_t now;
-
- now = el->time();
- now += delta;
-
return _fr_event_timer_at(NDEBUG_LOCATION_VALS
- ctx, el, ev_p, now, callback, uctx);
+ ctx, el, ev_p, fr_time_add(el->time(), delta), callback, uctx);
}
/** Delete a timer event from the event list
struct kevent evset;
int waiting = 0;
int kq = kqueue();
- fr_time_t now, start = el->time(), end = start + timeout;
+ fr_time_t now, start = el->time(), end = fr_time_add(start, timeout);
if (unlikely(kq < 0)) goto force;
/*
* Keep draining process exits as they come in...
*/
- while ((waiting > 0) && (end > (now = el->time()))) {
+ while ((waiting > 0) && fr_time_gt(end, (now = el->time()))) {
struct kevent kev;
int ret;
- ret = kevent(kq, NULL, 0, &kev, 1, &fr_time_delta_to_timespec(end - now));
+ ret = kevent(kq, NULL, 0, &kev, 1,
+ &fr_time_delta_to_timespec(fr_time_sub(end, now)));
switch (ret) {
default:
EVENT_DEBUG("%p - %s - Reaper tmp loop error %s, forcing process reaping",
if (unlikely(!el)) return 0;
if (fr_lst_num_elements(el->times) == 0) {
- *when = 0;
+ *when = fr_time_wrap(0);
return 0;
}
ev = fr_lst_peek(el->times);
if (!ev) {
- *when = 0;
+ *when = fr_time_wrap(0);
return 0;
}
/*
* See if it's time to do this one.
*/
- if (ev->when > *when) {
+ if (fr_time_gt(ev->when, *when)) {
*when = ev->when;
return 0;
}
*/
int fr_event_corral(fr_event_list_t *el, fr_time_t now, bool wait)
{
- fr_time_t when, *wake;
+ fr_time_delta_t when, *wake;
struct timespec ts_when, *ts_wake;
fr_event_pre_t *pre;
int num_fd_events;
*/
ev = fr_lst_peek(el->times);
if (ev) {
- if (ev->when <= el->now) {
+ if (fr_time_lteq(ev->when, el->now)) {
timer_event_ready = true;
} else if (wait) {
- when = ev->when - el->now;
+ when = fr_time_sub(ev->when, el->now);
} /* else we're not waiting, leave "when == 0" */
for (pre = fr_dlist_head(&el->pre_callbacks);
pre != NULL;
pre = fr_dlist_next(&el->pre_callbacks, pre)) {
- if (pre->callback(wake ? *wake : 0, pre->uctx) > 0) {
+ if (pre->callback(now, wake ? *wake : 0, pre->uctx) > 0) {
wake = &when;
when = 0;
}
* If there are no FD events, we must have woken up from a timer
*/
if (!num_fd_events) {
- el->now += when;
+ el->now = fr_time_add(el->now, when);
if (wait) timer_event_ready = true;
}
/*
for (ev = fr_lst_iter_init(el->times, &iter);
ev != NULL;
ev = fr_lst_iter_next(el->times, &iter)) {
- fr_time_delta_t diff = ev->when - now;
+ fr_time_delta_t diff = fr_time_sub(ev->when, now);
for (i = 0; i < NUM_ELEMENTS(decades); i++) {
if ((diff <= decades[i]) || (i == NUM_ELEMENTS(decades) - 1)) {
now = el->time();
- EVENT_DEBUG("Time is now %"PRId64"", now);
+ EVENT_DEBUG("Time is now %"PRId64"", fr_time_unwrap(now));
for (ev = fr_lst_iter_init(el->times, &iter);
ev;
ev = fr_lst_iter_next(el->times, &iter)) {
(void)talloc_get_type_abort(ev, fr_event_timer_t);
EVENT_DEBUG("%s[%u]: %p time=%" PRId64 " (%c), callback=%p",
- ev->file, ev->line, ev, ev->when, now > ev->when ? '<' : '>', ev->callback);
+ ev->file, ev->line, ev, fr_time_unwrap(ev->when),
+ fr_time_gt(now, ev->when) ? '<' : '>', ev->callback);
}
}
#endif
* Called before calling kqueue to put the thread in a sleeping state.
*
* @param[in] now The current time.
+ * @param[in] wake When we'll next need to wake up to service an event.
* @param[in] uctx User ctx passed to #fr_event_list_alloc.
*/
-typedef int (*fr_event_status_cb_t)(fr_time_t now, void *uctx);
+typedef int (*fr_event_status_cb_t)(fr_time_t now, fr_time_delta_t wake, void *uctx);
/** Called when an IO event occurs on a file descriptor
*
end = fr_time();
TEST_MSG_ALWAYS("\ncycle size: %d\n", HEAP_CYCLE_SIZE);
- TEST_MSG_ALWAYS("insert: %2.2f s\n", ((double)(start_remove - start_insert)) / NSEC);
- TEST_MSG_ALWAYS("extract: %2.2f s\n", ((double)(start_swap - start_remove)) / NSEC);
- TEST_MSG_ALWAYS("swap: %2.2f s\n", ((double)(end - start_swap)) / NSEC);
+ TEST_MSG_ALWAYS("insert: %2.2f s\n", fr_time_sub(start_remove, start_insert) / (double)NSEC);
+ TEST_MSG_ALWAYS("extract: %2.2f s\n", fr_time_sub(start_swap, start_remove) / (double)NSEC);
+ TEST_MSG_ALWAYS("swap: %2.2f s\n", fr_time_sub(end, start_swap) / (double)NSEC);
talloc_free(hp);
free(array);
end = fr_time();
TEST_MSG_ALWAYS("\ncycle size: %d\n", LST_CYCLE_SIZE);
- TEST_MSG_ALWAYS("insert: %2.2f s\n", ((double)(start_remove - start_insert)) / NSEC);
- TEST_MSG_ALWAYS("extract: %2.2f s\n", ((double)(start_swap - start_remove)) / NSEC);
- TEST_MSG_ALWAYS("swap: %2.2f s\n", ((double)(end - start_swap)) / NSEC);
+ TEST_MSG_ALWAYS("insert: %2.2f s\n", fr_time_sub(start_remove, start_insert) / (double)NSEC);
+ TEST_MSG_ALWAYS("extract: %2.2f s\n", fr_time_sub(start_swap, start_remove) / (double)NSEC);
+ TEST_MSG_ALWAYS("swap: %2.2f s\n", fr_time_sub(end, start_swap) / (double)NSEC);
talloc_free(lst);
free(values);
* Check times for LST alloc, insert, pop
*/
{
- fr_time_t start_alloc, end_alloc, start_insert, end_insert, start_pop, end_pop, end_pop_first = 0;
+ fr_time_t start_alloc, end_alloc, start_insert, end_insert, start_pop, end_pop, end_pop_first = fr_time_wrap(0);
populate_values(values, count);
end_pop = fr_time();
TEST_MSG_ALWAYS("\nlst size: %u\n", count);
- TEST_MSG_ALWAYS("alloc: %"PRIu64" μs\n", (end_alloc - start_alloc) / 1000);
- TEST_MSG_ALWAYS("insert: %"PRIu64" μs\n", (end_insert - start_insert) / 1000);
- TEST_MSG_ALWAYS("pop-first: %"PRIu64" μs\n", (end_pop_first - start_pop) / 1000);
- TEST_MSG_ALWAYS("pop: %"PRIu64" μs\n", (end_pop - start_pop) / 1000);
+ TEST_MSG_ALWAYS("alloc: %"PRId64" μs\n", fr_time_sub(end_alloc, start_alloc) / 1000);
+ TEST_MSG_ALWAYS("insert: %"PRId64" μs\n", fr_time_sub(end_insert, start_insert) / 1000);
+ TEST_MSG_ALWAYS("pop-first: %"PRId64" μs\n", fr_time_sub(end_pop_first, start_pop) / 1000);
+ TEST_MSG_ALWAYS("pop: %"PRId64" μs\n", fr_time_sub(end_pop, start_pop) / 1000);
talloc_free(lst);
}
* Check times for heap alloc, insert, pop
*/
{
- fr_time_t start_alloc, end_alloc, start_insert, end_insert, start_pop, end_pop, end_pop_first = 0;
+ fr_time_t start_alloc, end_alloc, start_insert, end_insert, start_pop, end_pop, end_pop_first = fr_time_wrap(0);
populate_values(values, count);
end_pop = fr_time();
TEST_MSG_ALWAYS("\nheap size: %u\n", count);
- TEST_MSG_ALWAYS("alloc: %"PRIu64" μs\n", (end_alloc - start_alloc) / 1000);
- TEST_MSG_ALWAYS("insert: %"PRIu64" μs\n", (end_insert - start_insert) / 1000);
- TEST_MSG_ALWAYS("pop-first: %"PRIu64" μs\n", (end_pop_first - start_pop) / 1000);
- TEST_MSG_ALWAYS("pop: %"PRIu64" μs\n", (end_pop - start_pop) / 1000);
+ TEST_MSG_ALWAYS("alloc: %"PRId64" μs\n", fr_time_sub(end_alloc, start_alloc) / 1000);
+ TEST_MSG_ALWAYS("insert: %"PRId64" μs\n", fr_time_sub(end_insert, start_insert) / 1000);
+ TEST_MSG_ALWAYS("pop-first: %"PRId64" μs\n", fr_time_sub(end_pop_first, start_pop) / 1000);
+ TEST_MSG_ALWAYS("pop: %"PRId64" μs\n", fr_time_sub(end_pop, start_pop) / 1000);
talloc_free(heap);
}
{
lst_thing **array;
populate_values(values, count);
- fr_time_t start_alloc, end_alloc, start_insert, end_insert, start_pop, end_pop, end_pop_first = 0;
+ fr_time_t start_alloc, end_alloc, start_insert, end_insert, start_pop, end_pop, end_pop_first = fr_time_wrap(0);
start_alloc = fr_time();
array = talloc_array(NULL, lst_thing *, count);
end_pop = fr_time();
TEST_MSG_ALWAYS("\narray size: %u\n", count);
- TEST_MSG_ALWAYS("alloc: %"PRIu64" μs\n", (end_alloc - start_alloc) / 1000);
- TEST_MSG_ALWAYS("insert: %"PRIu64" μs\n", (end_insert - start_insert) / 1000);
- TEST_MSG_ALWAYS("pop-first: %"PRIu64" μs\n", (end_pop_first - start_pop) / 1000);
- TEST_MSG_ALWAYS("pop: %"PRIu64" μs\n", (end_pop - start_pop) / 1000);
+ TEST_MSG_ALWAYS("alloc: %"PRId64" μs\n", fr_time_sub(end_alloc, start_alloc) / 1000);
+ TEST_MSG_ALWAYS("insert: %"PRId64" μs\n", fr_time_sub(end_insert, start_insert) / 1000);
+ TEST_MSG_ALWAYS("pop-first: %"PRId64" μs\n", fr_time_sub(end_pop_first, start_pop) / 1000);
+ TEST_MSG_ALWAYS("pop: %"PRId64" μs\n", fr_time_sub(end_pop, start_pop) / 1000);
talloc_free(array);
}
fr_pair_list_t test_vps;
unsigned int i, j;
fr_pair_t *new_vp;
- fr_time_t start, end, used = 0;
+ fr_time_t start, end;
+ fr_time_delta_t used = 0;
size_t input_count = talloc_array_length(source_vps);
fr_pair_list_init(&test_vps);
start = fr_time();
fr_pair_append(&test_vps, new_vp);
end = fr_time();
- used += (end - start);
+ used += fr_time_sub(end, start);
}
TEST_CHECK(fr_pair_list_len(&test_vps) == len);
fr_pair_list_free(&test_vps);
fr_pair_list_t test_vps;
unsigned int i, j;
fr_pair_t *new_vp;
- fr_time_t start, end, used = 0;
+ fr_time_t start, end;
+ fr_time_delta_t used = 0;
fr_dict_attr_t const *da;
size_t input_count = talloc_array_length(source_vps);
start = fr_time();
(void) fr_pair_find_by_da(&test_vps, da, 0);
end = fr_time();
- used += (end - start);
+ used += fr_time_sub(end, start);
}
}
fr_pair_list_free(&test_vps);
fr_pair_list_t test_vps;
unsigned int i, j, nth_item;
fr_pair_t *new_vp;
- fr_time_t start, end, used = 0;
+ fr_time_t start, end;
+ fr_time_delta_t used = 0;
fr_dict_attr_t const *da;
size_t input_count = talloc_array_length(source_vps);
start = fr_time();
(void) fr_pair_find_by_da(&test_vps, da, nth_item);
end = fr_time();
- used += (end - start);
+ used += fr_time_sub(end, start);
}
}
fr_pair_list_free(&test_vps);
fr_pair_list_t test_vps;
unsigned int i, j;
fr_pair_t *new_vp;
- fr_time_t start, end, used = 0;
+ fr_time_t start, end;
+ fr_time_delta_t used = 0;
size_t input_count = talloc_array_length(source_vps);
fr_pair_list_init(&test_vps);
start = fr_time();
fr_pair_list_free(&test_vps);
end = fr_time();
- used += (end - start);
+ used += fr_time_sub(end, start);
}
fr_pair_list_free(&test_vps);
TEST_MSG_ALWAYS("repetitions=%d", reps);
char fmt_test[64];
fr_time_t now = fr_time();
- snprintf(fmt_test, sizeof(fmt_test), "Now is %ld", (long)now);
+ snprintf(fmt_test, sizeof(fmt_test), "Now is %"PRId64, fr_time_unwrap(now));
TEST_CASE("Find 'Test-String'");
TEST_CHECK((vp = fr_pair_find_by_da(&test_pairs, fr_dict_attr_test_string, 0)) != NULL);
VP_VERIFY(vp);
TEST_CASE("Copy content of 'fmt_test' to attribute value using fr_pair_value_aprintf()");
- TEST_CHECK(fr_pair_value_aprintf(vp, "Now is %ld", (long)now) == 0);
+ TEST_CHECK(fr_pair_value_aprintf(vp, "Now is %"PRId64, fr_time_unwrap(now)) == 0);
TEST_CASE("Validating VP_VERIFY()");
VP_VERIFY(vp);
rt = uint128_to_64(uint128_rshift(delay, 32));
r->rt = rt;
- r->next = now + rt;
+ r->next = fr_time_add(now, rt);
return 0;
}
if (r->config->mrd) {
fr_time_t end;
- end = r->start;
- end += r->config->mrd;
-
- if (now > end) {
+ end = fr_time_add(r->start, r->config->mrd);
+ if (fr_time_gt(now, end)) {
return FR_RETRY_MRD;
}
}
* the packet at "next + rt", and not "now + rt". That
* way the timer won't drift.
*/
- r->next += rt;
+ r->next = fr_time_add(r->next, rt);
/*
* The "next" retransmission time is in the past, AND
* i.e. if we weren't serviced for one event, just skip
* it, and go to the next one.
*/
- if ((r->next + (rt / 2)) < now) goto redo;
+ if (fr_time_lt(fr_time_add(r->next, (rt / 2)), now)) goto redo;
return FR_RETRY_CONTINUE;
}
}
stop = fr_time();
- rate = (uint64_t)((float)NSEC / ((stop - start) / 100000));
+ rate = (uint64_t)((float)NSEC / (fr_time_sub(stop, start) / 100000));
printf("printf pop rate %" PRIu64 "\n", rate);
/* shared runners are terrible for performance tests */
}
stop = fr_time();
- rate = (uint64_t)((float)NSEC / ((stop - start) / 100000));
+ rate = (uint64_t)((float)NSEC / (fr_time_sub(stop, start) / 100000));
printf("const pop rate %" PRIu64 "\n", rate);
/* shared runners are terrible for performance tests */
void fr_time_elapsed_update(fr_time_elapsed_t *elapsed, fr_time_t start, fr_time_t end)
{
- fr_time_t delay;
+ fr_time_delta_t delay;
- if (start >= end) {
+ if (fr_time_gteq(start, end)) {
delay = 0;
} else {
- delay = end - start;
+ delay = fr_time_sub(end, start);
}
if (delay < 1000) { /* microseconds */
} else if (delay < 10000000) {
elapsed->array[4]++;
- } else if (delay < (fr_time_t) 100000000) {
+ } else if (delay < fr_time_delta_from_msec(100)) {
elapsed->array[5]++;
- } else if (delay < (fr_time_t) 1000000000) { /* seconds */
+ } else if (delay < (fr_time_delta_t) 1000000000) { /* seconds */
elapsed->array[6]++;
} else { /* tens of seconds or more */
* @brief Simple time functions
*
* @copyright 2016-2019 Alan DeKok (aland@freeradius.org)
- * @copyright 2019-2020 Arran Cudbard-Bell (a.cudbardb@freeradius.org)
+ * @copyright 2019-2021 Arran Cudbard-Bell (a.cudbardb@freeradius.org)
*/
RCSIDH(time_h, "$Id$")
* for internal timers, events, etc. It can skew randomly as NTP
* plays with the local clock.
*/
-typedef int64_t fr_time_t;
+typedef struct fr_time_s {
+ int64_t value;
+} fr_time_t;
/** "Unix" time. This is the time in nanoseconds since midnight January 1, 1970
*
*/
typedef int64_t fr_time_delta_t;
+#define fr_time_wrap(_time) (fr_time_t){ .value = (_time) }
+#define fr_time_unwrap(_time) (_time).value
+
+/* Don't add fr_time_add_time_time, it's almost always a type error */
+static inline fr_time_t fr_time_add_time_delta(fr_time_t a, fr_time_delta_t b) { return fr_time_wrap(fr_time_unwrap(a) + b); }
+static inline fr_time_t fr_time_add_delta_time(fr_time_delta_t a, fr_time_t b) { return fr_time_wrap(a + fr_time_unwrap(b)); }
+static inline fr_time_delta_t fr_time_add_delta_delta(fr_time_delta_t a, fr_time_delta_t b) { return a + b; }
+
+#define fr_time_add(_a, _b) \
+ _Generic(_a, \
+ fr_time_t : _Generic(_b, \
+ fr_time_delta_t : fr_time_add_time_delta \
+ ), \
+ fr_time_delta_t : _Generic(_b, \
+ fr_time_t : fr_time_add_delta_time, \
+ fr_time_delta_t : fr_time_add_delta_delta \
+ ) \
+ )(_a, _b)
+
+static inline fr_time_delta_t fr_time_sub_time_time(fr_time_t a, fr_time_t b) { return fr_time_unwrap(a) - fr_time_unwrap(b); }
+static inline fr_time_t fr_time_sub_time_delta(fr_time_t a, fr_time_delta_t b) { return fr_time_wrap(fr_time_unwrap(a) - b); }
+
+/** Subtract one time from another
+ *
+ */
+#define fr_time_sub(_a, _b) \
+ _Generic(_a, \
+ fr_time_t : _Generic(_b, \
+ fr_time_t : fr_time_sub_time_time, \
+ fr_time_delta_t : fr_time_sub_time_delta \
+ ) \
+ )(_a, _b)
+
+static inline bool fr_time_gt(fr_time_t a, fr_time_t b) { return fr_time_unwrap(a) > fr_time_unwrap(b); }
+static inline bool fr_time_gteq(fr_time_t a, fr_time_t b) { return fr_time_unwrap(a) >= fr_time_unwrap(b); }
+static inline bool fr_time_lt(fr_time_t a, fr_time_t b) { return fr_time_unwrap(a) < fr_time_unwrap(b); }
+static inline bool fr_time_lteq(fr_time_t a, fr_time_t b) { return fr_time_unwrap(a) <= fr_time_unwrap(b); }
+static inline bool fr_time_eq(fr_time_t a, fr_time_t b) { return fr_time_unwrap(a) == fr_time_unwrap(b); }
+static inline bool fr_time_neq(fr_time_t a, fr_time_t b) { return fr_time_unwrap(a) != fr_time_unwrap(b); }
+
/** The base resolution for print parse operations
*/
typedef enum {
#define NSEC (1000000000)
#define USEC (1000000)
#define MSEC (1000)
+#define CSEC (100)
extern _Atomic int64_t our_realtime;
#define fr_unix_time_from_nsec(_x) (fr_unix_time_t)(_x)
#define fr_unix_time_from_usec(_x) (fr_unix_time_t)fr_time_delta_from_usec((fr_time_delta_t)(_x))
#define fr_unix_time_from_msec(_x) (fr_unix_time_t)fr_time_delta_from_msec((fr_time_delta_t)(_x))
+#define fr_unix_time_from_csec(_x) (fr_unix_time_t)fr_time_delta_from_csec((fr_time_delta_t)(_x))
#define fr_unix_time_from_sec(_x) (fr_unix_time_t)fr_time_delta_from_sec((fr_time_delta_t)(_x))
/** Covert a time_t into out internal fr_unix_time_t
#define fr_unix_time_to_nsec(_x) (uint64_t)(_x)
#define fr_unix_time_to_usec(_x) (uint64_t)fr_time_delta_to_usec(_x)
#define fr_unix_time_to_msec(_x) (uint64_t)fr_time_delta_to_msec(_x)
+#define fr_unix_time_to_csec(_x) (uint64_t)fr_time_delta_to_csec(_x)
#define fr_unix_time_to_sec(_x) (uint64_t)fr_time_delta_to_sec(_x)
static inline CC_HINT(nonnull) fr_unix_time_t fr_unix_time_from_timeval(struct timeval const *tv)
return (msec * (NSEC / MSEC));
}
+static inline fr_time_delta_t fr_time_delta_from_csec(int64_t csec)
+{
+ return (csec * (NSEC / CSEC));
+}
+
static inline fr_time_delta_t fr_time_delta_from_sec(int64_t sec)
{
return (sec * NSEC);
return (delta / (NSEC / MSEC));
}
+static inline int64_t fr_time_delta_to_csec(fr_time_delta_t delta)
+{
+ return (delta / (NSEC / CSEC));
+}
+
static inline int64_t fr_time_delta_to_sec(fr_time_delta_t delta)
{
return (delta / NSEC);
*
* @param[in] _when The server epoch time to convert.
*/
-#define fr_time_to_timeval(_when) fr_time_delta_to_timeval(fr_time_wallclock_at_last_sync() + _when)
+#define fr_time_to_timeval(_when) fr_time_delta_to_timeval(fr_time_wallclock_at_last_sync() + fr_time_unwrap(_when))
/** Convert server epoch time to unix epoch time
*
* @param[in] _when The server epoch time to convert.
*/
-#define fr_time_to_timespec(_when) fr_time_delta_to_timespec(fr_time_wallclock_at_last_sync() + _when)
+#define fr_time_to_timespec(_when) fr_time_delta_to_timespec(fr_time_wallclock_at_last_sync() + fr_time_unwrap(_when))
/** Nanoseconds since the Unix Epoch the last time we synced internal time with wallclock time
*/
static inline fr_unix_time_t fr_time_to_unix_time(fr_time_t when)
{
- return when + atomic_load_explicit(&our_realtime, memory_order_consume);
+ return fr_time_unwrap(when) + atomic_load_explicit(&our_realtime, memory_order_consume);
}
/** Convert an fr_time_t (internal time) to number of usec since the unix epoch (wallclock time)
*/
static inline int64_t fr_time_to_usec(fr_time_t when)
{
- return ((when + atomic_load_explicit(&our_realtime, memory_order_consume)) / (NSEC / USEC));
+ return ((fr_time_unwrap(when) + atomic_load_explicit(&our_realtime, memory_order_consume)) / (NSEC / USEC));
}
/** Convert an fr_time_t (internal time) to number of msec since the unix epoch (wallclock time)
*/
static inline int64_t fr_time_to_msec(fr_time_t when)
{
- return ((when + atomic_load_explicit(&our_realtime, memory_order_consume)) / (NSEC / MSEC));
+ return ((fr_time_unwrap(when) + atomic_load_explicit(&our_realtime, memory_order_consume)) / (NSEC / MSEC));
+}
+
+/** Convert an fr_time_t (internal time) to number of csec since the unix epoch (wallclock time)
+ *
+ */
+static inline int64_t fr_time_to_csec(fr_time_t when)
+{
+ return ((fr_time_unwrap(when) + atomic_load_explicit(&our_realtime, memory_order_consume)) / (NSEC / CSEC));
}
/** Convert an fr_time_t (internal time) to number of sec since the unix epoch (wallclock time)
*/
static inline int64_t fr_time_to_sec(fr_time_t when)
{
- return ((when + atomic_load_explicit(&our_realtime, memory_order_consume)) / NSEC);
+ return ((fr_time_unwrap(when) + atomic_load_explicit(&our_realtime, memory_order_consume)) / NSEC);
+}
+
+/** Convert a timespec (wallclock time) to a fr_time_t (internal time)
+ *
+ * @param[in] when_ts The timestamp to convert.
+ * @return
+ * - >0 number of nanoseconds since the server started.
+ * - 0 when the server started.
+ *	- <0 number of nanoseconds before the server started.
+ */
+static inline CC_HINT(nonnull) fr_time_t fr_time_from_timespec(struct timespec const *when_ts)
+{
+ return fr_time_wrap(fr_time_delta_from_timespec(when_ts) - atomic_load_explicit(&our_realtime, memory_order_consume));
}
/** Convert a timeval (wallclock time) to a fr_time_t (internal time)
*/
static inline CC_HINT(nonnull) fr_time_t fr_time_from_timeval(struct timeval const *when_tv)
{
- return fr_time_delta_from_timeval(when_tv) - atomic_load_explicit(&our_realtime, memory_order_consume);
+ return fr_time_wrap(fr_time_delta_from_timeval(when_tv) - atomic_load_explicit(&our_realtime, memory_order_consume));
}
-/** Convert a time_t (wallclock time) to a fr_time_t (internal time)
+/** Convert a nsec (wallclock time) to a fr_time_t (internal time)
*
* @param[in] when The timestamp to convert.
* @return
* - 0 when the server started.
* - <0 number of nanoseconds before the server started.
*/
-static inline fr_time_t fr_time_from_sec(time_t when)
+static inline fr_time_t fr_time_from_nsec(int64_t when)
{
- return (((fr_time_t) when) * NSEC) - atomic_load_explicit(&our_realtime, memory_order_consume);
+	return fr_time_wrap(when - atomic_load_explicit(&our_realtime, memory_order_consume));
}
-/** Convert msec (wallclock time) to a fr_time_t (internal time)
+/** Convert usec (wallclock time) to a fr_time_t (internal time)
*
* @param[in] when The timestamp to convert.
* @return
* - 0 when the server started.
* - <0 number of nanoseconds before the server started.
*/
-static inline fr_time_t fr_time_from_msec(int64_t when)
+static inline fr_time_t fr_time_from_usec(int64_t when)
{
- return (((fr_time_t) when) * MSEC) - atomic_load_explicit(&our_realtime, memory_order_consume);
+	return fr_time_wrap((when * (NSEC / USEC)) - atomic_load_explicit(&our_realtime, memory_order_consume));
}
-/** Convert usec (wallclock time) to a fr_time_t (internal time)
+/** Convert msec (wallclock time) to a fr_time_t (internal time)
*
* @param[in] when The timestamp to convert.
* @return
* - 0 when the server started.
* - <0 number of nanoseconds before the server started.
*/
-static inline fr_time_t fr_time_from_usec(int64_t when)
+static inline fr_time_t fr_time_from_msec(int64_t when)
{
- return (((fr_time_t) when) * USEC) - atomic_load_explicit(&our_realtime, memory_order_consume);
+	return fr_time_wrap((when * (NSEC / MSEC)) - atomic_load_explicit(&our_realtime, memory_order_consume));
}
-/** Convert a nsec (wallclock time) to a fr_time_t (internal time)
+/** Convert csec (wallclock time) to a fr_time_t (internal time)
*
* @param[in] when The timestamp to convert.
* @return
* - 0 when the server started.
* - <0 number of nanoseconds before the server started.
*/
-static inline fr_time_t fr_time_from_nsec(int64_t when)
+static inline fr_time_t fr_time_from_csec(int64_t when)
{
- return (((fr_time_t) when) * NSEC) - atomic_load_explicit(&our_realtime, memory_order_consume);
+	return fr_time_wrap((when * (NSEC / CSEC)) - atomic_load_explicit(&our_realtime, memory_order_consume));
}
-/** Convert a timespec (wallclock time) to a fr_time_t (internal time)
+/** Convert a time_t (wallclock time) to a fr_time_t (internal time)
*
- * @param[in] when_ts The timestamp to convert.
+ * @param[in] when The timestamp to convert.
* @return
* - >0 number of nanoseconds since the server started.
* - 0 when the server started.
- * - 0 if when_tv occurred before the server started.
+ * - <0 number of nanoseconds before the server started.
*/
-static inline CC_HINT(nonnull) fr_time_t fr_time_from_timespec(struct timespec const *when_ts)
+static inline fr_time_t fr_time_from_sec(time_t when)
{
- return fr_time_delta_from_timespec(when_ts) - atomic_load_explicit(&our_realtime, memory_order_consume);
+ return fr_time_wrap((when * NSEC) - atomic_load_explicit(&our_realtime, memory_order_consume));
}
/** Compare two fr_time_t values
*/
static inline int8_t fr_time_cmp(fr_time_t a, fr_time_t b)
{
- return CMP(a, b);
+ return CMP(fr_time_unwrap(a), fr_time_unwrap(b));
}
/** Return a relative time since the server our_epoch
#ifdef HAVE_CLOCK_GETTIME
struct timespec ts;
(void) clock_gettime(CLOCK_MONOTONIC, &ts);
- return fr_time_delta_from_timespec(&ts) - our_epoch;
+ return fr_time_wrap(fr_time_delta_from_timespec(&ts) - our_epoch);
#else /* __MACH__ is defined */
uint64_t when;
if ((flags & UDP_FLAGS_PEEK) != 0) sock_flags |= MSG_PEEK;
- if (when) *when = 0;
+ if (when) *when = fr_time_wrap(0);
/*
* Always initialise the output socket structure
* We didn't get it from the kernel
* so use our own time source.
*/
- if (when && !*when) *when = fr_time();
+ if (when && fr_time_eq(*when, fr_time_wrap(0))) *when = fr_time();
return slen;
}
*/
if (config->send_buff_is_set) {
int opt;
-
+
opt = config->send_buff;
if (opt < 65536) opt = 65536;
* If the entry is expired, tell the caller that
* it wasn't written to the socket.
*/
- if (now >= entry->expires) {
+ if (fr_time_gteq(now, entry->expires)) {
void *rctx = entry->rctx;
talloc_free(entry);
.sockaddr = sockaddr,
.socklen = socklen,
.uq = uq,
- .expires = fr_time() + uq->config->max_queued_time,
+ .expires = fr_time_add(fr_time(), uq->config->max_queued_time),
.rctx = rctx,
.packet_len = packet_len,
};
if (from_len) *from_len = msgh.msg_namelen;
if (ifindex) *ifindex = 0;
- if (when) *when = 0;
+ if (when) *when = fr_time_wrap(0);
/* Process auxiliary received data in msgh */
for (cmsg = CMSG_FIRSTHDR(&msgh);
#endif
}
- if (when && !*when) *when = fr_time();
+ if (when && fr_time_eq(*when, fr_time_wrap(0))) *when = fr_time();
return ret;
}
#define fr_box_date(_val) _fr_box(FR_TYPE_DATE, .vb_date, _val)
+#define fr_box_time(_val) _fr_box(FR_TYPE_DATE, .vb_date, fr_time_to_unix_time(_val))
+
#define fr_box_size(_val) _fr_box(FR_TYPE_SIZE, .vb_size, _val)
#define _fr_box_with_da(_type, _field, _val, _da) (&(fr_value_box_t){ .type = _type, _field = (_val), .enumv = (_da) })
cf_section_name2(thread->inst->parent->server_cs), buffer, end - start);
}
- if (fr_event_timer_at(thread, el, &thread->ev, now + fr_time_delta_from_sec(end - start), do_cron, thread) < 0) {
+ if (fr_event_timer_at(thread, el, &thread->ev, fr_time_add(now, fr_time_delta_from_sec(end - start)),
+ do_cron, thread) < 0) {
fr_assert(0);
}
vp = fr_pair_afrom_da(request->request_ctx, attr_packet_original_timestamp);
if (vp) {
- vp->vp_date = ((fr_time_t) timestamp) * NSEC;
+ vp->vp_date = fr_unix_time_from_sec(timestamp);
vp->type = VT_DATA;
fr_dcursor_append(&cursor, vp);
}
* "detail.work" exists, try to lock it.
*/
if (rad_lockfd_nonblock(fd, 0) < 0) {
- fr_time_t delay;
+ fr_time_delta_t delay;
DEBUG3("proto_detail (%s): Failed locking %s: %s",
thread->name, inst->filename_work, fr_syserror(errno));
fr_assert(thread->fd >= 0);
if (!buffer[0]) {
- if (track->retry.start == 0) {
+ if (fr_time_eq(track->retry.start, fr_time_wrap(0))) {
fr_retry_init(&track->retry, fr_time(), &inst->retry_config);
} else {
fr_retry_state_t state;
{ FR_CONF_OFFSET("start_pps", FR_TYPE_UINT32, proto_load_step_t, load.start_pps) },
{ FR_CONF_OFFSET("max_pps", FR_TYPE_UINT32, proto_load_step_t, load.max_pps) },
- { FR_CONF_OFFSET("duration", FR_TYPE_UINT32, proto_load_step_t, load.duration) },
+ { FR_CONF_OFFSET("duration", FR_TYPE_TIME_DELTA, proto_load_step_t, load.duration) },
{ FR_CONF_OFFSET("step", FR_TYPE_UINT32, proto_load_step_t, load.step) },
{ FR_CONF_OFFSET("max_backlog", FR_TYPE_UINT32, proto_load_step_t, load.milliseconds) },
{ FR_CONF_OFFSET("parallel", FR_TYPE_UINT32, proto_load_step_t, load.parallel) },
if (inst->load.max_pps > 0) FR_INTEGER_BOUND_CHECK("max_pps", inst->load.max_pps, >, inst->load.start_pps);
FR_INTEGER_BOUND_CHECK("max_pps", inst->load.max_pps, <, 100000);
- FR_INTEGER_BOUND_CHECK("duration", inst->load.duration, >=, 1);
- FR_INTEGER_BOUND_CHECK("duration", inst->load.duration, <, 10000);
+ FR_TIME_DELTA_BOUND_CHECK("duration", inst->load.duration, >=, fr_time_delta_from_sec(1));
+ FR_TIME_DELTA_BOUND_CHECK("duration", inst->load.duration, <, fr_time_delta_from_sec(10000));
FR_INTEGER_BOUND_CHECK("parallel", inst->load.parallel, >=, 1);
static void bfd_set_timeout(bfd_state_t *session, fr_time_t when)
{
- fr_time_t now = when;
+ fr_time_t now;
fr_event_timer_delete(&session->ev_timeout);
- now += fr_time_delta_from_usec(session->detection_time);
+ now = fr_time_add(when, fr_time_delta_from_usec(session->detection_time));
if (session->detect_multi >= 2) {
uint32_t delay;
delay = session->detection_time / session->detect_multi;
delay += delay / 2;
- session->next_recv += fr_time_delta_from_usec(delay);
+ session->next_recv = fr_time_add(session->next_recv, fr_time_delta_from_usec(delay));
}
if (fr_event_timer_at(session, session->el, &session->ev_timeout,
* We've received a packet, but missed the previous one.
* Warn about it.
*/
- if ((session->detect_multi >= 2) && (session->last_recv > session->next_recv)) {
+ if ((session->detect_multi >= 2) && (fr_time_gt(session->last_recv, session->next_recv))) {
fr_radius_packet_t packet;
request_t request;
static const CONF_PARSER module_config[] = {
{ FR_CONF_OFFSET("driver", FR_TYPE_STRING, rlm_cache_config_t, driver_name), .dflt = "rlm_cache_rbtree" },
{ FR_CONF_OFFSET("key", FR_TYPE_TMPL | FR_TYPE_REQUIRED, rlm_cache_config_t, key) },
- { FR_CONF_OFFSET("ttl", FR_TYPE_UINT32, rlm_cache_config_t, ttl), .dflt = "500" },
+ { FR_CONF_OFFSET("ttl", FR_TYPE_TIME_DELTA, rlm_cache_config_t, ttl), .dflt = "500s" },
{ FR_CONF_OFFSET("max_entries", FR_TYPE_UINT32, rlm_cache_config_t, max_entries), .dflt = "0" },
/* Should be a type which matches time_t, @fixme before 2038 */
*/
if ((c->expires < fr_time_to_unix_time(request->packet->timestamp)) ||
(c->created < fr_unix_time_from_sec(inst->config.epoch))) {
- RDEBUG2("Found entry for \"%pV\", but it expired %pV seconds ago. Removing it",
+ RDEBUG2("Found entry for \"%pV\", but it expired %pV ago. Removing it",
fr_box_strvalue_len((char const *)key, key_len),
- fr_box_date(fr_time_to_unix_time(request->packet->timestamp -
- fr_time_delta_from_sec(c->expires))));
+ fr_box_time_delta(fr_time_to_unix_time(request->packet->timestamp) - c->expires));
inst->driver->expire(&inst->config, inst->driver_inst->dl_inst->data, request, handle, c->key, c->key_len);
cache_free(inst, &c);
*/
static unlang_action_t cache_insert(rlm_rcode_t *p_result,
rlm_cache_t const *inst, request_t *request, rlm_cache_handle_t **handle,
- uint8_t const *key, size_t key_len, int ttl)
+ uint8_t const *key, size_t key_len, fr_time_delta_t ttl)
{
map_t const *map = NULL;
map_t *c_map;
* All in NSEC resolution
*/
c->created = c->expires = fr_time_to_unix_time(request->packet->timestamp);
- c->expires += fr_time_delta_from_sec(ttl);
+ c->expires += ttl;
RDEBUG2("Creating new cache entry");
RETURN_MODULE_FAIL;
case CACHE_OK:
- RDEBUG2("Committed entry, TTL %d seconds", ttl);
+ RDEBUG2("Committed entry, TTL %pV seconds", fr_box_time_delta(ttl));
cache_free(inst, &c);
RETURN_MODULE_RCODE(merge ? RLM_MODULE_UPDATED : RLM_MODULE_OK);
ssize_t key_len;
rlm_rcode_t rcode = RLM_MODULE_NOOP;
- int ttl = inst->config.ttl;
+ fr_time_delta_t ttl = inst->config.ttl;
key_len = tmpl_expand((char const **)&key, (char *)buffer, sizeof(buffer),
request, inst->config.key, NULL, NULL);
expire = true;
} else if (vp->vp_int32 < 0) {
expire = true;
- ttl = -(vp->vp_int32);
+ ttl = fr_time_delta_from_sec(-(vp->vp_int32));
/* Updating the TTL */
} else {
set_ttl = true;
- ttl = vp->vp_int32;
+ ttl = fr_time_delta_from_sec(vp->vp_int32);
}
}
RDEBUG3("merge : %s", merge ? "yes" : "no");
RDEBUG3("insert : %s", insert ? "yes" : "no");
RDEBUG3("expire : %s", expire ? "yes" : "no");
- RDEBUG3("ttl : %i", ttl);
+ RDEBUG3("ttl : %pV", fr_box_time_delta(ttl));
REXDENT();
if (cache_acquire(&handle, inst, request) < 0) {
RETURN_MODULE_FAIL;
typedef struct {
char const *name; //!< Name of xlat function to register.
char const *driver_name; //!< Driver name.
- tmpl_t *key; //!< What to expand to get the value of the key.
- uint32_t ttl; //!< How long an entry is valid for.
+ tmpl_t *key; //!< What to expand to get the value of the key.
+ fr_time_delta_t ttl; //!< How long an entry is valid for.
uint32_t max_entries; //!< Maximum entries allowed.
int32_t epoch; //!< Time after which entries are considered valid.
bool stats; //!< Generate statistics.
rlm_cache_config_t config; //!< Must come first because of icky hacks.
module_instance_t *driver_inst; //!< Driver's instance data.
- rlm_cache_driver_t const *driver; //!< Driver's exported interface.
+ rlm_cache_driver_t const *driver; //!< Driver's exported interface.
fr_map_list_t maps; //!< Attribute map applied to users.
//!< and profiles.
* timeout should never be *before* the scheduled time,
* if it is, something is very broken.
*/
- if (!fr_cond_assert(fired >= *yielded)) REDEBUG("Unexpected resume time");
+ if (!fr_cond_assert(fr_time_gteq(fired, *yielded))) REDEBUG("Unexpected resume time");
unlang_interpret_mark_runnable(request);
}
* timeout should never be *before* the scheduled time,
* if it is, something is very broken.
*/
- if (!fr_cond_assert(fired > *yielded)) REDEBUG("Unexpected resume time");
+ if (!fr_cond_assert(fr_time_gt(fired, *yielded))) REDEBUG("Unexpected resume time");
unlang_interpret_mark_runnable(request);
}
static int delay_add(request_t *request, fr_time_t *resume_at, fr_time_t now,
- fr_time_t delay, bool force_reschedule, bool relative)
+ fr_time_delta_t delay, bool force_reschedule, bool relative)
{
/*
* Delay is zero (and reschedule is not forced)
* Process the delay relative to the start of packet processing
*/
if (relative) {
- *resume_at = request->packet->timestamp + delay;
+ *resume_at = fr_time_add(request->packet->timestamp, delay);
} else {
- *resume_at = now + delay;
+ *resume_at = fr_time_add(now, delay);
}
/*
* If resume_at is in the past (and reschedule is not forced), just return noop
*/
- if (!force_reschedule && (*resume_at <= now)) return 1;
+ if (!force_reschedule && fr_time_lteq(*resume_at, now)) return 1;
- if (*resume_at > now) {
- RDEBUG2("Delaying request by ~%pVs", fr_box_time_delta(*resume_at - now));
+ if (fr_time_gt(*resume_at, now)) {
+ RDEBUG2("Delaying request by ~%pVs", fr_box_time_delta(fr_time_sub(*resume_at, now)));
} else {
RDEBUG2("Rescheduling request");
}
/*
* Print how long the delay *really* was.
*/
- RDEBUG3("Request delayed by %pV", fr_box_time_delta(fr_time() - *yielded));
+ RDEBUG3("Request delayed by %pV", fr_box_time_delta(fr_time_sub(fr_time(), *yielded)));
talloc_free(yielded);
RETURN_MODULE_OK;
RETURN_MODULE_NOOP;
}
- /*
- * FIXME - Should print wallclock time
- */
RDEBUG3("Current time %pVs, resume time %pVs",
- fr_box_time_delta(*yielded_at), fr_box_time_delta(resume_at));
+ fr_box_time(*yielded_at), fr_box_time(resume_at));
if (unlang_module_timeout_add(request, _delay_done, yielded_at, resume_at) < 0) {
RPEDEBUG("Adding event failed");
UNUSED fr_value_box_list_t *in, void *rctx)
{
fr_time_t *yielded_at = talloc_get_type_abort(rctx, fr_time_t);
- fr_time_t delayed;
+ fr_time_delta_t delayed;
fr_value_box_t *vb;
- delayed = fr_time() - *yielded_at;
+ delayed = fr_time_sub(fr_time(), *yielded_at);
talloc_free(yielded_at);
MEM(vb = fr_value_box_alloc(ctx, FR_TYPE_TIME_DELTA, NULL, false));
}
yield:
- /*
- * FIXME - Should print wallclock time
- */
- RDEBUG3("Current time %pVs, resume time %pVs", fr_box_time_delta(*yielded_at), fr_box_time_delta(resume_at));
+ RDEBUG3("Current time %pVs, resume time %pVs", fr_box_time(*yielded_at), fr_box_time(resume_at));
if (unlang_xlat_event_timeout_add(request, _xlat_delay_done, yielded_at, resume_at) < 0) {
RPEDEBUG("Adding event failed");
inst->escape_func = rad_filename_make_safe;
}
- inst->ef = module_exfile_init(inst, conf, 256, 30, inst->locking, NULL, NULL);
+ inst->ef = module_exfile_init(inst, conf, 256, fr_time_delta_from_sec(30), inst->locking, NULL, NULL);
if (!inst->ef) {
cf_log_err(conf, "Failed creating log file context");
return -1;
if (vp->da == attr_eap_fast_pac_acknowledge) {
if (vp->vp_uint32 == EAP_FAST_TLV_RESULT_SUCCESS) {
code = FR_RADIUS_CODE_ACCESS_ACCEPT;
- t->pac.expires = ~((fr_time_t) 0);
+ t->pac.expires = fr_time_wrap(~fr_time_unwrap(fr_time_wrap(0)));
t->pac.expired = false;
t->stage = EAP_FAST_COMPLETE;
}
/*
* Send a new pac at ~0.6 times the lifetime.
*/
- if (!t->pac.expires || t->pac.expired ||
- t->pac.expires <= (request->packet->timestamp + fr_time_delta_from_sec((t->pac_lifetime >> 1) + (t->pac_lifetime >> 3)))) {
+	if (fr_time_eq(t->pac.expires, fr_time_wrap(0)) || t->pac.expired ||
+	    fr_time_lteq(t->pac.expires, fr_time_add(request->packet->timestamp,
+						     (t->pac_lifetime >> 1) + (t->pac_lifetime >> 3)))) {
t->pac.send = true;
}
}
int default_method;
int default_provisioning_method;
- uint32_t pac_lifetime;
+ fr_time_delta_t pac_lifetime;
char const *authority_identity;
uint8_t const *a_id;
uint8_t const *pac_opaque_key;
int stage; //!< Processing stage.
- uint32_t pac_lifetime; //!< seconds to add to current time to describe PAC lifetime
+ fr_time_delta_t pac_lifetime; //!< seconds to add to current time to describe PAC lifetime
char const *authority_identity; //!< The identity we present in the EAP-TLS
uint8_t a_id[PAC_A_ID_LENGTH]; //!< The identity we present in the EAP-TLS
char const *pac_opaque_key; //!< The key used to encrypt PAC-Opaque
{ FR_CONF_OFFSET("require_client_cert", FR_TYPE_BOOL, rlm_eap_fast_t, req_client_cert), .dflt = "no" },
- { FR_CONF_OFFSET("pac_lifetime", FR_TYPE_UINT32, rlm_eap_fast_t, pac_lifetime), .dflt = "604800" },
+ { FR_CONF_OFFSET("pac_lifetime", FR_TYPE_TIME_DELTA, rlm_eap_fast_t, pac_lifetime), .dflt = "604800" },
{ FR_CONF_OFFSET("authority_identity", FR_TYPE_STRING | FR_TYPE_REQUIRED, rlm_eap_fast_t, authority_identity) },
{ FR_CONF_OFFSET("pac_opaque_key", FR_TYPE_STRING | FR_TYPE_REQUIRED, rlm_eap_fast_t, pac_opaque_key) },
fr_assert(t->pac.type == 0);
t->pac.type = vp->vp_uint16;
} else if (vp->da == attr_eap_fast_pac_info_pac_lifetime) {
- fr_assert(t->pac.expires == 0);
- t->pac.expires = request->packet->timestamp + fr_time_delta_from_sec(vp->vp_uint32);
+ fr_assert(fr_time_eq(t->pac.expires, fr_time_wrap(0)));
+ t->pac.expires = fr_time_add(request->packet->timestamp, fr_time_delta_from_sec(vp->vp_uint32));
t->pac.expired = false;
/*
* Not sure if this is the correct attr
goto error;
}
- if (!t->pac.expires) {
+ if (fr_time_eq(t->pac.expires, fr_time_wrap(0))) {
errmsg = "PAC missing lifetime TLV";
goto error;
}
return XLAT_ACTION_FAIL;
}
- if (unlang_xlat_event_timeout_add(request, _xlat_icmp_timeout, echo, fr_time() + inst->timeout) < 0) {
+ if (unlang_xlat_event_timeout_add(request, _xlat_icmp_timeout, echo,
+ fr_time_add(fr_time(), inst->timeout)) < 0) {
RPEDEBUG("Failed adding timeout");
(void) fr_rb_delete(thread->t->tree, echo);
talloc_free(echo);
return -1;
}
- inst->file.ef = module_exfile_init(inst, conf, 256, 30, true, NULL, NULL);
+ inst->file.ef = module_exfile_init(inst, conf, 256, fr_time_delta_from_sec(30), true, NULL, NULL);
if (!inst->file.ef) {
cf_log_err(conf, "Failed creating log file context");
return -1;
#include <freeradius-devel/util/debug.h>
#include <freeradius-devel/radius/radius.h>
+DIAG_OFF(compound-token-split-by-macro) /* Perl does horrible things with macros */
#ifdef INADDR_ANY
# undef INADDR_ANY
#endif
h->status_checking = false;
u->num_replies = 0; /* Reset */
- u->retry.start = 0;
+ u->retry.start = fr_time_wrap(0);
if (u->ev) (void) fr_event_timer_delete(&u->ev);
switch (fr_retry_next(&u->retry, now)) {
case FR_RETRY_MRD:
DEBUG("%s - Reached maximum_retransmit_duration (%pVs > %pVs), failing status checks",
- h->module_name, fr_box_time_delta(now - u->retry.start), fr_box_time_delta(u->retry.config->mrd));
+ h->module_name, fr_box_time_delta(fr_time_sub(now, u->retry.start)),
+ fr_box_time_delta(u->retry.config->mrd));
goto fail;
case FR_RETRY_MRC:
* Last trunk event was a failure, be more careful about
* bringing up the connection (require multiple responses).
*/
- if ((trunk->last_failed && (trunk->last_failed > trunk->last_connected)) &&
+ if ((fr_time_gt(trunk->last_failed, fr_time_wrap(0)) && (fr_time_gt(trunk->last_failed, trunk->last_connected))) &&
(u->num_replies < inst->num_answers_to_alive)) {
/*
* Leave the timer in place. This timer is BOTH when we
DEBUG("%s - Received %u / %u replies for status check, on connection - %s",
h->module_name, u->num_replies, inst->num_answers_to_alive, h->name);
DEBUG("%s - Next status check packet will be in %pVs",
- h->module_name, fr_box_time_delta(u->retry.next - fr_time()));
+ h->module_name, fr_box_time_delta(fr_time_sub(u->retry.next, fr_time())));
/*
* Set the timer for the next retransmit.
udp_request_t *u = h->status_u;
ssize_t slen;
- if (!u->retry.start) {
+ if (fr_time_eq(u->retry.start, fr_time_wrap(0))) {
u->id = fr_rand() & 0xff; /* We don't care what the value is here */
h->status_checking = true; /* Ensure this is valid */
(void) fr_retry_init(&u->retry, fr_time(), &h->inst->parent->retry[u->code]);
/*
* Larger priority is more important.
*/
- ret = (a->priority < b->priority) - (a->priority > b->priority);
+	ret = CMP_PREFER_LARGER(a->priority, b->priority);
if (ret != 0) return ret;
/*
* Smaller timestamp (i.e. earlier) is more important.
*/
- return (a->recv_time > b->recv_time) - (a->recv_time < b->recv_time);
+ return CMP_PREFER_SMALLER(fr_time_unwrap(a->recv_time), fr_time_unwrap(b->recv_time));
}
/** Decode response packet data, extracting relevant information and validating the packet
/*
* Fixup retry times
*/
- if (u->retry.start > h->mrs_time) h->mrs_time = u->retry.start;
+ if (fr_time_gt(u->retry.start, h->mrs_time)) h->mrs_time = u->retry.start;
return DECODE_FAIL_NONE;
}
*/
memcpy(&delay, attr + 2, 4);
delay = ntohl(delay);
- delay += fr_time_delta_to_sec(now - u->recv_time);
+ delay += fr_time_delta_to_sec(fr_time_sub(now, u->recv_time));
delay = htonl(delay);
memcpy(attr + 2, &delay, 4);
break;
/*
* Revive the connection after a time.
*/
- if (fr_event_timer_at(h, el, &h->zombie_ev, now + h->inst->parent->revive_interval, revive_timeout, h) < 0) {
+ if (fr_event_timer_at(h, el, &h->zombie_ev,
+ fr_time_add(now, h->inst->parent->revive_interval), revive_timeout, h) < 0) {
ERROR("Failed inserting revive timeout for connection");
fr_trunk_connection_signal_reconnect(tconn, FR_CONNECTION_FAILED);
}
*/
if (h->status_checking || h->zombie_ev) return true;
- if (now == 0) now = fr_time();
+ if (fr_time_eq(now, fr_time_wrap(0))) now = fr_time();
/*
* We received a reply since this packet was sent, the connection isn't zombie.
*/
- if (h->last_reply >= last_sent) return false;
+ if (fr_time_gteq(h->last_reply, last_sent)) return false;
/*
* If we've seen ANY response in the allowed window, then the connection is still alive.
*/
- if (h->inst->parent->synchronous && last_sent && ((last_sent + h->inst->parent->response_window) < now)) return false;
+ if (h->inst->parent->synchronous && fr_time_gt(last_sent, fr_time_wrap(0)) &&
+ (fr_time_lt(fr_time_add(last_sent, h->inst->parent->response_window), now))) return false;
/*
* Mark the connection as inactive, but keep sending
* Queue up the status check packet. It will be sent
* when the connection is writable.
*/
- h->status_u->retry.start = 0;
+ h->status_u->retry.start = fr_time_wrap(0);
h->status_r->treq = NULL;
if (fr_trunk_request_enqueue_on_conn(&h->status_r->treq, tconn, h->status_request,
fr_trunk_connection_signal_reconnect(tconn, FR_CONNECTION_FAILED);
}
} else {
- if (fr_event_timer_at(h, el, &h->zombie_ev, now + h->inst->parent->zombie_period, zombie_timeout, h) < 0) {
+ if (fr_event_timer_at(h, el, &h->zombie_ev, fr_time_add(now, h->inst->parent->zombie_period),
+ zombie_timeout, h) < 0) {
ERROR("Failed inserting zombie timeout for connection");
fr_trunk_connection_signal_reconnect(tconn, FR_CONNECTION_FAILED);
}
case FR_RETRY_MRD:
REDEBUG("Reached maximum_retransmit_duration (%pVs > %pVs), failing request",
- fr_box_time_delta(now - u->retry.start), fr_box_time_delta(u->retry.config->mrd));
+ fr_box_time_delta(fr_time_sub(now, u->retry.start)), fr_box_time_delta(u->retry.config->mrd));
break;
case FR_RETRY_MRC:
case FR_RETRY_MRD:
REDEBUG("Reached maximum_retransmit_duration (%pVs > %pVs), failing request",
- fr_box_time_delta(now - u->retry.start), fr_box_time_delta(u->retry.config->mrd));
+ fr_box_time_delta(fr_time_sub(now, u->retry.start)), fr_box_time_delta(u->retry.config->mrd));
break;
case FR_RETRY_MRC:
* If the connection is zombie, then don't try to enqueue
* things on it!
*/
- if (check_for_zombie(el, tconn, 0, h->last_sent)) return;
+ if (check_for_zombie(el, tconn, fr_time_wrap(0), h->last_sent)) return;
/*
* Encode multiple packets in preparation
/*
* Start retransmissions from when the socket is writable.
*/
- if (!u->retry.start) {
+ if (fr_time_eq(u->retry.start, fr_time_wrap(0))) {
(void) fr_retry_init(&u->retry, fr_time(), &h->inst->parent->retry[u->code]);
fr_assert(u->retry.rt > 0);
- fr_assert(u->retry.next > 0);
+ fr_assert(fr_time_gt(u->retry.next, fr_time_wrap(0)));
}
/*
if (u->retry.count == 1) {
action = inst->parent->originate ? "Originated" : "Proxied";
h->last_sent = u->retry.start;
- if (h->first_sent <= h->last_idle) h->first_sent = h->last_sent;
+ if (fr_time_lteq(h->first_sent, h->last_idle)) h->first_sent = h->last_sent;
} else {
action = "Retransmitted";
}
} else if (u->retry.count == 1) {
- if (fr_event_timer_at(u, el, &u->ev, u->retry.start + h->inst->parent->response_window, request_timeout, treq) < 0) {
+ if (fr_event_timer_at(u, el, &u->ev,
+ fr_time_add(u->retry.start, h->inst->parent->response_window),
+ request_timeout, treq) < 0) {
RERROR("Failed inserting timeout for connection");
fr_trunk_request_signal_fail(treq);
continue;
if (u->num_replies < inst->num_answers_to_alive) {
DEBUG("Received %d / %u replies for status check, on connection - %s",
u->num_replies, inst->num_answers_to_alive, h->name);
- DEBUG("Next status check packet will be in %pVs", fr_box_time_delta(u->retry.next - now));
+ DEBUG("Next status check packet will be in %pVs", fr_box_time_delta(fr_time_sub(u->retry.next, now)));
/*
* If we're retransmitting, leave the ID,
FD_SET(sockfd, &read_fd);
if (config->query_timeout) {
- elapsed = fr_time() - start;
+ elapsed = fr_time_sub(fr_time(), start);
if (elapsed >= timeout) goto too_long;
}
inst->driver->sql_escape_func :
sql_escape_func;
- inst->ef = module_exfile_init(inst, conf, 256, 30, true, NULL, NULL);
+ inst->ef = module_exfile_init(inst, conf, 256, fr_time_delta_from_sec(30), true, NULL, NULL);
if (!inst->ef) {
cf_log_err(conf, "Failed creating log file context");
return -1;
{ NULL }
};
-static int find_next_reset(rlm_sqlcounter_t *inst, time_t timeval)
+static int find_next_reset(rlm_sqlcounter_t *inst, fr_time_t now)
{
int ret = 0;
size_t len;
unsigned int num = 1;
char last = '\0';
struct tm *tm, s_tm;
- char sCurrentTime[40], sNextTime[40];
- time_t date;
+ time_t time_s = fr_time_to_sec(now);
- tm = localtime_r(&timeval, &s_tm);
- len = strftime(sCurrentTime, sizeof(sCurrentTime), "%Y-%m-%d %H:%M:%S", tm);
- if (len == 0) *sCurrentTime = '\0';
+ tm = localtime_r(&time_s, &s_tm);
tm->tm_sec = tm->tm_min = 0;
fr_assert(inst->reset != NULL);
* Round up to the next nearest hour.
*/
tm->tm_hour += num;
- date = mktime(tm);
+ inst->reset_time = fr_time_from_sec(mktime(tm));
} else if (strcmp(inst->reset, "daily") == 0 || last == 'd') {
/*
* Round up to the next nearest day.
*/
tm->tm_hour = 0;
tm->tm_mday += num;
- date = mktime(tm);
+ inst->reset_time = fr_time_from_sec(mktime(tm));
} else if (strcmp(inst->reset, "weekly") == 0 || last == 'w') {
/*
* Round up to the next nearest week.
*/
tm->tm_hour = 0;
tm->tm_mday += (7 - tm->tm_wday) +(7*(num-1));
- date = mktime(tm);
+ inst->reset_time = fr_time_from_sec(mktime(tm));
} else if (strcmp(inst->reset, "monthly") == 0 || last == 'm') {
tm->tm_hour = 0;
tm->tm_mday = 1;
tm->tm_mon += num;
- date = mktime(tm);
+ inst->reset_time = fr_time_from_sec(mktime(tm));
} else if (strcmp(inst->reset, "never") == 0) {
- date = 0;
+ inst->reset_time = fr_time_wrap(0);
} else {
return -1;
}
- inst->reset_time = fr_time_from_sec(date);
-
- len = strftime(sNextTime, sizeof(sNextTime),"%Y-%m-%d %H:%M:%S",tm);
- if (len == 0) *sNextTime = '\0';
- DEBUG2("Current Time: %" PRId64 " [%s], Next reset %" PRId64 " [%s]",
- (int64_t) timeval, sCurrentTime, (int64_t) date, sNextTime);
+ DEBUG2("Current Time: %pV, Next reset %pV", fr_box_time(now), fr_box_time(inst->reset_time));
return ret;
}
/* I don't believe that this routine handles Daylight Saving Time adjustments
properly. Any suggestions?
*/
-static int find_prev_reset(rlm_sqlcounter_t *inst, time_t timeval)
+static int find_prev_reset(rlm_sqlcounter_t *inst, fr_time_t now)
{
int ret = 0;
-	size_t		len;
unsigned int num = 1;
char last = '\0';
struct tm *tm, s_tm;
- char sCurrentTime[40], sPrevTime[40];
+ time_t time_s = fr_time_to_sec(now);
- tm = localtime_r(&timeval, &s_tm);
- len = strftime(sCurrentTime, sizeof(sCurrentTime), "%Y-%m-%d %H:%M:%S", tm);
- if (len == 0) *sCurrentTime = '\0';
+ tm = localtime_r(&time_s, &s_tm);
tm->tm_sec = tm->tm_min = 0;
fr_assert(inst->reset != NULL);
if (!isalpha((int) last))
last = 'd';
num = atoi(inst->reset);
- DEBUG("num=%d, last=%c",num,last);
+ DEBUG("num=%d, last=%c", num, last);
}
if (strcmp(inst->reset, "hourly") == 0 || last == 'h') {
/*
* Round down to the prev nearest hour.
*/
tm->tm_hour -= num - 1;
- inst->last_reset = mktime(tm);
+ inst->last_reset = fr_time_from_sec(mktime(tm));
} else if (strcmp(inst->reset, "daily") == 0 || last == 'd') {
/*
* Round down to the prev nearest day.
*/
tm->tm_hour = 0;
tm->tm_mday -= num - 1;
- inst->last_reset = mktime(tm);
+ inst->last_reset = fr_time_from_sec(mktime(tm));
} else if (strcmp(inst->reset, "weekly") == 0 || last == 'w') {
/*
* Round down to the prev nearest week.
*/
tm->tm_hour = 0;
tm->tm_mday -= tm->tm_wday +(7*(num-1));
- inst->last_reset = mktime(tm);
+ inst->last_reset = fr_time_from_sec(mktime(tm));
} else if (strcmp(inst->reset, "monthly") == 0 || last == 'm') {
tm->tm_hour = 0;
tm->tm_mday = 1;
tm->tm_mon -= num - 1;
- inst->last_reset = mktime(tm);
+ inst->last_reset = fr_time_from_sec(mktime(tm));
} else if (strcmp(inst->reset, "never") == 0) {
- inst->reset_time = 0;
+		inst->last_reset = fr_time_wrap(0);
} else {
return -1;
}
- len = strftime(sPrevTime, sizeof(sPrevTime), "%Y-%m-%d %H:%M:%S", tm);
- if (len == 0) *sPrevTime = '\0';
- DEBUG2("Current Time: %" PRId64 " [%s], Prev reset %" PRId64 " [%s]",
- (int64_t) timeval, sCurrentTime, (int64_t) inst->last_reset, sPrevTime);
+
+ DEBUG2("Current Time: %pV, Prev reset %pV", fr_box_time(now), fr_box_time(inst->last_reset));
return ret;
}
switch (*p) {
case 'b': /* last_reset */
- snprintf(tmpdt, sizeof(tmpdt), "%" PRId64, (int64_t) inst->last_reset);
+ snprintf(tmpdt, sizeof(tmpdt), "%" PRId64, fr_time_to_sec(inst->last_reset));
strlcpy(q, tmpdt, freespace);
q += strlen(q);
p++;
break;
case 'e': /* reset_time */
- snprintf(tmpdt, sizeof(tmpdt), "%" PRId64, (int64_t) inst->reset_time);
+ snprintf(tmpdt, sizeof(tmpdt), "%" PRId64, fr_time_to_sec(inst->reset_time));
strlcpy(q, tmpdt, freespace);
q += strlen(q);
p++;
* Before doing anything else, see if we have to reset
* the counters.
*/
- if (inst->reset_time && (inst->reset_time <= fr_time_to_sec(request->packet->timestamp))) {
+	if (fr_time_neq(inst->reset_time, fr_time_wrap(0)) &&
+	    (fr_time_lteq(inst->reset_time, request->packet->timestamp))) {
/*
* Re-set the next time and prev_time for this counters range
*/
inst->last_reset = inst->reset_time;
- find_next_reset(inst, fr_time_to_sec(request->packet->timestamp));
+ find_next_reset(inst, request->packet->timestamp);
}
if (tmpl_find_vp(&limit, request, inst->limit_attr) < 0) {
/*
* Check if check item > counter
*/
- if (limit->vp_uint64 <= counter) {
+	if (limit->vp_uint64 <= counter) {
fr_pair_t *vp;
/* User is denied access, send back a reply message */
* again. Do this only for Session-Timeout.
*/
if ((tmpl_da(inst->reply_attr) == attr_session_timeout) &&
- inst->reset_time &&
- (res >= (uint64_t)(inst->reset_time - fr_time_to_sec(request->packet->timestamp)))) {
- uint64_t to_reset = inst->reset_time - fr_time_to_sec(request->packet->timestamp);
+ fr_time_gt(inst->reset_time, fr_time_wrap(0)) &&
+ ((int64_t)res >= fr_time_delta_to_sec(fr_time_sub(inst->reset_time, request->packet->timestamp)))) {
+ fr_time_delta_t to_reset = fr_time_sub(inst->reset_time, request->packet->timestamp);
- RDEBUG2("Time remaining (%" PRIu64 "s) is greater than time to reset (%" PRIu64 "s). "
- "Adding %" PRIu64 "s to reply value", to_reset, res, to_reset);
- res = to_reset + limit->vp_uint64;
+ RDEBUG2("Time remaining (%pV) is greater than time to reset (%" PRIu64 "s). "
+ "Adding %pV to reply value",
+ fr_box_time_delta(to_reset), res, fr_box_time_delta(to_reset));
+ res = fr_time_delta_to_sec(to_reset) + limit->vp_uint64;
/*
* Limit the reply attribute to the minimum of the existing value, or this new one.
static int mod_instantiate(void *instance, CONF_SECTION *conf)
{
rlm_sqlcounter_t *inst = instance;
- time_t now;
fr_assert(inst->query && *inst->query);
- now = time(NULL);
- inst->reset_time = 0;
+ inst->reset_time = fr_time_wrap(0);
- if (find_next_reset(inst, now) == -1) {
+ if (find_next_reset(inst, fr_time()) == -1) {
cf_log_err(conf, "Invalid reset '%s'", inst->reset);
return -1;
}
/*
* Discover the beginning of the current time period.
*/
- inst->last_reset = 0;
+ inst->last_reset = fr_time_wrap(0);
- if (find_prev_reset(inst, now) < 0) {
+ if (find_prev_reset(inst, fr_time()) < 0) {
cf_log_err(conf, "Invalid reset '%s'", inst->reset);
return -1;
}
talloc_free(vp);
goto raw;
}
- vp->vp_date += ((fr_time_t) DHCPV6_DATE_OFFSET) * NSEC;
+ vp->vp_date += fr_time_delta_from_sec(DHCPV6_DATE_OFFSET);
break;
case FR_TYPE_STRUCT: