goto cleanup; \
} while (0)
-static fr_event_timer_t const *fr_time_sync_ev = NULL;
+static fr_timer_t *fr_time_sync_ev = NULL;
-static void fr_time_sync_event(fr_event_list_t *el, UNUSED fr_time_t now, UNUSED void *uctx)
+static void fr_time_sync_event(fr_timer_list_t *tl, UNUSED fr_time_t now, UNUSED void *uctx)
{
fr_time_delta_t when = fr_time_delta_from_sec(1);
- (void) fr_event_timer_in(el, el, &fr_time_sync_ev, when, fr_time_sync_event, NULL);
+ (void) fr_timer_in(tl, tl, &fr_time_sync_ev, when, false, fr_time_sync_event, NULL);
(void) fr_time_sync();
}
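/*
 * Editor's note (not part of the patch): every hunk in this diff applies the
 * same mechanical conversion, summarised here from the call sites below. The
 * precise meaning of the new boolean argument isn't stated in the diff; most
 * call sites pass false, while the exec timeout and systemd watchdog timers
 * pass true.
 *
 * Old: static fr_event_timer_t const *ev;
 *      fr_event_timer_in(ctx, el, &ev, delta, callback, uctx);
 *      fr_event_timer_at(ctx, el, &ev, when, callback, uctx);
 *      static void callback(fr_event_list_t *el, fr_time_t now, void *uctx);
 *
 * New: static fr_timer_t *ev;
 *      fr_timer_in(ctx, tl, &ev, delta, <bool>, callback, uctx);
 *      fr_timer_at(ctx, tl, &ev, when, <bool>, callback, uctx);
 *      static void callback(fr_timer_list_t *tl, fr_time_t now, void *uctx);
 *
 * where tl is a fr_timer_list_t, typically reached from an event list as
 * el->tl, and fr_event_timer_delete(&ev) becomes fr_timer_delete(&ev).
 */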
#ifndef NDEBUG
/** Encourage the server to exit after a period of time
*
- * @param[in] el The main loop.
+ * @param[in] tl The main loop's timer list.
* @param[in] now Current time. Should be 0 when adding the event.
* @param[in] uctx Pointer to a fr_time_delta_t indicating how long
* the server should run before exit.
*/
-static void fr_exit_after(fr_event_list_t *el, fr_time_t now, void *uctx)
+static void fr_exit_after(fr_timer_list_t *tl, fr_time_t now, void *uctx)
{
- static fr_event_timer_t const *ev;
+ static fr_timer_t *ev;
fr_time_delta_t exit_after = *(fr_time_delta_t *)uctx;
if (fr_time_eq(now, fr_time_wrap(0))) {
- if (fr_event_timer_in(el, el, &ev, exit_after, fr_exit_after, uctx) < 0) {
+ if (fr_timer_in(tl, tl, &ev, exit_after, false, fr_exit_after, uctx) < 0) {
PERROR("%s: Failed inserting exit event", program);
}
return;
}
DEBUG("Global memory protected");
- fr_time_sync_event(main_loop_event_list(), fr_time(), NULL);
+ fr_time_sync_event(main_loop_event_list()->tl, fr_time(), NULL);
#ifndef NDEBUG
- if (fr_time_delta_ispos(exit_after)) fr_exit_after(main_loop_event_list(), fr_time_wrap(0), &exit_after);
+ if (fr_time_delta_ispos(exit_after)) fr_exit_after(main_loop_event_list()->tl, fr_time_wrap(0), &exit_after);
#endif
/*
* Process requests until HUP or exit.
*/
/** Process stats for a single interval
*
*/
-static void rs_stats_process(fr_event_list_t *el, fr_time_t now_t, void *ctx)
+static void rs_stats_process(fr_timer_list_t *tl, fr_time_t now_t, void *ctx)
{
size_t i;
size_t rs_codes_len = (NUM_ELEMENTS(rs_useful_codes));
}
{
- static fr_event_timer_t const *event;
+ static fr_timer_t *event;
now.tv_sec += conf->stats.interval;
now.tv_usec = 0;
- if (fr_event_timer_at(NULL, el, &event,
- fr_time_from_timeval(&now), rs_stats_process, ctx) < 0) {
+ if (fr_timer_at(NULL, tl, &event,
+ fr_time_from_timeval(&now),
+ false, rs_stats_process, ctx) < 0) {
ERROR("Failed inserting stats interval event");
}
}
static int rs_install_stats_processor(rs_stats_t *stats, fr_event_list_t *el,
fr_pcap_t *in, struct timeval *now, bool live)
{
- static fr_event_timer_t const *event;
+ static fr_timer_t *event;
static rs_update_t update;
memset(&update, 0, sizeof(update));
rs_tv_add_ms(now, conf->stats.timeout, &(stats->quiet));
}
- if (fr_event_timer_at(NULL, events, (void *) &event,
- fr_time_from_timeval(now), rs_stats_process, &update) < 0) {
+ if (fr_timer_at(NULL, events->tl, &event,
+ fr_time_from_timeval(now),
+ false, rs_stats_process, &update) < 0) {
ERROR("Failed inserting stats event");
return -1;
}
}
if (request->event) {
- ret = fr_event_timer_delete(&request->event);
+ ret = fr_timer_delete(&request->event);
if (ret < 0) {
fr_perror("Failed deleting timer");
RS_ASSERT(0 == 1);
talloc_free(request);
}
-static void _rs_event(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *ctx)
+static void _rs_event(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *ctx)
{
rs_request_t *request = talloc_get_type_abort(ctx, rs_request_t);
- request->event = NULL;
rs_packet_cleanup(request);
}
*/
fr_pair_list_free(&original->link_vps);
fr_packet_free(&original->linked);
- fr_event_timer_delete(&original->event);
+ fr_timer_delete(&original->event);
/*
* ...nope it's the first response to a request.
*/
original->linked = talloc_steal(original, packet);
fr_pair_list_append(&original->link_vps, &decoded); /* Move the vps over */
rs_tv_add_ms(&header->ts, conf->stats.timeout, &original->when);
- if (fr_event_timer_at(NULL, event->list, &original->event,
- fr_time_from_timeval(&original->when), _rs_event, original) < 0) {
+ if (fr_timer_at(original, event->list->tl, &original->event,
+ fr_time_from_timeval(&original->when),
+ false, _rs_event, original) < 0) {
REDEBUG("Failed inserting new event");
/*
* Delete the original request/event, it's no longer valid
*/
fr_pair_list_append(&original->expect_vps, &search.expect_vps);
/* Disarm the timer for the cleanup event for the original request */
- fr_event_timer_delete(&original->event);
+ fr_timer_delete(&original->event);
/*
* ...nope it's a new request.
*/
*/
original->packet->timestamp = fr_time_from_timeval(&header->ts);
rs_tv_add_ms(&header->ts, conf->stats.timeout, &original->when);
- if (fr_event_timer_at(NULL, event->list, &original->event,
- fr_time_from_timeval(&original->when), _rs_event, original) < 0) {
+ if (fr_timer_at(original, event->list->tl, &original->event,
+ fr_time_from_timeval(&original->when),
+ false, _rs_event, original) < 0) {
REDEBUG("Failed inserting new event");
talloc_free(original);
do {
now = fr_time_from_timeval(&header->ts);
- } while (fr_event_timer_run(el, &now) == 1);
+ } while (fr_timer_list_run(el->tl, &now) == 1);
count++;
rs_packet_process(count, event, header, data);
/** Exit the event loop after a given timeout.
*
*/
-static void timeout_event(fr_event_list_t *el, UNUSED fr_time_t now_t, UNUSED void *ctx)
+static void timeout_event(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now_t, void *ctx)
{
- fr_event_loop_exit(el, 1);
+ fr_event_loop_exit(talloc_get_type_abort(ctx, fr_event_list_t), 1);
}
/** Re-open the collectd socket
*
*/
-static void rs_collectd_reopen(fr_event_list_t *el, fr_time_t now, UNUSED void *ctx)
+static void rs_collectd_reopen(fr_timer_list_t *tl, fr_time_t now, UNUSED void *ctx)
{
- static fr_event_timer_t const *event;
+ static fr_timer_t *event;
if (rs_stats_collectd_open(conf) == 0) {
DEBUG2("Stats output socket (re)opened");
return;
}
ERROR("Will attempt to re-establish connection in %i ms", RS_SOCKET_REOPEN_DELAY);
- if (fr_event_timer_at(NULL, el, &event,
- fr_time_add(now, fr_time_delta_from_msec(RS_SOCKET_REOPEN_DELAY)),
- rs_collectd_reopen, el) < 0) {
+ if (fr_timer_at(NULL, tl, &event,
+ fr_time_add(now, fr_time_delta_from_msec(RS_SOCKET_REOPEN_DELAY)),
+ false, rs_collectd_reopen, NULL) < 0) {
ERROR("Failed inserting re-open event");
RS_ASSERT(0);
}
switch (sig) {
#ifdef HAVE_COLLECTDC_H
case SIGPIPE:
- rs_collectd_reopen(list, fr_time(), list);
+ rs_collectd_reopen(list->tl, fr_time(), list);
break;
#else
case SIGPIPE:
int c;
unsigned int timeout = 0;
- fr_event_timer_t const *timeout_ev = NULL;
+ fr_timer_t *timeout_ev = NULL;
char const *raddb_dir = RADDBDIR;
char const *dict_dir = DICTDIR;
TALLOC_CTX *autofree;
}
if (timeout) {
- if (fr_event_timer_in(NULL, events, &timeout_ev, fr_time_delta_from_sec(timeout),
- timeout_event, NULL) < 0) {
+ if (fr_timer_in(NULL, events->tl, &timeout_ev, fr_time_delta_from_sec(timeout),
+ false, timeout_event, events) < 0) {
ERROR("Failed inserting timeout event");
}
}
*/
typedef struct {
uint64_t id; //!< Monotonically increasing packet counter.
- fr_event_timer_t const *event; //!< Event created when we received the original request.
+ fr_timer_t *event; //!< Event created when we received the original request.
bool logged; //!< Whether any messages regarding this request were logged.
struct timeval when; //!< Time when the packet was received, or next time an event
//!< is scheduled.
fr_pcap_t *in; //!< PCAP handle the original request was received on.
- fr_packet_t *packet; //!< The original packet.
+ fr_packet_t *packet; //!< The original packet.
fr_pair_list_t packet_vps;
- fr_packet_t *expect; //!< Request/response.
+ fr_packet_t *expect; //!< Request/response.
fr_pair_list_t expect_vps;
- fr_packet_t *linked; //!< The subsequent response or forwarded request the packet
+ fr_packet_t *linked; //!< The subsequent response or forwarded request the packet
//!< was linked against.
fr_pair_list_t link_vps; //!< fr_pair_ts used to link retransmissions.
return request;
}
-static void cancel_request(UNUSED fr_event_list_t *el, UNUSED fr_time_t when, void *uctx)
+static void cancel_request(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t when, void *uctx)
{
request_t *request = talloc_get_type_abort(uctx, request_t);
unlang_interpret_signal(request, FR_SIGNAL_CANCEL);
fr_pair_list_t filter_vps;
bool xlat_only = false;
fr_event_list_t *el = NULL;
- fr_event_timer_t const *cancel_timer = NULL;
+ fr_timer_t *cancel_timer = NULL;
fr_client_t *client = NULL;
fr_dict_t *dict = NULL;
fr_dict_t const *dict_check;
}
if (count == 1) {
- fr_event_timer_in(request, el, &cancel_timer, config->max_request_time, cancel_request, request);
+ fr_timer_in(request, el->tl, &cancel_timer, config->max_request_time, false, cancel_request, request);
unlang_interpret_synchronous(el, request);
} else {
}
#endif
- fr_event_timer_in(request, el, &cancel_timer, config->max_request_time, cancel_request, request);
+ fr_timer_in(request, el->tl, &cancel_timer, config->max_request_time, false, cancel_request, request);
unlang_interpret_synchronous(el, request);
talloc_free(request);
#ifndef NDEBUG
# define NDEBUG_LOCATION_ARGS char const *file, int line,
# define NDEBUG_LOCATION_VALS file, line,
+# define NDEBUG_LOCATION_FMT "%s[%d]: "
# define NDEBUG_LOCATION_EXP __FILE__, __LINE__,
# define NDEBUG_LOCATION_NONNULL(_num) ((_num) + 2)
#else
# define NDEBUG_LOCATION_ARGS
# define NDEBUG_LOCATION_VALS
+# define NDEBUG_LOCATION_FMT ""
# define NDEBUG_LOCATION_EXP
# define NDEBUG_LOCATION_NONNULL(_num) (_num)
#endif
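/*
 * Hypothetical illustration (not in this patch) of how the added
 * NDEBUG_LOCATION_FMT composes with the existing ARGS/VALS macros: in debug
 * builds the caller's file and line are threaded through and prefixed onto
 * the message; with NDEBUG all three expand to nothing.
 */
static int timer_insert_fail(NDEBUG_LOCATION_ARGS int fd) /* hypothetical helper */
{
/* !NDEBUG: fr_strerror_printf("%s[%d]: " "failed on fd %d", file, line, fd) */
fr_strerror_printf(NDEBUG_LOCATION_FMT "failed on fd %d", NDEBUG_LOCATION_VALS fd);
return -1;
}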
fr_bio_dedup_config_t config;
- fr_event_timer_t const *ev;
+ fr_timer_t *ev;
/*
* The "first" entry is cached here so that we can detect when it changes. The insert / delete
FR_DLIST_HEAD(fr_bio_dedup_list) free; //!< free list
};
-static void fr_bio_dedup_timer(UNUSED fr_event_list_t *el, fr_time_t now, void *uctx);
+static void fr_bio_dedup_timer(UNUSED fr_timer_list_t *tl, fr_time_t now, void *uctx);
static ssize_t fr_bio_dedup_write(fr_bio_t *bio, void *packet_ctx, void const *buffer, size_t size);
static ssize_t fr_bio_dedup_blocked(fr_bio_dedup_t *my, fr_bio_dedup_entry_t *item, ssize_t rcode);
static void fr_bio_dedup_release(fr_bio_dedup_t *my, fr_bio_dedup_entry_t *item, fr_bio_dedup_release_reason_t reason);
case FR_BIO_DEDUP_STATE_ACTIVE:
/*
* If we're not writing to the socket, just insert the packet into the pending list.
- */
+ */
if (my->bio.write != fr_bio_dedup_write) {
(void) fr_bio_dedup_list_remove(&my->active, item);
fr_bio_dedup_list_insert_tail(&my->pending, item);
/*
* Update the timer. This should never fail.
*/
- if (fr_event_timer_at(my, my->el, &my->ev, first->expires, fr_bio_dedup_timer, my) < 0) return -1;
+ if (fr_timer_at(my, my->el->tl, &my->ev, first->expires, false, fr_bio_dedup_timer, my) < 0) return -1;
my->first = first;
return 0;
*
* @todo - expire items from the pending list, too
*/
-static void fr_bio_dedup_timer(UNUSED fr_event_list_t *el, fr_time_t now, void *uctx)
+static void fr_bio_dedup_timer(UNUSED fr_timer_list_t *tl, fr_time_t now, void *uctx)
{
fr_bio_dedup_t *my = talloc_get_type_abort(uctx, fr_bio_dedup_t);
fr_bio_dedup_entry_t *item;
*/
next = fr_bio_next(&my->bio);
fr_assert(next != NULL);
-
+
/*
* The caller is trying to flush partial data. But we don't have any partial data, so just call
* the next bio to flush it.
/** We have a timeout on the connection
*
*/
-static void fr_bio_fd_el_timeout(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+static void fr_bio_fd_el_timeout(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
{
fr_bio_fd_t *my = talloc_get_type_abort(uctx, fr_bio_fd_t);
* Set the timeout callback if asked.
*/
if (timeout_cb) {
- if (fr_event_timer_in(my, el, &my->connect.ev, *timeout, fr_bio_fd_el_timeout, my) < 0) {
+ if (fr_timer_in(my, el->tl, &my->connect.ev, *timeout, false, fr_bio_fd_el_timeout, my) < 0) {
goto error;
}
}
* We've picked a random port in what is hopefully a large range. If that works, we're
* done.
*/
- if (bind(my->info.socket.fd, (struct sockaddr *) &salocal, salen) >= 0) goto done;
+ if (bind(my->info.socket.fd, (struct sockaddr *) &salocal, salen) == 0) goto done;
/*
* Hunt & peck. Which is horrible.
sin->sin_port = htons(my->info.cfg->src_port_start + current);
- if (bind(my->info.socket.fd, (struct sockaddr *) &salocal, salen) >= 0) goto done;
+ if (bind(my->info.socket.fd, (struct sockaddr *) &salocal, salen) == 0) goto done;
}
- fr_strerror_const("There are no open ports between 'src_port_start' and 'src_port_end'");
+ /*
+ * The error is a good hint at _why_ we failed to bind.
+ * We expect errno to be EADDRINUSE, anything else is a surprise.
+ */
+ fr_strerror_printf("Failed binding port between 'src_port_start' and 'src_port_end': %s", fr_syserror(errno));
return -1;
}
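/*
 * Hypothetical helper (not in this patch) showing why the preserved errno
 * matters: it lets the hunt loop above distinguish the expected EADDRINUSE
 * ("port taken, try the next one") from errors retrying can never fix.
 * Assumes <errno.h> and <sys/socket.h>.
 */
static int try_bind(int fd, struct sockaddr const *sa, socklen_t salen)
{
if (bind(fd, sa, salen) == 0) return 0; /* bind(2) returns 0 on success, -1 on error */
if (errno == EADDRINUSE) return 1; /* expected failure: caller tries the next port */
return -1; /* EACCES, EINVAL, ...: report via fr_syserror(errno) */
}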
fr_bio_fd_info_t info;
struct {
- fr_bio_callback_t success; //!< for fr_bio_fd_connect()
- fr_bio_callback_t error; //!< for fr_bio_fd_connect()
- fr_bio_callback_t timeout; //!< for fr_bio_fd_connect()
- fr_event_list_t *el; //!< for fr_bio_fd_connect()
- fr_event_timer_t const *ev; //!< for fr_bio_fd_connect()
+ fr_bio_callback_t success; //!< for fr_bio_fd_connect()
+ fr_bio_callback_t error; //!< for fr_bio_fd_connect()
+ fr_bio_callback_t timeout; //!< for fr_bio_fd_connect()
+ fr_event_list_t *el; //!< for fr_bio_fd_connect()
+ fr_timer_t *ev; //!< for fr_bio_fd_connect()
} connect;
int max_tries; //!< how many times we retry on EINTR
fr_bio_packet_cb_funcs_t cb;
- fr_event_timer_t const *ev; //!< connection timeout
+ fr_timer_t *ev; //!< connection timeout
bool connected;
bool write_blocked;
ssize_t error;
bool all_used; //!< blocked due to no free entries
- fr_event_timer_t const *ev; //!< we only need one timer event: next time we do something
+ fr_timer_t *ev; //!< we only need one timer event: next time we do something
/*
* The first item is cached here so that we can detect when it changes. The insert / delete
FR_DLIST_HEAD(fr_bio_retry_list) free; //!< free lists are better than memory fragmentation
};
-static void fr_bio_retry_timer(UNUSED fr_event_list_t *el, fr_time_t now, void *uctx);
-static void fr_bio_retry_expiry_timer(UNUSED fr_event_list_t *el, fr_time_t now, void *uctx);
+static void fr_bio_retry_timer(UNUSED fr_timer_list_t *tl, fr_time_t now, void *uctx);
+static void fr_bio_retry_expiry_timer(UNUSED fr_timer_list_t *tl, fr_time_t now, void *uctx);
static ssize_t fr_bio_retry_write(fr_bio_t *bio, void *packet_ctx, void const *buffer, size_t size);
static ssize_t fr_bio_retry_save_write(fr_bio_retry_t *my, fr_bio_retry_entry_t *item, ssize_t rcode);
/*
* Update the timer. This should never fail.
*/
- if (fr_event_timer_at(my, my->info.el, &my->ev, first->retry.end, fr_bio_retry_expiry_timer, my) < 0) return -1;
+ if (fr_timer_at(my, my->info.el->tl, &my->ev, first->retry.end, false, fr_bio_retry_expiry_timer, my) < 0) return -1;
my->next_retry_item = first;
return 0;
/*
* Update the timer. This should never fail.
*/
- if (fr_event_timer_at(my, my->info.el, &my->ev, first->retry.next, fr_bio_retry_timer, my) < 0) return -1;
+ if (fr_timer_at(my, my->info.el->tl, &my->ev, first->retry.next, false, fr_bio_retry_timer, my) < 0) return -1;
my->next_retry_item = first;
return 0;
* when the socket isn't blocked. But the caller might not pay attention to those issues.
*/
if (my->partial) return 0;
-
+
/*
* There must be a next bio.
*/
/** Run an expiry timer event.
*
*/
-static void fr_bio_retry_expiry_timer(UNUSED fr_event_list_t *el, fr_time_t now, void *uctx)
+static void fr_bio_retry_expiry_timer(UNUSED fr_timer_list_t *tl, fr_time_t now, void *uctx)
{
fr_bio_retry_t *my = talloc_get_type_abort(uctx, fr_bio_retry_t);
fr_bio_retry_entry_t *item;
/** Run a timer event. Usually to write out another packet.
*
*/
-static void fr_bio_retry_timer(UNUSED fr_event_list_t *el, fr_time_t now, void *uctx)
+static void fr_bio_retry_timer(UNUSED fr_timer_list_t *tl, fr_time_t now, void *uctx)
{
ssize_t rcode;
fr_bio_retry_t *my = talloc_get_type_abort(uctx, fr_bio_retry_t);
*/
next = fr_bio_next(&my->bio);
fr_assert(next != NULL);
-
+
/*
* The caller is trying to flush partial data. But we don't have any partial data, so just call
* the next bio to flush it.
fr_assert(item != NULL);
fr_assert(item->retry.replies == 0);
fr_assert(item != my->partial);
-
+
/*
* Track when the "most recently sent" packet has a reply. This metric is better than most
* others for judging the liveliness of the destination.
return 1;
}
-/** Set a per-packet retry config
+/** Set a per-packet retry config
*
* This function should be called from the #fr_bio_retry_sent_t callback to set a unique retry timer for this
* packet. If no retry configuration is set, then the main one from the alloc() function is used.
return item;
}
-
*/
typedef struct {
fr_event_list_t *el; //!< Event list servicing I/O events.
- fr_event_timer_t const *ev; //!< Multi-Handle timer.
+ fr_timer_t *ev; //!< Multi-Handle timer.
uint64_t transfers; //!< How many transfers are current in progress.
CURLM *mandle; //!< The multi handle.
} fr_curl_handle_t;
/** libcurl's timer expired
*
- * @param[in] el the timer was inserted into.
+ * @param[in] tl the timer was inserted into.
* @param[in] now The current time according to the event loop.
* @param[in] uctx The fr_curl_handle_t specific to this thread.
*/
-static void _fr_curl_io_timer_expired(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+static void _fr_curl_io_timer_expired(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
{
fr_curl_handle_t *mhandle = talloc_get_type_abort(uctx, fr_curl_handle_t);
CURLM *mandle = mhandle->mandle;
fr_curl_handle_t *mhandle = talloc_get_type_abort(ctx, fr_curl_handle_t);
if (timeout_ms < 0) {
- if (fr_event_timer_delete(&mhandle->ev) < 0) {
+ if (fr_timer_delete(&mhandle->ev) < 0) {
PERROR("Failed deleting multi-handle timer");
return -1;
}
* unpleasant recursive behavior that immediately calls another call to the callback
* with a zero timeout...
*
- * Setting a timeout of zero when calling fr_event_timer_in should result in the event
+ * Setting a timeout of zero when calling fr_timer_in should result in the event
* repeating at most twice during one iteration of the event loop.
*
* In a previous version of this code we called curl_multi_socket_action immediately
* if timeout_ms was 0. It was observed that this led to this callback being called
* ~665 times per request which is why we no longer do that.
*/
- if (fr_event_timer_in(mhandle, mhandle->el, &mhandle->ev,
- fr_time_delta_from_msec(timeout_ms), _fr_curl_io_timer_expired, mhandle) < 0) return -1;
+ if (fr_timer_in(mhandle, mhandle->el->tl, &mhandle->ev,
+ fr_time_delta_from_msec(timeout_ms),
+ false, _fr_curl_io_timer_expired, mhandle) < 0) return -1;
return 0;
}
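/*
 * For context (standard libcurl multi API, not shown in this diff): the
 * function patched above is the CURLMOPT_TIMERFUNCTION handler, which libcurl
 * calls whenever its soonest timeout changes (timeout_ms == -1 means "delete
 * the timer"). A minimal registration sketch, with hypothetical names and
 * assuming <curl/curl.h>:
 */
static int fr_curl_io_timer_modify(CURLM *multi, long timeout_ms, void *ctx); /* the handler above, name assumed */

static void fr_curl_io_timer_register(CURLM *multi, void *uctx) /* hypothetical helper */
{
curl_multi_setopt(multi, CURLMOPT_TIMERFUNCTION, fr_curl_io_timer_modify);
curl_multi_setopt(multi, CURLMOPT_TIMERDATA, uctx); /* becomes the ctx argument above */
}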
bool header; //!< for printing statistics
fr_time_t next; //!< The next time we're supposed to send a packet
- fr_event_timer_t const *ev;
+ fr_timer_t *ev;
};
fr_load_t *fr_load_generator_create(TALLOC_CTX *ctx, fr_event_list_t *el, fr_load_config_t *config,
}
}
-static void load_timer(fr_event_list_t *el, fr_time_t now, void *uctx)
+static void load_timer(fr_timer_list_t *tl, fr_time_t now, void *uctx)
{
fr_load_t *l = uctx;
fr_time_delta_t delta;
/*
* Set the timer for the next packet.
*/
- if (fr_event_timer_in(l, el, &l->ev, delta, load_timer, l) < 0) {
+ if (fr_timer_in(l, tl, &l->ev, delta, false, load_timer, l) < 0) {
l->state = FR_LOAD_STATE_DRAINING;
return;
}
l->delta = fr_time_delta_div(fr_time_delta_from_sec(l->config->parallel), fr_time_delta_wrap(l->pps));
l->next = fr_time_add(l->step_start, l->delta);
- load_timer(l->el, l->step_start, l);
+ load_timer(l->el->tl, l->step_start, l);
return 0;
}
{
if (!l->ev) return 0;
- return fr_event_timer_delete(&l->ev);
+ return fr_timer_delete(&l->ev);
}
fr_io_instance_t const *inst; //!< parent instance for master IO handler
fr_io_thread_t *thread;
- fr_event_timer_t const *ev; //!< when we clean up the client
+ fr_timer_t *ev; //!< when we clean up the client
fr_rb_tree_t *table; //!< tracking table for packets
fr_heap_t *pending; //!< pending packets for this client
static int track_free(fr_io_track_t *track)
{
- if (track->ev) (void) fr_event_timer_delete(&track->ev);
+ if (track->ev) (void) fr_timer_delete(&track->ev);
talloc_free_children(track);
* struct while the packet is in the outbound
* queue.
*/
- if (old->ev) (void) fr_event_timer_delete(&old->ev);
+ if (old->ev) (void) fr_timer_delete(&old->ev);
return old;
}
if (!fr_rb_delete(client->table, old)) {
fr_assert(0);
}
- if (old->ev) (void) fr_event_timer_delete(&old->ev);
+ if (old->ev) (void) fr_timer_delete(&old->ev);
talloc_set_destructor(old, track_free);
}
-static void client_expiry_timer(fr_event_list_t *el, fr_time_t now, void *uctx)
+static void client_expiry_timer(fr_timer_list_t *tl, fr_time_t now, void *uctx)
{
fr_io_client_t *client = talloc_get_type_abort(uctx, fr_io_client_t);
fr_io_instance_t const *inst;
/*
* No timer list? We don't need to expire the client.
*/
- if (!el) return;
+ if (!tl) return;
// @todo - print out what we plan on doing next
connection = client->connection;
delay = inst->check_interval;
reset_timer:
- if (fr_event_timer_in(client, el, &client->ev,
- delay, client_expiry_timer, client) < 0) {
+ if (fr_timer_in(client, tl, &client->ev,
+ delay, false, client_expiry_timer, client) < 0) {
ERROR("proto_%s - Failed adding timeout for dynamic client %s. It will be permanent!",
inst->app_io->common.name, client->radclient->shortname);
return;
/*
* Expire cached packets after cleanup_delay time
*/
-static void packet_expiry_timer(fr_event_list_t *el, fr_time_t now, void *uctx)
+static void packet_expiry_timer(fr_timer_list_t *tl, fr_time_t now, void *uctx)
{
fr_io_track_t *track = talloc_get_type_abort(uctx, fr_io_track_t);
fr_io_client_t *client = track->client;
* will be cleaned up when the timer
* fires.
*/
- if (fr_event_timer_at(track, el, &track->ev,
- track->expires, packet_expiry_timer, track) == 0) {
+ if (fr_timer_at(track, tl, &track->ev,
+ track->expires,
+ false, packet_expiry_timer, track) == 0) {
DEBUG("proto_%s - cleaning up request in %.6fs", inst->app_io->common.name,
fr_time_delta_unwrap(inst->cleanup_delay) / (double)NSEC);
return;
* the client.
*/
if (client->packets == 0) {
- client_expiry_timer(el, now, client);
+ client_expiry_timer(tl, now, client);
}
}
buffer, buffer_len, written);
if (packet_len <= 0) {
track->discard = true;
- packet_expiry_timer(el, fr_time_wrap(0), track);
+ packet_expiry_timer(el->tl, fr_time_wrap(0), track);
return packet_len;
}
* On dedup this also extends the timer.
*/
setup_timer:
- packet_expiry_timer(el, fr_time_wrap(0), track);
+ packet_expiry_timer(el->tl, fr_time_wrap(0), track);
return buffer_len;
}
*/
if (connection && (inst->ipproto == IPPROTO_UDP)) {
connection = fr_io_connection_alloc(inst, thread, client, -1, connection->address, connection);
- client_expiry_timer(el, fr_time_wrap(0), connection->client);
+ client_expiry_timer(el->tl, fr_time_wrap(0), connection->client);
errno = ECONNREFUSED;
return -1;
* expiry timer, which will close and free the
* connection.
*/
- client_expiry_timer(el, fr_time_wrap(0), client);
+ client_expiry_timer(el->tl, fr_time_wrap(0), client);
return buffer_len;
}
* timed out, so there's nothing more to do. In that case, set up the expiry timers.
*/
if (client->packets == 0) {
- client_expiry_timer(el, fr_time_wrap(0), client);
+ client_expiry_timer(el->tl, fr_time_wrap(0), client);
}
reread:
typedef struct fr_io_track_s {
fr_rb_node_t node; //!< rbtree node in the tracking tree.
- fr_event_timer_t const *ev; //!< when we clean up this tracking entry
+ fr_timer_t *ev; //!< when we clean up this tracking entry
fr_time_t timestamp; //!< when this packet was received
fr_time_t expires; //!< when this packet expires
int packets; //!< number of packets using this entry
fr_schedule_child_status_t status; //!< status of the worker
fr_network_t *nr; //!< the receive data structure
- fr_event_timer_t const *ev; //!< timer for stats_interval
+ fr_timer_t *ev; //!< timer for stats_interval
} fr_schedule_network_t;
}
-static void stats_timer(fr_event_list_t *el, fr_time_t now, void *uctx)
+static void stats_timer(fr_timer_list_t *tl, fr_time_t now, void *uctx)
{
fr_schedule_network_t *sn = talloc_get_type_abort(uctx, fr_schedule_network_t);
fr_network_stats_log(sn->nr, sn->sc->log);
- (void) fr_event_timer_at(sn, el, &sn->ev, fr_time_add(now, sn->sc->config->stats_interval), stats_timer, sn);
+ (void) fr_timer_at(sn, tl, &sn->ev, fr_time_add(now, sn->sc->config->stats_interval), false, stats_timer, sn);
}
/** Initialize and run the network thread.
* Print out statistics for this network IO handler.
*/
if (fr_time_delta_ispos(sc->config->stats_interval)) {
- (void) fr_event_timer_in(sn, el, &sn->ev, sn->sc->config->stats_interval, stats_timer, sn);
+ (void) fr_timer_in(sn, el->tl, &sn->ev, sn->sc->config->stats_interval, false, stats_timer, sn);
}
/*
* Call the main event processing loop of the network
fr_time_t checked_timeout; //!< when we last checked the tails of the queues
- fr_event_timer_t const *ev_cleanup; //!< timer for max_request_time
+ fr_timer_t *ev_cleanup; //!< timer for max_request_time
fr_worker_channel_t *channel; //!< list of channels
};
static void worker_request_bootstrap(fr_worker_t *worker, fr_channel_data_t *cd, fr_time_t now);
static void worker_send_reply(fr_worker_t *worker, request_t *request, bool do_not_respond, fr_time_t now);
-static void worker_max_request_time(UNUSED fr_event_list_t *el, UNUSED fr_time_t when, void *uctx);
+static void worker_max_request_time(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t when, void *uctx);
static void worker_max_request_timer(fr_worker_t *worker);
/** Callback which handles a message being received on the worker side.
* thread more than max_request_time seconds ago. In the interest of not adding a
* timer for every packet, the requests are given a 1 second leeway.
*
- * @param[in] el the worker's event list
+ * @param[in] tl the worker's timer list.
* @param[in] when the current time
* @param[in] uctx the fr_worker_t.
*/
-static void worker_max_request_time(UNUSED fr_event_list_t *el, UNUSED fr_time_t when, void *uctx)
+static void worker_max_request_time(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t when, void *uctx)
{
fr_time_t now = fr_time();
request_t *request;
cleanup = fr_time_add(request->async->recv_time, worker->config.max_request_time);
DEBUG2("Resetting cleanup timer to +%pV", fr_box_time_delta(worker->config.max_request_time));
- if (fr_event_timer_at(worker, worker->el, &worker->ev_cleanup,
- cleanup, worker_max_request_time, worker) < 0) {
+ if (fr_timer_at(worker, worker->el->tl, &worker->ev_cleanup,
+ cleanup, false, worker_max_request_time, worker) < 0) {
ERROR("Failed inserting max_request_time timer");
}
}
fr_ldap_directory_t *directory; //!< The type of directory we're connected to.
trunk_t *trunk; //!< Connection trunk
fr_ldap_thread_t *t; //!< Thread this connection is associated with
- fr_event_timer_t const *ev; //!< Event to close the thread when it has been idle.
+ fr_timer_t *ev; //!< Event to close the thread when it has been idle.
} fr_ldap_thread_trunk_t;
typedef struct fr_ldap_referral_s fr_ldap_referral_t;
trunk_request_t *treq; //!< Trunk request this query is associated with
fr_ldap_connection_t *ldap_conn; //!< LDAP connection this query is running on.
- fr_event_timer_t const *ev; //!< Event for timing out the query
+ fr_timer_t *ev; //!< Event for timing out the query
char **referral_urls; //!< Referral results to follow
fr_dlist_head_t referrals; //!< List of parsed referrals
/** Callback for closing idle LDAP trunk
*
*/
-static void _ldap_trunk_idle_timeout(fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+static void _ldap_trunk_idle_timeout(fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
{
fr_ldap_thread_trunk_t *ttrunk = talloc_get_type_abort(uctx, fr_ldap_thread_trunk_t);
/*
* There are still pending queries - insert a new event
*/
- (void) fr_event_timer_in(ttrunk, el, &ttrunk->ev, ttrunk->t->config->idle_timeout,
- _ldap_trunk_idle_timeout, ttrunk);
+ (void) fr_timer_in(ttrunk, tl, &ttrunk->ev, ttrunk->t->config->idle_timeout,
+ false, _ldap_trunk_idle_timeout, ttrunk);
}
}
/*
* Reset the idle timeout event
*/
- (void) fr_event_timer_in(ttrunk, el, &ttrunk->ev,
- ttrunk->t->config->idle_timeout, _ldap_trunk_idle_timeout, ttrunk);
+ (void) fr_timer_in(ttrunk, el->tl, &ttrunk->ev,
+ ttrunk->t->config->idle_timeout, false, _ldap_trunk_idle_timeout, ttrunk);
do {
/*
/*
* Remove the timeout event
*/
- if (query->ev) fr_event_timer_delete(&query->ev);
+ if (query->ev) fr_timer_delete(&query->ev);
query->result = result;
/*
* Insert event to close trunk if it becomes idle
*/
- if (!fr_cond_assert_msg(fr_event_timer_in(found, thread->el, &found->ev, thread->config->idle_timeout,
- _ldap_trunk_idle_timeout, found) == 0, "cannot insert trunk idle event")) goto error;
+ if (!fr_cond_assert_msg(fr_timer_in(found, thread->el->tl, &found->ev, thread->config->idle_timeout,
+ false, _ldap_trunk_idle_timeout, found) == 0, "cannot insert trunk idle event")) goto error;
/*
* Attempt to discover what type directory we are talking to
DEBUG4("redis handle %p - Timeout in %pV seconds", h, fr_box_time_delta(timeout));
- if (fr_event_timer_in(h, conn->el, &h->timer,
- timeout, _redis_io_service_timer_expired, conn) < 0) {
+ if (fr_timer_in(h, conn->el->tl, &h->timer,
+ timeout, false, _redis_io_service_timer_expired, conn) < 0) {
PERROR("redis timeout %p - Failed adding timeout", h);
}
bool write_set; //!< We're listening for writes.
bool ignore_disconnect_cb; //!< Ensure that redisAsyncFree doesn't cause
///< a callback loop.
- fr_event_timer_t const *timer; //!< Connection timer.
+ fr_timer_t *timer; //!< Connection timer.
redisAsyncContext *ac; //!< Async handle for hiredis.
connection_shutdown_t shutdown; //!< Signal the connection handle to start shutting down.
connection_failed_t failed; //!< Callback for 'failed' notification.
- fr_event_timer_t const *ev; //!< State transition timer.
+ fr_timer_t *ev; //!< State transition timer.
fr_time_delta_t connection_timeout; //!< How long to wait in the
//!< #CONNECTION_STATE_CONNECTING state.
/** The requisite period of time has passed, try and re-open the connection
*
- * @param[in] el the time event occurred on.
+ * @param[in] tl containing the timer event.
* @param[in] now The current time.
* @param[in] uctx The #connection_t the fd is associated with.
*/
-static void _reconnect_delay_done(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+static void _reconnect_delay_done(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
{
connection_t *conn = talloc_get_type_abort(uctx, connection_t);
STATE_TRANSITION(CONNECTION_STATE_CLOSED);
- fr_event_timer_delete(&conn->ev);
+ fr_timer_delete(&conn->ev);
/*
* If there's a close callback, call it, so that the
*
* Connection wasn't opened within the configured period of time
*
- * @param[in] el the time event occurred on.
+ * @param[in] tl timer list the event belonged to.
* @param[in] now The current time.
* @param[in] uctx The #connection_t the fd is associated with.
*/
-static void _connection_timeout(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+static void _connection_timeout(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
{
connection_t *conn = talloc_get_type_abort(uctx, connection_t);
* timeout period.
*/
if (fr_time_delta_ispos(conn->connection_timeout)) {
- if (fr_event_timer_in(conn, conn->pub.el, &conn->ev,
- conn->connection_timeout, _connection_timeout, conn) < 0) {
+ if (fr_timer_in(conn, conn->pub.el->tl, &conn->ev,
+ conn->connection_timeout, false, _connection_timeout, conn) < 0) {
/*
* Can happen when the event loop is exiting
*/
/*
* Explicit error occurred, delete the connection timer
*/
- fr_event_timer_delete(&conn->ev);
+ fr_timer_delete(&conn->ev);
/*
* Record what state the connection is currently in
case CONNECTION_STATE_SHUTDOWN: /* Failed during shutdown */
if (fr_time_delta_ispos(conn->reconnection_delay)) {
DEBUG2("Delaying reconnection by %pVs", fr_box_time_delta(conn->reconnection_delay));
- if (fr_event_timer_in(conn, conn->pub.el, &conn->ev,
- conn->reconnection_delay, _reconnect_delay_done, conn) < 0) {
+ if (fr_timer_in(conn, conn->pub.el->tl, &conn->ev,
+ conn->reconnection_delay, false, _reconnect_delay_done, conn) < 0) {
/*
* Can happen when the event loop is exiting
*/
BAD_STATE_TRANSITION(CONNECTION_STATE_HALTED);
}
- fr_event_timer_delete(&conn->ev);
+ fr_timer_delete(&conn->ev);
STATE_TRANSITION(CONNECTION_STATE_HALTED);
WATCH_PRE(conn);
STATE_TRANSITION(CONNECTION_STATE_CONNECTED);
- fr_event_timer_delete(&conn->ev);
+ fr_timer_delete(&conn->ev);
WATCH_PRE(conn);
if (conn->open) {
HANDLER_BEGIN(conn, conn->open);
* set, then add the timer.
*/
if (fr_time_delta_ispos(conn->connection_timeout)) {
- if (fr_event_timer_in(conn, conn->pub.el, &conn->ev,
- conn->connection_timeout, _connection_timeout, conn) < 0) {
+ if (fr_timer_in(conn, conn->pub.el->tl, &conn->ev,
+ conn->connection_timeout, false, _connection_timeout, conn) < 0) {
PERROR("Failed setting connection_timeout event, failing connection");
/*
/*
* Explicitly cancel any pending events
*/
- fr_event_timer_delete(&conn->ev);
+ fr_timer_delete(&conn->ev);
/*
* Don't allow the connection to be
exec->pid = -1;
}
- if (exec->ev) fr_event_timer_delete(&exec->ev);
+ if (exec->ev) fr_timer_delete(&exec->ev);
}
/*
}
exec->pid = -1; /* pid_t is signed */
- if (exec->ev) fr_event_timer_delete(&exec->ev);
+ if (exec->ev) fr_timer_delete(&exec->ev);
/*
* Process exit notifications (EV_PROC) and file
/*
* Callback when an exec times out.
*/
-static void exec_timeout(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+static void exec_timeout(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
{
fr_exec_state_t *exec = uctx; /* may not be talloced */
bool exit_timeout;
/*
* Child has already exited - unlang can resume
*/
- if (exec->ev) fr_event_timer_delete(&exec->ev);
+ if (exec->ev) fr_timer_delete(&exec->ev);
unlang_interpret_mark_runnable(exec->request);
}
}
* Setup event to kill the child process after a period of time.
*/
if (fr_time_delta_ispos(timeout) &&
- (fr_event_timer_in(ctx, el, &exec->ev, timeout, exec_timeout, exec) < 0)) goto fail_and_close;
+ (fr_timer_in(ctx, el->tl, &exec->ev, timeout, true, exec_timeout, exec) < 0)) goto fail_and_close;
return 0;
}
#include <freeradius-devel/server/request.h>
#include <freeradius-devel/util/pair.h>
#include <freeradius-devel/util/talloc.h>
+#include <freeradius-devel/util/timer.h>
#include <sys/types.h>
#ifdef __cplusplus
int stderr_fd; //!< for producing error messages.
- fr_event_timer_t const *ev; //!< for timing out the child
+ fr_timer_t *ev; //!< for timing out the child
fr_event_pid_t const *ev_pid; //!< for cleaning up the process
fr_exec_fail_t failed; //!< what kind of failure
#include <systemd/sd-daemon.h>
static fr_time_delta_t sd_watchdog_interval;
-static fr_event_timer_t const *sd_watchdog_ev;
+static fr_timer_t *sd_watchdog_ev;
/** Reoccurring watchdog event to inform systemd we're still alive
*
- * Note actually a very good indicator of aliveness as the main event
+ * Not actually a very good indicator of aliveness as the main event
* loop doesn't actually do any packet processing.
*/
-static void sd_watchdog_event(fr_event_list_t *our_el, UNUSED fr_time_t now, void *ctx)
+static void sd_watchdog_event(fr_timer_list_t *tl, UNUSED fr_time_t now, void *ctx)
{
DEBUG("Emitting systemd watchdog notification");
sd_notify(0, "WATCHDOG=1");
- if (fr_event_timer_in(NULL, our_el, &sd_watchdog_ev,
- sd_watchdog_interval,
- sd_watchdog_event, ctx) < 0) {
+ if (fr_timer_in(NULL, tl, &sd_watchdog_ev,
+ sd_watchdog_interval,
+ true, sd_watchdog_event, ctx) < 0) {
ERROR("Failed to insert watchdog event");
}
}
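/*
 * For reference (standard sd-daemon API, not part of this patch): the
 * interval above is conventionally derived from sd_watchdog_enabled(3),
 * halved so a notification always lands within the service's WatchdogSec
 * window. A sketch, assuming that's how sd_watchdog_interval is populated:
 */
static void watchdog_interval_init(void) /* hypothetical helper */
{
uint64_t usec;

if (sd_watchdog_enabled(0, &usec) > 0) {
sd_watchdog_interval = fr_time_delta_from_usec(usec / 2); /* notify twice per period */
}
}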
/*
* Start placating the watchdog (if told to do so).
*/
- if (fr_time_delta_ispos(sd_watchdog_interval)) sd_watchdog_event(event_list, fr_time_wrap(0), NULL);
+ if (fr_time_delta_ispos(sd_watchdog_interval)) sd_watchdog_event(event_list->tl, fr_time_wrap(0), NULL);
#endif
ret = fr_event_loop(event_list);
if (under_systemd) {
INFO("Informing systemd we're stopping");
sd_notify(0, "STOPPING=1");
- fr_event_timer_delete(&sd_watchdog_ev);
+ fr_timer_delete(&sd_watchdog_ev);
}
}
#endif
/** @name Timers
* @{
*/
- fr_event_timer_t const *lifetime_ev; //!< Maximum time this connection can be open.
+ fr_timer_t *lifetime_ev; //!< Maximum time this connection can be open.
/** @} */
};
/** @name Timers
* @{
*/
- fr_event_timer_t const *manage_ev; //!< Periodic connection management event.
+ fr_timer_t *manage_ev; //!< Periodic connection management event.
/** @} */
/** @name Log rate limiting entries
static void trunk_rebalance(trunk_t *trunk);
static void trunk_manage(trunk_t *trunk, fr_time_t now);
-static void _trunk_timer(fr_event_list_t *el, fr_time_t now, void *uctx);
+static void _trunk_timer(fr_timer_list_t *tl, fr_time_t now, void *uctx);
static void trunk_backlog_drain(trunk_t *trunk);
/** Compare two protocol requests
{
trunk_t *trunk = tconn->pub.trunk;
- if (tconn->lifetime_ev) fr_event_timer_delete(&tconn->lifetime_ev);
+ if (tconn->lifetime_ev) fr_timer_delete(&tconn->lifetime_ev);
switch (tconn->pub.state) {
case TRUNK_CONN_ACTIVE:
/** Trigger a reconnection of the trunk connection
*
- * @param[in] el Event list the timer was inserted into.
+ * @param[in] tl timer list the timer was inserted into.
* @param[in] now Current time.
* @param[in] uctx The tconn.
*/
-static void _trunk_connection_lifetime_expire(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+static void _trunk_connection_lifetime_expire(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
{
trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
* connection periodically.
*/
if (fr_time_delta_ispos(trunk->conf.lifetime)) {
- if (fr_event_timer_in(tconn, trunk->el, &tconn->lifetime_ev,
- trunk->conf.lifetime, _trunk_connection_lifetime_expire, tconn) < 0) {
+ if (fr_timer_in(tconn, trunk->el->tl, &tconn->lifetime_ev,
+ trunk->conf.lifetime, false, _trunk_connection_lifetime_expire, tconn) < 0) {
PERROR("Failed inserting connection reconnection timer event, halting connection");
connection_signal_shutdown(tconn->pub.conn);
return;
/*
* Remove the reconnect event
*/
- if (fr_time_delta_ispos(trunk->conf.lifetime)) fr_event_timer_delete(&tconn->lifetime_ev);
+ if (fr_time_delta_ispos(trunk->conf.lifetime)) fr_timer_delete(&tconn->lifetime_ev);
/*
* Remove the I/O events
/** Event to periodically call the connection management function
*
- * @param[in] el this event belongs to.
+ * @param[in] tl this event belongs to.
* @param[in] now current time.
* @param[in] uctx The trunk.
*/
-static void _trunk_timer(fr_event_list_t *el, fr_time_t now, void *uctx)
+static void _trunk_timer(fr_timer_list_t *tl, fr_time_t now, void *uctx)
{
trunk_t *trunk = talloc_get_type_abort(uctx, trunk_t);
trunk_manage(trunk, now);
if (fr_time_delta_ispos(trunk->conf.manage_interval)) {
- if (fr_event_timer_in(trunk, el, &trunk->manage_ev, trunk->conf.manage_interval,
- _trunk_timer, trunk) < 0) {
+ if (fr_timer_in(trunk, tl, &trunk->manage_ev, trunk->conf.manage_interval,
+ false, _trunk_timer, trunk) < 0) {
PERROR("Failed inserting trunk management event");
/* Not much we can do, hopefully the trunk will be freed soon */
}
* Insert the event timer to manage
* the interval between managing connections.
*/
- if (fr_event_timer_in(trunk, trunk->el, &trunk->manage_ev, trunk->conf.manage_interval,
- _trunk_timer, trunk) < 0) {
+ if (fr_timer_in(trunk, trunk->el->tl, &trunk->manage_ev, trunk->conf.manage_interval,
+ false, _trunk_timer, trunk) < 0) {
PERROR("Failed inserting trunk management event");
return -1;
}
{
if (!trunk->started || !trunk->managing_connections) return 0;
- if (fr_event_timer_in(trunk, trunk->el, &trunk->manage_ev, fr_time_delta_wrap(0), _trunk_timer, trunk) < 0) {
+ if (fr_timer_in(trunk, trunk->el->tl, &trunk->manage_ev, fr_time_delta_wrap(0),
+ false, _trunk_timer, trunk) < 0) {
PERROR("Failed inserting trunk management event");
return -1;
}
* We really don't want this firing after
* we've freed everything.
*/
- fr_event_timer_delete(&trunk->manage_ev);
+ fr_timer_delete(&trunk->manage_ev);
/*
* Now free the connections in each of the lists.
el = fr_event_list_alloc(ctx, NULL, NULL);
- fr_event_list_set_time_func(el, test_time);
+ fr_timer_list_set_time_func(el->tl, test_time);
trunk = trunk_alloc(ctx, el, &io_funcs, &conf, "test_socket_pair", NULL, false);
TEST_CHECK(trunk != NULL);
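/*
 * Sketch (assumed; the real helpers live in the test file) of the injectable
 * time source these tests install: a static, manually advanced timestamp, so
 * timer expiry can be driven deterministically via the test_time_base
 * adjustments seen further down.
 */
static fr_time_t test_time_base;

static fr_time_t test_time(void)
{
return test_time_base;
}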
if (!el) return;
- fr_event_list_set_time_func(el, test_time);
+ fr_timer_list_set_time_func(el->tl, test_time);
trunk = trunk_alloc(ctx, el, &io_funcs, &conf, "test_socket_pair", NULL, false);
TEST_CHECK(trunk != NULL);
el = fr_event_list_alloc(ctx, NULL, NULL);
- fr_event_list_set_time_func(el, test_time);
+ fr_timer_list_set_time_func(el->tl, test_time);
trunk = trunk_alloc(ctx, el, &io_funcs, &conf, "test_socket_pair", NULL, false);
DEBUG_LVL_SET;
el = fr_event_list_alloc(ctx, NULL, NULL);
- fr_event_list_set_time_func(el, test_time);
+ fr_timer_list_set_time_func(el->tl, test_time);
trunk = trunk_alloc(ctx, el, &io_funcs, &conf, "test_socket_pair", NULL, false);
TEST_CHECK(trunk != NULL);
DEBUG_LVL_SET;
el = fr_event_list_alloc(ctx, NULL, NULL);
- fr_event_list_set_time_func(el, test_time);
+ fr_timer_list_set_time_func(el->tl, test_time);
trunk = test_setup_trunk(ctx, el, &conf, true, NULL);
DEBUG_LVL_SET;
el = fr_event_list_alloc(ctx, NULL, NULL);
- fr_event_list_set_time_func(el, test_time);
+ fr_timer_list_set_time_func(el->tl, test_time);
trunk = test_setup_trunk(ctx, el, &conf, false, NULL);
preq = talloc_zero(NULL, test_proto_request_t);
DEBUG_LVL_SET;
el = fr_event_list_alloc(ctx, NULL, NULL);
- fr_event_list_set_time_func(el, test_time);
+ fr_timer_list_set_time_func(el->tl, test_time);
trunk = test_setup_trunk(ctx, el, &conf, true, NULL);
preq = talloc_zero(NULL, test_proto_request_t);
fr_talloc_fault_setup();
el = fr_event_list_alloc(ctx, NULL, NULL);
- fr_event_list_set_time_func(el, test_time);
+ fr_timer_list_set_time_func(el->tl, test_time);
trunk = test_setup_trunk(ctx, el, &conf, true, NULL);
preq = talloc_zero(ctx, test_proto_request_t);
DEBUG_LVL_SET;
el = fr_event_list_alloc(ctx, NULL, NULL);
- fr_event_list_set_time_func(el, test_time);
+ fr_timer_list_set_time_func(el->tl, test_time);
/* Need to provide a timer starting value above zero */
test_time_base = fr_time_add_time_delta(test_time_base, fr_time_delta_from_nsec(NSEC * 0.5));
DEBUG_LVL_SET;
el = fr_event_list_alloc(ctx, NULL, NULL);
- fr_event_list_set_time_func(el, test_time);
+ fr_timer_list_set_time_func(el->tl, test_time);
trunk = test_setup_trunk(ctx, el, &conf, true, NULL);
preq = talloc_zero(NULL, test_proto_request_t);
DEBUG_LVL_SET;
el = fr_event_list_alloc(ctx, NULL, NULL);
- fr_event_list_set_time_func(el, test_time);
+ fr_timer_list_set_time_func(el->tl, test_time);
/* Need to provide a timer starting value above zero */
test_time_base = fr_time_add_time_delta(test_time_base, fr_time_delta_from_nsec(NSEC * 0.5));
DEBUG_LVL_SET;
el = fr_event_list_alloc(ctx, NULL, NULL);
- fr_event_list_set_time_func(el, test_time);
+ fr_timer_list_set_time_func(el->tl, test_time);
/* Need to provide a timer starting value above zero */
test_time_base = fr_time_add_time_delta(test_time_base, fr_time_delta_from_nsec(NSEC * 0.5));
DEBUG_LVL_SET;
el = fr_event_list_alloc(ctx, NULL, NULL);
- fr_event_list_set_time_func(el, test_time);
+ fr_timer_list_set_time_func(el->tl, test_time);
/* Need to provide a timer starting value above zero */
test_time_base = fr_time_add_time_delta(test_time_base, fr_time_delta_from_nsec(NSEC * 0.5));
return UNLANG_ACTION_PUSHED_CHILD;
}
-static void instruction_timeout_handler(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *ctx);
+static void instruction_timeout_handler(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *ctx);
/** Update the current result after each instruction, and after popping each stack frame
*
if (fr_time_delta_ispos(instruction->actions.retry.mrd)) {
retry->timeout = fr_time_add(fr_time(), instruction->actions.retry.mrd);
- if (fr_event_timer_at(retry, unlang_interpret_event_list(request), &retry->ev, retry->timeout,
- instruction_timeout_handler, request) < 0) {
+ if (fr_timer_at(retry, unlang_interpret_event_list(request)->tl, &retry->ev, retry->timeout,
+ false, instruction_timeout_handler, request) < 0) {
RPEDEBUG("Failed inserting event");
goto fail;
}
}
}
-static void instruction_timeout_handler(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *ctx)
+static void instruction_timeout_handler(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *ctx)
{
unlang_retry_t *retry = talloc_get_type_abort(ctx, unlang_retry_t);
request_t *request = talloc_get_type_abort(retry->request, request_t);
* loop. This means the request is always in a consistent state when
* the timeout event fires, even if that state is waiting on I/O.
*/
-static void unlang_cancel_event(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+static void unlang_cancel_event(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
{
request_t *request = talloc_get_type_abort(uctx, request_t);
{
fr_value_box_t *timeout;
fr_event_list_t *el = unlang_interpret_event_list(request);
- fr_event_timer_t const **ev_p, **ev_p_og;
+ fr_timer_t **ev_p, **ev_p_og;
fr_value_box_t *vb;
fr_time_t when = fr_time_from_sec(0); /* Invalid clang complaints if we don't set this */
*/
ev_p = ev_p_og = request_data_get(request, (void *)unlang_cancel_xlat, 0);
if (ev_p) {
- if (*ev_p) when = fr_event_timer_when(*ev_p); /* *ev_p should never be NULL, really... */
+ if (*ev_p) when = fr_timer_when(*ev_p); /* *ev_p should never be NULL, really... */
} else {
/*
* Must not be parented from the request
* as this is freed by request data.
*/
- MEM(ev_p = talloc_zero(NULL, fr_event_timer_t const *));
+ MEM(ev_p = talloc_zero(NULL, fr_timer_t *));
}
- if (unlikely(fr_event_timer_in(ev_p, el, ev_p,
- timeout ? timeout->vb_time_delta : fr_time_delta_from_sec(0),
- unlang_cancel_event, request) < 0)) {
+ if (unlikely(fr_timer_in(ev_p, el->tl, ev_p,
+ timeout ? timeout->vb_time_delta : fr_time_delta_from_sec(0),
+ false, unlang_cancel_event, request) < 0)) {
RPERROR("Failed inserting cancellation event");
talloc_free(ev_p);
return XLAT_ACTION_FAIL;
}
if (unlikely(request_data_add(request, (void *)unlang_cancel_xlat, 0,
- UNCONST(fr_event_timer_t **, ev_p), true, true, false) < 0)) {
+ UNCONST(fr_timer_t **, ev_p), true, true, false) < 0)) {
RPERROR("Failed associating cancellation event with request");
talloc_free(ev_p);
return XLAT_ACTION_FAIL;
#include "tmpl.h"
static unlang_action_t unlang_module_resume(rlm_rcode_t *p_result, request_t *request, unlang_stack_frame_t *frame);
-static void unlang_module_event_retry_handler(UNUSED fr_event_list_t *el, fr_time_t now, void *ctx);
+static void unlang_module_event_retry_handler(UNUSED fr_timer_list_t *tl, fr_time_t now, void *ctx);
/** Push a module or submodule onto the stack for evaluation
*
if (!state->retry.config) {
fr_retry_init(&state->retry, fr_time(), retry_cfg);
- if (fr_event_timer_at(state, unlang_interpret_event_list(request), &state->ev,
- state->retry.next, unlang_module_event_retry_handler, request) < 0) {
+ if (fr_timer_at(state, unlang_interpret_event_list(request)->tl, &state->ev,
+ state->retry.next,
+ false, unlang_module_event_retry_handler, request) < 0) {
RPEDEBUG("Failed inserting event");
return UNLANG_ACTION_FAIL;
}
/** Call the callback registered for a retry event
*
- * @param[in] el the event timer was inserted into.
+ * @param[in] tl the event timer was inserted into.
* @param[in] now The current time, as held by the event_list.
* @param[in] ctx the stack frame
*
*/
-static void unlang_module_event_retry_handler(UNUSED fr_event_list_t *el, fr_time_t now, void *ctx)
+static void unlang_module_event_retry_handler(UNUSED fr_timer_list_t *tl, fr_time_t now, void *ctx)
{
request_t *request = talloc_get_type_abort(ctx, request_t);
unlang_stack_t *stack = request->stack;
/*
* Reset the timer.
*/
- if (fr_event_timer_at(state, unlang_interpret_event_list(request), &state->ev, state->retry.next,
- unlang_module_event_retry_handler, request) < 0) {
+ if (fr_timer_at(state, unlang_interpret_event_list(request)->tl, &state->ev, state->retry.next,
+ false, unlang_module_event_retry_handler, request) < 0) {
RPEDEBUG("Failed inserting event");
unlang_interpret_mark_runnable(request); /* and let the caller figure out what's up */
}
fr_retry_init(&state->retry, now, &frame->instruction->actions.retry);
- if (fr_event_timer_at(state, unlang_interpret_event_list(request),
- &state->ev, state->retry.next,
- unlang_module_event_retry_handler, request) < 0) {
+ if (fr_timer_at(state, unlang_interpret_event_list(request)->tl,
+ &state->ev, state->retry.next,
+ false, unlang_module_event_retry_handler, request) < 0) {
RPEDEBUG("Failed inserting event");
goto fail;
}
module_instance_t const *mi; //!< Module instance to pass to callbacks.
request_t *request;
- fr_event_timer_t const *ev; //!< retry timer just for this module.
+ fr_timer_t *ev; //!< retry timer just for this module.
fr_retry_t retry; //!< retry timers, etc.
/** @} */
/** Event handler to free a detached child
*
*/
-static void unlang_detached_max_request_time(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+static void unlang_detached_max_request_time(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
{
request_t *request = talloc_get_type_abort(uctx, request_t);
vp = fr_pair_find_by_da(&request->control_pairs, NULL, request_attr_request_lifetime);
if (!vp || (vp->vp_uint32 > 0)) {
fr_time_delta_t when = fr_time_delta_wrap(0);
- const fr_event_timer_t **ev_p;
+ fr_timer_t **ev_p;
if (!vp) {
when = fr_time_delta_add(when, fr_time_delta_from_sec(30)); /* default to 30s if not set */
ev_p = talloc_size(request, sizeof(*ev_p));
memset(ev_p, 0, sizeof(*ev_p));
- if (fr_event_timer_in(request, unlang_interpret_event_list(request), ev_p, when,
- unlang_detached_max_request_time, request) < 0) {
+ if (fr_timer_in(request, unlang_interpret_event_list(request)->tl, ev_p, when,
+ false, unlang_detached_max_request_time, request) < 0) {
talloc_free(ev_p);
return -1;
}
fr_time_delta_t timeout;
request_t *request;
rindent_t indent;
- fr_event_timer_t const *ev;
+ fr_timer_t *ev;
fr_value_box_list_t result;
} unlang_frame_state_timeout_t;
-static void unlang_timeout_handler(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *ctx)
+static void unlang_timeout_handler(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *ctx)
{
unlang_frame_state_timeout_t *state = talloc_get_type_abort(ctx, unlang_frame_state_timeout_t);
request_t *request = talloc_get_type_abort(state->request, request_t);
timeout = fr_time_add(fr_time(), state->timeout);
- if (fr_event_timer_at(state, unlang_interpret_event_list(request), &state->ev, timeout,
- unlang_timeout_handler, state) < 0) {
+ if (fr_timer_at(state, unlang_interpret_event_list(request)->tl, &state->ev, timeout,
+ false, unlang_timeout_handler, state) < 0) {
RPEDEBUG("Failed inserting event");
*p_result = RLM_MODULE_FAIL;
return UNLANG_ACTION_STOP_PROCESSING;
fr_retry_state_t state;
fr_time_t timeout;
uint32_t count;
- fr_event_timer_t const *ev;
+ fr_timer_t *ev;
} unlang_retry_t;
/** Our interpreter stack, as distinct from the C stack
///< of the execution.
} unlang_frame_state_xlat_t;
-/** Wrap an #fr_event_timer_t providing data needed for unlang events
+/** Wrap an #fr_timer_t providing data needed for unlang events
*
*/
typedef struct {
xlat_inst_t *inst; //!< xlat instance data.
xlat_thread_inst_t *thread; //!< Thread specific xlat instance.
void const *rctx; //!< rctx data to pass to callbacks.
- fr_event_timer_t const *ev; //!< Event in this worker's event heap.
+ fr_timer_t *ev; //!< Event in this worker's event heap.
} unlang_xlat_event_t;
typedef struct {
fr_unlang_xlat_retry_t retry_cb; //!< callback to run on timeout
void *rctx; //!< rctx data to pass to timeout callback
- fr_event_timer_t const *ev; //!< retry timer just for this xlat
+ fr_timer_t *ev; //!< retry timer just for this xlat
fr_retry_t retry; //!< retry timers, etc.
} unlang_xlat_retry_t;
static int _unlang_xlat_event_free(unlang_xlat_event_t *ev)
{
if (ev->ev) {
- (void) fr_event_timer_delete(&(ev->ev));
+ (void) fr_timer_delete(&(ev->ev));
return 0;
}
/** Call the callback registered for a timeout event
*
- * @param[in] el the event timer was inserted into.
+ * @param[in] tl the event timer was inserted into.
* @param[in] now The current time, as held by the event_list.
* @param[in] uctx unlang_xlat_event_t structure holding callbacks.
*
*/
-static void unlang_xlat_event_timeout_handler(UNUSED fr_event_list_t *el, fr_time_t now, void *uctx)
+static void unlang_xlat_event_timeout_handler(UNUSED fr_timer_list_t *tl, fr_time_t now, void *uctx)
{
- unlang_xlat_event_t *ev = talloc_get_type_abort(uctx, unlang_xlat_event_t);
+ unlang_xlat_event_t *ev = talloc_get_type_abort(uctx, unlang_xlat_event_t);
/*
* If the timeout's fired then the xlat must necessarily
ev->thread = xlat_thread_instance_find(state->exp);
ev->rctx = rctx;
- if (fr_event_timer_at(request, unlang_interpret_event_list(request),
- &ev->ev, when, unlang_xlat_event_timeout_handler, ev) < 0) {
+ if (fr_timer_at(request, unlang_interpret_event_list(request)->tl,
+ &ev->ev, when,
+ false, unlang_xlat_event_timeout_handler, ev) < 0) {
RPEDEBUG("Failed inserting event");
talloc_free(ev);
return -1;
*/
static int _unlang_xlat_retry_free(unlang_xlat_retry_t *ev)
{
- if (ev->ev) (void) fr_event_timer_delete(&(ev->ev));
+ if (ev->ev) (void) fr_timer_delete(&(ev->ev));
return 0;
}
/** Call the callback registered for a timeout event
*
- * @param[in] el the event timer was inserted into.
+ * @param[in] tl the event timer was inserted into.
* @param[in] now The current time, as held by the event_list.
* @param[in] uctx unlang_xlat_retry_t structure holding callbacks.
*
*/
-static void unlang_xlat_event_retry_handler(UNUSED fr_event_list_t *el, fr_time_t now, void *uctx)
+static void unlang_xlat_event_retry_handler(UNUSED fr_timer_list_t *tl, fr_time_t now, void *uctx)
{
unlang_xlat_retry_t *ev = talloc_get_type_abort(uctx, unlang_xlat_retry_t);
request_t *request = ev->request;
/*
* Reset the timer.
*/
- if (fr_event_timer_at(ev, unlang_interpret_event_list(request), &ev->ev, ev->retry.next,
- unlang_xlat_event_retry_handler, request) < 0) {
+ if (fr_timer_at(ev, unlang_interpret_event_list(request)->tl, &ev->ev, ev->retry.next,
+ false, unlang_xlat_event_retry_handler, ev) < 0) {
RPEDEBUG("Failed inserting event");
talloc_free(ev);
unlang_interpret_mark_runnable(request);
fr_retry_init(&ev->retry, fr_time(), retry_cfg);
- if (fr_event_timer_at(request, unlang_interpret_event_list(request),
- &ev->ev, ev->retry.next, unlang_xlat_event_retry_handler, ev) < 0) {
+ if (fr_timer_at(request, unlang_interpret_event_list(request)->tl,
+ &ev->ev, ev->retry.next,
+ false, unlang_xlat_event_retry_handler, ev) < 0) {
RPEDEBUG("Failed inserting event");
talloc_free(ev);
return XLAT_ACTION_FAIL;
*/
RCSID("$Id$")
+#define _EVENT_LIST_PRIVATE 1
+typedef struct fr_event_list_s fr_event_list_t;
+
#include <freeradius-devel/util/dlist.h>
#include <freeradius-devel/util/event.h>
-#include <freeradius-devel/util/lst.h>
+#include <freeradius-devel/util/timer.h>
#include <freeradius-devel/util/log.h>
#include <freeradius-devel/util/rb.h>
#include <freeradius-devel/util/strerror.h>
# define SO_GET_FILTER SO_ATTACH_FILTER
#endif
-#ifdef WITH_EVENT_DEBUG
-# define EVENT_DEBUG(fmt, ...) printf("EVENT:");printf(fmt, ## __VA_ARGS__);printf("\n");
-# ifndef EVENT_REPORT_FREQ
-# define EVENT_REPORT_FREQ 5
-# endif
-#else
-# define EVENT_DEBUG(...)
-#endif
-
static fr_table_num_sorted_t const kevent_filter_table[] = {
#ifdef EVFILT_AIO
{ L("EVFILT_AIO"), EVFILT_AIO },
static int log_conf_kq;
#endif
-/** A timer event
- *
- */
-struct fr_event_timer {
- fr_time_t when; //!< When this timer should fire.
-
- fr_event_timer_cb_t callback; //!< Callback to execute when the timer fires.
- void const *uctx; //!< Context pointer to pass to the callback.
-
- TALLOC_CTX *linked_ctx; //!< talloc ctx this event was bound to.
-
- fr_event_timer_t const **parent; //!< A pointer to the parent structure containing the timer
- ///< event.
-
- fr_lst_index_t lst_id; //!< Where to store opaque lst data.
- fr_dlist_t entry; //!< List of deferred timer events.
-
- fr_event_list_t *el; //!< Event list containing this timer.
-
-#ifndef NDEBUG
- char const *file; //!< Source file this event was last updated in.
- int line; //!< Line this event was last updated on.
-#endif
-};
-
typedef enum {
FR_EVENT_FD_SOCKET = 1, //!< is a socket.
FR_EVENT_FD_FILE = 2, //!< is a file.
*/
typedef struct {
fr_dlist_t entry; //!< Linked list of callback.
- fr_event_timer_cb_t callback; //!< The callback to call.
+ fr_event_post_cb_t callback; //!< The callback to call.
void *uctx; //!< Context for the callback.
} fr_event_post_t;
/** Stores all information relating to an event list
*
*/
-struct fr_event_list {
- fr_lst_t *times; //!< of timer events to be executed.
- fr_rb_tree_t *fds; //!< Tree used to track FDs with filters in kqueue.
+struct fr_event_list_s {
+ struct fr_event_list_pub_s pub; //!< Public interface to the event list.
+ fr_rb_tree_t *fds; //!< Tree used to track FDs with filters in kqueue.
- int will_exit; //!< Will exit on next call to fr_event_corral.
- int exit; //!< If non-zero event loop will prevent the addition
- ///< of new events, and will return immediately
- ///< from the corral/service function.
+ int will_exit; //!< Will exit on next call to fr_event_corral.
+ int exit; //!< If non-zero event loop will prevent the addition
+ ///< of new events, and will return immediately
+ ///< from the corral/service function.
- fr_event_time_source_t time; //!< Where our time comes from.
- fr_time_t now; //!< The last time the event list was serviced.
- bool dispatch; //!< Whether the event list is currently dispatching events.
+ bool dispatch; //!< Whether the event list is currently dispatching events.
- int num_fd_events; //!< Number of events in this event list.
+ int num_fd_events; //!< Number of events in this event list.
- int kq; //!< instance associated with this event list.
+ int kq; //!< instance associated with this event list.
- fr_dlist_head_t pre_callbacks; //!< callbacks when we may be idle...
- fr_dlist_head_t post_callbacks; //!< post-processing callbacks
+ fr_dlist_head_t pre_callbacks; //!< callbacks when we may be idle...
+ fr_dlist_head_t post_callbacks; //!< post-processing callbacks
- fr_dlist_head_t pid_to_reap; //!< A list of all orphaned child processes we're
- ///< waiting to reap.
+ fr_dlist_head_t pid_to_reap; //!< A list of all orphaned child processes we're
+ ///< waiting to reap.
- struct kevent events[FR_EV_BATCH_FDS]; /* so it doesn't go on the stack every time */
+ struct kevent events[FR_EV_BATCH_FDS]; /* so it doesn't go on the stack every time */
- bool in_handler; //!< Deletes should be deferred until after the
- ///< handlers complete.
+ bool in_handler; //!< Deletes should be deferred until after the
+ ///< handlers complete.
- fr_dlist_head_t fd_to_free; //!< File descriptor events pending deletion.
- fr_dlist_head_t ev_to_add; //!< dlist of events to add
+ fr_dlist_head_t fd_to_free; //!< File descriptor events pending deletion.
#ifdef WITH_EVENT_DEBUG
- fr_event_timer_t const *report; //!< Report event.
+ fr_timer_t *report; //!< Report event.
#endif
};
}
}
-/** Compare two timer events to see which one should occur first
- *
- * @param[in] a the first timer event.
- * @param[in] b the second timer event.
- * @return
- * - +1 if a should occur later than b.
- * - -1 if a should occur earlier than b.
- * - 0 if both events occur at the same time.
- */
-static int8_t fr_event_timer_cmp(void const *a, void const *b)
-{
- fr_event_timer_t const *ev_a = a, *ev_b = b;
-
- return fr_time_cmp(ev_a->when, ev_b->when);
-}
-
/** Compare two file descriptor handles
*
* @param[in] one the first file descriptor handle.
{
if (unlikely(!el)) return -1;
- return fr_lst_num_elements(el->times);
+ return fr_timer_list_num_events(el->pub.tl);
}
/** Return the kq associated with an event list.
*/
fr_time_t fr_event_list_time(fr_event_list_t *el)
{
- if (el->dispatch) {
- return el->now;
- } else {
- return el->time();
- }
+ return el->pub.tl->time();
}
/** Placeholder callback to avoid branches in service loop
}
#endif
-/** Remove an event from the event loop
- *
- * @param[in] ev to free.
- * @return
- * - 0 on success.
- * - -1 on failure.
- */
-static int _event_timer_free(fr_event_timer_t *ev)
-{
- fr_event_list_t *el = ev->el;
- fr_event_timer_t const **ev_p;
-
- if (fr_dlist_entry_in_list(&ev->entry)) {
- (void) fr_dlist_remove(&el->ev_to_add, ev);
- } else {
- int ret = fr_lst_extract(el->times, ev);
- char const *err_file;
- int err_line;
-
-#ifndef NDEBUG
- err_file = ev->file;
- err_line = ev->line;
-#else
- err_file = "not-available";
- err_line = 0;
-#endif
-
-
- /*
- * Events MUST be in the lst (or the insertion list).
- */
- if (!fr_cond_assert_msg(ret == 0,
- "Event %p, lst_id %u, allocd %s[%d], was not found in the event lst or "
- "insertion list when freed: %s", ev, ev->lst_id, err_file, err_line,
- fr_strerror())) return -1;
- }
-
- ev_p = ev->parent;
- fr_assert(*(ev->parent) == ev);
- *ev_p = NULL;
-
- return 0;
-}
-
-/** Insert a timer event into an event list
- *
- * @note The talloc parent of the memory returned in ev_p must not be changed.
- * If the lifetime of the event needs to be bound to another context
- * this function should be called with the existing event pointed to by
- * ev_p.
- *
- * @param[in] ctx to bind lifetime of the event to.
- * @param[in] el to insert event into.
- * @param[in,out] ev_p If not NULL modify this event instead of creating a new one. This is a parent
- * in a temporal sense, not in a memory structure or dependency sense.
- * @param[in] when we should run the event.
- * @param[in] callback function to execute if the event fires.
- * @param[in] uctx user data to pass to the event.
- * @return
- * - 0 on success.
- * - -1 on failure.
- */
-int _fr_event_timer_at(NDEBUG_LOCATION_ARGS
- TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_timer_t const **ev_p,
- fr_time_t when, fr_event_timer_cb_t callback, void const *uctx)
-{
- fr_event_timer_t *ev;
-
- if (unlikely(!el)) {
- fr_strerror_const("Invalid arguments: NULL event list");
- return -1;
- }
-
- if (unlikely(!callback)) {
- fr_strerror_const("Invalid arguments: NULL callback");
- return -1;
- }
-
- if (unlikely(!ev_p)) {
- fr_strerror_const("Invalid arguments: NULL ev_p");
- return -1;
- }
-
- if (unlikely(el->exit)) {
- fr_strerror_const("Event loop exiting");
- return -1;
- }
-
- /*
- * If there is an event, reuse it instead of freeing it
- * and allocating a new one. This is to reduce memory
- * churn for repeat events.
- */
- if (!*ev_p) {
- new_event:
- ev = talloc_zero(el, fr_event_timer_t);
- if (unlikely(!ev)) return -1;
-
- EVENT_DEBUG("%p - %s[%i] Added new timer %p", el, file, line, ev);
-
- /*
- * Bind the lifetime of the event to the specified
- * talloc ctx. If the talloc ctx is freed, the
- * event will also be freed.
- */
- if (ctx != el) talloc_link_ctx(ctx, ev);
-
- talloc_set_destructor(ev, _event_timer_free);
- ev->lst_id = 0;
-
- } else {
- ev = UNCONST(fr_event_timer_t *, *ev_p);
-
- EVENT_DEBUG("%p - %s[%i] Re-armed timer %p", el, file, line, ev);
-
- /*
- * We can't disarm the linking context due to
- * limitations in talloc, so if the linking
- * context changes, we need to free the old
- * event, and allocate a new one.
- *
- * Freeing the event also removes it from the lst.
- */
- if (unlikely(ev->linked_ctx != ctx)) {
- talloc_free(ev);
- goto new_event;
- }
-
- /*
- * Event may have fired, in which case the event
- * will no longer be in the event loop, so check
- * if it's in the lst before extracting it.
- */
- if (!fr_dlist_entry_in_list(&ev->entry)) {
- int ret;
- char const *err_file;
- int err_line;
-
- ret = fr_lst_extract(el->times, ev);
-
-#ifndef NDEBUG
- err_file = ev->file;
- err_line = ev->line;
-#else
- err_file = "not-available";
- err_line = 0;
-#endif
-
- /*
- * Events MUST be in the lst (or the insertion list).
- */
- if (!fr_cond_assert_msg(ret == 0,
- "Event %p, lst_id %u, allocd %s[%d], was not found in the event "
- "lst or insertion list when freed: %s", ev, ev->lst_id,
- err_file, err_line, fr_strerror())) return -1;
- }
- }
-
- ev->el = el;
- ev->when = when;
- ev->callback = callback;
- ev->uctx = uctx;
- ev->linked_ctx = ctx;
- ev->parent = ev_p;
-#ifndef NDEBUG
- ev->file = file;
- ev->line = line;
-#endif
-
- if (el->in_handler) {
- /*
- * Don't allow an event to be inserted
- * into the deferred insertion list
- * multiple times.
- */
- if (!fr_dlist_entry_in_list(&ev->entry)) fr_dlist_insert_head(&el->ev_to_add, ev);
- } else if (unlikely(fr_lst_insert(el->times, ev) < 0)) {
- fr_strerror_const_push("Failed inserting event");
- talloc_set_destructor(ev, NULL);
- *ev_p = NULL;
- talloc_free(ev);
- return -1;
- }
-
- *ev_p = ev;
-
- return 0;
-}
-
-/** Insert a timer event into an event list
- *
- * @note The talloc parent of the memory returned in ev_p must not be changed.
- * If the lifetime of the event needs to be bound to another context
- * this function should be called with the existing event pointed to by
- * ev_p.
- *
- * @param[in] ctx to bind lifetime of the event to.
- * @param[in] el to insert event into.
- * @param[in,out] ev_p If not NULL modify this event instead of creating a new one. This is a parent
- * in a temporal sense, not in a memory structure or dependency sense.
- * @param[in] delta In how many nanoseconds to wait before should we execute the event.
- * @param[in] callback function to execute if the event fires.
- * @param[in] uctx user data to pass to the event.
- * @return
- * - 0 on success.
- * - -1 on failure.
- */
-int _fr_event_timer_in(NDEBUG_LOCATION_ARGS
- TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_timer_t const **ev_p,
- fr_time_delta_t delta, fr_event_timer_cb_t callback, void const *uctx)
-{
- return _fr_event_timer_at(NDEBUG_LOCATION_VALS
- ctx, el, ev_p, fr_time_add(el->time(), delta), callback, uctx);
-}
-
-/** Delete a timer event from the event list
- *
- * @param[in] ev_p of the event being deleted.
- * @return
- * - 0 on success.
- * - -1 on failure.
- */
-int fr_event_timer_delete(fr_event_timer_t const **ev_p)
-{
- fr_event_timer_t *ev;
- int ret;
-
- if (unlikely(!*ev_p)) return 0;
-
- ev = UNCONST(fr_event_timer_t *, *ev_p);
- ret = talloc_free(ev);
-
- /*
- * Don't leave a garbage pointer value
- * in the parent.
- */
- if (likely(ret == 0)) *ev_p = NULL;
- return 0;
-}
-
-/** Internal timestamp representing when the timer should fire
- *
- * @return When the timestamp should fire.
- */
-fr_time_t fr_event_timer_when(fr_event_timer_t const *ev)
-{
- return ev->when;
-}
-
/** Remove PID wait event from kevent if the fr_event_pid_t is freed
*
* @param[in] ev to free.
struct kevent evset;
int waiting = 0;
int kq = kqueue();
- fr_time_t now, start = el->time(), end = fr_time_add(start, timeout);
+ fr_time_t now, start = el->pub.tl->time(), end = fr_time_add(start, timeout);
if (unlikely(kq < 0)) goto force;
/*
* Keep draining process exits as they come in...
*/
- while ((waiting > 0) && fr_time_gt(end, (now = el->time()))) {
+ while ((waiting > 0) && fr_time_gt(end, (now = el->pub.tl->time()))) {
struct kevent kev;
int ret;
* - < 0 on error
* - 0 on success
*/
-int fr_event_post_insert(fr_event_list_t *el, fr_event_timer_cb_t callback, void *uctx)
+int fr_event_post_insert(fr_event_list_t *el, fr_event_post_cb_t callback, void *uctx)
{
fr_event_post_t *post;
* - < 0 on error
* - 0 on success
*/
-int fr_event_post_delete(fr_event_list_t *el, fr_event_timer_cb_t callback, void *uctx)
+int fr_event_post_delete(fr_event_list_t *el, fr_event_post_cb_t callback, void *uctx)
{
fr_event_post_t *post, *next;
return -1;
}
-/** Run a single scheduled timer event
- *
- * @param[in] el containing the timer events.
- * @param[in] when Process events scheduled to run before or at this time.
- * @return
- * - 0 no timer events fired.
- * - 1 a timer event fired.
- */
-int fr_event_timer_run(fr_event_list_t *el, fr_time_t *when)
-{
- fr_event_timer_cb_t callback;
- void *uctx;
- fr_event_timer_t *ev;
-
- if (unlikely(!el)) return 0;
-
- if (fr_lst_num_elements(el->times) == 0) {
- *when = fr_time_wrap(0);
- return 0;
- }
-
- ev = fr_lst_peek(el->times);
- if (!ev) {
- *when = fr_time_wrap(0);
- return 0;
- }
-
- /*
- * See if it's time to do this one.
- */
- if (fr_time_gt(ev->when, *when)) {
- *when = ev->when;
- return 0;
- }
-
- callback = ev->callback;
- memcpy(&uctx, &ev->uctx, sizeof(uctx));
-
- fr_assert(*ev->parent == ev);
-
- /*
- * Delete the event before calling it.
- */
- fr_event_timer_delete(ev->parent);
-
- callback(el, *when, uctx);
-
- return 1;
-}
-
/** Gather outstanding timer and file descriptor events
*
* @param[in] el to process events for.
fr_event_pre_t *pre;
int num_fd_events;
bool timer_event_ready = false;
- fr_event_timer_t *ev;
+ fr_time_t next;
el->num_fd_events = 0;
*/
when = fr_time_delta_wrap(0);
wake = &when;
- el->now = now;
/*
* See when we have to wake up. Either now, if the timer
* events are in the past. Or, we wait for a future
* timer event.
*/
- ev = fr_lst_peek(el->times);
- if (ev) {
- if (fr_time_lteq(ev->when, el->now)) {
+ next = fr_timer_list_when(el->pub.tl);
+ if (fr_time_neq(next, fr_time_wrap(0))) {
+ if (fr_time_lteq(next, now)) {
timer_event_ready = true;
} else if (wait) {
- when = fr_time_sub(ev->when, el->now);
+ when = fr_time_sub(next, now);
} /* else we're not waiting, leave "when == 0" */
* If there are no FD events, we must have woken up from a timer
*/
if (!num_fd_events) {
- el->now = fr_time_add(el->now, when);
if (wait) timer_event_ready = true;
}
/*
return num_fd_events + timer_event_ready;
}
+CC_NO_UBSAN(function) /* UBSAN: false positive - public vs private connection_t trips --fsanitize=function*/
static inline CC_HINT(always_inline)
void event_callback(fr_event_list_t *el, fr_event_fd_t *ef, int *filter, int flags, int *fflags)
{
*/
void fr_event_service(fr_event_list_t *el)
{
+ fr_timer_list_t *etl = el->pub.tl;
int i;
fr_event_post_t *post;
- fr_time_t when;
- fr_event_timer_t *ev;
+ fr_time_t when, now;
if (unlikely(el->exit)) return;
* cause strange interaction effects, spurious calls
* to kevent, and busy loops.
*/
- el->now = el->time();
+ now = etl->time();
/*
* Run all of the timer events. Note that these can add
* new timers!
*/
- if (fr_lst_num_elements(el->times) > 0) {
- el->in_handler = true;
-
- do {
- when = el->now;
- } while (fr_event_timer_run(el, &when) == 1);
+ if (fr_time_neq(fr_timer_list_when(el->pub.tl), fr_time_wrap(0))) {
+ int ret;
- el->in_handler = false;
- }
+ when = now;
- /*
- * New timers can be added while running the timer
- * callback. Instead of being added to the main timer
- * lst, they are instead added to the "to do" list.
- * Once we're finished running the callbacks, we walk
- * through the "to do" list, and add the callbacks to the
- * timer lst.
- *
- * Doing it this way prevents the server from running
- * into an infinite loop. The timer callback MAY add a
- * new timer which is in the past. The loop above would
- * then immediately run the new callback, which could
- * also add an event in the past...
- */
- while ((ev = fr_dlist_head(&el->ev_to_add)) != NULL) {
- (void)fr_dlist_remove(&el->ev_to_add, ev);
- if (unlikely(fr_lst_insert(el->times, ev) < 0)) {
- talloc_free(ev);
- fr_assert_msg(0, "failed inserting lst event: %s", fr_strerror()); /* Die in debug builds */
+ ret = fr_timer_list_run(etl, &when);
+ if (!fr_cond_assert(ret >= 0)) { /* catastrophic error, trigger event loop exit */
+ el->exit = 1;
+ return;
}
+
+ EVENT_DEBUG("%p - %s - Serviced %u timer(s)", el, __FUNCTION__, (unsigned int)ret);
}
- el->now = el->time();
+
+ now = etl->time();
/*
* Run all of the post-processing events.
for (post = fr_dlist_head(&el->post_callbacks);
post != NULL;
post = fr_dlist_next(&el->post_callbacks, post)) {
- post->callback(el, el->now, post->uctx);
+ post->callback(el, now, post->uctx);
}
}
el->dispatch = true;
while (!el->exit) {
- if (unlikely(fr_event_corral(el, el->time(), true)) < 0) break;
+ if (unlikely(fr_event_corral(el, el->pub.tl->time(), true) < 0)) break;
fr_event_service(el);
}
*/
static int _event_list_free(fr_event_list_t *el)
{
- fr_event_timer_t const *ev;
-
- while ((ev = fr_lst_peek(el->times)) != NULL) fr_event_timer_delete(&ev);
-
fr_event_list_reap_signal(el, fr_time_delta_wrap(0), SIGKILL);
talloc_free_children(el);
fr_strerror_const("Out of memory");
return NULL;
}
- el->time = fr_time;
el->kq = -1; /* So destructor can be used before kqueue() provides us with fd */
talloc_set_destructor(el, _event_list_free);
- el->times = fr_lst_talloc_alloc(el, fr_event_timer_cmp, fr_event_timer_t, lst_id, 0);
- if (!el->times) {
- fr_strerror_const("Failed allocating event lst");
+ el->pub.tl = fr_timer_list_lst_alloc(el, NULL);
+ if (!el->pub.tl) {
+ fr_strerror_const("Failed allocating timer list");
error:
talloc_free(el);
return NULL;
fr_dlist_talloc_init(&el->pre_callbacks, fr_event_pre_t, entry);
fr_dlist_talloc_init(&el->post_callbacks, fr_event_post_t, entry);
- fr_dlist_talloc_init(&el->ev_to_add, fr_event_timer_t, entry);
fr_dlist_talloc_init(&el->pid_to_reap, fr_event_pid_reap_t, entry);
fr_dlist_talloc_init(&el->fd_to_free, fr_event_fd_t, entry);
if (status) (void) fr_event_pre_insert(el, status, status_uctx);
goto error;
}
-#ifdef WITH_EVENT_DEBUG
- fr_event_timer_in(el, el, &el->report, fr_time_delta_from_sec(EVENT_REPORT_FREQ), fr_event_report, NULL);
-#endif
-
return el;
}
-/** Override event list time source
- *
- * @param[in] el to set new time function for.
- * @param[in] func to set.
- */
-void fr_event_list_set_time_func(fr_event_list_t *el, fr_event_time_source_t func)
-{
- el->time = func;
-}
-
/** Return whether the event loop has any active events
*
*/
bool fr_event_list_empty(fr_event_list_t *el)
{
- return !fr_lst_num_elements(el->times) && !fr_rb_num_elements(el->fds);
-}
-
-#ifdef WITH_EVENT_DEBUG
-static const fr_time_delta_t decades[18] = {
- { 1 }, { 10 }, { 100 },
- { 1000 }, { 10000 }, { 100000 },
- { 1000000 }, { 10000000 }, { 100000000 },
- { 1000000000 }, { 10000000000 }, { 100000000000 },
- { 1000000000000 }, { 10000000000000 }, { 100000000000000 },
- { 1000000000000000 }, { 10000000000000000 }, { 100000000000000000 },
-};
-
-static const char *decade_names[18] = {
- "1ns", "10ns", "100ns",
- "1us", "10us", "100us",
- "1ms", "10ms", "100ms",
- "1s", "10s", "100s",
- "1Ks", "10Ks", "100Ks",
- "1Ms", "10Ms", "100Ms", /* 1 year is 300Ms */
-};
-
-typedef struct {
- fr_rb_node_t node;
- char const *file;
- int line;
- uint32_t count;
-} fr_event_counter_t;
-
-static int8_t event_timer_location_cmp(void const *one, void const *two)
-{
- fr_event_counter_t const *a = one;
- fr_event_counter_t const *b = two;
-
- CMP_RETURN(a, b, file);
-
- return CMP(a->line, b->line);
-}
-
-
-/** Print out information about the number of events in the event loop
- *
- */
-void fr_event_report(fr_event_list_t *el, fr_time_t now, void *uctx)
-{
- fr_lst_iter_t iter;
- fr_event_timer_t const *ev;
- size_t i;
-
- size_t array[NUM_ELEMENTS(decades)] = { 0 };
- fr_rb_tree_t *locations[NUM_ELEMENTS(decades)];
- TALLOC_CTX *tmp_ctx;
- static pthread_mutex_t print_lock = PTHREAD_MUTEX_INITIALIZER;
-
- tmp_ctx = talloc_init_const("temporary stats");
- if (!tmp_ctx) {
- oom:
- EVENT_DEBUG("Can't do report, out of memory");
- talloc_free(tmp_ctx);
- return;
- }
-
- for (i = 0; i < NUM_ELEMENTS(decades); i++) {
- locations[i] = fr_rb_inline_alloc(tmp_ctx, fr_event_counter_t, node, event_timer_location_cmp, NULL);
- if (!locations[i]) goto oom;
- }
-
- /*
- * Show which events are due, when they're due,
- * and where they were allocated
- */
- for (ev = fr_lst_iter_init(el->times, &iter);
- ev != NULL;
- ev = fr_lst_iter_next(el->times, &iter)) {
- fr_time_delta_t diff = fr_time_sub(ev->when, now);
-
- for (i = 0; i < NUM_ELEMENTS(decades); i++) {
- if ((fr_time_delta_cmp(diff, decades[i]) <= 0) || (i == NUM_ELEMENTS(decades) - 1)) {
- fr_event_counter_t find = { .file = ev->file, .line = ev->line };
- fr_event_counter_t *counter;
-
- counter = fr_rb_find(locations[i], &find);
- if (!counter) {
- counter = talloc(locations[i], fr_event_counter_t);
- if (!counter) goto oom;
- counter->file = ev->file;
- counter->line = ev->line;
- counter->count = 1;
- fr_rb_insert(locations[i], counter);
- } else {
- counter->count++;
- }
-
- array[i]++;
- break;
- }
- }
- }
-
- pthread_mutex_lock(&print_lock);
- EVENT_DEBUG("%p - Event list stats", el);
- EVENT_DEBUG(" fd events : %"PRIu64, fr_event_list_num_fds(el));
- EVENT_DEBUG(" events last iter : %u", el->num_fd_events);
- EVENT_DEBUG(" num timer events : %"PRIu64, fr_event_list_num_timers(el));
-
- for (i = 0; i < NUM_ELEMENTS(decades); i++) {
- fr_rb_iter_inorder_t event_iter;
- void *node;
-
- if (!array[i]) continue;
-
- if (i == 0) {
- EVENT_DEBUG(" events <= %5s : %zu", decade_names[i], array[i]);
- } else if (i == (NUM_ELEMENTS(decades) - 1)) {
- EVENT_DEBUG(" events > %5s : %zu", decade_names[i - 1], array[i]);
- } else {
- EVENT_DEBUG(" events %5s - %5s : %zu", decade_names[i - 1], decade_names[i], array[i]);
- }
-
- for (node = fr_rb_iter_init_inorder(&event_iter, locations[i]);
- node;
- node = fr_rb_iter_next_inorder(&event_iter)) {
- fr_event_counter_t *counter = talloc_get_type_abort(node, fr_event_counter_t);
-
- EVENT_DEBUG(" : %u allocd at %s[%d]",
- counter->count, counter->file, counter->line);
- }
- }
- pthread_mutex_unlock(&print_lock);
-
- fr_event_timer_in(el, el, &el->report, fr_time_delta_from_sec(EVENT_REPORT_FREQ), fr_event_report, uctx);
- talloc_free(tmp_ctx);
-}
-
-#ifndef NDEBUG
-void fr_event_timer_dump(fr_event_list_t *el)
-{
- fr_lst_iter_t iter;
- fr_event_timer_t *ev;
- fr_time_t now;
-
- now = el->time();
-
- EVENT_DEBUG("Time is now %"PRId64"", fr_time_unwrap(now));
-
- for (ev = fr_lst_iter_init(el->times, &iter);
- ev;
- ev = fr_lst_iter_next(el->times, &iter)) {
- (void)talloc_get_type_abort(ev, fr_event_timer_t);
- EVENT_DEBUG("%s[%d]: %p time=%" PRId64 " (%c), callback=%p",
- ev->file, ev->line, ev, fr_time_unwrap(ev->when),
- fr_time_gt(now, ev->when) ? '<' : '>', ev->callback);
- }
+ return fr_time_eq(fr_timer_list_when(el->pub.tl), fr_time_wrap(0)) && (fr_rb_num_elements(el->fds) == 0);
}
-#endif
-#endif
-
#ifdef TESTING
-
/*
* cc -g -I .. -c rb.c -o rbtree.o && cc -g -I .. -c isaac.c -o isaac.o && cc -DTESTING -I .. -c event.c -o event_mine.o && cc event_mine.o rbtree.o isaac.o -o event
*
array[i] = array[i - 1];
array[i] += event_rand() & 0xffff;
- fr_event_timer_at(NULL, el, array[i], print_time, array[i]);
+ fr_timer_at(NULL, el->pub.tl, array[i], false, print_time, array[i]);
}
while (fr_event_list_num_timers(el)) {
now = el->time();
when = now;
- if (!fr_event_timer_run(el, &when)) {
+ if (!fr_timer_list_run(el->pub.tl, &when)) {
int delay = (when - now) / 1000; /* nanoseconds to microseconds */
printf("\tsleep %d microseconds\n", delay);
extern "C" {
#endif
+#include <freeradius-devel/util/timer.h>
+
+/*
+ * Allow public and private versions of the same structures
+ */
+#ifndef _EVENT_LIST_PRIVATE
+typedef struct fr_event_list_pub_s fr_event_list_t;
+#endif
+
+/** Public event list structure
+ *
+ * Make the event timer list available, but nothing else.
+ *
+ * This allows us to access these values without the cost of a function call.
+ */
+struct fr_event_list_pub_s {
+ fr_timer_list_t *tl; //!< The timer list associated with this event loop.
+};
+
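+/* Usage sketch (illustrative, not part of this patch): with the timer list
+ * exposed, callers insert timers through el->tl rather than through the
+ * event list API.  Assumes an allocated event list 'el' and a callback 'cb'
+ * matching fr_timer_cb_t.
+ *
+ * static fr_timer_t *ev;
+ *
+ * if (fr_timer_in(ctx, el->tl, &ev, fr_time_delta_from_sec(1),
+ *                 false, cb, NULL) < 0) fr_perror("Failed inserting timer");
+ */
+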
#include <freeradius-devel/build.h>
#include <freeradius-devel/missing.h>
#include <freeradius-devel/util/time.h>
+
#include <freeradius-devel/util/talloc.h>
#include <stdbool.h>
#include <sys/event.h>
-/** An opaque file descriptor handle
- */
-typedef struct fr_event_fd fr_event_fd_t;
-/** An opaque event list handle
- */
-typedef struct fr_event_list fr_event_list_t;
+#ifdef WITH_EVENT_DEBUG
+# define EVENT_DEBUG(fmt, ...) do { printf("EVENT:"); printf(fmt, ## __VA_ARGS__); printf("\n"); } while (0)
+# ifndef EVENT_REPORT_FREQ
+# define EVENT_REPORT_FREQ 5
+# endif
+#else
+# define EVENT_DEBUG(...)
+#endif
-/** An opaque timer handle
+/** An opaque file descriptor handle
*/
-typedef struct fr_event_timer fr_event_timer_t;
+typedef struct fr_event_fd fr_event_fd_t;
/** An opaque PID status handle
*/
typedef struct fr_event_pid fr_event_pid_t;
-/** An opaquer user event handle
+/** An opaque user event handle
*/
typedef struct fr_event_user_s fr_event_user_t;
*/
#define FR_EVENT_RESUME(_s, _f) { .offset = offsetof(_s, _f), .op = FR_EVENT_OP_RESUME }
-/** Called when a timer event fires
- *
- * @param[in] now The current time.
- * @param[in] el Event list the timer event was inserted into.
- * @param[in] uctx User ctx passed to #fr_event_timer_in or #fr_event_timer_at.
- */
-typedef void (*fr_event_timer_cb_t)(fr_event_list_t *el, fr_time_t now, void *uctx);
-
/** Called after each event loop cycle
*
* Called before calling kqueue to put the thread in a sleeping state.
*/
typedef void (*fr_event_user_cb_t)(fr_event_list_t *el, void *uctx);
-/** Alternative time source, useful for testing
+/** Called when a post event fires
*
- * @return the current time in nanoseconds past the epoch.
+ * @param[in] el Event list the post event was inserted into.
+ * @param[in] now The current time.
+ * @param[in] uctx User ctx passed to #fr_event_post_insert.
*/
-typedef fr_time_t (*fr_event_time_source_t)(void);
+typedef void (*fr_event_post_cb_t)(fr_event_list_t *el, fr_time_t now, void *uctx);
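+
+/* Sketch (illustrative): registering a post-processing callback, which runs
+ * after each pass of the service loop.  'post_cb' is an invented name.
+ *
+ * static void post_cb(fr_event_list_t *el, fr_time_t now, void *uctx)
+ * {
+ *         DEBUG("loop serviced at %" PRId64, fr_time_unwrap(now));
+ * }
+ *
+ * (void) fr_event_post_insert(el, post_cb, NULL);
+ */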
/** Callbacks for the #FR_EVENT_FILTER_IO filter
*/
int fr_event_fd_unarmour(fr_event_list_t *el, int fd, fr_event_filter_t filter, uintptr_t armour);
#endif
-int _fr_event_timer_at(NDEBUG_LOCATION_ARGS
- TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_timer_t const **ev,
- fr_time_t when, fr_event_timer_cb_t callback, void const *uctx);
-#define fr_event_timer_at(...) _fr_event_timer_at(NDEBUG_LOCATION_EXP __VA_ARGS__)
-
-int _fr_event_timer_in(NDEBUG_LOCATION_ARGS
- TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_timer_t const **ev,
- fr_time_delta_t delta, fr_event_timer_cb_t callback, void const *uctx);
-#define fr_event_timer_in(...) _fr_event_timer_in(NDEBUG_LOCATION_EXP __VA_ARGS__)
-
-int fr_event_timer_delete(fr_event_timer_t const **ev);
-
-fr_time_t fr_event_timer_when(fr_event_timer_t const *ev) CC_HINT(nonnull);
-
int _fr_event_pid_wait(NDEBUG_LOCATION_ARGS
TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_pid_t const **ev_p,
pid_t pid, fr_event_pid_cb_t wait_fn, void *uctx)
unsigned int fr_event_list_reap_signal(fr_event_list_t *el, fr_time_delta_t timeout, int signal);
-int fr_event_timer_run(fr_event_list_t *el, fr_time_t *when);
-
int _fr_event_user_insert(NDEBUG_LOCATION_ARGS
TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_user_t **ev_p,
bool trigger, fr_event_user_cb_t callback, void *uctx);
int fr_event_pre_insert(fr_event_list_t *el, fr_event_status_cb_t callback, void *uctx) CC_HINT(nonnull(1,2));
int fr_event_pre_delete(fr_event_list_t *el, fr_event_status_cb_t callback, void *uctx) CC_HINT(nonnull(1,2));
-int fr_event_post_insert(fr_event_list_t *el, fr_event_timer_cb_t callback, void *uctx) CC_HINT(nonnull(1,2));
-int fr_event_post_delete(fr_event_list_t *el, fr_event_timer_cb_t callback, void *uctx) CC_HINT(nonnull(1,2));
+int fr_event_post_insert(fr_event_list_t *el, fr_event_post_cb_t callback, void *uctx) CC_HINT(nonnull(1,2));
+int fr_event_post_delete(fr_event_list_t *el, fr_event_post_cb_t callback, void *uctx) CC_HINT(nonnull(1,2));
int fr_event_corral(fr_event_list_t *el, fr_time_t now, bool wait);
void fr_event_service(fr_event_list_t *el);
int fr_event_loop(fr_event_list_t *el);
fr_event_list_t *fr_event_list_alloc(TALLOC_CTX *ctx, fr_event_status_cb_t status, void *status_ctx);
-void fr_event_list_set_time_func(fr_event_list_t *el, fr_event_time_source_t func);
bool fr_event_list_empty(fr_event_list_t *el);
-#ifdef WITH_EVENT_DEBUG
-void fr_event_report(fr_event_list_t *el, fr_time_t now, void *uctx);
-# ifndef NDEBUG
-void fr_event_timer_dump(fr_event_list_t *el);
-# endif
-#endif
-
#ifdef __cplusplus
}
#endif
edit.c \
encode.c \
event.c \
+ timer.c \
ext.c \
fifo.c \
file.c \
SRC_CFLAGS += -sMAIN_MODULE=1 -sUSE_PTHREADS=1
TGT_LDFLAGS += --no-entry -sALLOW_MEMORY_GROWTH=1 -sFORCE_FILESYSTEM=1 -sEXPORT_ALL=1 -sLINKABLE=1 -sMODULARIZE=1 -sEXPORT_ES6=1 -sEXPORT_NAME=libfreeradiusUtil -sEXPORTED_RUNTIME_METHODS=ccall,cwrap,setValue,getValue --preload-file=$(top_builddir)/share/dictionary@/share/dictionary
endif
-
FR_DLIST_HEAD(_name ## _slab) reserved; \
FR_DLIST_HEAD(_name ## _slab) avail; \
fr_event_list_t *el; \
- fr_event_timer_t const *ev; \
+ fr_timer_t *ev; \
fr_slab_config_t config; \
unsigned int in_use; \
unsigned int high_water_mark; \
* up to half of the element count between the high water mark \
* and the current number in use. \
*/ \
- static void _ ## _name ## _slab_cleanup(fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) \
+ static void _ ## _name ## _slab_cleanup(fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx) \
{ \
_name ## _slab_list_t *slab_list = talloc_get_type_abort(uctx, _name ## _slab_list_t); \
_name ## _slab_t *slab = NULL, *next_slab = NULL; \
} \
slab_list->high_water_mark -= cleared; \
finish: \
- (void) fr_event_timer_in(slab_list, el, &slab_list->ev, slab_list->config.interval, \
- _ ## _name ## _slab_cleanup, slab_list); \
+ (void) fr_timer_in(slab_list, tl, &slab_list->ev, slab_list->config.interval, false, \
+ _ ## _name ## _slab_cleanup, slab_list); \
} \
\
/** Allocate a slab list to manage slabs of allocated memory \
_name ## _slab_init(&slab->reserved); \
_name ## _slab_init(&slab->avail); \
if (el) { \
- if (unlikely(fr_event_timer_in(slab, el, &slab->ev, config->interval, _ ## _name ## _slab_cleanup, slab) < 0)) { \
+ if (unlikely(fr_timer_in(slab, el->tl, &slab->ev, config->interval, false, _ ## _name ## _slab_cleanup, slab) < 0)) { \
talloc_free(slab); \
return NULL; \
}; \
*/
#include <freeradius-devel/util/acutest.h>
#include <freeradius-devel/util/acutest_helpers.h>
-
+#include <freeradius-devel/util/timer.h>
#include "slab.h"
typedef struct {
fr_slab_config_t slab_config = def_slab_config;
el = fr_event_list_alloc(ctx, NULL, NULL);
- fr_event_list_set_time_func(el, test_time);
+ fr_timer_list_set_time_func(el->tl, test_time);
slab_config.max_elements = 6;
test_slab_list = test_slab_list_alloc(NULL, el, &slab_config, NULL, NULL, NULL, true, false);
fr_slab_config_t slab_config = def_slab_config;
el = fr_event_list_alloc(ctx, NULL, NULL);
- fr_event_list_set_time_func(el, test_time);
+ fr_timer_list_set_time_func(el->tl, test_time);
slab_config.min_elements = 16;
slab_config.max_elements = 20;
fr_slab_config_t slab_config = def_slab_config;
el = fr_event_list_alloc(ctx, NULL, NULL);
- fr_event_list_set_time_func(el, test_time);
+ fr_timer_list_set_time_func(el->tl, test_time);
slab_config.min_elements = 0;
slab_config.max_elements = 20;
fr_slab_config_t slab_config = def_slab_config;
el = fr_event_list_alloc(ctx, NULL, NULL);
- fr_event_list_set_time_func(el, test_time);
+ fr_timer_list_set_time_func(el->tl, test_time);
slab_config.min_elements = 0;
slab_config.max_elements = 20;
--- /dev/null
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+ */
+
+/** Various types of event timer list
+ *
+ * @file src/lib/util/timer.c
+ *
+ * @copyright 2025 Arran Cudbard-Bell (a.cudbardb@freeradius.org)
+ */
+
+#define _TIMER_PRIVATE 1
+typedef struct fr_timer_list_s fr_timer_list_t;
+
+#include <freeradius-devel/util/debug.h>
+#include <freeradius-devel/util/time.h>
+#include <freeradius-devel/util/dlist.h>
+#include <freeradius-devel/util/event.h>
+#include <freeradius-devel/util/strerror.h>
+#include <freeradius-devel/util/timer.h>
+#include <freeradius-devel/util/value.h>
+#include <freeradius-devel/util/lst.h>
+#include <freeradius-devel/util/rb.h>
+#include <stdbool.h>
+#include <talloc.h>
+
+FR_DLIST_TYPES(timer)
+FR_DLIST_TYPEDEFS(timer, fr_timer_head_t, fr_timer_entry_t)
+
+/** What type of event list the timer is inserted into
+ *
+ */
+typedef enum {
+ TIMER_LIST_TYPE_LST = 1, //!< Self-sorting timer list based on a leftmost skeleton tree.
+ TIMER_LIST_TYPE_ORDERED = 2 //!< Strictly ordered list of events in a dlist.
+} timer_list_type_t;
+
+/** An event timer list
+ *
+ */
+struct fr_timer_list_s {
+ struct fr_timer_list_pub_s pub; //!< Public interface to the event timer list.
+
+ union {
+ fr_lst_t *lst; //!< of timer events to be executed.
+ timer_head_t ordered; //!< A list of timer events to be executed.
+ };
+ timer_list_type_t type;
+ bool in_handler; //!< Whether we're currently in a callback.
+
+ timer_head_t deferred; //!< A list of timer events to be inserted, after
+ ///< the current batch has been processed.
+ ///< This prevents "busy" timer loops, where
+ ///< other events may starve, or we may never exit.
+
+ fr_timer_list_t *parent; //!< Parent list to insert event into (if any).
+ fr_timer_t *parent_ev; //!< Event in the parent's event loop.
+
+#ifdef WITH_EVENT_DEBUG
+ fr_timer_t *report; //!< Used to trigger periodic reports about the event timer list.
+#endif
+};
+
+/** A timer event
+ *
+ */
+struct fr_timer_s {
+ fr_time_t when; //!< When this timer should fire.
+
+ fr_timer_cb_t callback; //!< Callback to execute when the timer fires.
+ void const *uctx; //!< Context pointer to pass to the callback.
+
+ TALLOC_CTX *linked_ctx; //!< talloc ctx this event was bound to.
+
+ fr_timer_t **parent; //!< A pointer to the parent structure containing the timer
+ ///< event.
+ fr_lst_index_t lst_idx; //!< Where to store opaque lst data, not used for ordered lists.
+
+ fr_timer_entry_t entry; //!< Entry in a list of timer events.
+
+ bool free_on_fire; //!< Whether to free the event when it fires.
+
+ fr_timer_list_t *tl; //!< The event list this timer is part of.
+ ///< This is set to NULL when an event is disarmed,
+ ///< but all other fields are left intact.
+
+#ifndef NDEBUG
+ char const *file; //!< Source file this event was last updated in.
+ int line; //!< Line this event was last updated on.
+#endif
+};
+
+FR_DLIST_FUNCS(timer, fr_timer_t, entry)
+
+#define CHECK_PARENT(_ev) \
+ fr_assert_msg(!(_ev)->parent || (*(_ev)->parent == (_ev)), \
+ "Event %p, allocd %s[%d], parent field points to %p", (_ev), (_ev)->file, (_ev)->line, *(_ev)->parent);
+
+/** Specialisation function to insert a timer
+ *
+ * @param[in] tl Timer list to insert into.
+ * @param[in] ev Timer event to insert.
+ * @return
+ * - 0 on success.
+ * - -1 on failure.
+ */
+typedef int (*timer_insert_t)(fr_timer_list_t *tl, fr_timer_t *ev);
+
+/** Specialisation function to disarm a timer
+ *
+ * @param[in] ev Timer event to disarm.
+ * @return
+ * - 0 on success.
+ * - -1 on failure.
+ */
+typedef int (*timer_disarm_t)(fr_timer_t *ev);
+
+/** Specialisation function to execute any pending timers
+ *
+ * @param[in] tl Timer list to execute.
+ * @param[in,out] when Our current time, updated to the next event time (i.e. the next time we'll need to run something)
+ * @return
+ * - 0 no timer events fired.
+ * - 1 a timer event fired.
+ */
+typedef int (*timer_list_run_t)(fr_timer_list_t *tl, fr_time_t *when);
+
+/** Return the soonest timer event
+ *
+ * @param[in] tl to get the head of.
+ * @return
+ * - The head of the list.
+ * - NULL if the list is empty.
+ */
+typedef fr_timer_t *(*timer_list_head_t)(fr_timer_list_t *tl);
+
+/** Process any deferred timer events
+ *
+ * @param[in] tl to process deferred events for.
+ * @return
+ * - 0 on success.
+ * - -1 on failure.
+ */
+
+/** Return the number of elements in the list
+ *
+ * @param[in] tl to get the number of elements from.
+ * @return
+ * - The number of elements in the list.
+ */
+typedef uint64_t (*timer_list_num_elements_t)(fr_timer_list_t *tl);
+
+typedef struct {
+ timer_insert_t insert; //!< Function to insert a timer event.
+ timer_disarm_t disarm; //!< Function to disarm a timer event.
+
+ timer_list_run_t run; //!< Function to run a timer event.
+ timer_list_head_t head; //!< Function to get the head of the list.
+ timer_list_deferred_t deferred; //!< Function to process deferred events.
+ timer_list_num_elements_t num_events; //!< Function to get the number of elements in the list.
+} timer_list_funcs_t;
+
+#define EVENT_ARMED(_ev) ((_ev)->tl != NULL)
+
+static int timer_lst_insert_at(fr_timer_list_t *tl, fr_timer_t *ev);
+static int timer_ordered_insert_at(fr_timer_list_t *tl, fr_timer_t *ev);
+
+static int timer_lst_disarm(fr_timer_t *ev);
+static int timer_ordered_disarm(fr_timer_t *ev);
+
+static int timer_list_lst_run(fr_timer_list_t *tl, fr_time_t *when);
+static int timer_list_ordered_run(fr_timer_list_t *tl, fr_time_t *when);
+
+static fr_timer_t *timer_list_lst_head(fr_timer_list_t *tl);
+static fr_timer_t *timer_list_ordered_head(fr_timer_list_t *tl);
+
+static int timer_list_lst_deferred(fr_timer_list_t *tl);
+static int timer_list_ordered_deferred(fr_timer_list_t *tl);
+
+static uint64_t timer_list_lst_num_events(fr_timer_list_t *tl);
+static uint64_t timer_list_ordered_num_events(fr_timer_list_t *tl);
+
+/** Functions for performing operations on various types of timer list
+ *
+ */
+static timer_list_funcs_t const timer_funcs[] = {
+ [TIMER_LIST_TYPE_LST] = {
+ .insert = timer_lst_insert_at,
+ .disarm = timer_lst_disarm,
+
+ .run = timer_list_lst_run,
+ .head = timer_list_lst_head,
+ .deferred = timer_list_lst_deferred,
+ .num_events = timer_list_lst_num_events
+ },
+ [TIMER_LIST_TYPE_ORDERED] = {
+ .insert = timer_ordered_insert_at,
+ .disarm = timer_ordered_disarm,
+
+ .run = timer_list_ordered_run,
+ .head = timer_list_ordered_head,
+ .deferred = timer_list_ordered_deferred,
+ .num_events = timer_list_ordered_num_events
+ }
+};
+
+/** Compare two timer events to see which one should occur first
+ *
+ * @param[in] a the first timer event.
+ * @param[in] b the second timer event.
+ * @return
+ * - +1 if a should occur later than b.
+ * - -1 if a should occur earlier than b.
+ * - 0 if both events occur at the same time.
+ */
+static int8_t timer_cmp(void const *a, void const *b)
+{
+ fr_timer_t const *ev_a = a, *ev_b = b;
+
+ return fr_time_cmp(ev_a->when, ev_b->when);
+}
+
+/** This callback fires in the parent to execute events in this sublist
+ *
+ * @param[in] parent_tl Parent event timer list.
+ * @param[in] when When the parent timer fired.
+ * @param[in] uctx Sublist to execute.
+ */
+static void _parent_timer_cb(UNUSED fr_timer_list_t *parent_tl, fr_time_t when, void *uctx)
+{
+ /*
+ * We're in the parent timer, so we need to run the
+ * events in the child timer list.
+ */
+ (void)fr_timer_list_run(talloc_get_type_abort(uctx, fr_timer_list_t), &when);
+}
+
+/** Utility function to update parent timers
+ *
+ * @param[in] tl to update parent timers for.
+ * @return
+ * - 0 on success.
+ * - -1 on failure.
+ */
+static inline CC_HINT(always_inline) int timer_list_parent_update(fr_timer_list_t *tl)
+{
+ fr_timer_t *ev;
+
+ if (!tl->parent) return 0;
+
+ ev = timer_funcs[tl->type].head(tl);
+ /*
+ * No events, disarm the timer
+ */
+ if (!ev) {
+ /*
+ * Disables the timer in the parent, does not free the memory
+ */
+ if (tl->parent_ev) fr_timer_disarm(tl->parent_ev);
+ return 0;
+ }
+
+ if (tl->parent_ev && EVENT_ARMED(tl->parent_ev) &&
+ fr_time_eq(ev->when, tl->parent_ev->when)) return 0; /* noop */
+
+ /*
+ * Re-arm the timer
+ */
+ return fr_timer_at(tl->parent, tl->parent, &tl->parent_ev,
+ ev->when, false, _parent_timer_cb, tl);
+}
+
+/** Insert a timer event into a single event timer list
+ *
+ * @param[in] tl to insert the event into.
+ * @param[in] ev to insert.
+ * @return
+ * - 0 on success.
+ * - -1 on failure.
+ */
+static int timer_lst_insert_at(fr_timer_list_t *tl, fr_timer_t *ev)
+{
+ if (unlikely(fr_lst_insert(tl->lst, ev) < 0)) {
+ fr_strerror_const_push("Failed inserting timer into lst");
+ return -1;
+ }
+
+ return 0;
+}
+
+/** Insert an event into an ordered timer list
+ *
+ * Timer must be in order, i.e. either before first event, or after last event
+ *
+ * @param[in] tl to insert the event into.
+ * @param[in] ev to insert.
+ * @return
+ * - 0 on success.
+ * - -1 on failure.
+ */
+static int timer_ordered_insert_at(fr_timer_list_t *tl, fr_timer_t *ev)
+{
+ fr_timer_t *tail;
+
+ tail = timer_tail(&tl->ordered);
+ if (tail && fr_time_lt(ev->when, tail->when)) {
+ fr_strerror_const("Event being inserted must occurr _after_ the last event");
+ return -1;
+ }
+
+ if (unlikely(timer_insert_tail(&tl->ordered, ev) < 0)) {
+ fr_strerror_const_push("Failed inserting timer into ordered list");
+ return -1;
+ }
+
+ return 0;
+}
+
+/** Remove an event from the event loop
+ *
+ * @param[in] ev to free.
+ * @return
+ * - 0 on success.
+ * - -1 on failure.
+ */
+static int _timer_free(fr_timer_t *ev)
+{
+ fr_timer_t **ev_p;
+ int ret;
+
+ ret = fr_timer_disarm(ev); /* Is a noop if ev->tl == NULL */
+ if (ret < 0) return ret;
+
+ CHECK_PARENT(ev);
+ ev_p = ev->parent;
+ *ev_p = NULL;
+
+ return 0;
+}
+
+/** Insert a timer event into an event list
+ *
+ * @note The talloc parent of the memory returned in ev_p must not be changed.
+ * If the lifetime of the event needs to be bound to another context
+ * this function should be called with the existing event pointed to by
+ * ev_p.
+ *
+ * @param[in] ctx to bind lifetime of the event to.
+ * @param[in] tl to insert event into.
+ * @param[in,out] ev_p If not NULL modify this event instead of creating a new one. This is a parent
+ * in a temporal sense, not in a memory structure or dependency sense.
+ * @param[in] when we should run the event.
+ * @param[in] free_on_fire Whether event memory should be freed if the event fires.
+ * @param[in] callback function to execute if the event fires.
+ * @param[in] uctx user data to pass to the event.
+ * @return
+ * - 0 on success.
+ * - -1 on failure.
+ */
+int _fr_timer_at(NDEBUG_LOCATION_ARGS
+ TALLOC_CTX *ctx, fr_timer_list_t *tl, fr_timer_t **ev_p,
+ fr_time_t when,
+ bool free_on_fire, fr_timer_cb_t callback, void const *uctx)
+{
+ fr_timer_t *ev;
+
+ /*
+ * If there is an event, reuse it instead of freeing it
+ * and allocating a new one. This is to reduce memory
+ * churn for repeat events.
+ */
+ if (!*ev_p) {
+ new_event:
+ ev = talloc_zero(tl, fr_timer_t);
+ if (unlikely(!ev)) {
+ fr_strerror_const("Out of memory");
+ return -1;
+ }
+
+ EVENT_DEBUG("%p - " NDEBUG_LOCATION_FMT "Added new timer %p", tl, NDEBUG_LOCATION_VALS ev);
+ /*
+ * Bind the lifetime of the event to the specified
+ * talloc ctx. If the talloc ctx is freed, the
+ * event will also be freed.
+ */
+ if (ctx != tl) talloc_link_ctx(ctx, ev);
+
+ talloc_set_destructor(ev, _timer_free);
+ } else {
+ ev = UNCONST(fr_timer_t *, *ev_p);
+
+ EVENT_DEBUG("%p - " NDEBUG_LOCATION_FMT "Re-armed timer %p", tl, NDEBUG_LOCATION_VALS ev);
+
+ /*
+ * We can't disarm the linking context due to
+ * limitations in talloc, so if the linking
+ * context changes, we need to free the old
+ * event, and allocate a new one.
+ *
+ * Freeing the event also removes it from the lst.
+ */
+ if (unlikely(ev->linked_ctx != ctx)) {
+ talloc_free(ev);
+ goto new_event;
+ }
+
+ /*
+ * If the event is associated with a list, we need
+ * to disarm it before we can re-arm it.
+ */
+ if (EVENT_ARMED(ev)) {
+ int ret;
+ char const *err_file;
+ int err_line;
+
+ /*
+ * Remove the event from the event list or the
+ * deferred list.
+ */
+ ret = fr_timer_disarm(ev);
+#ifndef NDEBUG
+ err_file = ev->file;
+ err_line = ev->line;
+#else
+ err_file = "not-available";
+ err_line = 0;
+#endif
+
+ /*
+ * Events MUST be in the lst (or the insertion list).
+ */
+ if (!fr_cond_assert_msg(ret == 0,
+ "Event %p, allocd %s[%d], was not found in the event "
+ "list or deferred list when re-armed: %s", ev,
+ err_file, err_line, fr_strerror())) return -1;
+ }
+ }
+
+ ev->tl = tl; /* This indicates the event is armed, i.e. bound to a timer list */
+ ev->when = when;
+ ev->free_on_fire = free_on_fire;
+ ev->callback = callback;
+ ev->uctx = uctx;
+ ev->linked_ctx = ctx;
+ ev->parent = ev_p;
+#ifndef NDEBUG
+ ev->file = file;
+ ev->line = line;
+#endif
+
+ /*
+ * No updating needed as the events are deferred
+ */
+ if (tl->in_handler) {
+ /*
+ * ...a little hacky, but we need to verify that
+ * we're not inserting an event that's earlier
+ * than the last event in the list for ordered
+ * lists.
+ *
+ * Otherwise we'd end up doing this when we tried
+ * to move all the deferred events into the timer
+ * list, and end up making that O(n) instead of O(1).
+ */
+ if (tl->type == TIMER_LIST_TYPE_ORDERED) {
+ fr_timer_t *tail = timer_tail(&tl->deferred);
+
+ if (!tail) tail = timer_tail(&tl->ordered);
+ if (tail && fr_time_lt(ev->when, tail->when)) {
+ fr_strerror_const("Event being inserted must occur _after_ the last event");
+
+ insert_failed:
+ talloc_set_destructor(ev, NULL);
+ talloc_free(ev);
+ *ev_p = NULL;
+ return -1;
+ }
+ }
+
+ if (!fr_cond_assert_msg(timer_insert_tail(&tl->deferred, ev) == 0,
+ "Failed inserting event into deferred list")) {
+ goto insert_failed;
+ }
+ } else {
+ int ret;
+
+ ret = timer_funcs[tl->type].insert(tl, ev);
+ if (unlikely(ret < 0)) goto insert_failed;
+
+ /*
+ * We need to update the parent timer
+ * to ensure it fires at the correct time.
+ */
+ if (unlikely(timer_list_parent_update(tl) < 0)) return -1;
+ }
+
+ *ev_p = ev;
+
+ return 0;
+}
+
+/** Insert a timer event into an event list
+ *
+ * @note The talloc parent of the memory returned in ev_p must not be changed.
+ * If the lifetime of the event needs to be bound to another context
+ * this function should be called with the existing event pointed to by
+ * ev_p.
+ *
+ * @param[in] ctx to bind lifetime of the event to.
+ * @param[in] tl to insert event into.
+ * @param[in,out] ev_p If not NULL modify this event instead of creating a new one. This is a parent
+ * in a temporal sense, not in a memory structure or dependency sense.
+ * @param[in] delta How long to wait before executing the event.
+ * @param[in] free_on_fire Whether event memory should be freed if the event fires.
+ * @param[in] callback function to execute if the event fires.
+ * @param[in] uctx user data to pass to the event.
+ * @return
+ * - 0 on success.
+ * - -1 on failure.
+ */
+int _fr_timer_in(NDEBUG_LOCATION_ARGS
+ TALLOC_CTX *ctx, fr_timer_list_t *tl, fr_timer_t **ev_p,
+ fr_time_delta_t delta,
+ bool free_on_fire, fr_timer_cb_t callback, void const *uctx)
+{
+ return _fr_timer_at(NDEBUG_LOCATION_VALS
+ ctx, tl, ev_p, fr_time_add(tl->pub.time(), delta),
+ free_on_fire, callback, uctx);
+}
+
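+/* A minimal sketch (not from this patch) of the common re-arming pattern:
+ * the callback re-inserts itself using the timer list it was passed, reusing
+ * the same fr_timer_t to avoid memory churn.  'tick' and 'tick_ev' are
+ * invented names.
+ *
+ * static fr_timer_t *tick_ev;
+ *
+ * static void tick(fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
+ * {
+ *         (void) fr_timer_in(tl, tl, &tick_ev, fr_time_delta_from_sec(1),
+ *                            false, tick, uctx);
+ * }
+ */
+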
+static int timer_lst_disarm(fr_timer_t *ev)
+{
+ fr_timer_list_t *tl = ev->tl;
+
+ if (timer_in_list(&tl->deferred, ev)) {
+ (void)timer_remove(&tl->deferred, ev);
+ } else {
+ int ret = fr_lst_extract(tl->lst, ev);
+ char const *err_file;
+ int err_line;
+
+#ifndef NDEBUG
+ err_file = ev->file;
+ err_line = ev->line;
+#else
+ err_file = "not-available";
+ err_line = 0;
+#endif
+
+
+ /*
+ * Events MUST be in the lst (or the insertion list).
+ */
+ if (!fr_cond_assert_msg(ret == 0,
+ "Event %p, lst_id %u, allocd %s[%d], was not found in the event lst or "
+ "insertion list when freed: %s", ev, ev->lst_idx, err_file, err_line,
+ fr_strerror())) return -1;
+ }
+
+ return 0;
+}
+
+/** Remove a timer from a timer list, but don't free it
+ *
+ * @param[in] ev to remove.
+ */
+static int timer_ordered_disarm(fr_timer_t *ev)
+{
+ /*
+ * Check the event is still valid (sanity check)
+ */
+ (void)talloc_get_type_abort(ev, fr_timer_t);
+
+ /*
+ * Already disassociated from a list, nothing to do.
+ */
+ if (!ev->tl) return 0;
+
+ /*
+ * This *MUST* be in the timer list if it has a non-NULL tl pointer.
+ */
+ if (unlikely(!fr_cond_assert(timer_in_list(&ev->tl->ordered, ev)))) return -1;
+
+ (void)timer_remove(&ev->tl->ordered, ev);
+
+ return 0;
+}
+
+/** Remove an event from the event list, but don't free the memory
+ *
+ * @param[in] ev to remove from the event list.
+ */
+int fr_timer_disarm(fr_timer_t *ev)
+{
+ fr_timer_list_t *tl = ev->tl;
+
+ if (!EVENT_ARMED(ev)) {
+ EVENT_DEBUG("Asked to disarm inactive timer %p (noop)", ev);
+ return 0; /* Noop */
+ }
+
+ EVENT_DEBUG("Disarming timer %p", ev);
+
+ CHECK_PARENT(ev);
+
+ /*
+ * If the event is deferred, it's not in the event list proper
+ * so just remove it, and set the tl pointer to NULL.
+ */
+ if (timer_in_list(&tl->deferred, ev)) {
+ (void)timer_remove(&tl->deferred, ev);
+ } else {
+ int ret = timer_funcs[ev->tl->type].disarm(ev);
+ if (ret < 0) return ret;
+ }
+ ev->tl = NULL;
+
+ return timer_list_parent_update(tl);
+}
+
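+/* Sketch of the intended contrast with fr_timer_delete() (illustrative):
+ *
+ * fr_timer_disarm(ev);        // removed from the list, memory kept, may be re-armed
+ * fr_timer_delete(&ev);       // removed from the list, memory freed, ev set to NULL
+ */
+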
+/** Delete a timer event and free its memory
+ *
+ * @param[in] ev_p of the event being deleted.
+ * @return
+ * - 0 on success.
+ * - -1 on failure.
+ */
+int fr_timer_delete(fr_timer_t **ev_p)
+{
+ fr_timer_t *ev;
+ int ret;
+
+ if (unlikely(!*ev_p)) return 0;
+
+ ev = *ev_p;
+ ret = talloc_free(ev); /* Destructor removed event from any lists */
+
+ /*
+ * Don't leave a garbage pointer in *ev_p,
+ * which may not be the same as ev->parent
+ * (the destructor only clears the latter).
+ */
+ if (likely(ret == 0)) {
+ *ev_p = NULL;
+ } else {
+ EVENT_DEBUG("Deleting timer %p failed: %s", ev, fr_strerror_peek());
+ }
+
+ return 0;
+}
+
+/** Internal timestamp representing when the timer should fire
+ *
+ * @return When the timer should fire.
+ */
+fr_time_t fr_timer_when(fr_timer_t *ev)
+{
+ return ev->when;
+}
+
+/** Check if a timer event is armed
+ *
+ * @param[in] ev to check.
+ * @return
+ * - true if the event is armed.
+ * - false if the event is not armed.
+ */
+bool fr_timer_armed(fr_timer_t *ev)
+{
+ return ev && EVENT_ARMED(ev);
+}
+
+/** Run all scheduled timer events in a lst
+ *
+ * @param[in] tl containing the timer events.
+ * @param[in,out] when Process events scheduled to run before or at this time.
+ * - Set to 0 if no more events.
+ * - Set to the next event time if there are more events.
+ * @return
+ * - <0 on failure.
+ * - 0 no timer events fired.
+ * - >0 the number of timer events fired.
+ */
+static int timer_list_lst_run(fr_timer_list_t *tl, fr_time_t *when)
+{
+ fr_timer_cb_t callback;
+ void *uctx;
+ fr_timer_t *ev;
+ int fired = 0;
+
+ while (fr_lst_num_elements(tl->lst) > 0) {
+ ev = fr_lst_peek(tl->lst);
+
+ /*
+ * See if it's time to do this one.
+ */
+ if (fr_time_gt(ev->when, *when)) {
+ *when = ev->when;
+ done:
+ return fired;
+ }
+
+ callback = ev->callback;
+ memcpy(&uctx, &ev->uctx, sizeof(uctx));
+
+ CHECK_PARENT(ev);
+
+ /*
+ * Disarm the event before calling it.
+ *
+ * This leaves the memory in place,
+ * but disassociates it from the list.
+ *
+ * We use the public function as it
+ * handles more cases.
+ */
+ if (!fr_cond_assert(fr_timer_disarm(ev) == 0)) return -2;
+ EVENT_DEBUG("Running timer %p", ev);
+ if (ev->free_on_fire) talloc_free(ev);
+
+ callback(tl, *when, uctx);
+
+ fired++;
+ }
+
+ *when = fr_time_wrap(0);
+
+ goto done;
+}
+
+/** Run all scheduled events in an ordered list
+ *
+ * @param[in] tl containing the timer events.
+ * @param[in,out] when Process events scheduled to run before or at this time.
+ * - Set to 0 if no more events.
+ * - Set to the next event time if there are more events.
+ * @return
+ * - <0 on failure.
+ * - 0 no timer events fired.
+ * - >0 the number of timer events fired.
+ */
+static int timer_list_ordered_run(fr_timer_list_t *tl, fr_time_t *when)
+{
+ fr_timer_cb_t callback;
+ void *uctx;
+ fr_timer_t *ev;
+ unsigned int fired = 0;
+
+ while ((ev = timer_head(&tl->ordered))) {
+ /*
+ * See if it's time to do this one.
+ */
+ if (fr_time_gt(ev->when, *when)) {
+ *when = ev->when;
+ done:
+ return fired;
+ }
+
+ callback = ev->callback;
+ memcpy(&uctx, &ev->uctx, sizeof(uctx));
+
+ CHECK_PARENT(ev);
+
+ /*
+ * Disarm the event before calling it.
+ *
+ * This leaves the memory in place,
+ * but disassociates it from the list.
+ *
+ * We use the public function as it
+ * handles more cases.
+ */
+ if (!fr_cond_assert(fr_timer_disarm(ev) == 0)) return -2;
+
+ EVENT_DEBUG("Running timer %p", ev);
+ if (ev->free_on_fire) talloc_free(ev);
+
+ callback(tl, *when, uctx);
+
+ fired++;
+ }
+
+ *when = fr_time_wrap(0);
+
+ goto done;
+}
+
+/** Execute any pending events in the event loop
+ *
+ * @param[in] tl to execute events in.
+ * @param[in,out] when Process events scheduled to run before or at this time.
+ * - Set to 0 if no more events.
+ * - Set to the next event time if there are more events.
+ * @return
+ * - <0 if we failed to update the parent list.
+ * - 0 no timer events fired.
+ * - >0 the number of timer events fired.
+ */
+int fr_timer_list_run(fr_timer_list_t *tl, fr_time_t *when)
+{
+ int ret;
+
+ tl->in_handler = true;
+ ret = timer_funcs[tl->type].run(tl, when);
+ tl->in_handler = false;
+
+ /*
+ * Now we've executed all the pending events,
+ * merge the deferred events into the main
+ * event list.
+ *
+ * The events don't need to be modified as they
+ * were initialised completely before being
+ * placed in the deferred list.
+ */
+ if (timer_num_elements(&tl->deferred) > 0) {
+ if (unlikely(timer_funcs[tl->type].deferred(tl) < 0)) return -1;
+ if (unlikely(timer_list_parent_update(tl) < 0)) return -1;
+ /*
+ * We ran some events, and have no deferred
+ * events to insert, so we need to forcefully
+ * update the parent timer.
+ */
+ } else if (ret > 0) {
+ if (unlikely(timer_list_parent_update(tl) < 0)) return -1;
+ }
+
+ return ret;
+}
+
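+/* Sketch (illustrative): driving a standalone timer list from a custom loop.
+ * On return 'when' holds the next wakeup time, or 0 when no events remain.
+ *
+ * fr_time_t when = fr_time();
+ * int fired = fr_timer_list_run(tl, &when);
+ * if (fired < 0) fr_perror("Failed running timer list");
+ */
+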
+/** Return the head of the event list
+ *
+ * @param[in] tl to get the head of.
+ * @return
+ * - The head of the list.
+ * - NULL, if there's no head.
+ */
+static fr_timer_t *timer_list_lst_head(fr_timer_list_t *tl)
+{
+ return fr_lst_peek(tl->lst);
+}
+
+/** Return the head of the ordered list
+ *
+ * @param[in] tl to get the head of.
+ * @return
+ * - The head of the list.
+ * - NULL, if there's no head.
+ */
+static fr_timer_t *timer_list_ordered_head(fr_timer_list_t *tl)
+{
+ return timer_head(&tl->ordered);
+}
+
+/** Move all deferred events into the lst
+ *
+ * @param[in] tl to move events in.
+ * @return
+ * - 0 on success.
+ * - -1 on failure.
+ */
+static int timer_list_lst_deferred(fr_timer_list_t *tl)
+{
+ fr_timer_t *ev;
+
+ while ((ev = timer_pop_head(&tl->deferred))) {
+ if (unlikely(timer_lst_insert_at(tl, ev) < 0)) {
+ timer_insert_head(&tl->deferred, ev); /* Don't lose track of events we failed to insert */
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/** Move all deferred events into the ordered event list
+ *
+ * This operation is O(1).
+ *
+ * @param[in] tl to move events in.
+ * @return
+ * - 0 on success.
+ * - -1 on failure.
+ */
+static int timer_list_ordered_deferred(fr_timer_list_t *tl)
+{
+#ifndef NDEBUG
+ {
+ fr_timer_t *head, *tail;
+
+ head = timer_head(&tl->deferred);
+ tail = timer_tail(&tl->ordered);
+
+ /*
+ * Something has gone catastrophically wrong if the
+ * deferred event is earlier than the last event in
+ * the ordered list, given all the checks we do.
+ */
+ fr_cond_assert_msg(!head || !tail || fr_time_gteq(head->when, tail->when),
+ "Deferred event is earlier than the last event in the ordered list");
+ }
+#endif
+
+ /*
+ * O(1) operation. Much better than moving the
+ * events individually.
+ */
+ timer_move(&tl->ordered, &tl->deferred);
+
+ return 0;
+}
+
+static uint64_t timer_list_lst_num_events(fr_timer_list_t *tl)
+{
+ return fr_lst_num_elements(tl->lst);
+}
+
+static uint64_t timer_list_ordered_num_events(fr_timer_list_t *tl)
+{
+ return timer_num_elements(&tl->ordered);
+}
+
+/** Return number of pending events
+ *
+ * @note This includes deferred events, i.e. those yet to be inserted into the main list
+ *
+ * @param[in] tl to get the number of events from.
+ * @return
+ * - The number of events in the list.
+ */
+uint64_t fr_timer_list_num_events(fr_timer_list_t *tl)
+{
+ uint64_t num = timer_funcs[tl->type].num_events(tl);
+
+ return num + timer_num_elements(&tl->deferred);
+}
+
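+/* Sketch (illustrative): draining every pending timer in a test harness by
+ * running the list with a time far in the future.
+ *
+ * fr_time_t when;
+ * while (fr_timer_list_num_events(tl) > 0) {
+ *         when = fr_time_wrap(INT64_MAX);
+ *         (void) fr_timer_list_run(tl, &when);
+ * }
+ */
+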
+/** Return the time of the next event
+ *
+ * @param[in] tl to get the next event time from.
+ * @return
+ * - >0 the time of the next event.
+ * - 0 if there are no more events.
+ */
+fr_time_t fr_timer_list_when(fr_timer_list_t *tl)
+{
+ fr_timer_t *ev = timer_funcs[tl->type].head(tl);
+
+ if (ev) return ev->when;
+
+ return fr_time_wrap(0);
+}
+
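+/* Sketch: the 0 sentinel doubles as "no events", so callers test the result
+ * with fr_time_neq(), as fr_event_corral() does.  'schedule_wakeup' is an
+ * invented name.
+ *
+ * fr_time_t next = fr_timer_list_when(tl);
+ * if (fr_time_neq(next, fr_time_wrap(0))) schedule_wakeup(next);
+ */
+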
+/** Override event list time source
+ *
+ * @param[in] tl to set new time function for.
+ * @param[in] func to set.
+ */
+void fr_timer_list_set_time_func(fr_timer_list_t *tl, fr_event_time_source_t func)
+{
+ tl->pub.time = func;
+}
+
+/** Cleanup all timers currently in the list
+ *
+ * @param[in] tl to cleanup.
+ * @return
+ * - 0 on success.
+ * - -1 on failure.
+ */
+static int _timer_list_free(fr_timer_list_t *tl)
+{
+ fr_timer_t *ev;
+
+ if (unlikely(tl->in_handler)) {
+ fr_strerror_const("Cannot free event timer list while in handler");
+ return -1;
+ }
+
+ if (tl->parent_ev) fr_timer_delete(&tl->parent_ev);
+
+ while ((ev = timer_funcs[tl->type].head(tl))) {
+ if (talloc_free(ev) < 0) return -1;
+ }
+
+ return 0;
+}
+
+static fr_timer_list_t *timer_list_alloc(TALLOC_CTX *ctx, fr_timer_list_t *parent)
+{
+ fr_timer_list_t *tl;
+
+ tl = talloc_zero(ctx, fr_timer_list_t);
+ if (unlikely(tl == NULL)) {
+ fr_strerror_const("Out of memory");
+ return NULL;
+ }
+
+ timer_talloc_init(&tl->deferred);
+	if (parent) {
+ tl->parent = parent;
+ tl->pub.time = parent->pub.time;
+ } else {
+ tl->pub.time = fr_time;
+ }
+ talloc_set_destructor(tl, _timer_list_free);
+
+ return tl;
+}
+
+/** Allocate a new lst based timer list
+ *
+ * @param[in] ctx to allocate the timer list in.
+ * @param[in] parent optional parent list to insert this list's head timer event into.
+ */
+fr_timer_list_t *fr_timer_list_lst_alloc(TALLOC_CTX *ctx, fr_timer_list_t *parent)
+{
+ fr_timer_list_t *tl;
+
+ if (unlikely((tl = timer_list_alloc(ctx, parent)) == NULL)) return NULL;
+
+ tl->lst = fr_lst_talloc_alloc(tl, timer_cmp, fr_timer_t, lst_idx, 0);
+ if (unlikely(tl->lst == NULL)) {
+ fr_strerror_const("Failed allocating timer list");
+ talloc_free(tl);
+ return NULL;
+ }
+ tl->type = TIMER_LIST_TYPE_LST;
+
+#ifdef WITH_EVENT_REPORT
+ fr_timer_in(tl, tl, &tl->report, fr_time_delta_from_sec(EVENT_REPORT_FREQ), false, fr_timer_report, NULL);
+#endif
+
+ return tl;
+}
+
+/** Allocate a new sorted event timer list
+ *
+ * @param[in] ctx to allocate the event timer list from.
+ * @param[in] parent optional parent list to insert this list's head timer event into.
+ */
+fr_timer_list_t *fr_timer_list_ordered_alloc(TALLOC_CTX *ctx, fr_timer_list_t *parent)
+{
+ fr_timer_list_t *tl;
+
+ if (unlikely((tl = timer_list_alloc(ctx, parent)) == NULL)) return NULL;
+
+ timer_talloc_init(&tl->ordered);
+ tl->type = TIMER_LIST_TYPE_ORDERED;
+
+ return tl;
+}
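+/* Example (illustrative): nesting an ordered list under an lst list.  The
+ * child re-arms a single timer in its parent for its earliest event (see
+ * timer_list_parent_update() above), so the parent tracks one entry per
+ * child no matter how many timers the child holds.
+ *
+ *	fr_timer_list_t *parent = fr_timer_list_lst_alloc(ctx, NULL);
+ *	fr_timer_list_t *child = fr_timer_list_ordered_alloc(ctx, parent);
+ */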
+
+#if defined(WITH_EVENT_DEBUG) && !defined(NDEBUG)
+static const fr_time_delta_t decades[18] = {
+ { 1 }, { 10 }, { 100 },
+ { 1000 }, { 10000 }, { 100000 },
+ { 1000000 }, { 10000000 }, { 100000000 },
+ { 1000000000 }, { 10000000000 }, { 100000000000 },
+ { 1000000000000 }, { 10000000000000 }, { 100000000000000 },
+ { 1000000000000000 }, { 10000000000000000 }, { 100000000000000000 },
+};
+
+static const char *decade_names[18] = {
+ "1ns", "10ns", "100ns",
+ "1us", "10us", "100us",
+ "1ms", "10ms", "100ms",
+ "1s", "10s", "100s",
+ "1Ks", "10Ks", "100Ks",
+ "1Ms", "10Ms", "100Ms", /* 1 year is 300Ms */
+};
+
+typedef struct {
+ fr_rb_node_t node;
+ char const *file;
+ int line;
+ uint32_t count;
+} fr_event_counter_t;
+
+static int8_t timer_location_cmp(void const *one, void const *two)
+{
+ fr_event_counter_t const *a = one;
+ fr_event_counter_t const *b = two;
+
+ CMP_RETURN(a, b, file);
+
+ return CMP(a->line, b->line);
+}
+
+static int _event_report_process(fr_rb_tree_t **locations, size_t array[], fr_time_t now, fr_timer_t *ev)
+{
+ fr_time_delta_t diff = fr_time_sub(ev->when, now);
+ size_t i;
+
+ for (i = 0; i < NUM_ELEMENTS(decades); i++) {
+ if ((fr_time_delta_cmp(diff, decades[i]) <= 0) || (i == NUM_ELEMENTS(decades) - 1)) {
+ fr_event_counter_t find = { .file = ev->file, .line = ev->line };
+ fr_event_counter_t *counter;
+
+ counter = fr_rb_find(locations[i], &find);
+ if (!counter) {
+ counter = talloc(locations[i], fr_event_counter_t);
+ if (!counter) {
+ EVENT_DEBUG("Can't do report, out of memory");
+ return -1;
+ }
+ counter->file = ev->file;
+ counter->line = ev->line;
+ counter->count = 1;
+ fr_rb_insert(locations[i], counter);
+ } else {
+ counter->count++;
+ }
+
+ array[i]++;
+ break;
+ }
+ }
+
+ return 0;
+}
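+/* Bucketing example: an event due in 2.5ms lands in the "10ms" bucket, the
+ * first decade its delta is <= to; anything later than the largest decade is
+ * swept into the final bucket by the (i == NUM_ELEMENTS(decades) - 1) fallback.
+ */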
+
+/** Print out information about timer events in the event loop
+ *
+ */
+void fr_timer_report(fr_timer_list_t *tl, fr_time_t now, void *uctx)
+{
+ fr_lst_iter_t iter;
+ fr_timer_t *ev;
+ size_t i;
+
+ size_t array[NUM_ELEMENTS(decades)] = { 0 };
+ fr_rb_tree_t *locations[NUM_ELEMENTS(decades)];
+ TALLOC_CTX *tmp_ctx;
+ static pthread_mutex_t print_lock = PTHREAD_MUTEX_INITIALIZER;
+
+ tmp_ctx = talloc_init_const("temporary stats");
+ if (!tmp_ctx) {
+ oom:
+ EVENT_DEBUG("Can't do report, out of memory");
+ talloc_free(tmp_ctx);
+ return;
+ }
+
+ for (i = 0; i < NUM_ELEMENTS(decades); i++) {
+ locations[i] = fr_rb_inline_alloc(tmp_ctx, fr_event_counter_t, node, timer_location_cmp, NULL);
+ if (!locations[i]) goto oom;
+ }
+
+ switch (tl->type) {
+ case TIMER_LIST_TYPE_LST:
+ /*
+ * Show which events are due, when they're due,
+ * and where they were allocated
+ */
+ for (ev = fr_lst_iter_init(tl->lst, &iter);
+ ev != NULL;
+ ev = fr_lst_iter_next(tl->lst, &iter)) {
+ if (_event_report_process(locations, array, now, ev) < 0) goto oom;
+ }
+ break;
+
+ case TIMER_LIST_TYPE_ORDERED:
+ /*
+ * Show which events are due, when they're due,
+ * and where they were allocated
+ */
+ for (ev = timer_head(&tl->ordered);
+ ev != NULL;
+ ev = timer_next(&tl->ordered, ev)) {
+ if (_event_report_process(locations, array, now, ev) < 0) goto oom;
+ }
+ break;
+ }
+
+ pthread_mutex_lock(&print_lock);
+ EVENT_DEBUG("num timer events: %"PRIu64, fr_timer_list_num_events(tl));
+
+ for (i = 0; i < NUM_ELEMENTS(decades); i++) {
+ fr_rb_iter_inorder_t event_iter;
+ void *node;
+
+ if (!array[i]) continue;
+
+ if (i == 0) {
+ EVENT_DEBUG(" events <= %5s : %zu", decade_names[i], array[i]);
+ } else if (i == (NUM_ELEMENTS(decades) - 1)) {
+ EVENT_DEBUG(" events > %5s : %zu", decade_names[i - 1], array[i]);
+ } else {
+ EVENT_DEBUG(" events %5s - %5s : %zu", decade_names[i - 1], decade_names[i], array[i]);
+ }
+
+ for (node = fr_rb_iter_init_inorder(&event_iter, locations[i]);
+ node;
+ node = fr_rb_iter_next_inorder(&event_iter)) {
+ fr_event_counter_t *counter = talloc_get_type_abort(node, fr_event_counter_t);
+
+ EVENT_DEBUG(" : %u allocd at %s[%d]",
+ counter->count, counter->file, counter->line);
+ }
+ }
+ pthread_mutex_unlock(&print_lock);
+
+ fr_timer_in(tl, tl, &tl->report, fr_time_delta_from_sec(EVENT_REPORT_FREQ), false, fr_timer_report, uctx);
+ talloc_free(tmp_ctx);
+}
+
+void fr_timer_dump(fr_timer_list_t *tl)
+{
+ fr_lst_iter_t iter;
+ fr_timer_t *ev;
+ fr_time_t now = tl->pub.time(); /* Get the current time */
+
+#define TIMER_DUMP(_ev) \
+ EVENT_DEBUG("%s[%d]: %p time=%" PRId64 " (%c), callback=%p", \
+ (_ev)->file, (_ev)->line, _ev, fr_time_unwrap((_ev)->when), \
+ fr_time_gt(now, (_ev)->when) ? '<' : '>', (_ev)->callback);
+
+ EVENT_DEBUG("Time is now %"PRId64"", fr_time_unwrap(now));
+
+ switch (tl->type) {
+ case TIMER_LIST_TYPE_LST:
+ EVENT_DEBUG("Dumping lst timer list");
+
+ for (ev = fr_lst_iter_init(tl->lst, &iter);
+ ev;
+ ev = fr_lst_iter_next(tl->lst, &iter)) {
+ (void)talloc_get_type_abort(ev, fr_timer_t);
+ TIMER_DUMP(ev);
+ }
+ break;
+
+ case TIMER_LIST_TYPE_ORDERED:
+ EVENT_DEBUG("Dumping ordered timer list");
+
+ for (ev = timer_head(&tl->ordered);
+ ev;
+ ev = timer_next(&tl->ordered, ev)) {
+ (void)talloc_get_type_abort(ev, fr_timer_t);
+ TIMER_DUMP(ev);
+ }
+ break;
+ }
+}
+#endif
--- /dev/null
+#pragma once
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+ */
+
+/** Timer lists with event callbacks
+ *
+ * @file src/lib/util/timer.h
+ *
+ * @copyright 2025 Arran Cudbard-Bell (a.cudbardb@freeradius.org)
+ */
+RCSIDH(timer_h, "$Id$")
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <freeradius-devel/util/time.h>
+#include <freeradius-devel/util/talloc.h>
+
+/*
+ * Allow public and private versions of the same structures
+ */
+#ifdef _CONST
+# error _CONST can only be defined in the local header
+#endif
+#ifndef _TIMER_PRIVATE
+typedef struct fr_timer_list_pub_s fr_timer_list_t;
+# define _CONST const
+#else
+# define _CONST
+#endif
+
+/** Alternative time source, useful for testing
+ *
+ * @return the current time in nanoseconds past the epoch.
+ */
+typedef fr_time_t (*fr_event_time_source_t)(void);
+
+/** Public event timer list structure
+ *
+ * Make the current list time, and time source available, but nothing else.
+ *
+ * This allows us to access these values without the cost of a function call.
+ */
+struct fr_timer_list_pub_s {
+ fr_event_time_source_t _CONST time; //!< Time source this list uses to get the current time
+ ///< when calculating deltas (fr_timer_in).
+};
+
+/** An opaque timer handle
+ */
+typedef struct fr_timer_s fr_timer_t;
+
+/** Called when a timer event fires
+ *
+ * @param[in] tl timer list event was inserted into.
+ * @param[in] now The current time.
+ * @param[in] uctx User ctx passed to #fr_timer_in or #fr_timer_at.
+ */
+typedef void (*fr_timer_cb_t)(fr_timer_list_t *tl, fr_time_t now, void *uctx);
+
+int _fr_timer_at(NDEBUG_LOCATION_ARGS
+ TALLOC_CTX *ctx, fr_timer_list_t *tl, fr_timer_t **ev,
+ fr_time_t when, bool free_on_fire, fr_timer_cb_t callback, void const *uctx)
+ CC_HINT(nonnull(NDEBUG_LOCATION_NONNULL(2), NDEBUG_LOCATION_NONNULL(3), NDEBUG_LOCATION_NONNULL(6)));
+#define fr_timer_at(...) _fr_timer_at(NDEBUG_LOCATION_EXP __VA_ARGS__)
+
+int _fr_timer_in(NDEBUG_LOCATION_ARGS
+ TALLOC_CTX *ctx, fr_timer_list_t *tl, fr_timer_t **ev,
+ fr_time_delta_t delta, bool free_on_fire, fr_timer_cb_t callback, void const *uctx)
+ CC_HINT(nonnull(NDEBUG_LOCATION_NONNULL(2), NDEBUG_LOCATION_NONNULL(3), NDEBUG_LOCATION_NONNULL(6)));
+#define fr_timer_in(...) _fr_timer_in(NDEBUG_LOCATION_EXP __VA_ARGS__)
+
+int fr_timer_disarm(fr_timer_t *ev) CC_HINT(nonnull); /* disarms but does not free */
+
+int fr_timer_delete(fr_timer_t **ev_p) CC_HINT(nonnull); /* disarms AND frees */
+
+fr_time_t fr_timer_when(fr_timer_t *ev) CC_HINT(nonnull);
+
+bool fr_timer_armed(fr_timer_t *ev);
+
+int fr_timer_list_run(fr_timer_list_t *tl, fr_time_t *when) CC_HINT(nonnull);
+
+uint64_t fr_timer_list_num_events(fr_timer_list_t *tl) CC_HINT(nonnull);
+
+fr_time_t fr_timer_list_when(fr_timer_list_t *tl) CC_HINT(nonnull);
+
+void fr_timer_list_set_time_func(fr_timer_list_t *tl, fr_event_time_source_t func) CC_HINT(nonnull);
+
+fr_timer_list_t *fr_timer_list_lst_alloc(TALLOC_CTX *ctx, fr_timer_list_t *parent);
+
+fr_timer_list_t *fr_timer_list_ordered_alloc(TALLOC_CTX *ctx, fr_timer_list_t *parent);
+
+#ifdef WITH_EVENT_DEBUG
+void fr_timer_report(fr_timer_list_t *tl, fr_time_t now, void *uctx);
+void fr_timer_dump(fr_timer_list_t *tl);
+#endif
+
+#undef _CONST
+
+#ifdef __cplusplus
+}
+#endif
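+
+/* Example usage (sketch, not normative): arm a one-shot timer and drive the
+ * list manually.  'ctx' is any talloc ctx; error handling elided.
+ *
+ *	static void expire(fr_timer_list_t *tl, fr_time_t now, void *uctx)
+ *	{
+ *		DEBUG("timer fired");
+ *	}
+ *
+ *	fr_timer_list_t	*tl = fr_timer_list_lst_alloc(ctx, NULL);
+ *	fr_timer_t	*ev = NULL;
+ *
+ *	if (fr_timer_in(ctx, tl, &ev, fr_time_delta_from_sec(5),
+ *			false, expire, NULL) < 0) goto error;
+ *
+ *	fr_time_t when = tl->time();		// public struct exposes the time source
+ *	(void) fr_timer_list_run(tl, &when);	// fires anything due at 'when'
+ */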
/*
* Send one BFD packet.
*/
-static void bfd_send_packet(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *ctx)
+static void bfd_send_packet(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *ctx)
{
bfd_session_t *session = ctx;
bfd_packet_t bfd;
/*
* Send one BFD packet.
*/
-static void bfd_unlang_send_packet(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *ctx)
+static void bfd_unlang_send_packet(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *ctx)
{
bfd_session_t *session = ctx;
bfd_packet_t *bfd;
{
uint64_t interval, base;
uint64_t jitter;
- fr_event_timer_cb_t cb;
+ fr_timer_cb_t cb;
if (session->ev_packet) return;
/*
* Reset the timers.
*/
- fr_event_timer_delete(&session->ev_packet);
+ fr_timer_delete(&session->ev_packet);
if (fr_time_delta_cmp(session->desired_min_tx_interval, session->remote_min_rx_interval) >= 0) {
interval = fr_time_delta_unwrap(session->desired_min_tx_interval);
cb = bfd_send_packet;
}
- if (fr_event_timer_in(session, session->el, &session->ev_packet,
- fr_time_delta_wrap(interval),
- cb, session) < 0) {
+ if (fr_timer_in(session, session->el->tl, &session->ev_packet,
+ fr_time_delta_wrap(interval),
+ false, cb, session) < 0) {
fr_assert("Failed to insert event" == NULL);
}
}
/*
* We failed to see a packet.
*/
-static void bfd_detection_timeout(UNUSED fr_event_list_t *el, fr_time_t now, void *ctx)
+static void bfd_detection_timeout(UNUSED fr_timer_list_t *tl, fr_time_t now, void *ctx)
{
bfd_session_t *session = ctx;
uint64_t delay;
fr_time_delta_t delta;
- fr_event_timer_delete(&session->ev_timeout);
+ fr_timer_delete(&session->ev_timeout);
delay = fr_time_delta_unwrap(session->detection_time);
delay *= session->detect_multi;
timeout = fr_time_add(when, delta);
- if (fr_event_timer_at(session, session->el, &session->ev_timeout,
- timeout, bfd_detection_timeout, session) < 0) {
+ if (fr_timer_at(session, session->el->tl, &session->ev_timeout,
+ timeout, false, bfd_detection_timeout, session) < 0) {
fr_assert("Failed to insert event" == NULL);
}
}
*/
static int bfd_stop_control(bfd_session_t *session)
{
- fr_event_timer_delete(&session->ev_timeout);
- fr_event_timer_delete(&session->ev_packet);
+ fr_timer_delete(&session->ev_timeout);
+ fr_timer_delete(&session->ev_packet);
return 1;
}
fr_event_list_t *el; //!< event list
fr_network_t *nr; //!< network side of things
- struct sockaddr_storage remote_sockaddr; //!< cached for laziness
+ struct sockaddr_storage remote_sockaddr; //!< cached for laziness
socklen_t remote_salen;
struct sockaddr_storage local_sockaddr; //!< cached for laziness
/*
* Internal state management
*/
- fr_event_timer_t const *ev_timeout; //!< when we time out for not receiving a packet
- fr_event_timer_t const *ev_packet; //!< for when we next send a packet
- fr_time_t last_recv; //!< last received packet
- fr_time_t next_recv; //!< when we next expect to receive a packet
- fr_time_t last_sent; //!< the last time we sent a packet
+ fr_timer_t *ev_timeout; //!< when we time out for not receiving a packet
+ fr_timer_t *ev_packet; //!< for when we next send a packet
+ fr_time_t last_recv; //!< last received packet
+ fr_time_t next_recv; //!< when we next expect to receive a packet
+ fr_time_t last_sent; //!< the last time we sent a packet
bfd_session_state_t session_state; //!< our view of the session state
bfd_session_state_t remote_session_state; //!< their view of the session state
proto_cron_crontab_t const *inst;
- fr_event_timer_t const *ev; //!< for writing statistics
+ fr_timer_t *ev; //!< for writing statistics
fr_listen_t *parent; //!< master IO handler
* Called when tm.tm_sec == 0. If it isn't zero, then it means
* that the timer is late, and we treat it as if tm.tm_sec == 0.
*/
-static void do_cron(fr_event_list_t *el, fr_time_t now, void *uctx)
+static void do_cron(fr_timer_list_t *tl, fr_time_t now, void *uctx)
{
proto_cron_crontab_thread_t *thread = uctx;
struct tm tm;
cf_section_name2(thread->inst->parent->server_cs), buffer, end - start);
}
- if (fr_event_timer_at(thread, el, &thread->ev, fr_time_add(now, fr_time_delta_from_sec(end - start)),
- do_cron, thread) < 0) {
+ if (fr_timer_at(thread, tl, &thread->ev, fr_time_add(now, fr_time_delta_from_sec(end - start)),
+ false, do_cron, thread) < 0) {
fr_assert(0);
}
thread->inst = inst;
thread->bootstrap = true;
- do_cron(el, fr_time(), thread);
+ do_cron(el->tl, fr_time(), thread);
}
static char const *mod_name(fr_listen_t *li)
off_t header_offset; //!< offset of the current header we're reading
off_t read_offset; //!< where we're reading from in filename_work
- fr_event_timer_t const *ev; //!< for detail file timers.
+ fr_timer_t *ev; //!< for detail file timers.
pthread_mutex_t worker_mutex; //!< for the workers
int num_workers; //!< number of workers
if (has_worker) return;
- if (thread->ev) fr_event_timer_delete(&thread->ev);
+ if (thread->ev) fr_timer_delete(&thread->ev);
work_init(thread, false);
}
/*
* Start polling again after a timeout.
*/
-static void work_retry_timer(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+static void work_retry_timer(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
{
proto_detail_file_thread_t *thread = talloc_get_type_abort(uctx, proto_detail_file_thread_t);
DEBUG3("proto_detail (%s): Waiting %.6fs for lock on file %s",
thread->name, fr_time_delta_unwrap(delay) / (double)NSEC, inst->filename_work);
- if (fr_event_timer_in(thread, thread->el, &thread->ev,
- delay, work_retry_timer, thread) < 0) {
+ if (fr_timer_in(thread, thread->el->tl, &thread->ev, delay,
+ false, work_retry_timer, thread) < 0) {
ERROR("Failed inserting retry timer for %s", inst->filename_work);
}
return 0;
*/
DEBUG3("Waiting %d.000000s for new files in %s", inst->poll_interval, thread->name);
- if (fr_event_timer_in(thread, thread->el, &thread->ev,
- fr_time_delta_from_sec(inst->poll_interval), work_retry_timer, thread) < 0) {
+ if (fr_timer_in(thread, thread->el->tl, &thread->ev,
+ fr_time_delta_from_sec(inst->poll_interval),
+ false, work_retry_timer, thread) < 0) {
ERROR("Failed inserting poll timer for %s", inst->filename_work);
}
return;
* therefore change permissions, so that libkqueue can
* read it.
*/
- if (fr_event_timer_in(thread, thread->el, &thread->ev,
- fr_time_delta_from_sec(1), work_retry_timer, thread) < 0) {
+ if (fr_timer_in(thread, thread->el->tl, &thread->ev,
+ fr_time_delta_from_sec(1), false, work_retry_timer, thread) < 0) {
ERROR("Failed inserting poll timer for %s", thread->filename_work);
}
}
size_t packet_len; //!< for retransmissions
fr_retry_t retry; //!< our retry timers
- fr_event_timer_t const *ev; //!< retransmission timer
+ fr_timer_t *ev; //!< retransmission timer
fr_dlist_t entry; //!< for the retransmission list
} fr_detail_entry_t;
}
-static void work_retransmit(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+static void work_retransmit(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
{
fr_detail_entry_t *track = talloc_get_type_abort(uctx, fr_detail_entry_t);
proto_detail_work_thread_t *thread = track->parent;
DEBUG("%s - packet %d failed during processing. Will retransmit in %.6fs",
thread->name, track->id, fr_time_delta_unwrap(track->retry.rt) / (double)NSEC);
- if (fr_event_timer_at(thread, thread->el, &track->ev,
- track->retry.next, work_retransmit, track) < 0) {
+ if (fr_timer_at(thread, thread->el->tl, &track->ev,
+ track->retry.next, false, work_retransmit, track) < 0) {
ERROR("%s - Failed inserting retransmission timeout", thread->name);
fail:
if (inst->track_progress && (track->done_offset > 0)) goto mark_done;
* Whilst persistent search LDAP servers don't provide cookies as such
* we treat change numbers, if provided, as cookies.
*/
- fr_event_timer_in(sync, conn->conn->el, &sync->cookie_ev, inst->cookie_interval, ldap_sync_cookie_event, sync);
+ fr_timer_in(sync, conn->conn->el->tl, &sync->cookie_ev, inst->cookie_interval,
+ false, ldap_sync_cookie_event, sync);
return 0;
}
CONF_SECTION *cs; //!< Config section where this sync was defined.
//!< Used for logging.
- fr_event_timer_t const *ev; //!< Event for retrying cookie load
+ fr_timer_t *ev; //!< Event for retrying cookie load
/*
* Callbacks for various events
* A cookie at the head says that all the previous changes have been
* completed, so the cookie can be sent.
*/
-void ldap_sync_cookie_event(fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+void ldap_sync_cookie_event(fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
{
sync_state_t *sync = talloc_get_type_abort(uctx, sync_state_t);
sync_packet_ctx_t *sync_packet_ctx;
ldap_sync_cookie_send(sync_packet_ctx);
finish:
- (void) fr_event_timer_in(sync, el, &sync->cookie_ev, sync->inst->cookie_interval, ldap_sync_cookie_event, sync);
+ (void) fr_timer_in(sync, tl, &sync->cookie_ev, sync->inst->cookie_interval,
+ false, ldap_sync_cookie_event, sync);
}
/** Enqueue a new cookie store packet
* Looks at the head of the list of pending sync packets for unsent
* change packets and sends any up to the first cookie.
*/
-static void ldap_sync_retry_event(fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+static void ldap_sync_retry_event(fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
{
sync_state_t *sync = talloc_get_type_abort(uctx, sync_state_t);
sync_packet_ctx_t *sync_packet_ctx = NULL;
* packets - reschedule a retry event.
*/
if (sync_packet_ctx) {
- (void) fr_event_timer_in(sync, el, &sync->retry_ev, sync->inst->retry_interval,
- ldap_sync_retry_event, sync);
+ (void) fr_timer_in(sync, tl, &sync->retry_ev, sync->inst->retry_interval,
+ false, ldap_sync_retry_event, sync);
}
}
* Send the packet and if it fails to send add a retry event
*/
if ((ldap_sync_entry_send_network(sync_packet_ctx) < 0) &&
- (fr_event_timer_in(sync, sync->conn->conn->el, &sync->retry_ev,
- sync->inst->retry_interval, ldap_sync_retry_event, sync) < 0)) {
+ (fr_timer_in(sync, sync->conn->conn->el->tl, &sync->retry_ev,
+ sync->inst->retry_interval, false, ldap_sync_retry_event, sync) < 0)) {
PERROR("Inserting LDAP sync retry timer failed");
}
* Performs complete re-initialization of a connection. Called during socket_open
* to create the initial connection and again any time we need to reopen the connection.
*
- * @param[in] el the event list managing listen event.
+ * @param[in] tl the event list managing listen event.
* @param[in] now current time.
* @param[in] user_ctx Listener.
*/
-static void proto_ldap_connection_init(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *user_ctx)
+static void proto_ldap_connection_init(fr_timer_list_t *tl, UNUSED fr_time_t now, void *user_ctx)
{
fr_listen_t *listen = talloc_get_type_abort(user_ctx, fr_listen_t);
proto_ldap_sync_ldap_thread_t *thread = talloc_get_type_abort(listen->thread_instance, proto_ldap_sync_ldap_thread_t);
PERROR("Failed (re)initialising connection, will retry in %pV seconds",
fr_box_time_delta(inst->handle_config.reconnection_delay));
- if (fr_event_timer_in(thread, thread->el, &thread->conn_retry_ev,
- inst->handle_config.reconnection_delay,
- proto_ldap_connection_init, listen) < 0) {
+ if (fr_timer_in(thread, tl, &thread->conn_retry_ev,
+ inst->handle_config.reconnection_delay,
+ false, proto_ldap_connection_init, listen) < 0) {
FATAL("Failed inserting event: %s", fr_strerror());
}
}
/** Timer event to retry running "load Cookie" on failures
*
*/
-static void proto_ldap_cookie_load_retry(fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) {
+static void proto_ldap_cookie_load_retry(fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
+{
proto_ldap_cookie_load_retry_ctx *retry_ctx = talloc_get_type_abort(uctx, proto_ldap_cookie_load_retry_ctx);
DEBUG2("Retrying \"load Cookie\" for sync no %ld", retry_ctx->sync_no);
retry_ctx->thread) < 0) {
ERROR("Failed retrying \"load Cookie\". Will try again in %pV seconds",
fr_box_time_delta(retry_ctx->inst->handle_config.reconnection_delay));
- (void) fr_event_timer_in(retry_ctx->thread->conn->h, el,
- &retry_ctx->inst->parent->sync_config[retry_ctx->sync_no]->ev,
- retry_ctx->inst->handle_config.reconnection_delay,
- proto_ldap_cookie_load_retry, retry_ctx);
+ (void) fr_timer_in(retry_ctx->thread->conn->h, tl,
+ &retry_ctx->inst->parent->sync_config[retry_ctx->sync_no]->ev,
+ retry_ctx->inst->handle_config.reconnection_delay,
+ false, proto_ldap_cookie_load_retry, retry_ctx);
return;
}
talloc_free(retry_ctx);
.sync_no = packet_id,
};
- (void) fr_event_timer_in(thread->conn->h, thread->el, &inst->parent->sync_config[packet_id]->ev,
- inst->handle_config.reconnection_delay,
- proto_ldap_cookie_load_retry, retry_ctx);
+ (void) fr_timer_in(thread->conn->h, thread->el->tl, &inst->parent->sync_config[packet_id]->ev,
+ inst->handle_config.reconnection_delay,
+ false, proto_ldap_cookie_load_retry, retry_ctx);
}
break;
if (prev == CONNECTION_STATE_CONNECTED) {
ERROR("LDAP connection closed. Scheduling restart in %pVs",
fr_box_time_delta(inst->handle_config.reconnection_delay));
- if (fr_event_timer_in(thread, thread->el, &thread->conn_retry_ev,
- inst->handle_config.reconnection_delay,
- proto_ldap_connection_init, listen) < 0) {
+ if (fr_timer_in(thread, thread->el->tl, &thread->conn_retry_ev,
+ inst->handle_config.reconnection_delay,
+ false, proto_ldap_connection_init, listen) < 0) {
FATAL("Failed inserting event: %s", fr_strerror());
}
}
if (ldap_conn->fd < 0) {
connection_failed:
- if (fr_event_timer_in(thread, thread->el, &thread->conn_retry_ev,
- inst->handle_config.reconnection_delay,
- proto_ldap_connection_init, listen) < 0) {
+ if (fr_timer_in(thread, thread->el->tl, &thread->conn_retry_ev,
+ inst->handle_config.reconnection_delay,
+ false, proto_ldap_connection_init, listen) < 0) {
FATAL("Failed inserting event: %s", fr_strerror());
}
return;
/*
* Initialise the connection
*/
- proto_ldap_connection_init(el, fr_event_list_time(el), li);
+ proto_ldap_connection_init(el->tl, fr_event_list_time(el), li);
}
static int mod_instantiate(module_inst_ctx_t const *mctx)
uint32_t changes_since_cookie; //!< How many changes have been added since
//!< the last cookie was stored.
- fr_event_timer_t const *cookie_ev; //!< Timer event for sending cookies.
- fr_event_timer_t const *retry_ev; //!< Timer event for retrying failed changes.
+ fr_timer_t *cookie_ev; //!< Timer event for sending cookies.
+ fr_timer_t *retry_ev; //!< Timer event for retrying failed changes.
fr_pair_list_t trigger_args; //!< Arguments to make available in triggers.
};
fr_listen_t *parent; //!< master IO handler.
fr_listen_t *li; //!< Our listener.
- fr_event_timer_t const *conn_retry_ev; //!< When to retry re-establishing the conn.
+ fr_timer_t *conn_retry_ev; //!< When to retry re-establishing the conn.
connection_t *conn; //!< Our connection to the LDAP directory.
} proto_ldap_sync_ldap_thread_t;
int ldap_sync_cookie_store(sync_state_t *sync, bool refresh);
-void ldap_sync_cookie_event(fr_event_list_t *el, fr_time_t now, void *uctx);
+void ldap_sync_cookie_event(fr_timer_list_t *tl, fr_time_t now, void *uctx);
int ldap_sync_cookie_send(sync_packet_ctx_t *sync_packet_ctx);
/*
* Register event to store cookies at a regular interval
*/
- if (fr_event_timer_in(sync, conn->conn->el, &sync->cookie_ev,
- inst->cookie_interval, ldap_sync_cookie_event, sync) < 0) {
+ if (fr_timer_in(sync, conn->conn->el->tl, &sync->cookie_ev,
+ inst->cookie_interval, false, ldap_sync_cookie_event, sync) < 0) {
PERROR("Inserting LDAP cookie timer failed");
goto error;
}
fr_stats_t stats; //!< statistics for this socket
int fd; //!< for CSV files
- fr_event_timer_t const *ev; //!< for writing statistics
+ fr_timer_t *ev; //!< for writing statistics
fr_listen_t *parent; //!< master IO handler
} proto_load_step_thread_t;
}
-static void write_stats(fr_event_list_t *el, fr_time_t now, void *uctx)
+static void write_stats(fr_timer_list_t *tl, fr_time_t now, void *uctx)
{
proto_load_step_thread_t *thread = uctx;
size_t len;
char buffer[1024];
- (void) fr_event_timer_in(thread, el, &thread->ev, fr_time_delta_from_sec(1), write_stats, thread);
+ (void) fr_timer_in(thread, tl, &thread->ev, fr_time_delta_from_sec(1), false, write_stats, thread);
len = fr_load_generator_stats_sprint(thread->l, now, buffer, sizeof(buffer));
if (write(thread->fd, buffer, len) < 0) {
return;
}
- (void) fr_event_timer_in(thread, thread->el, &thread->ev, fr_time_delta_from_sec(1), write_stats, thread);
+ (void) fr_timer_in(thread, thread->el->tl, &thread->ev, fr_time_delta_from_sec(1), false, write_stats, thread);
len = fr_load_generator_stats_sprint(thread->l, fr_time(), buffer, sizeof(buffer));
if (write(thread->fd, buffer, len) < 0) {
/** Callback when LDAP query times out
*
*/
-static void ldap_query_timeout(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+static void ldap_query_timeout(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
{
fr_ldap_query_t *query = talloc_get_type_abort(uctx, fr_ldap_query_t);
- trunk_request_t *treq;
+ trunk_request_t *treq;
request_t *request;
/*
goto query_error;
}
- if (fr_event_timer_in(query, unlang_interpret_event_list(request), &query->ev, handle_config->res_timeout,
- ldap_query_timeout, query) < 0) {
+ if (fr_timer_in(query, unlang_interpret_event_list(request)->tl, &query->ev, handle_config->res_timeout,
+ false, ldap_query_timeout, query) < 0) {
REDEBUG("Unable to set timeout for LDAP query");
trunk_request_signal_cancel(query->treq);
goto query_error;
fr_time_t last_sent; //!< last time we sent a packet.
fr_time_t last_idle; //!< last time we had nothing to do
- fr_event_timer_t const *zombie_ev; //!< Zombie timeout.
+ fr_timer_t *zombie_ev; //!< Zombie timeout.
bool status_checking; //!< whether we're doing status checks
bio_request_t *status_u; //!< for sending status check packets
size_t partial; //!< partially sent data
radius_track_entry_t *rr; //!< ID tracking, resend count, etc.
- fr_event_timer_t const *ev; //!< timer for retransmissions
+ fr_timer_t *ev; //!< timer for retransmissions
fr_retry_t retry; //!< retransmission timers
};
u->num_replies = 0; /* Reset */
u->retry.start = fr_time_wrap(0);
- if (u->ev) (void) fr_event_timer_delete(&u->ev);
+ if (u->ev) (void) fr_timer_delete(&u->ev);
bio_request_reset(u);
}
*
* Setup retries, or fail the connection.
*/
-static void conn_init_timeout(fr_event_list_t *el, fr_time_t now, void *uctx)
+static void conn_init_timeout(UNUSED fr_timer_list_t *tl, fr_time_t now, void *uctx)
{
connection_t *conn = talloc_get_type_abort(uctx, connection_t);
bio_handle_t *h;
return;
case FR_RETRY_CONTINUE:
- if (fr_event_fd_insert(h, NULL, el, h->fd, conn_init_writable, NULL,
+ if (fr_event_fd_insert(h, NULL, conn->el, h->fd, conn_init_writable, NULL,
conn_init_error, conn) < 0) {
PERROR("%s - Failed inserting FD event", h->ctx.module_name);
connection_signal_reconnect(conn, CONNECTION_FAILED);
/** Perform the next step of init and negotiation.
*
*/
-static void conn_init_next(fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+static void conn_init_next(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
{
connection_t *conn = talloc_get_type_abort(uctx, connection_t);
bio_handle_t *h = talloc_get_type_abort(conn->h, bio_handle_t);
- if (fr_event_fd_insert(h, NULL, el, h->fd, conn_init_writable, NULL, conn_init_error, conn) < 0) {
+ if (fr_event_fd_insert(h, NULL, conn->el, h->fd, conn_init_writable, NULL, conn_init_error, conn) < 0) {
PERROR("%s - Failed inserting FD event", h->ctx.module_name);
connection_signal_reconnect(conn, CONNECTION_FAILED);
}
/*
* Set the timer for the next retransmit.
*/
- if (fr_event_timer_at(h, el, &u->ev, u->retry.next, conn_init_next, conn) < 0) {
+ if (fr_timer_at(h, el->tl, &u->ev, u->retry.next, false, conn_init_next, conn) < 0) {
connection_signal_reconnect(conn, CONNECTION_FAILED);
}
return;
h->ctx.module_name, (u->retry.count == 1) ? "Originated" : "Retransmitted",
fr_box_time_delta(u->retry.rt));
- if (fr_event_timer_at(h, el, &u->ev, u->retry.next, conn_init_timeout, conn) < 0) {
+ if (fr_timer_at(h, el->tl, &u->ev, u->retry.next, false, conn_init_timeout, conn) < 0) {
PERROR("%s - Failed inserting timer event", h->ctx.module_name);
goto fail;
}
fr_assert(h->fd >= 0);
- if (h->status_u) fr_event_timer_delete(&h->status_u->ev);
+ if (h->status_u) fr_timer_delete(&h->status_u->ev);
/*
* The connection code will take care of deleting the FD from the event loop.
/*
* Reset the Status-Server checks.
*/
- if (h->status_u && h->status_u->ev) (void) fr_event_timer_delete(&h->status_u->ev);
+ if (h->status_u && h->status_u->ev) (void) fr_timer_delete(&h->status_u->ev);
}
break;
/** Revive a connection after "revive_interval"
*
*/
-static void revive_timeout(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+static void revive_timeout(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
{
trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
bio_handle_t *h = talloc_get_type_abort(tconn->conn->h, bio_handle_t);
/** Mark a connection dead after "zombie_interval"
*
*/
-static void zombie_timeout(fr_event_list_t *el, fr_time_t now, void *uctx)
+static void zombie_timeout(fr_timer_list_t *tl, fr_time_t now, void *uctx)
{
trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
bio_handle_t *h = talloc_get_type_abort(tconn->conn->h, bio_handle_t);
/*
* Revive the connection after a time.
*/
- if (fr_event_timer_at(h, el, &h->zombie_ev,
- fr_time_add(now, h->ctx.inst->revive_interval), revive_timeout, tconn) < 0) {
+ if (fr_timer_at(h, tl, &h->zombie_ev,
+ fr_time_add(now, h->ctx.inst->revive_interval), false,
+ revive_timeout, tconn) < 0) {
ERROR("Failed inserting revive timeout for connection");
trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED);
}
trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED);
}
} else {
- if (fr_event_timer_at(h, el, &h->zombie_ev, fr_time_add(now, h->ctx.inst->zombie_period),
- zombie_timeout, tconn) < 0) {
+ if (fr_timer_at(h, el->tl, &h->zombie_ev, fr_time_add(now, h->ctx.inst->zombie_period),
+ false, zombie_timeout, tconn) < 0) {
ERROR("Failed inserting zombie timeout for connection");
trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED);
}
/** Handle retries for a status check
*
*/
-static void status_check_next(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+static void status_check_next(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
{
trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
bio_handle_t *h = talloc_get_type_abort(tconn->conn->h, bio_handle_t);
/*
* Set the timer for the next retransmit.
*/
- if (fr_event_timer_at(h, h->ctx.el, &u->ev, u->retry.next, status_check_next, treq->tconn) < 0) {
+ if (fr_timer_at(h, h->ctx.el->tl, &u->ev, u->retry.next, false, status_check_next, treq->tconn) < 0) {
trunk_connection_signal_reconnect(treq->tconn, CONNECTION_FAILED);
}
return;
* queued for sendmmsg but never actually
* sent.
*/
- if (u->ev) (void) fr_event_timer_delete(&u->ev);
+ if (u->ev) (void) fr_timer_delete(&u->ev);
}
/*
bio_request_t *u = preq_to_reset;
bio_handle_t *h = talloc_get_type_abort(conn->h, bio_handle_t);
- if (u->ev) (void)fr_event_timer_delete(&u->ev);
+ if (u->ev) (void)fr_timer_delete(&u->ev);
bio_request_reset(u);
if (h->ctx.inst->mode == RLM_RADIUS_MODE_REPLICATE) return;
fr_assert_msg(!u->ev, "bio_request_t freed with active timer");
- if (u->ev) (void) fr_event_timer_delete(&u->ev);
+ if (u->ev) (void) fr_timer_delete(&u->ev);
fr_assert(u->rr == NULL);
TALLOC_CTX *log_ctx; //!< Prevent unneeded memory allocation by keeping a
//!< permanent pool, to store log entries.
fr_dlist_head_t queries; //!< Outstanding queries on this connection.
- fr_event_timer_t const *read_ev; //!< Polling event for reading query results.
- fr_event_timer_t const *write_ev; //!< Polling event for sending queries.
+ fr_timer_t *read_ev; //!< Polling event for reading query results.
+ fr_timer_t *write_ev; //!< Polling event for sending queries.
uint poll_interval; //!< Interval between read polling.
uint poll_count; //!< How many consecutive polls had no available results.
} rlm_sql_cassandra_conn_t;
}
}
-static void sql_trunk_connection_read_poll(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+static void sql_trunk_connection_read_poll(fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
{
rlm_sql_cassandra_conn_t *c = talloc_get_type_abort(uctx, rlm_sql_cassandra_conn_t);
cassandra_query_t *cass_query, *next_query = NULL;
* There are still outstanding queries, add another polling event
*/
if (fr_dlist_num_elements(&c->queries)) {
- if (fr_event_timer_in(c, el, &c->read_ev, fr_time_delta_from_usec(c->poll_interval),
- sql_trunk_connection_read_poll, c) < 0) {
+ if (fr_timer_in(c, tl, &c->read_ev, fr_time_delta_from_usec(c->poll_interval),
+ false, sql_trunk_connection_read_poll, c) < 0) {
ERROR("Unable to insert polling event");
}
}
}
-static void sql_trunk_connection_write_poll(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+static void sql_trunk_connection_write_poll(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
{
trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
* This "notify" callback sets up the appropriate polling events.
*/
CC_NO_UBSAN(function) /* UBSAN: false positive - public vs private connection_t trips --fsanitize=function */
-static void sql_trunk_connection_notify(UNUSED trunk_connection_t *tconn, connection_t *conn, UNUSED fr_event_list_t *el,
+static void sql_trunk_connection_notify(UNUSED trunk_connection_t *tconn, connection_t *conn, fr_event_list_t *el,
trunk_connection_event_t notify_on, UNUSED void *uctx)
{
rlm_sql_cassandra_conn_t *c = talloc_get_type_abort(conn->h, rlm_sql_cassandra_conn_t);
switch (notify_on) {
case TRUNK_CONN_EVENT_NONE:
- if (c->read_ev) fr_event_timer_delete(&c->read_ev);
- if (c->write_ev) fr_event_timer_delete(&c->write_ev);
+ if (c->read_ev) fr_timer_delete(&c->read_ev);
+ if (c->write_ev) fr_timer_delete(&c->write_ev);
return;
case TRUNK_CONN_EVENT_BOTH:
case TRUNK_CONN_EVENT_READ:
if (fr_dlist_num_elements(&c->queries)) {
- if (fr_event_timer_in(c, el, &c->read_ev, fr_time_delta_from_usec(c->poll_interval),
- sql_trunk_connection_read_poll, c) < 0) {
+ if (fr_timer_in(c, el->tl, &c->read_ev, fr_time_delta_from_usec(c->poll_interval),
+ false, sql_trunk_connection_read_poll, c) < 0) {
ERROR("Unable to insert polling event");
}
}
FALL_THROUGH;
case TRUNK_CONN_EVENT_WRITE:
- if (fr_event_timer_in(c, el, &c->write_ev, fr_time_delta_from_usec(0),
- sql_trunk_connection_write_poll, tconn) < 0) {
+ if (fr_timer_in(c, el->tl, &c->write_ev, fr_time_delta_from_usec(0),
+ false, sql_trunk_connection_write_poll, tconn) < 0) {
ERROR("Unable to insert polling event");
}
return;
connection_t *conn; //!< Generic connection structure for this connection.
rlm_sql_config_t const *config; //!< SQL instance configuration.
fr_sql_query_t *query_ctx; //!< Current request running on the connection.
- fr_event_timer_t const *read_ev; //!< Timer event for polling reading this connection
- fr_event_timer_t const *write_ev; //!< Timer event for polling writing this connection
+ fr_timer_t *read_ev; //!< Timer event for polling reading this connection
+ fr_timer_t *write_ev; //!< Timer event for polling writing this connection
uint select_interval; //!< How frequently this connection gets polled for select queries.
uint query_interval; //!< How frequently this connection gets polled for other queries.
uint poll_count; //!< How many polls have been done for the current query.
trunk_request_signal_cancel_complete(treq);
}
-static void sql_trunk_connection_read_poll(fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+static void sql_trunk_connection_read_poll(fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
{
rlm_sql_oracle_conn_t *c = talloc_get_type_abort(uctx, rlm_sql_oracle_conn_t);
fr_sql_query_t *query_ctx = c->query_ctx;
switch (ret) {
case OCI_STILL_EXECUTING:
ROPTIONAL(RDEBUG3, DEBUG3, "Still awaiting response");
- if (fr_event_timer_in(c, el, &c->read_ev,
- fr_time_delta_from_usec(query_ctx->type == SQL_QUERY_SELECT ? c->select_interval : c->query_interval),
- sql_trunk_connection_read_poll, c) < 0) {
+			if (fr_timer_in(c, tl, &c->read_ev,
+ fr_time_delta_from_usec(query_ctx->type == SQL_QUERY_SELECT ? c->select_interval : c->query_interval),
+ false, sql_trunk_connection_read_poll, c) < 0) {
ERROR("Unable to insert polling event");
}
return;
ret = OCIBreak(c->ctx, c->error);
if (ret == OCI_STILL_EXECUTING) {
ROPTIONAL(RDEBUG3, DEBUG3, "Still awaiting response");
- if (fr_event_timer_in(c, el, &c->read_ev, fr_time_delta_from_usec(query_ctx->type == SQL_QUERY_SELECT ? c->select_interval : c->query_interval),
- sql_trunk_connection_read_poll, c) < 0) {
+			if (fr_timer_in(c, tl, &c->read_ev, fr_time_delta_from_usec(query_ctx->type == SQL_QUERY_SELECT ? c->select_interval : c->query_interval),
+ false, sql_trunk_connection_read_poll, c) < 0) {
ERROR("Unable to insert polling event");
}
return;
if (request) unlang_interpret_mark_runnable(request);
}
-static void sql_trunk_connection_write_poll(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+static void sql_trunk_connection_write_poll(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
{
trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
uint poll_interval = (query_ctx && query_ctx->type != SQL_QUERY_SELECT) ? c->query_interval : c->select_interval;
switch (notify_on) {
case TRUNK_CONN_EVENT_NONE:
- if (c->read_ev) fr_event_timer_delete(&c->read_ev);
- if (c->write_ev) fr_event_timer_delete(&c->write_ev);
+ if (c->read_ev) fr_timer_delete(&c->read_ev);
+ if (c->write_ev) fr_timer_delete(&c->write_ev);
return;
case TRUNK_CONN_EVENT_BOTH:
case TRUNK_CONN_EVENT_READ:
if (c->query_ctx) {
- if (fr_event_timer_in(c, el, &c->read_ev, fr_time_delta_from_usec(poll_interval),
- sql_trunk_connection_read_poll, c) < 0) {
+			if (fr_timer_in(c, el->tl, &c->read_ev, fr_time_delta_from_usec(poll_interval),
+ false, sql_trunk_connection_read_poll, c) < 0) {
ERROR("Unable to insert polling event");
}
}
FALL_THROUGH;
case TRUNK_CONN_EVENT_WRITE:
- if (fr_event_timer_in(c, el, &c->write_ev, fr_time_delta_from_usec(0),
- sql_trunk_connection_write_poll, tconn) < 0) {
+		if (fr_timer_in(c, el->tl, &c->write_ev, fr_time_delta_from_usec(0),
+ false, sql_trunk_connection_write_poll, tconn) < 0) {
ERROR("Unable to insert polling event");
}
return;
rlm_sql_config_t const *config; /* SQL instance configuration */
SQLUSMALLINT async_mode; /* What Async mode does this driver support */
fr_sql_query_t *query_ctx; /* Current query running on the connection */
- fr_event_timer_t const *read_ev; /* Timer event for polling reading this connection */
- fr_event_timer_t const *write_ev; /* Timer event for polling writing this connection */
+ fr_timer_t *read_ev; /* Timer event for polling reading this connection */
+ fr_timer_t *write_ev; /* Timer event for polling writing this connection */
uint select_interval; /* How frequently this connection gets polled for select queries */
uint query_interval; /* How frequently this connection gets polled for other queries */
uint poll_count; /* How many polls have been done for the current query */
{
rlm_sql_unixodbc_conn_t *c = talloc_get_type_abort(h, rlm_sql_unixodbc_conn_t);
- if (c->read_ev) fr_event_timer_delete(&c->read_ev);
- if (c->write_ev) fr_event_timer_delete(&c->write_ev);
+ if (c->read_ev) fr_timer_delete(&c->read_ev);
+ if (c->write_ev) fr_timer_delete(&c->write_ev);
if (c->stmt) SQLFreeHandle(SQL_HANDLE_STMT, c->stmt);
return CONNECTION_STATE_CONNECTED;
}
-static void sql_trunk_connection_init_poll(fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+static void sql_trunk_connection_init_poll(fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
{
rlm_sql_unixodbc_conn_t *c = talloc_get_type_abort(uctx, rlm_sql_unixodbc_conn_t);
SQLRETURN ret;
UNCONST(SQLCHAR *, c->config->sql_password), strlen(c->config->sql_password));
if (ret == SQL_STILL_EXECUTING) {
- if (fr_event_timer_in(c, el, &c->read_ev, fr_time_delta_from_usec(c->query_interval),
- sql_trunk_connection_init_poll, c) < 0) {
+ if (fr_timer_in(c, tl, &c->read_ev, fr_time_delta_from_usec(c->query_interval),
+ false, sql_trunk_connection_init_poll, c) < 0) {
ERROR("Unable to insert polling event");
connection_signal_reconnect(c->conn, CONNECTION_FAILED);
}
UNCONST(SQLCHAR *, config->sql_password), strlen(config->sql_password));
if (ret == SQL_STILL_EXECUTING) {
- if (fr_event_timer_in(c, conn->el, &c->read_ev, fr_time_delta_from_usec(c->query_interval),
- sql_trunk_connection_init_poll, c) < 0) {
+ if (fr_timer_in(c, conn->el->tl, &c->read_ev, fr_time_delta_from_usec(c->query_interval),
+ false, sql_trunk_connection_init_poll, c) < 0) {
ERROR("Unable to insert polling event");
goto error;
}
trunk_request_signal_cancel_complete(treq);
}
-static void sql_trunk_connection_read_poll(fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+static void sql_trunk_connection_read_poll(fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
{
rlm_sql_unixodbc_conn_t *c = talloc_get_type_abort(uctx, rlm_sql_unixodbc_conn_t);
fr_sql_query_t *query_ctx = c->query_ctx;
}
if (ret == SQL_STILL_EXECUTING) {
ROPTIONAL(RDEBUG3, DEBUG3, "Still awaiting response");
- if (fr_event_timer_in(c, el, &c->read_ev,
- fr_time_delta_from_usec(query_ctx->type == SQL_QUERY_SELECT ? c->select_interval : c->query_interval),
- sql_trunk_connection_read_poll, c) < 0) {
+ if (fr_timer_in(c, tl, &c->read_ev,
+ fr_time_delta_from_usec(query_ctx->type == SQL_QUERY_SELECT ? c->select_interval : c->query_interval),
+ false, sql_trunk_connection_read_poll, c) < 0) {
ERROR("Unable to insert polling event");
}
return;
ret = SQLCancel(c->stmt);
if (ret == SQL_STILL_EXECUTING) {
ROPTIONAL(RDEBUG3, DEBUG3, "Still awaiting response");
- if (fr_event_timer_in(c, el, &c->read_ev, fr_time_delta_from_usec(query_ctx->type == SQL_QUERY_SELECT ? c->select_interval : c->query_interval),
- sql_trunk_connection_read_poll, c) < 0) {
+ if (fr_timer_in(c, tl, &c->read_ev, fr_time_delta_from_usec(query_ctx->type == SQL_QUERY_SELECT ? c->select_interval : c->query_interval),
+ false, sql_trunk_connection_read_poll, c) < 0) {
ERROR("Unable to insert polling event");
}
return;
if (request) unlang_interpret_mark_runnable(request);
}
-static void sql_trunk_connection_write_poll(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+static void sql_trunk_connection_write_poll(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
{
trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
* This "notify" callback sets up the appropriate polling events.
*/
CC_NO_UBSAN(function) /* UBSAN: false positive - public vs private connection_t trips --fsanitize=function */
-static void sql_trunk_connection_notify(UNUSED trunk_connection_t *tconn, connection_t *conn, UNUSED fr_event_list_t *el,
+static void sql_trunk_connection_notify(UNUSED trunk_connection_t *tconn, connection_t *conn, fr_event_list_t *el,
trunk_connection_event_t notify_on, UNUSED void *uctx)
{
rlm_sql_unixodbc_conn_t *c = talloc_get_type_abort(conn->h, rlm_sql_unixodbc_conn_t);
uint poll_interval = (query_ctx && query_ctx->type != SQL_QUERY_SELECT) ? c->query_interval : c->select_interval;
switch (notify_on) {
case TRUNK_CONN_EVENT_NONE:
- if (c->read_ev) fr_event_timer_delete(&c->read_ev);
- if (c->write_ev) fr_event_timer_delete(&c->write_ev);
+ if (c->read_ev) fr_timer_delete(&c->read_ev);
+ if (c->write_ev) fr_timer_delete(&c->write_ev);
return;
case TRUNK_CONN_EVENT_BOTH:
case TRUNK_CONN_EVENT_READ:
if (c->query_ctx) {
- if (fr_event_timer_in(c, el, &c->read_ev, fr_time_delta_from_usec(poll_interval),
- sql_trunk_connection_read_poll, c) < 0) {
+ if (fr_timer_in(c, el->tl, &c->read_ev, fr_time_delta_from_usec(poll_interval),
+ false, sql_trunk_connection_read_poll, c) < 0) {
ERROR("Unable to insert polling event");
}
}
FALL_THROUGH;
case TRUNK_CONN_EVENT_WRITE:
- if (fr_event_timer_in(c, el, &c->write_ev, fr_time_delta_from_usec(0),
- sql_trunk_connection_write_poll, tconn) < 0) {
+ if (fr_timer_in(c, el->tl, &c->write_ev, fr_time_delta_from_usec(0),
+ false, sql_trunk_connection_write_poll, tconn) < 0) {
ERROR("Unable to insert polling event");
}
return;
fr_time_t last_sent; //!< last time we sent a packet.
fr_time_t last_idle; //!< last time we had nothing to do
- fr_event_timer_t const *zombie_ev; //!< Zombie timeout.
+ fr_timer_t *zombie_ev; //!< Zombie timeout.
trunk_connection_t *tconn; //!< trunk connection
} udp_handle_t;
uint8_t *packet; //!< Packet we write to the network.
size_t packet_len; //!< Length of the packet.
- fr_event_timer_t const *ev; //!< timer for retransmissions
+ fr_timer_t *ev; //!< timer for retransmissions
fr_retry_t retry; //!< retransmission timers
};
u->outstanding = false;
h->active--;
- if (u->ev) (void)fr_event_timer_delete(&u->ev);
+ if (u->ev) (void)fr_timer_delete(&u->ev);
/*
* We've sent 255 packets, and received all replies. Shut the connection down.
/** Revive a connection after "revive_interval"
*
*/
-static void revive_timeout(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+static void revive_timeout(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
{
trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
udp_handle_t *h = talloc_get_type_abort(tconn->conn->h, udp_handle_t);
/** Mark a connection dead after "zombie_interval"
*
*/
-static void zombie_timeout(fr_event_list_t *el, fr_time_t now, void *uctx)
+static void zombie_timeout(fr_timer_list_t *tl, fr_time_t now, void *uctx)
{
trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
udp_handle_t *h = talloc_get_type_abort(tconn->conn->h, udp_handle_t);
/*
* Revive the connection after a time.
*/
- if (fr_event_timer_at(h, el, &h->zombie_ev,
- fr_time_add(now, h->inst->parent->revive_interval), revive_timeout, h) < 0) {
+ if (fr_timer_at(h, tl, &h->zombie_ev,
+ fr_time_add(now, h->inst->parent->revive_interval), false, revive_timeout, h) < 0) {
ERROR("Failed inserting revive timeout for connection");
trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED);
}
* - true if the connection is zombie.
* - false if the connection is not zombie.
*/
-static bool check_for_zombie(fr_event_list_t *el, trunk_connection_t *tconn, fr_time_t now, fr_time_t last_sent)
+static bool check_for_zombie(fr_timer_list_t *tl, trunk_connection_t *tconn, fr_time_t now, fr_time_t last_sent)
{
udp_handle_t *h = talloc_get_type_abort(tconn->conn->h, udp_handle_t);
WARN("%s - Entering Zombie state - connection %s", h->module_name, h->name);
trunk_connection_signal_inactive(tconn);
- if (fr_event_timer_at(h, el, &h->zombie_ev, fr_time_add(now, h->inst->parent->zombie_period),
- zombie_timeout, h) < 0) {
+ if (fr_timer_at(h, tl, &h->zombie_ev, fr_time_add(now, h->inst->parent->zombie_period),
+ false, zombie_timeout, h) < 0) {
ERROR("Failed inserting zombie timeout for connection");
trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED);
}
* Note that with TCP we don't actually retry on this particular connection, but the retry timer allows us to
* fail over from one connection to another when a connection fails.
*/
-static void request_retry(fr_event_list_t *el, fr_time_t now, void *uctx)
+static void request_retry(fr_timer_list_t *tl, fr_time_t now, void *uctx)
{
- trunk_request_t *treq = talloc_get_type_abort(uctx, trunk_request_t);
+ trunk_request_t *treq = talloc_get_type_abort(uctx, trunk_request_t);
udp_request_t *u = talloc_get_type_abort(treq->preq, udp_request_t);
udp_result_t *r = talloc_get_type_abort(treq->rctx, udp_result_t);
request_t *request = treq->request;
r->rcode = RLM_MODULE_FAIL;
trunk_request_signal_complete(treq);
- check_for_zombie(el, tconn, now, u->retry.start);
+ check_for_zombie(tl, tconn, now, u->retry.start);
}
CC_NO_UBSAN(function) /* UBSAN: false positive - public vs private connection_t trips --fsanitize=function*/
h->last_sent = u->retry.start;
if (fr_time_lteq(h->first_sent, h->last_idle)) h->first_sent = h->last_sent;
- if (fr_event_timer_at(u, el, &u->ev, u->retry.next, request_retry, treq) < 0) {
+ if (fr_timer_at(u, el->tl, &u->ev, u->retry.next, false, request_retry, treq) < 0) {
RERROR("Failed inserting retransmit timeout for connection");
trunk_request_signal_fail(treq);
}
unbound_io_event_base_t *ev_b; //!< Event base this handle was created for.
- fr_event_timer_t const *timer; //!< Stores the pointer to the enabled timer for
+ fr_timer_t *timer; //!< Stores the pointer to the enabled timer for
///< this event handled. libunbound uses a single
///< handle for managing related FD events and
///< timers, which is weird, but ok...
* given query. The timeout happening causes the timeout against the server
* to be increased for any subsequent queries sent to it.
*/
-static void _unbound_io_service_timer_expired(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+static void _unbound_io_service_timer_expired(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
{
unbound_io_event_t *ev = talloc_get_type_abort(uctx, unbound_io_event_t);
* that it fired. This is imperfect but unbound
* doesn't have a callback for receiving errors.
*/
- if (fr_event_timer_delete(&ev->timer) < 0) {
+ if (fr_timer_delete(&ev->timer) < 0) {
PERROR("ubound event %p - Failed disarming timeout", ev);
}
DEBUG4("unbound event %p - Timeout in %pV seconds", ev, fr_box_time_delta(timeout));
- if (fr_event_timer_in(ev, ev->ev_b->el, &ev->timer,
- timeout, _unbound_io_service_timer_expired, ev) < 0) {
+ if (fr_timer_in(ev, ev->ev_b->el->tl, &ev->timer,
+ timeout, false, _unbound_io_service_timer_expired, ev) < 0) {
PERROR("unbound event %p - Failed adding timeout", ev);
if (ev->events & (UB_EV_READ | UB_EV_WRITE)) {
if (ev->events & UB_EV_TIMEOUT) {
DEBUG4("unbound event %p - Disarming timeout", ev);
- if (ev->timer && (fr_event_timer_delete(&ev->timer) < 0)) {
+ if (ev->timer && (fr_timer_delete(&ev->timer) < 0)) {
PERROR("ubound event %p - Failed disarming timeout", ev);
ret = -1;
ev, uctx, ev->uctx);
ev->uctx = uctx;
}
- if (ev->timer && (fr_event_timer_delete(&ev->timer) < 0)) {
+ if (ev->timer && (fr_timer_delete(&ev->timer) < 0)) {
PERROR("ubound event %p - Failed disarming timeout", ev);
ret = -1; /* Continue ? */
DEBUG4("unbound event %p - Timeout in %pV seconds", ev, fr_box_time_delta(timeout));
- if (fr_event_timer_in(ev, ev->ev_b->el, &ev->timer,
- timeout, _unbound_io_service_timer_expired, ev) < 0) {
+ if (fr_timer_in(ev, ev->ev_b->el->tl, &ev->timer,
+ timeout,
+ false, _unbound_io_service_timer_expired, ev) < 0) {
PERROR("unbound event %p - Failed adding timeout", ev);
ret = -1;
DEBUG4("unbound event %p - Disarming timeout", ev);
- if (ev->timer && (fr_event_timer_delete(&ev->timer) < 0)) {
+ if (ev->timer && (fr_timer_delete(&ev->timer) < 0)) {
PERROR("unbound event %p - Failed disarming timeout", ev);
return -1;
uint16_t count; //!< Number of results to return
fr_value_box_list_t list; //!< Where to put the parsed results
TALLOC_CTX *out_ctx; //!< CTX to allocate parsed results in
- fr_event_timer_t const *ev; //!< Event for timeout
+ fr_timer_t *ev; //!< Event for timeout
} unbound_request_t;
/*
* Request has completed remove timeout event and set
* async_id to 0 so ub_cancel() is not called when ur is freed
*/
- if (ur->ev) (void)fr_event_timer_delete(&ur->ev);
+ if (ur->ev) (void)fr_timer_delete(&ur->ev);
ur->async_id = 0;
/*
/** Callback from our timeout event to cancel a request
*
*/
-static void xlat_unbound_timeout(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+static void xlat_unbound_timeout(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
{
unbound_request_t *ur = talloc_get_type_abort(uctx, unbound_request_t);
request_t *request = ur->request;
{
unbound_request_t *ur = talloc_get_type_abort(xctx->rctx, unbound_request_t);
- if (ur->ev) (void)fr_event_timer_delete(&ur->ev);
+ if (ur->ev) (void)fr_timer_delete(&ur->ev);
RDEBUG2("Forcefully cancelling pending unbound request");
}
return xlat_unbound_resume(ctx, out, &our_xctx, request, in);
}
- if (fr_event_timer_in(ur, ur->t->ev_b->el, &ur->ev, fr_time_delta_from_msec(inst->timeout),
- xlat_unbound_timeout, ur) < 0) {
+ if (fr_timer_in(ur, ur->t->ev_b->el->tl, &ur->ev, fr_time_delta_from_msec(inst->timeout),
+ false, xlat_unbound_timeout, ur) < 0) {
REDEBUG("Unable to attach unbound timeout_envent");
ub_cancel(t->ev_b->ub, ur->async_id);
return XLAT_ACTION_FAIL;
static void radius_client_retry_release(fr_bio_t *bio, fr_bio_retry_entry_t *retry_ctx, UNUSED fr_bio_retry_release_reason_t reason);
static ssize_t radius_client_retry(fr_bio_t *bio, fr_bio_retry_entry_t *retry_ctx, UNUSED const void *buffer, NDEBUG_UNUSED size_t size);
-static void fr_radius_client_bio_connect_timer(fr_event_list_t *el, fr_time_t now, void *uctx);
+static void fr_radius_client_bio_connect_timer(fr_timer_list_t *tl, fr_time_t now, void *uctx);
fr_bio_packet_t *fr_radius_client_bio_alloc(TALLOC_CTX *ctx, fr_radius_client_config_t *cfg, fr_bio_fd_config_t const *fd_cfg)
{
*/
if ((my->info.fd_info->type == FR_BIO_FD_CONNECTED) && !my->info.connected &&
fr_time_delta_ispos(cfg->connection_timeout) && cfg->retry_cfg.el) {
- if (fr_event_timer_in(my, cfg->el, &my->common.ev, cfg->connection_timeout, fr_radius_client_bio_connect_timer, my) < 0) {
+ if (fr_timer_in(my, cfg->el->tl, &my->common.ev, cfg->connection_timeout, false,
+ fr_radius_client_bio_connect_timer, my) < 0) {
talloc_free(my);
return NULL;
}
/** We failed to connect in the given timeout, the connection is dead.
*
*/
-static void fr_radius_client_bio_connect_timer(NDEBUG_UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+static void fr_radius_client_bio_connect_timer(NDEBUG_UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
{
fr_radius_client_fd_bio_t *my = talloc_get_type_abort(uctx, fr_radius_client_fd_bio_t);
- fr_assert(!my->retry || (my->info.retry_info->el == el));
+ fr_assert(!my->retry || (my->info.retry_info->el->tl == tl));
if (my->common.cb.failed) my->common.cb.failed(&my->common);
}