From: Arran Cudbard-Bell
Date: Wed, 26 Mar 2025 01:21:45 +0000 (-0600)
Subject: New nested timer system that avoids constantly reallocating timer memory
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=a10fd08074e52de0eb6689f0f97fed94d14b9d2c;p=thirdparty%2Ffreeradius-server.git

New nested timer system that avoids constantly reallocating timer memory
---

diff --git a/src/bin/radiusd.c b/src/bin/radiusd.c
index a89cb528fe4..846f1af0d96 100644
--- a/src/bin/radiusd.c
+++ b/src/bin/radiusd.c
@@ -168,32 +168,32 @@ do { \
 	goto cleanup; \
 } while (0)
-static fr_event_timer_t const *fr_time_sync_ev = NULL;
+static fr_timer_t *fr_time_sync_ev = NULL;
-static void fr_time_sync_event(fr_event_list_t *el, UNUSED fr_time_t now, UNUSED void *uctx)
+static void fr_time_sync_event(fr_timer_list_t *tl, UNUSED fr_time_t now, UNUSED void *uctx)
 {
 	fr_time_delta_t when = fr_time_delta_from_sec(1);
-	(void) fr_event_timer_in(el, el, &fr_time_sync_ev, when, fr_time_sync_event, NULL);
+	(void) fr_timer_in(tl, tl, &fr_time_sync_ev, when, false, fr_time_sync_event, NULL);
 	(void) fr_time_sync();
 }
 #ifndef NDEBUG
 /** Encourage the server to exit after a period of time
  *
- * @param[in] el	The main loop.
+ * @param[in] tl	The main loop.
  * @param[in] now	Current time. Should be 0, when adding the event.
  * @param[in] uctx	Pointer to a fr_time_delta_t indicating how long
  *			the server should run before exit.
  */
-static void fr_exit_after(fr_event_list_t *el, fr_time_t now, void *uctx)
+static void fr_exit_after(fr_timer_list_t *tl, fr_time_t now, void *uctx)
 {
-	static fr_event_timer_t const *ev;
+	static fr_timer_t *ev;
 	fr_time_delta_t exit_after = *(fr_time_delta_t *)uctx;
 	if (fr_time_eq(now, fr_time_wrap(0))) {
-		if (fr_event_timer_in(el, el, &ev, exit_after, fr_exit_after, uctx) < 0) {
+		if (fr_timer_in(tl, tl, &ev, exit_after, false, fr_exit_after, uctx) < 0) {
 			PERROR("%s: Failed inserting exit event", program);
 		}
 		return;
@@ -978,9 +978,9 @@ int main(int argc, char *argv[])
 		DEBUG("Global memory protected");
 	}
-	fr_time_sync_event(main_loop_event_list(), fr_time(), NULL);
+	fr_time_sync_event(main_loop_event_list()->tl, fr_time(), NULL);
 #ifndef NDEBUG
-	if (fr_time_delta_ispos(exit_after)) fr_exit_after(main_loop_event_list(), fr_time_wrap(0), &exit_after);
+	if (fr_time_delta_ispos(exit_after)) fr_exit_after(main_loop_event_list()->tl, fr_time_wrap(0), &exit_after);
 #endif
 	/*
 	 *	Process requests until HUP or exit.
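The radiusd.c hunks above show the pattern repeated throughout this commit: timer state moves from the old fr_event_timer_* API, which was driven directly by an fr_event_list_t, to the new nested fr_timer_* API, which is driven by a fr_timer_list_t reached through the event list as el->tl. Timer callbacks now receive the fr_timer_list_t, the stored handle changes from fr_event_timer_t const * to a non-const fr_timer_t *, and fr_timer_in()/fr_timer_at() take one extra boolean argument. The following is a minimal before/after sketch of that migration, assuming only the call signatures visible in this diff; old_sync_cb and new_sync_cb are illustrative names, and the meaning of the added boolean (false at almost every call site in this commit) is not documented in these hunks.

/* Old API: the callback gets the event list and the handle is const */
static fr_event_timer_t const *old_sync_ev;

static void old_sync_cb(fr_event_list_t *el, UNUSED fr_time_t now, UNUSED void *uctx)
{
	(void) fr_event_timer_in(el, el, &old_sync_ev, fr_time_delta_from_sec(1), old_sync_cb, NULL);
}

/* New API: the callback gets the timer list (reachable as el->tl), the handle
 * loses the const, and the scheduling call gains a boolean argument. */
static fr_timer_t *new_sync_ev;

static void new_sync_cb(fr_timer_list_t *tl, UNUSED fr_time_t now, UNUSED void *uctx)
{
	(void) fr_timer_in(tl, tl, &new_sync_ev, fr_time_delta_from_sec(1), false, new_sync_cb, NULL);
}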
diff --git a/src/bin/radsniff.c b/src/bin/radsniff.c index 73344aa0c2d..bd045ff53ab 100644 --- a/src/bin/radsniff.c +++ b/src/bin/radsniff.c @@ -841,7 +841,7 @@ static void rs_stats_print_csv(rs_update_t *this, rs_stats_t *stats, UNUSED stru /** Process stats for a single interval * */ -static void rs_stats_process(fr_event_list_t *el, fr_time_t now_t, void *ctx) +static void rs_stats_process(fr_timer_list_t *tl, fr_time_t now_t, void *ctx) { size_t i; size_t rs_codes_len = (NUM_ELEMENTS(rs_useful_codes)); @@ -903,13 +903,14 @@ clear: } { - static fr_event_timer_t const *event; + static fr_timer_t *event; now.tv_sec += conf->stats.interval; now.tv_usec = 0; - if (fr_event_timer_at(NULL, el, &event, - fr_time_from_timeval(&now), rs_stats_process, ctx) < 0) { + if (fr_timer_at(NULL, tl, &event, + fr_time_from_timeval(&now), + false, rs_stats_process, ctx) < 0) { ERROR("Failed inserting stats interval event"); } } @@ -939,7 +940,7 @@ static void rs_stats_update_latency(rs_latency_t *stats, struct timeval *latency static int rs_install_stats_processor(rs_stats_t *stats, fr_event_list_t *el, fr_pcap_t *in, struct timeval *now, bool live) { - static fr_event_timer_t const *event; + static fr_timer_t *event; static rs_update_t update; memset(&update, 0, sizeof(update)); @@ -978,8 +979,9 @@ static int rs_install_stats_processor(rs_stats_t *stats, fr_event_list_t *el, rs_tv_add_ms(now, conf->stats.timeout, &(stats->quiet)); } - if (fr_event_timer_at(NULL, events, (void *) &event, - fr_time_from_timeval(now), rs_stats_process, &update) < 0) { + if (fr_timer_at(NULL, events->tl, (void *) &event, + fr_time_from_timeval(now), + false, rs_stats_process, &update) < 0) { ERROR("Failed inserting stats event"); return -1; } @@ -1044,7 +1046,7 @@ static int _request_free(rs_request_t *request) } if (request->event) { - ret = fr_event_timer_delete(&request->event); + ret = fr_timer_delete(&request->event); if (ret < 0) { fr_perror("Failed deleting timer"); RS_ASSERT(0 == 1); @@ -1129,11 +1131,10 @@ static void rs_packet_cleanup(rs_request_t *request) talloc_free(request); } -static void _rs_event(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *ctx) +static void _rs_event(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *ctx) { rs_request_t *request = talloc_get_type_abort(ctx, rs_request_t); - request->event = NULL; rs_packet_cleanup(request); } @@ -1528,7 +1529,7 @@ static void rs_packet_process(uint64_t count, rs_event_t *event, struct pcap_pkt */ fr_pair_list_free(&original->link_vps); fr_packet_free(&original->linked); - fr_event_timer_delete(&original->event); + fr_timer_delete(&original->event); /* * ...nope it's the first response to a request. 
*/ @@ -1544,8 +1545,9 @@ static void rs_packet_process(uint64_t count, rs_event_t *event, struct pcap_pkt original->linked = talloc_steal(original, packet); fr_pair_list_append(&original->link_vps, &decoded); /* Move the vps over */ rs_tv_add_ms(&header->ts, conf->stats.timeout, &original->when); - if (fr_event_timer_at(NULL, event->list, &original->event, - fr_time_from_timeval(&original->when), _rs_event, original) < 0) { + if (fr_timer_at(original, event->list->tl, &original->event, + fr_time_from_timeval(&original->when), + false, _rs_event, original) < 0) { REDEBUG("Failed inserting new event"); /* * Delete the original request/event, it's no longer valid @@ -1752,7 +1754,7 @@ static void rs_packet_process(uint64_t count, rs_event_t *event, struct pcap_pkt fr_pair_list_append(&original->expect_vps, &search.expect_vps); /* Disarm the timer for the cleanup event for the original request */ - fr_event_timer_delete(&original->event); + fr_timer_delete(&original->event); /* * ...nope it's a new request. */ @@ -1813,8 +1815,9 @@ static void rs_packet_process(uint64_t count, rs_event_t *event, struct pcap_pkt */ original->packet->timestamp = fr_time_from_timeval(&header->ts); rs_tv_add_ms(&header->ts, conf->stats.timeout, &original->when); - if (fr_event_timer_at(NULL, event->list, &original->event, - fr_time_from_timeval(&original->when), _rs_event, original) < 0) { + if (fr_timer_at(original, event->list->tl, &original->event, + fr_time_from_timeval(&original->when), + false, _rs_event, original) < 0) { REDEBUG("Failed inserting new event"); talloc_free(original); @@ -1988,7 +1991,7 @@ static void rs_got_packet(fr_event_list_t *el, int fd, UNUSED int flags, void *c do { now = fr_time_from_timeval(&header->ts); - } while (fr_event_timer_run(el, &now) == 1); + } while (fr_timer_list_run(el->tl, &now) == 1); count++; rs_packet_process(count, event, header, data); @@ -2172,9 +2175,9 @@ static void _unmark_link(void *request) /** Exit the event loop after a given timeout. 
* */ -static void timeout_event(fr_event_list_t *el, UNUSED fr_time_t now_t, UNUSED void *ctx) +static void timeout_event(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now_t, void *ctx) { - fr_event_loop_exit(el, 1); + fr_event_loop_exit(talloc_get_type_abort(ctx, fr_event_list_t), 1); } @@ -2182,9 +2185,9 @@ static void timeout_event(fr_event_list_t *el, UNUSED fr_time_t now_t, UNUSED vo /** Re-open the collectd socket * */ -static void rs_collectd_reopen(fr_event_list_t *el, fr_time_t now, UNUSED void *ctx) +static void rs_collectd_reopen(fr_timer_list_t *tl, fr_time_t now, UNUSED void *ctx) { - static fr_event_timer_t const *event; + static fr_timer_t *event; if (rs_stats_collectd_open(conf) == 0) { DEBUG2("Stats output socket (re)opened"); @@ -2193,9 +2196,9 @@ static void rs_collectd_reopen(fr_event_list_t *el, fr_time_t now, UNUSED void * ERROR("Will attempt to re-establish connection in %i ms", RS_SOCKET_REOPEN_DELAY); - if (fr_event_timer_at(NULL, el, &event, - fr_time_add(now, fr_time_delta_from_msec(RS_SOCKET_REOPEN_DELAY)), - rs_collectd_reopen, el) < 0) { + if (fr_timer_at(NULL, tl, &event, + fr_time_add(now, fr_time_delta_from_msec(RS_SOCKET_REOPEN_DELAY)), + false, rs_collectd_reopen, NULL) < 0) { ERROR("Failed inserting re-open event"); RS_ASSERT(0); } @@ -2241,7 +2244,7 @@ fr_event_list_t *list, int fd, int UNUSED flags, UNUSED void *ctx) switch (sig) { #ifdef HAVE_COLLECTDC_H case SIGPIPE: - rs_collectd_reopen(list, fr_time(), list); + rs_collectd_reopen(list->tl, fr_time(), list); break; #else case SIGPIPE: @@ -2328,7 +2331,7 @@ int main(int argc, char *argv[]) int c; unsigned int timeout = 0; - fr_event_timer_t const *timeout_ev = NULL; + fr_timer_t *timeout_ev = NULL; char const *raddb_dir = RADDBDIR; char const *dict_dir = DICTDIR; TALLOC_CTX *autofree; @@ -3112,8 +3115,8 @@ int main(int argc, char *argv[]) } if (timeout) { - if (fr_event_timer_in(NULL, events, &timeout_ev, fr_time_delta_from_sec(timeout), - timeout_event, NULL) < 0) { + if (fr_timer_in(NULL, events->tl, &timeout_ev, fr_time_delta_from_sec(timeout), + false, timeout_event, events) < 0) { ERROR("Failed inserting timeout event"); } } diff --git a/src/bin/radsniff.h b/src/bin/radsniff.h index 10c2de3c134..24f5e2b34fd 100644 --- a/src/bin/radsniff.h +++ b/src/bin/radsniff.h @@ -183,18 +183,18 @@ typedef struct { */ typedef struct { uint64_t id; //!< Monotonically increasing packet counter. - fr_event_timer_t const *event; //!< Event created when we received the original request. + fr_timer_t *event; //!< Event created when we received the original request. bool logged; //!< Whether any messages regarding this request were logged. struct timeval when; //!< Time when the packet was received, or next time an event //!< is scheduled. fr_pcap_t *in; //!< PCAP handle the original request was received on. - fr_packet_t *packet; //!< The original packet. + fr_packet_t *packet; //!< The original packet. fr_pair_list_t packet_vps; - fr_packet_t *expect; //!< Request/response. + fr_packet_t *expect; //!< Request/response. fr_pair_list_t expect_vps; - fr_packet_t *linked; //!< The subsequent response or forwarded request the packet + fr_packet_t *linked; //!< The subsequent response or forwarded request the packet //!< was linked against. fr_pair_list_t link_vps; //!< fr_pair_ts used to link retransmissions. 
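In radsniff the same API swap also changes ownership and the run loop. Cleanup timers that were previously created with a NULL talloc context are now parented on the request they belong to (fr_timer_at(original, event->list->tl, &original->event, ...)), which matches the commit subject about not constantly reallocating timer memory; _rs_event no longer has to clear request->event by hand; queued timers are drained with fr_timer_list_run(el->tl, &now) rather than fr_event_timer_run(el, &now); and timeout_event() now receives the event list through uctx, because the callback itself is only handed the timer list. Below is a small sketch of the schedule/cancel pair, using only the signatures shown above; schedule_cleanup() and cancel_cleanup() are hypothetical helpers, not functions from this commit.

/* Hypothetical helpers mirroring the calls made in rs_packet_process() above. */
static int schedule_cleanup(rs_request_t *original, fr_event_list_t *list, struct timeval *when)
{
	/* The timer is parented on the request, so freeing the request frees the timer */
	if (fr_timer_at(original, list->tl, &original->event,
			fr_time_from_timeval(when),
			false, _rs_event, original) < 0) return -1;

	return 0;
}

static void cancel_cleanup(rs_request_t *original)
{
	/* Disarm the cleanup timer, e.g. once a response has been linked to the request */
	if (original->event && (fr_timer_delete(&original->event) < 0)) {
		fr_perror("Failed deleting timer");
	}
}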
diff --git a/src/bin/unit_test_module.c b/src/bin/unit_test_module.c index 0fef6f6be58..583c682b540 100644 --- a/src/bin/unit_test_module.c +++ b/src/bin/unit_test_module.c @@ -626,7 +626,7 @@ static request_t *request_clone(request_t *old, int number, CONF_SECTION *server return request; } -static void cancel_request(UNUSED fr_event_list_t *el, UNUSED fr_time_t when, void *uctx) +static void cancel_request(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t when, void *uctx) { request_t *request = talloc_get_type_abort(uctx, request_t); unlang_interpret_signal(request, FR_SIGNAL_CANCEL); @@ -651,7 +651,7 @@ int main(int argc, char *argv[]) fr_pair_list_t filter_vps; bool xlat_only = false; fr_event_list_t *el = NULL; - fr_event_timer_t const *cancel_timer = NULL; + fr_timer_t *cancel_timer = NULL; fr_client_t *client = NULL; fr_dict_t *dict = NULL; fr_dict_t const *dict_check; @@ -1072,7 +1072,7 @@ int main(int argc, char *argv[]) } if (count == 1) { - fr_event_timer_in(request, el, &cancel_timer, config->max_request_time, cancel_request, request); + fr_timer_in(request, el->tl, &cancel_timer, config->max_request_time, false, cancel_request, request); unlang_interpret_synchronous(el, request); } else { @@ -1105,7 +1105,7 @@ int main(int argc, char *argv[]) } #endif - fr_event_timer_in(request, el, &cancel_timer, config->max_request_time, cancel_request, request); + fr_timer_in(request, el->tl, &cancel_timer, config->max_request_time, false, cancel_request, request); unlang_interpret_synchronous(el, request); talloc_free(request); diff --git a/src/include/build.h b/src/include/build.h index e6cc822765a..67158d193ad 100644 --- a/src/include/build.h +++ b/src/include/build.h @@ -262,11 +262,13 @@ do { \ #ifndef NDEBUG # define NDEBUG_LOCATION_ARGS char const *file, int line, # define NDEBUG_LOCATION_VALS file, line, +# define NDEBUG_LOCATION_FMT "%s[%d]: " # define NDEBUG_LOCATION_EXP __FILE__, __LINE__, # define NDEBUG_LOCATION_NONNULL(_num) ((_num) + 2) #else # define NDEBUG_LOCATION_ARGS # define NDEBUG_LOCATION_VALS +# define NDEBUG_LOCATION_FMT "" # define NDEBUG_LOCATION_EXP # define NDEBUG_LOCATION_NONNULL(_num) (_num) #endif diff --git a/src/lib/bio/dedup.c b/src/lib/bio/dedup.c index 38c41bb754b..badd252e5a5 100644 --- a/src/lib/bio/dedup.c +++ b/src/lib/bio/dedup.c @@ -128,7 +128,7 @@ struct fr_bio_dedup_s { fr_bio_dedup_config_t config; - fr_event_timer_t const *ev; + fr_timer_t *ev; /* * The "first" entry is cached here so that we can detect when it changes. The insert / delete @@ -159,7 +159,7 @@ struct fr_bio_dedup_s { FR_DLIST_HEAD(fr_bio_dedup_list) free; //!< free list }; -static void fr_bio_dedup_timer(UNUSED fr_event_list_t *el, fr_time_t now, void *uctx); +static void fr_bio_dedup_timer(UNUSED fr_timer_list_t *el, fr_time_t now, void *uctx); static ssize_t fr_bio_dedup_write(fr_bio_t *bio, void *packet_ctx, void const *buffer, size_t size); static ssize_t fr_bio_dedup_blocked(fr_bio_dedup_t *my, fr_bio_dedup_entry_t *item, ssize_t rcode); static void fr_bio_dedup_release(fr_bio_dedup_t *my, fr_bio_dedup_entry_t *item, fr_bio_dedup_release_reason_t reason); @@ -226,7 +226,7 @@ ssize_t fr_bio_dedup_respond(fr_bio_t *bio, fr_bio_dedup_entry_t *item) case FR_BIO_DEDUP_STATE_ACTIVE: /* * If we're not writing to the socket, just insert the packet into the pending list. 
- */ + */ if (my->bio.write != fr_bio_dedup_write) { (void) fr_bio_dedup_list_remove(&my->active, item); fr_bio_dedup_list_insert_tail(&my->pending, item); @@ -364,7 +364,7 @@ static int fr_bio_dedup_timer_reset(fr_bio_dedup_t *my) /* * Update the timer. This should never fail. */ - if (fr_event_timer_at(my, my->el, &my->ev, first->expires, fr_bio_dedup_timer, my) < 0) return -1; + if (fr_timer_at(my, my->el->tl, &my->ev, first->expires, false, fr_bio_dedup_timer, my) < 0) return -1; my->first = first; return 0; @@ -761,7 +761,7 @@ static ssize_t fr_bio_dedup_blocked_data(fr_bio_dedup_t *my, uint8_t const *buff * * @todo - expire items from the pending list, too */ -static void fr_bio_dedup_timer(UNUSED fr_event_list_t *el, fr_time_t now, void *uctx) +static void fr_bio_dedup_timer(UNUSED fr_timer_list_t *tl, fr_time_t now, void *uctx) { fr_bio_dedup_t *my = talloc_get_type_abort(uctx, fr_bio_dedup_t); fr_bio_dedup_entry_t *item; @@ -811,7 +811,7 @@ static ssize_t fr_bio_dedup_write(fr_bio_t *bio, void *packet_ctx, void const *b */ next = fr_bio_next(&my->bio); fr_assert(next != NULL); - + /* * The caller is trying to flush partial data. But we don't have any partial data, so just call * the next bio to flush it. diff --git a/src/lib/bio/fd.c b/src/lib/bio/fd.c index 40eb079a4ee..80432c1cd27 100644 --- a/src/lib/bio/fd.c +++ b/src/lib/bio/fd.c @@ -1222,7 +1222,7 @@ static void fr_bio_fd_el_connect(NDEBUG_UNUSED fr_event_list_t *el, NDEBUG_UNUSE /** We have a timeout on the conenction * */ -static void fr_bio_fd_el_timeout(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) +static void fr_bio_fd_el_timeout(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx) { fr_bio_fd_t *my = talloc_get_type_abort(uctx, fr_bio_fd_t); @@ -1327,7 +1327,7 @@ int fr_bio_fd_connect_full(fr_bio_t *bio, fr_event_list_t *el, fr_bio_callback_t * Set the timeout callback if asked. */ if (timeout_cb) { - if (fr_event_timer_in(my, el, &my->connect.ev, *timeout, fr_bio_fd_el_timeout, my) < 0) { + if (fr_timer_in(my, el->tl, &my->connect.ev, *timeout, false, fr_bio_fd_el_timeout, my) < 0) { goto error; } } diff --git a/src/lib/bio/fd_open.c b/src/lib/bio/fd_open.c index d2941103e36..92d2738fe61 100644 --- a/src/lib/bio/fd_open.c +++ b/src/lib/bio/fd_open.c @@ -754,7 +754,7 @@ static int fr_bio_fd_socket_bind(fr_bio_fd_t *my, fr_bio_fd_config_t const *cfg) * We've picked a random port in what is hopefully a large range. If that works, we're * done. */ - if (bind(my->info.socket.fd, (struct sockaddr *) &salocal, salen) >= 0) goto done; + if (bind(my->info.socket.fd, (struct sockaddr *) &salocal, salen) == 0) goto done; /* * Hunt & peck. Which is horrible. @@ -774,10 +774,14 @@ static int fr_bio_fd_socket_bind(fr_bio_fd_t *my, fr_bio_fd_config_t const *cfg) sin->sin_port = htons(my->info.cfg->src_port_start + current); - if (bind(my->info.socket.fd, (struct sockaddr *) &salocal, salen) >= 0) goto done; + if (bind(my->info.socket.fd, (struct sockaddr *) &salocal, salen) == 0) goto done; } - fr_strerror_const("There are no open ports between 'src_port_start' and 'src_port_end'"); + /* + * The error is a good hint at _why_ we failed to bind. + * We expect errno to be EADDRINUSE, anything else is a surprise. 
+ */ + fr_strerror_printf("Failed binding port between 'src_port_start' and 'src_port_end': %s", fr_syserror(errno)); return -1; } diff --git a/src/lib/bio/fd_priv.h b/src/lib/bio/fd_priv.h index a59d2a5b774..04d67fa2fc7 100644 --- a/src/lib/bio/fd_priv.h +++ b/src/lib/bio/fd_priv.h @@ -39,11 +39,11 @@ typedef struct fr_bio_fd_s { fr_bio_fd_info_t info; struct { - fr_bio_callback_t success; //!< for fr_bio_fd_connect() - fr_bio_callback_t error; //!< for fr_bio_fd_connect() - fr_bio_callback_t timeout; //!< for fr_bio_fd_connect() - fr_event_list_t *el; //!< for fr_bio_fd_connect() - fr_event_timer_t const *ev; //!< for fr_bio_fd_connect() + fr_bio_callback_t success; //!< for fr_bio_fd_connect() + fr_bio_callback_t error; //!< for fr_bio_fd_connect() + fr_bio_callback_t timeout; //!< for fr_bio_fd_connect() + fr_event_list_t *el; //!< for fr_bio_fd_connect() + fr_timer_t *ev; //!< for fr_bio_fd_connect() } connect; int max_tries; //!< how many times we retry on EINTR diff --git a/src/lib/bio/packet.h b/src/lib/bio/packet.h index 67853fd048f..68fb998aa29 100644 --- a/src/lib/bio/packet.h +++ b/src/lib/bio/packet.h @@ -91,7 +91,7 @@ struct fr_bio_packet_s { fr_bio_packet_cb_funcs_t cb; - fr_event_timer_t const *ev; //!< connection timeout + fr_timer_t *ev; //!< connection timeout bool connected; bool write_blocked; diff --git a/src/lib/bio/retry.c b/src/lib/bio/retry.c index a0d3078b8bb..6e358e5dea9 100644 --- a/src/lib/bio/retry.c +++ b/src/lib/bio/retry.c @@ -99,7 +99,7 @@ struct fr_bio_retry_s { ssize_t error; bool all_used; //!< blocked due to no free entries - fr_event_timer_t const *ev; //!< we only need one timer event: next time we do something + fr_timer_t *ev; //!< we only need one timer event: next time we do something /* * The first item is cached here so that we can detect when it changes. The insert / delete @@ -124,8 +124,8 @@ struct fr_bio_retry_s { FR_DLIST_HEAD(fr_bio_retry_list) free; //!< free lists are better than memory fragmentation }; -static void fr_bio_retry_timer(UNUSED fr_event_list_t *el, fr_time_t now, void *uctx); -static void fr_bio_retry_expiry_timer(UNUSED fr_event_list_t *el, fr_time_t now, void *uctx); +static void fr_bio_retry_timer(UNUSED fr_timer_list_t *tl, fr_time_t now, void *uctx); +static void fr_bio_retry_expiry_timer(UNUSED fr_timer_list_t *tl, fr_time_t now, void *uctx); static ssize_t fr_bio_retry_write(fr_bio_t *bio, void *packet_ctx, void const *buffer, size_t size); static ssize_t fr_bio_retry_save_write(fr_bio_retry_t *my, fr_bio_retry_entry_t *item, ssize_t rcode); @@ -160,7 +160,7 @@ static int fr_bio_retry_expiry_timer_reset(fr_bio_retry_t *my) /* * Update the timer. This should never fail. */ - if (fr_event_timer_at(my, my->info.el, &my->ev, first->retry.end, fr_bio_retry_expiry_timer, my) < 0) return -1; + if (fr_timer_at(my, my->info.el->tl, &my->ev, first->retry.end, false, fr_bio_retry_expiry_timer, my) < 0) return -1; my->next_retry_item = first; return 0; @@ -200,7 +200,7 @@ static int fr_bio_retry_timer_reset(fr_bio_retry_t *my) /* * Update the timer. This should never fail. */ - if (fr_event_timer_at(my, my->info.el, &my->ev, first->retry.next, fr_bio_retry_timer, my) < 0) return -1; + if (fr_timer_at(my, my->info.el->tl, &my->ev, first->retry.next, false, fr_bio_retry_timer, my) < 0) return -1; my->next_retry_item = first; return 0; @@ -563,7 +563,7 @@ ssize_t fr_bio_retry_rewrite(fr_bio_t *bio, fr_bio_retry_entry_t *item, const vo * when the socket isn't blocked. But the caller might not pay attention to those issues. 
*/ if (my->partial) return 0; - + /* * There must be a next bio. */ @@ -627,7 +627,7 @@ static ssize_t fr_bio_retry_write_fatal(fr_bio_t *bio, UNUSED void *packet_ctx, /** Run an expiry timer event. * */ -static void fr_bio_retry_expiry_timer(UNUSED fr_event_list_t *el, fr_time_t now, void *uctx) +static void fr_bio_retry_expiry_timer(UNUSED fr_timer_list_t *tl, fr_time_t now, void *uctx) { fr_bio_retry_t *my = talloc_get_type_abort(uctx, fr_bio_retry_t); fr_bio_retry_entry_t *item; @@ -666,7 +666,7 @@ static void fr_bio_retry_expiry_timer(UNUSED fr_event_list_t *el, fr_time_t now, /** Run a timer event. Usually to write out another packet. * */ -static void fr_bio_retry_timer(UNUSED fr_event_list_t *el, fr_time_t now, void *uctx) +static void fr_bio_retry_timer(UNUSED fr_timer_list_t *tl, fr_time_t now, void *uctx) { ssize_t rcode; fr_bio_retry_t *my = talloc_get_type_abort(uctx, fr_bio_retry_t); @@ -728,7 +728,7 @@ static ssize_t fr_bio_retry_write(fr_bio_t *bio, void *packet_ctx, void const *b */ next = fr_bio_next(&my->bio); fr_assert(next != NULL); - + /* * The caller is trying to flush partial data. But we don't have any partial data, so just call * the next bio to flush it. @@ -892,7 +892,7 @@ static ssize_t fr_bio_retry_read(fr_bio_t *bio, void *packet_ctx, void *buffer, fr_assert(item != NULL); fr_assert(item->retry.replies == 0); fr_assert(item != my->partial); - + /* * Track when the "most recently sent" packet has a reply. This metric is better than most * others for judging the liveliness of the destination. @@ -1017,7 +1017,7 @@ int fr_bio_retry_entry_cancel(fr_bio_t *bio, fr_bio_retry_entry_t *item) return 1; } -/** Set a per-packet retry config +/** Set a per-packet retry config * * This function should be called from the #fr_bio_retry_sent_t callback to set a unique retry timer for this * packet. If no retry configuration is set, then the main one from the alloc() function is used. @@ -1197,4 +1197,3 @@ fr_bio_retry_entry_t *fr_bio_retry_item_reserve(fr_bio_t *bio) return item; } - diff --git a/src/lib/curl/base.h b/src/lib/curl/base.h index 9997bda72ac..f0edd5a162d 100644 --- a/src/lib/curl/base.h +++ b/src/lib/curl/base.h @@ -90,7 +90,7 @@ do {\ */ typedef struct { fr_event_list_t *el; //!< Event list servicing I/O events. - fr_event_timer_t const *ev; //!< Multi-Handle timer. + fr_timer_t *ev; //!< Multi-Handle timer. uint64_t transfers; //!< How many transfers are current in progress. CURLM *mandle; //!< The multi handle. } fr_curl_handle_t; diff --git a/src/lib/curl/io.c b/src/lib/curl/io.c index b72064bb98a..2a5d40500d4 100644 --- a/src/lib/curl/io.c +++ b/src/lib/curl/io.c @@ -117,11 +117,11 @@ static inline void _fr_curl_io_demux(fr_curl_handle_t *mhandle, CURLM *mandle) /** libcurl's timer expired * - * @param[in] el the timer was inserted into. + * @param[in] tl the timer was inserted into. * @param[in] now The current time according to the event loop. * @param[in] uctx The rlm_fr_curl_thread_t specific to this thread. 
*/ -static void _fr_curl_io_timer_expired(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) +static void _fr_curl_io_timer_expired(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx) { fr_curl_handle_t *mhandle = talloc_get_type_abort(uctx, fr_curl_handle_t); CURLM *mandle = mhandle->mandle; @@ -277,7 +277,7 @@ static int _fr_curl_io_timer_modify(CURLM *mandle, long timeout_ms, void *ctx) fr_curl_handle_t *mhandle = talloc_get_type_abort(ctx, fr_curl_handle_t); if (timeout_ms < 0) { - if (fr_event_timer_delete(&mhandle->ev) < 0) { + if (fr_timer_delete(&mhandle->ev) < 0) { PERROR("Failed deleting multi-handle timer"); return -1; } @@ -295,15 +295,16 @@ static int _fr_curl_io_timer_modify(CURLM *mandle, long timeout_ms, void *ctx) * unpleasant recursive behavior that immediately calls another call to the callback * with a zero timeout... * - * Setting a timeout of zero when calling fr_event_timer_in should result in the event + * Setting a timeout of zero when calling fr_timer_in should result in the event * repeating at most twice during one iteration of the event loop. * * In a previous version of this code we called curl_multi_socket_action immediately * if timeout_ms was 0. It was observed that this lead to this callback being called * ~665 times per request which is why we no longer do that. */ - if (fr_event_timer_in(mhandle, mhandle->el, &mhandle->ev, - fr_time_delta_from_msec(timeout_ms), _fr_curl_io_timer_expired, mhandle) < 0) return -1; + if (fr_timer_in(mhandle, mhandle->el->tl, &mhandle->ev, + fr_time_delta_from_msec(timeout_ms), + false, _fr_curl_io_timer_expired, mhandle) < 0) return -1; return 0; } diff --git a/src/lib/io/load.c b/src/lib/io/load.c index fad5700eff4..81fec057d7e 100644 --- a/src/lib/io/load.c +++ b/src/lib/io/load.c @@ -81,7 +81,7 @@ struct fr_load_s { bool header; //!< for printing statistics fr_time_t next; //!< The next time we're supposed to send a packet - fr_event_timer_t const *ev; + fr_timer_t *ev; }; fr_load_t *fr_load_generator_create(TALLOC_CTX *ctx, fr_event_list_t *el, fr_load_config_t *config, @@ -126,7 +126,7 @@ static void fr_load_generator_send(fr_load_t *l, fr_time_t now, int count) } } -static void load_timer(fr_event_list_t *el, fr_time_t now, void *uctx) +static void load_timer(fr_timer_list_t *tl, fr_time_t now, void *uctx) { fr_load_t *l = uctx; fr_time_delta_t delta; @@ -215,7 +215,7 @@ static void load_timer(fr_event_list_t *el, fr_time_t now, void *uctx) /* * Set the timer for the next packet. 
*/ - if (fr_event_timer_in(l, el, &l->ev, delta, load_timer, l) < 0) { + if (fr_timer_in(l, tl, &l->ev, delta, false, load_timer, l) < 0) { l->state = FR_LOAD_STATE_DRAINING; return; } @@ -240,7 +240,7 @@ int fr_load_generator_start(fr_load_t *l) l->delta = fr_time_delta_div(fr_time_delta_from_sec(l->config->parallel), fr_time_delta_wrap(l->pps)); l->next = fr_time_add(l->step_start, l->delta); - load_timer(l->el, l->step_start, l); + load_timer(l->el->tl, l->step_start, l); return 0; } @@ -253,7 +253,7 @@ int fr_load_generator_stop(fr_load_t *l) { if (!l->ev) return 0; - return fr_event_timer_delete(&l->ev); + return fr_timer_delete(&l->ev); } diff --git a/src/lib/io/master.c b/src/lib/io/master.c index a9b20adff73..a05466291bf 100644 --- a/src/lib/io/master.c +++ b/src/lib/io/master.c @@ -127,7 +127,7 @@ struct fr_io_client_s { fr_io_instance_t const *inst; //!< parent instance for master IO handler fr_io_thread_t *thread; - fr_event_timer_t const *ev; //!< when we clean up the client + fr_timer_t *ev; //!< when we clean up the client fr_rb_tree_t *table; //!< tracking table for packets fr_heap_t *pending; //!< pending packets for this client @@ -179,7 +179,7 @@ static fr_event_update_t resume_read[] = { static int track_free(fr_io_track_t *track) { - if (track->ev) (void) fr_event_timer_delete(&track->ev); + if (track->ev) (void) fr_timer_delete(&track->ev); talloc_free_children(track); @@ -1181,7 +1181,7 @@ static fr_io_track_t *fr_io_track_add(fr_io_client_t *client, * struct while the packet is in the outbound * queue. */ - if (old->ev) (void) fr_event_timer_delete(&old->ev); + if (old->ev) (void) fr_timer_delete(&old->ev); return old; } @@ -1203,7 +1203,7 @@ static fr_io_track_t *fr_io_track_add(fr_io_client_t *client, if (!fr_rb_delete(client->table, old)) { fr_assert(0); } - if (old->ev) (void) fr_event_timer_delete(&old->ev); + if (old->ev) (void) fr_timer_delete(&old->ev); talloc_set_destructor(old, track_free); @@ -2022,7 +2022,7 @@ static void mod_event_list_set(fr_listen_t *li, fr_event_list_t *el, void *nr) } -static void client_expiry_timer(fr_event_list_t *el, fr_time_t now, void *uctx) +static void client_expiry_timer(fr_timer_list_t *tl, fr_time_t now, void *uctx) { fr_io_client_t *client = talloc_get_type_abort(uctx, fr_io_client_t); fr_io_instance_t const *inst; @@ -2033,7 +2033,7 @@ static void client_expiry_timer(fr_event_list_t *el, fr_time_t now, void *uctx) /* * No event list? We don't need to expire the client. */ - if (!el) return; + if (!tl) return; // @todo - print out what we plan on doing next connection = client->connection; @@ -2221,8 +2221,8 @@ idle_timeout: delay = inst->check_interval; reset_timer: - if (fr_event_timer_in(client, el, &client->ev, - delay, client_expiry_timer, client) < 0) { + if (fr_timer_in(client, tl, &client->ev, + delay, false, client_expiry_timer, client) < 0) { ERROR("proto_%s - Failed adding timeout for dynamic client %s. It will be permanent!", inst->app_io->common.name, client->radclient->shortname); return; @@ -2235,7 +2235,7 @@ reset_timer: /* * Expire cached packets after cleanup_delay time */ -static void packet_expiry_timer(fr_event_list_t *el, fr_time_t now, void *uctx) +static void packet_expiry_timer(fr_timer_list_t *tl, fr_time_t now, void *uctx) { fr_io_track_t *track = talloc_get_type_abort(uctx, fr_io_track_t); fr_io_client_t *client = track->client; @@ -2257,8 +2257,9 @@ static void packet_expiry_timer(fr_event_list_t *el, fr_time_t now, void *uctx) * will be cleaned up when the timer * fires. 
*/ - if (fr_event_timer_at(track, el, &track->ev, - track->expires, packet_expiry_timer, track) == 0) { + if (fr_timer_at(track, tl, &track->ev, + track->expires, + false, packet_expiry_timer, track) == 0) { DEBUG("proto_%s - cleaning up request in %.6fs", inst->app_io->common.name, fr_time_delta_unwrap(inst->cleanup_delay) / (double)NSEC); return; @@ -2296,7 +2297,7 @@ static void packet_expiry_timer(fr_event_list_t *el, fr_time_t now, void *uctx) * the client. */ if (client->packets == 0) { - client_expiry_timer(el, now, client); + client_expiry_timer(tl, now, client); } } @@ -2362,7 +2363,7 @@ static ssize_t mod_write(fr_listen_t *li, void *packet_ctx, fr_time_t request_ti buffer, buffer_len, written); if (packet_len <= 0) { track->discard = true; - packet_expiry_timer(el, fr_time_wrap(0), track); + packet_expiry_timer(el->tl, fr_time_wrap(0), track); return packet_len; } @@ -2398,7 +2399,7 @@ static ssize_t mod_write(fr_listen_t *li, void *packet_ctx, fr_time_t request_ti * On dedup this also extends the timer. */ setup_timer: - packet_expiry_timer(el, fr_time_wrap(0), track); + packet_expiry_timer(el->tl, fr_time_wrap(0), track); return buffer_len; } @@ -2453,7 +2454,7 @@ static ssize_t mod_write(fr_listen_t *li, void *packet_ctx, fr_time_t request_ti */ if (connection && (inst->ipproto == IPPROTO_UDP)) { connection = fr_io_connection_alloc(inst, thread, client, -1, connection->address, connection); - client_expiry_timer(el, fr_time_wrap(0), connection->client); + client_expiry_timer(el->tl, fr_time_wrap(0), connection->client); errno = ECONNREFUSED; return -1; @@ -2464,7 +2465,7 @@ static ssize_t mod_write(fr_listen_t *li, void *packet_ctx, fr_time_t request_ti * expiry timer, which will close and free the * connection. */ - client_expiry_timer(el, fr_time_wrap(0), client); + client_expiry_timer(el->tl, fr_time_wrap(0), client); return buffer_len; } @@ -2704,7 +2705,7 @@ finish: * timed out, so there's nothing more to do. In that case, set up the expiry timers. */ if (client->packets == 0) { - client_expiry_timer(el, fr_time_wrap(0), client); + client_expiry_timer(el->tl, fr_time_wrap(0), client); } reread: diff --git a/src/lib/io/master.h b/src/lib/io/master.h index 47cdc8ca381..56e8783f936 100644 --- a/src/lib/io/master.h +++ b/src/lib/io/master.h @@ -39,7 +39,7 @@ typedef struct fr_io_client_s fr_io_client_t; typedef struct fr_io_track_s { fr_rb_node_t node; //!< rbtree node in the tracking tree. 
- fr_event_timer_t const *ev; //!< when we clean up this tracking entry + fr_timer_t *ev; //!< when we clean up this tracking entry fr_time_t timestamp; //!< when this packet was received fr_time_t expires; //!< when this packet expires int packets; //!< number of packets using this entry diff --git a/src/lib/io/schedule.c b/src/lib/io/schedule.c index ac016e431df..81485068bc3 100644 --- a/src/lib/io/schedule.c +++ b/src/lib/io/schedule.c @@ -115,7 +115,7 @@ typedef struct { fr_schedule_child_status_t status; //!< status of the worker fr_network_t *nr; //!< the receive data structure - fr_event_timer_t const *ev; //!< timer for stats_interval + fr_timer_t *ev; //!< timer for stats_interval } fr_schedule_network_t; @@ -301,13 +301,13 @@ fail: } -static void stats_timer(fr_event_list_t *el, fr_time_t now, void *uctx) +static void stats_timer(fr_timer_list_t *tl, fr_time_t now, void *uctx) { fr_schedule_network_t *sn = talloc_get_type_abort(uctx, fr_schedule_network_t); fr_network_stats_log(sn->nr, sn->sc->log); - (void) fr_event_timer_at(sn, el, &sn->ev, fr_time_add(now, sn->sc->config->stats_interval), stats_timer, sn); + (void) fr_timer_at(sn, tl, &sn->ev, fr_time_add(now, sn->sc->config->stats_interval), false, stats_timer, sn); } /** Initialize and run the network thread. @@ -386,7 +386,7 @@ static void *fr_schedule_network_thread(void *arg) * Print out statistics for this network IO handler. */ if (fr_time_delta_ispos(sc->config->stats_interval)) { - (void) fr_event_timer_in(sn, el, &sn->ev, sn->sc->config->stats_interval, stats_timer, sn); + (void) fr_timer_in(sn, el->tl, &sn->ev, sn->sc->config->stats_interval, false, stats_timer, sn); } /* * Call the main event processing loop of the network diff --git a/src/lib/io/worker.c b/src/lib/io/worker.c index 2859e5f4958..cc85dfffb0e 100644 --- a/src/lib/io/worker.c +++ b/src/lib/io/worker.c @@ -131,7 +131,7 @@ struct fr_worker_s { fr_time_t checked_timeout; //!< when we last checked the tails of the queues - fr_event_timer_t const *ev_cleanup; //!< timer for max_request_time + fr_timer_t *ev_cleanup; //!< timer for max_request_time fr_worker_channel_t *channel; //!< list of channels }; @@ -195,7 +195,7 @@ static inline bool is_worker_thread(fr_worker_t const *worker) static void worker_request_bootstrap(fr_worker_t *worker, fr_channel_data_t *cd, fr_time_t now); static void worker_send_reply(fr_worker_t *worker, request_t *request, bool do_not_respond, fr_time_t now); -static void worker_max_request_time(UNUSED fr_event_list_t *el, UNUSED fr_time_t when, void *uctx); +static void worker_max_request_time(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t when, void *uctx); static void worker_max_request_timer(fr_worker_t *worker); /** Callback which handles a message being received on the worker side. @@ -520,11 +520,11 @@ static void worker_stop_request(request_t **request_p) * thread more than max_request_time seconds ago. In the interest of not adding a * timer for every packet, the requests are given a 1 second leeway. * - * @param[in] el the worker's event list + * @param[in] tl the worker's timer list. * @param[in] when the current time * @param[in] uctx the fr_worker_t. 
*/ -static void worker_max_request_time(UNUSED fr_event_list_t *el, UNUSED fr_time_t when, void *uctx) +static void worker_max_request_time(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t when, void *uctx) { fr_time_t now = fr_time(); request_t *request; @@ -575,8 +575,8 @@ static void worker_max_request_timer(fr_worker_t *worker) cleanup = fr_time_add(request->async->recv_time, worker->config.max_request_time); DEBUG2("Resetting cleanup timer to +%pV", fr_box_time_delta(worker->config.max_request_time)); - if (fr_event_timer_at(worker, worker->el, &worker->ev_cleanup, - cleanup, worker_max_request_time, worker) < 0) { + if (fr_timer_at(worker, worker->el->tl, &worker->ev_cleanup, + cleanup, false, worker_max_request_time, worker) < 0) { ERROR("Failed inserting max_request_time timer"); } } diff --git a/src/lib/ldap/base.h b/src/lib/ldap/base.h index f8cb4d694f4..adf2be81ee5 100644 --- a/src/lib/ldap/base.h +++ b/src/lib/ldap/base.h @@ -404,7 +404,7 @@ typedef struct fr_ldap_thread_trunk_s { fr_ldap_directory_t *directory; //!< The type of directory we're connected to. trunk_t *trunk; //!< Connection trunk fr_ldap_thread_t *t; //!< Thread this connection is associated with - fr_event_timer_t const *ev; //!< Event to close the thread when it has been idle. + fr_timer_t *ev; //!< Event to close the thread when it has been idle. } fr_ldap_thread_trunk_t; typedef struct fr_ldap_referral_s fr_ldap_referral_t; @@ -456,7 +456,7 @@ struct fr_ldap_query_s { trunk_request_t *treq; //!< Trunk request this query is associated with fr_ldap_connection_t *ldap_conn; //!< LDAP connection this query is running on. - fr_event_timer_t const *ev; //!< Event for timing out the query + fr_timer_t *ev; //!< Event for timing out the query char **referral_urls; //!< Referral results to follow fr_dlist_head_t referrals; //!< List of parsed referrals diff --git a/src/lib/ldap/connection.c b/src/lib/ldap/connection.c index 0e481afde1a..471f91af0aa 100644 --- a/src/lib/ldap/connection.c +++ b/src/lib/ldap/connection.c @@ -453,7 +453,7 @@ error: /** Callback for closing idle LDAP trunk * */ -static void _ldap_trunk_idle_timeout(fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) +static void _ldap_trunk_idle_timeout(fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx) { fr_ldap_thread_trunk_t *ttrunk = talloc_get_type_abort(uctx, fr_ldap_thread_trunk_t); @@ -465,8 +465,8 @@ static void _ldap_trunk_idle_timeout(fr_event_list_t *el, UNUSED fr_time_t now, /* * There are still pending queries - insert a new event */ - (void) fr_event_timer_in(ttrunk, el, &ttrunk->ev, ttrunk->t->config->idle_timeout, - _ldap_trunk_idle_timeout, ttrunk); + (void) fr_timer_in(ttrunk, tl, &ttrunk->ev, ttrunk->t->config->idle_timeout, + false, _ldap_trunk_idle_timeout, ttrunk); } } @@ -703,8 +703,8 @@ static void ldap_trunk_request_demux(fr_event_list_t *el, trunk_connection_t *tc /* * Reset the idle timeout event */ - (void) fr_event_timer_in(ttrunk, el, &ttrunk->ev, - ttrunk->t->config->idle_timeout, _ldap_trunk_idle_timeout, ttrunk); + (void) fr_timer_in(ttrunk, el->tl, &ttrunk->ev, + ttrunk->t->config->idle_timeout, false, _ldap_trunk_idle_timeout, ttrunk); do { /* @@ -864,7 +864,7 @@ static void ldap_trunk_request_demux(fr_event_list_t *el, trunk_connection_t *tc /* * Remove the timeout event */ - if (query->ev) fr_event_timer_delete(&query->ev); + if (query->ev) fr_timer_delete(&query->ev); query->result = result; @@ -971,8 +971,8 @@ fr_ldap_thread_trunk_t *fr_thread_ldap_trunk_get(fr_ldap_thread_t *thread, char /* * Insert event to 
close trunk if it becomes idle */ - if (!fr_cond_assert_msg(fr_event_timer_in(found, thread->el, &found->ev, thread->config->idle_timeout, - _ldap_trunk_idle_timeout, found) == 0, "cannot insert trunk idle event")) goto error; + if (!fr_cond_assert_msg(fr_timer_in(found, thread->el->tl, &found->ev, thread->config->idle_timeout, + false, _ldap_trunk_idle_timeout, found) == 0, "cannot insert trunk idle event")) goto error; /* * Attempt to discover what type directory we are talking to diff --git a/src/lib/redis/io.c b/src/lib/redis/io.c index 476aefdc5a5..69c757f8f12 100644 --- a/src/lib/redis/io.c +++ b/src/lib/redis/io.c @@ -220,7 +220,7 @@ static void _redis_io_timer_modify(void *uctx, struct timeval tv) DEBUG4("redis handle %p - Timeout in %pV seconds", h, fr_box_time_delta(timeout)); - if (fr_event_timer_in(h, conn->el, &h->timer, + if (fr_timer_in(h, conn->el, &h->timer, timeout, _redis_io_service_timer_expired, conn) < 0) { PERROR("redis timeout %p - Failed adding timeout", h); } diff --git a/src/lib/redis/io.h b/src/lib/redis/io.h index a027da41662..f89ea19b988 100644 --- a/src/lib/redis/io.h +++ b/src/lib/redis/io.h @@ -68,7 +68,7 @@ typedef struct { bool write_set; //!< We're listening for writes. bool ignore_disconnect_cb; //!< Ensure that redisAsyncFree doesn't cause ///< a callback loop. - fr_event_timer_t const *timer; //!< Connection timer. + fr_timer_t *timer; //!< Connection timer. redisAsyncContext *ac; //!< Async handle for hiredis. diff --git a/src/lib/server/connection.c b/src/lib/server/connection.c index e495f01f34d..f235f46e2f0 100644 --- a/src/lib/server/connection.c +++ b/src/lib/server/connection.c @@ -107,7 +107,7 @@ struct connection_s { connection_shutdown_t shutdown; //!< Signal the connection handle to start shutting down. connection_failed_t failed; //!< Callback for 'failed' notification. - fr_event_timer_t const *ev; //!< State transition timer. + fr_timer_t *ev; //!< State transition timer. fr_time_delta_t connection_timeout; //!< How long to wait in the //!< #CONNECTION_STATE_CONNECTING state. @@ -620,11 +620,11 @@ uint64_t connection_get_num_timed_out(connection_t const *conn) /** The requisite period of time has passed, try and re-open the connection * - * @param[in] el the time event occurred on. + * @param[in] tl containing the timer event. * @param[in] now The current time. * @param[in] uctx The #connection_t the fd is associated with. */ -static void _reconnect_delay_done(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) +static void _reconnect_delay_done(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx) { connection_t *conn = talloc_get_type_abort(uctx, connection_t); @@ -658,7 +658,7 @@ static void connection_state_enter_closed(connection_t *conn) STATE_TRANSITION(CONNECTION_STATE_CLOSED); - fr_event_timer_delete(&conn->ev); + fr_timer_delete(&conn->ev); /* * If there's a close callback, call it, so that the @@ -689,11 +689,11 @@ static void connection_state_enter_closed(connection_t *conn) * * Connection wasn't opened within the configured period of time * - * @param[in] el the time event occurred on. + * @param[in] tl timer list the event belonged to. * @param[in] now The current time. * @param[in] uctx The #connection_t the fd is associated with. 
*/ -static void _connection_timeout(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) +static void _connection_timeout(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx) { connection_t *conn = talloc_get_type_abort(uctx, connection_t); @@ -745,8 +745,8 @@ static void connection_state_enter_shutdown(connection_t *conn) * timeout period. */ if (fr_time_delta_ispos(conn->connection_timeout)) { - if (fr_event_timer_in(conn, conn->pub.el, &conn->ev, - conn->connection_timeout, _connection_timeout, conn) < 0) { + if (fr_timer_in(conn, conn->pub.el->tl, &conn->ev, + conn->connection_timeout, false, _connection_timeout, conn) < 0) { /* * Can happen when the event loop is exiting */ @@ -777,7 +777,7 @@ static void connection_state_enter_failed(connection_t *conn) /* * Explicit error occurred, delete the connection timer */ - fr_event_timer_delete(&conn->ev); + fr_timer_delete(&conn->ev); /* * Record what state the connection is currently in @@ -856,8 +856,8 @@ static void connection_state_enter_failed(connection_t *conn) case CONNECTION_STATE_SHUTDOWN: /* Failed during shutdown */ if (fr_time_delta_ispos(conn->reconnection_delay)) { DEBUG2("Delaying reconnection by %pVs", fr_box_time_delta(conn->reconnection_delay)); - if (fr_event_timer_in(conn, conn->pub.el, &conn->ev, - conn->reconnection_delay, _reconnect_delay_done, conn) < 0) { + if (fr_timer_in(conn, conn->pub.el->tl, &conn->ev, + conn->reconnection_delay, false, _reconnect_delay_done, conn) < 0) { /* * Can happen when the event loop is exiting */ @@ -927,7 +927,7 @@ static void connection_state_enter_halted(connection_t *conn) BAD_STATE_TRANSITION(CONNECTION_STATE_HALTED); } - fr_event_timer_delete(&conn->ev); + fr_timer_delete(&conn->ev); STATE_TRANSITION(CONNECTION_STATE_HALTED); WATCH_PRE(conn); @@ -955,7 +955,7 @@ static void connection_state_enter_connected(connection_t *conn) STATE_TRANSITION(CONNECTION_STATE_CONNECTED); - fr_event_timer_delete(&conn->ev); + fr_timer_delete(&conn->ev); WATCH_PRE(conn); if (conn->open) { HANDLER_BEGIN(conn, conn->open); @@ -1014,8 +1014,8 @@ static void connection_state_enter_connecting(connection_t *conn) * set, then add the timer. */ if (fr_time_delta_ispos(conn->connection_timeout)) { - if (fr_event_timer_in(conn, conn->pub.el, &conn->ev, - conn->connection_timeout, _connection_timeout, conn) < 0) { + if (fr_timer_in(conn, conn->pub.el->tl, &conn->ev, + conn->connection_timeout, false, _connection_timeout, conn) < 0) { PERROR("Failed setting connection_timeout event, failing connection"); /* @@ -1456,7 +1456,7 @@ static int _connection_free(connection_t *conn) /* * Explicitly cancel any pending events */ - fr_event_timer_delete(&conn->ev); + fr_timer_delete(&conn->ev); /* * Don't allow the connection to be diff --git a/src/lib/server/exec.c b/src/lib/server/exec.c index c32377b47d4..fc09b0bd242 100644 --- a/src/lib/server/exec.c +++ b/src/lib/server/exec.c @@ -717,7 +717,7 @@ void fr_exec_oneshot_cleanup(fr_exec_state_t *exec, int signal) exec->pid = -1; } - if (exec->ev) fr_event_timer_delete(&exec->ev); + if (exec->ev) fr_timer_delete(&exec->ev); } /* @@ -771,7 +771,7 @@ static void exec_reap(fr_event_list_t *el, pid_t pid, int status, void *uctx) } exec->pid = -1; /* pid_t is signed */ - if (exec->ev) fr_event_timer_delete(&exec->ev); + if (exec->ev) fr_timer_delete(&exec->ev); /* * Process exit notifications (EV_PROC) and file @@ -844,7 +844,7 @@ static void exec_reap(fr_event_list_t *el, pid_t pid, int status, void *uctx) /* * Callback when an exec times out. 
*/ -static void exec_timeout(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) +static void exec_timeout(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx) { fr_exec_state_t *exec = uctx; /* may not be talloced */ bool exit_timeout; @@ -938,7 +938,7 @@ static void exec_stdout_read(UNUSED fr_event_list_t *el, int fd, int flags, void /* * Child has already exited - unlang can resume */ - if (exec->ev) fr_event_timer_delete(&exec->ev); + if (exec->ev) fr_timer_delete(&exec->ev); unlang_interpret_mark_runnable(exec->request); } } @@ -1133,7 +1133,7 @@ int fr_exec_oneshot(TALLOC_CTX *ctx, fr_exec_state_t *exec, request_t *request, * Setup event to kill the child process after a period of time. */ if (fr_time_delta_ispos(timeout) && - (fr_event_timer_in(ctx, el, &exec->ev, timeout, exec_timeout, exec) < 0)) goto fail_and_close; + (fr_timer_in(ctx, el->tl, &exec->ev, timeout, true, exec_timeout, exec) < 0)) goto fail_and_close; return 0; } diff --git a/src/lib/server/exec.h b/src/lib/server/exec.h index 7f5335e8e50..ffa15b9bb42 100644 --- a/src/lib/server/exec.h +++ b/src/lib/server/exec.h @@ -38,6 +38,7 @@ extern "C" { #include #include #include +#include #include #ifdef __cplusplus @@ -69,7 +70,7 @@ typedef struct { int stderr_fd; //!< for producing error messages. - fr_event_timer_t const *ev; //!< for timing out the child + fr_timer_t *ev; //!< for timing out the child fr_event_pid_t const *ev_pid; //!< for cleaning up the process fr_exec_fail_t failed; //!< what kind of failure diff --git a/src/lib/server/main_loop.c b/src/lib/server/main_loop.c index 6d2e28c4634..c237b9ef77d 100644 --- a/src/lib/server/main_loop.c +++ b/src/lib/server/main_loop.c @@ -52,22 +52,22 @@ static int self_pipe[2] = { -1, -1 }; #include static fr_time_delta_t sd_watchdog_interval; -static fr_event_timer_t const *sd_watchdog_ev; +static fr_timer_t *sd_watchdog_ev; /** Reoccurring watchdog event to inform systemd we're still alive * - * Note actually a very good indicator of aliveness as the main event + * Not actually a very good indicator of aliveness as the main event * loop doesn't actually do any packet processing. */ -static void sd_watchdog_event(fr_event_list_t *our_el, UNUSED fr_time_t now, void *ctx) +static void sd_watchdog_event(fr_timer_list_t *tl, UNUSED fr_time_t now, void *ctx) { DEBUG("Emitting systemd watchdog notification"); sd_notify(0, "WATCHDOG=1"); - if (fr_event_timer_in(NULL, our_el, &sd_watchdog_ev, - sd_watchdog_interval, - sd_watchdog_event, ctx) < 0) { + if (fr_timer_in(NULL, tl, &sd_watchdog_ev, + sd_watchdog_interval, + true, sd_watchdog_event, ctx) < 0) { ERROR("Failed to insert watchdog event"); } } @@ -211,7 +211,7 @@ int main_loop_start(void) /* * Start placating the watchdog (if told to do so). 
*/ - if (fr_time_delta_ispos(sd_watchdog_interval)) sd_watchdog_event(event_list, fr_time_wrap(0), NULL); + if (fr_time_delta_ispos(sd_watchdog_interval)) sd_watchdog_event(event_list->tl, fr_time_wrap(0), NULL); #endif ret = fr_event_loop(event_list); @@ -220,7 +220,7 @@ int main_loop_start(void) if (under_systemd) { INFO("Informing systemd we're stopping"); sd_notify(0, "STOPPING=1"); - fr_event_timer_delete(&sd_watchdog_ev); + fr_timer_delete(&sd_watchdog_ev); } } #endif diff --git a/src/lib/server/trunk.c b/src/lib/server/trunk.c index 45f84ae5917..1f02443906e 100644 --- a/src/lib/server/trunk.c +++ b/src/lib/server/trunk.c @@ -176,7 +176,7 @@ struct trunk_connection_s { /** @name Timers * @{ */ - fr_event_timer_t const *lifetime_ev; //!< Maximum time this connection can be open. + fr_timer_t *lifetime_ev; //!< Maximum time this connection can be open. /** @} */ }; @@ -270,7 +270,7 @@ struct trunk_s { /** @name Timers * @{ */ - fr_event_timer_t const *manage_ev; //!< Periodic connection management event. + fr_timer_t *manage_ev; //!< Periodic connection management event. /** @} */ /** @name Log rate limiting entries @@ -920,7 +920,7 @@ static void trunk_connection_enter_active(trunk_connection_t *tconn); static void trunk_rebalance(trunk_t *trunk); static void trunk_manage(trunk_t *trunk, fr_time_t now); -static void _trunk_timer(fr_event_list_t *el, fr_time_t now, void *uctx); +static void _trunk_timer(fr_timer_list_t *tl, fr_time_t now, void *uctx); static void trunk_backlog_drain(trunk_t *trunk); /** Compare two protocol requests @@ -3213,7 +3213,7 @@ static void trunk_connection_enter_draining_to_free(trunk_connection_t *tconn) { trunk_t *trunk = tconn->pub.trunk; - if (tconn->lifetime_ev) fr_event_timer_delete(&tconn->lifetime_ev); + if (tconn->lifetime_ev) fr_timer_delete(&tconn->lifetime_ev); switch (tconn->pub.state) { case TRUNK_CONN_ACTIVE: @@ -3425,11 +3425,11 @@ static void _trunk_connection_on_shutdown(UNUSED connection_t *conn, /** Trigger a reconnection of the trunk connection * - * @param[in] el Event list the timer was inserted into. + * @param[in] tl timer list the timer was inserted into. * @param[in] now Current time. * @param[in] uctx The tconn. */ -static void _trunk_connection_lifetime_expire(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) +static void _trunk_connection_lifetime_expire(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx) { trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t); @@ -3484,8 +3484,8 @@ static void _trunk_connection_on_connected(UNUSED connection_t *conn, * connection periodically. 
*/ if (fr_time_delta_ispos(trunk->conf.lifetime)) { - if (fr_event_timer_in(tconn, trunk->el, &tconn->lifetime_ev, - trunk->conf.lifetime, _trunk_connection_lifetime_expire, tconn) < 0) { + if (fr_timer_in(tconn, trunk->el->tl, &tconn->lifetime_ev, + trunk->conf.lifetime, false, _trunk_connection_lifetime_expire, tconn) < 0) { PERROR("Failed inserting connection reconnection timer event, halting connection"); connection_signal_shutdown(tconn->pub.conn); return; @@ -3562,7 +3562,7 @@ static void _trunk_connection_on_closed(UNUSED connection_t *conn, /* * Remove the reconnect event */ - if (fr_time_delta_ispos(trunk->conf.lifetime)) fr_event_timer_delete(&tconn->lifetime_ev); + if (fr_time_delta_ispos(trunk->conf.lifetime)) fr_timer_delete(&tconn->lifetime_ev); /* * Remove the I/O events @@ -4493,19 +4493,19 @@ static void trunk_manage(trunk_t *trunk, fr_time_t now) /** Event to periodically call the connection management function * - * @param[in] el this event belongs to. + * @param[in] tl this event belongs to. * @param[in] now current time. * @param[in] uctx The trunk. */ -static void _trunk_timer(fr_event_list_t *el, fr_time_t now, void *uctx) +static void _trunk_timer(fr_timer_list_t *tl, fr_time_t now, void *uctx) { trunk_t *trunk = talloc_get_type_abort(uctx, trunk_t); trunk_manage(trunk, now); if (fr_time_delta_ispos(trunk->conf.manage_interval)) { - if (fr_event_timer_in(trunk, el, &trunk->manage_ev, trunk->conf.manage_interval, - _trunk_timer, trunk) < 0) { + if (fr_timer_in(trunk, tl, &trunk->manage_ev, trunk->conf.manage_interval, + false, _trunk_timer, trunk) < 0) { PERROR("Failed inserting trunk management event"); /* Not much we can do, hopefully the trunk will be freed soon */ } @@ -4790,8 +4790,8 @@ int trunk_start(trunk_t *trunk) * Insert the event timer to manage * the interval between managing connections. */ - if (fr_event_timer_in(trunk, trunk->el, &trunk->manage_ev, trunk->conf.manage_interval, - _trunk_timer, trunk) < 0) { + if (fr_timer_in(trunk, trunk->el->tl, &trunk->manage_ev, trunk->conf.manage_interval, + false, _trunk_timer, trunk) < 0) { PERROR("Failed inserting trunk management event"); return -1; } @@ -4830,7 +4830,8 @@ int trunk_connection_manage_schedule(trunk_t *trunk) { if (!trunk->started || !trunk->managing_connections) return 0; - if (fr_event_timer_in(trunk, trunk->el, &trunk->manage_ev, fr_time_delta_wrap(0), _trunk_timer, trunk) < 0) { + if (fr_timer_in(trunk, trunk->el->tl, &trunk->manage_ev, fr_time_delta_wrap(0), + false, _trunk_timer, trunk) < 0) { PERROR("Failed inserting trunk management event"); return -1; } @@ -4873,7 +4874,7 @@ static int _trunk_free(trunk_t *trunk) * We really don't want this firing after * we've freed everything. */ - fr_event_timer_delete(&trunk->manage_ev); + fr_timer_delete(&trunk->manage_ev); /* * Now free the connections in each of the lists. 
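The trunk.c, schedule.c and main_loop.c changes above show what a recurring maintenance timer looks like under the new API: the callback re-arms itself using the fr_timer_list_t it is handed instead of an event list, and the handle it re-uses is again a plain fr_timer_t pointer (the trunk_tests.c hunks that follow switch the test clock over with fr_timer_list_set_time_func(el->tl, test_time) for the same reason). A compact sketch of that self-rearming pattern, modelled on stats_timer() and _trunk_timer(); my_periodic_ctx_t, my_periodic_cb() and its interval field are illustrative, and only the fr_timer_at() signature is taken from this diff.

typedef struct {
	fr_timer_t	*ev;		/* re-used on every iteration rather than reallocated */
	fr_time_delta_t	interval;	/* how often to run */
} my_periodic_ctx_t;

static void my_periodic_cb(fr_timer_list_t *tl, fr_time_t now, void *uctx)
{
	my_periodic_ctx_t *c = uctx;

	/* ... do the periodic work here ... */

	/* Re-arm from inside the callback, using the timer list we were handed */
	(void) fr_timer_at(c, tl, &c->ev, fr_time_add(now, c->interval),
			   false, my_periodic_cb, c);
}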
diff --git a/src/lib/server/trunk_tests.c b/src/lib/server/trunk_tests.c index 50aff3776b5..8d755b89fc0 100644 --- a/src/lib/server/trunk_tests.c +++ b/src/lib/server/trunk_tests.c @@ -396,7 +396,7 @@ static void test_socket_pair_alloc_then_free(void) el = fr_event_list_alloc(ctx, NULL, NULL); - fr_event_list_set_time_func(el, test_time); + fr_timer_list_set_time_func(el->tl, test_time); trunk = trunk_alloc(ctx, el, &io_funcs, &conf, "test_socket_pair", NULL, false); TEST_CHECK(trunk != NULL); @@ -438,7 +438,7 @@ static void test_socket_pair_alloc_then_reconnect_then_free(void) if (!el) return; - fr_event_list_set_time_func(el, test_time); + fr_timer_list_set_time_func(el->tl, test_time); trunk = trunk_alloc(ctx, el, &io_funcs, &conf, "test_socket_pair", NULL, false); TEST_CHECK(trunk != NULL); @@ -526,7 +526,7 @@ static void test_socket_pair_alloc_then_connect_timeout(void) el = fr_event_list_alloc(ctx, NULL, NULL); - fr_event_list_set_time_func(el, test_time); + fr_timer_list_set_time_func(el->tl, test_time); trunk = trunk_alloc(ctx, el, &io_funcs, &conf, "test_socket_pair", NULL, false); @@ -607,7 +607,7 @@ static void test_socket_pair_alloc_then_reconnect_check_delay(void) DEBUG_LVL_SET; el = fr_event_list_alloc(ctx, NULL, NULL); - fr_event_list_set_time_func(el, test_time); + fr_timer_list_set_time_func(el->tl, test_time); trunk = trunk_alloc(ctx, el, &io_funcs, &conf, "test_socket_pair", NULL, false); TEST_CHECK(trunk != NULL); @@ -675,7 +675,7 @@ static void test_enqueue_basic(void) DEBUG_LVL_SET; el = fr_event_list_alloc(ctx, NULL, NULL); - fr_event_list_set_time_func(el, test_time); + fr_timer_list_set_time_func(el->tl, test_time); trunk = test_setup_trunk(ctx, el, &conf, true, NULL); @@ -775,7 +775,7 @@ static void test_enqueue_cancellation_points(void) DEBUG_LVL_SET; el = fr_event_list_alloc(ctx, NULL, NULL); - fr_event_list_set_time_func(el, test_time); + fr_timer_list_set_time_func(el->tl, test_time); trunk = test_setup_trunk(ctx, el, &conf, false, NULL); preq = talloc_zero(NULL, test_proto_request_t); @@ -1022,7 +1022,7 @@ static void test_partial_to_complete_states(void) DEBUG_LVL_SET; el = fr_event_list_alloc(ctx, NULL, NULL); - fr_event_list_set_time_func(el, test_time); + fr_timer_list_set_time_func(el->tl, test_time); trunk = test_setup_trunk(ctx, el, &conf, true, NULL); preq = talloc_zero(NULL, test_proto_request_t); @@ -1103,7 +1103,7 @@ static void test_requeue_on_reconnect(void) fr_talloc_fault_setup(); el = fr_event_list_alloc(ctx, NULL, NULL); - fr_event_list_set_time_func(el, test_time); + fr_timer_list_set_time_func(el->tl, test_time); trunk = test_setup_trunk(ctx, el, &conf, true, NULL); preq = talloc_zero(ctx, test_proto_request_t); @@ -1385,7 +1385,7 @@ static void test_connection_start_on_enqueue(void) DEBUG_LVL_SET; el = fr_event_list_alloc(ctx, NULL, NULL); - fr_event_list_set_time_func(el, test_time); + fr_timer_list_set_time_func(el->tl, test_time); /* Need to provide a timer starting value above zero */ test_time_base = fr_time_add_time_delta(test_time_base, fr_time_delta_from_nsec(NSEC * 0.5)); @@ -1446,7 +1446,7 @@ static void test_connection_rebalance_requests(void) DEBUG_LVL_SET; el = fr_event_list_alloc(ctx, NULL, NULL); - fr_event_list_set_time_func(el, test_time); + fr_timer_list_set_time_func(el->tl, test_time); trunk = test_setup_trunk(ctx, el, &conf, true, NULL); preq = talloc_zero(NULL, test_proto_request_t); @@ -1519,7 +1519,7 @@ static void test_connection_levels_max(void) DEBUG_LVL_SET; el = fr_event_list_alloc(ctx, NULL, NULL); - 
fr_event_list_set_time_func(el, test_time); + fr_timer_list_set_time_func(el->tl, test_time); /* Need to provide a timer starting value above zero */ test_time_base = fr_time_add_time_delta(test_time_base, fr_time_delta_from_nsec(NSEC * 0.5)); @@ -1681,7 +1681,7 @@ static void test_connection_levels_alternating_edges(void) DEBUG_LVL_SET; el = fr_event_list_alloc(ctx, NULL, NULL); - fr_event_list_set_time_func(el, test_time); + fr_timer_list_set_time_func(el->tl, test_time); /* Need to provide a timer starting value above zero */ test_time_base = fr_time_add_time_delta(test_time_base, fr_time_delta_from_nsec(NSEC * 0.5)); @@ -1845,7 +1845,7 @@ static void test_enqueue_and_io_speed(void) DEBUG_LVL_SET; el = fr_event_list_alloc(ctx, NULL, NULL); - fr_event_list_set_time_func(el, test_time); + fr_timer_list_set_time_func(el->tl, test_time); /* Need to provide a timer starting value above zero */ test_time_base = fr_time_add_time_delta(test_time_base, fr_time_delta_from_nsec(NSEC * 0.5)); diff --git a/src/lib/unlang/interpret.c b/src/lib/unlang/interpret.c index 9d15001d6e5..c3a98af7297 100644 --- a/src/lib/unlang/interpret.c +++ b/src/lib/unlang/interpret.c @@ -302,7 +302,7 @@ unlang_action_t unlang_interpret_push_children(rlm_rcode_t *p_result, request_t return UNLANG_ACTION_PUSHED_CHILD; } -static void instruction_timeout_handler(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *ctx); +static void instruction_timeout_handler(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *ctx); /** Update the current result after each instruction, and after popping each stack frame * @@ -408,8 +408,8 @@ unlang_frame_action_t result_calculate(request_t *request, unlang_stack_frame_t if (fr_time_delta_ispos(instruction->actions.retry.mrd)) { retry->timeout = fr_time_add(fr_time(), instruction->actions.retry.mrd); - if (fr_event_timer_at(retry, unlang_interpret_event_list(request), &retry->ev, retry->timeout, - instruction_timeout_handler, request) < 0) { + if (fr_timer_at(retry, unlang_interpret_event_list(request)->tl, &retry->ev, retry->timeout, + false, instruction_timeout_handler, request) < 0) { RPEDEBUG("Failed inserting event"); goto fail; } @@ -1268,7 +1268,7 @@ void unlang_interpret_signal(request_t *request, fr_signal_t action) } } -static void instruction_timeout_handler(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *ctx) +static void instruction_timeout_handler(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *ctx) { unlang_retry_t *retry = talloc_get_type_abort(ctx, unlang_retry_t); request_t *request = talloc_get_type_abort(retry->request, request_t); @@ -1445,7 +1445,7 @@ static xlat_action_t unlang_cancel_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out, * loop. This means the request is always in a consistent state when * the timeout event fires, even if that's state is waiting on I/O. 
*/ -static void unlang_cancel_event(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) +static void unlang_cancel_event(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx) { request_t *request = talloc_get_type_abort(uctx, request_t); @@ -1480,7 +1480,7 @@ static xlat_action_t unlang_cancel_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out, { fr_value_box_t *timeout; fr_event_list_t *el = unlang_interpret_event_list(request); - fr_event_timer_t const **ev_p, **ev_p_og; + fr_timer_t **ev_p, **ev_p_og; fr_value_box_t *vb; fr_time_t when = fr_time_from_sec(0); /* Invalid clang complaints if we don't set this */ @@ -1492,24 +1492,24 @@ static xlat_action_t unlang_cancel_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out, */ ev_p = ev_p_og = request_data_get(request, (void *)unlang_cancel_xlat, 0); if (ev_p) { - if (*ev_p) when = fr_event_timer_when(*ev_p); /* *ev_p should never be NULL, really... */ + if (*ev_p) when = fr_timer_when(*ev_p); /* *ev_p should never be NULL, really... */ } else { /* * Must not be parented from the request * as this is freed by request data. */ - MEM(ev_p = talloc_zero(NULL, fr_event_timer_t const *)); + MEM(ev_p = talloc_zero(NULL, fr_timer_t *)); } - if (unlikely(fr_event_timer_in(ev_p, el, ev_p, - timeout ? timeout->vb_time_delta : fr_time_delta_from_sec(0), - unlang_cancel_event, request) < 0)) { + if (unlikely(fr_timer_in(ev_p, el->tl, ev_p, + timeout ? timeout->vb_time_delta : fr_time_delta_from_sec(0), + false, unlang_cancel_event, request) < 0)) { RPERROR("Failed inserting cancellation event"); talloc_free(ev_p); return XLAT_ACTION_FAIL; } if (unlikely(request_data_add(request, (void *)unlang_cancel_xlat, 0, - UNCONST(fr_event_timer_t **, ev_p), true, true, false) < 0)) { + UNCONST(fr_timer_t **, ev_p), true, true, false) < 0)) { RPERROR("Failed associating cancellation event with request"); talloc_free(ev_p); return XLAT_ACTION_FAIL; diff --git a/src/lib/unlang/module.c b/src/lib/unlang/module.c index 03d39d5bfec..eaf5e4e3bb7 100644 --- a/src/lib/unlang/module.c +++ b/src/lib/unlang/module.c @@ -36,7 +36,7 @@ RCSID("$Id$") #include "tmpl.h" static unlang_action_t unlang_module_resume(rlm_rcode_t *p_result, request_t *request, unlang_stack_frame_t *frame); -static void unlang_module_event_retry_handler(UNUSED fr_event_list_t *el, fr_time_t now, void *ctx); +static void unlang_module_event_retry_handler(UNUSED fr_timer_list_t *tl, fr_time_t now, void *ctx); /** Push a module or submodule onto the stack for evaluation * @@ -387,8 +387,9 @@ unlang_action_t unlang_module_yield_to_retry(request_t *request, module_method_t if (!state->retry.config) { fr_retry_init(&state->retry, fr_time(), retry_cfg); - if (fr_event_timer_at(state, unlang_interpret_event_list(request), &state->ev, - state->retry.next, unlang_module_event_retry_handler, request) < 0) { + if (fr_timer_at(state, unlang_interpret_event_list(request)->tl, &state->ev, + state->retry.next, + false, unlang_module_event_retry_handler, request) < 0) { RPEDEBUG("Failed inserting event"); return UNLANG_ACTION_FAIL; } @@ -685,12 +686,12 @@ static unlang_action_t unlang_module_resume(rlm_rcode_t *p_result, request_t *re /** Call the callback registered for a retry event * - * @param[in] el the event timer was inserted into. + * @param[in] tl the event timer was inserted into. * @param[in] now The current time, as held by the event_list. 
* @param[in] ctx the stack frame * */ -static void unlang_module_event_retry_handler(UNUSED fr_event_list_t *el, fr_time_t now, void *ctx) +static void unlang_module_event_retry_handler(UNUSED fr_timer_list_t *tl, fr_time_t now, void *ctx) { request_t *request = talloc_get_type_abort(ctx, request_t); unlang_stack_t *stack = request->stack; @@ -731,8 +732,8 @@ static void unlang_module_event_retry_handler(UNUSED fr_event_list_t *el, fr_tim /* * Reset the timer. */ - if (fr_event_timer_at(state, unlang_interpret_event_list(request), &state->ev, state->retry.next, - unlang_module_event_retry_handler, request) < 0) { + if (fr_timer_at(state, unlang_interpret_event_list(request)->tl, &state->ev, state->retry.next, + false, unlang_module_event_retry_handler, request) < 0) { RPEDEBUG("Failed inserting event"); unlang_interpret_mark_runnable(request); /* and let the caller figure out what's up */ } @@ -887,9 +888,9 @@ static unlang_action_t unlang_module(rlm_rcode_t *p_result, request_t *request, fr_retry_init(&state->retry, now, &frame->instruction->actions.retry); - if (fr_event_timer_at(state, unlang_interpret_event_list(request), - &state->ev, state->retry.next, - unlang_module_event_retry_handler, request) < 0) { + if (fr_timer_at(state, unlang_interpret_event_list(request)->tl, + &state->ev, state->retry.next, + false, unlang_module_event_retry_handler, request) < 0) { RPEDEBUG("Failed inserting event"); goto fail; } diff --git a/src/lib/unlang/module_priv.h b/src/lib/unlang/module_priv.h index f19bb00ad70..7480733b107 100644 --- a/src/lib/unlang/module_priv.h +++ b/src/lib/unlang/module_priv.h @@ -84,7 +84,7 @@ typedef struct { module_instance_t const *mi; //!< Module instance to pass to callbacks. request_t *request; - fr_event_timer_t const *ev; //!< retry timer just for this module. + fr_timer_t *ev; //!< retry timer just for this module. fr_retry_t retry; //!< retry timers, etc. 
/** @} */ diff --git a/src/lib/unlang/subrequest_child.c b/src/lib/unlang/subrequest_child.c index a571a2eff92..9c5c00cc856 100644 --- a/src/lib/unlang/subrequest_child.c +++ b/src/lib/unlang/subrequest_child.c @@ -50,7 +50,7 @@ fr_dict_attr_autoload_t subrequest_dict_attr[] = { /** Event handler to free a detached child * */ -static void unlang_detached_max_request_time(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) +static void unlang_detached_max_request_time(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx) { request_t *request = talloc_get_type_abort(uctx, request_t); @@ -74,7 +74,7 @@ int unlang_subrequest_lifetime_set(request_t *request) vp = fr_pair_find_by_da(&request->control_pairs, NULL, request_attr_request_lifetime); if (!vp || (vp->vp_uint32 > 0)) { fr_time_delta_t when = fr_time_delta_wrap(0); - const fr_event_timer_t **ev_p; + fr_timer_t **ev_p; if (!vp) { when = fr_time_delta_add(when, fr_time_delta_from_sec(30)); /* default to 30s if not set */ @@ -94,8 +94,8 @@ int unlang_subrequest_lifetime_set(request_t *request) ev_p = talloc_size(request, sizeof(*ev_p)); memset(ev_p, 0, sizeof(*ev_p)); - if (fr_event_timer_in(request, unlang_interpret_event_list(request), ev_p, when, - unlang_detached_max_request_time, request) < 0) { + if (fr_timer_in(request, unlang_interpret_event_list(request)->tl, ev_p, when, + false, unlang_detached_max_request_time, request) < 0) { talloc_free(ev_p); return -1; } diff --git a/src/lib/unlang/timeout.c b/src/lib/unlang/timeout.c index a33421f8573..a7c724e2417 100644 --- a/src/lib/unlang/timeout.c +++ b/src/lib/unlang/timeout.c @@ -33,12 +33,12 @@ typedef struct { fr_time_delta_t timeout; request_t *request; rindent_t indent; - fr_event_timer_t const *ev; + fr_timer_t *ev; fr_value_box_list_t result; } unlang_frame_state_timeout_t; -static void unlang_timeout_handler(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *ctx) +static void unlang_timeout_handler(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *ctx) { unlang_frame_state_timeout_t *state = talloc_get_type_abort(ctx, unlang_frame_state_timeout_t); request_t *request = talloc_get_type_abort(state->request, request_t); @@ -83,8 +83,8 @@ static unlang_action_t unlang_timeout_set(rlm_rcode_t *p_result, request_t *requ timeout = fr_time_add(fr_time(), state->timeout); - if (fr_event_timer_at(state, unlang_interpret_event_list(request), &state->ev, timeout, - unlang_timeout_handler, state) < 0) { + if (fr_timer_at(state, unlang_interpret_event_list(request)->tl, &state->ev, timeout, + false, unlang_timeout_handler, state) < 0) { RPEDEBUG("Failed inserting event"); *p_result = RLM_MODULE_FAIL; return UNLANG_ACTION_STOP_PROCESSING; diff --git a/src/lib/unlang/unlang_priv.h b/src/lib/unlang/unlang_priv.h index a685fc5f62f..c5ae5eee7fc 100644 --- a/src/lib/unlang/unlang_priv.h +++ b/src/lib/unlang/unlang_priv.h @@ -262,7 +262,7 @@ typedef struct { fr_retry_state_t state; fr_time_t timeout; uint32_t count; - fr_event_timer_t const *ev; + fr_timer_t *ev; } unlang_retry_t; /** Our interpreter stack, as distinct from the C stack diff --git a/src/lib/unlang/xlat.c b/src/lib/unlang/xlat.c index 4df0d7a92f8..47419cc63e7 100644 --- a/src/lib/unlang/xlat.c +++ b/src/lib/unlang/xlat.c @@ -60,7 +60,7 @@ typedef struct { ///< of the execution. 
} unlang_frame_state_xlat_t; -/** Wrap an #fr_event_timer_t providing data needed for unlang events +/** Wrap an #fr_timer_t providing data needed for unlang events * */ typedef struct { @@ -73,7 +73,7 @@ typedef struct { xlat_inst_t *inst; //!< xlat instance data. xlat_thread_inst_t *thread; //!< Thread specific xlat instance. void const *rctx; //!< rctx data to pass to callbacks. - fr_event_timer_t const *ev; //!< Event in this worker's event heap. + fr_timer_t *ev; //!< Event in this worker's event heap. } unlang_xlat_event_t; typedef struct { @@ -84,7 +84,7 @@ typedef struct { fr_unlang_xlat_retry_t retry_cb; //!< callback to run on timeout void *rctx; //!< rctx data to pass to timeout callback - fr_event_timer_t const *ev; //!< retry timer just for this xlat + fr_timer_t *ev; //!< retry timer just for this xlat fr_retry_t retry; //!< retry timers, etc. } unlang_xlat_retry_t; @@ -97,7 +97,7 @@ typedef struct { static int _unlang_xlat_event_free(unlang_xlat_event_t *ev) { if (ev->ev) { - (void) fr_event_timer_delete(&(ev->ev)); + (void) fr_timer_delete(&(ev->ev)); return 0; } @@ -110,14 +110,14 @@ static int _unlang_xlat_event_free(unlang_xlat_event_t *ev) /** Call the callback registered for a timeout event * - * @param[in] el the event timer was inserted into. + * @param[in] tl the event timer was inserted into. * @param[in] now The current time, as held by the event_list. * @param[in] uctx unlang_module_event_t structure holding callbacks. * */ -static void unlang_xlat_event_timeout_handler(UNUSED fr_event_list_t *el, fr_time_t now, void *uctx) +static void unlang_xlat_event_timeout_handler(UNUSED fr_timer_list_t *tl, fr_time_t now, void *uctx) { - unlang_xlat_event_t *ev = talloc_get_type_abort(uctx, unlang_xlat_event_t); + unlang_xlat_event_t *ev = talloc_get_type_abort(uctx, unlang_xlat_event_t); /* * If the timeout's fired then the xlat must necessarily @@ -173,8 +173,9 @@ int unlang_xlat_timeout_add(request_t *request, ev->thread = xlat_thread_instance_find(state->exp); ev->rctx = rctx; - if (fr_event_timer_at(request, unlang_interpret_event_list(request), - &ev->ev, when, unlang_xlat_event_timeout_handler, ev) < 0) { + if (fr_timer_at(request, unlang_interpret_event_list(request)->tl, + &ev->ev, when, + false, unlang_xlat_event_timeout_handler, ev) < 0) { RPEDEBUG("Failed inserting event"); talloc_free(ev); return -1; @@ -599,19 +600,19 @@ xlat_action_t unlang_xlat_yield(request_t *request, */ static int _unlang_xlat_retry_free(unlang_xlat_retry_t *ev) { - if (ev->ev) (void) fr_event_timer_delete(&(ev->ev)); + if (ev->ev) (void) fr_timer_delete(&(ev->ev)); return 0; } /** Call the callback registered for a timeout event * - * @param[in] el the event timer was inserted into. + * @param[in] tl the event timer was inserted into. * @param[in] now The current time, as held by the event_list. * @param[in] uctx unlang_module_event_t structure holding callbacks. * */ -static void unlang_xlat_event_retry_handler(UNUSED fr_event_list_t *el, fr_time_t now, void *uctx) +static void unlang_xlat_event_retry_handler(UNUSED fr_timer_list_t *tl, fr_time_t now, void *uctx) { unlang_xlat_retry_t *ev = talloc_get_type_abort(uctx, unlang_xlat_retry_t); request_t *request = ev->request; @@ -631,8 +632,8 @@ static void unlang_xlat_event_retry_handler(UNUSED fr_event_list_t *el, fr_time_ /* * Reset the timer. 
*/ - if (fr_event_timer_at(ev, unlang_interpret_event_list(request), &ev->ev, ev->retry.next, - unlang_xlat_event_retry_handler, request) < 0) { + if (fr_timer_at(ev, unlang_interpret_event_list(request)->tl, &ev->ev, ev->retry.next, + false, unlang_xlat_event_retry_handler, request) < 0) { RPEDEBUG("Failed inserting event"); talloc_free(ev); unlang_interpret_mark_runnable(request); @@ -714,8 +715,9 @@ xlat_action_t unlang_xlat_yield_to_retry(request_t *request, xlat_func_t resume, fr_retry_init(&ev->retry, fr_time(), retry_cfg); - if (fr_event_timer_at(request, unlang_interpret_event_list(request), - &ev->ev, ev->retry.next, unlang_xlat_event_retry_handler, ev) < 0) { + if (fr_timer_at(request, unlang_interpret_event_list(request)->tl, + &ev->ev, ev->retry.next, + false, unlang_xlat_event_retry_handler, ev) < 0) { RPEDEBUG("Failed inserting event"); talloc_free(ev); return XLAT_ACTION_FAIL; diff --git a/src/lib/util/event.c b/src/lib/util/event.c index 9ec8bf58afd..a7ce23ff39f 100644 --- a/src/lib/util/event.c +++ b/src/lib/util/event.c @@ -29,9 +29,12 @@ */ RCSID("$Id$") +#define _EVENT_LIST_PRIVATE 1 +typedef struct fr_event_list_s fr_event_list_t; + #include #include -#include +#include #include #include #include @@ -64,15 +67,6 @@ DIAG_ON(unused-macros) # define SO_GET_FILTER SO_ATTACH_FILTER #endif -#ifdef WITH_EVENT_DEBUG -# define EVENT_DEBUG(fmt, ...) printf("EVENT:");printf(fmt, ## __VA_ARGS__);printf("\n"); -# ifndef EVENT_REPORT_FREQ -# define EVENT_REPORT_FREQ 5 -# endif -#else -# define EVENT_DEBUG(...) -#endif - static fr_table_num_sorted_t const kevent_filter_table[] = { #ifdef EVFILT_AIO { L("EVFILT_AIO"), EVFILT_AIO }, @@ -96,31 +90,6 @@ static size_t kevent_filter_table_len = NUM_ELEMENTS(kevent_filter_table); static int log_conf_kq; #endif -/** A timer event - * - */ -struct fr_event_timer { - fr_time_t when; //!< When this timer should fire. - - fr_event_timer_cb_t callback; //!< Callback to execute when the timer fires. - void const *uctx; //!< Context pointer to pass to the callback. - - TALLOC_CTX *linked_ctx; //!< talloc ctx this event was bound to. - - fr_event_timer_t const **parent; //!< A pointer to the parent structure containing the timer - ///< event. - - fr_lst_index_t lst_id; //!< Where to store opaque lst data. - fr_dlist_t entry; //!< List of deferred timer events. - - fr_event_list_t *el; //!< Event list containing this timer. - -#ifndef NDEBUG - char const *file; //!< Source file this event was last updated in. - int line; //!< Line this event was last updated on. -#endif -}; - typedef enum { FR_EVENT_FD_SOCKET = 1, //!< is a socket. FR_EVENT_FD_FILE = 2, //!< is a file. @@ -401,46 +370,43 @@ typedef struct { */ typedef struct { fr_dlist_t entry; //!< Linked list of callback. - fr_event_timer_cb_t callback; //!< The callback to call. + fr_event_post_cb_t callback; //!< The callback to call. void *uctx; //!< Context for the callback. } fr_event_post_t; /** Stores all information relating to an event list * */ -struct fr_event_list { - fr_lst_t *times; //!< of timer events to be executed. - fr_rb_tree_t *fds; //!< Tree used to track FDs with filters in kqueue. +struct fr_event_list_s { + struct fr_event_list_pub_s pub; //!< Next event list in the chain. + fr_rb_tree_t *fds; //!< Tree used to track FDs with filters in kqueue. - int will_exit; //!< Will exit on next call to fr_event_corral. - int exit; //!< If non-zero event loop will prevent the addition - ///< of new events, and will return immediately - ///< from the corral/service function. 
+ int will_exit; //!< Will exit on next call to fr_event_corral. + int exit; //!< If non-zero event loop will prevent the addition + ///< of new events, and will return immediately + ///< from the corral/service function. - fr_event_time_source_t time; //!< Where our time comes from. - fr_time_t now; //!< The last time the event list was serviced. - bool dispatch; //!< Whether the event list is currently dispatching events. + bool dispatch; //!< Whether the event list is currently dispatching events. - int num_fd_events; //!< Number of events in this event list. + int num_fd_events; //!< Number of events in this event list. - int kq; //!< instance associated with this event list. + int kq; //!< instance associated with this event list. - fr_dlist_head_t pre_callbacks; //!< callbacks when we may be idle... - fr_dlist_head_t post_callbacks; //!< post-processing callbacks + fr_dlist_head_t pre_callbacks; //!< callbacks when we may be idle... + fr_dlist_head_t post_callbacks; //!< post-processing callbacks - fr_dlist_head_t pid_to_reap; //!< A list of all orphaned child processes we're - ///< waiting to reap. + fr_dlist_head_t pid_to_reap; //!< A list of all orphaned child processes we're + ///< waiting to reap. - struct kevent events[FR_EV_BATCH_FDS]; /* so it doesn't go on the stack every time */ + struct kevent events[FR_EV_BATCH_FDS]; /* so it doesn't go on the stack every time */ - bool in_handler; //!< Deletes should be deferred until after the - ///< handlers complete. + bool in_handler; //!< Deletes should be deferred until after the + ///< handlers complete. - fr_dlist_head_t fd_to_free; //!< File descriptor events pending deletion. - fr_dlist_head_t ev_to_add; //!< dlist of events to add + fr_dlist_head_t fd_to_free; //!< File descriptor events pending deletion. #ifdef WITH_EVENT_DEBUG - fr_event_timer_t const *report; //!< Report event. + fr_timer_t *report; //!< Report event. #endif }; @@ -564,22 +530,6 @@ static inline CC_HINT(always_inline) fr_event_fd_cb_t event_fd_func(fr_event_fd_ } } -/** Compare two timer events to see which one should occur first - * - * @param[in] a the first timer event. - * @param[in] b the second timer event. - * @return - * - +1 if a should occur later than b. - * - -1 if a should occur earlier than b. - * - 0 if both events occur at the same time. - */ -static int8_t fr_event_timer_cmp(void const *a, void const *b) -{ - fr_event_timer_t const *ev_a = a, *ev_b = b; - - return fr_time_cmp(ev_a->when, ev_b->when); -} - /** Compare two file descriptor handles * * @param[in] one the first file descriptor handle. @@ -614,7 +564,7 @@ uint64_t fr_event_list_num_timers(fr_event_list_t *el) { if (unlikely(!el)) return -1; - return fr_lst_num_elements(el->times); + return fr_timer_list_num_events(el->pub.tl); } /** Return the kq associated with an event list. @@ -642,11 +592,7 @@ int fr_event_list_kq(fr_event_list_t *el) */ fr_time_t fr_event_list_time(fr_event_list_t *el) { - if (el->dispatch) { - return el->now; - } else { - return el->time(); - } + return el->pub.tl->time(); } /** Placeholder callback to avoid branches in service loop @@ -1386,255 +1332,6 @@ int fr_event_fd_unarmour(fr_event_list_t *el, int fd, fr_event_filter_t filter, } #endif -/** Remove an event from the event loop - * - * @param[in] ev to free. - * @return - * - 0 on success. - * - -1 on failure. 
- */ -static int _event_timer_free(fr_event_timer_t *ev) -{ - fr_event_list_t *el = ev->el; - fr_event_timer_t const **ev_p; - - if (fr_dlist_entry_in_list(&ev->entry)) { - (void) fr_dlist_remove(&el->ev_to_add, ev); - } else { - int ret = fr_lst_extract(el->times, ev); - char const *err_file; - int err_line; - -#ifndef NDEBUG - err_file = ev->file; - err_line = ev->line; -#else - err_file = "not-available"; - err_line = 0; -#endif - - - /* - * Events MUST be in the lst (or the insertion list). - */ - if (!fr_cond_assert_msg(ret == 0, - "Event %p, lst_id %u, allocd %s[%d], was not found in the event lst or " - "insertion list when freed: %s", ev, ev->lst_id, err_file, err_line, - fr_strerror())) return -1; - } - - ev_p = ev->parent; - fr_assert(*(ev->parent) == ev); - *ev_p = NULL; - - return 0; -} - -/** Insert a timer event into an event list - * - * @note The talloc parent of the memory returned in ev_p must not be changed. - * If the lifetime of the event needs to be bound to another context - * this function should be called with the existing event pointed to by - * ev_p. - * - * @param[in] ctx to bind lifetime of the event to. - * @param[in] el to insert event into. - * @param[in,out] ev_p If not NULL modify this event instead of creating a new one. This is a parent - * in a temporal sense, not in a memory structure or dependency sense. - * @param[in] when we should run the event. - * @param[in] callback function to execute if the event fires. - * @param[in] uctx user data to pass to the event. - * @return - * - 0 on success. - * - -1 on failure. - */ -int _fr_event_timer_at(NDEBUG_LOCATION_ARGS - TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_timer_t const **ev_p, - fr_time_t when, fr_event_timer_cb_t callback, void const *uctx) -{ - fr_event_timer_t *ev; - - if (unlikely(!el)) { - fr_strerror_const("Invalid arguments: NULL event list"); - return -1; - } - - if (unlikely(!callback)) { - fr_strerror_const("Invalid arguments: NULL callback"); - return -1; - } - - if (unlikely(!ev_p)) { - fr_strerror_const("Invalid arguments: NULL ev_p"); - return -1; - } - - if (unlikely(el->exit)) { - fr_strerror_const("Event loop exiting"); - return -1; - } - - /* - * If there is an event, reuse it instead of freeing it - * and allocating a new one. This is to reduce memory - * churn for repeat events. - */ - if (!*ev_p) { - new_event: - ev = talloc_zero(el, fr_event_timer_t); - if (unlikely(!ev)) return -1; - - EVENT_DEBUG("%p - %s[%i] Added new timer %p", el, file, line, ev); - - /* - * Bind the lifetime of the event to the specified - * talloc ctx. If the talloc ctx is freed, the - * event will also be freed. - */ - if (ctx != el) talloc_link_ctx(ctx, ev); - - talloc_set_destructor(ev, _event_timer_free); - ev->lst_id = 0; - - } else { - ev = UNCONST(fr_event_timer_t *, *ev_p); - - EVENT_DEBUG("%p - %s[%i] Re-armed timer %p", el, file, line, ev); - - /* - * We can't disarm the linking context due to - * limitations in talloc, so if the linking - * context changes, we need to free the old - * event, and allocate a new one. - * - * Freeing the event also removes it from the lst. - */ - if (unlikely(ev->linked_ctx != ctx)) { - talloc_free(ev); - goto new_event; - } - - /* - * Event may have fired, in which case the event - * will no longer be in the event loop, so check - * if it's in the lst before extracting it. 
- */ - if (!fr_dlist_entry_in_list(&ev->entry)) { - int ret; - char const *err_file; - int err_line; - - ret = fr_lst_extract(el->times, ev); - -#ifndef NDEBUG - err_file = ev->file; - err_line = ev->line; -#else - err_file = "not-available"; - err_line = 0; -#endif - - /* - * Events MUST be in the lst (or the insertion list). - */ - if (!fr_cond_assert_msg(ret == 0, - "Event %p, lst_id %u, allocd %s[%d], was not found in the event " - "lst or insertion list when freed: %s", ev, ev->lst_id, - err_file, err_line, fr_strerror())) return -1; - } - } - - ev->el = el; - ev->when = when; - ev->callback = callback; - ev->uctx = uctx; - ev->linked_ctx = ctx; - ev->parent = ev_p; -#ifndef NDEBUG - ev->file = file; - ev->line = line; -#endif - - if (el->in_handler) { - /* - * Don't allow an event to be inserted - * into the deferred insertion list - * multiple times. - */ - if (!fr_dlist_entry_in_list(&ev->entry)) fr_dlist_insert_head(&el->ev_to_add, ev); - } else if (unlikely(fr_lst_insert(el->times, ev) < 0)) { - fr_strerror_const_push("Failed inserting event"); - talloc_set_destructor(ev, NULL); - *ev_p = NULL; - talloc_free(ev); - return -1; - } - - *ev_p = ev; - - return 0; -} - -/** Insert a timer event into an event list - * - * @note The talloc parent of the memory returned in ev_p must not be changed. - * If the lifetime of the event needs to be bound to another context - * this function should be called with the existing event pointed to by - * ev_p. - * - * @param[in] ctx to bind lifetime of the event to. - * @param[in] el to insert event into. - * @param[in,out] ev_p If not NULL modify this event instead of creating a new one. This is a parent - * in a temporal sense, not in a memory structure or dependency sense. - * @param[in] delta In how many nanoseconds to wait before should we execute the event. - * @param[in] callback function to execute if the event fires. - * @param[in] uctx user data to pass to the event. - * @return - * - 0 on success. - * - -1 on failure. - */ -int _fr_event_timer_in(NDEBUG_LOCATION_ARGS - TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_timer_t const **ev_p, - fr_time_delta_t delta, fr_event_timer_cb_t callback, void const *uctx) -{ - return _fr_event_timer_at(NDEBUG_LOCATION_VALS - ctx, el, ev_p, fr_time_add(el->time(), delta), callback, uctx); -} - -/** Delete a timer event from the event list - * - * @param[in] ev_p of the event being deleted. - * @return - * - 0 on success. - * - -1 on failure. - */ -int fr_event_timer_delete(fr_event_timer_t const **ev_p) -{ - fr_event_timer_t *ev; - int ret; - - if (unlikely(!*ev_p)) return 0; - - ev = UNCONST(fr_event_timer_t *, *ev_p); - ret = talloc_free(ev); - - /* - * Don't leave a garbage pointer value - * in the parent. - */ - if (likely(ret == 0)) *ev_p = NULL; - return 0; -} - -/** Internal timestamp representing when the timer should fire - * - * @return When the timestamp should fire. - */ -fr_time_t fr_event_timer_when(fr_event_timer_t const *ev) -{ - return ev->when; -} - /** Remove PID wait event from kevent if the fr_event_pid_t is freed * * @param[in] ev to free. 
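The block removed above is essentially the whole of the old timer implementation in event.c. It reappears, reworked, as src/lib/util/timer.c later in this patch, and the public entry points map one-to-one onto new names, so callers only need a mechanical substitution. A small sketch of the query/cancel pair, assuming ev is an armed handle of the appropriate type in each case:

/* Old API (removed above): const handle */
fr_event_timer_t const *old_ev;
fr_time_t old_next = fr_event_timer_when(old_ev);	/* when will it fire? */
(void) fr_event_timer_delete(&old_ev);			/* cancel and NULL the handle */

/* New API (added in timer.c below): mutable handle, same semantics */
fr_timer_t *new_ev;
fr_time_t new_next = fr_timer_when(new_ev);
(void) fr_timer_delete(&new_ev);

In addition, the new code in timer.c exposes fr_timer_disarm(), which detaches an event from its list while leaving the memory and its fields intact, something the removed fr_event_timer API did not offer.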
@@ -2016,7 +1713,7 @@ unsigned int fr_event_list_reap_signal(fr_event_list_t *el, fr_time_delta_t time struct kevent evset; int waiting = 0; int kq = kqueue(); - fr_time_t now, start = el->time(), end = fr_time_add(start, timeout); + fr_time_t now, start = el->pub.tl->time(), end = fr_time_add(start, timeout); if (unlikely(kq < 0)) goto force; @@ -2058,7 +1755,7 @@ unsigned int fr_event_list_reap_signal(fr_event_list_t *el, fr_time_delta_t time /* * Keep draining process exits as they come in... */ - while ((waiting > 0) && fr_time_gt(end, (now = el->time()))) { + while ((waiting > 0) && fr_time_gt(end, (now = el->pub.tl->time()))) { struct kevent kev; int ret; @@ -2310,7 +2007,7 @@ int fr_event_pre_delete(fr_event_list_t *el, fr_event_status_cb_t callback, void * - < 0 on error * - 0 on success */ -int fr_event_post_insert(fr_event_list_t *el, fr_event_timer_cb_t callback, void *uctx) +int fr_event_post_insert(fr_event_list_t *el, fr_event_post_cb_t callback, void *uctx) { fr_event_post_t *post; @@ -2332,7 +2029,7 @@ int fr_event_post_insert(fr_event_list_t *el, fr_event_timer_cb_t callback, void * - < 0 on error * - 0 on success */ -int fr_event_post_delete(fr_event_list_t *el, fr_event_timer_cb_t callback, void *uctx) +int fr_event_post_delete(fr_event_list_t *el, fr_event_post_cb_t callback, void *uctx) { fr_event_post_t *post, *next; @@ -2352,56 +2049,6 @@ int fr_event_post_delete(fr_event_list_t *el, fr_event_timer_cb_t callback, void return -1; } -/** Run a single scheduled timer event - * - * @param[in] el containing the timer events. - * @param[in] when Process events scheduled to run before or at this time. - * @return - * - 0 no timer events fired. - * - 1 a timer event fired. - */ -int fr_event_timer_run(fr_event_list_t *el, fr_time_t *when) -{ - fr_event_timer_cb_t callback; - void *uctx; - fr_event_timer_t *ev; - - if (unlikely(!el)) return 0; - - if (fr_lst_num_elements(el->times) == 0) { - *when = fr_time_wrap(0); - return 0; - } - - ev = fr_lst_peek(el->times); - if (!ev) { - *when = fr_time_wrap(0); - return 0; - } - - /* - * See if it's time to do this one. - */ - if (fr_time_gt(ev->when, *when)) { - *when = ev->when; - return 0; - } - - callback = ev->callback; - memcpy(&uctx, &ev->uctx, sizeof(uctx)); - - fr_assert(*ev->parent == ev); - - /* - * Delete the event before calling it. - */ - fr_event_timer_delete(ev->parent); - - callback(el, *when, uctx); - - return 1; -} - /** Gather outstanding timer and file descriptor events * * @param[in] el to process events for. @@ -2418,7 +2065,7 @@ int fr_event_corral(fr_event_list_t *el, fr_time_t now, bool wait) fr_event_pre_t *pre; int num_fd_events; bool timer_event_ready = false; - fr_event_timer_t *ev; + fr_time_t next; el->num_fd_events = 0; @@ -2435,20 +2082,19 @@ int fr_event_corral(fr_event_list_t *el, fr_time_t now, bool wait) */ when = fr_time_delta_wrap(0); wake = &when; - el->now = now; /* * See when we have to wake up. Either now, if the timer * events are in the past. Or, we wait for a future * timer event. 
*/ - ev = fr_lst_peek(el->times); - if (ev) { - if (fr_time_lteq(ev->when, el->now)) { + next = fr_timer_list_when(el->pub.tl); + if (fr_time_neq(next, fr_time_wrap(0))) { + if (fr_time_lteq(next, now)) { timer_event_ready = true; } else if (wait) { - when = fr_time_sub(ev->when, el->now); + when = fr_time_sub(next, now); } /* else we're not waiting, leave "when == 0" */ @@ -2518,7 +2164,6 @@ int fr_event_corral(fr_event_list_t *el, fr_time_t now, bool wait) * If there are no FD events, we must have woken up from a timer */ if (!num_fd_events) { - el->now = fr_time_add(el->now, when); if (wait) timer_event_ready = true; } /* @@ -2532,6 +2177,7 @@ int fr_event_corral(fr_event_list_t *el, fr_time_t now, bool wait) return num_fd_events + timer_event_ready; } +CC_NO_UBSAN(function) /* UBSAN: false positive - public vs private connection_t trips --fsanitize=function*/ static inline CC_HINT(always_inline) void event_callback(fr_event_list_t *el, fr_event_fd_t *ef, int *filter, int flags, int *fflags) { @@ -2548,10 +2194,10 @@ void event_callback(fr_event_list_t *el, fr_event_fd_t *ef, int *filter, int fla */ void fr_event_service(fr_event_list_t *el) { + fr_timer_list_t *etl = el->pub.tl; int i; fr_event_post_t *post; - fr_time_t when; - fr_event_timer_t *ev; + fr_time_t when, now; if (unlikely(el->exit)) return; @@ -2685,44 +2331,27 @@ void fr_event_service(fr_event_list_t *el) * cause strange interaction effects, spurious calls * to kevent, and busy loops. */ - el->now = el->time(); + now = etl->time(); /* * Run all of the timer events. Note that these can add * new timers! */ - if (fr_lst_num_elements(el->times) > 0) { - el->in_handler = true; - - do { - when = el->now; - } while (fr_event_timer_run(el, &when) == 1); + if (fr_time_neq(fr_timer_list_when(el->pub.tl), fr_time_wrap(0))) { + int ret; - el->in_handler = false; - } + when = now; - /* - * New timers can be added while running the timer - * callback. Instead of being added to the main timer - * lst, they are instead added to the "to do" list. - * Once we're finished running the callbacks, we walk - * through the "to do" list, and add the callbacks to the - * timer lst. - * - * Doing it this way prevents the server from running - * into an infinite loop. The timer callback MAY add a - * new timer which is in the past. The loop above would - * then immediately run the new callback, which could - * also add an event in the past... - */ - while ((ev = fr_dlist_head(&el->ev_to_add)) != NULL) { - (void)fr_dlist_remove(&el->ev_to_add, ev); - if (unlikely(fr_lst_insert(el->times, ev) < 0)) { - talloc_free(ev); - fr_assert_msg(0, "failed inserting lst event: %s", fr_strerror()); /* Die in debug builds */ + ret = fr_timer_list_run(etl, &when); + if (!fr_cond_assert(ret >= 0)) { /* catastrophic error, trigger event loop exit */ + el->exit = 1; + return; } + + EVENT_DEBUG("%p - %s - Serviced %u timer(s)", el, __FUNCTION__, (unsigned int)ret); } - el->now = el->time(); + + now = etl->time(); /* * Run all of the post-processing events. 
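With timer storage gone from the event list, fr_event_corral() now asks the timer list for its next deadline via fr_timer_list_when(), where fr_time_wrap(0) means nothing is armed, and fr_event_service() hands the whole batch to fr_timer_list_run(), which returns the number of timers serviced or a negative value that is treated as fatal. The old ev_to_add deferral loop, which stopped a callback that re-arms itself in the past from starving the loop, is subsumed by the deferred list kept inside the timer list itself. The common self re-arming pattern, as used by _trunk_timer() and the slab cleanup handler elsewhere in this patch, therefore looks roughly like this; do_work(), work_ctx and interval are illustrative stand-ins:

static fr_timer_t *periodic_ev;

static void periodic_cb(fr_timer_list_t *tl, fr_time_t now, void *uctx)
{
	do_work(uctx, now);

	/* Re-arm on the same timer list the event fired from.  Passing the
	 * existing handle re-uses the fr_timer_t rather than reallocating it. */
	if (fr_timer_in(work_ctx, tl, &periodic_ev, interval, false, periodic_cb, uctx) < 0) {
		PERROR("Failed re-arming periodic timer");
	}
}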
@@ -2730,7 +2359,7 @@ void fr_event_service(fr_event_list_t *el) for (post = fr_dlist_head(&el->post_callbacks); post != NULL; post = fr_dlist_next(&el->post_callbacks, post)) { - post->callback(el, el->now, post->uctx); + post->callback(el, now, post->uctx); } } @@ -2769,7 +2398,7 @@ CC_HINT(flatten) int fr_event_loop(fr_event_list_t *el) el->dispatch = true; while (!el->exit) { - if (unlikely(fr_event_corral(el, el->time(), true)) < 0) break; + if (unlikely(fr_event_corral(el, el->pub.tl->time(), true)) < 0) break; fr_event_service(el); } @@ -2793,10 +2422,6 @@ CC_HINT(flatten) int fr_event_loop(fr_event_list_t *el) */ static int _event_list_free(fr_event_list_t *el) { - fr_event_timer_t const *ev; - - while ((ev = fr_lst_peek(el->times)) != NULL) fr_event_timer_delete(&ev); - fr_event_list_reap_signal(el, fr_time_delta_wrap(0), SIGKILL); talloc_free_children(el); @@ -2916,13 +2541,12 @@ fr_event_list_t *fr_event_list_alloc(TALLOC_CTX *ctx, fr_event_status_cb_t statu fr_strerror_const("Out of memory"); return NULL; } - el->time = fr_time; el->kq = -1; /* So destructor can be used before kqueue() provides us with fd */ talloc_set_destructor(el, _event_list_free); - el->times = fr_lst_talloc_alloc(el, fr_event_timer_cmp, fr_event_timer_t, lst_id, 0); - if (!el->times) { - fr_strerror_const("Failed allocating event lst"); + el->pub.tl = fr_timer_list_lst_alloc(el, NULL); + if (!el->pub.tl) { + fr_strerror_const("Failed allocating timer list"); error: talloc_free(el); return NULL; @@ -2942,7 +2566,6 @@ fr_event_list_t *fr_event_list_alloc(TALLOC_CTX *ctx, fr_event_status_cb_t statu fr_dlist_talloc_init(&el->pre_callbacks, fr_event_pre_t, entry); fr_dlist_talloc_init(&el->post_callbacks, fr_event_post_t, entry); - fr_dlist_talloc_init(&el->ev_to_add, fr_event_timer_t, entry); fr_dlist_talloc_init(&el->pid_to_reap, fr_event_pid_reap_t, entry); fr_dlist_talloc_init(&el->fd_to_free, fr_event_fd_t, entry); if (status) (void) fr_event_pre_insert(el, status, status_uctx); @@ -2956,187 +2579,17 @@ fr_event_list_t *fr_event_list_alloc(TALLOC_CTX *ctx, fr_event_status_cb_t statu goto error; } -#ifdef WITH_EVENT_DEBUG - fr_event_timer_in(el, el, &el->report, fr_time_delta_from_sec(EVENT_REPORT_FREQ), fr_event_report, NULL); -#endif - return el; } -/** Override event list time source - * - * @param[in] el to set new time function for. - * @param[in] func to set. 
- */ -void fr_event_list_set_time_func(fr_event_list_t *el, fr_event_time_source_t func) -{ - el->time = func; -} - /** Return whether the event loop has any active events * */ bool fr_event_list_empty(fr_event_list_t *el) { - return !fr_lst_num_elements(el->times) && !fr_rb_num_elements(el->fds); -} - -#ifdef WITH_EVENT_DEBUG -static const fr_time_delta_t decades[18] = { - { 1 }, { 10 }, { 100 }, - { 1000 }, { 10000 }, { 100000 }, - { 1000000 }, { 10000000 }, { 100000000 }, - { 1000000000 }, { 10000000000 }, { 100000000000 }, - { 1000000000000 }, { 10000000000000 }, { 100000000000000 }, - { 1000000000000000 }, { 10000000000000000 }, { 100000000000000000 }, -}; - -static const char *decade_names[18] = { - "1ns", "10ns", "100ns", - "1us", "10us", "100us", - "1ms", "10ms", "100ms", - "1s", "10s", "100s", - "1Ks", "10Ks", "100Ks", - "1Ms", "10Ms", "100Ms", /* 1 year is 300Ms */ -}; - -typedef struct { - fr_rb_node_t node; - char const *file; - int line; - uint32_t count; -} fr_event_counter_t; - -static int8_t event_timer_location_cmp(void const *one, void const *two) -{ - fr_event_counter_t const *a = one; - fr_event_counter_t const *b = two; - - CMP_RETURN(a, b, file); - - return CMP(a->line, b->line); -} - - -/** Print out information about the number of events in the event loop - * - */ -void fr_event_report(fr_event_list_t *el, fr_time_t now, void *uctx) -{ - fr_lst_iter_t iter; - fr_event_timer_t const *ev; - size_t i; - - size_t array[NUM_ELEMENTS(decades)] = { 0 }; - fr_rb_tree_t *locations[NUM_ELEMENTS(decades)]; - TALLOC_CTX *tmp_ctx; - static pthread_mutex_t print_lock = PTHREAD_MUTEX_INITIALIZER; - - tmp_ctx = talloc_init_const("temporary stats"); - if (!tmp_ctx) { - oom: - EVENT_DEBUG("Can't do report, out of memory"); - talloc_free(tmp_ctx); - return; - } - - for (i = 0; i < NUM_ELEMENTS(decades); i++) { - locations[i] = fr_rb_inline_alloc(tmp_ctx, fr_event_counter_t, node, event_timer_location_cmp, NULL); - if (!locations[i]) goto oom; - } - - /* - * Show which events are due, when they're due, - * and where they were allocated - */ - for (ev = fr_lst_iter_init(el->times, &iter); - ev != NULL; - ev = fr_lst_iter_next(el->times, &iter)) { - fr_time_delta_t diff = fr_time_sub(ev->when, now); - - for (i = 0; i < NUM_ELEMENTS(decades); i++) { - if ((fr_time_delta_cmp(diff, decades[i]) <= 0) || (i == NUM_ELEMENTS(decades) - 1)) { - fr_event_counter_t find = { .file = ev->file, .line = ev->line }; - fr_event_counter_t *counter; - - counter = fr_rb_find(locations[i], &find); - if (!counter) { - counter = talloc(locations[i], fr_event_counter_t); - if (!counter) goto oom; - counter->file = ev->file; - counter->line = ev->line; - counter->count = 1; - fr_rb_insert(locations[i], counter); - } else { - counter->count++; - } - - array[i]++; - break; - } - } - } - - pthread_mutex_lock(&print_lock); - EVENT_DEBUG("%p - Event list stats", el); - EVENT_DEBUG(" fd events : %"PRIu64, fr_event_list_num_fds(el)); - EVENT_DEBUG(" events last iter : %u", el->num_fd_events); - EVENT_DEBUG(" num timer events : %"PRIu64, fr_event_list_num_timers(el)); - - for (i = 0; i < NUM_ELEMENTS(decades); i++) { - fr_rb_iter_inorder_t event_iter; - void *node; - - if (!array[i]) continue; - - if (i == 0) { - EVENT_DEBUG(" events <= %5s : %zu", decade_names[i], array[i]); - } else if (i == (NUM_ELEMENTS(decades) - 1)) { - EVENT_DEBUG(" events > %5s : %zu", decade_names[i - 1], array[i]); - } else { - EVENT_DEBUG(" events %5s - %5s : %zu", decade_names[i - 1], decade_names[i], array[i]); - } - - for (node = 
fr_rb_iter_init_inorder(&event_iter, locations[i]); - node; - node = fr_rb_iter_next_inorder(&event_iter)) { - fr_event_counter_t *counter = talloc_get_type_abort(node, fr_event_counter_t); - - EVENT_DEBUG(" : %u allocd at %s[%d]", - counter->count, counter->file, counter->line); - } - } - pthread_mutex_unlock(&print_lock); - - fr_event_timer_in(el, el, &el->report, fr_time_delta_from_sec(EVENT_REPORT_FREQ), fr_event_report, uctx); - talloc_free(tmp_ctx); -} - -#ifndef NDEBUG -void fr_event_timer_dump(fr_event_list_t *el) -{ - fr_lst_iter_t iter; - fr_event_timer_t *ev; - fr_time_t now; - - now = el->time(); - - EVENT_DEBUG("Time is now %"PRId64"", fr_time_unwrap(now)); - - for (ev = fr_lst_iter_init(el->times, &iter); - ev; - ev = fr_lst_iter_next(el->times, &iter)) { - (void)talloc_get_type_abort(ev, fr_event_timer_t); - EVENT_DEBUG("%s[%d]: %p time=%" PRId64 " (%c), callback=%p", - ev->file, ev->line, ev, fr_time_unwrap(ev->when), - fr_time_gt(now, ev->when) ? '<' : '>', ev->callback); - } + return fr_time_eq(fr_timer_list_when(el->pub.tl), fr_time_wrap(0)) && (fr_rb_num_elements(el->fds) == 0); } -#endif -#endif - #ifdef TESTING - /* * cc -g -I .. -c rb.c -o rbtree.o && cc -g -I .. -c isaac.c -o isaac.o && cc -DTESTING -I .. -c event.c -o event_mine.o && cc event_mine.o rbtree.o isaac.o -o event * @@ -3202,13 +2655,13 @@ int main(int argc, char **argv) array[i] = array[i - 1]; array[i] += event_rand() & 0xffff; - fr_event_timer_at(NULL, el, array[i], print_time, array[i]); + fr_timer_at(NULL, el, array[i], false, print_time, array[i]); } while (fr_event_list_num_timers(el)) { now = el->time(); when = now; - if (!fr_event_timer_run(el, &when)) { + if (!fr_timer_run(el, &when)) { int delay = (when - now) / 1000; /* nanoseconds to microseconds */ printf("\tsleep %d microseconds\n", delay); diff --git a/src/lib/util/event.h b/src/lib/util/event.h index d3c470a5e7c..b9919d54ec2 100644 --- a/src/lib/util/event.h +++ b/src/lib/util/event.h @@ -28,31 +28,53 @@ RCSIDH(event_h, "$Id$") extern "C" { #endif +#include + +/* + * Allow public and private versions of the same structures + */ +#ifndef _EVENT_LIST_PRIVATE +typedef struct fr_event_list_pub_s fr_event_list_t; +#endif + +/** Public event list structure + * + * Make the event timer list available, but nothing else. + * + * This allows us to access these values without the cost of a function call. + */ +struct fr_event_list_pub_s { + fr_timer_list_t *tl; //!< The timer list associated with this event loop. +}; + #include #include #include + #include #include #include -/** An opaque file descriptor handle - */ -typedef struct fr_event_fd fr_event_fd_t; -/** An opaque event list handle - */ -typedef struct fr_event_list fr_event_list_t; +#ifdef WITH_EVENT_DEBUG +# define EVENT_DEBUG(fmt, ...) printf("EVENT:");printf(fmt, ## __VA_ARGS__);printf("\n"); +# ifndef EVENT_REPORT_FREQ +# define EVENT_REPORT_FREQ 5 +# endif +#else +# define EVENT_DEBUG(...) +#endif -/** An opaque timer handle +/** An opaque file descriptor handle */ -typedef struct fr_event_timer fr_event_timer_t; +typedef struct fr_event_fd fr_event_fd_t; /** An opaque PID status handle */ typedef struct fr_event_pid fr_event_pid_t; -/** An opaquer user event handle +/** An opaque user event handle */ typedef struct fr_event_user_s fr_event_user_t; @@ -109,14 +131,6 @@ typedef struct { */ #define FR_EVENT_RESUME(_s, _f) { .offset = offsetof(_s, _f), .op = FR_EVENT_OP_RESUME } -/** Called when a timer event fires - * - * @param[in] now The current time. 
- * @param[in] el Event list the timer event was inserted into. - * @param[in] uctx User ctx passed to #fr_event_timer_in or #fr_event_timer_at. - */ -typedef void (*fr_event_timer_cb_t)(fr_event_list_t *el, fr_time_t now, void *uctx); - /** Called after each event loop cycle * * Called before calling kqueue to put the thread in a sleeping state. @@ -162,11 +176,13 @@ typedef void (*fr_event_pid_cb_t)(fr_event_list_t *el, pid_t pid, int status, vo */ typedef void (*fr_event_user_cb_t)(fr_event_list_t *el, void *uctx); -/** Alternative time source, useful for testing +/** Called when a post event fires * - * @return the current time in nanoseconds past the epoch. + * @param[in] el Event list the post event was inserted into. + * @param[in] now The current time. + * @param[in] uctx User ctx passed to #fr_timer_in or #fr_timer_at. */ -typedef fr_time_t (*fr_event_time_source_t)(void); +typedef void (*fr_event_post_cb_t)(fr_event_list_t *el, fr_time_t now, void *uctx); /** Callbacks for the #FR_EVENT_FILTER_IO filter */ @@ -244,20 +260,6 @@ int fr_event_fd_armour(fr_event_list_t *el, int fd, fr_event_filter_t, uintptr_ int fr_event_fd_unarmour(fr_event_list_t *el, int fd, fr_event_filter_t filter, uintptr_t armour); #endif -int _fr_event_timer_at(NDEBUG_LOCATION_ARGS - TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_timer_t const **ev, - fr_time_t when, fr_event_timer_cb_t callback, void const *uctx); -#define fr_event_timer_at(...) _fr_event_timer_at(NDEBUG_LOCATION_EXP __VA_ARGS__) - -int _fr_event_timer_in(NDEBUG_LOCATION_ARGS - TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_timer_t const **ev, - fr_time_delta_t delta, fr_event_timer_cb_t callback, void const *uctx); -#define fr_event_timer_in(...) _fr_event_timer_in(NDEBUG_LOCATION_EXP __VA_ARGS__) - -int fr_event_timer_delete(fr_event_timer_t const **ev); - -fr_time_t fr_event_timer_when(fr_event_timer_t const *ev) CC_HINT(nonnull); - int _fr_event_pid_wait(NDEBUG_LOCATION_ARGS TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_pid_t const **ev_p, pid_t pid, fr_event_pid_cb_t wait_fn, void *uctx) @@ -272,8 +274,6 @@ int _fr_event_pid_reap(NDEBUG_LOCATION_ARGS unsigned int fr_event_list_reap_signal(fr_event_list_t *el, fr_time_delta_t timeout, int signal); -int fr_event_timer_run(fr_event_list_t *el, fr_time_t *when); - int _fr_event_user_insert(NDEBUG_LOCATION_ARGS TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_user_t **ev_p, bool trigger, fr_event_user_cb_t callback, void *uctx); @@ -287,8 +287,8 @@ int fr_event_user_delete(fr_event_list_t *el, fr_event_user_cb_t user, void *uc int fr_event_pre_insert(fr_event_list_t *el, fr_event_status_cb_t callback, void *uctx) CC_HINT(nonnull(1,2)); int fr_event_pre_delete(fr_event_list_t *el, fr_event_status_cb_t callback, void *uctx) CC_HINT(nonnull(1,2)); -int fr_event_post_insert(fr_event_list_t *el, fr_event_timer_cb_t callback, void *uctx) CC_HINT(nonnull(1,2)); -int fr_event_post_delete(fr_event_list_t *el, fr_event_timer_cb_t callback, void *uctx) CC_HINT(nonnull(1,2)); +int fr_event_post_insert(fr_event_list_t *el, fr_event_post_cb_t callback, void *uctx) CC_HINT(nonnull(1,2)); +int fr_event_post_delete(fr_event_list_t *el, fr_event_post_cb_t callback, void *uctx) CC_HINT(nonnull(1,2)); int fr_event_corral(fr_event_list_t *el, fr_time_t now, bool wait); void fr_event_service(fr_event_list_t *el); @@ -298,17 +298,9 @@ bool fr_event_loop_exiting(fr_event_list_t *el); int fr_event_loop(fr_event_list_t *el); fr_event_list_t *fr_event_list_alloc(TALLOC_CTX *ctx, fr_event_status_cb_t status, void 
*status_ctx); -void fr_event_list_set_time_func(fr_event_list_t *el, fr_event_time_source_t func); bool fr_event_list_empty(fr_event_list_t *el); -#ifdef WITH_EVENT_DEBUG -void fr_event_report(fr_event_list_t *el, fr_time_t now, void *uctx); -# ifndef NDEBUG -void fr_event_timer_dump(fr_event_list_t *el); -# endif -#endif - #ifdef __cplusplus } #endif diff --git a/src/lib/util/libfreeradius-util.mk b/src/lib/util/libfreeradius-util.mk index 180d47ba0da..86a88bf95d9 100644 --- a/src/lib/util/libfreeradius-util.mk +++ b/src/lib/util/libfreeradius-util.mk @@ -35,6 +35,7 @@ SOURCES := \ edit.c \ encode.c \ event.c \ + timer.c \ ext.c \ fifo.c \ file.c \ @@ -121,4 +122,3 @@ ifeq "$(TARGET_IS_WASM)" "yes" SRC_CFLAGS += -sMAIN_MODULE=1 -sUSE_PTHREADS=1 TGT_LDFLAGS += --no-entry -sALLOW_MEMORY_GROWTH=1 -sFORCE_FILESYSTEM=1 -sEXPORT_ALL=1 -sLINKABLE=1 -sMODULARIZE=1 -sEXPORT_ES6=1 -sEXPORT_NAME=libfreeradiusUtil -sEXPORTED_RUNTIME_METHODS=ccall,cwrap,setValue,getValue --preload-file=$(top_builddir)/share/dictionary@/share/dictionary endif - diff --git a/src/lib/util/slab.h b/src/lib/util/slab.h index b40cbdd18e9..3ae803ec9be 100644 --- a/src/lib/util/slab.h +++ b/src/lib/util/slab.h @@ -81,7 +81,7 @@ typedef struct { \ FR_DLIST_HEAD(_name ## _slab) reserved; \ FR_DLIST_HEAD(_name ## _slab) avail; \ fr_event_list_t *el; \ - fr_event_timer_t const *ev; \ + fr_timer_t *ev; \ fr_slab_config_t config; \ unsigned int in_use; \ unsigned int high_water_mark; \ @@ -129,7 +129,7 @@ DIAG_OFF(unused-function) \ * up to half of the element count between the high water mark \ * and the current number in use. \ */ \ - static void _ ## _name ## _slab_cleanup(fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) \ + static void _ ## _name ## _slab_cleanup(fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx) \ { \ _name ## _slab_list_t *slab_list = talloc_get_type_abort(uctx, _name ## _slab_list_t); \ _name ## _slab_t *slab = NULL, *next_slab = NULL; \ @@ -153,8 +153,8 @@ DIAG_OFF(unused-function) \ } \ slab_list->high_water_mark -= cleared; \ finish: \ - (void) fr_event_timer_in(slab_list, el, &slab_list->ev, slab_list->config.interval, \ - _ ## _name ## _slab_cleanup, slab_list); \ + (void) fr_timer_in(slab_list, tl, &slab_list->ev, slab_list->config.interval, false, \ + _ ## _name ## _slab_cleanup, slab_list); \ } \ \ /** Allocate a slab list to manage slabs of allocated memory \ @@ -195,7 +195,7 @@ DIAG_OFF(unused-function) \ _name ## _slab_init(&slab->reserved); \ _name ## _slab_init(&slab->avail); \ if (el) { \ - if (unlikely(fr_event_timer_in(slab, el, &slab->ev, config->interval, _ ## _name ## _slab_cleanup, slab) < 0)) { \ + if (unlikely(fr_timer_in(slab, el->tl, &slab->ev, config->interval, false, _ ## _name ## _slab_cleanup, slab) < 0)) { \ talloc_free(slab); \ return NULL; \ }; \ diff --git a/src/lib/util/slab_tests.c b/src/lib/util/slab_tests.c index b8edb989854..ef3617a5303 100644 --- a/src/lib/util/slab_tests.c +++ b/src/lib/util/slab_tests.c @@ -6,7 +6,7 @@ */ #include #include - +#include #include "slab.h" typedef struct { @@ -485,7 +485,7 @@ static void test_clearup_1(void) fr_slab_config_t slab_config = def_slab_config; el = fr_event_list_alloc(ctx, NULL, NULL); - fr_event_list_set_time_func(el, test_time); + fr_timer_list_set_time_func(el->tl, test_time); slab_config.max_elements = 6; test_slab_list = test_slab_list_alloc(NULL, el, &slab_config, NULL, NULL, NULL, true, false); @@ -539,7 +539,7 @@ static void test_clearup_2(void) fr_slab_config_t slab_config = def_slab_config; el = 
fr_event_list_alloc(ctx, NULL, NULL); - fr_event_list_set_time_func(el, test_time); + fr_timer_list_set_time_func(el->tl, test_time); slab_config.min_elements = 16; slab_config.max_elements = 20; @@ -604,7 +604,7 @@ static void test_clearup_3(void) fr_slab_config_t slab_config = def_slab_config; el = fr_event_list_alloc(ctx, NULL, NULL); - fr_event_list_set_time_func(el, test_time); + fr_timer_list_set_time_func(el->tl, test_time); slab_config.min_elements = 0; slab_config.max_elements = 20; @@ -698,7 +698,7 @@ static void test_realloc(void) fr_slab_config_t slab_config = def_slab_config; el = fr_event_list_alloc(ctx, NULL, NULL); - fr_event_list_set_time_func(el, test_time); + fr_timer_list_set_time_func(el->tl, test_time); slab_config.min_elements = 0; slab_config.max_elements = 20; diff --git a/src/lib/util/timer.c b/src/lib/util/timer.c new file mode 100644 index 00000000000..c0748cbacda --- /dev/null +++ b/src/lib/util/timer.c @@ -0,0 +1,1254 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA + */ + +/** Various types of event timer list + * + * @file src/lib/util/timer.c + * + * @copyright 2025 Arran Cudbard-Bell (a.cudbardb@freeradius.org) + */ + +#define _TIMER_PRIVATE 1 +typedef struct fr_timer_list_s fr_timer_list_t; + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +FR_DLIST_TYPES(timer) +FR_DLIST_TYPEDEFS(timer, fr_timer_head_t, fr_timer_entry_t) + +/** What type of event list the timer is inserted into + * + */ +typedef enum { + TIMER_LIST_TYPE_LST = 1, //!< Self-sorting timer list based on a left leaning skeleton tree. + TIMER_LIST_TYPE_ORDERED = 2 //!< Strictly ordered list of events in a dlist. +} timer_list_type_t; + +/** An event timer list + * + */ +struct fr_timer_list_s { + struct fr_timer_list_pub_s pub; //!< Public interface to the event timer list. + + union { + fr_lst_t *lst; //!< of timer events to be executed. + timer_head_t ordered; //!< A list of timer events to be executed. + }; + timer_list_type_t type; + bool in_handler; //!< Whether we're currently in a callback. + + timer_head_t deferred; //!< A list of timer events to be inserted, after + ///< the current batch has been processed. + ///< This prevents "busy" timer loops, where + ///< other events may starve, or we may never exit. + + fr_timer_list_t *parent; //!< Parent list to insert event into (if any). + fr_timer_t *parent_ev; //!< Event in the parent's event loop. + +#ifdef WITH_EVENT_DEBUG + fr_timer_t *report; //!< Used to trigger periodict reports about the event timer list. +#endif +}; + +/** A timer event + * + */ +struct fr_timer_s { + fr_time_t when; //!< When this timer should fire. + + fr_timer_cb_t callback; //!< Callback to execute when the timer fires. + void const *uctx; //!< Context pointer to pass to the callback. 
+ + TALLOC_CTX *linked_ctx; //!< talloc ctx this event was bound to. + + fr_timer_t **parent; //!< A pointer to the parent structure containing the timer + ///< event. + fr_lst_index_t lst_idx; //!< Where to store opaque lst data, not used for ordered lists. + + fr_timer_entry_t entry; //!< Entry in a list of timer events. + + bool free_on_fire; //!< Whether to free the event when it fires. + + fr_timer_list_t *tl; //!< The event list this timer is part of. + ///< This is set to NULL when an event is disarmed, + ///< but all other fields are left intact. + +#ifndef NDEBUG + char const *file; //!< Source file this event was last updated in. + int line; //!< Line this event was last updated on. +#endif +}; + +FR_DLIST_FUNCS(timer, fr_timer_t, entry) + +#define CHECK_PARENT(_ev) \ + fr_assert_msg(!(_ev)->parent || (*(_ev)->parent == ev), \ + "Event %p, allocd %s[%d], parent field points to %p", (_ev), (_ev)->file, (_ev)->line, *(_ev)->parent); + +/** Specialisation function to insert a timer + * + * @param[in] tl Timer list to insert into. + * @param[in] ev Timer event to insert. + * @return + * - 0 on success. + * - -1 on failure. + */ +typedef int (*timer_insert_t)(fr_timer_list_t *tl, fr_timer_t *ev); + +/** Specialisation function to delete a timer + * + * @param[in] ev Timer event to delete. + * @return + * - 0 on success. + * - -1 on failure. + */ +typedef int (*timer_disarm_t)(fr_timer_t *ev); + +/** Specialisation function to execute any pending timers + * + * @param[in] tl Timer list to execute. + * @param[in,out] when Our current time, updated to the next event time (i.e. the next time we'll need to run something) + * @return + * - 0 no timer events fired. + * - 1 a timer event fired. + */ +typedef int (*timer_list_run_t)(fr_timer_list_t *tl, fr_time_t *when); + +/** Return the soonest timer event + * + * @param[in] tl to get the head of. + * @return + * - The head of the list. + * - NULL if the list is empty. + */ +typedef fr_timer_t *(*timer_list_head_t)(fr_timer_list_t *tl); + +/** Process any deferred timer events + * + * @param[in] tl to process deferred events for. + * @return + * - The head of the list. + * - NULL if the list is empty. + */ +typedef int (*timer_list_deferred_t)(fr_timer_list_t *tl); + +/** Return the number of elements in the list + * + * @param[in] tl to get the number of elements from. + * @return + * - The number of elements in the list. + */ +typedef uint64_t (*timer_list_num_elements_t)(fr_timer_list_t *tl); + +typedef struct { + timer_insert_t insert; //!< Function to insert a timer event. + timer_disarm_t disarm; //!< Function to delete a timer event. + + timer_list_run_t run; //!< Function to run a timer event. + timer_list_head_t head; //!< Function to get the head of the list. + timer_list_deferred_t deferred; //!< Function to process deferred events. + timer_list_num_elements_t num_events; //!< Function to get the number of elements in the list. 
+} timer_list_funcs_t; + +#define EVENT_ARMED(_ev) ((_ev)->tl != NULL) + +static int timer_lst_insert_at(fr_timer_list_t *tl, fr_timer_t *ev); +static int timer_ordered_insert_at(fr_timer_list_t *tl, fr_timer_t *ev); + +static int timer_lst_disarm(fr_timer_t *ev); +static int timer_ordered_disarm(fr_timer_t *ev); + +static int timer_list_lst_run(fr_timer_list_t *tl, fr_time_t *when); +static int timer_list_ordered_run(fr_timer_list_t *tl, fr_time_t *when); + +static fr_timer_t *timer_list_lst_head(fr_timer_list_t *tl); +static fr_timer_t *timer_list_ordered_head(fr_timer_list_t *tl); + +static int timer_list_lst_deferred(fr_timer_list_t *tl); +static int timer_list_ordered_deferred(fr_timer_list_t *tl); + +static uint64_t timer_list_lst_num_events(fr_timer_list_t *tl); +static uint64_t timer_list_ordered_num_events(fr_timer_list_t *tl); + +/** Functions for performing operations on various types of timer list + * + */ +static timer_list_funcs_t const timer_funcs[] = { + [TIMER_LIST_TYPE_LST] = { + .insert = timer_lst_insert_at, + .disarm = timer_lst_disarm, + + .run = timer_list_lst_run, + .head = timer_list_lst_head, + .deferred = timer_list_lst_deferred, + .num_events = timer_list_lst_num_events + }, + [TIMER_LIST_TYPE_ORDERED] = { + .insert = timer_ordered_insert_at, + .disarm = timer_ordered_disarm, + + .run = timer_list_ordered_run, + .head = timer_list_ordered_head, + .deferred = timer_list_ordered_deferred, + .num_events = timer_list_ordered_num_events + } +}; + +/** Compare two timer events to see which one should occur first + * + * @param[in] a the first timer event. + * @param[in] b the second timer event. + * @return + * - +1 if a should occur later than b. + * - -1 if a should occur earlier than b. + * - 0 if both events occur at the same time. + */ +static int8_t timer_cmp(void const *a, void const *b) +{ + fr_timer_t const *ev_a = a, *ev_b = b; + + return fr_time_cmp(ev_a->when, ev_b->when); +} + +/** This callback fires in the parent to execute events in this sublist + * + * @param[in] parent_tl Parent event timer list. + * @param[in] when When the parent timer fired. + * @param[in] uctx Sublist to execute. + */ +static void _parent_timer_cb(UNUSED fr_timer_list_t *parent_tl, fr_time_t when, void *uctx) +{ + /* + * We're in the parent timer, so we need to run the + * events in the child timer list. + */ + (void)fr_timer_list_run(talloc_get_type_abort(uctx, fr_timer_list_t), &when); +} + +/** Utility function to update parent timers + * + * @param[in] tl to update parent timers for. + * @return + * - 0 on success. + * - -1 on failure. + */ +static inline CC_HINT(always_inline) int timer_list_parent_update(fr_timer_list_t *tl) +{ + fr_timer_t *ev; + + if (!tl->parent) return 0; + + ev = timer_funcs[tl->type].head(tl); + /* + * No events, disarm the timer + */ + if (!ev) { + /* + * Disables the timer in the parent, does not free the memory + */ + if (tl->parent) fr_timer_disarm(tl->parent_ev); + return 0; + } + + if (tl->parent_ev && EVENT_ARMED(tl->parent_ev) && + fr_time_eq(ev->when, tl->parent_ev->when)) return 0; /* noop */ + + /* + * Re-arm the timer + */ + return fr_timer_at(tl->parent, tl->parent, &tl->parent_ev, + ev->when, false, _parent_timer_cb, tl); +} + +/** Insert a timer event into a single event timer list + * + * @param[in] tl to insert the event into. + * @param[in] ev to insert. + * @return + * - 0 on success. + * - -1 on failure. 
+ */ +static int timer_lst_insert_at(fr_timer_list_t *tl, fr_timer_t *ev) +{ + if (unlikely(fr_lst_insert(tl->lst, ev) < 0)) { + fr_strerror_const_push("Failed inserting timer into lst"); + return -1; + } + + return 0; +} + +/** Insert an event into an ordered timer list + * + * Timer must be in order, i.e. either before first event, or after last event + * + * @param[in] tl to insert the event into. + * @param[in] ev to insert. + * @return + * - 0 on success. + * - -1 on failure. + */ +static int timer_ordered_insert_at(fr_timer_list_t *tl, fr_timer_t *ev) +{ + fr_timer_t *tail; + + tail = timer_tail(&tl->ordered); + if (tail && fr_time_lt(ev->when, tail->when)) { + fr_strerror_const("Event being inserted must occurr _after_ the last event"); + return -1; + } + + if (unlikely(timer_insert_tail(&tl->ordered, ev) < 0)) { + fr_strerror_const_push("Failed inserting timer into ordered list"); + return -1; + } + + return 0; +} + +/** Remove an event from the event loop + * + * @param[in] ev to free. + * @return + * - 0 on success. + * - -1 on failure. + */ +static int _timer_free(fr_timer_t *ev) +{ + fr_timer_t **ev_p; + int ret; + + ret = fr_timer_disarm(ev); /* Is a noop if ev->tl == NULL */ + if (ret < 0) return ret; + + CHECK_PARENT(ev); + ev_p = ev->parent; + *ev_p = NULL; + + return 0; +} + +/** Insert a timer event into an event list + * + * @note The talloc parent of the memory returned in ev_p must not be changed. + * If the lifetime of the event needs to be bound to another context + * this function should be called with the existing event pointed to by + * ev_p. + * + * @param[in] ctx to bind lifetime of the event to. + * @param[in] tl to insert event into. + * @param[in,out] ev_p If not NULL modify this event instead of creating a new one. This is a parent + * in a temporal sense, not in a memory structure or dependency sense. + * @param[in] when we should run the event. + * @param[in] free_on_fire Whether event memory should be freed if the event fires. + * @param[in] callback function to execute if the event fires. + * @param[in] uctx user data to pass to the event. + * @return + * - 0 on success. + * - -1 on failure. + */ +int _fr_timer_at(NDEBUG_LOCATION_ARGS + TALLOC_CTX *ctx, fr_timer_list_t *tl, fr_timer_t **ev_p, + fr_time_t when, + bool free_on_fire, fr_timer_cb_t callback, void const *uctx) +{ + fr_timer_t *ev; + + /* + * If there is an event, reuse it instead of freeing it + * and allocating a new one. This is to reduce memory + * churn for repeat events. + */ + if (!*ev_p) { + new_event: + ev = talloc_zero(tl, fr_timer_t); + if (unlikely(!ev)) { + fr_strerror_const("Out of memory"); + return -1; + } + + EVENT_DEBUG("%p - " NDEBUG_LOCATION_FMT "Added new timer %p", tl, NDEBUG_LOCATION_VALS ev); + /* + * Bind the lifetime of the event to the specified + * talloc ctx. If the talloc ctx is freed, the + * event will also be freed. + */ + if (ctx != tl) talloc_link_ctx(ctx, ev); + + talloc_set_destructor(ev, _timer_free); + } else { + ev = UNCONST(fr_timer_t *, *ev_p); + + EVENT_DEBUG("%p - " NDEBUG_LOCATION_FMT "Re-armed timer %p", tl, NDEBUG_LOCATION_VALS ev); + + /* + * We can't disarm the linking context due to + * limitations in talloc, so if the linking + * context changes, we need to free the old + * event, and allocate a new one. + * + * Freeing the event also removes it from the lst. + */ + if (unlikely(ev->linked_ctx != ctx)) { + talloc_free(ev); + goto new_event; + } + + /* + * If the event is associated with a list, we need + * to disarm it, before we can rearm it. 
+ */ + if (EVENT_ARMED(ev)) { + int ret; + char const *err_file; + int err_line; + + /* + * Removed event from the event list or the + * deferred list. + */ + ret = fr_timer_disarm(ev); +#ifndef NDEBUG + err_file = ev->file; + err_line = ev->line; +#else + err_file = "not-available"; + err_line = 0; +#endif + + /* + * Events MUST be in the lst (or the insertion list). + */ + if (!fr_cond_assert_msg(ret == 0, + "Event %p, allocd %s[%d], was not found in the event " + "list or deferred list when re-armed: %s", ev, + err_file, err_line, fr_strerror())) return -1; + } + } + + ev->tl = tl; /* This indicates the event memory is bound to an avent loop */ + ev->when = when; + ev->free_on_fire = free_on_fire; + ev->callback = callback; + ev->uctx = uctx; + ev->linked_ctx = ctx; + ev->parent = ev_p; +#ifndef NDEBUG + ev->file = file; + ev->line = line; +#endif + + /* + * No updating needed as the events are deferred + */ + if (tl->in_handler) { + /* + * ...a little hacky, but we need to verify that + * we're not inserting an event that's earlier + * than the last event in the list for ordered + * lists. + * + * Otherwise we'd end up doing this when we tried + * to move all the deferred events into the timer + * list, and end up making that O(n) instead of O(1). + */ + if (tl->type == TIMER_LIST_TYPE_ORDERED) { + fr_timer_t *head = timer_list_ordered_head(tl); + + if (head && fr_time_lt(ev->when, head->when)) { + fr_strerror_const("Event being inserted must occurr _after_ the last event"); + + insert_failed: + talloc_set_destructor(ev, NULL); + talloc_free(ev); + *ev_p = NULL; + return -1; + } + } + + if (!fr_cond_assert_msg(timer_insert_tail(&tl->deferred, ev) == 0, + "Failed inserting event into deferred list")) { + goto insert_failed; + } + } else { + int ret; + + ret = timer_funcs[tl->type].insert(tl, ev); + if (unlikely(ret < 0)) goto insert_failed; + + /* + * We need to update the parent timer + * to ensure it fires at the correct time. + */ + if (unlikely(timer_list_parent_update(tl) < 0)) return -1; + } + + *ev_p = ev; + + return 0; +} + +/** Insert a timer event into an event list + * + * @note The talloc parent of the memory returned in ev_p must not be changed. + * If the lifetime of the event needs to be bound to another context + * this function should be called with the existing event pointed to by + * ev_p. + * + * @param[in] ctx to bind lifetime of the event to. + * @param[in] tl to insert event into. + * @param[in,out] ev_p If not NULL modify this event instead of creating a new one. This is a parent + * in a temporal sense, not in a memory structure or dependency sense. + * @param[in] delta In how many nanoseconds to wait before should we execute the event. + * @param[in] callback function to execute if the event fires. + * @param[in] uctx user data to pass to the event. + * @return + * - 0 on success. + * - -1 on failure. 
+ */ +int _fr_timer_in(NDEBUG_LOCATION_ARGS + TALLOC_CTX *ctx, fr_timer_list_t *tl, fr_timer_t **ev_p, + fr_time_delta_t delta, + bool free_on_fire, fr_timer_cb_t callback, void const *uctx) +{ + return _fr_timer_at(NDEBUG_LOCATION_VALS + ctx, tl, ev_p, fr_time_add(tl->pub.time(), delta), + free_on_fire, callback, uctx); +} + +static int timer_lst_disarm(fr_timer_t *ev) +{ + fr_timer_list_t *tl = ev->tl; + + if (timer_in_list(&tl->deferred,ev)) { + (void)timer_remove(&tl->deferred, ev); + } else { + int ret = fr_lst_extract(tl->lst, ev); + char const *err_file; + int err_line; + +#ifndef NDEBUG + err_file = ev->file; + err_line = ev->line; +#else + err_file = "not-available"; + err_line = 0; +#endif + + + /* + * Events MUST be in the lst (or the insertion list). + */ + if (!fr_cond_assert_msg(ret == 0, + "Event %p, lst_id %u, allocd %s[%d], was not found in the event lst or " + "insertion list when freed: %s", ev, ev->lst_idx, err_file, err_line, + fr_strerror())) return -1; + } + + return 0; +} + +/** Remove a timer from a timer list, but don't free it + * + * @param[in] ev to remove. + */ +static int timer_ordered_disarm(fr_timer_t *ev) +{ + /* + * Check the check is still valid (sanity check) + */ + (void)talloc_get_type_abort(ev, fr_timer_t);; + + /* + * Already dissassociated from a list, nothing to do. + */ + if (!ev->tl) return 0; + + /* + * This *MUST* be in the timer list if it has a non-NULL tl pointer. + */ + if (unlikely(!fr_cond_assert(timer_in_list(&ev->tl->ordered, ev)))) return -1; + + (void)timer_remove(&ev->tl->ordered, ev); + + return 0; +} + +/** Remove an event from the event list, but don't free the memory + * + * @param[in] ev to remove from the event list. + */ +int fr_timer_disarm(fr_timer_t *ev) +{ + fr_timer_list_t *tl = ev->tl; + + if (!EVENT_ARMED(ev)) { + EVENT_DEBUG("Asked to disarm inactive timer %p (noop)", ev); + return 0; /* Noop */ + } + + EVENT_DEBUG("Disarming timer %p", ev); + + CHECK_PARENT(ev); + + /* + * If the event is deferred, it's not in the event list proper + * so just remove it, and set the tl pointer to NULL. + */ + if (timer_in_list(&tl->deferred,ev)) { + (void)timer_remove(&tl->deferred, ev); + } else { + int ret = timer_funcs[ev->tl->type].disarm(ev); + if (ret < 0) return ret; + } + ev->tl = NULL; + + return timer_list_parent_update(tl); +} + +/** Delete a timer event and free its memory + * + * @param[in] ev_p of the event being deleted. + * @return + * - 0 on success. + * - -1 on failure. + */ +int fr_timer_delete(fr_timer_t **ev_p) +{ + fr_timer_t *ev; + int ret; + + if (unlikely(!*ev_p)) return 0; + + ev = *ev_p; + ret = talloc_free(ev); /* Destructor removed event from any lists */ + + /* + * Don't leave a garbage pointer value + * if parent is not ev_p. + */ + if (likely(ret == 0)) { + *ev_p = NULL; + } else { + EVENT_DEBUG("Deleting timer %p failed: %s", ev, fr_strerror_peek()); + } + + return 0; +} + +/** Internal timestamp representing when the timer should fire + * + * @return When the timestamp should fire. + */ +fr_time_t fr_timer_when(fr_timer_t *ev) +{ + return ev->when; +} + +/** Check if a timer event is armed + * + * @param[in] ev to check. + * @return + * - true if the event is armed. + * - false if the event is not armed. + */ +bool fr_timer_armed(fr_timer_t *ev) +{ + return ev && EVENT_ARMED(ev); +} + +/** Run all scheduled timer events in a lst + * + * @param[in] tl containing the timer events. + * @param[in] when Process events scheduled to run before or at this time. + * - Set to 0 if no more events. 
+ * - Set to the next event time if there are more events. + * @return + * - 0 no timer events fired. + * - 1 a timer event fired. + */ +static int timer_list_lst_run(fr_timer_list_t *tl, fr_time_t *when) +{ + fr_timer_cb_t callback; + void *uctx; + fr_timer_t *ev; + int fired = 0; + + while (fr_lst_num_elements(tl->lst) > 0) { + ev = fr_lst_peek(tl->lst); + + /* + * See if it's time to do this one. + */ + if (fr_time_gt(ev->when, *when)) { + *when = ev->when; + done: + return fired; + } + + callback = ev->callback; + memcpy(&uctx, &ev->uctx, sizeof(uctx)); + + CHECK_PARENT(ev); + + /* + * Disarm the event before calling it. + * + * This leaves the memory in place, + * but dissassociates it from the list. + * + * We use the public function as it + * handles more cases. + */ + if (!fr_cond_assert(fr_timer_disarm(ev) == 0)) return -2; + EVENT_DEBUG("Running timer %p", ev); + if (ev->free_on_fire) talloc_free(ev); + + callback(tl, *when, uctx); + + fired++; + } + + *when = fr_time_wrap(0); + + goto done; +} + +/** Run all scheduled events in an ordered list + * + * @param[in] tl containing the timer events. + * @param[in] when Process events scheduled to run before or at this time. + * - Set to 0 if no more events. + * - Set to the next event time if there are more events. + * @return + * - < 0 if we failed to updated the parent list. + * - 0 no timer events fired. + * - >0 number of timer event fired. + */ +static int timer_list_ordered_run(fr_timer_list_t *tl, fr_time_t *when) +{ + fr_timer_cb_t callback; + void *uctx; + fr_timer_t *ev; + unsigned int fired = 0; + + while ((ev = timer_head(&tl->ordered))) { + /* + * See if it's time to do this one. + */ + if (fr_time_gt(ev->when, *when)) { + *when = ev->when; + done: + return fired; + } + + callback = ev->callback; + memcpy(&uctx, &ev->uctx, sizeof(uctx)); + + CHECK_PARENT(ev); + + /* + * Disarm the event before calling it. + * + * This leaves the memory in place, + * but dissassociates it from the list. + * + * We use the public function as it + * handles more cases. + */ + if (!fr_cond_assert(fr_timer_disarm(ev) == 0)) return -2; + + EVENT_DEBUG("Running timer %p", ev); + if (ev->free_on_fire) talloc_free(ev); + + callback(tl, *when, uctx); + + fired++; + } + + *when = fr_time_wrap(0); + + goto done; +} + +/** Execute any pending events in the event loop + * + * @param[in] tl to execute events in. + * @param[in] when Process events scheduled to run before or at this time. + * - Set to 0 if no more events. + * - Set to the next event time if there are more events. + * @return + * - < 0 if we failed to updated the parent list. + * - 0 no timer events fired. + * - >0 number of timer event fired. + */ +int fr_timer_list_run(fr_timer_list_t *tl, fr_time_t *when) +{ + int ret; + + tl->in_handler = true; + ret = timer_funcs[tl->type].run(tl, when); + tl->in_handler = false; + + /* + * Now we've executed all the pending events, + * now merge the deferred events into the main + * event list. + * + * The events don't need to be modified as they + * were initialised completely before being + * placed in the deffered list. + */ + if (timer_num_elements(&tl->deferred) > 0) { + if (unlikely(timer_funcs[tl->type].deferred(tl) < 0)) return -1; + if (unlikely(timer_list_parent_update(tl) < 0)) return -1; + /* + * We ran some events, and have no deferred + * events to insert, so we need to forcefully + * update the parent timer. 
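Either way the parent list ends up armed for whichever event is now soonest. Timers inserted from inside a callback land on the deferred list and are merged back in a single pass here, so a handler that immediately re-arms itself cannot starve other events or keep the loop spinning. A rough sketch of how a caller drives a list, assuming tl is a standalone list using the default fr_time() source:

	fr_time_t when = fr_time();			/* run everything due up to now */
	int fired = fr_timer_list_run(tl, &when);	/* < 0 on error, otherwise the number of events fired */

	/* on return, when holds the time of the next pending event, or 0 if the list is now empty */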
+ */ + } else if(ret > 0) { + if (unlikely(timer_list_parent_update(tl) < 0)) return -1; + } + + return ret; +} + +/** Return the head of the event list + * + * @param[in] tl to get the head of. + * @return + * - The head of the trie. + * - NULL, if there's no head. + */ +static fr_timer_t *timer_list_lst_head(fr_timer_list_t *tl) +{ + return fr_lst_peek(tl->lst); +} + +/** Return the head of the ordered list + * + * @param[in] tl to get the head of. + * @return + * - The head of the trie. + * - NULL, if there's no head. + */ +static fr_timer_t *timer_list_ordered_head(fr_timer_list_t *tl) +{ + return timer_head(&tl->ordered); +} + +/** Insert a timer event into a the lst + * + * @param[in] tl to move events in. + * @return + * - 0 on success. + * - -1 on failure. + */ +static int timer_list_lst_deferred(fr_timer_list_t *tl) +{ + fr_timer_t *ev; + + while((ev = timer_pop_head(&tl->deferred))) { + if (unlikely(timer_lst_insert_at(tl, ev)) < 0) { + timer_insert_head(&tl->deferred, ev); /* Don't lose track of events we failed to insert */ + return -1; + } + } + + return 0; +} + +/** Move all deferred events into the ordered event list + * + * This operation is O(1). + * + * @param[in] tl to move events in. + * @return + * - 0 on success. + * - -1 on failure. + */ +static int timer_list_ordered_deferred(fr_timer_list_t *tl) +{ +#ifndef NDEBUG + { + fr_timer_t *head, *tail; + + head = timer_head(&tl->deferred); + tail = timer_tail(&tl->ordered); + + /* + * Something has gone catastrophically wrong if the + * deferred event is earlier than the last event in + * the ordered list, given all the checks we do. + */ + fr_cond_assert_msg(!head || !tail || fr_time_gteq(head->when, tail->when), + "Deferred event is earlier than the last event in the ordered list"); + } +#endif + + /* + * O(1) operation. Much better than moving the + * events individually. + */ + timer_move_head(&tl->ordered, &tl->deferred); + + return 0; +} + +static uint64_t timer_list_lst_num_events(fr_timer_list_t *tl) +{ + return fr_lst_num_elements(tl->lst); +} + +static uint64_t timer_list_ordered_num_events(fr_timer_list_t *tl) +{ + return timer_num_elements(&tl->ordered); +} + +/** Return number of pending events + * + * @note This includes deferred events, i.e. those yet to be inserted into the main list + * + * @param[in] tl to get the number of events from. + * @return + * - The number of events in the list. + */ +uint64_t fr_timer_list_num_events(fr_timer_list_t *tl) +{ + uint64_t num = timer_funcs[tl->type].num_events(tl); + + return num + timer_num_elements(&tl->deferred); +} + +/** Return the time of the next event + * + * @param[in] tl to get the next event time from. + * @return + * - >0 the time of the next event. + * - 0 if there are no more events. + */ +fr_time_t fr_timer_list_when(fr_timer_list_t *tl) +{ + fr_timer_t *ev = timer_funcs[tl->type].head(tl); + + if (ev) return ev->when; + + return fr_time_wrap(0); +} + +/** Override event list time source + * + * @param[in] tl to set new time function for. + * @param[in] func to set. + */ +void fr_timer_list_set_time_func(fr_timer_list_t *tl, fr_event_time_source_t func) +{ + tl->pub.time = func; +} + +/** Cleanup all timers currently in the list + * + * @param[in] tl to cleanup. + * @return + * - 0 on success. + * - -1 on failure. 
+ */ +static int _timer_list_free(fr_timer_list_t *tl) +{ + fr_timer_t *ev; + + if (unlikely(tl->in_handler)) { + fr_strerror_const("Cannot free event timer list while in handler"); + return -1; + } + + if (tl->parent_ev) fr_timer_delete(&tl->parent_ev); + + while ((ev = timer_funcs[tl->type].head(tl))) { + if (talloc_free(ev) < 0) return -1; + } + + return 0; +} + +static fr_timer_list_t *timer_list_alloc(TALLOC_CTX *ctx, fr_timer_list_t *parent) +{ + fr_timer_list_t *tl; + + tl = talloc_zero(ctx, fr_timer_list_t); + if (unlikely(tl == NULL)) { + fr_strerror_const("Out of memory"); + return NULL; + } + + timer_talloc_init(&tl->deferred); + if (tl->parent) { + tl->parent = parent; + tl->pub.time = parent->pub.time; + } else { + tl->pub.time = fr_time; + } + talloc_set_destructor(tl, _timer_list_free); + + return tl; +} + +/** Allocate a new lst based timer list + * + * @param[in] ctx to insert head timer event into. + * @param[in] parent to insert the head timer event into. + */ +fr_timer_list_t *fr_timer_list_lst_alloc(TALLOC_CTX *ctx, fr_timer_list_t *parent) +{ + fr_timer_list_t *tl; + + if (unlikely((tl = timer_list_alloc(ctx, parent)) == NULL)) return NULL; + + tl->lst = fr_lst_talloc_alloc(tl, timer_cmp, fr_timer_t, lst_idx, 0); + if (unlikely(tl->lst == NULL)) { + fr_strerror_const("Failed allocating timer list"); + talloc_free(tl); + return NULL; + } + tl->type = TIMER_LIST_TYPE_LST; + +#ifdef WITH_EVENT_REPORT + fr_timer_in(tl, tl, &tl->report, fr_time_delta_from_sec(EVENT_REPORT_FREQ), false, fr_timer_report, NULL); +#endif + + return tl; +} + +/** Allocate a new sorted event timer list + * + * @param[in] ctx to allocate the event timer list from. + * @param[in] parent to insert the head timer event into. + */ +fr_timer_list_t *fr_timer_list_ordered_alloc(TALLOC_CTX *ctx, fr_timer_list_t *parent) +{ + fr_timer_list_t *tl; + + if (unlikely((tl = timer_list_alloc(ctx, parent)) == NULL)) return NULL; + + timer_talloc_init(&tl->ordered); + tl->type = TIMER_LIST_TYPE_ORDERED; + + return tl; +} + +#if defined(WITH_EVENT_DEBUG) && !defined(NDEBUG) +static const fr_time_delta_t decades[18] = { + { 1 }, { 10 }, { 100 }, + { 1000 }, { 10000 }, { 100000 }, + { 1000000 }, { 10000000 }, { 100000000 }, + { 1000000000 }, { 10000000000 }, { 100000000000 }, + { 1000000000000 }, { 10000000000000 }, { 100000000000000 }, + { 1000000000000000 }, { 10000000000000000 }, { 100000000000000000 }, +}; + +static const char *decade_names[18] = { + "1ns", "10ns", "100ns", + "1us", "10us", "100us", + "1ms", "10ms", "100ms", + "1s", "10s", "100s", + "1Ks", "10Ks", "100Ks", + "1Ms", "10Ms", "100Ms", /* 1 year is 300Ms */ +}; + +typedef struct { + fr_rb_node_t node; + char const *file; + int line; + uint32_t count; +} fr_event_counter_t; + +static int8_t timer_location_cmp(void const *one, void const *two) +{ + fr_event_counter_t const *a = one; + fr_event_counter_t const *b = two; + + CMP_RETURN(a, b, file); + + return CMP(a->line, b->line); +} + +static int _event_report_process(fr_rb_tree_t **locations, size_t array[], fr_time_t now, fr_timer_t *ev) +{ + fr_time_delta_t diff = fr_time_sub(ev->when, now); + size_t i; + + for (i = 0; i < NUM_ELEMENTS(decades); i++) { + if ((fr_time_delta_cmp(diff, decades[i]) <= 0) || (i == NUM_ELEMENTS(decades) - 1)) { + fr_event_counter_t find = { .file = ev->file, .line = ev->line }; + fr_event_counter_t *counter; + + counter = fr_rb_find(locations[i], &find); + if (!counter) { + counter = talloc(locations[i], fr_event_counter_t); + if (!counter) { + EVENT_DEBUG("Can't do 
report, out of memory"); + return -1; + } + counter->file = ev->file; + counter->line = ev->line; + counter->count = 1; + fr_rb_insert(locations[i], counter); + } else { + counter->count++; + } + + array[i]++; + break; + } + } + + return 0; +} + +/** Print out information about timer events in the event loop + * + */ +void fr_timer_report(fr_timer_list_t *tl, fr_time_t now, void *uctx) +{ + fr_lst_iter_t iter; + fr_timer_t *ev; + size_t i; + + size_t array[NUM_ELEMENTS(decades)] = { 0 }; + fr_rb_tree_t *locations[NUM_ELEMENTS(decades)]; + TALLOC_CTX *tmp_ctx; + static pthread_mutex_t print_lock = PTHREAD_MUTEX_INITIALIZER; + + tmp_ctx = talloc_init_const("temporary stats"); + if (!tmp_ctx) { + oom: + EVENT_DEBUG("Can't do report, out of memory"); + talloc_free(tmp_ctx); + return; + } + + for (i = 0; i < NUM_ELEMENTS(decades); i++) { + locations[i] = fr_rb_inline_alloc(tmp_ctx, fr_event_counter_t, node, timer_location_cmp, NULL); + if (!locations[i]) goto oom; + } + + switch (tl->type) { + case TIMER_LIST_TYPE_LST: + /* + * Show which events are due, when they're due, + * and where they were allocated + */ + for (ev = fr_lst_iter_init(tl->lst, &iter); + ev != NULL; + ev = fr_lst_iter_next(tl->lst, &iter)) { + if (_event_report_process(locations, array, now, ev) < 0) goto oom; + } + break; + + case TIMER_LIST_TYPE_ORDERED: + /* + * Show which events are due, when they're due, + * and where they were allocated + */ + for (ev = timer_head(&tl->ordered); + ev != NULL; + ev = timer_next(&tl->ordered, ev)) { + if (_event_report_process(locations, array, now, ev) < 0) goto oom; + } + break; + } + + pthread_mutex_lock(&print_lock); + EVENT_DEBUG("num timer events: %"PRIu64, fr_timer_list_num_events(tl)); + + for (i = 0; i < NUM_ELEMENTS(decades); i++) { + fr_rb_iter_inorder_t event_iter; + void *node; + + if (!array[i]) continue; + + if (i == 0) { + EVENT_DEBUG(" events <= %5s : %zu", decade_names[i], array[i]); + } else if (i == (NUM_ELEMENTS(decades) - 1)) { + EVENT_DEBUG(" events > %5s : %zu", decade_names[i - 1], array[i]); + } else { + EVENT_DEBUG(" events %5s - %5s : %zu", decade_names[i - 1], decade_names[i], array[i]); + } + + for (node = fr_rb_iter_init_inorder(&event_iter, locations[i]); + node; + node = fr_rb_iter_next_inorder(&event_iter)) { + fr_event_counter_t *counter = talloc_get_type_abort(node, fr_event_counter_t); + + EVENT_DEBUG(" : %u allocd at %s[%d]", + counter->count, counter->file, counter->line); + } + } + pthread_mutex_unlock(&print_lock); + + fr_timer_in(tl, tl, &tl->report, fr_time_delta_from_sec(EVENT_REPORT_FREQ), false, fr_timer_report, uctx); + talloc_free(tmp_ctx); +} + +void fr_timer_dump(fr_timer_list_t *tl) +{ + fr_lst_iter_t iter; + fr_timer_t *ev; + fr_time_t now = tl->pub.time(); /* Get the current time */ + +#define TIMER_DUMP(_ev) \ + EVENT_DEBUG("%s[%d]: %p time=%" PRId64 " (%c), callback=%p", \ + (_ev)->file, (_ev)->line, _ev, fr_time_unwrap((_ev)->when), \ + fr_time_gt(now, (_ev)->when) ? 
'<' : '>', (_ev)->callback); + + EVENT_DEBUG("Time is now %"PRId64"", fr_time_unwrap(now)); + + switch (tl->type) { + case TIMER_LIST_TYPE_LST: + EVENT_DEBUG("Dumping lst timer list"); + + for (ev = fr_lst_iter_init(tl->lst, &iter); + ev; + ev = fr_lst_iter_next(tl->lst, &iter)) { + (void)talloc_get_type_abort(ev, fr_timer_t); + TIMER_DUMP(ev); + } + break; + + case TIMER_LIST_TYPE_ORDERED: + EVENT_DEBUG("Dumping ordered timer list"); + + for (ev = timer_head(&tl->ordered); + ev; + ev = timer_next(&tl->ordered, ev)) { + (void)talloc_get_type_abort(ev, fr_timer_t); + TIMER_DUMP(ev); + } + break; + } +} +#endif diff --git a/src/lib/util/timer.h b/src/lib/util/timer.h new file mode 100644 index 00000000000..9fd7a63c663 --- /dev/null +++ b/src/lib/util/timer.h @@ -0,0 +1,117 @@ +#pragma once + +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA + */ + +/** Timer lists with event callbacks + * + * @file src/lib/util/event.h + * + * @copyright 2025 Arran Cudbard-Bell (a.cudbardb@freeradius.org) + */ +RCSIDH(timer_h, "$Id$") + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +/* + * Allow public and private versions of the same structures + */ +#ifdef _CONST +# error _CONST can only be defined in the local header +#endif +#ifndef _TIMER_PRIVATE +typedef struct fr_timer_list_pub_s fr_timer_list_t; +# define _CONST const +#else +# define _CONST +#endif + +/** Alternative time source, useful for testing + * + * @return the current time in nanoseconds past the epoch. + */ +typedef fr_time_t (*fr_event_time_source_t)(void); + +/** Public event timer list structure + * + * Make the current list time, and time source available, but nothing else. + * + * This allows us to access these values without the cost of a function call. + */ +struct fr_timer_list_pub_s { + fr_event_time_source_t _CONST time; //!< Time source this list uses to get the current time + ///< when calculating deltas (fr_timer_in). +}; + +/** An opaque timer handle + */ +typedef struct fr_timer_s fr_timer_t; + +/** Called when a timer event fires + * + * @param[in] tl timer list event was inserted into. + * @param[in] now The current time. + * @param[in] uctx User ctx passed to #fr_timer_in or #fr_timer_at. + */ +typedef void (*fr_timer_cb_t)(fr_timer_list_t *tl, fr_time_t now, void *uctx); + +int _fr_timer_at(NDEBUG_LOCATION_ARGS + TALLOC_CTX *ctx, fr_timer_list_t *tl, fr_timer_t **ev, + fr_time_t when, bool free_on_fire, fr_timer_cb_t callback, void const *uctx) + CC_HINT(nonnull(NDEBUG_LOCATION_NONNULL(2), NDEBUG_LOCATION_NONNULL(3), NDEBUG_LOCATION_NONNULL(6))); +#define fr_timer_at(...) 
_fr_timer_at(NDEBUG_LOCATION_EXP __VA_ARGS__) + +int _fr_timer_in(NDEBUG_LOCATION_ARGS + TALLOC_CTX *ctx, fr_timer_list_t *tl, fr_timer_t **ev, + fr_time_delta_t delta, bool free_on_fire, fr_timer_cb_t callback, void const *uctx) + CC_HINT(nonnull(NDEBUG_LOCATION_NONNULL(2), NDEBUG_LOCATION_NONNULL(3), NDEBUG_LOCATION_NONNULL(6))); +#define fr_timer_in(...) _fr_timer_in(NDEBUG_LOCATION_EXP __VA_ARGS__) + +int fr_timer_disarm(fr_timer_t *ev) CC_HINT(nonnull); /* disarms but does not free */ + +int fr_timer_delete(fr_timer_t **ev_p) CC_HINT(nonnull); /* disarms AND frees */ + +fr_time_t fr_timer_when(fr_timer_t *ev) CC_HINT(nonnull); + +bool fr_timer_armed(fr_timer_t *ev); + +int fr_timer_list_run(fr_timer_list_t *tl, fr_time_t *when) CC_HINT(nonnull); + +uint64_t fr_timer_list_num_events(fr_timer_list_t *tl) CC_HINT(nonnull); + +fr_time_t fr_timer_list_when(fr_timer_list_t *tl) CC_HINT(nonnull); + +void fr_timer_list_set_time_func(fr_timer_list_t *tl, fr_event_time_source_t func) CC_HINT(nonnull); + +fr_timer_list_t *fr_timer_list_lst_alloc(TALLOC_CTX *ctx, fr_timer_list_t *parent); + +fr_timer_list_t *fr_timer_list_ordered_alloc(TALLOC_CTX *ctx, fr_timer_list_t *parent); + +#ifdef WITH_EVENT_DEBUG +void fr_timer_report(fr_timer_list_t *tl, fr_time_t now, void *uctx); +void fr_timer_dump(fr_timer_list_t *tl); +#endif + +#undef _CONST + +#ifdef __cplusplus +} +#endif diff --git a/src/listen/bfd/session.c b/src/listen/bfd/session.c index 2571a433e1f..e586c987d4d 100644 --- a/src/listen/bfd/session.c +++ b/src/listen/bfd/session.c @@ -747,7 +747,7 @@ static void bfd_send_init(bfd_session_t *session, bfd_packet_t *bfd) /* * Send one BFD packet. */ -static void bfd_send_packet(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *ctx) +static void bfd_send_packet(UNUSED fr_timer_list_t *el, UNUSED fr_time_t now, void *ctx) { bfd_session_t *session = ctx; bfd_packet_t bfd; @@ -770,7 +770,7 @@ static void bfd_send_packet(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, vo /* * Send one BFD packet. */ -static void bfd_unlang_send_packet(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *ctx) +static void bfd_unlang_send_packet(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *ctx) { bfd_session_t *session = ctx; bfd_packet_t *bfd; @@ -810,14 +810,14 @@ static void bfd_start_packets(bfd_session_t *session) { uint64_t interval, base; uint64_t jitter; - fr_event_timer_cb_t cb; + fr_timer_cb_t cb; if (session->ev_packet) return; /* * Reset the timers. */ - fr_event_timer_delete(&session->ev_packet); + fr_timer_delete(&session->ev_packet); if (fr_time_delta_cmp(session->desired_min_tx_interval, session->remote_min_rx_interval) >= 0) { interval = fr_time_delta_unwrap(session->desired_min_tx_interval); @@ -860,9 +860,9 @@ static void bfd_start_packets(bfd_session_t *session) cb = bfd_send_packet; } - if (fr_event_timer_in(session, session->el, &session->ev_packet, - fr_time_delta_wrap(interval), - cb, session) < 0) { + if (fr_timer_in(session, session->el->tl, &session->ev_packet, + fr_time_delta_wrap(interval), + false, cb, session) < 0) { fr_assert("Failed to insert event" == NULL); } } @@ -940,7 +940,7 @@ static void bfd_set_desired_min_tx_interval(bfd_session_t *session, fr_time_delt /* * We failed to see a packet. 
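These listener and module conversions are mechanical: timer handles change from fr_event_timer_t const * to fr_timer_t *, the list argument becomes the event list's timer sub-list, and the new free_on_fire flag is passed explicitly (false throughout this patch, which keeps the old lifetime behaviour). Roughly:

	/* old */ fr_event_timer_in(ctx, el, &ev, delta, callback, uctx);
	/* new */ fr_timer_in(ctx, el->tl, &ev, delta, false, callback, uctx);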
*/ -static void bfd_detection_timeout(UNUSED fr_event_list_t *el, fr_time_t now, void *ctx) +static void bfd_detection_timeout(UNUSED fr_timer_list_t *tl, fr_time_t now, void *ctx) { bfd_session_t *session = ctx; @@ -990,7 +990,7 @@ static void bfd_set_timeout(bfd_session_t *session, fr_time_t when) uint64_t delay; fr_time_delta_t delta; - fr_event_timer_delete(&session->ev_timeout); + fr_timer_delete(&session->ev_timeout); delay = fr_time_delta_unwrap(session->detection_time); delay *= session->detect_multi; @@ -1000,8 +1000,8 @@ static void bfd_set_timeout(bfd_session_t *session, fr_time_t when) timeout = fr_time_add(when, delta); - if (fr_event_timer_at(session, session->el, &session->ev_timeout, - timeout, bfd_detection_timeout, session) < 0) { + if (fr_timer_at(session, session->el->tl, &session->ev_timeout, + timeout, false, bfd_detection_timeout, session) < 0) { fr_assert("Failed to insert event" == NULL); } } @@ -1012,8 +1012,8 @@ static void bfd_set_timeout(bfd_session_t *session, fr_time_t when) */ static int bfd_stop_control(bfd_session_t *session) { - fr_event_timer_delete(&session->ev_timeout); - fr_event_timer_delete(&session->ev_packet); + fr_timer_delete(&session->ev_timeout); + fr_timer_delete(&session->ev_packet); return 1; } diff --git a/src/listen/bfd/session.h b/src/listen/bfd/session.h index a90d90db4c7..493b64effa7 100644 --- a/src/listen/bfd/session.h +++ b/src/listen/bfd/session.h @@ -45,7 +45,7 @@ typedef struct { fr_event_list_t *el; //!< event list fr_network_t *nr; //!< network side of things - struct sockaddr_storage remote_sockaddr; //!< cached for laziness + struct sockaddr_storage remote_sockaddr; //!< cached for laziness socklen_t remote_salen; struct sockaddr_storage local_sockaddr; //!< cached for laziness @@ -54,11 +54,11 @@ typedef struct { /* * Internal state management */ - fr_event_timer_t const *ev_timeout; //!< when we time out for not receiving a packet - fr_event_timer_t const *ev_packet; //!< for when we next send a packet - fr_time_t last_recv; //!< last received packet - fr_time_t next_recv; //!< when we next expect to receive a packet - fr_time_t last_sent; //!< the last time we sent a packet + fr_timer_t *ev_timeout; //!< when we time out for not receiving a packet + fr_timer_t *ev_packet; //!< for when we next send a packet + fr_time_t last_recv; //!< last received packet + fr_time_t next_recv; //!< when we next expect to receive a packet + fr_time_t last_sent; //!< the last time we sent a packet bfd_session_state_t session_state; //!< our view of the session state bfd_session_state_t remote_session_state; //!< their view of the session state diff --git a/src/listen/cron/proto_cron_crontab.c b/src/listen/cron/proto_cron_crontab.c index 575f50afcdf..6f91ce88ddb 100644 --- a/src/listen/cron/proto_cron_crontab.c +++ b/src/listen/cron/proto_cron_crontab.c @@ -43,7 +43,7 @@ typedef struct { proto_cron_crontab_t const *inst; - fr_event_timer_t const *ev; //!< for writing statistics + fr_timer_t *ev; //!< for writing statistics fr_listen_t *parent; //!< master IO handler @@ -519,7 +519,7 @@ done: * Called when tm.tm_sec == 0. If it isn't zero, then it means * that the timer is late, and we treat it as if tm.tm_sec == 0. 
*/ -static void do_cron(fr_event_list_t *el, fr_time_t now, void *uctx) +static void do_cron(fr_timer_list_t *tl, fr_time_t now, void *uctx) { proto_cron_crontab_thread_t *thread = uctx; struct tm tm; @@ -633,8 +633,8 @@ use_time: cf_section_name2(thread->inst->parent->server_cs), buffer, end - start); } - if (fr_event_timer_at(thread, el, &thread->ev, fr_time_add(now, fr_time_delta_from_sec(end - start)), - do_cron, thread) < 0) { + if (fr_timer_at(thread, tl, &thread->ev, fr_time_add(now, fr_time_delta_from_sec(end - start)), + false, do_cron, thread) < 0) { fr_assert(0); } @@ -669,7 +669,7 @@ static void mod_event_list_set(fr_listen_t *li, fr_event_list_t *el, void *nr) thread->inst = inst; thread->bootstrap = true; - do_cron(el, fr_time(), thread); + do_cron(el->tl, fr_time(), thread); } static char const *mod_name(fr_listen_t *li) diff --git a/src/listen/detail/proto_detail.h b/src/listen/detail/proto_detail.h index d6e360a7de2..8201bc90924 100644 --- a/src/listen/detail/proto_detail.h +++ b/src/listen/detail/proto_detail.h @@ -138,7 +138,7 @@ struct proto_detail_work_thread_s { off_t header_offset; //!< offset of the current header we're reading off_t read_offset; //!< where we're reading from in filename_work - fr_event_timer_t const *ev; //!< for detail file timers. + fr_timer_t *ev; //!< for detail file timers. pthread_mutex_t worker_mutex; //!< for the workers int num_workers; //!< number of workers diff --git a/src/listen/detail/proto_detail_file.c b/src/listen/detail/proto_detail_file.c index 379e894107c..2a32a0d99b8 100644 --- a/src/listen/detail/proto_detail_file.c +++ b/src/listen/detail/proto_detail_file.c @@ -122,7 +122,7 @@ static void mod_vnode_extend(fr_listen_t *li, UNUSED uint32_t fflags) if (has_worker) return; - if (thread->ev) fr_event_timer_delete(&thread->ev); + if (thread->ev) fr_timer_delete(&thread->ev); work_init(thread, false); } @@ -231,7 +231,7 @@ static int work_rename(proto_detail_file_thread_t *thread) /* * Start polling again after a timeout. */ -static void work_retry_timer(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) +static void work_retry_timer(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx) { proto_detail_file_thread_t *thread = talloc_get_type_abort(uctx, proto_detail_file_thread_t); @@ -280,8 +280,8 @@ static int work_exists(proto_detail_file_thread_t *thread, int fd) DEBUG3("proto_detail (%s): Waiting %.6fs for lock on file %s", thread->name, fr_time_delta_unwrap(delay) / (double)NSEC, inst->filename_work); - if (fr_event_timer_in(thread, thread->el, &thread->ev, - delay, work_retry_timer, thread) < 0) { + if (fr_timer_in(thread, thread->el->tl, &thread->ev, delay, + false, work_retry_timer, thread) < 0) { ERROR("Failed inserting retry timer for %s", inst->filename_work); } return 0; @@ -552,8 +552,9 @@ delay: */ DEBUG3("Waiting %d.000000s for new files in %s", inst->poll_interval, thread->name); - if (fr_event_timer_in(thread, thread->el, &thread->ev, - fr_time_delta_from_sec(inst->poll_interval), work_retry_timer, thread) < 0) { + if (fr_timer_in(thread, thread->el->tl, &thread->ev, + fr_time_delta_from_sec(inst->poll_interval), + false, work_retry_timer, thread) < 0) { ERROR("Failed inserting poll timer for %s", inst->filename_work); } return; @@ -607,8 +608,8 @@ static void mod_event_list_set(fr_listen_t *li, fr_event_list_t *el, UNUSED void * therefore change permissions, so that libkqueue can * read it. 
*/ - if (fr_event_timer_in(thread, thread->el, &thread->ev, - fr_time_delta_from_sec(1), work_retry_timer, thread) < 0) { + if (fr_timer_in(thread, thread->el->tl, &thread->ev, + fr_time_delta_from_sec(1), false, work_retry_timer, thread) < 0) { ERROR("Failed inserting poll timer for %s", thread->filename_work); } } diff --git a/src/listen/detail/proto_detail_work.c b/src/listen/detail/proto_detail_work.c index e3fc43546b0..7fb3d736cbf 100644 --- a/src/listen/detail/proto_detail_work.c +++ b/src/listen/detail/proto_detail_work.c @@ -60,7 +60,7 @@ typedef struct { size_t packet_len; //!< for retransmissions fr_retry_t retry; //!< our retry timers - fr_event_timer_t const *ev; //!< retransmission timer + fr_timer_t *ev; //!< retransmission timer fr_dlist_t entry; //!< for the retransmission list } fr_detail_entry_t; @@ -539,7 +539,7 @@ done: } -static void work_retransmit(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) +static void work_retransmit(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx) { fr_detail_entry_t *track = talloc_get_type_abort(uctx, fr_detail_entry_t); proto_detail_work_thread_t *thread = track->parent; @@ -608,8 +608,8 @@ static ssize_t mod_write(fr_listen_t *li, void *packet_ctx, UNUSED fr_time_t req DEBUG("%s - packet %d failed during processing. Will retransmit in %.6fs", thread->name, track->id, fr_time_delta_unwrap(track->retry.rt) / (double)NSEC); - if (fr_event_timer_at(thread, thread->el, &track->ev, - track->retry.next, work_retransmit, track) < 0) { + if (fr_timer_at(thread, thread->el->tl, &track->ev, + track->retry.next, false, work_retransmit, track) < 0) { ERROR("%s - Failed inserting retransmission timeout", thread->name); fail: if (inst->track_progress && (track->done_offset > 0)) goto mark_done; diff --git a/src/listen/ldap_sync/persistent_search.c b/src/listen/ldap_sync/persistent_search.c index 314b29c1238..f0acacf7d72 100644 --- a/src/listen/ldap_sync/persistent_search.c +++ b/src/listen/ldap_sync/persistent_search.c @@ -137,7 +137,8 @@ int persistent_sync_state_init(fr_ldap_connection_t *conn, size_t sync_no, proto * Whilst persistent search LDAP servers don't provide cookies as such * we treat change numbers, if provided, as cookies. */ - fr_event_timer_in(sync, conn->conn->el, &sync->cookie_ev, inst->cookie_interval, ldap_sync_cookie_event, sync); + fr_timer_in(sync, conn->conn->el->tl, &sync->cookie_ev, inst->cookie_interval, + false, ldap_sync_cookie_event, sync); return 0; } diff --git a/src/listen/ldap_sync/proto_ldap_sync.h b/src/listen/ldap_sync/proto_ldap_sync.h index ce7baf701f4..2a4a5815cba 100644 --- a/src/listen/ldap_sync/proto_ldap_sync.h +++ b/src/listen/ldap_sync/proto_ldap_sync.h @@ -132,7 +132,7 @@ struct sync_config_s { CONF_SECTION *cs; //!< Config section where this sync was defined. //!< Used for logging. - fr_event_timer_t const *ev; //!< Event for retrying cookie load + fr_timer_t *ev; //!< Event for retrying cookie load /* * Callbacks for various events diff --git a/src/listen/ldap_sync/proto_ldap_sync_ldap.c b/src/listen/ldap_sync/proto_ldap_sync_ldap.c index 67bb815dc00..100c8e9ab64 100644 --- a/src/listen/ldap_sync/proto_ldap_sync_ldap.c +++ b/src/listen/ldap_sync/proto_ldap_sync_ldap.c @@ -252,7 +252,7 @@ int ldap_sync_cookie_store(sync_state_t *sync, bool refresh) * A cookie at the head says that all the previous changes have been * completed, so the cookie can be sent. 
*/ -void ldap_sync_cookie_event(fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) +void ldap_sync_cookie_event(fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx) { sync_state_t *sync = talloc_get_type_abort(uctx, sync_state_t); sync_packet_ctx_t *sync_packet_ctx; @@ -269,7 +269,8 @@ void ldap_sync_cookie_event(fr_event_list_t *el, UNUSED fr_time_t now, void *uct ldap_sync_cookie_send(sync_packet_ctx); finish: - (void) fr_event_timer_in(sync, el, &sync->cookie_ev, sync->inst->cookie_interval, ldap_sync_cookie_event, sync); + (void) fr_timer_in(sync, tl, &sync->cookie_ev, sync->inst->cookie_interval, + false, ldap_sync_cookie_event, sync); } /** Enqueue a new cookie store packet @@ -371,7 +372,7 @@ static int ldap_sync_entry_send_network(sync_packet_ctx_t *sync_packet_ctx) * Looks at the head of the list of pending sync packets for unsent * change packets and sends any up to the first cookie. */ -static void ldap_sync_retry_event(fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) +static void ldap_sync_retry_event(fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx) { sync_state_t *sync = talloc_get_type_abort(uctx, sync_state_t); sync_packet_ctx_t *sync_packet_ctx = NULL; @@ -391,8 +392,8 @@ static void ldap_sync_retry_event(fr_event_list_t *el, UNUSED fr_time_t now, voi * packets - reschedule a retry event. */ if (sync_packet_ctx) { - (void) fr_event_timer_in(sync, el, &sync->retry_ev, sync->inst->retry_interval, - ldap_sync_retry_event, sync); + (void) fr_timer_in(sync, tl, &sync->retry_ev, sync->inst->retry_interval, + false, ldap_sync_retry_event, sync); } } @@ -513,8 +514,8 @@ int ldap_sync_entry_send(sync_state_t *sync, uint8_t const uuid[SYNC_UUID_LENGTH * Send the packet and if it fails to send add a retry event */ if ((ldap_sync_entry_send_network(sync_packet_ctx) < 0) && - (fr_event_timer_in(sync, sync->conn->conn->el, &sync->retry_ev, - sync->inst->retry_interval, ldap_sync_retry_event, sync) < 0)) { + (fr_timer_in(sync, sync->conn->conn->el->tl, &sync->retry_ev, + sync->inst->retry_interval, false, ldap_sync_retry_event, sync) < 0)) { PERROR("Inserting LDAP sync retry timer failed"); } @@ -532,11 +533,11 @@ static void _proto_ldap_socket_open_connected(connection_t *conn, UNUSED connect * Performs complete re-initialization of a connection. Called during socket_open * to create the initial connection and again any time we need to reopen the connection. * - * @param[in] el the event list managing listen event. + * @param[in] tl the event list managing listen event. * @param[in] now current time. * @param[in] user_ctx Listener. 
*/ -static void proto_ldap_connection_init(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *user_ctx) +static void proto_ldap_connection_init(fr_timer_list_t *tl, UNUSED fr_time_t now, void *user_ctx) { fr_listen_t *listen = talloc_get_type_abort(user_ctx, fr_listen_t); proto_ldap_sync_ldap_thread_t *thread = talloc_get_type_abort(listen->thread_instance, proto_ldap_sync_ldap_thread_t); @@ -553,9 +554,9 @@ static void proto_ldap_connection_init(UNUSED fr_event_list_t *el, UNUSED fr_tim PERROR("Failed (re)initialising connection, will retry in %pV seconds", fr_box_time_delta(inst->handle_config.reconnection_delay)); - if (fr_event_timer_in(thread, thread->el, &thread->conn_retry_ev, - inst->handle_config.reconnection_delay, - proto_ldap_connection_init, listen) < 0) { + if (fr_timer_in(thread, tl, &thread->conn_retry_ev, + inst->handle_config.reconnection_delay, + false, proto_ldap_connection_init, listen) < 0) { FATAL("Failed inserting event: %s", fr_strerror()); } } @@ -852,7 +853,8 @@ static int proto_ldap_cookie_load_send(TALLOC_CTX *ctx, proto_ldap_sync_ldap_t c /** Timer event to retry running "load Cookie" on failures * */ -static void proto_ldap_cookie_load_retry(fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) { +static void proto_ldap_cookie_load_retry(fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx) +{ proto_ldap_cookie_load_retry_ctx *retry_ctx = talloc_get_type_abort(uctx, proto_ldap_cookie_load_retry_ctx); DEBUG2("Retrying \"load Cookie\" for sync no %ld", retry_ctx->sync_no); @@ -860,10 +862,10 @@ static void proto_ldap_cookie_load_retry(fr_event_list_t *el, UNUSED fr_time_t n retry_ctx->thread) < 0) { ERROR("Failed retrying \"load Cookie\". Will try again in %pV seconds", fr_box_time_delta(retry_ctx->inst->handle_config.reconnection_delay)); - (void) fr_event_timer_in(retry_ctx->thread->conn->h, el, - &retry_ctx->inst->parent->sync_config[retry_ctx->sync_no]->ev, - retry_ctx->inst->handle_config.reconnection_delay, - proto_ldap_cookie_load_retry, retry_ctx); + (void) fr_timer_in(retry_ctx->thread->conn->h, tl, + &retry_ctx->inst->parent->sync_config[retry_ctx->sync_no]->ev, + retry_ctx->inst->handle_config.reconnection_delay, + false, proto_ldap_cookie_load_retry, retry_ctx); return; } talloc_free(retry_ctx); @@ -973,9 +975,9 @@ static ssize_t proto_ldap_child_mod_write(fr_listen_t *li, void *packet_ctx, UNU .sync_no = packet_id, }; - (void) fr_event_timer_in(thread->conn->h, thread->el, &inst->parent->sync_config[packet_id]->ev, - inst->handle_config.reconnection_delay, - proto_ldap_cookie_load_retry, retry_ctx); + (void) fr_timer_in(thread->conn->h, thread->el->tl, &inst->parent->sync_config[packet_id]->ev, + inst->handle_config.reconnection_delay, + false, proto_ldap_cookie_load_retry, retry_ctx); } break; @@ -1166,9 +1168,9 @@ static void _proto_ldap_socket_closed(UNUSED connection_t *conn, connection_stat if (prev == CONNECTION_STATE_CONNECTED) { ERROR("LDAP connection closed. 
Scheduling restart in %pVs", fr_box_time_delta(inst->handle_config.reconnection_delay)); - if (fr_event_timer_in(thread, thread->el, &thread->conn_retry_ev, - inst->handle_config.reconnection_delay, - proto_ldap_connection_init, listen) < 0) { + if (fr_timer_in(thread, thread->el->tl, &thread->conn_retry_ev, + inst->handle_config.reconnection_delay, + false, proto_ldap_connection_init, listen) < 0) { FATAL("Failed inserting event: %s", fr_strerror()); } } @@ -1194,9 +1196,9 @@ static void _proto_ldap_socket_open_connected(connection_t *conn, UNUSED connect if (ldap_conn->fd < 0) { connection_failed: - if (fr_event_timer_in(thread, thread->el, &thread->conn_retry_ev, - inst->handle_config.reconnection_delay, - proto_ldap_connection_init, listen) < 0) { + if (fr_timer_in(thread, thread->el->tl, &thread->conn_retry_ev, + inst->handle_config.reconnection_delay, + false, proto_ldap_connection_init, listen) < 0) { FATAL("Failed inserting event: %s", fr_strerror()); } return; @@ -1275,7 +1277,7 @@ static void mod_event_list_set(fr_listen_t *li, fr_event_list_t *el, void *nr) /* * Initialise the connection */ - proto_ldap_connection_init(el, fr_event_list_time(el), li); + proto_ldap_connection_init(el->tl, fr_event_list_time(el), li); } static int mod_instantiate(module_inst_ctx_t const *mctx) diff --git a/src/listen/ldap_sync/proto_ldap_sync_ldap.h b/src/listen/ldap_sync/proto_ldap_sync_ldap.h index cfea0e27263..a0bf375c326 100644 --- a/src/listen/ldap_sync/proto_ldap_sync_ldap.h +++ b/src/listen/ldap_sync/proto_ldap_sync_ldap.h @@ -69,8 +69,8 @@ struct sync_state_s { uint32_t changes_since_cookie; //!< How many changes have been added since //!< the last cookie was stored. - fr_event_timer_t const *cookie_ev; //!< Timer event for sending cookies. - fr_event_timer_t const *retry_ev; //!< Timer event for retrying failed changes. + fr_timer_t *cookie_ev; //!< Timer event for sending cookies. + fr_timer_t *retry_ev; //!< Timer event for retrying failed changes. fr_pair_list_t trigger_args; //!< Arguments to make available in triggers. }; @@ -118,7 +118,7 @@ typedef struct { fr_listen_t *parent; //!< master IO handler. fr_listen_t *li; //!< Our listener. - fr_event_timer_t const *conn_retry_ev; //!< When to retry re-establishing the conn. + fr_timer_t *conn_retry_ev; //!< When to retry re-establishing the conn. connection_t *conn; //!< Our connection to the LDAP directory. 
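Handles like conn_retry_ev above now have two distinct teardown paths: fr_timer_disarm() pulls the event out of its list but keeps the memory so it can be re-armed cheaply later, while fr_timer_delete() disarms it, frees it, and resets the caller's pointer to NULL. A sketch, with ev a hypothetical fr_timer_t * handle:

	fr_timer_disarm(ev);	/* no longer fires; re-arm later via fr_timer_at()/fr_timer_in() with &ev */

	fr_timer_delete(&ev);	/* no longer fires; memory freed and ev set back to NULL */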
} proto_ldap_sync_ldap_thread_t; @@ -162,7 +162,7 @@ sync_state_t *sync_state_alloc(TALLOC_CTX *ctx, fr_ldap_connection_t *conn, prot int ldap_sync_cookie_store(sync_state_t *sync, bool refresh); -void ldap_sync_cookie_event(fr_event_list_t *el, fr_time_t now, void *uctx); +void ldap_sync_cookie_event(fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx); int ldap_sync_cookie_send(sync_packet_ctx_t *sync_packet_ctx); diff --git a/src/listen/ldap_sync/rfc4533.c b/src/listen/ldap_sync/rfc4533.c index 05e8afe2833..3ddb4784636 100644 --- a/src/listen/ldap_sync/rfc4533.c +++ b/src/listen/ldap_sync/rfc4533.c @@ -152,8 +152,8 @@ int rfc4533_sync_init(fr_ldap_connection_t *conn, size_t sync_no, proto_ldap_syn /* * Register event to store cookies at a regular interval */ - if (fr_event_timer_in(sync, conn->conn->el, &sync->cookie_ev, - inst->cookie_interval, ldap_sync_cookie_event, sync) < 0) { + if (fr_timer_in(sync, conn->conn->el->tl, &sync->cookie_ev, + inst->cookie_interval, false, ldap_sync_cookie_event, sync) < 0) { PERROR("Inserting LDAP cookie timer failed"); goto error; } diff --git a/src/listen/load/proto_load_step.c b/src/listen/load/proto_load_step.c index ccadf4e068a..47110347907 100644 --- a/src/listen/load/proto_load_step.c +++ b/src/listen/load/proto_load_step.c @@ -52,7 +52,7 @@ typedef struct { fr_stats_t stats; //!< statistics for this socket int fd; //!< for CSV files - fr_event_timer_t const *ev; //!< for writing statistics + fr_timer_t *ev; //!< for writing statistics fr_listen_t *parent; //!< master IO handler } proto_load_step_thread_t; @@ -235,13 +235,13 @@ static int mod_generate(fr_time_t now, void *uctx) } -static void write_stats(fr_event_list_t *el, fr_time_t now, void *uctx) +static void write_stats(fr_timer_list_t *tl, fr_time_t now, void *uctx) { proto_load_step_thread_t *thread = uctx; size_t len; char buffer[1024]; - (void) fr_event_timer_in(thread, el, &thread->ev, fr_time_delta_from_sec(1), write_stats, thread); + (void) fr_timer_in(thread, tl, &thread->ev, fr_time_delta_from_sec(1), false, write_stats, thread); len = fr_load_generator_stats_sprint(thread->l, now, buffer, sizeof(buffer)); if (write(thread->fd, buffer, len) < 0) { @@ -322,7 +322,7 @@ static void mod_event_list_set(fr_listen_t *li, fr_event_list_t *el, void *nr) return; } - (void) fr_event_timer_in(thread, thread->el, &thread->ev, fr_time_delta_from_sec(1), write_stats, thread); + (void) fr_timer_in(thread, thread->el->tl, &thread->ev, fr_time_delta_from_sec(1), false, write_stats, thread); len = fr_load_generator_stats_sprint(thread->l, fr_time(), buffer, sizeof(buffer)); if (write(thread->fd, buffer, len) < 0) { diff --git a/src/modules/rlm_ldap/rlm_ldap.c b/src/modules/rlm_ldap/rlm_ldap.c index 45667674645..2055efdc491 100644 --- a/src/modules/rlm_ldap/rlm_ldap.c +++ b/src/modules/rlm_ldap/rlm_ldap.c @@ -534,10 +534,10 @@ static int ldap_uri_part_escape(fr_value_box_t *vb, UNUSED void *uctx) /** Callback when LDAP query times out * */ -static void ldap_query_timeout(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) +static void ldap_query_timeout(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx) { fr_ldap_query_t *query = talloc_get_type_abort(uctx, fr_ldap_query_t); - trunk_request_t *treq; + trunk_request_t *treq; request_t *request; /* @@ -822,8 +822,8 @@ static xlat_action_t ldap_xlat(UNUSED TALLOC_CTX *ctx, UNUSED fr_dcursor_t *out, goto query_error; } - if (fr_event_timer_in(query, unlang_interpret_event_list(request), &query->ev, handle_config->res_timeout, - 
ldap_query_timeout, query) < 0) { + if (fr_timer_in(query, unlang_interpret_event_list(request)->tl, &query->ev, handle_config->res_timeout, + false, ldap_query_timeout, query) < 0) { REDEBUG("Unable to set timeout for LDAP query"); trunk_request_signal_cancel(query->treq); goto query_error; diff --git a/src/modules/rlm_radius/bio.c b/src/modules/rlm_radius/bio.c index 1a1c5b53f73..57b7d626537 100644 --- a/src/modules/rlm_radius/bio.c +++ b/src/modules/rlm_radius/bio.c @@ -99,7 +99,7 @@ typedef struct { fr_time_t last_sent; //!< last time we sent a packet. fr_time_t last_idle; //!< last time we had nothing to do - fr_event_timer_t const *zombie_ev; //!< Zombie timeout. + fr_timer_t *zombie_ev; //!< Zombie timeout. bool status_checking; //!< whether we're doing status checks bio_request_t *status_u; //!< for sending status check packets @@ -132,7 +132,7 @@ struct bio_request_s { size_t partial; //!< partially sent data radius_track_entry_t *rr; //!< ID tracking, resend count, etc. - fr_event_timer_t const *ev; //!< timer for retransmissions + fr_timer_t *ev; //!< timer for retransmissions fr_retry_t retry; //!< retransmission timers }; @@ -232,7 +232,7 @@ static void status_check_reset(bio_handle_t *h, bio_request_t *u) u->num_replies = 0; /* Reset */ u->retry.start = fr_time_wrap(0); - if (u->ev) (void) fr_event_timer_delete(&u->ev); + if (u->ev) (void) fr_timer_delete(&u->ev); bio_request_reset(u); } @@ -350,7 +350,7 @@ static void conn_init_error(UNUSED fr_event_list_t *el, UNUSED int fd, UNUSED in * * Setup retries, or fail the connection. */ -static void conn_init_timeout(fr_event_list_t *el, fr_time_t now, void *uctx) +static void conn_init_timeout(UNUSED fr_timer_list_t *tl, fr_time_t now, void *uctx) { connection_t *conn = talloc_get_type_abort(uctx, connection_t); bio_handle_t *h; @@ -384,7 +384,7 @@ static void conn_init_timeout(fr_event_list_t *el, fr_time_t now, void *uctx) return; case FR_RETRY_CONTINUE: - if (fr_event_fd_insert(h, NULL, el, h->fd, conn_init_writable, NULL, + if (fr_event_fd_insert(h, NULL, conn->el, h->fd, conn_init_writable, NULL, conn_init_error, conn) < 0) { PERROR("%s - Failed inserting FD event", h->ctx.module_name); connection_signal_reconnect(conn, CONNECTION_FAILED); @@ -398,12 +398,12 @@ static void conn_init_timeout(fr_event_list_t *el, fr_time_t now, void *uctx) /** Perform the next step of init and negotiation. * */ -static void conn_init_next(fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) +static void conn_init_next(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx) { connection_t *conn = talloc_get_type_abort(uctx, connection_t); bio_handle_t *h = talloc_get_type_abort(conn->h, bio_handle_t); - if (fr_event_fd_insert(h, NULL, el, h->fd, conn_init_writable, NULL, conn_init_error, conn) < 0) { + if (fr_event_fd_insert(h, NULL, conn->el, h->fd, conn_init_writable, NULL, conn_init_error, conn) < 0) { PERROR("%s - Failed inserting FD event", h->ctx.module_name); connection_signal_reconnect(conn, CONNECTION_FAILED); } @@ -507,7 +507,7 @@ static void conn_init_readable(fr_event_list_t *el, UNUSED int fd, UNUSED int fl /* * Set the timer for the next retransmit. 
*/ - if (fr_event_timer_at(h, el, &u->ev, u->retry.next, conn_init_next, conn) < 0) { + if (fr_timer_at(h, el->tl, &u->ev, u->retry.next, false, conn_init_next, conn) < 0) { connection_signal_reconnect(conn, CONNECTION_FAILED); } return; @@ -593,7 +593,7 @@ static void conn_init_writable(fr_event_list_t *el, UNUSED int fd, UNUSED int fl h->ctx.module_name, (u->retry.count == 1) ? "Originated" : "Retransmitted", fr_box_time_delta(u->retry.rt)); - if (fr_event_timer_at(h, el, &u->ev, u->retry.next, conn_init_timeout, conn) < 0) { + if (fr_timer_at(h, el->tl, &u->ev, u->retry.next, false, conn_init_timeout, conn) < 0) { PERROR("%s - Failed inserting timer event", h->ctx.module_name); goto fail; } @@ -613,7 +613,7 @@ static int _bio_handle_free(bio_handle_t *h) fr_assert(h->fd >= 0); - if (h->status_u) fr_event_timer_delete(&h->status_u->ev); + if (h->status_u) fr_timer_delete(&h->status_u->ev); /* * The connection code will take care of deleting the FD from the event loop. @@ -874,7 +874,7 @@ static connection_state_t conn_failed(void *handle, connection_state_t state, UN /* * Reset the Status-Server checks. */ - if (h->status_u && h->status_u->ev) (void) fr_event_timer_delete(&h->status_u->ev); + if (h->status_u && h->status_u->ev) (void) fr_timer_delete(&h->status_u->ev); } break; @@ -1252,7 +1252,7 @@ static int encode(bio_handle_t *h, request_t *request, bio_request_t *u, uint8_t /** Revive a connection after "revive_interval" * */ -static void revive_timeout(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) +static void revive_timeout(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx) { trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t); bio_handle_t *h = talloc_get_type_abort(tconn->conn->h, bio_handle_t); @@ -1264,7 +1264,7 @@ static void revive_timeout(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, voi /** Mark a connection dead after "zombie_interval" * */ -static void zombie_timeout(fr_event_list_t *el, fr_time_t now, void *uctx) +static void zombie_timeout(fr_timer_list_t *tl, fr_time_t now, void *uctx) { trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t); bio_handle_t *h = talloc_get_type_abort(tconn->conn->h, bio_handle_t); @@ -1290,8 +1290,9 @@ static void zombie_timeout(fr_event_list_t *el, fr_time_t now, void *uctx) /* * Revive the connection after a time. 
*/ - if (fr_event_timer_at(h, el, &h->zombie_ev, - fr_time_add(now, h->ctx.inst->revive_interval), revive_timeout, tconn) < 0) { + if (fr_timer_at(h, tl, &h->zombie_ev, + fr_time_add(now, h->ctx.inst->revive_interval), false, + revive_timeout, tconn) < 0) { ERROR("Failed inserting revive timeout for connection"); trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED); } @@ -1370,8 +1371,8 @@ static bool check_for_zombie(fr_event_list_t *el, trunk_connection_t *tconn, fr_ trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED); } } else { - if (fr_event_timer_at(h, el, &h->zombie_ev, fr_time_add(now, h->ctx.inst->zombie_period), - zombie_timeout, tconn) < 0) { + if (fr_timer_at(h, el->tl, &h->zombie_ev, fr_time_add(now, h->ctx.inst->zombie_period), + false, zombie_timeout, tconn) < 0) { ERROR("Failed inserting zombie timeout for connection"); trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED); } @@ -1841,7 +1842,7 @@ static void protocol_error_reply(bio_request_t *u, bio_handle_t *h) /** Handle retries for a status check * */ -static void status_check_next(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) +static void status_check_next(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx) { trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t); bio_handle_t *h = talloc_get_type_abort(tconn->conn->h, bio_handle_t); @@ -1880,7 +1881,7 @@ static void status_check_reply(trunk_request_t *treq, fr_time_t now) /* * Set the timer for the next retransmit. */ - if (fr_event_timer_at(h, h->ctx.el, &u->ev, u->retry.next, status_check_next, treq->tconn) < 0) { + if (fr_timer_at(h, h->ctx.el->tl, &u->ev, u->retry.next, false, status_check_next, treq->tconn) < 0) { trunk_connection_signal_reconnect(treq->tconn, CONNECTION_FAILED); } return; @@ -2087,7 +2088,7 @@ static void request_cancel(UNUSED connection_t *conn, void *preq_to_reset, * queued for sendmmsg but never actually * sent. */ - if (u->ev) (void) fr_event_timer_delete(&u->ev); + if (u->ev) (void) fr_timer_delete(&u->ev); } /* @@ -2105,7 +2106,7 @@ static void request_conn_release(connection_t *conn, void *preq_to_reset, UNUSED bio_request_t *u = preq_to_reset; bio_handle_t *h = talloc_get_type_abort(conn->h, bio_handle_t); - if (u->ev) (void)fr_event_timer_delete(&u->ev); + if (u->ev) (void)fr_timer_delete(&u->ev); bio_request_reset(u); if (h->ctx.inst->mode == RLM_RADIUS_MODE_REPLICATE) return; @@ -2246,7 +2247,7 @@ static int _bio_request_free(bio_request_t *u) fr_assert_msg(!u->ev, "bio_request_t freed with active timer"); - if (u->ev) (void) fr_event_timer_delete(&u->ev); + if (u->ev) (void) fr_timer_delete(&u->ev); fr_assert(u->rr == NULL); diff --git a/src/modules/rlm_sql/drivers/rlm_sql_cassandra/rlm_sql_cassandra.c b/src/modules/rlm_sql/drivers/rlm_sql_cassandra/rlm_sql_cassandra.c index cc2c807191b..848cce7067c 100644 --- a/src/modules/rlm_sql/drivers/rlm_sql_cassandra/rlm_sql_cassandra.c +++ b/src/modules/rlm_sql/drivers/rlm_sql_cassandra/rlm_sql_cassandra.c @@ -141,8 +141,8 @@ typedef struct { TALLOC_CTX *log_ctx; //!< Prevent unneeded memory allocation by keeping a //!< permanent pool, to store log entries. fr_dlist_head_t queries; //!< Outstanding queries on this connection. - fr_event_timer_t const *read_ev; //!< Polling event for reading query results. - fr_event_timer_t const *write_ev; //!< Polling event for sending queries. + fr_timer_t *read_ev; //!< Polling event for reading query results. + fr_timer_t *write_ev; //!< Polling event for sending queries. 
uint poll_interval; //!< Interval between read polling. uint poll_count; //!< How many consecutive polls had no available results. } rlm_sql_cassandra_conn_t; @@ -455,7 +455,7 @@ static void sql_trunk_request_mux(UNUSED fr_event_list_t *el, trunk_connection_t } } -static void sql_trunk_connection_read_poll(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) +static void sql_trunk_connection_read_poll(fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx) { rlm_sql_cassandra_conn_t *c = talloc_get_type_abort(uctx, rlm_sql_cassandra_conn_t); cassandra_query_t *cass_query, *next_query = NULL; @@ -536,14 +536,14 @@ static void sql_trunk_connection_read_poll(UNUSED fr_event_list_t *el, UNUSED fr * There are still outstanding queries, add another polling event */ if (fr_dlist_num_elements(&c->queries)) { - if (fr_event_timer_in(c, el, &c->read_ev, fr_time_delta_from_usec(c->poll_interval), - sql_trunk_connection_read_poll, c) < 0) { + if (fr_timer_in(c, tl, &c->read_ev, fr_time_delta_from_usec(c->poll_interval), + false, sql_trunk_connection_read_poll, c) < 0) { ERROR("Unable to insert polling event"); } } } -static void sql_trunk_connection_write_poll(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) +static void sql_trunk_connection_write_poll(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx) { trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t); @@ -558,22 +558,22 @@ static void sql_trunk_connection_write_poll(UNUSED fr_event_list_t *el, UNUSED f * This "notify" callback sets up the appropriate polling events. */ CC_NO_UBSAN(function) /* UBSAN: false positive - public vs private connection_t trips --fsanitize=function */ -static void sql_trunk_connection_notify(UNUSED trunk_connection_t *tconn, connection_t *conn, UNUSED fr_event_list_t *el, +static void sql_trunk_connection_notify(UNUSED trunk_connection_t *tconn, connection_t *conn, fr_event_list_t *el, trunk_connection_event_t notify_on, UNUSED void *uctx) { rlm_sql_cassandra_conn_t *c = talloc_get_type_abort(conn->h, rlm_sql_cassandra_conn_t); switch (notify_on) { case TRUNK_CONN_EVENT_NONE: - if (c->read_ev) fr_event_timer_delete(&c->read_ev); - if (c->write_ev) fr_event_timer_delete(&c->write_ev); + if (c->read_ev) fr_timer_delete(&c->read_ev); + if (c->write_ev) fr_timer_delete(&c->write_ev); return; case TRUNK_CONN_EVENT_BOTH: case TRUNK_CONN_EVENT_READ: if (fr_dlist_num_elements(&c->queries)) { - if (fr_event_timer_in(c, el, &c->read_ev, fr_time_delta_from_usec(c->poll_interval), - sql_trunk_connection_read_poll, c) < 0) { + if (fr_timer_in(c, el->tl, &c->read_ev, fr_time_delta_from_usec(c->poll_interval), + false, sql_trunk_connection_read_poll, c) < 0) { ERROR("Unable to insert polling event"); } } @@ -582,8 +582,8 @@ static void sql_trunk_connection_notify(UNUSED trunk_connection_t *tconn, connec FALL_THROUGH; case TRUNK_CONN_EVENT_WRITE: - if (fr_event_timer_in(c, el, &c->write_ev, fr_time_delta_from_usec(0), - sql_trunk_connection_write_poll, tconn) < 0) { + if (fr_timer_in(c, el->tl, &c->write_ev, fr_time_delta_from_usec(0), + false, sql_trunk_connection_write_poll, tconn) < 0) { ERROR("Unable to insert polling event"); } return; diff --git a/src/modules/rlm_sql/drivers/rlm_sql_oracle/rlm_sql_oracle.c b/src/modules/rlm_sql/drivers/rlm_sql_oracle/rlm_sql_oracle.c index d157225b289..ae930a3f6a7 100644 --- a/src/modules/rlm_sql/drivers/rlm_sql_oracle/rlm_sql_oracle.c +++ b/src/modules/rlm_sql/drivers/rlm_sql_oracle/rlm_sql_oracle.c @@ -66,8 +66,8 @@ typedef struct { 
 	connection_t		*conn;		//!< Generic connection structure for this connection.
 	rlm_sql_config_t const	*config;	//!< SQL instance configuration.
 	fr_sql_query_t		*query_ctx;	//!< Current request running on the connection.
-	fr_event_timer_t const	*read_ev;	//!< Timer event for polling reading this connection
-	fr_event_timer_t const	*write_ev;	//!< Timer event for polling writing this connection
+	fr_timer_t		*read_ev;	//!< Timer event for polling reading this connection
+	fr_timer_t		*write_ev;	//!< Timer event for polling writing this connection
 	uint			select_interval;	//!< How frequently this connection gets polled for select queries.
 	uint			query_interval;		//!< How frequently this connection gets polled for other queries.
 	uint			poll_count;	//!< How many polls have been done for the current query.
@@ -432,7 +432,7 @@ static void sql_request_cancel_mux(UNUSED fr_event_list_t *el, trunk_connection_
 	trunk_request_signal_cancel_complete(treq);
 }
-static void sql_trunk_connection_read_poll(fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+static void sql_trunk_connection_read_poll(fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
 {
 	rlm_sql_oracle_conn_t	*c = talloc_get_type_abort(uctx, rlm_sql_oracle_conn_t);
 	fr_sql_query_t		*query_ctx = c->query_ctx;
@@ -464,9 +464,9 @@ static void sql_trunk_connection_read_poll(fr_event_list_t *el, UNUSED fr_time_t
 	switch (ret) {
 	case OCI_STILL_EXECUTING:
 		ROPTIONAL(RDEBUG3, DEBUG3, "Still awaiting response");
-		if (fr_event_timer_in(c, el, &c->read_ev,
-				      fr_time_delta_from_usec(query_ctx->type == SQL_QUERY_SELECT ? c->select_interval : c->query_interval),
-				      sql_trunk_connection_read_poll, c) < 0) {
+		if (fr_timer_in(c, tl, &c->read_ev,
+				fr_time_delta_from_usec(query_ctx->type == SQL_QUERY_SELECT ? c->select_interval : c->query_interval),
+				false, sql_trunk_connection_read_poll, c) < 0) {
 			ERROR("Unable to insert polling event");
 		}
 		return;
@@ -504,8 +504,8 @@ static void sql_trunk_connection_read_poll(fr_event_list_t *el, UNUSED fr_time_t
 	ret = OCIBreak(c->ctx, c->error);
 	if (ret == OCI_STILL_EXECUTING) {
 		ROPTIONAL(RDEBUG3, DEBUG3, "Still awaiting response");
-		if (fr_event_timer_in(c, el, &c->read_ev, fr_time_delta_from_usec(query_ctx->type == SQL_QUERY_SELECT ? c->select_interval : c->query_interval),
-				      sql_trunk_connection_read_poll, c) < 0) {
+		if (fr_timer_in(c, tl, &c->read_ev, fr_time_delta_from_usec(query_ctx->type == SQL_QUERY_SELECT ? c->select_interval : c->query_interval),
+				false, sql_trunk_connection_read_poll, c) < 0) {
 			ERROR("Unable to insert polling event");
 		}
 		return;
@@ -521,7 +521,7 @@ static void sql_trunk_connection_read_poll(fr_event_list_t *el, UNUSED fr_time_t
 	if (request) unlang_interpret_mark_runnable(request);
 }
-static void sql_trunk_connection_write_poll(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+static void sql_trunk_connection_write_poll(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
 {
 	trunk_connection_t	*tconn = talloc_get_type_abort(uctx, trunk_connection_t);
@@ -543,15 +543,15 @@ static void sql_trunk_connection_notify(UNUSED trunk_connection_t *tconn, connec
 	uint		poll_interval = (query_ctx && query_ctx->type != SQL_QUERY_SELECT) ?
 	c->query_interval : c->select_interval;
 	switch (notify_on) {
 	case TRUNK_CONN_EVENT_NONE:
-		if (c->read_ev) fr_event_timer_delete(&c->read_ev);
-		if (c->write_ev) fr_event_timer_delete(&c->write_ev);
+		if (c->read_ev) fr_timer_delete(&c->read_ev);
+		if (c->write_ev) fr_timer_delete(&c->write_ev);
 		return;
 	case TRUNK_CONN_EVENT_BOTH:
 	case TRUNK_CONN_EVENT_READ:
 		if (c->query_ctx) {
-			if (fr_event_timer_in(c, el, &c->read_ev, fr_time_delta_from_usec(poll_interval),
-						sql_trunk_connection_read_poll, c) < 0) {
+			if (fr_timer_in(c, el->tl, &c->read_ev, fr_time_delta_from_usec(poll_interval),
+					false, sql_trunk_connection_read_poll, c) < 0) {
 				ERROR("Unable to insert polling event");
 			}
 		}
@@ -560,8 +560,8 @@ static void sql_trunk_connection_notify(UNUSED trunk_connection_t *tconn, connec
 		FALL_THROUGH;
 	case TRUNK_CONN_EVENT_WRITE:
-		if (fr_event_timer_in(c, el, &c->write_ev, fr_time_delta_from_usec(0),
-					sql_trunk_connection_write_poll, tconn) < 0) {
+		if (fr_timer_in(c, el->tl, &c->write_ev, fr_time_delta_from_usec(0),
+				false, sql_trunk_connection_write_poll, tconn) < 0) {
 			ERROR("Unable to insert polling event");
 		}
 		return;
diff --git a/src/modules/rlm_sql/drivers/rlm_sql_unixodbc/rlm_sql_unixodbc.c b/src/modules/rlm_sql/drivers/rlm_sql_unixodbc/rlm_sql_unixodbc.c
index dca8006a553..91f9276d6f8 100644
--- a/src/modules/rlm_sql/drivers/rlm_sql_unixodbc/rlm_sql_unixodbc.c
+++ b/src/modules/rlm_sql/drivers/rlm_sql_unixodbc/rlm_sql_unixodbc.c
@@ -41,8 +41,8 @@ typedef struct {
 	rlm_sql_config_t const	*config;	/* SQL instance configuration */
 	SQLUSMALLINT		async_mode;	/* What Async mode does this driver support */
 	fr_sql_query_t		*query_ctx;	/* Current query running on the connection */
-	fr_event_timer_t const	*read_ev;	/* Timer event for polling reading this connection */
-	fr_event_timer_t const	*write_ev;	/* Timer event for polling writing this connection */
+	fr_timer_t		*read_ev;	/* Timer event for polling reading this connection */
+	fr_timer_t		*write_ev;	/* Timer event for polling writing this connection */
 	uint			select_interval;	/* How frequently this connection gets polled for select queries */
 	uint			query_interval;		/* How frequently this connection gets polled for other queries */
 	uint			poll_count;	/* How many polls have been done for the current query */
@@ -114,8 +114,8 @@ static void _sql_connection_close(UNUSED fr_event_list_t *el, void *h, UNUSED vo
 {
 	rlm_sql_unixodbc_conn_t	*c = talloc_get_type_abort(h, rlm_sql_unixodbc_conn_t);
-	if (c->read_ev) fr_event_timer_delete(&c->read_ev);
-	if (c->write_ev) fr_event_timer_delete(&c->write_ev);
+	if (c->read_ev) fr_timer_delete(&c->read_ev);
+	if (c->write_ev) fr_timer_delete(&c->write_ev);
 	if (c->stmt) SQLFreeHandle(SQL_HANDLE_STMT, c->stmt);
@@ -169,7 +169,7 @@ static connection_state_t sql_trunk_connection_init_stmt(rlm_sql_unixodbc_conn_t
 	return CONNECTION_STATE_CONNECTED;
 }
-static void sql_trunk_connection_init_poll(fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
+static void sql_trunk_connection_init_poll(fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
 {
 	rlm_sql_unixodbc_conn_t	*c = talloc_get_type_abort(uctx, rlm_sql_unixodbc_conn_t);
 	SQLRETURN		ret;
@@ -180,8 +180,8 @@ static void sql_trunk_connection_init_poll(fr_event_list_t *el, UNUSED fr_time_t
 				UNCONST(SQLCHAR *, c->config->sql_password), strlen(c->config->sql_password));
 	if (ret == SQL_STILL_EXECUTING) {
-		if (fr_event_timer_in(c, el, &c->read_ev, fr_time_delta_from_usec(c->query_interval),
-				      sql_trunk_connection_init_poll, c) < 0) {
+		if (fr_timer_in(c, tl, &c->read_ev,
fr_time_delta_from_usec(c->query_interval), + false, sql_trunk_connection_init_poll, c) < 0) { ERROR("Unable to insert polling event"); connection_signal_reconnect(c->conn, CONNECTION_FAILED); } @@ -255,8 +255,8 @@ static connection_state_t _sql_connection_init(void **h, connection_t *conn, voi UNCONST(SQLCHAR *, config->sql_password), strlen(config->sql_password)); if (ret == SQL_STILL_EXECUTING) { - if (fr_event_timer_in(c, conn->el, &c->read_ev, fr_time_delta_from_usec(c->query_interval), - sql_trunk_connection_init_poll, c) < 0) { + if (fr_timer_in(c, conn->el->tl, &c->read_ev, fr_time_delta_from_usec(c->query_interval), + false, sql_trunk_connection_init_poll, c) < 0) { ERROR("Unable to insert polling event"); goto error; } @@ -363,7 +363,7 @@ static void sql_request_cancel_mux(UNUSED fr_event_list_t *el, trunk_connection_ trunk_request_signal_cancel_complete(treq); } -static void sql_trunk_connection_read_poll(fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) +static void sql_trunk_connection_read_poll(fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx) { rlm_sql_unixodbc_conn_t *c = talloc_get_type_abort(uctx, rlm_sql_unixodbc_conn_t); fr_sql_query_t *query_ctx = c->query_ctx; @@ -385,9 +385,9 @@ static void sql_trunk_connection_read_poll(fr_event_list_t *el, UNUSED fr_time_t } if (ret == SQL_STILL_EXECUTING) { ROPTIONAL(RDEBUG3, DEBUG3, "Still awaiting response"); - if (fr_event_timer_in(c, el, &c->read_ev, - fr_time_delta_from_usec(query_ctx->type == SQL_QUERY_SELECT ? c->select_interval : c->query_interval), - sql_trunk_connection_read_poll, c) < 0) { + if (fr_timer_in(c, tl, &c->read_ev, + fr_time_delta_from_usec(query_ctx->type == SQL_QUERY_SELECT ? c->select_interval : c->query_interval), + false, sql_trunk_connection_read_poll, c) < 0) { ERROR("Unable to insert polling event"); } return; @@ -419,8 +419,8 @@ static void sql_trunk_connection_read_poll(fr_event_list_t *el, UNUSED fr_time_t ret = SQLCancel(c->stmt); if (ret == SQL_STILL_EXECUTING) { ROPTIONAL(RDEBUG3, DEBUG3, "Still awaiting response"); - if (fr_event_timer_in(c, el, &c->read_ev, fr_time_delta_from_usec(query_ctx->type == SQL_QUERY_SELECT ? c->select_interval : c->query_interval), - sql_trunk_connection_read_poll, c) < 0) { + if (fr_timer_in(c, tl, &c->read_ev, fr_time_delta_from_usec(query_ctx->type == SQL_QUERY_SELECT ? c->select_interval : c->query_interval), + false, sql_trunk_connection_read_poll, c) < 0) { ERROR("Unable to insert polling event"); } return; @@ -435,7 +435,7 @@ static void sql_trunk_connection_read_poll(fr_event_list_t *el, UNUSED fr_time_t if (request) unlang_interpret_mark_runnable(request); } -static void sql_trunk_connection_write_poll(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) +static void sql_trunk_connection_write_poll(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx) { trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t); @@ -449,7 +449,7 @@ static void sql_trunk_connection_write_poll(UNUSED fr_event_list_t *el, UNUSED f * This "notify" callback sets up the appropriate polling events. 
*/ CC_NO_UBSAN(function) /* UBSAN: false positive - public vs private connection_t trips --fsanitize=function */ -static void sql_trunk_connection_notify(UNUSED trunk_connection_t *tconn, connection_t *conn, UNUSED fr_event_list_t *el, +static void sql_trunk_connection_notify(UNUSED trunk_connection_t *tconn, connection_t *conn, fr_event_list_t *el, trunk_connection_event_t notify_on, UNUSED void *uctx) { rlm_sql_unixodbc_conn_t *c = talloc_get_type_abort(conn->h, rlm_sql_unixodbc_conn_t); @@ -457,15 +457,15 @@ static void sql_trunk_connection_notify(UNUSED trunk_connection_t *tconn, connec uint poll_interval = (query_ctx && query_ctx->type != SQL_QUERY_SELECT) ? c->query_interval : c->select_interval; switch (notify_on) { case TRUNK_CONN_EVENT_NONE: - if (c->read_ev) fr_event_timer_delete(&c->read_ev); - if (c->write_ev) fr_event_timer_delete(&c->write_ev); + if (c->read_ev) fr_timer_delete(&c->read_ev); + if (c->write_ev) fr_timer_delete(&c->write_ev); return; case TRUNK_CONN_EVENT_BOTH: case TRUNK_CONN_EVENT_READ: if (c->query_ctx) { - if (fr_event_timer_in(c, el, &c->read_ev, fr_time_delta_from_usec(poll_interval), - sql_trunk_connection_read_poll, c) < 0) { + if (fr_timer_in(c, el->tl, &c->read_ev, fr_time_delta_from_usec(poll_interval), + false, sql_trunk_connection_read_poll, c) < 0) { ERROR("Unable to insert polling event"); } } @@ -474,8 +474,8 @@ static void sql_trunk_connection_notify(UNUSED trunk_connection_t *tconn, connec FALL_THROUGH; case TRUNK_CONN_EVENT_WRITE: - if (fr_event_timer_in(c, el, &c->write_ev, fr_time_delta_from_usec(0), - sql_trunk_connection_write_poll, tconn) < 0) { + if (fr_timer_in(c, el->tl, &c->write_ev, fr_time_delta_from_usec(0), + false, sql_trunk_connection_write_poll, tconn) < 0) { ERROR("Unable to insert polling event"); } return; diff --git a/src/modules/rlm_tacacs/rlm_tacacs_tcp.c b/src/modules/rlm_tacacs/rlm_tacacs_tcp.c index bf8f14d3f7e..e4741f8621d 100644 --- a/src/modules/rlm_tacacs/rlm_tacacs_tcp.c +++ b/src/modules/rlm_tacacs/rlm_tacacs_tcp.c @@ -128,7 +128,7 @@ typedef struct { fr_time_t last_sent; //!< last time we sent a packet. fr_time_t last_idle; //!< last time we had nothing to do - fr_event_timer_t const *zombie_ev; //!< Zombie timeout. + fr_timer_t *zombie_ev; //!< Zombie timeout. trunk_connection_t *tconn; //!< trunk connection } udp_handle_t; @@ -148,7 +148,7 @@ struct udp_request_s { uint8_t *packet; //!< Packet we write to the network. size_t packet_len; //!< Length of the packet. - fr_event_timer_t const *ev; //!< timer for retransmissions + fr_timer_t *ev; //!< timer for retransmissions fr_retry_t retry; //!< retransmission timers }; @@ -211,7 +211,7 @@ static void udp_request_reset(udp_handle_t *h, udp_request_t *u) u->outstanding = false; h->active--; - if (u->ev) (void)fr_event_timer_delete(&u->ev); + if (u->ev) (void)fr_timer_delete(&u->ev); /* * We've sent 255 packets, and received all replies. Shut the connection down. 
@@ -643,7 +643,7 @@ static int encode(udp_handle_t *h, request_t *request, udp_request_t *u) /** Revive a connection after "revive_interval" * */ -static void revive_timeout(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) +static void revive_timeout(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx) { trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t); udp_handle_t *h = talloc_get_type_abort(tconn->conn->h, udp_handle_t); @@ -655,7 +655,7 @@ static void revive_timeout(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, voi /** Mark a connection dead after "zombie_interval" * */ -static void zombie_timeout(fr_event_list_t *el, fr_time_t now, void *uctx) +static void zombie_timeout(fr_timer_list_t *tl, fr_time_t now, void *uctx) { trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t); udp_handle_t *h = talloc_get_type_abort(tconn->conn->h, udp_handle_t); @@ -672,8 +672,8 @@ static void zombie_timeout(fr_event_list_t *el, fr_time_t now, void *uctx) /* * Revive the connection after a time. */ - if (fr_event_timer_at(h, el, &h->zombie_ev, - fr_time_add(now, h->inst->parent->revive_interval), revive_timeout, h) < 0) { + if (fr_timer_at(h, tl, &h->zombie_ev, + fr_time_add(now, h->inst->parent->revive_interval), false, revive_timeout, h) < 0) { ERROR("Failed inserting revive timeout for connection"); trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED); } @@ -703,7 +703,7 @@ static void zombie_timeout(fr_event_list_t *el, fr_time_t now, void *uctx) * - true if the connection is zombie. * - false if the connection is not zombie. */ -static bool check_for_zombie(fr_event_list_t *el, trunk_connection_t *tconn, fr_time_t now, fr_time_t last_sent) +static bool check_for_zombie(fr_timer_list_t *tl, trunk_connection_t *tconn, fr_time_t now, fr_time_t last_sent) { udp_handle_t *h = talloc_get_type_abort(tconn->conn->h, udp_handle_t); @@ -733,8 +733,8 @@ static bool check_for_zombie(fr_event_list_t *el, trunk_connection_t *tconn, fr_ WARN("%s - Entering Zombie state - connection %s", h->module_name, h->name); trunk_connection_signal_inactive(tconn); - if (fr_event_timer_at(h, el, &h->zombie_ev, fr_time_add(now, h->inst->parent->zombie_period), - zombie_timeout, h) < 0) { + if (fr_timer_at(h, tl, &h->zombie_ev, fr_time_add(now, h->inst->parent->zombie_period), + false, zombie_timeout, h) < 0) { ERROR("Failed inserting zombie timeout for connection"); trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED); } @@ -747,9 +747,9 @@ static bool check_for_zombie(fr_event_list_t *el, trunk_connection_t *tconn, fr_ * Note that with TCP we don't actually retry on this particular connection, but the retry timer allows us to * fail over from one connection to another when a connection fails. 
*/ -static void request_retry(fr_event_list_t *el, fr_time_t now, void *uctx) +static void request_retry(fr_timer_list_t *tl, fr_time_t now, void *uctx) { - trunk_request_t *treq = talloc_get_type_abort(uctx, trunk_request_t); + trunk_request_t *treq = talloc_get_type_abort(uctx, trunk_request_t); udp_request_t *u = talloc_get_type_abort(treq->preq, udp_request_t); udp_result_t *r = talloc_get_type_abort(treq->rctx, udp_result_t); request_t *request = treq->request; @@ -786,7 +786,7 @@ static void request_retry(fr_event_list_t *el, fr_time_t now, void *uctx) r->rcode = RLM_MODULE_FAIL; trunk_request_signal_complete(treq); - check_for_zombie(el, tconn, now, u->retry.start); + check_for_zombie(tl, tconn, now, u->retry.start); } CC_NO_UBSAN(function) /* UBSAN: false positive - public vs private connection_t trips --fsanitize=function*/ @@ -985,7 +985,7 @@ static void request_mux(fr_event_list_t *el, h->last_sent = u->retry.start; if (fr_time_lteq(h->first_sent, h->last_idle)) h->first_sent = h->last_sent; - if (fr_event_timer_at(u, el, &u->ev, u->retry.next, request_retry, treq) < 0) { + if (fr_timer_at(u, el->tl, &u->ev, u->retry.next, false, request_retry, treq) < 0) { RERROR("Failed inserting retransmit timeout for connection"); trunk_request_signal_fail(treq); } diff --git a/src/modules/rlm_unbound/io.c b/src/modules/rlm_unbound/io.c index 6467943ea8a..c660eb86b1e 100644 --- a/src/modules/rlm_unbound/io.c +++ b/src/modules/rlm_unbound/io.c @@ -60,7 +60,7 @@ typedef struct { unbound_io_event_base_t *ev_b; //!< Event base this handle was created for. - fr_event_timer_t const *timer; //!< Stores the pointer to the enabled timer for + fr_timer_t *timer; //!< Stores the pointer to the enabled timer for ///< this event handled. libunbound uses a single ///< handle for managing related FD events and ///< timers, which is weird, but ok... @@ -150,7 +150,7 @@ static void _unbound_io_event_free(struct ub_event *ub_ev) * given query. The timeout happening causes the timeout against the server * to be increased for any subsequent queries sent to it. */ -static void _unbound_io_service_timer_expired(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) +static void _unbound_io_service_timer_expired(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx) { unbound_io_event_t *ev = talloc_get_type_abort(uctx, unbound_io_event_t); @@ -232,7 +232,7 @@ static void _unbound_io_service_errored(UNUSED fr_event_list_t *el, * that it fired. This is imperfect but unbound * doesn't have a callback for receiving errors. 
*/ - if (fr_event_timer_delete(&ev->timer) < 0) { + if (fr_timer_delete(&ev->timer) < 0) { PERROR("ubound event %p - Failed disarming timeout", ev); } @@ -307,8 +307,8 @@ static int _unbound_io_event_activate(struct ub_event *ub_ev, struct timeval *tv DEBUG4("unbound event %p - Timeout in %pV seconds", ev, fr_box_time_delta(timeout)); - if (fr_event_timer_in(ev, ev->ev_b->el, &ev->timer, - timeout, _unbound_io_service_timer_expired, ev) < 0) { + if (fr_timer_in(ev, ev->ev_b->el->tl, &ev->timer, + timeout, false, _unbound_io_service_timer_expired, ev) < 0) { PERROR("unbound event %p - Failed adding timeout", ev); if (ev->events & (UB_EV_READ | UB_EV_WRITE)) { @@ -347,7 +347,7 @@ static int _unbound_io_event_deactivate(struct ub_event *ub_ev) if (ev->events & UB_EV_TIMEOUT) { DEBUG4("unbound event %p - Disarming timeout", ev); - if (ev->timer && (fr_event_timer_delete(&ev->timer) < 0)) { + if (ev->timer && (fr_timer_delete(&ev->timer) < 0)) { PERROR("ubound event %p - Failed disarming timeout", ev); ret = -1; @@ -382,7 +382,7 @@ static int _unbound_io_timer_modify(struct ub_event *ub_ev, UNUSED struct ub_eve ev, uctx, ev->uctx); ev->uctx = uctx; } - if (ev->timer && (fr_event_timer_delete(&ev->timer) < 0)) { + if (ev->timer && (fr_timer_delete(&ev->timer) < 0)) { PERROR("ubound event %p - Failed disarming timeout", ev); ret = -1; /* Continue ? */ @@ -392,8 +392,9 @@ static int _unbound_io_timer_modify(struct ub_event *ub_ev, UNUSED struct ub_eve DEBUG4("unbound event %p - Timeout in %pV seconds", ev, fr_box_time_delta(timeout)); - if (fr_event_timer_in(ev, ev->ev_b->el, &ev->timer, - timeout, _unbound_io_service_timer_expired, ev) < 0) { + if (fr_timer_in(ev, ev->ev_b->el->tl, &ev->timer, + timeout, + false, _unbound_io_service_timer_expired, ev) < 0) { PERROR("unbound event %p - Failed adding timeout", ev); ret = -1; @@ -413,7 +414,7 @@ static int _unbound_io_timer_deactivate(struct ub_event *ub_ev) DEBUG4("unbound event %p - Disarming timeout", ev); - if (ev->timer && (fr_event_timer_delete(&ev->timer) < 0)) { + if (ev->timer && (fr_timer_delete(&ev->timer) < 0)) { PERROR("unbound event %p - Failed disarming timeout", ev); return -1; diff --git a/src/modules/rlm_unbound/rlm_unbound.c b/src/modules/rlm_unbound/rlm_unbound.c index 14a4371bf4a..349490be601 100644 --- a/src/modules/rlm_unbound/rlm_unbound.c +++ b/src/modules/rlm_unbound/rlm_unbound.c @@ -61,7 +61,7 @@ typedef struct { uint16_t count; //!< Number of results to return fr_value_box_list_t list; //!< Where to put the parsed results TALLOC_CTX *out_ctx; //!< CTX to allocate parsed results in - fr_event_timer_t const *ev; //!< Event for timeout + fr_timer_t *ev; //!< Event for timeout } unbound_request_t; /* @@ -115,7 +115,7 @@ static void xlat_unbound_callback(void *mydata, int rcode, void *packet, int pac * Request has completed remove timeout event and set * async_id to 0 so ub_cancel() is not called when ur is freed */ - if (ur->ev) (void)fr_event_timer_delete(&ur->ev); + if (ur->ev) (void)fr_timer_delete(&ur->ev); ur->async_id = 0; /* @@ -256,7 +256,7 @@ resume: /** Callback from our timeout event to cancel a request * */ -static void xlat_unbound_timeout(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) +static void xlat_unbound_timeout(UNUSED fr_timer_list_t *el, UNUSED fr_time_t now, void *uctx) { unbound_request_t *ur = talloc_get_type_abort(uctx, unbound_request_t); request_t *request = ur->request; @@ -274,7 +274,7 @@ static void xlat_unbound_signal(xlat_ctx_t const *xctx, request_t *request, UNUS { 
unbound_request_t *ur = talloc_get_type_abort(xctx->rctx, unbound_request_t); - if (ur->ev) (void)fr_event_timer_delete(&ur->ev); + if (ur->ev) (void)fr_timer_delete(&ur->ev); RDEBUG2("Forcefully cancelling pending unbound request"); } @@ -408,8 +408,8 @@ static xlat_action_t xlat_unbound(TALLOC_CTX *ctx, fr_dcursor_t *out, return xlat_unbound_resume(ctx, out, &our_xctx, request, in); } - if (fr_event_timer_in(ur, ur->t->ev_b->el, &ur->ev, fr_time_delta_from_msec(inst->timeout), - xlat_unbound_timeout, ur) < 0) { + if (fr_timer_in(ur, ur->t->ev_b->el->tl, &ur->ev, fr_time_delta_from_msec(inst->timeout), + false, xlat_unbound_timeout, ur) < 0) { REDEBUG("Unable to attach unbound timeout_envent"); ub_cancel(t->ev_b->ub, ur->async_id); return XLAT_ACTION_FAIL; diff --git a/src/protocols/radius/client.c b/src/protocols/radius/client.c index 7f8aab3f8a7..79bc5c37352 100644 --- a/src/protocols/radius/client.c +++ b/src/protocols/radius/client.c @@ -45,7 +45,7 @@ static bool radius_client_retry_response(fr_bio_t *bio, fr_bio_retry_entry_t **r static void radius_client_retry_release(fr_bio_t *bio, fr_bio_retry_entry_t *retry_ctx, UNUSED fr_bio_retry_release_reason_t reason); static ssize_t radius_client_retry(fr_bio_t *bio, fr_bio_retry_entry_t *retry_ctx, UNUSED const void *buffer, NDEBUG_UNUSED size_t size); -static void fr_radius_client_bio_connect_timer(fr_event_list_t *el, fr_time_t now, void *uctx); +static void fr_radius_client_bio_connect_timer(fr_timer_list_t *tl, fr_time_t now, void *uctx); fr_bio_packet_t *fr_radius_client_bio_alloc(TALLOC_CTX *ctx, fr_radius_client_config_t *cfg, fr_bio_fd_config_t const *fd_cfg) { @@ -166,7 +166,8 @@ fr_radius_client_fd_bio_t *fr_radius_client_fd_bio_alloc(TALLOC_CTX *ctx, size_t */ if ((my->info.fd_info->type == FR_BIO_FD_CONNECTED) && !my->info.connected && fr_time_delta_ispos(cfg->connection_timeout) && cfg->retry_cfg.el) { - if (fr_event_timer_in(my, cfg->el, &my->common.ev, cfg->connection_timeout, fr_radius_client_bio_connect_timer, my) < 0) { + if (fr_timer_in(my, cfg->el->tl, &my->common.ev, cfg->connection_timeout, false, + fr_radius_client_bio_connect_timer, my) < 0) { talloc_free(my); return NULL; } @@ -688,11 +689,11 @@ int fr_radius_client_bio_force_id(fr_bio_packet_t *bio, int code, int id) /** We failed to connect in the given timeout, the connection is dead. * */ -static void fr_radius_client_bio_connect_timer(NDEBUG_UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) +static void fr_radius_client_bio_connect_timer(NDEBUG_UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx) { fr_radius_client_fd_bio_t *my = talloc_get_type_abort(uctx, fr_radius_client_fd_bio_t); - fr_assert(!my->retry || (my->info.retry_info->el == el)); + fr_assert(!my->retry || (my->info.retry_info->el->tl == tl)); if (my->common.cb.failed) my->common.cb.failed(&my->common); }
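
The hunks above repeat one mechanical conversion at every call site; the sketch below shows it in isolation, assuming the usual freeradius-devel headers. Only fr_timer_t, fr_timer_list_t, fr_timer_in(), fr_timer_delete(), the el->tl accessor and the callback signature are taken from this diff; my_ctx_t and my_periodic() are invented names, and the extra boolean argument is written as false purely because every call site in this change passes false; its exact semantics are defined by the new fr_timer API and are not asserted here.

/*
 *	Old API, for comparison (taken from the '-' lines above):
 *
 *	    static fr_event_timer_t const *ev;
 *	    static void my_periodic(fr_event_list_t *el, fr_time_t now, void *uctx);
 *	    fr_event_timer_in(ctx, el, &ev, delta, my_periodic, uctx);
 *	    fr_event_timer_delete(&ev);
 */
typedef struct {
	fr_timer_t	*ev;		//!< handle is now a plain, non-const fr_timer_t pointer
} my_ctx_t;				/* illustrative type, not part of the tree */

/*
 *	Timer callbacks now receive the fr_timer_list_t that fired them
 *	instead of an fr_event_list_t.
 */
static void my_periodic(fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
{
	my_ctx_t *c = uctx;

	/*
	 *	Re-arm on the same timer list.  The additional boolean argument
	 *	is passed as false, matching every call site in this diff.
	 */
	(void) fr_timer_in(c, tl, &c->ev, fr_time_delta_from_sec(1), false, my_periodic, c);
}

Code that only holds an fr_event_list_t reaches the nested timer list through el->tl (as in conn_init_readable() and the SQL driver notify callbacks), fr_event_timer_at() becomes fr_timer_at() with the same extra argument, and fr_event_timer_delete(&ev) becomes fr_timer_delete(&ev).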