*/
talloc_steal(query->treq, query);
- fr_trunk_request_signal_cancel(query->treq);
+ trunk_request_signal_cancel(query->treq);
/*
* Once we've called cancel, the treq is no
query = fr_ldap_search_alloc(ctx, base_dn, scope, filter, attrs, serverctrls, clientctrls);
- switch (fr_trunk_request_enqueue(&query->treq, ttrunk->trunk, request, query, NULL)) {
- case FR_TRUNK_ENQUEUE_OK:
- case FR_TRUNK_ENQUEUE_IN_BACKLOG:
+ switch (trunk_request_enqueue(&query->treq, ttrunk->trunk, request, query, NULL)) {
+ case TRUNK_ENQUEUE_OK:
+ case TRUNK_ENQUEUE_IN_BACKLOG:
break;
default:
query = fr_ldap_modify_alloc(ctx, dn, mods, serverctrls, clientctrls);
- switch (fr_trunk_request_enqueue(&query->treq, ttrunk->trunk, request, query, NULL)) {
- case FR_TRUNK_ENQUEUE_OK:
- case FR_TRUNK_ENQUEUE_IN_BACKLOG:
+ switch (trunk_request_enqueue(&query->treq, ttrunk->trunk, request, query, NULL)) {
+ case TRUNK_ENQUEUE_OK:
+ case TRUNK_ENQUEUE_IN_BACKLOG:
break;
default:
query = fr_ldap_extended_alloc(ctx, reqoid, reqdata, serverctrls, clientctrls);
- switch (fr_trunk_request_enqueue(&query->treq, ttrunk->trunk, request, query, NULL)) {
- case FR_TRUNK_ENQUEUE_OK:
- case FR_TRUNK_ENQUEUE_IN_BACKLOG:
+ switch (trunk_request_enqueue(&query->treq, ttrunk->trunk, request, query, NULL)) {
+ case TRUNK_ENQUEUE_OK:
+ case TRUNK_ENQUEUE_IN_BACKLOG:
break;
default:
/*
* If the connection this query was using has no pending queries and
- * is no-longer associated with a fr_connection_t then free it
+ * is no-longer associated with a connection_t then free it
*/
if (!query->ldap_conn->conn && (fr_dlist_num_elements(&query->ldap_conn->refs) == 0) &&
(fr_rb_num_elements(query->ldap_conn->queries) == 0)) talloc_free(query->ldap_conn);
fr_ldap_directory_t *directory; //!< The type of directory we're connected to.
fr_ldap_config_t const *config; //!< rlm_ldap connection configuration.
- fr_connection_t *conn; //!< Connection state handle.
+ connection_t *conn; //!< Connection state handle.
fr_ldap_state_t state; //!< LDAP connection state machine.
typedef struct {
fr_rb_tree_t *trunks; //!< Tree of LDAP trunks used by this thread
fr_ldap_config_t *config; //!< Module instance config
- fr_trunk_conf_t *trunk_conf; //!< Module trunk config
- fr_trunk_conf_t *bind_trunk_conf; //!< Trunk config for bind auth trunk
+ trunk_conf_t *trunk_conf; //!< Module trunk config
+ trunk_conf_t *bind_trunk_conf; //!< Trunk config for bind auth trunk
fr_event_list_t *el; //!< Thread event list for callbacks / timeouts
fr_ldap_thread_trunk_t *bind_trunk; //!< LDAP trunk used for bind auths
fr_rb_tree_t *binds; //!< Tree of outstanding bind auths
char const *bind_dn; //!< DN connection is bound as
fr_ldap_config_t config; //!< Config used for this connection
fr_ldap_directory_t *directory; //!< The type of directory we're connected to.
- fr_trunk_t *trunk; //!< Connection trunk
+ trunk_t *trunk; //!< Connection trunk
fr_ldap_thread_t *t; //!< Thread this connection is associated with
fr_event_timer_t const *ev; //!< Event to close the thread when it has been idle.
} fr_ldap_thread_trunk_t;
int msgid; //!< The unique identifier for this query.
///< Uniqueness is only per connection.
- fr_trunk_request_t *treq; //!< Trunk request this query is associated with
+ trunk_request_t *treq; //!< Trunk request this query is associated with
fr_ldap_connection_t *ldap_conn; //!< LDAP connection this query is running on.
fr_event_timer_t const *ev; //!< Event for timing out the query
typedef struct {
fr_rb_node_t node; //!< Entry in the tree of outstanding bind requests.
fr_ldap_thread_t *thread; //!< This bind is being run by.
- fr_trunk_request_t *treq; //!< Trunk request this bind is associated with.
+ trunk_request_t *treq; //!< Trunk request this bind is associated with.
int msgid; //!< libldap msgid for this bind.
request_t *request; //!< this bind relates to.
fr_ldap_bind_type_t type; //!< type of bind.
*/
fr_ldap_connection_t *fr_ldap_connection_alloc(TALLOC_CTX *ctx);
-fr_connection_t *fr_ldap_connection_state_alloc(TALLOC_CTX *ctx, fr_event_list_t *el,
+connection_t *fr_ldap_connection_state_alloc(TALLOC_CTX *ctx, fr_event_list_t *el,
fr_ldap_config_t const *config, char const *log_prefix);
int fr_ldap_connection_configure(fr_ldap_connection_t *c, fr_ldap_config_t const *config);
char const *bind_dn, char const *bind_password,
request_t *request, fr_ldap_config_t const *config);
-fr_trunk_state_t fr_thread_ldap_trunk_state(fr_ldap_thread_t *thread, char const *uri, char const *bind_dn);
+trunk_state_t fr_thread_ldap_trunk_state(fr_ldap_thread_t *thread, char const *uri, char const *bind_dn);
fr_ldap_thread_trunk_t *fr_thread_ldap_bind_trunk_get(fr_ldap_thread_t *thread);
/*
* Bind auth ctx is freed by trunk request free.
*/
- fr_trunk_request_signal_complete(bind_auth_ctx->treq);
+ trunk_request_signal_complete(bind_auth_ctx->treq);
} else {
/*
* If there is no trunk request, the request failed, and we need to free the ctx
RWARN("Cancelling bind auth");
if (bind_auth_ctx->msgid > 0) fr_rb_remove(bind_auth_ctx->thread->binds, bind_auth_ctx);
- fr_trunk_request_signal_cancel(bind_auth_ctx->treq);
+ trunk_request_signal_cancel(bind_auth_ctx->treq);
}
/** Initiate an async LDAP bind for authentication
unlang_action_t fr_ldap_bind_auth_async(request_t *request, fr_ldap_thread_t *thread, char const *bind_dn, char const *password)
{
fr_ldap_bind_auth_ctx_t *bind_auth_ctx;
- fr_trunk_request_t *treq;
+ trunk_request_t *treq;
fr_ldap_thread_trunk_t *ttrunk = fr_thread_ldap_bind_trunk_get(thread);
- fr_trunk_enqueue_t ret;
+ trunk_enqueue_t ret;
if (!ttrunk) {
ERROR("Failed to get trunk connection for LDAP bind");
return UNLANG_ACTION_FAIL;
}
- treq = fr_trunk_request_alloc(ttrunk->trunk, request);
+ treq = trunk_request_alloc(ttrunk->trunk, request);
if (!treq) {
ERROR ("Failed to allocate trunk request for LDAP bind");
return UNLANG_ACTION_FAIL;
.password = password
};
- ret = fr_trunk_request_enqueue(&bind_auth_ctx->treq, ttrunk->trunk, request, bind_auth_ctx, NULL);
+ ret = trunk_request_enqueue(&bind_auth_ctx->treq, ttrunk->trunk, request, bind_auth_ctx, NULL);
switch (ret) {
- case FR_TRUNK_ENQUEUE_OK:
- case FR_TRUNK_ENQUEUE_IN_BACKLOG:
+ case TRUNK_ENQUEUE_OK:
+ case TRUNK_ENQUEUE_IN_BACKLOG:
break;
default:
ERROR("Failed to enqueue bind request");
- fr_trunk_request_free(&treq);
+ trunk_request_free(&treq);
return UNLANG_ACTION_FAIL;
}
* re-parent the connection to the NULL ctx so that it remains
* until all the queries have been dealt with.
*/
-static void _ldap_connection_close_watch(fr_connection_t *conn, UNUSED fr_connection_state_t prev,
- UNUSED fr_connection_state_t state, void *uctx)
+static void _ldap_connection_close_watch(connection_t *conn, UNUSED connection_state_t prev,
+ UNUSED connection_state_t state, void *uctx)
{
fr_ldap_connection_t *ldap_conn = talloc_get_type_abort(uctx, fr_ldap_connection_t);
* @param[in] conn Being initialised.
* @param[in] uctx Our LDAP connection handle (a #fr_ldap_connection_t).
* @return
- * - FR_CONNECTION_STATE_CONNECTING on success.
- * - FR_CONNECTION_STATE_FAILED on failure.
+ * - CONNECTION_STATE_CONNECTING on success.
+ * - CONNECTION_STATE_FAILED on failure.
*/
-static fr_connection_state_t _ldap_connection_init(void **h, fr_connection_t *conn, void *uctx)
+static connection_state_t _ldap_connection_init(void **h, connection_t *conn, void *uctx)
{
fr_ldap_config_t const *config = uctx;
fr_ldap_connection_t *c;
if (fr_ldap_connection_configure(c, config) < 0) {
error:
talloc_free(c);
- return FR_CONNECTION_STATE_FAILED;
+ return connection_STATE_FAILED;
}
/* Don't block */
state = fr_ldap_state_next(c);
if (state == FR_LDAP_STATE_ERROR) goto error;
- fr_connection_add_watch_pre(conn, FR_CONNECTION_STATE_CLOSED, _ldap_connection_close_watch, true, c);
+ connection_add_watch_pre(conn, connection_STATE_CLOSED, _ldap_connection_close_watch, true, c);
*h = c; /* Set the handle */
- return FR_CONNECTION_STATE_CONNECTING;
+ return connection_STATE_CONNECTING;
}
/** Alloc a self re-establishing connection to an LDAP server
* @param[in] config to use to bind the connection to an LDAP server.
* @param[in] log_prefix to prepend to connection state messages.
*/
-fr_connection_t *fr_ldap_connection_state_alloc(TALLOC_CTX *ctx, fr_event_list_t *el,
+connection_t *fr_ldap_connection_state_alloc(TALLOC_CTX *ctx, fr_event_list_t *el,
fr_ldap_config_t const *config, char const *log_prefix)
{
- fr_connection_t *conn;
+ connection_t *conn;
- conn = fr_connection_alloc(ctx, el,
- &(fr_connection_funcs_t){
+ conn = connection_alloc(ctx, el,
+ &(connection_funcs_t){
.init = _ldap_connection_init,
.close = _ldap_connection_close
},
- &(fr_connection_conf_t){
+ &(connection_conf_t){
.connection_timeout = config->net_timeout,
.reconnection_delay = config->reconnection_delay
},
*
* Ensure the request is removed from the list of outstanding requests
*/
-static void ldap_request_cancel(UNUSED fr_connection_t *conn, void *preq, UNUSED fr_trunk_cancel_reason_t reason,
+static void ldap_request_cancel(UNUSED connection_t *conn, void *preq, UNUSED trunk_cancel_reason_t reason,
UNUSED void *uctx) {
fr_ldap_query_t *query = talloc_get_type_abort(preq, fr_ldap_query_t);
* @param[in] el For timer management.
* @param[in] tconn The trunk connection handle
* @param[in] conn The specific connection queries will be cancelled on
- * @param[in] uctx Context provided to fr_trunk_alloc
+ * @param[in] uctx Context provided to trunk_alloc
*/
-static void ldap_request_cancel_mux(UNUSED fr_event_list_t *el, fr_trunk_connection_t *tconn,
- fr_connection_t *conn, UNUSED void *uctx)
+static void ldap_request_cancel_mux(UNUSED fr_event_list_t *el, trunk_connection_t *tconn,
+ connection_t *conn, UNUSED void *uctx)
{
- fr_trunk_request_t *treq;
+ trunk_request_t *treq;
fr_ldap_connection_t *ldap_conn = talloc_get_type_abort(conn->h, fr_ldap_connection_t);
fr_ldap_query_t *query;
- while ((fr_trunk_connection_pop_cancellation(&treq, tconn)) == 0) {
+ while ((trunk_connection_pop_cancellation(&treq, tconn)) == 0) {
query = talloc_get_type_abort(treq->preq, fr_ldap_query_t);
ldap_abandon_ext(ldap_conn->handle, query->msgid, NULL, NULL);
- fr_trunk_request_signal_cancel_complete(treq);
+ trunk_request_signal_cancel_complete(treq);
}
}
*
*/
static void ldap_request_fail(request_t *request, void *preq, UNUSED void *rctx,
- UNUSED fr_trunk_request_state_t state, UNUSED void *uctx)
+ UNUSED trunk_request_state_t state, UNUSED void *uctx)
{
fr_ldap_query_t *query = talloc_get_type_abort(preq, fr_ldap_query_t);
*/
static void ldap_conn_readable(UNUSED fr_event_list_t *el, UNUSED int fd, UNUSED int flags, void *uctx)
{
- fr_trunk_connection_t *tconn = talloc_get_type_abort(uctx, fr_trunk_connection_t);
+ trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
- fr_trunk_connection_signal_readable(tconn);
+ trunk_connection_signal_readable(tconn);
}
*/
static void ldap_conn_writable(UNUSED fr_event_list_t *el, UNUSED int fd, UNUSED int flags, void *uctx)
{
- fr_trunk_connection_t *tconn = talloc_get_type_abort(uctx, fr_trunk_connection_t);
+ trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
- fr_trunk_connection_signal_writable(tconn);
+ trunk_connection_signal_writable(tconn);
}
*/
static void ldap_conn_error(UNUSED fr_event_list_t *el, UNUSED int fd, UNUSED int flags, int fd_errno, void *uctx)
{
- fr_trunk_connection_t *tconn = talloc_get_type_abort(uctx, fr_trunk_connection_t);
+ trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
ERROR("%s - Connection failed: %s", tconn->conn->name, fr_syserror(fd_errno));
- fr_connection_signal_reconnect(tconn->conn, FR_CONNECTION_FAILED);
+ connection_signal_reconnect(tconn->conn, connection_FAILED);
}
/** Setup callbacks requested by LDAP trunk connections
* @param[in] conn Individual connection callbacks are to be installed for.
* @param[in] el The event list to install events in.
* @param[in] notify_on The types of event the trunk wants to be notified on.
- * @param[in] uctx Context provided to fr_trunk_alloc.
+ * @param[in] uctx Context provided to trunk_alloc.
*/
-static void ldap_trunk_connection_notify(fr_trunk_connection_t *tconn, fr_connection_t *conn,
+static void ldap_trunk_connection_notify(trunk_connection_t *tconn, connection_t *conn,
fr_event_list_t *el,
- fr_trunk_connection_event_t notify_on, UNUSED void *uctx)
+ trunk_connection_event_t notify_on, UNUSED void *uctx)
{
fr_ldap_connection_t *ldap_conn = talloc_get_type_abort(conn->h, fr_ldap_connection_t);
fr_event_fd_cb_t read_fn = NULL;
fr_event_fd_cb_t write_fn = NULL;
switch (notify_on) {
- case FR_TRUNK_CONN_EVENT_NONE:
+ case TRUNK_CONN_EVENT_NONE:
fr_event_fd_delete(el, ldap_conn->fd, FR_EVENT_FILTER_IO);
return;
- case FR_TRUNK_CONN_EVENT_READ:
+ case TRUNK_CONN_EVENT_READ:
read_fn = ldap_conn_readable;
break;
- case FR_TRUNK_CONN_EVENT_WRITE:
+ case TRUNK_CONN_EVENT_WRITE:
write_fn = ldap_conn_writable;
break;
- case FR_TRUNK_CONN_EVENT_BOTH:
+ case TRUNK_CONN_EVENT_BOTH:
read_fn = ldap_conn_readable;
write_fn = ldap_conn_writable;
break;
ldap_conn_error,
tconn) < 0) {
PERROR("Failed inserting FD event");
- fr_trunk_connection_signal_reconnect(tconn, FR_CONNECTION_FAILED);
+ trunk_connection_signal_reconnect(tconn, connection_FAILED);
}
}
* @param[in] el Event list which will be used for I/O and timer events.
* @param[in] conn_conf Configuration of the connection.
* @param[in] log_prefix What to prefix log messages with.
- * @param[in] uctx User context passed to fr_trunk_alloc.
+ * @param[in] uctx User context passed to trunk_alloc.
*/
-static fr_connection_t *ldap_trunk_connection_alloc(fr_trunk_connection_t *tconn, fr_event_list_t *el,
- UNUSED fr_connection_conf_t const *conn_conf,
+static connection_t *ldap_trunk_connection_alloc(trunk_connection_t *tconn, fr_event_list_t *el,
+ UNUSED connection_conf_t const *conn_conf,
char const *log_prefix, void *uctx)
{
fr_ldap_thread_trunk_t *thread_trunk = talloc_get_type_abort(uctx, fr_ldap_thread_trunk_t);
* @param[in] el Event list for timers.
* @param[in] tconn Trunk handle.
* @param[in] conn on which to send the queries
- * @param[in] uctx User context passed to fr_trunk_alloc
+ * @param[in] uctx User context passed to trunk_alloc
*/
-static void ldap_trunk_request_mux(UNUSED fr_event_list_t *el, fr_trunk_connection_t *tconn,
- fr_connection_t *conn, UNUSED void *uctx)
+static void ldap_trunk_request_mux(UNUSED fr_event_list_t *el, trunk_connection_t *tconn,
+ connection_t *conn, UNUSED void *uctx)
{
fr_ldap_connection_t *ldap_conn = talloc_get_type_abort(conn->h, fr_ldap_connection_t);
- fr_trunk_request_t *treq;
+ trunk_request_t *treq;
LDAPURLDesc *referral_url = NULL;
fr_ldap_query_t *query = NULL;
fr_ldap_rcode_t status;
- while (fr_trunk_connection_pop_request(&treq, tconn) == 0) {
+ while (trunk_connection_pop_request(&treq, tconn) == 0) {
LDAPControl *our_serverctrls[LDAP_MAX_CONTROLS + 1];
LDAPControl *our_clientctrls[LDAP_MAX_CONTROLS + 1];
status = LDAP_PROC_ERROR;
ERROR("Invalid LDAP query for trunk connection");
error:
- fr_trunk_request_signal_fail(query->treq);
- if (status == LDAP_PROC_BAD_CONN) fr_trunk_connection_signal_reconnect(tconn,
- FR_CONNECTION_FAILED);
+ trunk_request_signal_fail(query->treq);
+ if (status == LDAP_PROC_BAD_CONN) trunk_connection_signal_reconnect(tconn,
+ connection_FAILED);
continue;
}
*/
fr_rb_insert(query->ldap_conn->queries, query);
- fr_trunk_request_signal_sent(treq);
+ trunk_request_signal_sent(treq);
}
}
* @param[in] conn Connection handle for these results.
* @param[in] uctx Thread specific trunk structure - contains tree of pending queries.
*/
-static void ldap_trunk_request_demux(fr_event_list_t *el, fr_trunk_connection_t *tconn, fr_connection_t *conn, void *uctx)
+static void ldap_trunk_request_demux(fr_event_list_t *el, trunk_connection_t *tconn, connection_t *conn, void *uctx)
{
fr_ldap_connection_t *ldap_conn = talloc_get_type_abort(conn->h, fr_ldap_connection_t);
fr_ldap_thread_trunk_t *ttrunk = talloc_get_type_abort(uctx, fr_ldap_thread_trunk_t);
fr_ldap_query_t find = { .msgid = -1 }, *query = NULL;
request_t *request;
bool really_no_result = false;
- fr_trunk_request_t *treq;
+ trunk_request_t *treq;
/*
* Reset the idle timeout event
rcode = fr_ldap_error_check(NULL, ldap_conn, NULL, NULL);
if (rcode == LDAP_PROC_BAD_CONN) {
ERROR("Bad LDAP connection");
- fr_connection_signal_reconnect(tconn->conn, FR_CONNECTION_FAILED);
+ connection_signal_reconnect(tconn->conn, connection_FAILED);
}
return;
*/
treq = query->treq;
query->treq = NULL;
- fr_trunk_request_signal_complete(treq);
+ trunk_request_signal_complete(treq);
} while (1);
}
found->uri = found->config.server;
found->bind_dn = found->config.admin_identity;
- found->trunk = fr_trunk_alloc(found, thread->el,
- &(fr_trunk_io_funcs_t){
+ found->trunk = trunk_alloc(found, thread->el,
+ &(trunk_io_funcs_t){
.connection_alloc = ldap_trunk_connection_alloc,
.connection_notify = ldap_trunk_connection_notify,
.request_mux = ldap_trunk_request_mux,
* @param[in] bind_dn to make the connection as
* @return
* - State of a trunk matching the URI and bind DN
- * - FR_TRUNK_STATE_MAX if no matching trunk
+ * - TRUNK_STATE_MAX if no matching trunk
*/
-fr_trunk_state_t fr_thread_ldap_trunk_state(fr_ldap_thread_t *thread, char const *uri, char const *bind_dn)
+trunk_state_t fr_thread_ldap_trunk_state(fr_ldap_thread_t *thread, char const *uri, char const *bind_dn)
{
fr_ldap_thread_trunk_t *found, find = {.uri = uri, .bind_dn = bind_dn};
found = fr_rb_find(thread->trunks, &find);
- return (found) ? found->trunk->state : FR_TRUNK_STATE_MAX;
+ return (found) ? found->trunk->state : TRUNK_STATE_MAX;
}
/** Take pending LDAP bind auths from the queue and send them.
* @param[in] el Event list for timers.
* @param[in] tconn Trunk handle.
* @param[in] conn on which to send the queries
- * @param[in] uctx User context passed to fr_trunk_alloc
+ * @param[in] uctx User context passed to trunk_alloc
*/
-static void ldap_trunk_bind_auth_mux(UNUSED fr_event_list_t *el, fr_trunk_connection_t *tconn,
- fr_connection_t *conn, void *uctx)
+static void ldap_trunk_bind_auth_mux(UNUSED fr_event_list_t *el, trunk_connection_t *tconn,
+ connection_t *conn, void *uctx)
{
fr_ldap_connection_t *ldap_conn = talloc_get_type_abort(conn->h, fr_ldap_connection_t);
fr_ldap_thread_trunk_t *ttrunk = talloc_get_type_abort(uctx, fr_ldap_thread_trunk_t);
fr_ldap_thread_t *thread = ttrunk->t;
- fr_trunk_request_t *treq;
+ trunk_request_t *treq;
fr_ldap_bind_auth_ctx_t *bind = NULL;
int ret = 0;
struct berval cred;
request_t *request;
- if (fr_trunk_connection_pop_request(&treq, tconn) != 0) return;
+ if (trunk_connection_pop_request(&treq, tconn) != 0) return;
/* Pacify clang scan */
if (!treq) return;
* the different states are handled by the resume function which then
* marks the request as complete triggering the tidy up.
*/
- fr_trunk_request_signal_sent(treq);
+ trunk_request_signal_sent(treq);
}
/** Read LDAP bind auth responses
* @param[in] conn Connection handle for these results.
* @param[in] uctx Thread specific trunk structure - contains tree of pending queries.
*/
-static void ldap_trunk_bind_auth_demux(UNUSED fr_event_list_t *el, UNUSED fr_trunk_connection_t *tconn,
- fr_connection_t *conn, void *uctx)
+static void ldap_trunk_bind_auth_demux(UNUSED fr_event_list_t *el, UNUSED trunk_connection_t *tconn,
+ connection_t *conn, void *uctx)
{
fr_ldap_connection_t *ldap_conn = talloc_get_type_abort(conn->h, fr_ldap_connection_t);
fr_ldap_thread_trunk_t *ttrunk = talloc_get_type_abort(uctx, fr_ldap_thread_trunk_t);
* @param[in] el For timer management.
* @param[in] tconn The trunk connection handle
* @param[in] conn The specific connection binds will be cancelled on
- * @param[in] uctx Context provided to fr_trunk_alloc
+ * @param[in] uctx Context provided to trunk_alloc
*/
-static void ldap_bind_auth_cancel_mux(UNUSED fr_event_list_t *el, fr_trunk_connection_t *tconn,
- fr_connection_t *conn, UNUSED void *uctx)
+static void ldap_bind_auth_cancel_mux(UNUSED fr_event_list_t *el, trunk_connection_t *tconn,
+ connection_t *conn, UNUSED void *uctx)
{
- fr_trunk_request_t *treq;
+ trunk_request_t *treq;
fr_ldap_connection_t *ldap_conn = talloc_get_type_abort(conn->h, fr_ldap_connection_t);
fr_ldap_bind_auth_ctx_t *bind;
- while ((fr_trunk_connection_pop_cancellation(&treq, tconn)) == 0) {
+ while ((trunk_connection_pop_cancellation(&treq, tconn)) == 0) {
bind = talloc_get_type_abort(treq->preq, fr_ldap_bind_auth_ctx_t);
#ifdef WITH_SASL
if (bind->type == LDAP_BIND_SASL) {
* seems to leave the connection in an unpredictable state
* so safer to restart.
*/
- fr_trunk_connection_signal_reconnect(tconn, FR_CONNECTION_FAILED);
+ trunk_connection_signal_reconnect(tconn, connection_FAILED);
} else {
#endif
ldap_abandon_ext(ldap_conn->handle, bind->msgid, NULL, NULL);
#ifdef WITH_SASL
}
#endif
- fr_trunk_request_signal_cancel_complete(treq);
+ trunk_request_signal_cancel_complete(treq);
}
}
*
*/
static void ldap_trunk_bind_auth_fail(request_t *request, void *preq, UNUSED void *rctx,
- UNUSED fr_trunk_request_state_t state, UNUSED void *uctx)
+ UNUSED trunk_request_state_t state, UNUSED void *uctx)
{
fr_ldap_bind_auth_ctx_t *bind = talloc_get_type_abort(preq, fr_ldap_bind_auth_ctx_t);
ttrunk->uri = ttrunk->config.server;
ttrunk->bind_dn = ttrunk->config.admin_identity;
- ttrunk->trunk = fr_trunk_alloc(ttrunk, thread->el,
- &(fr_trunk_io_funcs_t){
+ ttrunk->trunk = trunk_alloc(ttrunk, thread->el,
+ &(trunk_io_funcs_t){
.connection_alloc = ldap_trunk_connection_alloc,
.connection_notify = ldap_trunk_connection_notify,
.request_mux = ldap_trunk_bind_auth_mux,
{
fr_ldap_query_t *query;
static char const *attrs[] = LDAP_DIRECTORY_ATTRS;
- fr_trunk_request_t *treq;
+ trunk_request_t *treq;
ttrunk->directory = talloc_zero(ctx, fr_ldap_directory_t);
if (!ttrunk->directory) return -1;
- treq = fr_trunk_request_alloc(ttrunk->trunk, NULL);
+ treq = trunk_request_alloc(ttrunk->trunk, NULL);
if (!treq) return -1;
query = fr_ldap_search_alloc(treq, "", LDAP_SCOPE_BASE, "(objectclass=*)", attrs, NULL, NULL);
query->parser = ldap_trunk_directory_alloc_read;
query->treq = treq;
- fr_trunk_request_enqueue(&query->treq, ttrunk->trunk, NULL, query, ttrunk->directory);
+ trunk_request_enqueue(&query->treq, ttrunk->trunk, NULL, query, ttrunk->directory);
return 0;
}
if (!edir_ctx->query || !edir_ctx->query->treq) return;
- fr_trunk_request_signal_cancel(edir_ctx->query->treq);
+ trunk_request_signal_cancel(edir_ctx->query->treq);
}
/** Initiate retrieval of the universal password from Novell eDirectory
/** Callback to send LDAP referral queries when a trunk becomes active
*
*/
-static void _ldap_referral_send(UNUSED fr_trunk_t *trunk, UNUSED fr_trunk_state_t prev,
- UNUSED fr_trunk_state_t state, void *uctx)
+static void _ldap_referral_send(UNUSED trunk_t *trunk, UNUSED trunk_state_t prev,
+ UNUSED trunk_state_t state, void *uctx)
{
fr_ldap_referral_t *referral = talloc_get_type_abort(uctx, fr_ldap_referral_t);
fr_ldap_query_t *query = referral->query;
* Enqueue referral query on active trunk connection
*/
query->referral = referral;
- switch (fr_trunk_request_enqueue(&query->treq, referral->ttrunk->trunk, request, query, NULL)) {
- case FR_TRUNK_ENQUEUE_OK:
- case FR_TRUNK_ENQUEUE_IN_BACKLOG:
+ switch (trunk_request_enqueue(&query->treq, referral->ttrunk->trunk, request, query, NULL)) {
+ case TRUNK_ENQUEUE_OK:
+ case TRUNK_ENQUEUE_IN_BACKLOG:
break;
default:
fr_ldap_referral_t *referral;
LDAPURLDesc temp_desc;
- fr_trunk_request_signal_complete(query->treq);
+ trunk_request_signal_complete(query->treq);
query->treq = NULL;
if (query->referral_depth > 1) {
fr_dlist_insert_tail(&query->referrals, referral);
if (fr_thread_ldap_trunk_state(t, referral->host_uri,
- referral->identity) != FR_TRUNK_STATE_ACTIVE) {
+ referral->identity) != TRUNK_STATE_ACTIVE) {
ROPTIONAL(RDEBUG3, DEBUG3,
"No active LDAP trunk for URI %s, bound as %s",
referral->host_uri, referral->identity);
* We have an active trunk enqueue the request
*/
query->referral = referral;
- switch (fr_trunk_request_enqueue(&query->treq, ttrunk->trunk, request, query, NULL)) {
- case FR_TRUNK_ENQUEUE_OK:
- case FR_TRUNK_ENQUEUE_IN_BACKLOG:
+ switch (trunk_request_enqueue(&query->treq, ttrunk->trunk, request, query, NULL)) {
+ case TRUNK_ENQUEUE_OK:
+ case TRUNK_ENQUEUE_IN_BACKLOG:
break;
default:
continue;
}
referral->ttrunk = ttrunk;
- fr_trunk_add_watch(ttrunk->trunk, FR_TRUNK_STATE_ACTIVE, _ldap_referral_send, true, referral);
+ trunk_add_watch(ttrunk->trunk, TRUNK_STATE_ACTIVE, _ldap_referral_send, true, referral);
ROPTIONAL(RDEBUG4, DEBUG4, "Watch inserted to send referral query on active trunk");
}
fr_ldap_referral_t *referral = NULL;
fr_ldap_thread_trunk_t *ttrunk;
- fr_trunk_request_signal_complete(query->treq);
+ trunk_request_signal_complete(query->treq);
query->treq = NULL;
while ((referral = fr_dlist_next(&query->referrals, referral))) {
if (fr_thread_ldap_trunk_state(t, referral->host_uri,
- referral->identity) != FR_TRUNK_STATE_ACTIVE) {
+ referral->identity) != TRUNK_STATE_ACTIVE) {
ROPTIONAL(RDEBUG3, DEBUG3, "No active LDAP trunk for URI %s, bind DN %s",
referral->host_uri, referral->identity);
continue;
* We have an active trunk enqueue the request
*/
query->referral = referral;
- switch(fr_trunk_request_enqueue(&query->treq, ttrunk->trunk, request, query, NULL)) {
- case FR_TRUNK_ENQUEUE_OK:
- case FR_TRUNK_ENQUEUE_IN_BACKLOG:
+ switch(trunk_request_enqueue(&query->treq, ttrunk->trunk, request, query, NULL)) {
+ case TRUNK_ENQUEUE_OK:
+ case TRUNK_ENQUEUE_IN_BACKLOG:
break;
default:
continue;
}
referral->ttrunk = ttrunk;
- fr_trunk_add_watch(ttrunk->trunk, FR_TRUNK_STATE_ACTIVE, _ldap_referral_send, true, referral);
+ trunk_add_watch(ttrunk->trunk, TRUNK_STATE_ACTIVE, _ldap_referral_send, true, referral);
ROPTIONAL(RDEBUG4, DEBUG4, "Watch inserted to send referral query on active trunk");
}
RWARN("Cancelling SASL bind auth");
if (bind_auth_ctx->msgid > 0) fr_rb_remove(bind_auth_ctx->thread->binds, bind_auth_ctx);
- fr_trunk_request_signal_cancel(bind_auth_ctx->treq);
+ trunk_request_signal_cancel(bind_auth_ctx->treq);
}
/** Handle the return code from parsed LDAP results to set the module rcode
break;
case LDAP_PROC_CONTINUE:
- if (fr_trunk_request_requeue(bind_auth_ctx->treq) != FR_TRUNK_ENQUEUE_OK) {
+ if (trunk_request_requeue(bind_auth_ctx->treq) != TRUNK_ENQUEUE_OK) {
ret = LDAP_PROC_ERROR;
break;
}
/*
* Will free bind_auth_ctx
*/
- fr_trunk_request_signal_complete(bind_auth_ctx->treq);
+ trunk_request_signal_complete(bind_auth_ctx->treq);
} else {
/*
* If there is no trunk request, the request failed, and we need to free the ctx
char const *identity, char const *password, char const *proxy, char const *realm)
{
fr_ldap_bind_auth_ctx_t *bind_auth_ctx;
- fr_trunk_request_t *treq;
+ trunk_request_t *treq;
fr_ldap_thread_trunk_t *ttrunk = fr_thread_ldap_bind_trunk_get(thread);
- fr_trunk_enqueue_t ret;
+ trunk_enqueue_t ret;
if (!ttrunk) {
ERROR("Failed to get trunk connection for LDAP bind");
return UNLANG_ACTION_FAIL;
}
- treq = fr_trunk_request_alloc(ttrunk->trunk, request);
+ treq = trunk_request_alloc(ttrunk->trunk, request);
if (!treq) {
ERROR("Failed to allocate trunk request for LDAP bind");
return UNLANG_ACTION_FAIL;
.realm = realm,
};
- ret = fr_trunk_request_enqueue(&bind_auth_ctx->treq, ttrunk->trunk, request, bind_auth_ctx, NULL);
+ ret = trunk_request_enqueue(&bind_auth_ctx->treq, ttrunk->trunk, request, bind_auth_ctx, NULL);
switch (ret) {
- case FR_TRUNK_ENQUEUE_OK:
- case FR_TRUNK_ENQUEUE_IN_BACKLOG:
+ case TRUNK_ENQUEUE_OK:
+ case TRUNK_ENQUEUE_IN_BACKLOG:
break;
default:
ERROR("Failed to enqueue bind request");
- fr_trunk_request_free(&treq);
+ trunk_request_free(&treq);
return UNLANG_ACTION_FAIL;
}
*/
case FR_LDAP_STATE_BIND:
STATE_TRANSITION(FR_LDAP_STATE_RUN);
- fr_connection_signal_connected(c->conn);
+ connection_signal_connected(c->conn);
break;
/*
case FR_LDAP_STATE_RUN: /* There's no next state for run, so this an error */
case FR_LDAP_STATE_ERROR:
STATE_TRANSITION(FR_LDAP_STATE_INIT);
- fr_connection_signal_reconnect(c->conn, FR_CONNECTION_FAILED);
+ connection_signal_reconnect(c->conn, connection_FAILED);
/*
* The old connection has been freed, so specifically return the INIT state
*/
STATE_TRANSITION(FR_LDAP_STATE_ERROR);
fr_ldap_state_next(c);
}
-
*/
static void _redis_disconnected(redisAsyncContext const *ac, UNUSED int status)
{
- fr_connection_t *conn = talloc_get_type_abort(ac->data, fr_connection_t);
+ connection_t *conn = talloc_get_type_abort(ac->data, connection_t);
fr_redis_handle_t *h = conn->h;
/*
DEBUG4("Signalled by hiredis, connection disconnected");
- fr_connection_signal_reconnect(conn, FR_CONNECTION_FAILED);
+ connection_signal_reconnect(conn, connection_FAILED);
}
/** Called by hiredis to indicate the connection is live
*/
static void _redis_connected(redisAsyncContext const *ac, UNUSED int status)
{
- fr_connection_t *conn = talloc_get_type_abort(ac->data, fr_connection_t);
+ connection_t *conn = talloc_get_type_abort(ac->data, connection_t);
DEBUG4("Signalled by hiredis, connection is open");
- fr_connection_signal_connected(conn);
+ connection_signal_connected(conn);
}
/** Redis FD became readable
*/
static void _redis_io_service_readable(UNUSED fr_event_list_t *el, int fd, UNUSED int flags, void *uctx)
{
- fr_connection_t const *conn = talloc_get_type_abort_const(uctx, fr_connection_t);
+ connection_t const *conn = talloc_get_type_abort_const(uctx, connection_t);
fr_redis_handle_t *h = conn->h;
DEBUG4("redis handle %p - FD %i now readable", h, fd);
*/
static void _redis_io_service_writable(UNUSED fr_event_list_t *el, int fd, UNUSED int flags, void *uctx)
{
- fr_connection_t const *conn = talloc_get_type_abort_const(uctx, fr_connection_t);
+ connection_t const *conn = talloc_get_type_abort_const(uctx, connection_t);
fr_redis_handle_t *h = conn->h;
DEBUG4("redis handle %p - FD %i now writable", h, fd);
static void _redis_io_service_errored(UNUSED fr_event_list_t *el, int fd, UNUSED int flags,
int fd_errno, void *uctx)
{
- fr_connection_t *conn = talloc_get_type_abort(uctx, fr_connection_t);
+ connection_t *conn = talloc_get_type_abort(uctx, connection_t);
fr_redis_handle_t *h = conn->h;
DEBUG4("redis handle %p - FD %i errored: %s", h, fd, fr_syserror(fd_errno));
/*
* Connection state machine will handle reconnecting
*/
- fr_connection_signal_reconnect(conn, FR_CONNECTION_FAILED);
+ connection_signal_reconnect(conn, connection_FAILED);
}
/** Deal with the method hiredis uses to register/unregister interest in a file descriptor
*
*/
-static void _redis_io_common(fr_connection_t *conn, fr_redis_handle_t *h, bool read, bool write)
+static void _redis_io_common(connection_t *conn, fr_redis_handle_t *h, bool read, bool write)
{
redisContext *c = &(h->ac->c);
fr_event_list_t *el = conn->el;
*/
static void _redis_io_add_read(void *uctx)
{
- fr_connection_t *conn = talloc_get_type_abort(uctx, fr_connection_t);
+ connection_t *conn = talloc_get_type_abort(uctx, connection_t);
fr_redis_handle_t *h = conn->h;
_redis_io_common(conn, h, true, h->write_set);
*/
static void _redis_io_del_read(void *uctx)
{
- fr_connection_t *conn = talloc_get_type_abort(uctx, fr_connection_t);
+ connection_t *conn = talloc_get_type_abort(uctx, connection_t);
fr_redis_handle_t *h = conn->h;
_redis_io_common(conn, h, false, h->write_set);
*/
static void _redis_io_add_write(void *uctx)
{
- fr_connection_t *conn = talloc_get_type_abort(uctx, fr_connection_t);
+ connection_t *conn = talloc_get_type_abort(uctx, connection_t);
fr_redis_handle_t *h = conn->h;
_redis_io_common(conn, h, h->read_set, true);
*/
static void _redis_io_del_write(void *uctx)
{
- fr_connection_t *conn = talloc_get_type_abort(uctx, fr_connection_t);
+ connection_t *conn = talloc_get_type_abort(uctx, connection_t);
fr_redis_handle_t *h = conn->h;
_redis_io_common(conn, h, h->read_set, false);
*/
static void _redis_io_service_timer_expired(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
{
- fr_connection_t const *conn = talloc_get_type_abort_const(uctx, fr_connection_t);
+ connection_t const *conn = talloc_get_type_abort_const(uctx, connection_t);
fr_redis_handle_t *h = conn->h;
DEBUG4("redis handle %p - Timeout", h);
*/
static void _redis_io_timer_modify(void *uctx, struct timeval tv)
{
- fr_connection_t *conn = talloc_get_type_abort(uctx, fr_connection_t);
+ connection_t *conn = talloc_get_type_abort(uctx, connection_t);
fr_redis_handle_t *h = conn->h;
fr_time_delta_t timeout;
*/
static void _redis_io_free(void *uctx)
{
- fr_connection_t *conn = talloc_get_type_abort(uctx, fr_connection_t);
+ connection_t *conn = talloc_get_type_abort(uctx, connection_t);
fr_redis_handle_t *h = conn->h;
DEBUG4("redis handle %p - Freed", h);
/** Configures async I/O callbacks for an existing redisAsyncContext
*
*/
-static int fr_redis_io_setup(redisAsyncContext *ac, fr_connection_t const *conn)
+static int fr_redis_io_setup(redisAsyncContext *ac, connection_t const *conn)
{
if (ac->ev.data != NULL) return REDIS_ERR;
* signalling the connection state machine.
* @param[in] uctx User context.
* @return
- * - #FR_CONNECTION_STATE_CONNECTING if a file descriptor was successfully created.
- * - #FR_CONNECTION_STATE_FAILED if we could not open a valid handle.
+ * - #CONNECTION_STATE_CONNECTING if a file descriptor was successfully created.
+ * - #CONNECTION_STATE_FAILED if we could not open a valid handle.
*/
-static fr_connection_state_t _redis_io_connection_init(void **h_out, fr_connection_t *conn, void *uctx)
+static connection_state_t _redis_io_connection_init(void **h_out, connection_t *conn, void *uctx)
{
fr_redis_io_conf_t *conf = uctx;
char const *host = conf->hostname;
h->ac = redisAsyncConnect(host, port);
if (!h->ac) {
ERROR("Failed allocating handle for %s:%u", host, port);
- return FR_CONNECTION_STATE_FAILED;
+ return CONNECTION_STATE_FAILED;
}
if (h->ac->err) {
ERROR("Failed allocating handle for %s:%u: %s", host, port, h->ac->errstr);
error:
redisAsyncFree(h->ac);
- return FR_CONNECTION_STATE_FAILED;
+ return CONNECTION_STATE_FAILED;
}
/*
fr_dlist_talloc_init(&h->ignore, fr_redis_sqn_ignore_t, entry);
- return FR_CONNECTION_STATE_CONNECTING;
+ return CONNECTION_STATE_CONNECTING;
}
/** Gracefully signal that the connection should shutdown
*
*/
-static fr_connection_state_t _redis_io_connection_shutdown(UNUSED fr_event_list_t *el, void *h, UNUSED void *uctx)
+static connection_state_t _redis_io_connection_shutdown(UNUSED fr_event_list_t *el, void *h, UNUSED void *uctx)
{
fr_redis_handle_t *our_h = talloc_get_type_abort(h, fr_redis_handle_t);
redisAsyncDisconnect(our_h->ac); /* Should not free the handle */
- return FR_CONNECTION_STATE_SHUTDOWN;
+ return CONNECTION_STATE_SHUTDOWN;
}
/** Notification that the connection has errored and must be closed
/** Allocate an async redis I/O connection
*
*/
-fr_connection_t *fr_redis_connection_alloc(TALLOC_CTX *ctx, fr_event_list_t *el,
- fr_connection_conf_t const *conn_conf, fr_redis_io_conf_t const *io_conf,
+connection_t *fr_redis_connection_alloc(TALLOC_CTX *ctx, fr_event_list_t *el,
+ connection_conf_t const *conn_conf, fr_redis_io_conf_t const *io_conf,
char const *log_prefix)
{
- fr_connection_t *conn;
+ connection_t *conn;
/*
* We don't specify an open callback
* as hiredis handles switching over
* within hireds, and calls us when
* the connection is open.
*/
- conn = fr_connection_alloc(ctx, el,
- &(fr_connection_funcs_t){
+ conn = connection_alloc(ctx, el,
+ &(connection_funcs_t){
.init = _redis_io_connection_init,
.close = _redis_io_connection_close,
.shutdown = _redis_io_connection_shutdown
* @param[in] conn To retrieve async ctx from.
* @return The async ctx.
*/
-redisAsyncContext *fr_redis_connection_get_async_ctx(fr_connection_t *conn)
+redisAsyncContext *fr_redis_connection_get_async_ctx(connection_t *conn)
{
fr_redis_handle_t *h = conn->h;
return h->ac;
*
* There are three layers of wrapping structures
*
- * fr_connection_t -> fr_redis_handle_t -> redisAsyncContext
+ * connection_t -> fr_redis_handle_t -> redisAsyncContext
*
*/
typedef struct {
return false;
}
-fr_connection_t *fr_redis_connection_alloc(TALLOC_CTX *ctx, fr_event_list_t *el,
- fr_connection_conf_t const *conn_conf,
+connection_t *fr_redis_connection_alloc(TALLOC_CTX *ctx, fr_event_list_t *el,
+ connection_conf_t const *conn_conf,
fr_redis_io_conf_t const *io_conf,
char const *log_prefix);
-redisAsyncContext *fr_redis_connection_get_async_ctx(fr_connection_t *conn);
+redisAsyncContext *fr_redis_connection_get_async_ctx(connection_t *conn);
#ifdef __cplusplus
}
*/
struct fr_redis_cluster_thread_s {
fr_event_list_t *el;
- fr_trunk_conf_t const *tconf; //!< Configuration for all trunks in the cluster.
+ trunk_conf_t const *tconf; //!< Configuration for all trunks in the cluster.
char *log_prefix; //!< Common log prefix to use for all cluster related
///< messages.
bool delay_start; //!< Prevent connections from spawning immediately.
* encapsulated within the command set, not just within the trunk.
* @{
*/
- fr_trunk_request_t *treq; //!< Trunk request this command set is associated with.
+ trunk_request_t *treq; //!< Trunk request this command set is associated with.
request_t *request; //!< Request this commands set is associated with (if any).
void *rctx; //!< Resume context to write results to.
/** @} */
struct fr_redis_trunk_s {
fr_redis_io_conf_t const *io_conf; //!< Redis I/O configuration. Specifies how to connect
///< to the host this trunk is used to communicate with.
- fr_trunk_t *trunk; //!< Trunk containing all the connections to a specific
+ trunk_t *trunk; //!< Trunk containing all the connections to a specific
///< host.
fr_redis_cluster_thread_t *cluster; //!< Cluster this trunk belongs to.
};
return FR_REDIS_PIPELINE_BAD_CMDS;
}
- switch (fr_trunk_request_enqueue(&cmds->treq, rtrunk->trunk, cmds->request, cmds, cmds->rctx)) {
- case FR_TRUNK_ENQUEUE_OK:
- case FR_TRUNK_ENQUEUE_IN_BACKLOG:
+ switch (trunk_request_enqueue(&cmds->treq, rtrunk->trunk, cmds->request, cmds, cmds->rctx)) {
+ case TRUNK_ENQUEUE_OK:
+ case TRUNK_ENQUEUE_IN_BACKLOG:
return FR_REDIS_PIPELINE_OK;
- case FR_TRUNK_ENQUEUE_DST_UNAVAILABLE:
+ case TRUNK_ENQUEUE_DST_UNAVAILABLE:
return FR_REDIS_PIPELINE_DST_UNAVAILABLE;
default:
{
fr_redis_command_t *cmd;
fr_redis_command_set_t *cmds;
- fr_connection_t *conn = talloc_get_type_abort(ac->ev.data, fr_connection_t);
+ connection_t *conn = talloc_get_type_abort(ac->ev.data, connection_t);
fr_redis_handle_t *h = talloc_get_type_abort(conn->h, fr_redis_handle_t);
redisReply *reply = vreply;
/*
* is complete.
*/
if ((fr_dlist_num_elements(&cmds->pending) == 0) &&
- (fr_dlist_num_elements(&cmds->sent) == 0)) fr_trunk_request_signal_complete(cmds->treq);
+ (fr_dlist_num_elements(&cmds->sent) == 0)) trunk_request_signal_complete(cmds->treq);
}
-static fr_connection_t *_redis_pipeline_connection_alloc(fr_trunk_connection_t *tconn, fr_event_list_t *el,
- fr_connection_conf_t const *conf,
+static connection_t *_redis_pipeline_connection_alloc(trunk_connection_t *tconn, fr_event_list_t *el,
+ connection_conf_t const *conf,
char const *log_prefix, void *uctx)
{
fr_redis_trunk_t *rtrunk = talloc_get_type_abort(uctx, fr_redis_trunk_t);
/** Enqueue one or more command sets onto a redis handle
*
* Because the trunk is in always writable mode, _redis_pipeline_mux
- * will be called any time fr_trunk_request_enqueue is called, so there'll only
+ * will be called any time trunk_request_enqueue is called, so there'll only
* ever be one command to dequeue.
*
* @param[in] tconn Trunk connection holding the commands to enqueue.
* @param[in] conn Connection handle containing the fr_redis_handle_t.
* @param[in] uctx fr_redis_cluster_t. Unused.
*/
-static void _redis_pipeline_mux(fr_trunk_connection_t *tconn, fr_connection_t *conn, UNUSED void *uctx)
+static void _redis_pipeline_mux(trunk_connection_t *tconn, connection_t *conn, UNUSED void *uctx)
{
- fr_trunk_request_t *treq;
+ trunk_request_t *treq;
fr_redis_command_set_t *cmds;
fr_redis_command_t *cmd;
fr_redis_handle_t *h = talloc_get_type_abort(conn->h, fr_redis_handle_t);
request_t *request;
- treq = fr_trunk_connection_pop_request(&request, (void *)&cmds, NULL, tconn);
+ treq = trunk_connection_pop_request(&request, (void *)&cmds, NULL, tconn);
while ((cmd = fr_dlist_head(&cmds->pending))) {
/*
* If this fails it probably means the connection
fr_dlist_remove(&cmds->sent, cmd);
fr_dlist_insert_tail(&cmds->pending, cmd);
}
- fr_trunk_request_signal_fail(treq);
+ trunk_request_signal_fail(treq);
return;
}
cmd->sqn = fr_redis_connection_sent_request(h);
fr_dlist_remove(&cmds->pending, cmd);
fr_dlist_insert_tail(&cmds->sent, cmd);
}
- fr_trunk_request_signal_sent(treq);
+ trunk_request_signal_sent(treq);
}
/** Deal with cancellation of sent requests
* on why the commands were cancelled, we either tell the handle to ignore
* them, or move them back into the pending list.
*/
-static void _redis_pipeline_command_set_cancel(fr_connection_t *conn, UNUSED fr_trunk_request_t *treq, void *preq,
- fr_trunk_cancel_reason_t reason, UNUSED void *uctx)
+static void _redis_pipeline_command_set_cancel(connection_t *conn, UNUSED trunk_request_t *treq, void *preq,
+ trunk_cancel_reason_t reason, UNUSED void *uctx)
{
fr_redis_command_set_t *cmds = talloc_get_type_abort(preq, fr_redis_command_set_t);
fr_redis_handle_t *h = conn->h;
* command set back into the correct state for
* execution by another handle.
*/
- case FR_TRUNK_CANCEL_REASON_MOVE:
+ case TRUNK_CANCEL_REASON_MOVE:
fr_dlist_move(&cmds->pending, &cmds->sent);
return;
* Free will take care of cleaning up the
* pending commands.
*/
- case FR_TRUNK_CANCEL_REASON_SIGNAL:
+ case TRUNK_CANCEL_REASON_SIGNAL:
{
fr_redis_command_t *cmd;
}
}
- case FR_TRUNK_CANCEL_REASON_NONE:
+ case TRUNK_CANCEL_REASON_NONE:
fr_assert(0);
return;
}
fr_redis_trunk_t *fr_redis_trunk_alloc(fr_redis_cluster_thread_t *cluster_thread, fr_redis_io_conf_t const *io_conf)
{
fr_redis_trunk_t *rtrunk;
- fr_trunk_io_funcs_t io_funcs = {
+ trunk_io_funcs_t io_funcs = {
.connection_alloc = _redis_pipeline_connection_alloc,
.request_mux = _redis_pipeline_mux,
/* demux called directly by hiredis */
MEM(rtrunk = talloc_zero(cluster_thread, fr_redis_trunk_t));
rtrunk->io_conf = io_conf;
- rtrunk->trunk = fr_trunk_alloc(rtrunk, cluster_thread->el,
+ rtrunk->trunk = trunk_alloc(rtrunk, cluster_thread->el,
&io_funcs, cluster_thread->tconf, cluster_thread->log_prefix, rtrunk,
cluster_thread->delay_start);
if (!rtrunk->trunk) {
* The structures holds the trunk connections to talk to each cluster member.
*
*/
-fr_redis_cluster_thread_t *fr_redis_cluster_thread_alloc(TALLOC_CTX *ctx, fr_event_list_t *el, fr_trunk_conf_t const *tconf)
+fr_redis_cluster_thread_t *fr_redis_cluster_thread_alloc(TALLOC_CTX *ctx, fr_event_list_t *el, trunk_conf_t const *tconf)
{
fr_redis_cluster_thread_t *cluster_thread;
- fr_trunk_conf_t *our_tconf;
+ trunk_conf_t *our_tconf;
MEM(cluster_thread = talloc_zero(ctx, fr_redis_cluster_thread_t));
MEM(our_tconf = talloc_memdup(cluster_thread, tconf, sizeof(*tconf)));
return cluster_thread;
}
-
-
fr_redis_io_conf_t const *conf);
fr_redis_cluster_thread_t *fr_redis_cluster_thread_alloc(TALLOC_CTX *ctx, fr_event_list_t *el,
- fr_trunk_conf_t const *tconf);
+ trunk_conf_t const *tconf);
#ifdef __cplusplus
}
fr_redis_command_set_t *cmds;
fr_redis_cluster_thread_t *cluster_thread;
fr_redis_trunk_t *rtrunk;
- fr_connection_conf_t conn_conf;
- fr_trunk_conf_t trunk_conf;
+ connection_conf_t conn_conf;
+ trunk_conf_t trunk_conf;
size_t i;
redis_pipeline_stats_t stats;
*/
#define LOG_PREFIX conn->pub.name
-typedef struct fr_connection_s fr_connection_t;
+typedef struct connection_s connection_t;
#define _CONNECTION_PRIVATE 1
#include <freeradius-devel/server/connection.h>
# include <freeradius-devel/util/stdatomic.h>
#endif
-fr_table_num_ordered_t const fr_connection_states[] = {
- { L("HALTED"), FR_CONNECTION_STATE_HALTED },
- { L("INIT"), FR_CONNECTION_STATE_INIT },
- { L("CONNECTING"), FR_CONNECTION_STATE_CONNECTING },
- { L("TIMEOUT"), FR_CONNECTION_STATE_TIMEOUT },
- { L("CONNECTED"), FR_CONNECTION_STATE_CONNECTED },
- { L("SHUTDOWN"), FR_CONNECTION_STATE_SHUTDOWN },
- { L("FAILED"), FR_CONNECTION_STATE_FAILED },
- { L("CLOSED"), FR_CONNECTION_STATE_CLOSED },
+fr_table_num_ordered_t const connection_states[] = {
+ { L("HALTED"), CONNECTION_STATE_HALTED },
+ { L("INIT"), CONNECTION_STATE_INIT },
+ { L("CONNECTING"), CONNECTION_STATE_CONNECTING },
+ { L("TIMEOUT"), CONNECTION_STATE_TIMEOUT },
+ { L("CONNECTED"), CONNECTION_STATE_CONNECTED },
+ { L("SHUTDOWN"), CONNECTION_STATE_SHUTDOWN },
+ { L("FAILED"), CONNECTION_STATE_FAILED },
+ { L("CLOSED"), CONNECTION_STATE_CLOSED },
};
-size_t fr_connection_states_len = NUM_ELEMENTS(fr_connection_states);
+size_t connection_states_len = NUM_ELEMENTS(connection_states);
/** Map connection states to trigger names
*
*/
-static fr_table_num_indexed_t const fr_connection_trigger_names[] = {
- [FR_CONNECTION_STATE_HALTED] = { L("connection.halted"), FR_CONNECTION_STATE_HALTED },
- [FR_CONNECTION_STATE_INIT] = { L("connection.init"), FR_CONNECTION_STATE_INIT },
- [FR_CONNECTION_STATE_CONNECTING]= { L("connection.connecting"), FR_CONNECTION_STATE_CONNECTING },
- [FR_CONNECTION_STATE_TIMEOUT] = { L("connection.timeout"), FR_CONNECTION_STATE_TIMEOUT },
- [FR_CONNECTION_STATE_CONNECTED] = { L("connection.connected"), FR_CONNECTION_STATE_CONNECTED },
- [FR_CONNECTION_STATE_SHUTDOWN] = { L("connection.shutdown"), FR_CONNECTION_STATE_SHUTDOWN },
- [FR_CONNECTION_STATE_FAILED] = { L("connection.failed"), FR_CONNECTION_STATE_FAILED },
- [FR_CONNECTION_STATE_CLOSED] = { L("connection.closed"), FR_CONNECTION_STATE_CLOSED }
+static fr_table_num_indexed_t const connection_trigger_names[] = {
+ [CONNECTION_STATE_HALTED] = { L("connection.halted"), CONNECTION_STATE_HALTED },
+ [CONNECTION_STATE_INIT] = { L("connection.init"), CONNECTION_STATE_INIT },
+ [CONNECTION_STATE_CONNECTING]= { L("connection.connecting"), CONNECTION_STATE_CONNECTING },
+ [CONNECTION_STATE_TIMEOUT] = { L("connection.timeout"), CONNECTION_STATE_TIMEOUT },
+ [CONNECTION_STATE_CONNECTED] = { L("connection.connected"), CONNECTION_STATE_CONNECTED },
+ [CONNECTION_STATE_SHUTDOWN] = { L("connection.shutdown"), CONNECTION_STATE_SHUTDOWN },
+ [CONNECTION_STATE_FAILED] = { L("connection.failed"), CONNECTION_STATE_FAILED },
+ [CONNECTION_STATE_CLOSED] = { L("connection.closed"), CONNECTION_STATE_CLOSED }
};
-static size_t fr_connection_trigger_names_len = NUM_ELEMENTS(fr_connection_trigger_names);
+static size_t connection_trigger_names_len = NUM_ELEMENTS(connection_trigger_names);
static atomic_uint_fast64_t connection_counter = ATOMIC_VAR_INIT(1);
/** An entry in a watch function list
*
*/
-typedef struct fr_connection_watch_entry_s {
+typedef struct connection_watch_entry_s {
fr_dlist_t entry; //!< List entry.
- fr_connection_watch_t func; //!< Function to call when a connection enters
+ connection_watch_t func; //!< Function to call when a connection enters
///< the state this list belongs to
bool oneshot; //!< Remove the function after it's called once.
bool enabled; //!< Whether the watch entry is enabled.
void *uctx; //!< User data to pass to the function.
-} fr_connection_watch_entry_t;
+} connection_watch_entry_t;
-struct fr_connection_s {
- struct fr_connection_pub_s pub; //!< Public fields
+struct connection_s {
+ struct connection_pub_s pub; //!< Public fields
void *uctx; //!< User data.
bool processing_signals; //!< Processing deferred signals, don't let the deferred
///< signal processor be called multiple times.
- fr_dlist_head_t watch_pre[FR_CONNECTION_STATE_MAX]; //!< Function called before state callback.
- fr_dlist_head_t watch_post[FR_CONNECTION_STATE_MAX]; //!< Function called after state callback.
- fr_connection_watch_entry_t *next_watcher; //!< Hack to insulate watcher iterator from deletions.
+ fr_dlist_head_t watch_pre[CONNECTION_STATE_MAX]; //!< Function called before state callback.
+ fr_dlist_head_t watch_post[CONNECTION_STATE_MAX]; //!< Function called after state callback.
+ connection_watch_entry_t *next_watcher; //!< Hack to insulate watcher iterator from deletions.
- fr_connection_init_t init; //!< Callback for initialising a connection.
- fr_connection_open_t open; //!< Callback for 'open' notification.
- fr_connection_close_t close; //!< Callback to close a connection.
- fr_connection_shutdown_t shutdown; //!< Signal the connection handle to start shutting down.
- fr_connection_failed_t failed; //!< Callback for 'failed' notification.
+ connection_init_t init; //!< Callback for initialising a connection.
+ connection_open_t open; //!< Callback for 'open' notification.
+ connection_close_t close; //!< Callback to close a connection.
+ connection_shutdown_t shutdown; //!< Signal the connection handle to start shutting down.
+ connection_failed_t failed; //!< Callback for 'failed' notification.
fr_event_timer_t const *ev; //!< State transition timer.
fr_time_delta_t connection_timeout; //!< How long to wait in the
- //!< #FR_CONNECTION_STATE_CONNECTING state.
+ //!< #CONNECTION_STATE_CONNECTING state.
fr_time_delta_t reconnection_delay; //!< How long to wait in the
- //!< #FR_CONNECTION_STATE_FAILED state.
+ //!< #CONNECTION_STATE_FAILED state.
fr_dlist_head_t deferred_signals; //!< A list of signals we received whilst we were in
///< a handler.
- fr_connection_watch_entry_t *on_halted; //!< Used by the deferred signal processor to learn
+ connection_watch_entry_t *on_halted; //!< Used by the deferred signal processor to learn
///< if a function deeper in the call stack freed
///< the connection.
#define CONN_TRIGGER(_state) do { \
if (conn->pub.triggers) { \
trigger_exec(unlang_interpret_get_thread_default(), \
- NULL, fr_table_str_by_value(fr_connection_trigger_names, _state, "<INVALID>"), true, NULL); \
+ NULL, fr_table_str_by_value(connection_trigger_names, _state, "<INVALID>"), true, NULL); \
} \
} while (0)
#define STATE_TRANSITION(_new) \
do { \
DEBUG2("Connection changed state %s -> %s", \
- fr_table_str_by_value(fr_connection_states, conn->pub.state, "<INVALID>"), \
- fr_table_str_by_value(fr_connection_states, _new, "<INVALID>")); \
+ fr_table_str_by_value(connection_states, conn->pub.state, "<INVALID>"), \
+ fr_table_str_by_value(connection_states, _new, "<INVALID>")); \
conn->pub.prev = conn->pub.state; \
conn->pub.state = _new; \
CONN_TRIGGER(_new); \
do { \
if (!fr_cond_assert_msg(0, "Connection %" PRIu64 " invalid transition %s -> %s", \
conn->pub.id, \
- fr_table_str_by_value(fr_connection_states, conn->pub.state, "<INVALID>"), \
- fr_table_str_by_value(fr_connection_states, _new, "<INVALID>"))) return; \
+ fr_table_str_by_value(connection_states, conn->pub.state, "<INVALID>"), \
+ fr_table_str_by_value(connection_states, _new, "<INVALID>"))) return; \
} while (0)
#define DEFER_SIGNALS(_conn) ((_conn)->in_handler || (_conn)->signals_pause)
/*
* State transition functions
*/
-static void connection_state_enter_closed(fr_connection_t *conn);
-static void connection_state_enter_failed(fr_connection_t *conn);
-static void connection_state_enter_timeout(fr_connection_t *conn);
-static void connection_state_enter_connected(fr_connection_t *conn);
-static void connection_state_enter_shutdown(fr_connection_t *conn);
-static void connection_state_enter_connecting(fr_connection_t *conn);
-static void connection_state_enter_halted(fr_connection_t *conn);
-static void connection_state_enter_init(fr_connection_t *conn);
+static void connection_state_enter_closed(connection_t *conn);
+static void connection_state_enter_failed(connection_t *conn);
+static void connection_state_enter_timeout(connection_t *conn);
+static void connection_state_enter_connected(connection_t *conn);
+static void connection_state_enter_shutdown(connection_t *conn);
+static void connection_state_enter_connecting(connection_t *conn);
+static void connection_state_enter_halted(connection_t *conn);
+static void connection_state_enter_init(connection_t *conn);
/** Add a deferred signal to the signal list
*
* Once the handler is complete, and all pending C stack state changes
* are complete, the deferred signals are drained and processed.
*/
-static inline void connection_deferred_signal_add(fr_connection_t *conn, connection_dsignal_t signal)
+static inline void connection_deferred_signal_add(connection_t *conn, connection_dsignal_t signal)
{
connection_dsignal_entry_t *dsignal, *prev;
/** Notification function to tell connection_deferred_signal_process that the connection has been freed
*
*/
-static void _deferred_signal_connection_on_halted(UNUSED fr_connection_t *conn,
- UNUSED fr_connection_state_t prev,
- UNUSED fr_connection_state_t state, void *uctx)
+static void _deferred_signal_connection_on_halted(UNUSED connection_t *conn,
+ UNUSED connection_state_t prev,
+ UNUSED connection_state_t state, void *uctx)
{
bool *freed = uctx;
*freed = true;
/** Process any deferred signals
*
*/
-static void connection_deferred_signal_process(fr_connection_t *conn)
+static void connection_deferred_signal_process(connection_t *conn)
{
connection_dsignal_entry_t *dsignal;
bool freed = false;
* Get notified if the connection gets freed
* out from under us...
*/
- fr_connection_watch_enable_set_uctx(conn->on_halted, &freed);
+ connection_watch_enable_set_uctx(conn->on_halted, &freed);
conn->processing_signals = true;
while ((dsignal = fr_dlist_head(&conn->deferred_signals))) {
switch (signal) {
case CONNECTION_DSIGNAL_INIT:
- fr_connection_signal_init(conn);
+ connection_signal_init(conn);
break;
case CONNECTION_DSIGNAL_CONNECTED:
- fr_connection_signal_connected(conn);
+ connection_signal_connected(conn);
break;
case CONNECTION_DSIGNAL_RECONNECT_FAILED: /* Reconnect - Failed */
- fr_connection_signal_reconnect(conn, FR_CONNECTION_FAILED);
+ connection_signal_reconnect(conn, CONNECTION_FAILED);
break;
case CONNECTION_DSIGNAL_RECONNECT_EXPIRED: /* Reconnect - Expired */
- fr_connection_signal_reconnect(conn, FR_CONNECTION_EXPIRED);
+ connection_signal_reconnect(conn, CONNECTION_EXPIRED);
break;
case CONNECTION_DSIGNAL_SHUTDOWN:
- fr_connection_signal_shutdown(conn);
+ connection_signal_shutdown(conn);
break;
case CONNECTION_DSIGNAL_HALT:
- fr_connection_signal_halt(conn);
+ connection_signal_halt(conn);
break;
case CONNECTION_DSIGNAL_FREE: /* Freed */
}
conn->processing_signals = false;
- fr_connection_watch_disable(conn->on_halted);
+ connection_watch_disable(conn->on_halted);
}
/** Pause processing of deferred signals
*
* @param[in] conn to pause signal processing for.
*/
-void fr_connection_signals_pause(fr_connection_t *conn)
+void connection_signals_pause(connection_t *conn)
{
conn->signals_pause++;
}
*
* @param[in] conn to resume signal processing for.
*/
-void fr_connection_signals_resume(fr_connection_t *conn)
+void connection_signals_resume(connection_t *conn)
{
if (conn->signals_pause > 0) conn->signals_pause--;
if (conn->signals_pause > 0) return;
/** Call a list of watch functions associated with a state
*
*/
-static inline void connection_watch_call(fr_connection_t *conn, fr_dlist_head_t *list)
+static inline void connection_watch_call(connection_t *conn, fr_dlist_head_t *list)
{
/*
* Nested watcher calls are not allowed
fr_assert(conn->next_watcher == NULL);
while ((conn->next_watcher = fr_dlist_next(list, conn->next_watcher))) {
- fr_connection_watch_entry_t *entry = conn->next_watcher;
+ connection_watch_entry_t *entry = conn->next_watcher;
bool oneshot = entry->oneshot; /* Watcher could be freed, so store now */
if (!entry->enabled) continue;
entry->oneshot ? "oneshot " : "",
entry->func,
conn,
- fr_table_str_by_value(fr_connection_states, conn->pub.prev, "<INVALID>"),
- fr_table_str_by_value(fr_connection_states, conn->pub.state, "<INVALID>"),
+ fr_table_str_by_value(connection_states, conn->pub.prev, "<INVALID>"),
+ fr_table_str_by_value(connection_states, conn->pub.state, "<INVALID>"),
entry->uctx);
*/
/** Remove a watch function from a pre/post[state] list
*
*/
-static int connection_del_watch(fr_connection_t *conn, fr_dlist_head_t *state_lists,
- fr_connection_state_t state, fr_connection_watch_t watch)
+static int connection_del_watch(connection_t *conn, fr_dlist_head_t *state_lists,
+ connection_state_t state, connection_watch_t watch)
{
- fr_connection_watch_entry_t *entry = NULL;
+ connection_watch_entry_t *entry = NULL;
fr_dlist_head_t *list = &state_lists[state];
while ((entry = fr_dlist_next(list, entry))) {
if (entry->func == watch) {
/*
DEBUG4("Removing %s watcher %p",
- fr_table_str_by_value(fr_connection_states, state, "<INVALID>"),
+ fr_table_str_by_value(connection_states, state, "<INVALID>"),
watch);
*/
if (conn->next_watcher == entry) {
* - -1 if the function wasn't present in the watch list.
* - -2 an invalid state was passed.
*/
-int fr_connection_del_watch_pre(fr_connection_t *conn, fr_connection_state_t state, fr_connection_watch_t watch)
+int connection_del_watch_pre(connection_t *conn, connection_state_t state, connection_watch_t watch)
{
- if (state >= FR_CONNECTION_STATE_MAX) return -2;
+ if (state >= CONNECTION_STATE_MAX) return -2;
return connection_del_watch(conn, conn->watch_pre, state, watch);
}
* - -1 if the function wasn't present in the watch list.
* - -2 an invalid state was passed.
*/
-int fr_connection_del_watch_post(fr_connection_t *conn, fr_connection_state_t state, fr_connection_watch_t watch)
+int connection_del_watch_post(connection_t *conn, connection_state_t state, connection_watch_t watch)
{
- if (state >= FR_CONNECTION_STATE_MAX) return -2;
+ if (state >= CONNECTION_STATE_MAX) return -2;
return connection_del_watch(conn, conn->watch_post, state, watch);
}
/** Add a watch entry to the pre/post[state] list
*
*/
-static fr_connection_watch_entry_t *connection_add_watch(fr_connection_t *conn, fr_dlist_head_t *list,
- fr_connection_watch_t watch, bool oneshot, void const *uctx)
+static connection_watch_entry_t *connection_add_watch(connection_t *conn, fr_dlist_head_t *list,
+ connection_watch_t watch, bool oneshot, void const *uctx)
{
- fr_connection_watch_entry_t *entry;
+ connection_watch_entry_t *entry;
- MEM(entry = talloc_zero(conn, fr_connection_watch_entry_t));
+ MEM(entry = talloc_zero(conn, connection_watch_entry_t));
entry->func = watch;
entry->oneshot = oneshot;
* - NULL if state value is invalid.
* - A new watch entry handle.
*/
-fr_connection_watch_entry_t *fr_connection_add_watch_pre(fr_connection_t *conn, fr_connection_state_t state,
- fr_connection_watch_t watch, bool oneshot, void const *uctx)
+connection_watch_entry_t *connection_add_watch_pre(connection_t *conn, connection_state_t state,
+ connection_watch_t watch, bool oneshot, void const *uctx)
{
- if (state >= FR_CONNECTION_STATE_MAX) return NULL;
+ if (state >= CONNECTION_STATE_MAX) return NULL;
return connection_add_watch(conn, &conn->watch_pre[state], watch, oneshot, uctx);
}
* - NULL if state value is invalid.
* - A new watch entry handle.
*/
-fr_connection_watch_entry_t *fr_connection_add_watch_post(fr_connection_t *conn, fr_connection_state_t state,
- fr_connection_watch_t watch, bool oneshot, void const *uctx)
+connection_watch_entry_t *connection_add_watch_post(connection_t *conn, connection_state_t state,
+ connection_watch_t watch, bool oneshot, void const *uctx)
{
- if (state >= FR_CONNECTION_STATE_MAX) return NULL;
+ if (state >= CONNECTION_STATE_MAX) return NULL;
return connection_add_watch(conn, &conn->watch_post[state], watch, oneshot, uctx);
}
*
* @param[in] entry to enabled.
*/
-void fr_connection_watch_enable(fr_connection_watch_entry_t *entry)
+void connection_watch_enable(connection_watch_entry_t *entry)
{
- (void)talloc_get_type_abort(entry, fr_connection_watch_entry_t);
+ (void)talloc_get_type_abort(entry, connection_watch_entry_t);
entry->enabled = true;
}
*
* @param[in] entry to disable.
*/
-void fr_connection_watch_disable(fr_connection_watch_entry_t *entry)
+void connection_watch_disable(connection_watch_entry_t *entry)
{
- (void)talloc_get_type_abort(entry, fr_connection_watch_entry_t);
+ (void)talloc_get_type_abort(entry, connection_watch_entry_t);
entry->enabled = false;
}
* @param[in] entry to enabled.
* @param[in] uctx Opaque data to pass to the callback.
*/
-void fr_connection_watch_enable_set_uctx(fr_connection_watch_entry_t *entry, void const *uctx)
+void connection_watch_enable_set_uctx(connection_watch_entry_t *entry, void const *uctx)
{
- (void)talloc_get_type_abort(entry, fr_connection_watch_entry_t);
+ (void)talloc_get_type_abort(entry, connection_watch_entry_t);
entry->enabled = true;
memcpy(&entry->uctx, &uctx, sizeof(entry->uctx));
}
* @param[in] entry to enabled.
* @param[in] uctx Opaque data to pass to the callback.
*/
-void fr_connection_watch_set_uctx(fr_connection_watch_entry_t *entry, void const *uctx)
+void connection_watch_set_uctx(connection_watch_entry_t *entry, void const *uctx)
{
- (void)talloc_get_type_abort(entry, fr_connection_watch_entry_t);
+ (void)talloc_get_type_abort(entry, connection_watch_entry_t);
memcpy(&entry->uctx, &uctx, sizeof(entry->uctx));
}
* - true if enabled.
* - false if disabled.
*/
-bool fr_connection_watch_is_enabled(fr_connection_watch_entry_t *entry)
+bool connection_watch_is_enabled(connection_watch_entry_t *entry)
{
- (void)talloc_get_type_abort(entry, fr_connection_watch_entry_t);
+ (void)talloc_get_type_abort(entry, connection_watch_entry_t);
return entry->enabled;
}
* @param[in] conn to get count from.
* @return the number of times the connection has reconnected.
*/
-uint64_t fr_connection_get_num_reconnected(fr_connection_t const *conn)
+uint64_t connection_get_num_reconnected(connection_t const *conn)
{
if (conn->pub.reconnected == 0) return 0; /* Has never been initialised */
* @param[in] conn to get count from.
* @return the number of times the connection has timed out whilst connecting.
*/
-uint64_t fr_connection_get_num_timed_out(fr_connection_t const *conn)
+uint64_t connection_get_num_timed_out(connection_t const *conn)
{
return conn->pub.timed_out;
}
*
* @param[in] el the time event occurred on.
* @param[in] now The current time.
- * @param[in] uctx The #fr_connection_t the fd is associated with.
+ * @param[in] uctx The #connection_t the fd is associated with.
*/
static void _reconnect_delay_done(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
{
- fr_connection_t *conn = talloc_get_type_abort(uctx, fr_connection_t);
+ connection_t *conn = talloc_get_type_abort(uctx, connection_t);
switch (conn->pub.state) {
- case FR_CONNECTION_STATE_FAILED:
- case FR_CONNECTION_STATE_CLOSED:
+ case CONNECTION_STATE_FAILED:
+ case CONNECTION_STATE_CLOSED:
connection_state_enter_init(conn);
break;
default:
- BAD_STATE_TRANSITION(FR_CONNECTION_STATE_INIT);
+ BAD_STATE_TRANSITION(CONNECTION_STATE_INIT);
break;
}
}
/** Close the connection, then wait for another state change
*
*/
-static void connection_state_enter_closed(fr_connection_t *conn)
+static void connection_state_enter_closed(connection_t *conn)
{
switch (conn->pub.state) {
- case FR_CONNECTION_STATE_CONNECTING:
- case FR_CONNECTION_STATE_CONNECTED:
- case FR_CONNECTION_STATE_FAILED:
+ case CONNECTION_STATE_CONNECTING:
+ case CONNECTION_STATE_CONNECTED:
+ case CONNECTION_STATE_FAILED:
break;
default:
- BAD_STATE_TRANSITION(FR_CONNECTION_STATE_CLOSED);
+ BAD_STATE_TRANSITION(CONNECTION_STATE_CLOSED);
return;
}
- STATE_TRANSITION(FR_CONNECTION_STATE_CLOSED);
+ STATE_TRANSITION(CONNECTION_STATE_CLOSED);
fr_event_timer_delete(&conn->ev);
*
* @param[in] el the time event occurred on.
* @param[in] now The current time.
- * @param[in] uctx The #fr_connection_t the fd is associated with.
+ * @param[in] uctx The #connection_t the fd is associated with.
*/
static void _connection_timeout(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
{
- fr_connection_t *conn = talloc_get_type_abort(uctx, fr_connection_t);
+ connection_t *conn = talloc_get_type_abort(uctx, connection_t);
connection_state_enter_timeout(conn);
}
/** Gracefully shutdown the handle
*
*/
-static void connection_state_enter_shutdown(fr_connection_t *conn)
+static void connection_state_enter_shutdown(connection_t *conn)
{
- fr_connection_state_t ret;
+ connection_state_t ret;
switch (conn->pub.state) {
- case FR_CONNECTION_STATE_CONNECTED:
+ case connection_STATE_CONNECTED:
break;
default:
- BAD_STATE_TRANSITION(FR_CONNECTION_STATE_SHUTDOWN);
+ BAD_STATE_TRANSITION(connection_STATE_SHUTDOWN);
return;
}
- STATE_TRANSITION(FR_CONNECTION_STATE_SHUTDOWN);
+ STATE_TRANSITION(connection_STATE_SHUTDOWN);
WATCH_PRE(conn);
{
HANDLER_END(conn);
}
switch (ret) {
- case FR_CONNECTION_STATE_SHUTDOWN:
+ case connection_STATE_SHUTDOWN:
break;
default:
/** Connection failed
*
- * Transition to the FR_CONNECTION_STATE_FAILED state.
+ * Transition to the connection_STATE_FAILED state.
*
* If the connection was open, or couldn't be opened wait for reconnection_delay before transitioning
* back to init.
*
* @param[in] conn that failed.
*/
-static void connection_state_enter_failed(fr_connection_t *conn)
+static void connection_state_enter_failed(connection_t *conn)
{
- fr_connection_state_t prev;
- fr_connection_state_t ret = FR_CONNECTION_STATE_INIT;
+ connection_state_t prev;
+ connection_state_t ret = connection_STATE_INIT;
- fr_assert(conn->pub.state != FR_CONNECTION_STATE_FAILED);
+ fr_assert(conn->pub.state != connection_STATE_FAILED);
/*
* Explicit error occurred, delete the connection timer
/*
* Now transition to failed
*/
- STATE_TRANSITION(FR_CONNECTION_STATE_FAILED);
+ STATE_TRANSITION(connection_STATE_FAILED);
/*
* If there's a failed callback, give it the
if (conn->failed) {
HANDLER_BEGIN(conn, conn->failed);
DEBUG4("Calling failed(h=%p, state=%s, uctx=%p)", conn->pub.h,
- fr_table_str_by_value(fr_connection_states, prev, "<INVALID>"), conn->uctx);
+ fr_table_str_by_value(connection_states, prev, "<INVALID>"), conn->uctx);
ret = conn->failed(conn->pub.h, prev, conn->uctx);
HANDLER_END(conn);
}
* connecting, or when we were connected.
*/
switch (prev) {
- case FR_CONNECTION_STATE_CONNECTED:
- case FR_CONNECTION_STATE_CONNECTING:
- case FR_CONNECTION_STATE_TIMEOUT: /* Timeout means the connection progress past init */
- case FR_CONNECTION_STATE_SHUTDOWN: /* Shutdown means the connection failed whilst shutting down */
+ case connection_STATE_CONNECTED:
+ case connection_STATE_CONNECTING:
+ case connection_STATE_TIMEOUT: /* Timeout means the connection progress past init */
+ case connection_STATE_SHUTDOWN: /* Shutdown means the connection failed whilst shutting down */
connection_state_enter_closed(conn);
break;
* immediately if the failure was due
* to a connection timeout.
*/
- case FR_CONNECTION_STATE_INIT:
+ case connection_STATE_INIT:
break;
/*
* The callback signalled it wants the
* connection to stop.
*/
- case FR_CONNECTION_STATE_HALTED:
+ case connection_STATE_HALTED:
default:
connection_state_enter_halted(conn);
return;
* reconnect timeout.
*/
switch (prev) {
- case FR_CONNECTION_STATE_INIT: /* Failed during initialisation */
- case FR_CONNECTION_STATE_CONNECTED: /* Failed after connecting */
- case FR_CONNECTION_STATE_CONNECTING: /* Failed during connecting */
- case FR_CONNECTION_STATE_SHUTDOWN: /* Failed during shutdown */
+ case connection_STATE_INIT: /* Failed during initialisation */
+ case connection_STATE_CONNECTED: /* Failed after connecting */
+ case connection_STATE_CONNECTING: /* Failed during connecting */
+ case connection_STATE_SHUTDOWN: /* Failed during shutdown */
if (fr_time_delta_ispos(conn->reconnection_delay)) {
DEBUG2("Delaying reconnection by %pVs", fr_box_time_delta(conn->reconnection_delay));
if (fr_event_timer_in(conn, conn->pub.el, &conn->ev,
connection_state_enter_halted(conn);
break;
- case FR_CONNECTION_STATE_TIMEOUT: /* Failed during connecting due to timeout */
+ case connection_STATE_TIMEOUT: /* Failed during connecting due to timeout */
connection_state_enter_init(conn);
break;
* The connection took took long to open. Timeout the attempt and transition
* to the failed state.
*/
-static void connection_state_enter_timeout(fr_connection_t *conn)
+static void connection_state_enter_timeout(connection_t *conn)
{
switch (conn->pub.state) {
- case FR_CONNECTION_STATE_CONNECTING:
- case FR_CONNECTION_STATE_SHUTDOWN:
+ case connection_STATE_CONNECTING:
+ case connection_STATE_SHUTDOWN:
break;
default:
- BAD_STATE_TRANSITION(FR_CONNECTION_STATE_TIMEOUT);
+ BAD_STATE_TRANSITION(connection_STATE_TIMEOUT);
}
ERROR("Connection failed - timed out after %pVs", fr_box_time_delta(conn->connection_timeout));
- STATE_TRANSITION(FR_CONNECTION_STATE_TIMEOUT);
+ STATE_TRANSITION(connection_STATE_TIMEOUT);
conn->pub.timed_out++;
/** Enter the halted state
*
- * Here we wait, until signalled by fr_connection_signal_reconnect.
+ * Here we wait, until signalled by connection_signal_reconnect.
*/
-static void connection_state_enter_halted(fr_connection_t *conn)
+static void connection_state_enter_halted(connection_t *conn)
{
fr_assert(conn->is_closed);
switch (conn->pub.state) {
- case FR_CONNECTION_STATE_FAILED: /* Init failure */
- case FR_CONNECTION_STATE_CLOSED:
+ case connection_STATE_FAILED: /* Init failure */
+ case connection_STATE_CLOSED:
break;
default:
- BAD_STATE_TRANSITION(FR_CONNECTION_STATE_HALTED);
+ BAD_STATE_TRANSITION(connection_STATE_HALTED);
}
fr_event_timer_delete(&conn->ev);
- STATE_TRANSITION(FR_CONNECTION_STATE_HALTED);
+ STATE_TRANSITION(connection_STATE_HALTED);
WATCH_PRE(conn);
WATCH_POST(conn);
}
* sending/receiving actual data.
*
* After this, the connection will only transition states if an API client
- * explicitly calls fr_connection_signal_reconnect.
+ * explicitly calls connection_signal_reconnect.
*
* The connection API cannot monitor the connection for failure conditions.
*
* @param[in] conn Entering the connecting state.
*/
-static void connection_state_enter_connected(fr_connection_t *conn)
+static void connection_state_enter_connected(connection_t *conn)
{
int ret;
- fr_assert(conn->pub.state == FR_CONNECTION_STATE_CONNECTING);
+ fr_assert(conn->pub.state == connection_STATE_CONNECTING);
- STATE_TRANSITION(FR_CONNECTION_STATE_CONNECTED);
+ STATE_TRANSITION(connection_STATE_CONNECTED);
fr_event_timer_delete(&conn->ev);
WATCH_PRE(conn);
ret = conn->open(conn->pub.el, conn->pub.h, conn->uctx);
HANDLER_END(conn);
} else {
- ret = FR_CONNECTION_STATE_CONNECTED;
+ ret = connection_STATE_CONNECTED;
}
switch (ret) {
/*
* Callback agrees everything is connected
*/
- case FR_CONNECTION_STATE_CONNECTED:
+ case connection_STATE_CONNECTED:
DEBUG2("Connection established");
WATCH_POST(conn); /* Only call if we successfully connected */
return;
/*
* Open callback failed
*/
- case FR_CONNECTION_STATE_FAILED:
+ case connection_STATE_FAILED:
default:
PERROR("Connection failed");
connection_state_enter_failed(conn);
/** Enter the connecting state
*
- * After this function returns we wait to be signalled with fr_connection_singal_connected
+ * After this function returns we wait to be signalled with connection_signal_connected
* or for the connection timer to expire.
*
* @param[in] conn Entering the connecting state.
*/
-static void connection_state_enter_connecting(fr_connection_t *conn)
+static void connection_state_enter_connecting(connection_t *conn)
{
switch (conn->pub.state) {
- case FR_CONNECTION_STATE_INIT:
+ case connection_STATE_INIT:
break;
default:
- BAD_STATE_TRANSITION(FR_CONNECTION_STATE_CONNECTING);
+ BAD_STATE_TRANSITION(connection_STATE_CONNECTING);
return;
}
- STATE_TRANSITION(FR_CONNECTION_STATE_CONNECTING);
+ STATE_TRANSITION(connection_STATE_CONNECTING);
WATCH_PRE(conn);
WATCH_POST(conn);
*
* @param[in] conn To initialise.
*/
-static void connection_state_enter_init(fr_connection_t *conn)
+static void connection_state_enter_init(connection_t *conn)
{
- fr_connection_state_t ret;
+ connection_state_t ret;
switch (conn->pub.state) {
- case FR_CONNECTION_STATE_HALTED:
- case FR_CONNECTION_STATE_CLOSED:
- case FR_CONNECTION_STATE_FAILED:
+ case connection_STATE_HALTED:
+ case connection_STATE_CLOSED:
+ case connection_STATE_FAILED:
break;
default:
- BAD_STATE_TRANSITION(FR_CONNECTION_STATE_INIT);
+ BAD_STATE_TRANSITION(connection_STATE_INIT);
return;
}
*/
conn->pub.reconnected++;
- STATE_TRANSITION(FR_CONNECTION_STATE_INIT);
+ STATE_TRANSITION(connection_STATE_INIT);
/*
* If we have an init callback, call it.
ret = conn->init(&conn->pub.h, conn, conn->uctx);
HANDLER_END(conn);
} else {
- ret = FR_CONNECTION_STATE_CONNECTING;
+ ret = connection_STATE_CONNECTING;
}
switch (ret) {
- case FR_CONNECTION_STATE_CONNECTING:
+ case connection_STATE_CONNECTING:
conn->is_closed = false; /* We now have a handle */
WATCH_POST(conn); /* Only call if we successfully initialised the handle */
connection_state_enter_connecting(conn);
return;
- case FR_CONNECTION_STATE_CONNECTED:
+ case connection_STATE_CONNECTED:
conn->is_closed = false; /* We now have a handle */
WATCH_POST(conn); /* Only call if we successfully initialised the handle */
connection_state_enter_connected(conn);
/*
* Initialisation callback failed
*/
- case FR_CONNECTION_STATE_FAILED:
+ case connection_STATE_FAILED:
default:
PERROR("Connection initialisation failed");
connection_state_enter_failed(conn);
/** Asynchronously signal a halted connection to start
*
*/
-void fr_connection_signal_init(fr_connection_t *conn)
+void connection_signal_init(connection_t *conn)
{
DEBUG2("Signalled to start from %s state",
- fr_table_str_by_value(fr_connection_states, conn->pub.state, "<INVALID>"));
+ fr_table_str_by_value(connection_states, conn->pub.state, "<INVALID>"));
if (DEFER_SIGNALS(conn)) {
connection_deferred_signal_add(conn, CONNECTION_DSIGNAL_INIT);
}
switch (conn->pub.state) {
- case FR_CONNECTION_STATE_HALTED:
+ case connection_STATE_HALTED:
connection_state_enter_init(conn);
break;
* signal that the transition has occurred.
*
*/
-void fr_connection_signal_connected(fr_connection_t *conn)
+void connection_signal_connected(connection_t *conn)
{
fr_assert(!conn->open); /* Use one or the other not both! */
DEBUG2("Signalled connected from %s state",
- fr_table_str_by_value(fr_connection_states, conn->pub.state, "<INVALID>"));
+ fr_table_str_by_value(connection_states, conn->pub.state, "<INVALID>"));
if (DEFER_SIGNALS(conn)) {
connection_deferred_signal_add(conn, CONNECTION_DSIGNAL_CONNECTED);
}
switch (conn->pub.state) {
- case FR_CONNECTION_STATE_CONNECTING:
+ case connection_STATE_CONNECTING:
connection_state_enter_connected(conn);
break;
* @param[in] conn to reconnect.
* @param[in] reason Why the connection was signalled to reconnect.
*/
-void fr_connection_signal_reconnect(fr_connection_t *conn, fr_connection_reason_t reason)
+void connection_signal_reconnect(connection_t *conn, connection_reason_t reason)
{
DEBUG2("Signalled to reconnect from %s state",
- fr_table_str_by_value(fr_connection_states, conn->pub.state, "<INVALID>"));
+ fr_table_str_by_value(connection_states, conn->pub.state, "<INVALID>"));
if (DEFER_SIGNALS(conn)) {
- if ((reason == FR_CONNECTION_EXPIRED) && conn->shutdown) {
+ if ((reason == connection_EXPIRED) && conn->shutdown) {
connection_deferred_signal_add(conn, CONNECTION_DSIGNAL_RECONNECT_EXPIRED);
return;
}
}
switch (conn->pub.state) {
- case FR_CONNECTION_STATE_CLOSED: /* Don't circumvent reconnection_delay */
- case FR_CONNECTION_STATE_INIT: /* Already initialising */
+ case connection_STATE_CLOSED: /* Don't circumvent reconnection_delay */
+ case connection_STATE_INIT: /* Already initialising */
break;
- case FR_CONNECTION_STATE_HALTED:
- fr_connection_signal_init(conn);
+ case connection_STATE_HALTED:
+ connection_signal_init(conn);
break;
- case FR_CONNECTION_STATE_SHUTDOWN:
- if (reason == FR_CONNECTION_EXPIRED) break; /* Already shutting down */
+ case connection_STATE_SHUTDOWN:
+ if (reason == connection_EXPIRED) break; /* Already shutting down */
connection_state_enter_failed(conn);
break;
- case FR_CONNECTION_STATE_CONNECTED:
- if (reason == FR_CONNECTION_EXPIRED) {
+ case connection_STATE_CONNECTED:
+ if (reason == connection_EXPIRED) {
if (conn->shutdown) {
connection_state_enter_shutdown(conn);
break;
}
FALL_THROUGH;
- case FR_CONNECTION_STATE_CONNECTING:
- case FR_CONNECTION_STATE_TIMEOUT:
- case FR_CONNECTION_STATE_FAILED:
+ case connection_STATE_CONNECTING:
+ case connection_STATE_TIMEOUT:
+ case connection_STATE_FAILED:
connection_state_enter_failed(conn);
break;
- case FR_CONNECTION_STATE_MAX:
+ case connection_STATE_MAX:
fr_assert(0);
return;
}
* If a shutdown function has been provided, it is called.
* It's then up to the shutdown function to install I/O handlers to signal
* when the connection has finished shutting down and should be closed
- * via #fr_connection_signal_halt.
+ * via #connection_signal_halt.
*
* @param[in] conn to shutdown.
*/
-void fr_connection_signal_shutdown(fr_connection_t *conn)
+void connection_signal_shutdown(connection_t *conn)
{
DEBUG2("Signalled to shutdown from %s state",
- fr_table_str_by_value(fr_connection_states, conn->pub.state, "<INVALID>"));
+ fr_table_str_by_value(connection_states, conn->pub.state, "<INVALID>"));
if (DEFER_SIGNALS(conn)) {
connection_deferred_signal_add(conn, CONNECTION_DSIGNAL_SHUTDOWN);
}
switch (conn->pub.state) {
- case FR_CONNECTION_STATE_HALTED:
- case FR_CONNECTION_STATE_SHUTDOWN:
+ case connection_STATE_HALTED:
+ case connection_STATE_SHUTDOWN:
break;
- case FR_CONNECTION_STATE_INIT:
+ case connection_STATE_INIT:
connection_state_enter_halted(conn);
break;
* The shutdown callback or an FD event it inserts then
* to signal that the connection should be closed.
*/
- case FR_CONNECTION_STATE_CONNECTED:
+ case connection_STATE_CONNECTED:
if (conn->shutdown) {
connection_state_enter_shutdown(conn);
break;
* an active handle which needs to be closed before
* the connection is halted.
*/
- case FR_CONNECTION_STATE_CONNECTING:
- case FR_CONNECTION_STATE_TIMEOUT:
- case FR_CONNECTION_STATE_FAILED:
+ case connection_STATE_CONNECTING:
+ case connection_STATE_TIMEOUT:
+ case connection_STATE_FAILED:
connection_state_enter_closed(conn);
fr_assert(conn->is_closed);
FALL_THROUGH;
- case FR_CONNECTION_STATE_CLOSED:
+ case connection_STATE_CLOSED:
connection_state_enter_halted(conn);
break;
- case FR_CONNECTION_STATE_MAX:
+ case connection_STATE_MAX:
fr_assert(0);
return;
}
*
* @param[in] conn to halt.
*/
-void fr_connection_signal_halt(fr_connection_t *conn)
+void connection_signal_halt(connection_t *conn)
{
DEBUG2("Signalled to halt from %s state",
- fr_table_str_by_value(fr_connection_states, conn->pub.state, "<INVALID>"));
+ fr_table_str_by_value(connection_states, conn->pub.state, "<INVALID>"));
if (DEFER_SIGNALS(conn)) {
connection_deferred_signal_add(conn, CONNECTION_DSIGNAL_HALT);
}
switch (conn->pub.state) {
- case FR_CONNECTION_STATE_HALTED:
+ case connection_STATE_HALTED:
break;
- case FR_CONNECTION_STATE_INIT:
- case FR_CONNECTION_STATE_CLOSED:
+ case connection_STATE_INIT:
+ case connection_STATE_CLOSED:
connection_state_enter_halted(conn);
break;
* an active handle which needs to be closed before
* the connection is halted.
*/
- case FR_CONNECTION_STATE_CONNECTED:
- case FR_CONNECTION_STATE_CONNECTING:
- case FR_CONNECTION_STATE_SHUTDOWN:
- case FR_CONNECTION_STATE_TIMEOUT:
- case FR_CONNECTION_STATE_FAILED:
+ case connection_STATE_CONNECTED:
+ case connection_STATE_CONNECTING:
+ case connection_STATE_SHUTDOWN:
+ case connection_STATE_TIMEOUT:
+ case connection_STATE_FAILED:
connection_state_enter_closed(conn);
fr_assert(conn->is_closed);
connection_state_enter_halted(conn);
break;
- case FR_CONNECTION_STATE_MAX:
+ case connection_STATE_MAX:
fr_assert(0);
return;
}
* @param[in] fd the I/O even occurred for.
* @param[in] flags from_kevent.
* @param[in] fd_errno from kevent.
- * @param[in] uctx The #fr_connection_t this fd is associated with.
+ * @param[in] uctx The #connection_t this fd is associated with.
*/
static void _connection_error(UNUSED fr_event_list_t *el, int fd, UNUSED int flags, int fd_errno, void *uctx)
{
- fr_connection_t *conn = talloc_get_type_abort(uctx, fr_connection_t);
+ connection_t *conn = talloc_get_type_abort(uctx, connection_t);
ERROR("Connection failed for fd (%u): %s", fd, fr_syserror(fd_errno));
connection_state_enter_failed(conn);
* @param[in] el event list the I/O event occurred on.
* @param[in] fd the I/O even occurred for.
* @param[in] flags from kevent.
- * @param[in] uctx The #fr_connection_t this fd is associated with.
+ * @param[in] uctx The #connection_t this fd is associated with.
*/
static void _connection_writable(fr_event_list_t *el, int fd, UNUSED int flags, void *uctx)
{
- fr_connection_t *conn = talloc_get_type_abort(uctx, fr_connection_t);
+ connection_t *conn = talloc_get_type_abort(uctx, connection_t);
fr_event_fd_delete(el, fd, FR_EVENT_FILTER_IO);
connection_state_enter_connected(conn);
/** Remove the FD we were watching for connection open/fail from the event loop
*
*/
-static void _connection_signal_on_fd_cleanup(fr_connection_t *conn,
- UNUSED fr_connection_state_t prev, fr_connection_state_t state, void *uctx)
+static void _connection_signal_on_fd_cleanup(connection_t *conn,
+ UNUSED connection_state_t prev, connection_state_t state, void *uctx)
{
int fd = *(talloc_get_type_abort(uctx, int));
* Remove the watch on the one that didn't
*/
switch (state) {
- case FR_CONNECTION_STATE_CLOSED:
- fr_connection_del_watch_pre(conn, FR_CONNECTION_STATE_CONNECTED, _connection_signal_on_fd_cleanup);
+ case connection_STATE_CLOSED:
+ connection_del_watch_pre(conn, connection_STATE_CONNECTED, _connection_signal_on_fd_cleanup);
break;
- case FR_CONNECTION_STATE_CONNECTED:
- fr_connection_del_watch_pre(conn, FR_CONNECTION_STATE_CLOSED, _connection_signal_on_fd_cleanup);
+ case connection_STATE_CONNECTED:
+ connection_del_watch_pre(conn, connection_STATE_CLOSED, _connection_signal_on_fd_cleanup);
break;
default:
* - 0 on success.
* - -1 on failure.
*/
-int fr_connection_signal_on_fd(fr_connection_t *conn, int fd)
+int connection_signal_on_fd(connection_t *conn, int fd)
{
int *fd_s;
* the I/O handlers if the connection
* fails, or is connected.
*/
- fr_connection_add_watch_pre(conn, FR_CONNECTION_STATE_CLOSED,
+ connection_add_watch_pre(conn, connection_STATE_CLOSED,
_connection_signal_on_fd_cleanup, true, fd_s);
- fr_connection_add_watch_pre(conn, FR_CONNECTION_STATE_CONNECTED,
+ connection_add_watch_pre(conn, connection_STATE_CONNECTED,
_connection_signal_on_fd_cleanup, true, fd_s);
return 0;
}
* - 0 connection was freed immediately.
* - 1 connection free was deferred.
*/
-static int _connection_free(fr_connection_t *conn)
+static int _connection_free(connection_t *conn)
{
/*
* Explicitly cancel any pending events
}
switch (conn->pub.state) {
- case FR_CONNECTION_STATE_HALTED:
+ case connection_STATE_HALTED:
break;
/*
* Need to close the connection first
*/
- case FR_CONNECTION_STATE_CONNECTING:
- case FR_CONNECTION_STATE_CONNECTED:
+ case connection_STATE_CONNECTING:
+ case connection_STATE_CONNECTED:
connection_state_enter_closed(conn);
FALL_THROUGH;
/** Allocate a new connection
*
- * After the connection has been allocated, it should be started with a call to #fr_connection_signal_init.
+ * After the connection has been allocated, it should be started with a call to #connection_signal_init.
*
* The connection state machine can detect when the connection is open in one of two ways.
- * - You can install a generic socket open/fail callback, using fr_connection_signal_on_fd.
- * - You can call either #fr_connection_signal_connected or fr_connection_signal_recommend.
+ * - You can install a generic socket open/fail callback, using connection_signal_on_fd.
+ * - You can call either #connection_signal_connected or connection_signal_reconnect.
* This allows the connection state machine to work with more difficult library APIs,
* which may not return control to the caller as connections are opened.
*
* @param[in] ctx to allocate connection handle in. If the connection
- * handle is freed, and the #fr_connection_state_t is
- * #FR_CONNECTION_STATE_CONNECTING or #FR_CONNECTION_STATE_CONNECTED the
+ * handle is freed, and the #connection_state_t is
+ * #connection_STATE_CONNECTING or #connection_STATE_CONNECTED the
* close callback will be called.
- * @param[in] el to use for timer events, and to pass to the #fr_connection_open_t callback.
+ * @param[in] el to use for timer events, and to pass to the #connection_open_t callback.
* @param[in] funcs callback functions.
* @param[in] conf our configuration.
* @param[in] log_prefix To prepend to log messages.
* @param[in] uctx User context to pass to callbacks.
* @return
- * - A new #fr_connection_t on success.
+ * - A new #connection_t on success.
* - NULL on failure.
*/
-fr_connection_t *fr_connection_alloc(TALLOC_CTX *ctx, fr_event_list_t *el,
- fr_connection_funcs_t const *funcs,
- fr_connection_conf_t const *conf,
+connection_t *connection_alloc(TALLOC_CTX *ctx, fr_event_list_t *el,
+ connection_funcs_t const *funcs,
+ connection_conf_t const *conf,
char const *log_prefix,
void const *uctx)
{
size_t i;
- fr_connection_t *conn;
+ connection_t *conn;
uint64_t id;
fr_assert(el);
- conn = talloc(ctx, fr_connection_t);
+ conn = talloc(ctx, connection_t);
if (!conn) return NULL;
talloc_set_destructor(conn, _connection_free);
id = atomic_fetch_add_explicit(&connection_counter, 1, memory_order_relaxed);
- *conn = (fr_connection_t){
+ *conn = (connection_t){
.pub = {
.id = id,
- .state = FR_CONNECTION_STATE_HALTED,
+ .state = connection_STATE_HALTED,
.el = el
},
.reconnection_delay = conf->reconnection_delay,
memcpy(&conn->uctx, &uctx, sizeof(conn->uctx));
for (i = 0; i < NUM_ELEMENTS(conn->watch_pre); i++) {
- fr_dlist_talloc_init(&conn->watch_pre[i], fr_connection_watch_entry_t, entry);
+ fr_dlist_talloc_init(&conn->watch_pre[i], connection_watch_entry_t, entry);
}
for (i = 0; i < NUM_ELEMENTS(conn->watch_post); i++) {
- fr_dlist_talloc_init(&conn->watch_post[i], fr_connection_watch_entry_t, entry);
+ fr_dlist_talloc_init(&conn->watch_post[i], connection_watch_entry_t, entry);
}
fr_dlist_talloc_init(&conn->deferred_signals, connection_dsignal_entry_t, entry);
/*
* Pre-allocate a on_halt watcher for deferred signal processing
*/
- conn->on_halted = fr_connection_add_watch_post(conn, FR_CONNECTION_STATE_HALTED,
+ conn->on_halted = connection_add_watch_post(conn, connection_STATE_HALTED,
_deferred_signal_connection_on_halted, true, NULL);
- fr_connection_watch_disable(conn->on_halted); /* Start disabled */
+ connection_watch_disable(conn->on_halted); /* Start disabled */
return conn;
}
# error _CONST can only be defined in the local header
#endif
#ifndef _CONNECTION_PRIVATE
-typedef struct fr_connection_pub_s fr_connection_t; /* We use the private version of the fr_connection_t */
+typedef struct connection_pub_s connection_t; /* We use the private version of the connection_t */
# define _CONST const
#else
# define _CONST
#endif
typedef enum {
- FR_CONNECTION_STATE_HALTED = 0, //!< The connection is in a halted stat. It does not have
+	connection_STATE_HALTED = 0,		//!< The connection is in a halted state. It does not have
///< a valid file descriptor, and it will not try and
///< and create one.
- FR_CONNECTION_STATE_INIT, //!< Init state, sets up connection.
- FR_CONNECTION_STATE_CONNECTING, //!< Waiting for connection to establish.
- FR_CONNECTION_STATE_TIMEOUT, //!< Timeout during #FR_CONNECTION_STATE_CONNECTING.
- FR_CONNECTION_STATE_CONNECTED, //!< File descriptor is open (ready for writing).
- FR_CONNECTION_STATE_SHUTDOWN, //!< Connection is shutting down.
- FR_CONNECTION_STATE_FAILED, //!< Connection has failed.
- FR_CONNECTION_STATE_CLOSED, //!< Connection has been closed.
- FR_CONNECTION_STATE_MAX
-} fr_connection_state_t;
+ connection_STATE_INIT, //!< Init state, sets up connection.
+ connection_STATE_CONNECTING, //!< Waiting for connection to establish.
+ connection_STATE_TIMEOUT, //!< Timeout during #connection_STATE_CONNECTING.
+ connection_STATE_CONNECTED, //!< File descriptor is open (ready for writing).
+ connection_STATE_SHUTDOWN, //!< Connection is shutting down.
+ connection_STATE_FAILED, //!< Connection has failed.
+ connection_STATE_CLOSED, //!< Connection has been closed.
+ connection_STATE_MAX
+} connection_state_t;
/** Public fields for the connection
*
* Though these fields are public, they should _NOT_ be modified by clients of
* the connection API.
*/
-struct fr_connection_pub_s {
+struct connection_pub_s {
char const * _CONST name; //!< Prefix to add to log messages.
- fr_connection_state_t _CONST state; //!< Current connection state.
- fr_connection_state_t _CONST prev; //!< The previous state the connection was in.
+ connection_state_t _CONST state; //!< Current connection state.
+ connection_state_t _CONST prev; //!< The previous state the connection was in.
uint64_t _CONST id; //!< Unique identifier for the connection.
void * _CONST h; //!< Connection handle
fr_event_list_t * _CONST el; //!< Event list for timers and I/O events.
};
typedef enum {
- FR_CONNECTION_FAILED = 0, //!< Connection is being reconnected because it failed.
- FR_CONNECTION_EXPIRED //!< Connection is being reconnected because it's at
+ connection_FAILED = 0, //!< Connection is being reconnected because it failed.
+ connection_EXPIRED //!< Connection is being reconnected because it's at
///< the end of its life. In this case we enter the
///< closing state and try and close the connection
///< gracefully.
-} fr_connection_reason_t;
+} connection_reason_t;
typedef struct {
fr_time_delta_t connection_timeout; //!< How long to wait for the connection to open
//!< or for shutdown to close the connection.
fr_time_delta_t reconnection_delay; //!< How long to wait after failures.
-} fr_connection_conf_t;
+} connection_conf_t;
-typedef struct fr_connection_watch_entry_s fr_connection_watch_entry_t;
+typedef struct connection_watch_entry_s connection_watch_entry_t;
-extern fr_table_num_ordered_t const fr_connection_states[];
-extern size_t fr_connection_states_len;
+extern fr_table_num_ordered_t const connection_states[];
+extern size_t connection_states_len;
/** Callback for the initialise state
*
* for library I/O callbacks.
* @param[in] uctx User context.
* @return
- * - #FR_CONNECTION_STATE_CONNECTING if a handle was successfully created.
- * - #FR_CONNECTION_STATE_FAILED if we could not create a handle.
+ * - #connection_STATE_CONNECTING if a handle was successfully created.
+ * - #connection_STATE_FAILED if we could not create a handle.
*/
-typedef fr_connection_state_t (*fr_connection_init_t)(void **h_out, fr_connection_t *conn, void *uctx);
+typedef connection_state_t (*connection_init_t)(void **h_out, connection_t *conn, void *uctx);
/** Notification that the connection is now open
*
* @param[in] h Handle that was successfully opened.
* @param[in] uctx User context.
* @return
- * - #FR_CONNECTION_STATE_CONNECTED if the handle is usable.
- * - #FR_CONNECTION_STATE_FAILED if the handle is unusable.
+ * - #connection_STATE_CONNECTED if the handle is usable.
+ * - #connection_STATE_FAILED if the handle is unusable.
*/
-typedef fr_connection_state_t (*fr_connection_open_t)(fr_event_list_t *el, void *h, void *uctx);
+typedef connection_state_t (*connection_open_t)(fr_event_list_t *el, void *h, void *uctx);
/** Start the process of gracefully shutting down the connection
*
* @param[in] h Handle that needs to be closed.
* @param[in] uctx User context.
* @return
- * - #FR_CONNECTION_STATE_SHUTDOWN if the handle has shutdown.
- * - #FR_CONNECTION_STATE_FAILED if the handle is unusable, and we
+ * - #connection_STATE_SHUTDOWN if the handle has shutdown.
+ * - #connection_STATE_FAILED if the handle is unusable, and we
* should just transition directly to failed.
*/
-typedef fr_connection_state_t (*fr_connection_shutdown_t)(fr_event_list_t *el, void *h, void *uctx);
+typedef connection_state_t (*connection_shutdown_t)(fr_event_list_t *el, void *h, void *uctx);
/** Notification that a connection attempt has failed
*
- * @note If the callback frees the connection, it must return #FR_CONNECTION_STATE_HALTED.
+ * @note If the callback frees the connection, it must return #connection_STATE_HALTED.
*
* @param[in] h Handle that failed.
* @param[in] state the connection was in when it failed. Usually one of:
- * - #FR_CONNECTION_STATE_CONNECTING the connection attempt explicitly failed.
- * - #FR_CONNECTION_STATE_CONNECTED something called #fr_connection_signal_reconnect.
- * - #FR_CONNECTION_STATE_TIMEOUT the connection attempt timed out.
+ * - #connection_STATE_CONNECTING the connection attempt explicitly failed.
+ * - #connection_STATE_CONNECTED something called #connection_signal_reconnect.
+ * - #connection_STATE_TIMEOUT the connection attempt timed out.
* @param[in] uctx User context.
* @return
- * - #FR_CONNECTION_STATE_INIT to transition to the init state.
- * - #FR_CONNECTION_STATE_HALTED To prevent further reconnection
+ * - #connection_STATE_INIT to transition to the init state.
+ * - #connection_STATE_HALTED To prevent further reconnection
* attempts Can be restarted with
- * #fr_connection_signal_init().
+ * #connection_signal_init().
*/
-typedef fr_connection_state_t (*fr_connection_failed_t)(void *h, fr_connection_state_t state, void *uctx);
+typedef connection_state_t (*connection_failed_t)(void *h, connection_state_t state, void *uctx);
/** Notification that the connection has errored and must be closed
*
* @param[in] h Handle to close.
* @param[in] uctx User context.
*/
-typedef void (*fr_connection_close_t)(fr_event_list_t *el, void *h, void *uctx);
+typedef void (*connection_close_t)(fr_event_list_t *el, void *h, void *uctx);
/** Holds a complete set of functions for a connection
*
*/
typedef struct {
- fr_connection_init_t init;
- fr_connection_open_t open;
- fr_connection_shutdown_t shutdown;
- fr_connection_failed_t failed;
- fr_connection_close_t close;
-} fr_connection_funcs_t;
+ connection_init_t init;
+ connection_open_t open;
+ connection_shutdown_t shutdown;
+ connection_failed_t failed;
+ connection_close_t close;
+} connection_funcs_t;
/** Receive a notification when a connection enters a particular state
*
* @param[in] conn Being watched.
* @param[in] prev State we came from.
* @param[in] state State that was entered (the current state)
- * @param[in] uctx that was passed to fr_connection_add_watch_*.
+ * @param[in] uctx that was passed to connection_add_watch_*.
*/
-typedef void(*fr_connection_watch_t)(fr_connection_t *conn,
- fr_connection_state_t prev, fr_connection_state_t state, void *uctx);
+typedef void(*connection_watch_t)(connection_t *conn,
+ connection_state_t prev, connection_state_t state, void *uctx);
/** @name Add watcher functions that get called before (pre) the state callback and after (post)
* @{
*/
-fr_connection_watch_entry_t *fr_connection_add_watch_pre(fr_connection_t *conn, fr_connection_state_t state,
- fr_connection_watch_t watch, bool oneshot, void const *uctx);
+connection_watch_entry_t *connection_add_watch_pre(connection_t *conn, connection_state_t state,
+ connection_watch_t watch, bool oneshot, void const *uctx);
-fr_connection_watch_entry_t *fr_connection_add_watch_post(fr_connection_t *conn, fr_connection_state_t state,
- fr_connection_watch_t watch, bool oneshot, void const *uctx);
+connection_watch_entry_t *connection_add_watch_post(connection_t *conn, connection_state_t state,
+ connection_watch_t watch, bool oneshot, void const *uctx);
-int fr_connection_del_watch_pre(fr_connection_t *conn, fr_connection_state_t state,
- fr_connection_watch_t watch);
+int connection_del_watch_pre(connection_t *conn, connection_state_t state,
+ connection_watch_t watch);
-int fr_connection_del_watch_post(fr_connection_t *conn, fr_connection_state_t state,
- fr_connection_watch_t watch);
+int connection_del_watch_post(connection_t *conn, connection_state_t state,
+ connection_watch_t watch);
-void fr_connection_watch_enable(fr_connection_watch_entry_t *entry);
+void connection_watch_enable(connection_watch_entry_t *entry);
-void fr_connection_watch_disable(fr_connection_watch_entry_t *entry);
+void connection_watch_disable(connection_watch_entry_t *entry);
-void fr_connection_watch_enable_set_uctx(fr_connection_watch_entry_t *entry, void const *uctx);
+void connection_watch_enable_set_uctx(connection_watch_entry_t *entry, void const *uctx);
-void fr_connection_watch_set_uctx(fr_connection_watch_entry_t *entry, void const *uctx);
+void connection_watch_set_uctx(connection_watch_entry_t *entry, void const *uctx);
-bool fr_connection_watch_is_enabled(fr_connection_watch_entry_t *entry);
+bool connection_watch_is_enabled(connection_watch_entry_t *entry);
/** @} */
/** @name Statistics
* @{
*/
-uint64_t fr_connection_get_num_reconnected(fr_connection_t const *conn);
+uint64_t connection_get_num_reconnected(connection_t const *conn);
-uint64_t fr_connection_get_num_timed_out(fr_connection_t const *conn);
+uint64_t connection_get_num_timed_out(connection_t const *conn);
/** @} */
/** @name Signal the connection to change states
* @{
*/
-void fr_connection_signal_init(fr_connection_t *conn);
+void connection_signal_init(connection_t *conn);
-void fr_connection_signal_connected(fr_connection_t *conn);
+void connection_signal_connected(connection_t *conn);
-void fr_connection_signal_reconnect(fr_connection_t *conn, fr_connection_reason_t reason);
+void connection_signal_reconnect(connection_t *conn, connection_reason_t reason);
-void fr_connection_signal_shutdown(fr_connection_t *conn);
+void connection_signal_shutdown(connection_t *conn);
-void fr_connection_signal_halt(fr_connection_t *conn);
+void connection_signal_halt(connection_t *conn);
-void fr_connection_signals_pause(fr_connection_t *conn);
+void connection_signals_pause(connection_t *conn);
-void fr_connection_signals_resume(fr_connection_t *conn);
+void connection_signals_resume(connection_t *conn);
/** @} */
/** @name Install generic I/O events on an FD to signal state changes
* @{
*/
-int fr_connection_signal_on_fd(fr_connection_t *conn, int fd);
+int connection_signal_on_fd(connection_t *conn, int fd);
/** @} */
/** @name Allocate a new connection
* @{
*/
-fr_connection_t *fr_connection_alloc(TALLOC_CTX *ctx, fr_event_list_t *el,
- fr_connection_funcs_t const *funcs, fr_connection_conf_t const *conf,
+connection_t *connection_alloc(TALLOC_CTX *ctx, fr_event_list_t *el,
+ connection_funcs_t const *funcs, connection_conf_t const *conf,
char const *log_prefix, void const *uctx);
/** @} */
#ifdef __cplusplus
}
#endif
-
-
* threads leaving the pool in an inconsistent state, and the callbacks
* required to open, close and check the status of connections within the pool.
*
- * @see fr_connection
+ * @see connection
*/
struct fr_pool_s {
int ref; //!< Reference counter to prevent connection
* Allocate a new top level ctx for the create callback
* to hang its memory off of.
*/
- ctx = talloc_init("fr_connection_ctx");
+ ctx = talloc_init("connection_ctx");
if (!ctx) return NULL;
/*
fr_dict_attr_t const *port_da;
fr_pair_t *vp;
- server_da = fr_dict_attr_child_by_num(fr_dict_root(fr_dict_internal()), FR_CONNECTION_POOL_SERVER);
+ server_da = fr_dict_attr_child_by_num(fr_dict_root(fr_dict_internal()), FR_CONNECTION_POOL_SERVER);
if (!server_da) {
ERROR("Incomplete dictionary: Missing definition for \"Connection-Pool-Server\"");
return;
}
- port_da = fr_dict_attr_child_by_num(fr_dict_root(fr_dict_internal()), FR_CONNECTION_POOL_PORT);
+ port_da = fr_dict_attr_child_by_num(fr_dict_root(fr_dict_internal()), FR_CONNECTION_POOL_PORT);
if (!port_da) {
ERROR("Incomplete dictionary: Missing definition for \"Connection-Pool-Port\"");
return;
# define TALLOC_GET_TYPE_ABORT_NOOP 1
#endif
-typedef struct fr_trunk_request_s fr_trunk_request_t;
-typedef struct fr_trunk_connection_s fr_trunk_connection_t;
-typedef struct fr_trunk_s fr_trunk_t;
+typedef struct trunk_request_s trunk_request_t;
+typedef struct trunk_connection_s trunk_connection_t;
+typedef struct trunk_s trunk_t;
#define _TRUNK_PRIVATE 1
#include <freeradius-devel/server/trunk.h>
/** The maximum number of state logs to record per request
*
*/
-#define FR_TRUNK_REQUEST_STATE_LOG_MAX 20
+#define TRUNK_REQUEST_STATE_LOG_MAX 20
/** Trace state machine changes for a particular request
*
typedef struct {
fr_dlist_head_t *log_head; //!< To allow the log entry to remove itself on free.
fr_dlist_t entry; //!< Entry in the linked list.
- fr_trunk_request_state_t from; //!< What state we transitioned from.
- fr_trunk_request_state_t to; //!< What state we transitioned to.
+ trunk_request_state_t from; //!< What state we transitioned from.
+ trunk_request_state_t to; //!< What state we transitioned to.
- fr_trunk_connection_t *tconn; //!< The request was associated with.
+ trunk_connection_t *tconn; //!< The request was associated with.
///< Pointer may now be invalid, do no de-reference.
uint64_t tconn_id; //!< If the treq was associated with a connection
///< the connection ID.
- fr_trunk_connection_state_t tconn_state; //!< If the treq was associated with a connection
+ trunk_connection_state_t tconn_state; //!< If the treq was associated with a connection
///< the connection state at the time of the
///< state transition.
char const *function; //!< State change occurred in.
int line; //!< Line change occurred on.
-} fr_trunk_request_state_log_t;
+} trunk_request_state_log_t;
#endif
/** Wraps a normal request
*
*/
-struct fr_trunk_request_s {
- struct fr_trunk_request_pub_s pub; //!< Public fields in the trunk request.
+struct trunk_request_s {
+ struct trunk_request_pub_s pub; //!< Public fields in the trunk request.
///< This *MUST* be the first field in this
///< structure.
fr_dlist_t entry; //!< Used to track the trunk request in the conn->sent
///< or trunk->backlog request.
- fr_trunk_cancel_reason_t cancel_reason; //!< Why this request was cancelled.
+ trunk_cancel_reason_t cancel_reason; //!< Why this request was cancelled.
fr_time_t last_freed; //!< Last time this request was freed.
* @dotfile src/lib/server/trunk_conn.gv "Trunk connection state machine"
* @dotfile src/lib/server/trunk_req.gv "Trunk request state machine"
*/
-struct fr_trunk_connection_s {
- struct fr_trunk_connection_pub_s pub; //!< Public fields in the trunk connection.
+struct trunk_connection_s {
+ struct trunk_connection_pub_s pub; //!< Public fields in the trunk connection.
///< This *MUST* be the first field in this
///< structure.
/** @name State
* @{
*/
- fr_trunk_connection_event_t events; //!< The current events we expect to be notified on.
+ trunk_connection_event_t events; //!< The current events we expect to be notified on.
/** @} */
/** @name Request lists
*/
fr_heap_t *pending; //!< Requests waiting to be sent.
- fr_trunk_request_t *partial; //!< Partially written request.
+ trunk_request_t *partial; //!< Partially written request.
fr_dlist_head_t sent; //!< Sent request.
fr_dlist_head_t cancel; //!< Requests in the cancel state.
- fr_trunk_request_t *cancel_partial; //!< Partially written cancellation request.
+ trunk_request_t *cancel_partial; //!< Partially written cancellation request.
fr_dlist_head_t cancel_sent; //!< Sent cancellation request.
/** @} */
/** An entry in a trunk watch function list
*
*/
-typedef struct fr_trunk_watch_entry_s {
+typedef struct trunk_watch_entry_s {
fr_dlist_t entry; //!< List entry.
- fr_trunk_watch_t func; //!< Function to call when a trunk enters
+ trunk_watch_t func; //!< Function to call when a trunk enters
///< the state this list belongs to
bool oneshot; //!< Remove the function after it's called once.
bool enabled; //!< Whether the watch entry is enabled.
void *uctx; //!< User data to pass to the function.
-} fr_trunk_watch_entry_t;
+} trunk_watch_entry_t;
/** Main trunk management handle
*
*/
-struct fr_trunk_s {
- struct fr_trunk_pub_s pub; //!< Public fields in the trunk connection.
+struct trunk_s {
+ struct trunk_pub_s pub; //!< Public fields in the trunk connection.
///< This *MUST* be the first field in this
///< structure.
fr_event_list_t *el; //!< Event list used by this trunk and the connection.
- fr_trunk_conf_t conf; //!< Trunk common configuration.
+ trunk_conf_t conf; //!< Trunk common configuration.
fr_dlist_head_t free_requests; //!< Requests in the unassigned state. Waiting to be
///< enqueued.
/** @name Callbacks
* @{
*/
- fr_trunk_io_funcs_t funcs; //!< I/O functions.
+ trunk_io_funcs_t funcs; //!< I/O functions.
void *in_handler; //!< Which handler we're inside.
void *uctx; //!< Uctx data to pass to alloc.
- fr_dlist_head_t watch[FR_TRUNK_STATE_MAX]; //!< To be called when trunk changes state.
+ fr_dlist_head_t watch[TRUNK_STATE_MAX]; //!< To be called when trunk changes state.
- fr_trunk_watch_entry_t *next_watcher; //!< Watcher about to be run. Used to prevent nested watchers.
+ trunk_watch_entry_t *next_watcher; //!< Watcher about to be run. Used to prevent nested watchers.
/** @} */
/** @name Timers
/** @} */
};
-static conf_parser_t const fr_trunk_config_request[] = {
- { FR_CONF_OFFSET("per_connection_max", fr_trunk_conf_t, max_req_per_conn), .dflt = "2000" },
- { FR_CONF_OFFSET("per_connection_target", fr_trunk_conf_t, target_req_per_conn), .dflt = "1000" },
- { FR_CONF_OFFSET("free_delay", fr_trunk_conf_t, req_cleanup_delay), .dflt = "10.0" },
+static conf_parser_t const trunk_config_request[] = {
+ { FR_CONF_OFFSET("per_connection_max", trunk_conf_t, max_req_per_conn), .dflt = "2000" },
+ { FR_CONF_OFFSET("per_connection_target", trunk_conf_t, target_req_per_conn), .dflt = "1000" },
+ { FR_CONF_OFFSET("free_delay", trunk_conf_t, req_cleanup_delay), .dflt = "10.0" },
CONF_PARSER_TERMINATOR
};
-static conf_parser_t const fr_trunk_config_connection[] = {
- { FR_CONF_OFFSET("connect_timeout", fr_connection_conf_t, connection_timeout), .dflt = "3.0" },
- { FR_CONF_OFFSET("reconnect_delay", fr_connection_conf_t, reconnection_delay), .dflt = "1" },
+static conf_parser_t const trunk_config_connection[] = {
+ { FR_CONF_OFFSET("connect_timeout", connection_conf_t, connection_timeout), .dflt = "3.0" },
+ { FR_CONF_OFFSET("reconnect_delay", connection_conf_t, reconnection_delay), .dflt = "1" },
CONF_PARSER_TERMINATOR
};
#ifndef TRUNK_TESTS
-conf_parser_t const fr_trunk_config[] = {
- { FR_CONF_OFFSET("start", fr_trunk_conf_t, start), .dflt = "5" },
- { FR_CONF_OFFSET("min", fr_trunk_conf_t, min), .dflt = "1" },
- { FR_CONF_OFFSET("max", fr_trunk_conf_t, max), .dflt = "5" },
- { FR_CONF_OFFSET("connecting", fr_trunk_conf_t, connecting), .dflt = "2" },
- { FR_CONF_OFFSET("uses", fr_trunk_conf_t, max_uses), .dflt = "0" },
- { FR_CONF_OFFSET("lifetime", fr_trunk_conf_t, lifetime), .dflt = "0" },
+conf_parser_t const trunk_config[] = {
+ { FR_CONF_OFFSET("start", trunk_conf_t, start), .dflt = "5" },
+ { FR_CONF_OFFSET("min", trunk_conf_t, min), .dflt = "1" },
+ { FR_CONF_OFFSET("max", trunk_conf_t, max), .dflt = "5" },
+ { FR_CONF_OFFSET("connecting", trunk_conf_t, connecting), .dflt = "2" },
+ { FR_CONF_OFFSET("uses", trunk_conf_t, max_uses), .dflt = "0" },
+ { FR_CONF_OFFSET("lifetime", trunk_conf_t, lifetime), .dflt = "0" },
- { FR_CONF_OFFSET("open_delay", fr_trunk_conf_t, open_delay), .dflt = "0.2" },
- { FR_CONF_OFFSET("close_delay", fr_trunk_conf_t, close_delay), .dflt = "10.0" },
+ { FR_CONF_OFFSET("open_delay", trunk_conf_t, open_delay), .dflt = "0.2" },
+ { FR_CONF_OFFSET("close_delay", trunk_conf_t, close_delay), .dflt = "10.0" },
- { FR_CONF_OFFSET("manage_interval", fr_trunk_conf_t, manage_interval), .dflt = "0.2" },
+ { FR_CONF_OFFSET("manage_interval", trunk_conf_t, manage_interval), .dflt = "0.2" },
- { FR_CONF_OFFSET_SUBSECTION("connection", 0, fr_trunk_conf_t, conn_conf, fr_trunk_config_connection), .subcs_size = sizeof(fr_trunk_config_connection) },
- { FR_CONF_POINTER("request", 0, CONF_FLAG_SUBSECTION, NULL), .subcs = (void const *) fr_trunk_config_request },
+ { FR_CONF_OFFSET_SUBSECTION("connection", 0, trunk_conf_t, conn_conf, trunk_config_connection), .subcs_size = sizeof(trunk_config_connection) },
+ { FR_CONF_POINTER("request", 0, CONF_FLAG_SUBSECTION, NULL), .subcs = (void const *) trunk_config_request },
CONF_PARSER_TERMINATOR
};
#ifndef NDEBUG
/** Map request states to trigger names
*
- * Must stay in the same order as #fr_trunk_connection_state_t
+ * Must stay in the same order as #trunk_request_state_t
*/
-static fr_table_num_indexed_bit_pos_t const fr_trunk_req_trigger_names[] = {
- { L("pool.request_init"), FR_TRUNK_REQUEST_STATE_INIT }, /* 0x0000 - bit 0 */
- { L("pool.request_unassigned"), FR_TRUNK_REQUEST_STATE_UNASSIGNED }, /* 0x0001 - bit 1 */
- { L("pool.request_backlog"), FR_TRUNK_REQUEST_STATE_BACKLOG }, /* 0x0002 - bit 2 */
- { L("pool.request_pending"), FR_TRUNK_REQUEST_STATE_PENDING }, /* 0x0004 - bit 3 */
- { L("pool.request_partial"), FR_TRUNK_REQUEST_STATE_PARTIAL }, /* 0x0008 - bit 4 */
- { L("pool.request_sent"), FR_TRUNK_REQUEST_STATE_SENT }, /* 0x0010 - bit 5 */
- { L("pool.request_complete"), FR_TRUNK_REQUEST_STATE_COMPLETE }, /* 0x0020 - bit 6 */
- { L("pool.request_state_failed"), FR_TRUNK_REQUEST_STATE_FAILED }, /* 0x0040 - bit 7 */
- { L("pool.request_state_cancel"), FR_TRUNK_REQUEST_STATE_CANCEL }, /* 0x0080 - bit 8 */
- { L("pool.request_state_cancel_sent"), FR_TRUNK_REQUEST_STATE_CANCEL_SENT }, /* 0x0100 - bit 9 */
- { L("pool.request_state_cancel_partial"), FR_TRUNK_REQUEST_STATE_CANCEL_PARTIAL }, /* 0x0200 - bit 10 */
- { L("pool.request_state_cancel_complete"), FR_TRUNK_REQUEST_STATE_CANCEL_COMPLETE }, /* 0x0400 - bit 11 */
- { L("pool.request_state_idle"), FR_TRUNK_REQUEST_STATE_IDLE } /* 0x0800 - bit 12 */
+static fr_table_num_indexed_bit_pos_t const trunk_req_trigger_names[] = {
+ { L("pool.request_init"), TRUNK_REQUEST_STATE_INIT }, /* 0x0000 - bit 0 */
+ { L("pool.request_unassigned"), TRUNK_REQUEST_STATE_UNASSIGNED }, /* 0x0001 - bit 1 */
+ { L("pool.request_backlog"), TRUNK_REQUEST_STATE_BACKLOG }, /* 0x0002 - bit 2 */
+ { L("pool.request_pending"), TRUNK_REQUEST_STATE_PENDING }, /* 0x0004 - bit 3 */
+ { L("pool.request_partial"), TRUNK_REQUEST_STATE_PARTIAL }, /* 0x0008 - bit 4 */
+ { L("pool.request_sent"), TRUNK_REQUEST_STATE_SENT }, /* 0x0010 - bit 5 */
+ { L("pool.request_complete"), TRUNK_REQUEST_STATE_COMPLETE }, /* 0x0020 - bit 6 */
+ { L("pool.request_state_failed"), TRUNK_REQUEST_STATE_FAILED }, /* 0x0040 - bit 7 */
+ { L("pool.request_state_cancel"), TRUNK_REQUEST_STATE_CANCEL }, /* 0x0080 - bit 8 */
+ { L("pool.request_state_cancel_sent"), TRUNK_REQUEST_STATE_CANCEL_SENT }, /* 0x0100 - bit 9 */
+ { L("pool.request_state_cancel_partial"), TRUNK_REQUEST_STATE_CANCEL_PARTIAL }, /* 0x0200 - bit 10 */
+ { L("pool.request_state_cancel_complete"), TRUNK_REQUEST_STATE_CANCEL_COMPLETE }, /* 0x0400 - bit 11 */
+ { L("pool.request_state_idle"), TRUNK_REQUEST_STATE_IDLE } /* 0x0800 - bit 12 */
};
-static size_t fr_trunk_req_trigger_names_len = NUM_ELEMENTS(fr_trunk_req_trigger_names);
+static size_t trunk_req_trigger_names_len = NUM_ELEMENTS(trunk_req_trigger_names);
#endif
-static fr_table_num_ordered_t const fr_trunk_request_states[] = {
- { L("INIT"), FR_TRUNK_REQUEST_STATE_INIT },
- { L("UNASSIGNED"), FR_TRUNK_REQUEST_STATE_UNASSIGNED },
- { L("BACKLOG"), FR_TRUNK_REQUEST_STATE_BACKLOG },
- { L("PENDING"), FR_TRUNK_REQUEST_STATE_PENDING },
- { L("PARTIAL"), FR_TRUNK_REQUEST_STATE_PARTIAL },
- { L("SENT"), FR_TRUNK_REQUEST_STATE_SENT },
- { L("COMPLETE"), FR_TRUNK_REQUEST_STATE_COMPLETE },
- { L("FAILED"), FR_TRUNK_REQUEST_STATE_FAILED },
- { L("CANCEL"), FR_TRUNK_REQUEST_STATE_CANCEL },
- { L("CANCEL-SENT"), FR_TRUNK_REQUEST_STATE_CANCEL_SENT },
- { L("CANCEL-PARTIAL"), FR_TRUNK_REQUEST_STATE_CANCEL_PARTIAL },
- { L("CANCEL-COMPLETE"), FR_TRUNK_REQUEST_STATE_CANCEL_COMPLETE },
- { L("IDLE"), FR_TRUNK_REQUEST_STATE_IDLE }
+static fr_table_num_ordered_t const trunk_request_states[] = {
+ { L("INIT"), TRUNK_REQUEST_STATE_INIT },
+ { L("UNASSIGNED"), TRUNK_REQUEST_STATE_UNASSIGNED },
+ { L("BACKLOG"), TRUNK_REQUEST_STATE_BACKLOG },
+ { L("PENDING"), TRUNK_REQUEST_STATE_PENDING },
+ { L("PARTIAL"), TRUNK_REQUEST_STATE_PARTIAL },
+ { L("SENT"), TRUNK_REQUEST_STATE_SENT },
+ { L("COMPLETE"), TRUNK_REQUEST_STATE_COMPLETE },
+ { L("FAILED"), TRUNK_REQUEST_STATE_FAILED },
+ { L("CANCEL"), TRUNK_REQUEST_STATE_CANCEL },
+ { L("CANCEL-SENT"), TRUNK_REQUEST_STATE_CANCEL_SENT },
+ { L("CANCEL-PARTIAL"), TRUNK_REQUEST_STATE_CANCEL_PARTIAL },
+ { L("CANCEL-COMPLETE"), TRUNK_REQUEST_STATE_CANCEL_COMPLETE },
+ { L("IDLE"), TRUNK_REQUEST_STATE_IDLE }
};
-static size_t fr_trunk_request_states_len = NUM_ELEMENTS(fr_trunk_request_states);
+static size_t trunk_request_states_len = NUM_ELEMENTS(trunk_request_states);
/** Map connection states to trigger names
*
- * Must stay in the same order as #fr_trunk_connection_state_t
+ * Must stay in the same order as #trunk_connection_state_t
*/
-static fr_table_num_indexed_bit_pos_t const fr_trunk_conn_trigger_names[] = {
- { L("pool.connection_halted"), FR_TRUNK_CONN_HALTED }, /* 0x0000 - bit 0 */
- { L("pool.connection_init"), FR_TRUNK_CONN_INIT }, /* 0x0001 - bit 1 */
- { L("pool.connection_connecting"), FR_TRUNK_CONN_CONNECTING }, /* 0x0002 - bit 2 */
- { L("pool.connection_active"), FR_TRUNK_CONN_ACTIVE }, /* 0x0004 - bit 3 */
- { L("pool.connection_closed"), FR_TRUNK_CONN_CLOSED }, /* 0x0008 - bit 4 */
- { L("pool.connection_full"), FR_TRUNK_CONN_FULL }, /* 0x0010 - bit 5 */
- { L("pool.connection_inactive"), FR_TRUNK_CONN_INACTIVE }, /* 0x0020 - bit 6 */
- { L("pool.connection_inactive_draining"), FR_TRUNK_CONN_INACTIVE_DRAINING }, /* 0x0040 - bit 7 */
- { L("pool.connection_draining"), FR_TRUNK_CONN_DRAINING }, /* 0x0080 - bit 8 */
- { L("pool.connection_draining_to_free"), FR_TRUNK_CONN_DRAINING_TO_FREE } /* 0x0100 - bit 9 */
+static fr_table_num_indexed_bit_pos_t const trunk_conn_trigger_names[] = {
+ { L("pool.connection_halted"), TRUNK_CONN_HALTED }, /* 0x0000 - bit 0 */
+ { L("pool.connection_init"), TRUNK_CONN_INIT }, /* 0x0001 - bit 1 */
+ { L("pool.connection_connecting"), TRUNK_CONN_CONNECTING }, /* 0x0002 - bit 2 */
+ { L("pool.connection_active"), TRUNK_CONN_ACTIVE }, /* 0x0004 - bit 3 */
+ { L("pool.connection_closed"), TRUNK_CONN_CLOSED }, /* 0x0008 - bit 4 */
+ { L("pool.connection_full"), TRUNK_CONN_FULL }, /* 0x0010 - bit 5 */
+ { L("pool.connection_inactive"), TRUNK_CONN_INACTIVE }, /* 0x0020 - bit 6 */
+ { L("pool.connection_inactive_draining"), TRUNK_CONN_INACTIVE_DRAINING }, /* 0x0040 - bit 7 */
+ { L("pool.connection_draining"), TRUNK_CONN_DRAINING }, /* 0x0080 - bit 8 */
+ { L("pool.connection_draining_to_free"), TRUNK_CONN_DRAINING_TO_FREE } /* 0x0100 - bit 9 */
};
-static size_t fr_trunk_conn_trigger_names_len = NUM_ELEMENTS(fr_trunk_conn_trigger_names);
+static size_t trunk_conn_trigger_names_len = NUM_ELEMENTS(trunk_conn_trigger_names);
-static fr_table_num_ordered_t const fr_trunk_states[] = {
- { L("IDLE"), FR_TRUNK_STATE_IDLE },
- { L("ACTIVE"), FR_TRUNK_STATE_ACTIVE },
- { L("PENDING"), FR_TRUNK_STATE_PENDING }
+static fr_table_num_ordered_t const trunk_states[] = {
+ { L("IDLE"), TRUNK_STATE_IDLE },
+ { L("ACTIVE"), TRUNK_STATE_ACTIVE },
+ { L("PENDING"), TRUNK_STATE_PENDING }
};
-static size_t fr_trunk_states_len = NUM_ELEMENTS(fr_trunk_states);
-
-static fr_table_num_ordered_t const fr_trunk_connection_states[] = {
- { L("INIT"), FR_TRUNK_CONN_INIT },
- { L("HALTED"), FR_TRUNK_CONN_HALTED },
- { L("CONNECTING"), FR_TRUNK_CONN_CONNECTING },
- { L("ACTIVE"), FR_TRUNK_CONN_ACTIVE },
- { L("CLOSED"), FR_TRUNK_CONN_CLOSED },
- { L("FULL"), FR_TRUNK_CONN_FULL },
- { L("INACTIVE"), FR_TRUNK_CONN_INACTIVE },
- { L("INACTIVE-DRAINING"), FR_TRUNK_CONN_INACTIVE_DRAINING },
- { L("DRAINING"), FR_TRUNK_CONN_DRAINING },
- { L("DRAINING-TO-FREE"), FR_TRUNK_CONN_DRAINING_TO_FREE }
+static size_t trunk_states_len = NUM_ELEMENTS(trunk_states);
+
+static fr_table_num_ordered_t const trunk_connection_states[] = {
+ { L("INIT"), TRUNK_CONN_INIT },
+ { L("HALTED"), TRUNK_CONN_HALTED },
+ { L("CONNECTING"), TRUNK_CONN_CONNECTING },
+ { L("ACTIVE"), TRUNK_CONN_ACTIVE },
+ { L("CLOSED"), TRUNK_CONN_CLOSED },
+ { L("FULL"), TRUNK_CONN_FULL },
+ { L("INACTIVE"), TRUNK_CONN_INACTIVE },
+ { L("INACTIVE-DRAINING"), TRUNK_CONN_INACTIVE_DRAINING },
+ { L("DRAINING"), TRUNK_CONN_DRAINING },
+ { L("DRAINING-TO-FREE"), TRUNK_CONN_DRAINING_TO_FREE }
};
-static size_t fr_trunk_connection_states_len = NUM_ELEMENTS(fr_trunk_connection_states);
+static size_t trunk_connection_states_len = NUM_ELEMENTS(trunk_connection_states);
-static fr_table_num_ordered_t const fr_trunk_cancellation_reasons[] = {
- { L("FR_TRUNK_CANCEL_REASON_NONE"), FR_TRUNK_CANCEL_REASON_NONE },
- { L("FR_TRUNK_CANCEL_REASON_SIGNAL"), FR_TRUNK_CANCEL_REASON_SIGNAL },
- { L("FR_TRUNK_CANCEL_REASON_MOVE"), FR_TRUNK_CANCEL_REASON_MOVE },
- { L("FR_TRUNK_CANCEL_REASON_REQUEUE"), FR_TRUNK_CANCEL_REASON_REQUEUE }
+static fr_table_num_ordered_t const trunk_cancellation_reasons[] = {
+ { L("TRUNK_CANCEL_REASON_NONE"), TRUNK_CANCEL_REASON_NONE },
+ { L("TRUNK_CANCEL_REASON_SIGNAL"), TRUNK_CANCEL_REASON_SIGNAL },
+ { L("TRUNK_CANCEL_REASON_MOVE"), TRUNK_CANCEL_REASON_MOVE },
+ { L("TRUNK_CANCEL_REASON_REQUEUE"), TRUNK_CANCEL_REASON_REQUEUE }
};
-static size_t fr_trunk_cancellation_reasons_len = NUM_ELEMENTS(fr_trunk_cancellation_reasons);
+static size_t trunk_cancellation_reasons_len = NUM_ELEMENTS(trunk_cancellation_reasons);
-static fr_table_num_ordered_t const fr_trunk_connection_events[] = {
- { L("FR_TRUNK_CONN_EVENT_NONE"), FR_TRUNK_CONN_EVENT_NONE },
- { L("FR_TRUNK_CONN_EVENT_READ"), FR_TRUNK_CONN_EVENT_READ },
- { L("FR_TRUNK_CONN_EVENT_WRITE"), FR_TRUNK_CONN_EVENT_WRITE },
- { L("FR_TRUNK_CONN_EVENT_BOTH"), FR_TRUNK_CONN_EVENT_BOTH },
+static fr_table_num_ordered_t const trunk_connection_events[] = {
+ { L("TRUNK_CONN_EVENT_NONE"), TRUNK_CONN_EVENT_NONE },
+ { L("TRUNK_CONN_EVENT_READ"), TRUNK_CONN_EVENT_READ },
+ { L("TRUNK_CONN_EVENT_WRITE"), TRUNK_CONN_EVENT_WRITE },
+ { L("TRUNK_CONN_EVENT_BOTH"), TRUNK_CONN_EVENT_BOTH },
};
-static size_t fr_trunk_connection_events_len = NUM_ELEMENTS(fr_trunk_connection_events);
+static size_t trunk_connection_events_len = NUM_ELEMENTS(trunk_connection_events);
#define CONN_TRIGGER(_state) do { \
if (trunk->pub.triggers) { \
trigger_exec(unlang_interpret_get_thread_default(), \
- NULL, fr_table_str_by_value(fr_trunk_conn_trigger_names, _state, \
+ NULL, fr_table_str_by_value(trunk_conn_trigger_names, _state, \
"<INVALID>"), true, NULL); \
} \
} while (0)
do { \
_log("[%" PRIu64 "] Trunk connection changed state %s -> %s", \
tconn->pub.conn->id, \
- fr_table_str_by_value(fr_trunk_connection_states, tconn->pub.state, "<INVALID>"), \
- fr_table_str_by_value(fr_trunk_connection_states, _new, "<INVALID>")); \
+ fr_table_str_by_value(trunk_connection_states, tconn->pub.state, "<INVALID>"), \
+ fr_table_str_by_value(trunk_connection_states, _new, "<INVALID>")); \
tconn->pub.state = _new; \
CONN_TRIGGER(_new); \
trunk_requests_per_connection(NULL, NULL, trunk, fr_time(), false); \
do { \
if (!fr_cond_assert_msg(0, "[%" PRIu64 "] Trunk connection invalid transition %s -> %s", \
tconn->pub.conn->id, \
- fr_table_str_by_value(fr_trunk_connection_states, tconn->pub.state, "<INVALID>"), \
- fr_table_str_by_value(fr_trunk_connection_states, _new, "<INVALID>"))) return; \
+ fr_table_str_by_value(trunk_connection_states, tconn->pub.state, "<INVALID>"), \
+ fr_table_str_by_value(trunk_connection_states, _new, "<INVALID>"))) return; \
} while (0)
#ifndef NDEBUG
void trunk_request_state_log_entry_add(char const *function, int line,
- fr_trunk_request_t *treq, fr_trunk_request_state_t new) CC_HINT(nonnull);
+ trunk_request_t *treq, trunk_request_state_t new) CC_HINT(nonnull);
#define REQUEST_TRIGGER(_state) do { \
if (trunk->pub.triggers) { \
trigger_exec(unlang_interpret_get_thread_default(), \
- NULL, fr_table_str_by_value(fr_trunk_req_trigger_names, _state, \
+ NULL, fr_table_str_by_value(trunk_req_trigger_names, _state, \
"<INVALID>"), true, NULL); \
} \
} while (0)
request_t *request = treq->pub.request; \
ROPTIONAL(RDEBUG3, DEBUG3, "Trunk request %" PRIu64 " changed state %s -> %s", \
treq->id, \
- fr_table_str_by_value(fr_trunk_request_states, treq->pub.state, "<INVALID>"), \
- fr_table_str_by_value(fr_trunk_request_states, _new, "<INVALID>")); \
+ fr_table_str_by_value(trunk_request_states, treq->pub.state, "<INVALID>"), \
+ fr_table_str_by_value(trunk_request_states, _new, "<INVALID>")); \
trunk_request_state_log_entry_add(__FUNCTION__, __LINE__, treq, _new); \
treq->pub.state = _new; \
REQUEST_TRIGGER(_new); \
} while (0)
#define REQUEST_BAD_STATE_TRANSITION(_new) \
do { \
- fr_trunk_request_state_log(&default_log, L_ERR, __FILE__, __LINE__, treq); \
+ trunk_request_state_log(&default_log, L_ERR, __FILE__, __LINE__, treq); \
if (!fr_cond_assert_msg(0, "Trunk request %" PRIu64 " invalid transition %s -> %s", \
treq->id, \
- fr_table_str_by_value(fr_trunk_request_states, treq->pub.state, "<INVALID>"), \
- fr_table_str_by_value(fr_trunk_request_states, _new, "<INVALID>"))) return; \
+ fr_table_str_by_value(trunk_request_states, treq->pub.state, "<INVALID>"), \
+ fr_table_str_by_value(trunk_request_states, _new, "<INVALID>"))) return; \
} while (0)
#else
/** Record a request state transition
request_t *request = treq->pub.request; \
ROPTIONAL(RDEBUG3, DEBUG3, "Trunk request %" PRIu64 " changed state %s -> %s", \
treq->id, \
- fr_table_str_by_value(fr_trunk_request_states, treq->pub.state, "<INVALID>"), \
- fr_table_str_by_value(fr_trunk_request_states, _new, "<INVALID>")); \
+ fr_table_str_by_value(trunk_request_states, treq->pub.state, "<INVALID>"), \
+ fr_table_str_by_value(trunk_request_states, _new, "<INVALID>")); \
treq->pub.state = _new; \
} while (0)
#define REQUEST_BAD_STATE_TRANSITION(_new) \
do { \
if (!fr_cond_assert_msg(0, "Trunk request %" PRIu64 " invalid transition %s -> %s", \
treq->id, \
- fr_table_str_by_value(fr_trunk_request_states, treq->pub.state, "<INVALID>"), \
- fr_table_str_by_value(fr_trunk_request_states, _new, "<INVALID>"))) return; \
+ fr_table_str_by_value(trunk_request_states, treq->pub.state, "<INVALID>"), \
+ fr_table_str_by_value(trunk_request_states, _new, "<INVALID>"))) return; \
} while (0)
#endif
ROPTIONAL(RDEBUG3, DEBUG3, "Calling request_cancel(conn=%p, preq=%p, reason=%s, uctx=%p)", \
(_treq)->pub.tconn->pub.conn, \
(_treq)->pub.preq, \
- fr_table_str_by_value(fr_trunk_cancellation_reasons, \
+ fr_table_str_by_value(trunk_cancellation_reasons, \
(_reason), \
"<INVALID>"), \
(_treq)->pub.trunk->uctx); \
(_treq)->pub.request, \
(_treq)->pub.preq, \
(_treq)->pub.rctx, \
- fr_table_str_by_value(fr_trunk_request_states, (_prev_state), "<INVALID>"), \
+ fr_table_str_by_value(trunk_request_states, (_prev_state), "<INVALID>"), \
(_treq)->pub.trunk->uctx); \
(_treq)->pub.trunk->in_handler = (void *)(_treq)->pub.trunk->funcs.request_fail; \
(_treq)->pub.trunk->funcs.request_fail((_treq)->pub.request, (_treq)->pub.preq, (_treq)->pub.rctx, _prev_state, (_treq)->pub.trunk->uctx); \
(_tconn), \
(_tconn)->pub.conn, \
(_tconn)->pub.trunk->el, \
- fr_table_str_by_value(fr_trunk_connection_events, (_events), "<INVALID>"), \
+ fr_table_str_by_value(trunk_connection_events, (_events), "<INVALID>"), \
(_tconn)->pub.trunk->uctx); \
(_tconn)->pub.trunk->in_handler = (void *)(_tconn)->pub.trunk->funcs.connection_notify; \
(_tconn)->pub.trunk->funcs.connection_notify((_tconn), (_tconn)->pub.conn, (_tconn)->pub.trunk->el, (_events), (_tconn)->pub.trunk->uctx); \
#define IN_REQUEST_DEMUX(_trunk) (((_trunk)->funcs.request_demux) && ((_trunk)->in_handler == (void *)(_trunk)->funcs.request_demux))
#define IN_REQUEST_CANCEL_MUX(_trunk) (((_trunk)->funcs.request_cancel_mux) && ((_trunk)->in_handler == (void *)(_trunk)->funcs.request_cancel_mux))
-#define IS_SERVICEABLE(_tconn) ((_tconn)->pub.state & FR_TRUNK_CONN_SERVICEABLE)
-#define IS_PROCESSING(_tconn) ((tconn)->pub.state & FR_TRUNK_CONN_PROCESSING)
+#define IS_SERVICEABLE(_tconn) ((_tconn)->pub.state & TRUNK_CONN_SERVICEABLE)
+#define IS_PROCESSING(_tconn) ((_tconn)->pub.state & TRUNK_CONN_PROCESSING)
/** Remove the current request from the backlog
*
do { \
int _ret; \
if ((fr_minmax_heap_num_elements((_tconn)->pub.trunk->active) == 1)) break; \
- if (!fr_cond_assert((_tconn)->pub.state == FR_TRUNK_CONN_ACTIVE)) break; \
+ if (!fr_cond_assert((_tconn)->pub.state == TRUNK_CONN_ACTIVE)) break; \
_ret = fr_minmax_heap_extract((_tconn)->pub.trunk->active, (_tconn)); \
if (!fr_cond_assert_msg(_ret == 0, "Failed extracting conn from active heap: %s", fr_strerror())) break; \
fr_minmax_heap_insert((_tconn)->pub.trunk->active, (_tconn)); \
/** Call a list of watch functions associated with a state
*
*/
-static inline void trunk_watch_call(fr_trunk_t *trunk, fr_dlist_head_t *list, fr_trunk_state_t state)
+static inline void trunk_watch_call(trunk_t *trunk, fr_dlist_head_t *list, trunk_state_t state)
{
/*
* Nested watcher calls are not allowed
fr_assert(trunk->next_watcher == NULL);
while ((trunk->next_watcher = fr_dlist_next(list, trunk->next_watcher))) {
- fr_trunk_watch_entry_t *entry = trunk->next_watcher;
+ trunk_watch_entry_t *entry = trunk->next_watcher;
bool oneshot = entry->oneshot; /* Watcher could be freed, so store now */
if (!entry->enabled) continue;
* - -1 if the function wasn't present in the watch list.
* - -2 if an invalid state was passed.
*/
-int fr_trunk_del_watch(fr_trunk_t *trunk, fr_trunk_state_t state, fr_trunk_watch_t watch)
+int trunk_del_watch(trunk_t *trunk, trunk_state_t state, trunk_watch_t watch)
{
- fr_trunk_watch_entry_t *entry = NULL;
+ trunk_watch_entry_t *entry = NULL;
fr_dlist_head_t *list;
- if (state >= FR_TRUNK_STATE_MAX) return -2;
+ if (state >= TRUNK_STATE_MAX) return -2;
list = &trunk->watch[state];
while ((entry = fr_dlist_next(list, entry))) {
* - NULL if an invalid state is passed.
* - A new watch entry handle on success.
*/
-fr_trunk_watch_entry_t *fr_trunk_add_watch(fr_trunk_t *trunk, fr_trunk_state_t state,
- fr_trunk_watch_t watch, bool oneshot, void const *uctx)
+trunk_watch_entry_t *trunk_add_watch(trunk_t *trunk, trunk_state_t state,
+ trunk_watch_t watch, bool oneshot, void const *uctx)
{
- fr_trunk_watch_entry_t *entry;
+ trunk_watch_entry_t *entry;
fr_dlist_head_t *list;
- if (state >= FR_TRUNK_STATE_MAX) return NULL;
+ if (state >= TRUNK_STATE_MAX) return NULL;
list = &trunk->watch[state];
- MEM(entry = talloc_zero(trunk, fr_trunk_watch_entry_t));
+ MEM(entry = talloc_zero(trunk, trunk_watch_entry_t));
entry->func = watch;
entry->oneshot = oneshot;
#define TRUNK_STATE_TRANSITION(_new) \
do { \
DEBUG3("Trunk changed state %s -> %s", \
- fr_table_str_by_value(fr_trunk_states, trunk->pub.state, "<INVALID>"), \
- fr_table_str_by_value(fr_trunk_states, _new, "<INVALID>")); \
+ fr_table_str_by_value(trunk_states, trunk->pub.state, "<INVALID>"), \
+ fr_table_str_by_value(trunk_states, _new, "<INVALID>")); \
CALL_WATCHERS(trunk, _new); \
trunk->pub.state = _new; \
} while (0)
-static void trunk_request_enter_backlog(fr_trunk_request_t *treq, bool new);
-static void trunk_request_enter_pending(fr_trunk_request_t *treq, fr_trunk_connection_t *tconn, bool new);
-static void trunk_request_enter_partial(fr_trunk_request_t *treq);
-static void trunk_request_enter_sent(fr_trunk_request_t *treq);
-static void trunk_request_enter_idle(fr_trunk_request_t *treq);
-static void trunk_request_enter_failed(fr_trunk_request_t *treq);
-static void trunk_request_enter_complete(fr_trunk_request_t *treq);
-static void trunk_request_enter_cancel(fr_trunk_request_t *treq, fr_trunk_cancel_reason_t reason);
-static void trunk_request_enter_cancel_sent(fr_trunk_request_t *treq);
-static void trunk_request_enter_cancel_complete(fr_trunk_request_t *treq);
+static void trunk_request_enter_backlog(trunk_request_t *treq, bool new);
+static void trunk_request_enter_pending(trunk_request_t *treq, trunk_connection_t *tconn, bool new);
+static void trunk_request_enter_partial(trunk_request_t *treq);
+static void trunk_request_enter_sent(trunk_request_t *treq);
+static void trunk_request_enter_idle(trunk_request_t *treq);
+static void trunk_request_enter_failed(trunk_request_t *treq);
+static void trunk_request_enter_complete(trunk_request_t *treq);
+static void trunk_request_enter_cancel(trunk_request_t *treq, trunk_cancel_reason_t reason);
+static void trunk_request_enter_cancel_sent(trunk_request_t *treq);
+static void trunk_request_enter_cancel_complete(trunk_request_t *treq);
static uint64_t trunk_requests_per_connection(uint16_t *conn_count_out, uint32_t *req_conn_out,
- fr_trunk_t *trunk, fr_time_t now, NDEBUG_UNUSED bool verify);
-
-static int trunk_connection_spawn(fr_trunk_t *trunk, fr_time_t now);
-static inline void trunk_connection_auto_full(fr_trunk_connection_t *tconn);
-static inline void trunk_connection_auto_unfull(fr_trunk_connection_t *tconn);
-static inline void trunk_connection_readable(fr_trunk_connection_t *tconn);
-static inline void trunk_connection_writable(fr_trunk_connection_t *tconn);
-static void trunk_connection_event_update(fr_trunk_connection_t *tconn);
-static void trunk_connection_enter_full(fr_trunk_connection_t *tconn);
-static void trunk_connection_enter_inactive(fr_trunk_connection_t *tconn);
-static void trunk_connection_enter_inactive_draining(fr_trunk_connection_t *tconn);
-static void trunk_connection_enter_draining(fr_trunk_connection_t *tconn);
-static void trunk_connection_enter_draining_to_free(fr_trunk_connection_t *tconn);
-static void trunk_connection_enter_active(fr_trunk_connection_t *tconn);
-
-static void trunk_rebalance(fr_trunk_t *trunk);
-static void trunk_manage(fr_trunk_t *trunk, fr_time_t now);
+ trunk_t *trunk, fr_time_t now, NDEBUG_UNUSED bool verify);
+
+static int trunk_connection_spawn(trunk_t *trunk, fr_time_t now);
+static inline void trunk_connection_auto_full(trunk_connection_t *tconn);
+static inline void trunk_connection_auto_unfull(trunk_connection_t *tconn);
+static inline void trunk_connection_readable(trunk_connection_t *tconn);
+static inline void trunk_connection_writable(trunk_connection_t *tconn);
+static void trunk_connection_event_update(trunk_connection_t *tconn);
+static void trunk_connection_enter_full(trunk_connection_t *tconn);
+static void trunk_connection_enter_inactive(trunk_connection_t *tconn);
+static void trunk_connection_enter_inactive_draining(trunk_connection_t *tconn);
+static void trunk_connection_enter_draining(trunk_connection_t *tconn);
+static void trunk_connection_enter_draining_to_free(trunk_connection_t *tconn);
+static void trunk_connection_enter_active(trunk_connection_t *tconn);
+
+static void trunk_rebalance(trunk_t *trunk);
+static void trunk_manage(trunk_t *trunk, fr_time_t now);
static void _trunk_timer(fr_event_list_t *el, fr_time_t now, void *uctx);
-static void trunk_backlog_drain(fr_trunk_t *trunk);
+static void trunk_backlog_drain(trunk_t *trunk);
/** Compare two protocol requests
*
*/
static int8_t _trunk_request_prioritise(void const *a, void const *b)
{
- fr_trunk_request_t const *treq_a = talloc_get_type_abort_const(a, fr_trunk_request_t);
- fr_trunk_request_t const *treq_b = talloc_get_type_abort_const(b, fr_trunk_request_t);
+ trunk_request_t const *treq_a = talloc_get_type_abort_const(a, trunk_request_t);
+ trunk_request_t const *treq_b = talloc_get_type_abort_const(b, trunk_request_t);
fr_assert(treq_a->pub.trunk == treq_b->pub.trunk);
*
* @param[in] treq to trigger a state change for.
*/
-static void trunk_request_remove_from_conn(fr_trunk_request_t *treq)
+static void trunk_request_remove_from_conn(trunk_request_t *treq)
{
- fr_trunk_connection_t *tconn = treq->pub.tconn;
- fr_trunk_t *trunk = treq->pub.trunk;
+ trunk_connection_t *tconn = treq->pub.tconn;
+ trunk_t *trunk = treq->pub.trunk;
if (!fr_cond_assert(!tconn || (tconn->pub.trunk == trunk))) return;
switch (treq->pub.state) {
- case FR_TRUNK_REQUEST_STATE_UNASSIGNED:
+ case TRUNK_REQUEST_STATE_UNASSIGNED:
return; /* Not associated with connection */
- case FR_TRUNK_REQUEST_STATE_PENDING:
+ case TRUNK_REQUEST_STATE_PENDING:
REQUEST_EXTRACT_PENDING(treq);
break;
- case FR_TRUNK_REQUEST_STATE_PARTIAL:
+ case TRUNK_REQUEST_STATE_PARTIAL:
REQUEST_EXTRACT_PARTIAL(treq);
break;
- case FR_TRUNK_REQUEST_STATE_SENT:
+ case TRUNK_REQUEST_STATE_SENT:
REQUEST_EXTRACT_SENT(treq);
break;
- case FR_TRUNK_REQUEST_STATE_IDLE:
+ case TRUNK_REQUEST_STATE_IDLE:
REQUEST_EXTRACT_IDLE(treq);
break;
- case FR_TRUNK_REQUEST_STATE_CANCEL:
+ case TRUNK_REQUEST_STATE_CANCEL:
REQUEST_EXTRACT_CANCEL(treq);
break;
- case FR_TRUNK_REQUEST_STATE_CANCEL_PARTIAL:
+ case TRUNK_REQUEST_STATE_CANCEL_PARTIAL:
REQUEST_EXTRACT_CANCEL_PARTIAL(treq);
break;
- case FR_TRUNK_REQUEST_STATE_CANCEL_SENT:
+ case TRUNK_REQUEST_STATE_CANCEL_SENT:
REQUEST_EXTRACT_CANCEL_SENT(treq);
break;
DO_REQUEST_CONN_RELEASE(treq);
switch (tconn->pub.state){
- case FR_TRUNK_CONN_FULL:
+ case TRUNK_CONN_FULL:
trunk_connection_auto_unfull(tconn); /* Check if we can switch back to active */
- if (tconn->pub.state == FR_TRUNK_CONN_FULL) break; /* Only fallthrough if conn is now active */
+ if (tconn->pub.state == TRUNK_CONN_FULL) break; /* Only fallthrough if conn is now active */
FALL_THROUGH;
- case FR_TRUNK_CONN_ACTIVE:
+ case TRUNK_CONN_ACTIVE:
CONN_REORDER(tconn);
break;
/** Transition a request to the unassigned state, in preparation for re-assignment
*
* @note treq->tconn may be inviable after calling
- * if treq->conn and fr_connection_signals_pause are not used.
+ * if treq->conn and connection_signals_pause are not used.
* This is due to call to trunk_request_remove_from_conn.
*
* @param[in] treq to trigger a state change for.
*/
-static void trunk_request_enter_unassigned(fr_trunk_request_t *treq)
+static void trunk_request_enter_unassigned(trunk_request_t *treq)
{
- fr_trunk_t *trunk = treq->pub.trunk;
+ trunk_t *trunk = treq->pub.trunk;
switch (treq->pub.state) {
- case FR_TRUNK_REQUEST_STATE_UNASSIGNED:
+ case TRUNK_REQUEST_STATE_UNASSIGNED:
return;
- case FR_TRUNK_REQUEST_STATE_BACKLOG:
+ case TRUNK_REQUEST_STATE_BACKLOG:
REQUEST_EXTRACT_BACKLOG(treq);
break;
- case FR_TRUNK_REQUEST_STATE_PENDING:
- case FR_TRUNK_REQUEST_STATE_CANCEL:
- case FR_TRUNK_REQUEST_STATE_CANCEL_PARTIAL:
- case FR_TRUNK_REQUEST_STATE_CANCEL_SENT:
+ case TRUNK_REQUEST_STATE_PENDING:
+ case TRUNK_REQUEST_STATE_CANCEL:
+ case TRUNK_REQUEST_STATE_CANCEL_PARTIAL:
+ case TRUNK_REQUEST_STATE_CANCEL_SENT:
trunk_request_remove_from_conn(treq);
break;
default:
- REQUEST_BAD_STATE_TRANSITION(FR_TRUNK_REQUEST_STATE_UNASSIGNED);
+ REQUEST_BAD_STATE_TRANSITION(TRUNK_REQUEST_STATE_UNASSIGNED);
}
- REQUEST_STATE_TRANSITION(FR_TRUNK_REQUEST_STATE_UNASSIGNED);
+ REQUEST_STATE_TRANSITION(TRUNK_REQUEST_STATE_UNASSIGNED);
}
/** Transition a request to the backlog state, adding it to the backlog of the trunk
*
* @note treq->tconn and treq may be inviable after calling
- * if treq->conn and fr_connection_signals_pause are not used.
+ * if treq->conn and connection_signals_pause are not used.
* This is due to call to trunk_manage.
*
* @param[in] treq to trigger a state change for.
* @param[in] new Whether this is a new request.
*/
-static void trunk_request_enter_backlog(fr_trunk_request_t *treq, bool new)
+static void trunk_request_enter_backlog(trunk_request_t *treq, bool new)
{
- fr_trunk_connection_t *tconn = treq->pub.tconn;
- fr_trunk_t *trunk = treq->pub.trunk;
+ trunk_connection_t *tconn = treq->pub.tconn;
+ trunk_t *trunk = treq->pub.trunk;
switch (treq->pub.state) {
- case FR_TRUNK_REQUEST_STATE_INIT:
- case FR_TRUNK_REQUEST_STATE_UNASSIGNED:
+ case TRUNK_REQUEST_STATE_INIT:
+ case TRUNK_REQUEST_STATE_UNASSIGNED:
break;
- case FR_TRUNK_REQUEST_STATE_PENDING:
+ case TRUNK_REQUEST_STATE_PENDING:
REQUEST_EXTRACT_PENDING(treq);
break;
- case FR_TRUNK_REQUEST_STATE_CANCEL:
+ case TRUNK_REQUEST_STATE_CANCEL:
REQUEST_EXTRACT_CANCEL(treq);
break;
default:
- REQUEST_BAD_STATE_TRANSITION(FR_TRUNK_REQUEST_STATE_BACKLOG);
+ REQUEST_BAD_STATE_TRANSITION(TRUNK_REQUEST_STATE_BACKLOG);
}
- REQUEST_STATE_TRANSITION(FR_TRUNK_REQUEST_STATE_BACKLOG);
+ REQUEST_STATE_TRANSITION(TRUNK_REQUEST_STATE_BACKLOG);
fr_heap_insert(&trunk->backlog, treq); /* Insert into the backlog heap */
/*
* Remember requests only enter the backlog if
* there's no connections which can service them.
*/
- if ((fr_trunk_connection_count_by_state(treq->pub.trunk, FR_TRUNK_CONN_CONNECTING) == 0) ||
- (fr_trunk_connection_count_by_state(treq->pub.trunk, FR_TRUNK_CONN_DRAINING) > 0)) {
- fr_trunk_connection_manage_schedule(treq->pub.trunk);
+ if ((trunk_connection_count_by_state(treq->pub.trunk, TRUNK_CONN_CONNECTING) == 0) ||
+ (trunk_connection_count_by_state(treq->pub.trunk, TRUNK_CONN_DRAINING) > 0)) {
+ trunk_connection_manage_schedule(treq->pub.trunk);
}
}
* All trunk requests being removed from a connection get passed to #trunk_request_remove_from_conn.
*
* @note treq->tconn and treq may be inviable after calling
- * if treq->conn and fr_connection_signals_pause is not used.
+ * if treq->conn and connection_signals_pause is not used.
* This is due to call to trunk_connection_event_update.
*
* @param[in] treq to trigger a state change for.
* @param[in] tconn to enqueue the request on.
* @param[in] new Whether this is a new request.
*/
-static void trunk_request_enter_pending(fr_trunk_request_t *treq, fr_trunk_connection_t *tconn, bool new)
+static void trunk_request_enter_pending(trunk_request_t *treq, trunk_connection_t *tconn, bool new)
{
- fr_trunk_t *trunk = treq->pub.trunk;
+ trunk_t *trunk = treq->pub.trunk;
fr_assert(tconn->pub.trunk == trunk);
fr_assert(IS_PROCESSING(tconn));
switch (treq->pub.state) {
- case FR_TRUNK_REQUEST_STATE_INIT:
- case FR_TRUNK_REQUEST_STATE_UNASSIGNED:
+ case TRUNK_REQUEST_STATE_INIT:
+ case TRUNK_REQUEST_STATE_UNASSIGNED:
fr_assert(!treq->pub.tconn);
break;
- case FR_TRUNK_REQUEST_STATE_BACKLOG:
+ case TRUNK_REQUEST_STATE_BACKLOG:
fr_assert(!treq->pub.tconn);
REQUEST_EXTRACT_BACKLOG(treq);
break;
- case FR_TRUNK_REQUEST_STATE_CANCEL: /* Moved from another connection */
+ case TRUNK_REQUEST_STATE_CANCEL: /* Moved from another connection */
REQUEST_EXTRACT_CANCEL(treq);
break;
default:
- REQUEST_BAD_STATE_TRANSITION(FR_TRUNK_REQUEST_STATE_PENDING);
+ REQUEST_BAD_STATE_TRANSITION(TRUNK_REQUEST_STATE_PENDING);
}
/*
*/
treq->pub.tconn = tconn;
- REQUEST_STATE_TRANSITION(FR_TRUNK_REQUEST_STATE_PENDING);
+ REQUEST_STATE_TRANSITION(TRUNK_REQUEST_STATE_PENDING);
{
request_t *request = treq->pub.request;
* Reorder the connection in the heap now it has an
* additional request.
*/
- if (tconn->pub.state == FR_TRUNK_CONN_ACTIVE) CONN_REORDER(tconn);
+ if (tconn->pub.state == TRUNK_CONN_ACTIVE) CONN_REORDER(tconn);
/*
* We have a new request, see if we need to register
*
* @param[in] treq to trigger a state change for.
*/
-static void trunk_request_enter_partial(fr_trunk_request_t *treq)
+static void trunk_request_enter_partial(trunk_request_t *treq)
{
- fr_trunk_connection_t *tconn = treq->pub.tconn;
- fr_trunk_t *trunk = treq->pub.trunk;
+ trunk_connection_t *tconn = treq->pub.tconn;
+ trunk_t *trunk = treq->pub.trunk;
if (!fr_cond_assert(!tconn || (tconn->pub.trunk == trunk))) return;
switch (treq->pub.state) {
- case FR_TRUNK_REQUEST_STATE_PENDING: /* All requests go through pending, even requeued ones */
+ case TRUNK_REQUEST_STATE_PENDING: /* All requests go through pending, even requeued ones */
REQUEST_EXTRACT_PENDING(treq);
break;
default:
- REQUEST_BAD_STATE_TRANSITION(FR_TRUNK_REQUEST_STATE_PARTIAL);
+ REQUEST_BAD_STATE_TRANSITION(TRUNK_REQUEST_STATE_PARTIAL);
}
fr_assert(!tconn->partial);
tconn->partial = treq;
- REQUEST_STATE_TRANSITION(FR_TRUNK_REQUEST_STATE_PARTIAL);
+ REQUEST_STATE_TRANSITION(TRUNK_REQUEST_STATE_PARTIAL);
}
/** Transition a request to the sent state, indicating that it's been sent in its entirety
*
* @note treq->tconn and treq may be inviable after calling
- * if treq->conn and fr_connection_signals_pause is not used.
+ * if treq->conn and connection_signals_pause is not used.
* This is due to call to trunk_connection_event_update.
*
* @param[in] treq to trigger a state change for.
*/
-static void trunk_request_enter_sent(fr_trunk_request_t *treq)
+static void trunk_request_enter_sent(trunk_request_t *treq)
{
- fr_trunk_connection_t *tconn = treq->pub.tconn;
- fr_trunk_t *trunk = treq->pub.trunk;
+ trunk_connection_t *tconn = treq->pub.tconn;
+ trunk_t *trunk = treq->pub.trunk;
if (!fr_cond_assert(!tconn || (tconn->pub.trunk == trunk))) return;
switch (treq->pub.state) {
- case FR_TRUNK_REQUEST_STATE_PENDING:
+ case TRUNK_REQUEST_STATE_PENDING:
REQUEST_EXTRACT_PENDING(treq);
break;
- case FR_TRUNK_REQUEST_STATE_PARTIAL:
+ case TRUNK_REQUEST_STATE_PARTIAL:
REQUEST_EXTRACT_PARTIAL(treq);
break;
default:
- REQUEST_BAD_STATE_TRANSITION(FR_TRUNK_REQUEST_STATE_SENT);
+ REQUEST_BAD_STATE_TRANSITION(TRUNK_REQUEST_STATE_SENT);
}
- REQUEST_STATE_TRANSITION(FR_TRUNK_REQUEST_STATE_SENT);
+ REQUEST_STATE_TRANSITION(TRUNK_REQUEST_STATE_SENT);
fr_dlist_insert_tail(&tconn->sent, treq);
/*
*
* @param[in] treq to trigger a state change for.
*/
-static void trunk_request_enter_idle(fr_trunk_request_t *treq)
+static void trunk_request_enter_idle(trunk_request_t *treq)
{
- fr_trunk_connection_t *tconn = treq->pub.tconn;
- fr_trunk_t *trunk = treq->pub.trunk;
+ trunk_connection_t *tconn = treq->pub.tconn;
+ trunk_t *trunk = treq->pub.trunk;
if (!fr_cond_assert(!tconn || (tconn->pub.trunk == trunk))) return;
switch (treq->pub.state) {
- case FR_TRUNK_REQUEST_STATE_PENDING:
+ case TRUNK_REQUEST_STATE_PENDING:
REQUEST_EXTRACT_PENDING(treq);
break;
- case FR_TRUNK_REQUEST_STATE_PARTIAL:
+ case TRUNK_REQUEST_STATE_PARTIAL:
REQUEST_EXTRACT_PARTIAL(treq);
break;
default:
- REQUEST_BAD_STATE_TRANSITION(FR_TRUNK_REQUEST_STATE_SENT);
+		REQUEST_BAD_STATE_TRANSITION(TRUNK_REQUEST_STATE_IDLE);
}
- REQUEST_STATE_TRANSITION(FR_TRUNK_REQUEST_STATE_IDLE);
+ REQUEST_STATE_TRANSITION(TRUNK_REQUEST_STATE_IDLE);
fr_dlist_insert_tail(&tconn->idle, treq);
if (!treq->sent) {
* of another connection if it's been sent or partially sent.
*
* @note treq->tconn and treq may be inviable after calling
- * if treq->conn and fr_connection_signals_pause is not used.
+ * if treq->conn and connection_signals_pause is not used.
* This is due to call to trunk_connection_event_update.
*
* @param[in] treq to trigger a state change for.
* @param[in] reason Why the request was cancelled.
* Should be one of:
- * - FR_TRUNK_CANCEL_REASON_SIGNAL request cancelled
+ * - TRUNK_CANCEL_REASON_SIGNAL request cancelled
* because of a signal from the interpreter.
- * - FR_TRUNK_CANCEL_REASON_MOVE request cancelled
+ * - TRUNK_CANCEL_REASON_MOVE request cancelled
* because the connection failed and it needs
* to be assigned to a new connection.
- * - FR_TRUNK_CANCEL_REASON_REQUEUE request cancelled
+ * - TRUNK_CANCEL_REASON_REQUEUE request cancelled
* as it needs to be resent on the same connection.
*/
-static void trunk_request_enter_cancel(fr_trunk_request_t *treq, fr_trunk_cancel_reason_t reason)
+static void trunk_request_enter_cancel(trunk_request_t *treq, trunk_cancel_reason_t reason)
{
- fr_trunk_connection_t *tconn = treq->pub.tconn;
- fr_trunk_t *trunk = treq->pub.trunk;
+ trunk_connection_t *tconn = treq->pub.tconn;
+ trunk_t *trunk = treq->pub.trunk;
if (!fr_cond_assert(!tconn || (tconn->pub.trunk == trunk))) return;
switch (treq->pub.state) {
- case FR_TRUNK_REQUEST_STATE_PARTIAL:
+ case TRUNK_REQUEST_STATE_PARTIAL:
REQUEST_EXTRACT_PARTIAL(treq);
break;
- case FR_TRUNK_REQUEST_STATE_SENT:
+ case TRUNK_REQUEST_STATE_SENT:
REQUEST_EXTRACT_SENT(treq);
break;
- case FR_TRUNK_REQUEST_STATE_IDLE:
+ case TRUNK_REQUEST_STATE_IDLE:
REQUEST_EXTRACT_IDLE(treq);
break;
default:
- REQUEST_BAD_STATE_TRANSITION(FR_TRUNK_REQUEST_STATE_CANCEL);
+ REQUEST_BAD_STATE_TRANSITION(TRUNK_REQUEST_STATE_CANCEL);
}
- REQUEST_STATE_TRANSITION(FR_TRUNK_REQUEST_STATE_CANCEL);
+ REQUEST_STATE_TRANSITION(TRUNK_REQUEST_STATE_CANCEL);
fr_dlist_insert_tail(&tconn->cancel, treq);
treq->cancel_reason = reason;
* request_t *, as we can't guarantee the
* lifetime of the original request_t *.
*/
- if (treq->cancel_reason == FR_TRUNK_CANCEL_REASON_SIGNAL) treq->pub.request = NULL;
+ if (treq->cancel_reason == TRUNK_CANCEL_REASON_SIGNAL) treq->pub.request = NULL;
/*
* Register for I/O write events if we need to.
*
* @param[in] treq to trigger a state change for.
*/
-static void trunk_request_enter_cancel_partial(fr_trunk_request_t *treq)
+static void trunk_request_enter_cancel_partial(trunk_request_t *treq)
{
- fr_trunk_connection_t *tconn = treq->pub.tconn;
- fr_trunk_t *trunk = treq->pub.trunk;
+ trunk_connection_t *tconn = treq->pub.tconn;
+ trunk_t *trunk = treq->pub.trunk;
if (!fr_cond_assert(!tconn || (tconn->pub.trunk == trunk))) return;
fr_assert(trunk->funcs.request_cancel_mux);
- fr_assert(treq->cancel_reason == FR_TRUNK_CANCEL_REASON_SIGNAL);
+ fr_assert(treq->cancel_reason == TRUNK_CANCEL_REASON_SIGNAL);
switch (treq->pub.state) {
- case FR_TRUNK_REQUEST_STATE_CANCEL: /* The only valid state cancel_sent can be reached from */
+ case TRUNK_REQUEST_STATE_CANCEL: /* The only valid state cancel_sent can be reached from */
REQUEST_EXTRACT_CANCEL(treq);
break;
default:
- REQUEST_BAD_STATE_TRANSITION(FR_TRUNK_REQUEST_STATE_CANCEL_PARTIAL);
+ REQUEST_BAD_STATE_TRANSITION(TRUNK_REQUEST_STATE_CANCEL_PARTIAL);
}
- REQUEST_STATE_TRANSITION(FR_TRUNK_REQUEST_STATE_CANCEL_PARTIAL);
+ REQUEST_STATE_TRANSITION(TRUNK_REQUEST_STATE_CANCEL_PARTIAL);
fr_assert(!tconn->partial);
tconn->cancel_partial = treq;
}
* acknowledges the cancellation request.
*
* @note treq->tconn and treq may be inviable after calling
- * if treq->conn and fr_connection_signals_pause is not used.
+ * if treq->conn and connection_signals_pause is not used.
* This is due to call to trunk_connection_event_update.
*
* @param[in] treq to trigger a state change for.
*/
-static void trunk_request_enter_cancel_sent(fr_trunk_request_t *treq)
+static void trunk_request_enter_cancel_sent(trunk_request_t *treq)
{
- fr_trunk_connection_t *tconn = treq->pub.tconn;
- fr_trunk_t *trunk = treq->pub.trunk;
+ trunk_connection_t *tconn = treq->pub.tconn;
+ trunk_t *trunk = treq->pub.trunk;
if (!fr_cond_assert(!tconn || (tconn->pub.trunk == trunk))) return;
fr_assert(trunk->funcs.request_cancel_mux);
- fr_assert(treq->cancel_reason == FR_TRUNK_CANCEL_REASON_SIGNAL);
+ fr_assert(treq->cancel_reason == TRUNK_CANCEL_REASON_SIGNAL);
switch (treq->pub.state) {
- case FR_TRUNK_REQUEST_STATE_CANCEL_PARTIAL:
+ case TRUNK_REQUEST_STATE_CANCEL_PARTIAL:
REQUEST_EXTRACT_CANCEL_PARTIAL(treq);
break;
- case FR_TRUNK_REQUEST_STATE_CANCEL:
+ case TRUNK_REQUEST_STATE_CANCEL:
REQUEST_EXTRACT_CANCEL(treq);
break;
default:
- REQUEST_BAD_STATE_TRANSITION(FR_TRUNK_REQUEST_STATE_CANCEL_SENT);
+ REQUEST_BAD_STATE_TRANSITION(TRUNK_REQUEST_STATE_CANCEL_SENT);
}
- REQUEST_STATE_TRANSITION(FR_TRUNK_REQUEST_STATE_CANCEL_SENT);
+ REQUEST_STATE_TRANSITION(TRUNK_REQUEST_STATE_CANCEL_SENT);
fr_dlist_insert_tail(&tconn->cancel_sent, treq);
/*
*
* @note treq will be inviable after a call to this function.
* treq->tconn may be inviable after calling
- * if treq->conn and fr_connection_signals_pause is not used.
+ * if treq->conn and connection_signals_pause is not used.
* This is due to call to trunk_request_remove_from_conn.
*
* @param[in] treq to mark as complete.
*/
-static void trunk_request_enter_cancel_complete(fr_trunk_request_t *treq)
+static void trunk_request_enter_cancel_complete(trunk_request_t *treq)
{
- fr_trunk_connection_t *tconn = treq->pub.tconn;
- fr_trunk_t *trunk = treq->pub.trunk;
+ trunk_connection_t *tconn = treq->pub.tconn;
+ trunk_t *trunk = treq->pub.trunk;
if (!fr_cond_assert(!tconn || (tconn->pub.trunk == trunk))) return;
if (!fr_cond_assert(!treq->pub.request)) return; /* Only a valid state for request_t * which have been cancelled */
switch (treq->pub.state) {
- case FR_TRUNK_REQUEST_STATE_CANCEL_SENT:
- case FR_TRUNK_REQUEST_STATE_CANCEL:
+ case TRUNK_REQUEST_STATE_CANCEL_SENT:
+ case TRUNK_REQUEST_STATE_CANCEL:
break;
default:
- REQUEST_BAD_STATE_TRANSITION(FR_TRUNK_REQUEST_STATE_CANCEL_COMPLETE);
+ REQUEST_BAD_STATE_TRANSITION(TRUNK_REQUEST_STATE_CANCEL_COMPLETE);
}
trunk_request_remove_from_conn(treq);
- REQUEST_STATE_TRANSITION(FR_TRUNK_REQUEST_STATE_CANCEL_COMPLETE);
- fr_trunk_request_free(&treq); /* Free the request */
+ REQUEST_STATE_TRANSITION(TRUNK_REQUEST_STATE_CANCEL_COMPLETE);
+ trunk_request_free(&treq); /* Free the request */
}
/** Request completed successfully, inform the API client and free the request
*
* @param[in] treq to mark as complete.
*/
-static void trunk_request_enter_complete(fr_trunk_request_t *treq)
+static void trunk_request_enter_complete(trunk_request_t *treq)
{
- fr_trunk_connection_t *tconn = treq->pub.tconn;
- fr_trunk_t *trunk = treq->pub.trunk;
+ trunk_connection_t *tconn = treq->pub.tconn;
+ trunk_t *trunk = treq->pub.trunk;
if (!fr_cond_assert(!tconn || (tconn->pub.trunk == trunk))) return;
switch (treq->pub.state) {
- case FR_TRUNK_REQUEST_STATE_SENT:
- case FR_TRUNK_REQUEST_STATE_PENDING:
- case FR_TRUNK_REQUEST_STATE_IDLE:
+ case TRUNK_REQUEST_STATE_SENT:
+ case TRUNK_REQUEST_STATE_PENDING:
+ case TRUNK_REQUEST_STATE_IDLE:
trunk_request_remove_from_conn(treq);
break;
default:
- REQUEST_BAD_STATE_TRANSITION(FR_TRUNK_REQUEST_STATE_COMPLETE);
+ REQUEST_BAD_STATE_TRANSITION(TRUNK_REQUEST_STATE_COMPLETE);
}
- REQUEST_STATE_TRANSITION(FR_TRUNK_REQUEST_STATE_COMPLETE);
+ REQUEST_STATE_TRANSITION(TRUNK_REQUEST_STATE_COMPLETE);
DO_REQUEST_COMPLETE(treq);
- fr_trunk_request_free(&treq); /* Free the request */
+ trunk_request_free(&treq); /* Free the request */
}
/** Request failed, inform the API client and free the request
*
* @param[in] treq to mark as failed.
*/
-static void trunk_request_enter_failed(fr_trunk_request_t *treq)
+static void trunk_request_enter_failed(trunk_request_t *treq)
{
- fr_trunk_connection_t *tconn = treq->pub.tconn;
- fr_trunk_t *trunk = treq->pub.trunk;
- fr_trunk_request_state_t prev = treq->pub.state;
+ trunk_connection_t *tconn = treq->pub.tconn;
+ trunk_t *trunk = treq->pub.trunk;
+ trunk_request_state_t prev = treq->pub.state;
if (!fr_cond_assert(!tconn || (tconn->pub.trunk == trunk))) return;
switch (treq->pub.state) {
- case FR_TRUNK_REQUEST_STATE_BACKLOG:
+ case TRUNK_REQUEST_STATE_BACKLOG:
REQUEST_EXTRACT_BACKLOG(treq);
break;
break;
}
- REQUEST_STATE_TRANSITION(FR_TRUNK_REQUEST_STATE_FAILED);
+ REQUEST_STATE_TRANSITION(TRUNK_REQUEST_STATE_FAILED);
DO_REQUEST_FAIL(treq, prev);
- fr_trunk_request_free(&treq); /* Free the request */
+ trunk_request_free(&treq); /* Free the request */
}
/** Check to see if a trunk request can be enqueued
* @param[in] trunk To enqueue requests on.
* @param[in] request associated with the treq (if any).
* @return
- * - FR_TRUNK_ENQUEUE_OK caller should enqueue request on provided tconn.
- * - FR_TRUNK_ENQUEUE_IN_BACKLOG Request should be queued in the backlog.
- * - FR_TRUNK_ENQUEUE_NO_CAPACITY Unable to enqueue request as we have no spare
+ * - TRUNK_ENQUEUE_OK caller should enqueue request on provided tconn.
+ * - TRUNK_ENQUEUE_IN_BACKLOG Request should be queued in the backlog.
+ * - TRUNK_ENQUEUE_NO_CAPACITY Unable to enqueue request as we have no spare
* connections or backlog space.
- * - FR_TRUNK_ENQUEUE_DST_UNAVAILABLE Can't enqueue because the destination is
+ * - TRUNK_ENQUEUE_DST_UNAVAILABLE Can't enqueue because the destination is
* unreachable.
*/
-static fr_trunk_enqueue_t trunk_request_check_enqueue(fr_trunk_connection_t **tconn_out, fr_trunk_t *trunk,
+static trunk_enqueue_t trunk_request_check_enqueue(trunk_connection_t **tconn_out, trunk_t *trunk,
request_t *request)
{
- fr_trunk_connection_t *tconn;
+ trunk_connection_t *tconn;
/*
* If we have an active connection then
* return that.
tconn = fr_minmax_heap_min_peek(trunk->active);
if (tconn) {
*tconn_out = tconn;
- return FR_TRUNK_ENQUEUE_OK;
+ return TRUNK_ENQUEUE_OK;
}
/*
RWARN, WARN, "Refusing to enqueue requests - "
"No active connections and last event was a connection failure");
- return FR_TRUNK_ENQUEUE_DST_UNAVAILABLE;
+ return TRUNK_ENQUEUE_DST_UNAVAILABLE;
}
if (limit > 0) {
uint64_t total_reqs;
- total_reqs = fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL,
- FR_TRUNK_REQUEST_STATE_ALL);
+ total_reqs = trunk_request_count_by_state(trunk, TRUNK_CONN_ALL,
+ TRUNK_REQUEST_STATE_ALL);
if (total_reqs >= limit) {
RATE_LIMIT_LOCAL_ROPTIONAL(&trunk->limit_max_requests_alloc_log,
RWARN, WARN, "Refusing to alloc requests - "
"Limit of %"PRIu64" (max = %u * per_connection_max = %u) "
"requests reached",
limit, trunk->conf.max, trunk->conf.max_req_per_conn);
- return FR_TRUNK_ENQUEUE_NO_CAPACITY;
+ return TRUNK_ENQUEUE_NO_CAPACITY;
}
}
}
- return FR_TRUNK_ENQUEUE_IN_BACKLOG;
+ return TRUNK_ENQUEUE_IN_BACKLOG;
}
/** Enqueue a request which has never been assigned to a connection or was previously cancelled
* from its existing connection with
* #trunk_connection_requests_dequeue.
* @return
- * - FR_TRUNK_ENQUEUE_OK Request was re-enqueued.
- * - FR_TRUNK_ENQUEUE_NO_CAPACITY Request enqueueing failed because we're at capacity.
- * - FR_TRUNK_ENQUEUE_DST_UNAVAILABLE Enqueuing failed for some reason.
+ * - TRUNK_ENQUEUE_OK Request was re-enqueued.
+ * - TRUNK_ENQUEUE_NO_CAPACITY Request enqueueing failed because we're at capacity.
+ * - TRUNK_ENQUEUE_DST_UNAVAILABLE Enqueuing failed for some reason.
* Usually because the connection to the resource is down.
*/
-static fr_trunk_enqueue_t trunk_request_enqueue_existing(fr_trunk_request_t *treq)
+static trunk_enqueue_t trunk_request_enqueue_existing(trunk_request_t *treq)
{
- fr_trunk_t *trunk = treq->pub.trunk;
- fr_trunk_connection_t *tconn = NULL;
- fr_trunk_enqueue_t ret;
+ trunk_t *trunk = treq->pub.trunk;
+ trunk_connection_t *tconn = NULL;
+ trunk_enqueue_t ret;
/*
* Must *NOT* still be assigned to another connection
ret = trunk_request_check_enqueue(&tconn, trunk, treq->pub.request);
switch (ret) {
- case FR_TRUNK_ENQUEUE_OK:
+ case TRUNK_ENQUEUE_OK:
if (trunk->conf.always_writable) {
- fr_connection_signals_pause(tconn->pub.conn);
+ connection_signals_pause(tconn->pub.conn);
trunk_request_enter_pending(treq, tconn, false);
trunk_connection_writable(tconn);
- fr_connection_signals_resume(tconn->pub.conn);
+ connection_signals_resume(tconn->pub.conn);
} else {
trunk_request_enter_pending(treq, tconn, false);
}
break;
- case FR_TRUNK_ENQUEUE_IN_BACKLOG:
+ case TRUNK_ENQUEUE_IN_BACKLOG:
/*
* No more connections and request
* is already in the backlog.
* Signal our caller it should stop
* trying to drain the backlog.
*/
- if (treq->pub.state == FR_TRUNK_REQUEST_STATE_BACKLOG) return FR_TRUNK_ENQUEUE_NO_CAPACITY;
+ if (treq->pub.state == TRUNK_REQUEST_STATE_BACKLOG) return TRUNK_ENQUEUE_NO_CAPACITY;
trunk_request_enter_backlog(treq, false);
break;
* @param[in] states Dequeue request in these states.
* @param[in] max The maximum number of requests to dequeue. 0 for unlimited.
*/
-static uint64_t trunk_connection_requests_dequeue(fr_dlist_head_t *out, fr_trunk_connection_t *tconn,
+static uint64_t trunk_connection_requests_dequeue(fr_dlist_head_t *out, trunk_connection_t *tconn,
int states, uint64_t max)
{
- fr_trunk_request_t *treq;
+ trunk_request_t *treq;
uint64_t count = 0;
if (max == 0) max = UINT64_MAX;
* Don't need to do anything with
* cancellation requests.
*/
- if (states & FR_TRUNK_REQUEST_STATE_CANCEL) DEQUEUE_ALL(&tconn->cancel,
- FR_TRUNK_REQUEST_STATE_CANCEL);
+ if (states & TRUNK_REQUEST_STATE_CANCEL) DEQUEUE_ALL(&tconn->cancel,
+ TRUNK_REQUEST_STATE_CANCEL);
/*
* ...same with cancel inform
*/
- if (states & FR_TRUNK_REQUEST_STATE_CANCEL_SENT) DEQUEUE_ALL(&tconn->cancel_sent,
- FR_TRUNK_REQUEST_STATE_CANCEL_SENT);
+ if (states & TRUNK_REQUEST_STATE_CANCEL_SENT) DEQUEUE_ALL(&tconn->cancel_sent,
+ TRUNK_REQUEST_STATE_CANCEL_SENT);
/*
* ....same with cancel partial
*/
- if (states & FR_TRUNK_REQUEST_STATE_CANCEL_PARTIAL) {
+ if (states & TRUNK_REQUEST_STATE_CANCEL_PARTIAL) {
OVER_MAX_CHECK;
treq = tconn->cancel_partial;
if (treq) {
- fr_assert(treq->pub.state == FR_TRUNK_REQUEST_STATE_CANCEL_PARTIAL);
+ fr_assert(treq->pub.state == TRUNK_REQUEST_STATE_CANCEL_PARTIAL);
trunk_request_enter_unassigned(treq);
fr_dlist_insert_tail(out, treq);
}
/*
* ...and pending.
*/
- if (states & FR_TRUNK_REQUEST_STATE_PENDING) {
+ if (states & TRUNK_REQUEST_STATE_PENDING) {
while ((treq = fr_heap_peek(tconn->pending))) {
OVER_MAX_CHECK;
- fr_assert(treq->pub.state == FR_TRUNK_REQUEST_STATE_PENDING);
+ fr_assert(treq->pub.state == TRUNK_REQUEST_STATE_PENDING);
trunk_request_enter_unassigned(treq);
fr_dlist_insert_tail(out, treq);
}
/*
* Cancel partially sent requests
*/
- if (states & FR_TRUNK_REQUEST_STATE_PARTIAL) {
+ if (states & TRUNK_REQUEST_STATE_PARTIAL) {
OVER_MAX_CHECK;
treq = tconn->partial;
if (treq) {
- fr_assert(treq->pub.state == FR_TRUNK_REQUEST_STATE_PARTIAL);
+ fr_assert(treq->pub.state == TRUNK_REQUEST_STATE_PARTIAL);
/*
* Don't allow the connection to change state whilst
* we're draining requests from it.
*/
- fr_connection_signals_pause(tconn->pub.conn);
- trunk_request_enter_cancel(treq, FR_TRUNK_CANCEL_REASON_MOVE);
+ connection_signals_pause(tconn->pub.conn);
+ trunk_request_enter_cancel(treq, TRUNK_CANCEL_REASON_MOVE);
trunk_request_enter_unassigned(treq);
fr_dlist_insert_tail(out, treq);
- fr_connection_signals_resume(tconn->pub.conn);
+ connection_signals_resume(tconn->pub.conn);
}
}
/*
* Cancel sent requests
*/
- if (states & FR_TRUNK_REQUEST_STATE_SENT) {
+ if (states & TRUNK_REQUEST_STATE_SENT) {
/*
* Don't allow the connection to change state whilst
* we're draining requests from it.
*/
- fr_connection_signals_pause(tconn->pub.conn);
+ connection_signals_pause(tconn->pub.conn);
while ((treq = fr_dlist_head(&tconn->sent))) {
OVER_MAX_CHECK;
- fr_assert(treq->pub.state == FR_TRUNK_REQUEST_STATE_SENT);
+ fr_assert(treq->pub.state == TRUNK_REQUEST_STATE_SENT);
- trunk_request_enter_cancel(treq, FR_TRUNK_CANCEL_REASON_MOVE);
+ trunk_request_enter_cancel(treq, TRUNK_CANCEL_REASON_MOVE);
trunk_request_enter_unassigned(treq);
fr_dlist_insert_tail(out, treq);
}
- fr_connection_signals_resume(tconn->pub.conn);
+ connection_signals_resume(tconn->pub.conn);
}
return count;
*
* @return the number of requests re-queued.
*/
-static uint64_t trunk_connection_requests_requeue(fr_trunk_connection_t *tconn, int states, uint64_t max,
+static uint64_t trunk_connection_requests_requeue_priv(trunk_connection_t *tconn, int states, uint64_t max,
bool fail_bound)
{
- fr_trunk_t *trunk = tconn->pub.trunk;
+ trunk_t *trunk = tconn->pub.trunk;
fr_dlist_head_t to_process;
- fr_trunk_request_t *treq = NULL;
+ trunk_request_t *treq = NULL;
uint64_t moved = 0;
if (max == 0) max = UINT64_MAX;
- fr_dlist_talloc_init(&to_process, fr_trunk_request_t, entry);
+ fr_dlist_talloc_init(&to_process, trunk_request_t, entry);
/*
* Prevent the connection changing state whilst we're
* trunk_request_enqueue_existing which can reconnect
* the connection.
*/
- fr_connection_signals_pause(tconn->pub.conn);
+ connection_signals_pause(tconn->pub.conn);
/*
* Remove non-cancelled requests from the connection
*/
- moved += trunk_connection_requests_dequeue(&to_process, tconn, states & ~FR_TRUNK_REQUEST_STATE_CANCEL_ALL, max);
+ moved += trunk_connection_requests_dequeue(&to_process, tconn, states & ~TRUNK_REQUEST_STATE_CANCEL_ALL, max);
/*
* Prevent requests being requeued on the same trunk
* and if something is added later, it'll be flagged
* by the tests.
*/
- if (tconn->pub.state == FR_TRUNK_CONN_ACTIVE) {
+ if (tconn->pub.state == TRUNK_CONN_ACTIVE) {
int ret;
ret = fr_minmax_heap_extract(trunk->active, tconn);
* redistribute them to new connections.
*/
while ((treq = fr_dlist_next(&to_process, treq))) {
- fr_trunk_request_t *prev;
+ trunk_request_t *prev;
prev = fr_dlist_remove(&to_process, treq);
}
switch (trunk_request_enqueue_existing(treq)) {
- case FR_TRUNK_ENQUEUE_OK:
+ case TRUNK_ENQUEUE_OK:
break;
/*
* load, it's been placed back
* in the backlog.
*/
- case FR_TRUNK_ENQUEUE_IN_BACKLOG:
+ case TRUNK_ENQUEUE_IN_BACKLOG:
break;
/*
* there's nothing to do except
* fail the request.
*/
- case FR_TRUNK_ENQUEUE_DST_UNAVAILABLE:
- case FR_TRUNK_ENQUEUE_NO_CAPACITY:
- case FR_TRUNK_ENQUEUE_FAIL:
+ case TRUNK_ENQUEUE_DST_UNAVAILABLE:
+ case TRUNK_ENQUEUE_NO_CAPACITY:
+ case TRUNK_ENQUEUE_FAIL:
trunk_request_enter_failed(treq);
break;
}
/*
* Add the connection back into the active list
*/
- if (tconn->pub.state == FR_TRUNK_CONN_ACTIVE) {
+ if (tconn->pub.state == TRUNK_CONN_ACTIVE) {
int ret;
ret = fr_minmax_heap_insert(trunk->active, tconn);
* just means freeing them.
*/
moved += trunk_connection_requests_dequeue(&to_process, tconn,
- states & FR_TRUNK_REQUEST_STATE_CANCEL_ALL, max - moved);
+ states & TRUNK_REQUEST_STATE_CANCEL_ALL, max - moved);
while ((treq = fr_dlist_next(&to_process, treq))) {
- fr_trunk_request_t *prev;
+ trunk_request_t *prev;
prev = fr_dlist_remove(&to_process, treq);
- fr_trunk_request_free(&treq);
+ trunk_request_free(&treq);
treq = prev;
}
*/
trunk_requests_per_connection(NULL, NULL, trunk, fr_time(), false);
- fr_connection_signals_resume(tconn->pub.conn);
+ connection_signals_resume(tconn->pub.conn);
return moved;
}
* If false bound requests will not be moved.
* @return The number of requests requeued.
*/
-uint64_t fr_trunk_connection_requests_requeue(fr_trunk_connection_t *tconn, int states, uint64_t max, bool fail_bound)
+uint64_t trunk_connection_requests_requeue(trunk_connection_t *tconn, int states, uint64_t max, bool fail_bound)
{
switch (tconn->pub.state) {
- case FR_TRUNK_CONN_ACTIVE:
- case FR_TRUNK_CONN_FULL:
- case FR_TRUNK_CONN_INACTIVE:
+ case TRUNK_CONN_ACTIVE:
+ case TRUNK_CONN_FULL:
+ case TRUNK_CONN_INACTIVE:
return trunk_connection_requests_requeue(tconn, states, max, fail_bound);
default:
*
* @param[in] treq to signal state change for.
*/
-void fr_trunk_request_signal_partial(fr_trunk_request_t *treq)
+void trunk_request_signal_partial(trunk_request_t *treq)
{
if (!fr_cond_assert_msg(treq->pub.trunk, "treq not associated with trunk")) return;
"%s can only be called from within request_mux handler", __FUNCTION__)) return;
switch (treq->pub.state) {
- case FR_TRUNK_REQUEST_STATE_PENDING:
+ case TRUNK_REQUEST_STATE_PENDING:
trunk_request_enter_partial(treq);
break;
*
* @param[in] treq to signal state change for.
*/
-void fr_trunk_request_signal_sent(fr_trunk_request_t *treq)
+void trunk_request_signal_sent(trunk_request_t *treq)
{
if (!fr_cond_assert_msg(treq->pub.trunk, "treq not associated with trunk")) return;
"%s can only be called from within request_mux handler", __FUNCTION__)) return;
switch (treq->pub.state) {
- case FR_TRUNK_REQUEST_STATE_PENDING:
- case FR_TRUNK_REQUEST_STATE_PARTIAL:
+ case TRUNK_REQUEST_STATE_PENDING:
+ case TRUNK_REQUEST_STATE_PARTIAL:
trunk_request_enter_sent(treq);
break;
*
* @param[in] treq to signal state change for.
*/
-void fr_trunk_request_signal_idle(fr_trunk_request_t *treq)
+void trunk_request_signal_idle(trunk_request_t *treq)
{
if (!fr_cond_assert_msg(treq->pub.trunk, "treq not associated with trunk")) return;
"%s can only be called from within request_mux handler", __FUNCTION__)) return;
switch (treq->pub.state) {
- case FR_TRUNK_REQUEST_STATE_PENDING:
- case FR_TRUNK_REQUEST_STATE_PARTIAL:
+ case TRUNK_REQUEST_STATE_PENDING:
+ case TRUNK_REQUEST_STATE_PARTIAL:
trunk_request_enter_idle(treq);
break;
*
* The API client will be informed that the request is now complete.
*/
-void fr_trunk_request_signal_complete(fr_trunk_request_t *treq)
+void trunk_request_signal_complete(trunk_request_t *treq)
{
- fr_trunk_t *trunk = treq->pub.trunk;
+ trunk_t *trunk = treq->pub.trunk;
if (!fr_cond_assert_msg(trunk, "treq not associated with trunk")) return;
if (IN_REQUEST_DEMUX(trunk)) trunk->pub.last_read_success = fr_time();
switch (treq->pub.state) {
- case FR_TRUNK_REQUEST_STATE_SENT:
- case FR_TRUNK_REQUEST_STATE_PENDING: /* Got immediate response, i.e. cached */
- case FR_TRUNK_REQUEST_STATE_IDLE:
+ case TRUNK_REQUEST_STATE_SENT:
+ case TRUNK_REQUEST_STATE_PENDING: /* Got immediate response, i.e. cached */
+ case TRUNK_REQUEST_STATE_IDLE:
trunk_request_enter_complete(treq);
break;
*
* The API client will be informed that the request has failed.
*/
-void fr_trunk_request_signal_fail(fr_trunk_request_t *treq)
+void trunk_request_signal_fail(trunk_request_t *treq)
{
if (!fr_cond_assert_msg(treq->pub.trunk, "treq not associated with trunk")) return;
/** Cancel a trunk request
*
* treq can be in any state, but requests to cancel if the treq is not in
- * the FR_TRUNK_REQUEST_STATE_PARTIAL or FR_TRUNK_REQUEST_STATE_SENT state will be ignored.
+ * the TRUNK_REQUEST_STATE_PARTIAL or TRUNK_REQUEST_STATE_SENT state will be ignored.
*
* The complete or failed callbacks will not be called here, as it's assumed the request_t *
* is now inviable as it's being cancelled.
*
* @param[in] treq to signal state change for.
*/
-void fr_trunk_request_signal_cancel(fr_trunk_request_t *treq)
+void trunk_request_signal_cancel(trunk_request_t *treq)
{
- fr_trunk_t *trunk;
+ trunk_t *trunk;
/*
* Ensure treq hasn't been freed
*/
- (void)talloc_get_type_abort(treq, fr_trunk_request_t);
+ (void)talloc_get_type_abort(treq, trunk_request_t);
if (!fr_cond_assert_msg(treq->pub.trunk, "treq not associated with trunk")) return;
* We don't call the complete or failed callbacks
* as the request and rctx are no longer viable.
*/
- case FR_TRUNK_REQUEST_STATE_PARTIAL:
- case FR_TRUNK_REQUEST_STATE_SENT:
+ case TRUNK_REQUEST_STATE_PARTIAL:
+ case TRUNK_REQUEST_STATE_SENT:
{
- fr_trunk_connection_t *tconn = treq->pub.tconn;
+ trunk_connection_t *tconn = treq->pub.tconn;
/*
* Don't allow connection state changes
*/
- fr_connection_signals_pause(tconn->pub.conn);
- trunk_request_enter_cancel(treq, FR_TRUNK_CANCEL_REASON_SIGNAL);
- if (!fr_cond_assert_msg(treq->pub.state == FR_TRUNK_REQUEST_STATE_CANCEL,
+ connection_signals_pause(tconn->pub.conn);
+ trunk_request_enter_cancel(treq, TRUNK_CANCEL_REASON_SIGNAL);
+ if (!fr_cond_assert_msg(treq->pub.state == TRUNK_REQUEST_STATE_CANCEL,
"Bad state %s after cancellation",
- fr_table_str_by_value(fr_trunk_request_states, treq->pub.state, "<INVALID>"))) {
- fr_connection_signals_resume(tconn->pub.conn);
+ fr_table_str_by_value(trunk_request_states, treq->pub.state, "<INVALID>"))) {
+ connection_signals_resume(tconn->pub.conn);
return;
}
/*
*/
if (!trunk->funcs.request_cancel_mux) {
trunk_request_enter_unassigned(treq);
- fr_trunk_request_free(&treq);
+ trunk_request_free(&treq);
}
- fr_connection_signals_resume(tconn->pub.conn);
+ connection_signals_resume(tconn->pub.conn);
}
break;
* We're already in the process of cancelling a
* request, so ignore duplicate signals.
*/
- case FR_TRUNK_REQUEST_STATE_CANCEL:
- case FR_TRUNK_REQUEST_STATE_CANCEL_PARTIAL:
- case FR_TRUNK_REQUEST_STATE_CANCEL_SENT:
- case FR_TRUNK_REQUEST_STATE_CANCEL_COMPLETE:
+ case TRUNK_REQUEST_STATE_CANCEL:
+ case TRUNK_REQUEST_STATE_CANCEL_PARTIAL:
+ case TRUNK_REQUEST_STATE_CANCEL_SENT:
+ case TRUNK_REQUEST_STATE_CANCEL_COMPLETE:
break;
/*
*/
default:
trunk_request_enter_unassigned(treq);
- fr_trunk_request_free(&treq);
+ trunk_request_free(&treq);
break;
}
}
*
* @param[in] treq to signal state change for.
*/
-void fr_trunk_request_signal_cancel_partial(fr_trunk_request_t *treq)
+void trunk_request_signal_cancel_partial(trunk_request_t *treq)
{
if (!fr_cond_assert_msg(treq->pub.trunk, "treq not associated with trunk")) return;
"%s can only be called from within request_cancel_mux handler", __FUNCTION__)) return;
switch (treq->pub.state) {
- case FR_TRUNK_REQUEST_STATE_CANCEL:
+ case TRUNK_REQUEST_STATE_CANCEL:
trunk_request_enter_cancel_partial(treq);
break;
*
* @param[in] treq to signal state change for.
*/
-void fr_trunk_request_signal_cancel_sent(fr_trunk_request_t *treq)
+void trunk_request_signal_cancel_sent(trunk_request_t *treq)
{
if (!fr_cond_assert_msg(treq->pub.trunk, "treq not associated with trunk")) return;
"%s can only be called from within request_cancel_mux handler", __FUNCTION__)) return;
switch (treq->pub.state) {
- case FR_TRUNK_REQUEST_STATE_CANCEL:
- case FR_TRUNK_REQUEST_STATE_CANCEL_PARTIAL:
+ case TRUNK_REQUEST_STATE_CANCEL:
+ case TRUNK_REQUEST_STATE_CANCEL_PARTIAL:
trunk_request_enter_cancel_sent(treq);
break;
*
* @param[in] treq to signal state change for.
*/
-void fr_trunk_request_signal_cancel_complete(fr_trunk_request_t *treq)
+void trunk_request_signal_cancel_complete(trunk_request_t *treq)
{
if (!fr_cond_assert_msg(treq->pub.trunk, "treq not associated with trunk")) return;
__FUNCTION__)) return;
switch (treq->pub.state) {
- case FR_TRUNK_REQUEST_STATE_CANCEL_SENT:
+ case TRUNK_REQUEST_STATE_CANCEL_SENT:
/*
* This is allowed, as we may not need to wait
* for the database to ACK our cancellation
* request.
*
- * Note: FR_TRUNK_REQUEST_STATE_CANCEL_PARTIAL
+ * Note: TRUNK_REQUEST_STATE_CANCEL_PARTIAL
* is not allowed here, as that'd mean we'd half
* written the cancellation request out to the
* socket, and then decided to abandon it.
*
* That'd leave the socket in an unusable state.
*/
- case FR_TRUNK_REQUEST_STATE_CANCEL:
+ case TRUNK_REQUEST_STATE_CANCEL:
trunk_request_enter_cancel_complete(treq);
break;
*
* @param[in] treq_to_free request.
*/
-void fr_trunk_request_free(fr_trunk_request_t **treq_to_free)
+void trunk_request_free(trunk_request_t **treq_to_free)
{
- fr_trunk_request_t *treq = *treq_to_free;
- fr_trunk_t *trunk = treq->pub.trunk;
+ trunk_request_t *treq = *treq_to_free;
+ trunk_t *trunk = treq->pub.trunk;
if (unlikely(!treq)) return;
* freed from.
*/
switch (treq->pub.state) {
- case FR_TRUNK_REQUEST_STATE_INIT:
- case FR_TRUNK_REQUEST_STATE_UNASSIGNED:
- case FR_TRUNK_REQUEST_STATE_COMPLETE:
- case FR_TRUNK_REQUEST_STATE_FAILED:
- case FR_TRUNK_REQUEST_STATE_CANCEL_COMPLETE:
+ case TRUNK_REQUEST_STATE_INIT:
+ case TRUNK_REQUEST_STATE_UNASSIGNED:
+ case TRUNK_REQUEST_STATE_COMPLETE:
+ case TRUNK_REQUEST_STATE_FAILED:
+ case TRUNK_REQUEST_STATE_CANCEL_COMPLETE:
break;
default:
* No cleanup delay, means cleanup immediately
*/
if (!fr_time_delta_ispos(trunk->conf.req_cleanup_delay)) {
- treq->pub.state = FR_TRUNK_REQUEST_STATE_INIT;
+ treq->pub.state = TRUNK_REQUEST_STATE_INIT;
#ifndef NDEBUG
/*
*
* Return the trunk request back to the init state.
*/
- *treq = (fr_trunk_request_t){
+ *treq = (trunk_request_t){
.pub = {
- .state = FR_TRUNK_REQUEST_STATE_INIT,
+ .state = TRUNK_REQUEST_STATE_INIT,
.trunk = treq->pub.trunk,
},
- .cancel_reason = FR_TRUNK_CANCEL_REASON_NONE,
+ .cancel_reason = TRUNK_CANCEL_REASON_NONE,
.last_freed = fr_time(),
#ifndef NDEBUG
.log = treq->log /* Keep the list head, to save reinitialisation */
/** Actually free the trunk request
*
*/
-static int _trunk_request_free(fr_trunk_request_t *treq)
+static int _trunk_request_free(trunk_request_t *treq)
{
- fr_trunk_t *trunk = treq->pub.trunk;
+ trunk_t *trunk = treq->pub.trunk;
switch (treq->pub.state) {
- case FR_TRUNK_REQUEST_STATE_INIT:
- case FR_TRUNK_REQUEST_STATE_UNASSIGNED:
+ case TRUNK_REQUEST_STATE_INIT:
+ case TRUNK_REQUEST_STATE_UNASSIGNED:
break;
default:
* - A newly allocated request.
* - NULL if too many requests are allocated.
*/
-fr_trunk_request_t *fr_trunk_request_alloc(fr_trunk_t *trunk, request_t *request)
+trunk_request_t *trunk_request_alloc(trunk_t *trunk, request_t *request)
{
- fr_trunk_request_t *treq;
+ trunk_request_t *treq;
/*
* The number of treqs currently allocated
treq = fr_dlist_head(&trunk->free_requests);
if (treq) {
fr_dlist_remove(&trunk->free_requests, treq);
- fr_assert(treq->pub.state == FR_TRUNK_REQUEST_STATE_INIT);
+ fr_assert(treq->pub.state == TRUNK_REQUEST_STATE_INIT);
fr_assert(treq->pub.trunk == trunk);
fr_assert(treq->pub.tconn == NULL);
- fr_assert(treq->cancel_reason == FR_TRUNK_CANCEL_REASON_NONE);
+ fr_assert(treq->cancel_reason == TRUNK_CANCEL_REASON_NONE);
fr_assert(fr_time_gt(treq->last_freed, fr_time_wrap(0)));
trunk->pub.req_alloc_reused++;
} else {
- MEM(treq = talloc_pooled_object(trunk, fr_trunk_request_t,
+ MEM(treq = talloc_pooled_object(trunk, trunk_request_t,
trunk->conf.req_pool_headers, trunk->conf.req_pool_size));
talloc_set_destructor(treq, _trunk_request_free);
- *treq = (fr_trunk_request_t){
+ *treq = (trunk_request_t){
.pub = {
- .state = FR_TRUNK_REQUEST_STATE_INIT,
+ .state = TRUNK_REQUEST_STATE_INIT,
.trunk = trunk
},
- .cancel_reason = FR_TRUNK_CANCEL_REASON_NONE
+ .cancel_reason = TRUNK_CANCEL_REASON_NONE
};
trunk->pub.req_alloc_new++;
#ifndef NDEBUG
- fr_dlist_init(&treq->log, fr_trunk_request_state_log_t, entry);
+ fr_dlist_init(&treq->log, trunk_request_state_log_t, entry);
#endif
}
* (trunk connection) and treq state changes to be called.
*
* When a tconn becomes writable (or the trunk is configured to be always writable)
- * the #fr_trunk_request_mux_t callback will be called to dequeue, encode and
- * send any pending requests for that tconn. The #fr_trunk_request_mux_t callback
+ * the #trunk_request_mux_t callback will be called to dequeue, encode and
+ * send any pending requests for that tconn. The #trunk_request_mux_t callback
* is also responsible for tracking the outbound requests to allow the
- * #fr_trunk_request_demux_t callback to match inbound responses with the original
- * treq. Once the #fr_trunk_request_mux_t callback is done processing the treq
+ * #trunk_request_demux_t callback to match inbound responses with the original
+ * treq. Once the #trunk_request_mux_t callback is done processing the treq
* it signals what state the treq should enter next using one of the
- * fr_trunk_request_signal_* functions.
+ * trunk_request_signal_* functions.
*
- * When a tconn becomes readable the user specified #fr_trunk_request_demux_t
+ * When a tconn becomes readable the user specified #trunk_request_demux_t
* callback is called to process any responses, match them with the original treq.
* and signal what state they should enter next using one of the
- * fr_trunk_request_signal_* functions.
+ * trunk_request_signal_* functions.
*
* @param[in,out] treq_out A trunk request handle. If the memory pointed to
* is NULL, a new treq will be allocated.
* Otherwise treq should point to memory allocated
- * with fr_trunk_request_alloc.
+ * with trunk_request_alloc.
* @param[in] trunk to enqueue request on.
* @param[in] request to enqueue.
* @param[in] preq Protocol request to write out. Will be freed when
* treq is freed. Should ideally be parented by the
* treq if possible.
- * Use #fr_trunk_request_alloc for pre-allocation of
+ * Use #trunk_request_alloc for pre-allocation of
* the treq.
* @param[in] rctx The resume context to write any result to.
* @return
- * - FR_TRUNK_ENQUEUE_OK.
- * - FR_TRUNK_ENQUEUE_IN_BACKLOG.
- * - FR_TRUNK_ENQUEUE_NO_CAPACITY.
- * - FR_TRUNK_ENQUEUE_DST_UNAVAILABLE
- * - FR_TRUNK_ENQUEUE_FAIL
+ * - TRUNK_ENQUEUE_OK.
+ * - TRUNK_ENQUEUE_IN_BACKLOG.
+ * - TRUNK_ENQUEUE_NO_CAPACITY.
+ * - TRUNK_ENQUEUE_DST_UNAVAILABLE
+ * - TRUNK_ENQUEUE_FAIL
*/
-fr_trunk_enqueue_t fr_trunk_request_enqueue(fr_trunk_request_t **treq_out, fr_trunk_t *trunk,
+trunk_enqueue_t trunk_request_enqueue(trunk_request_t **treq_out, trunk_t *trunk,
request_t *request, void *preq, void *rctx)
{
- fr_trunk_connection_t *tconn = NULL;
- fr_trunk_request_t *treq;
- fr_trunk_enqueue_t ret;
+ trunk_connection_t *tconn = NULL;
+ trunk_request_t *treq;
+ trunk_enqueue_t ret;
if (!fr_cond_assert_msg(!IN_HANDLER(trunk),
- "%s cannot be called within a handler", __FUNCTION__)) return FR_TRUNK_ENQUEUE_FAIL;
+ "%s cannot be called within a handler", __FUNCTION__)) return TRUNK_ENQUEUE_FAIL;
- if (!fr_cond_assert_msg(!*treq_out || ((*treq_out)->pub.state == FR_TRUNK_REQUEST_STATE_INIT),
- "%s requests must be in \"init\" state", __FUNCTION__)) return FR_TRUNK_ENQUEUE_FAIL;
+ if (!fr_cond_assert_msg(!*treq_out || ((*treq_out)->pub.state == TRUNK_REQUEST_STATE_INIT),
+ "%s requests must be in \"init\" state", __FUNCTION__)) return TRUNK_ENQUEUE_FAIL;
/*
* If delay_start was set, we may need
* to insert the timer for the connection manager.
*/
if (unlikely(!trunk->started)) {
- if (fr_trunk_start(trunk) < 0) return FR_TRUNK_ENQUEUE_FAIL;
+ if (trunk_start(trunk) < 0) return TRUNK_ENQUEUE_FAIL;
}
ret = trunk_request_check_enqueue(&tconn, trunk, request);
switch (ret) {
- case FR_TRUNK_ENQUEUE_OK:
+ case TRUNK_ENQUEUE_OK:
if (*treq_out) {
treq = *treq_out;
} else {
- *treq_out = treq = fr_trunk_request_alloc(trunk, request);
- if (!treq) return FR_TRUNK_ENQUEUE_FAIL;
+ *treq_out = treq = trunk_request_alloc(trunk, request);
+ if (!treq) return TRUNK_ENQUEUE_FAIL;
}
treq->pub.preq = preq;
treq->pub.rctx = rctx;
if (trunk->conf.always_writable) {
- fr_connection_signals_pause(tconn->pub.conn);
+ connection_signals_pause(tconn->pub.conn);
trunk_request_enter_pending(treq, tconn, true);
trunk_connection_writable(tconn);
- fr_connection_signals_resume(tconn->pub.conn);
+ connection_signals_resume(tconn->pub.conn);
} else {
trunk_request_enter_pending(treq, tconn, true);
}
break;
- case FR_TRUNK_ENQUEUE_IN_BACKLOG:
+ case TRUNK_ENQUEUE_IN_BACKLOG:
if (*treq_out) {
treq = *treq_out;
} else {
- *treq_out = treq = fr_trunk_request_alloc(trunk, request);
- if (!treq) return FR_TRUNK_ENQUEUE_FAIL;
+ *treq_out = treq = trunk_request_alloc(trunk, request);
+ if (!treq) return TRUNK_ENQUEUE_FAIL;
}
treq->pub.preq = preq;
treq->pub.rctx = rctx;
* If a trunk request was provided
* populate the preq and rctx fields
* so that if it's freed with
- * fr_trunk_request_free, the free
+ * trunk_request_free, the free
* function works as intended.
*/
if (*treq_out) {
*
* @param[in] treq to requeue (retransmit).
* @return
- * - FR_TRUNK_ENQUEUE_OK.
- * - FR_TRUNK_ENQUEUE_DST_UNAVAILABLE - Connection cannot service requests.
- * - FR_TRUNK_ENQUEUE_FAIL - Request isn't in a valid state to be reassigned.
+ * - TRUNK_ENQUEUE_OK.
+ * - TRUNK_ENQUEUE_DST_UNAVAILABLE - Connection cannot service requests.
+ * - TRUNK_ENQUEUE_FAIL - Request isn't in a valid state to be reassigned.
*/
-fr_trunk_enqueue_t fr_trunk_request_requeue(fr_trunk_request_t *treq)
+trunk_enqueue_t trunk_request_requeue(trunk_request_t *treq)
{
- fr_trunk_connection_t *tconn = treq->pub.tconn; /* Existing conn */
+ trunk_connection_t *tconn = treq->pub.tconn; /* Existing conn */
- if (!tconn) return FR_TRUNK_ENQUEUE_FAIL;
+ if (!tconn) return TRUNK_ENQUEUE_FAIL;
if (!IS_PROCESSING(tconn)) {
trunk_request_enter_failed(treq);
- return FR_TRUNK_ENQUEUE_DST_UNAVAILABLE;
+ return TRUNK_ENQUEUE_DST_UNAVAILABLE;
}
switch (treq->pub.state) {
- case FR_TRUNK_REQUEST_STATE_PARTIAL:
- case FR_TRUNK_REQUEST_STATE_SENT:
- case FR_TRUNK_REQUEST_STATE_IDLE:
- fr_connection_signals_pause(tconn->pub.conn);
- trunk_request_enter_cancel(treq, FR_TRUNK_CANCEL_REASON_REQUEUE);
+ case TRUNK_REQUEST_STATE_PARTIAL:
+ case TRUNK_REQUEST_STATE_SENT:
+ case TRUNK_REQUEST_STATE_IDLE:
+ connection_signals_pause(tconn->pub.conn);
+ trunk_request_enter_cancel(treq, TRUNK_CANCEL_REASON_REQUEUE);
trunk_request_enter_pending(treq, tconn, false);
- fr_connection_signals_resume(tconn->pub.conn);
+ connection_signals_resume(tconn->pub.conn);
break;
- case FR_TRUNK_REQUEST_STATE_BACKLOG: /* Do nothing.... */
- case FR_TRUNK_REQUEST_STATE_PENDING: /* Do nothing.... */
+ case TRUNK_REQUEST_STATE_BACKLOG: /* Do nothing.... */
+ case TRUNK_REQUEST_STATE_PENDING: /* Do nothing.... */
break;
default:
trunk_request_enter_failed(treq);
- return FR_TRUNK_ENQUEUE_FAIL;
+ return TRUNK_ENQUEUE_FAIL;
}
- return FR_TRUNK_ENQUEUE_OK;
+ return TRUNK_ENQUEUE_OK;
}
/** Enqueue additional requests on a specific connection
* @param[in,out] treq_out A trunk request handle. If the memory pointed to
* is NULL, a new treq will be allocated.
* Otherwise treq should point to memory allocated
- * with fr_trunk_request_alloc.
+ * with trunk_request_alloc.
* @param[in] tconn to enqueue request on.
* @param[in] request to enqueue.
* @param[in] preq Protocol request to write out. Will be freed when
* treq is freed. Should ideally be parented by the
* treq if possible.
- * Use #fr_trunk_request_alloc for pre-allocation of
+ * Use #trunk_request_alloc for pre-allocation of
* the treq.
* @param[in] rctx The resume context to write any result to.
* @param[in] ignore_limits Ignore max_req_per_conn. Useful to force status
* Will also allow enqueuing on "inactive", "draining",
* "draining-to-free" connections.
* @return
- * - FR_TRUNK_ENQUEUE_OK.
- * - FR_TRUNK_ENQUEUE_NO_CAPACITY - At max_req_per_conn_limit
- * - FR_TRUNK_ENQUEUE_DST_UNAVAILABLE - Connection cannot service requests.
+ * - TRUNK_ENQUEUE_OK.
+ * - TRUNK_ENQUEUE_NO_CAPACITY - At max_req_per_conn_limit
+ * - TRUNK_ENQUEUE_DST_UNAVAILABLE - Connection cannot service requests.
*/
-fr_trunk_enqueue_t fr_trunk_request_enqueue_on_conn(fr_trunk_request_t **treq_out, fr_trunk_connection_t *tconn,
+trunk_enqueue_t trunk_request_enqueue_on_conn(trunk_request_t **treq_out, trunk_connection_t *tconn,
request_t *request, void *preq, void *rctx,
bool ignore_limits)
{
- fr_trunk_request_t *treq;
- fr_trunk_t *trunk = tconn->pub.trunk;
+ trunk_request_t *treq;
+ trunk_t *trunk = tconn->pub.trunk;
- if (!fr_cond_assert_msg(!*treq_out || ((*treq_out)->pub.state == FR_TRUNK_REQUEST_STATE_INIT),
- "%s requests must be in \"init\" state", __FUNCTION__)) return FR_TRUNK_ENQUEUE_FAIL;
+ if (!fr_cond_assert_msg(!*treq_out || ((*treq_out)->pub.state == TRUNK_REQUEST_STATE_INIT),
+ "%s requests must be in \"init\" state", __FUNCTION__)) return TRUNK_ENQUEUE_FAIL;
- if (!IS_SERVICEABLE(tconn)) return FR_TRUNK_ENQUEUE_DST_UNAVAILABLE;
+ if (!IS_SERVICEABLE(tconn)) return TRUNK_ENQUEUE_DST_UNAVAILABLE;
/*
* Limits check
*/
if (!ignore_limits) {
if (trunk->conf.max_req_per_conn &&
- (fr_trunk_request_count_by_connection(tconn, FR_TRUNK_REQUEST_STATE_ALL) >=
- trunk->conf.max_req_per_conn)) return FR_TRUNK_ENQUEUE_NO_CAPACITY;
+ (trunk_request_count_by_connection(tconn, TRUNK_REQUEST_STATE_ALL) >=
+ trunk->conf.max_req_per_conn)) return TRUNK_ENQUEUE_NO_CAPACITY;
- if (tconn->pub.state != FR_TRUNK_CONN_ACTIVE) return FR_TRUNK_ENQUEUE_NO_CAPACITY;
+ if (tconn->pub.state != TRUNK_CONN_ACTIVE) return TRUNK_ENQUEUE_NO_CAPACITY;
}
if (*treq_out) {
treq = *treq_out;
} else {
- MEM(*treq_out = treq = fr_trunk_request_alloc(trunk, request));
+ MEM(*treq_out = treq = trunk_request_alloc(trunk, request));
}
treq->pub.preq = preq;
treq->bound_to_conn = true; /* Don't let the request be transferred */
if (trunk->conf.always_writable) {
- fr_connection_signals_pause(tconn->pub.conn);
+ connection_signals_pause(tconn->pub.conn);
trunk_request_enter_pending(treq, tconn, true);
trunk_connection_writable(tconn);
- fr_connection_signals_resume(tconn->pub.conn);
+ connection_signals_resume(tconn->pub.conn);
} else {
trunk_request_enter_pending(treq, tconn, true);
}
- return FR_TRUNK_ENQUEUE_OK;
+ return TRUNK_ENQUEUE_OK;
}
#ifndef NDEBUG
/** Used for sanity checks to ensure all log entries have been freed
*
*/
-static int _state_log_entry_free(fr_trunk_request_state_log_t *slog)
+static int _state_log_entry_free(trunk_request_state_log_t *slog)
{
fr_dlist_remove(slog->log_head, slog);
}
void trunk_request_state_log_entry_add(char const *function, int line,
- fr_trunk_request_t *treq, fr_trunk_request_state_t new)
+ trunk_request_t *treq, trunk_request_state_t new)
{
- fr_trunk_request_state_log_t *slog = NULL;
+ trunk_request_state_log_t *slog = NULL;
- if (fr_dlist_num_elements(&treq->log) >= FR_TRUNK_REQUEST_STATE_LOG_MAX) {
+ if (fr_dlist_num_elements(&treq->log) >= TRUNK_REQUEST_STATE_LOG_MAX) {
slog = fr_dlist_head(&treq->log);
fr_assert_msg(slog, "slog list head NULL but element counter was %u",
fr_dlist_num_elements(&treq->log));
(void)fr_dlist_remove(&treq->log, slog); /* Returns NULL when removing the list head */
memset(slog, 0, sizeof(*slog));
} else {
- MEM(slog = talloc_zero(treq, fr_trunk_request_state_log_t));
+ MEM(slog = talloc_zero(treq, trunk_request_state_log_t));
talloc_set_destructor(slog, _state_log_entry_free);
}
}
-void fr_trunk_request_state_log(fr_log_t const *log, fr_log_type_t log_type, char const *file, int line,
- fr_trunk_request_t const *treq)
+void trunk_request_state_log(fr_log_t const *log, fr_log_type_t log_type, char const *file, int line,
+ trunk_request_t const *treq)
{
- fr_trunk_request_state_log_t *slog = NULL;
+ trunk_request_state_log_t *slog = NULL;
int i;
fr_log(log, log_type, file, line, "[%u] %s:%i - in conn %"PRIu64" in state %s - %s -> %s",
i, slog->function, slog->line,
slog->tconn_id,
- slog->tconn ? fr_table_str_by_value(fr_trunk_connection_states,
+ slog->tconn ? fr_table_str_by_value(trunk_connection_states,
slog->tconn_state, "<INVALID>") : "none",
- fr_table_str_by_value(fr_trunk_request_states, slog->from, "<INVALID>"),
- fr_table_str_by_value(fr_trunk_request_states, slog->to, "<INVALID>"));
+ fr_table_str_by_value(trunk_request_states, slog->from, "<INVALID>"),
+ fr_table_str_by_value(trunk_request_states, slog->to, "<INVALID>"));
}
}
#endif
/** Return the count number of connections in the specified states
*
* @param[in] trunk to retrieve counts for.
- * @param[in] conn_state One or more #fr_trunk_connection_state_t states or'd together.
+ * @param[in] conn_state One or more #trunk_connection_state_t states or'd together.
* @return The number of connections in the specified states.
*/
-uint16_t fr_trunk_connection_count_by_state(fr_trunk_t *trunk, int conn_state)
+uint16_t trunk_connection_count_by_state(trunk_t *trunk, int conn_state)
{
uint16_t count = 0;
- if (conn_state & FR_TRUNK_CONN_INIT) count += fr_dlist_num_elements(&trunk->init);
- if (conn_state & FR_TRUNK_CONN_CONNECTING) count += fr_dlist_num_elements(&trunk->connecting);
- if (conn_state & FR_TRUNK_CONN_ACTIVE) count += fr_minmax_heap_num_elements(trunk->active);
- if (conn_state & FR_TRUNK_CONN_FULL) count += fr_dlist_num_elements(&trunk->full);
- if (conn_state & FR_TRUNK_CONN_INACTIVE) count += fr_dlist_num_elements(&trunk->inactive);
- if (conn_state & FR_TRUNK_CONN_INACTIVE_DRAINING) count += fr_dlist_num_elements(&trunk->inactive_draining);
- if (conn_state & FR_TRUNK_CONN_CLOSED) count += fr_dlist_num_elements(&trunk->closed);
- if (conn_state & FR_TRUNK_CONN_DRAINING) count += fr_dlist_num_elements(&trunk->draining);
- if (conn_state & FR_TRUNK_CONN_DRAINING_TO_FREE) count += fr_dlist_num_elements(&trunk->draining_to_free);
+ if (conn_state & TRUNK_CONN_INIT) count += fr_dlist_num_elements(&trunk->init);
+ if (conn_state & TRUNK_CONN_CONNECTING) count += fr_dlist_num_elements(&trunk->connecting);
+ if (conn_state & TRUNK_CONN_ACTIVE) count += fr_minmax_heap_num_elements(trunk->active);
+ if (conn_state & TRUNK_CONN_FULL) count += fr_dlist_num_elements(&trunk->full);
+ if (conn_state & TRUNK_CONN_INACTIVE) count += fr_dlist_num_elements(&trunk->inactive);
+ if (conn_state & TRUNK_CONN_INACTIVE_DRAINING) count += fr_dlist_num_elements(&trunk->inactive_draining);
+ if (conn_state & TRUNK_CONN_CLOSED) count += fr_dlist_num_elements(&trunk->closed);
+ if (conn_state & TRUNK_CONN_DRAINING) count += fr_dlist_num_elements(&trunk->draining);
+ if (conn_state & TRUNK_CONN_DRAINING_TO_FREE) count += fr_dlist_num_elements(&trunk->draining_to_free);
return count;
}
*
* @return The number of requests in the specified states, associated with a tconn.
*/
-uint32_t fr_trunk_request_count_by_connection(fr_trunk_connection_t const *tconn, int req_state)
+uint32_t trunk_request_count_by_connection(trunk_connection_t const *tconn, int req_state)
{
uint32_t count = 0;
- if (req_state & FR_TRUNK_REQUEST_STATE_PENDING) count += fr_heap_num_elements(tconn->pending);
- if (req_state & FR_TRUNK_REQUEST_STATE_PARTIAL) count += tconn->partial ? 1 : 0;
- if (req_state & FR_TRUNK_REQUEST_STATE_SENT) count += fr_dlist_num_elements(&tconn->sent);
- if (req_state & FR_TRUNK_REQUEST_STATE_IDLE) count += fr_dlist_num_elements(&tconn->idle);
- if (req_state & FR_TRUNK_REQUEST_STATE_CANCEL) count += fr_dlist_num_elements(&tconn->cancel);
- if (req_state & FR_TRUNK_REQUEST_STATE_CANCEL_PARTIAL) count += tconn->cancel_partial ? 1 : 0;
- if (req_state & FR_TRUNK_REQUEST_STATE_CANCEL_SENT) count += fr_dlist_num_elements(&tconn->cancel_sent);
+ if (req_state & TRUNK_REQUEST_STATE_PENDING) count += fr_heap_num_elements(tconn->pending);
+ if (req_state & TRUNK_REQUEST_STATE_PARTIAL) count += tconn->partial ? 1 : 0;
+ if (req_state & TRUNK_REQUEST_STATE_SENT) count += fr_dlist_num_elements(&tconn->sent);
+ if (req_state & TRUNK_REQUEST_STATE_IDLE) count += fr_dlist_num_elements(&tconn->idle);
+ if (req_state & TRUNK_REQUEST_STATE_CANCEL) count += fr_dlist_num_elements(&tconn->cancel);
+ if (req_state & TRUNK_REQUEST_STATE_CANCEL_PARTIAL) count += tconn->cancel_partial ? 1 : 0;
+ if (req_state & TRUNK_REQUEST_STATE_CANCEL_SENT) count += fr_dlist_num_elements(&tconn->cancel_sent);
return count;
}
*
* @param[in] tconn to potentially mark as inactive.
*/
-static inline void trunk_connection_auto_full(fr_trunk_connection_t *tconn)
+static inline void trunk_connection_auto_full(trunk_connection_t *tconn)
{
- fr_trunk_t *trunk = tconn->pub.trunk;
+ trunk_t *trunk = tconn->pub.trunk;
uint32_t count;
- if (tconn->pub.state != FR_TRUNK_CONN_ACTIVE) return;
+ if (tconn->pub.state != TRUNK_CONN_ACTIVE) return;
/*
* Enforces max_req_per_conn
*/
if (trunk->conf.max_req_per_conn > 0) {
- count = fr_trunk_request_count_by_connection(tconn, FR_TRUNK_REQUEST_STATE_ALL);
+ count = trunk_request_count_by_connection(tconn, TRUNK_REQUEST_STATE_ALL);
if (count >= trunk->conf.max_req_per_conn) trunk_connection_enter_full(tconn);
}
}
* - true if the connection is full.
* - false if the connection is not full.
*/
-static inline bool trunk_connection_is_full(fr_trunk_connection_t *tconn)
+static inline bool trunk_connection_is_full(trunk_connection_t *tconn)
{
- fr_trunk_t *trunk = tconn->pub.trunk;
+ trunk_t *trunk = tconn->pub.trunk;
uint32_t count;
/*
* Enforces max_req_per_conn
*/
- count = fr_trunk_request_count_by_connection(tconn, FR_TRUNK_REQUEST_STATE_ALL);
+ count = trunk_request_count_by_connection(tconn, TRUNK_REQUEST_STATE_ALL);
if ((trunk->conf.max_req_per_conn == 0) || (count < trunk->conf.max_req_per_conn)) return false;
return true;
*
* @param[in] tconn to potentially mark as active or reconnect.
*/
-static inline void trunk_connection_auto_unfull(fr_trunk_connection_t *tconn)
+static inline void trunk_connection_auto_unfull(trunk_connection_t *tconn)
{
- if (tconn->pub.state != FR_TRUNK_CONN_FULL) return;
+ if (tconn->pub.state != TRUNK_CONN_FULL) return;
/*
* Enforces max_req_per_conn
/** A connection is readable. Call the request_demux function to read pending requests
*
*/
-static inline void trunk_connection_readable(fr_trunk_connection_t *tconn)
+static inline void trunk_connection_readable(trunk_connection_t *tconn)
{
- fr_trunk_t *trunk = tconn->pub.trunk;
+ trunk_t *trunk = tconn->pub.trunk;
DO_REQUEST_DEMUX(tconn);
}
/** A connection is writable. Call the request_mux function to write pending requests
*
*/
-static inline void trunk_connection_writable(fr_trunk_connection_t *tconn)
+static inline void trunk_connection_writable(trunk_connection_t *tconn)
{
- fr_trunk_t *trunk = tconn->pub.trunk;
+ trunk_t *trunk = tconn->pub.trunk;
/*
* Call the cancel_sent function (if we have one)
* to inform a backend datastore we no longer
* care about the result
*/
- if (trunk->funcs.request_cancel_mux && fr_trunk_request_count_by_connection(tconn,
- FR_TRUNK_REQUEST_STATE_CANCEL |
- FR_TRUNK_REQUEST_STATE_CANCEL_PARTIAL)) {
+ if (trunk->funcs.request_cancel_mux && trunk_request_count_by_connection(tconn,
+ TRUNK_REQUEST_STATE_CANCEL |
+ TRUNK_REQUEST_STATE_CANCEL_PARTIAL)) {
DO_REQUEST_CANCEL_MUX(tconn);
}
- if (!fr_trunk_request_count_by_connection(tconn,
- FR_TRUNK_REQUEST_STATE_PENDING |
- FR_TRUNK_REQUEST_STATE_PARTIAL)) return;
+ if (!trunk_request_count_by_connection(tconn,
+ TRUNK_REQUEST_STATE_PENDING |
+ TRUNK_REQUEST_STATE_PARTIAL)) return;
DO_REQUEST_MUX(tconn);
}
/** Update the registrations for I/O events we're interested in
*
*/
-static void trunk_connection_event_update(fr_trunk_connection_t *tconn)
+static void trunk_connection_event_update(trunk_connection_t *tconn)
{
- fr_trunk_t *trunk = tconn->pub.trunk;
- fr_trunk_connection_event_t events = FR_TRUNK_CONN_EVENT_NONE;
+ trunk_t *trunk = tconn->pub.trunk;
+ trunk_connection_event_t events = TRUNK_CONN_EVENT_NONE;
switch (tconn->pub.state) {
/*
* For the other states the trunk shouldn't be processing
* requests.
*/
- case FR_TRUNK_CONN_ACTIVE:
- case FR_TRUNK_CONN_FULL:
- case FR_TRUNK_CONN_INACTIVE:
- case FR_TRUNK_CONN_INACTIVE_DRAINING:
- case FR_TRUNK_CONN_DRAINING:
- case FR_TRUNK_CONN_DRAINING_TO_FREE:
+ case TRUNK_CONN_ACTIVE:
+ case TRUNK_CONN_FULL:
+ case TRUNK_CONN_INACTIVE:
+ case TRUNK_CONN_INACTIVE_DRAINING:
+ case TRUNK_CONN_DRAINING:
+ case TRUNK_CONN_DRAINING_TO_FREE:
/*
* If the connection is always writable,
* then we don't care about write events.
*/
if (!trunk->conf.always_writable &&
- fr_trunk_request_count_by_connection(tconn,
- FR_TRUNK_REQUEST_STATE_PARTIAL |
- FR_TRUNK_REQUEST_STATE_PENDING |
+ trunk_request_count_by_connection(tconn,
+ TRUNK_REQUEST_STATE_PARTIAL |
+ TRUNK_REQUEST_STATE_PENDING |
(trunk->funcs.request_cancel_mux ?
- FR_TRUNK_REQUEST_STATE_CANCEL |
- FR_TRUNK_REQUEST_STATE_CANCEL_PARTIAL : 0)) > 0) {
- events |= FR_TRUNK_CONN_EVENT_WRITE;
+ TRUNK_REQUEST_STATE_CANCEL |
+ TRUNK_REQUEST_STATE_CANCEL_PARTIAL : 0)) > 0) {
+ events |= TRUNK_CONN_EVENT_WRITE;
}
- if (fr_trunk_request_count_by_connection(tconn,
- FR_TRUNK_REQUEST_STATE_SENT |
+ if (trunk_request_count_by_connection(tconn,
+ TRUNK_REQUEST_STATE_SENT |
(trunk->funcs.request_cancel_mux ?
- FR_TRUNK_REQUEST_STATE_CANCEL_SENT : 0)) > 0) {
- events |= FR_TRUNK_CONN_EVENT_READ;
+ TRUNK_REQUEST_STATE_CANCEL_SENT : 0)) > 0) {
+ events |= TRUNK_CONN_EVENT_READ;
}
break;
* Stop that from happening until after
* we're done using it.
*/
- fr_connection_signals_pause(tconn->pub.conn);
+ connection_signals_pause(tconn->pub.conn);
DO_CONNECTION_NOTIFY(tconn, events);
tconn->events = events;
- fr_connection_signals_resume(tconn->pub.conn);
+ connection_signals_resume(tconn->pub.conn);
}
}
*
* @param[in] tconn to remove.
*/
-static void trunk_connection_remove(fr_trunk_connection_t *tconn)
+static void trunk_connection_remove(trunk_connection_t *tconn)
{
- fr_trunk_t *trunk = tconn->pub.trunk;
+ trunk_t *trunk = tconn->pub.trunk;
switch (tconn->pub.state) {
- case FR_TRUNK_CONN_ACTIVE:
+ case TRUNK_CONN_ACTIVE:
{
int ret;
}
return;
- case FR_TRUNK_CONN_INIT:
+ case TRUNK_CONN_INIT:
fr_dlist_remove(&trunk->init, tconn);
break;
- case FR_TRUNK_CONN_CONNECTING:
+ case TRUNK_CONN_CONNECTING:
fr_dlist_remove(&trunk->connecting, tconn);
return;
- case FR_TRUNK_CONN_CLOSED:
+ case TRUNK_CONN_CLOSED:
fr_dlist_remove(&trunk->closed, tconn);
return;
- case FR_TRUNK_CONN_FULL:
+ case TRUNK_CONN_FULL:
fr_dlist_remove(&trunk->full, tconn);
return;
- case FR_TRUNK_CONN_INACTIVE:
+ case TRUNK_CONN_INACTIVE:
fr_dlist_remove(&trunk->inactive, tconn);
return;
- case FR_TRUNK_CONN_INACTIVE_DRAINING:
+ case TRUNK_CONN_INACTIVE_DRAINING:
fr_dlist_remove(&trunk->inactive_draining, tconn);
return;
- case FR_TRUNK_CONN_DRAINING:
+ case TRUNK_CONN_DRAINING:
fr_dlist_remove(&trunk->draining, tconn);
return;
- case FR_TRUNK_CONN_DRAINING_TO_FREE:
+ case TRUNK_CONN_DRAINING_TO_FREE:
fr_dlist_remove(&trunk->draining_to_free, tconn);
return;
- case FR_TRUNK_CONN_HALTED:
+ case TRUNK_CONN_HALTED:
return;
}
}
* Called whenever a trunk connection is at the maximum number of requests.
* Removes the connection from the connected heap, and places it in the full list.
*/
-static void trunk_connection_enter_full(fr_trunk_connection_t *tconn)
+static void trunk_connection_enter_full(trunk_connection_t *tconn)
{
- fr_trunk_t *trunk = tconn->pub.trunk;
+ trunk_t *trunk = tconn->pub.trunk;
switch (tconn->pub.state) {
- case FR_TRUNK_CONN_ACTIVE:
+ case TRUNK_CONN_ACTIVE:
trunk_connection_remove(tconn);
break;
default:
- CONN_BAD_STATE_TRANSITION(FR_TRUNK_CONN_FULL);
+ CONN_BAD_STATE_TRANSITION(TRUNK_CONN_FULL);
}
fr_dlist_insert_head(&trunk->full, tconn);
- CONN_STATE_TRANSITION(FR_TRUNK_CONN_FULL, DEBUG2);
+ CONN_STATE_TRANSITION(TRUNK_CONN_FULL, DEBUG2);
}
/** Transition a connection to the inactive state
* Called whenever the API client wants to stop new requests being enqueued
* on a trunk connection.
*/
-static void trunk_connection_enter_inactive(fr_trunk_connection_t *tconn)
+static void trunk_connection_enter_inactive(trunk_connection_t *tconn)
{
- fr_trunk_t *trunk = tconn->pub.trunk;
+ trunk_t *trunk = tconn->pub.trunk;
switch (tconn->pub.state) {
- case FR_TRUNK_CONN_ACTIVE:
- case FR_TRUNK_CONN_FULL:
+ case TRUNK_CONN_ACTIVE:
+ case TRUNK_CONN_FULL:
trunk_connection_remove(tconn);
break;
default:
- CONN_BAD_STATE_TRANSITION(FR_TRUNK_CONN_INACTIVE);
+ CONN_BAD_STATE_TRANSITION(TRUNK_CONN_INACTIVE);
}
fr_dlist_insert_head(&trunk->inactive, tconn);
- CONN_STATE_TRANSITION(FR_TRUNK_CONN_INACTIVE, DEBUG2);
+ CONN_STATE_TRANSITION(TRUNK_CONN_INACTIVE, DEBUG2);
}
/** Transition a connection to the inactive-draining state
* Called whenever the trunk manager wants to drain an inactive connection
* of its requests.
*/
-static void trunk_connection_enter_inactive_draining(fr_trunk_connection_t *tconn)
+static void trunk_connection_enter_inactive_draining(trunk_connection_t *tconn)
{
- fr_trunk_t *trunk = tconn->pub.trunk;
+ trunk_t *trunk = tconn->pub.trunk;
switch (tconn->pub.state) {
- case FR_TRUNK_CONN_INACTIVE:
- case FR_TRUNK_CONN_DRAINING:
+ case TRUNK_CONN_INACTIVE:
+ case TRUNK_CONN_DRAINING:
trunk_connection_remove(tconn);
break;
default:
- CONN_BAD_STATE_TRANSITION(FR_TRUNK_CONN_INACTIVE_DRAINING);
+ CONN_BAD_STATE_TRANSITION(TRUNK_CONN_INACTIVE_DRAINING);
}
fr_dlist_insert_head(&trunk->inactive_draining, tconn);
- CONN_STATE_TRANSITION(FR_TRUNK_CONN_INACTIVE_DRAINING, INFO);
+ CONN_STATE_TRANSITION(TRUNK_CONN_INACTIVE_DRAINING, INFO);
/*
* Immediately re-enqueue all pending
* requests, so the connection is drained
* quicker.
*/
- trunk_connection_requests_requeue(tconn, FR_TRUNK_REQUEST_STATE_PENDING, 0, false);
+ trunk_connection_requests_requeue(tconn, TRUNK_REQUEST_STATE_PENDING, 0, false);
}
/** Transition a connection to the draining state
* Removes the connection from the active heap so it won't be assigned any new
* connections.
*/
-static void trunk_connection_enter_draining(fr_trunk_connection_t *tconn)
+static void trunk_connection_enter_draining(trunk_connection_t *tconn)
{
- fr_trunk_t *trunk = tconn->pub.trunk;
+ trunk_t *trunk = tconn->pub.trunk;
switch (tconn->pub.state) {
- case FR_TRUNK_CONN_ACTIVE:
- case FR_TRUNK_CONN_FULL:
- case FR_TRUNK_CONN_INACTIVE:
- case FR_TRUNK_CONN_INACTIVE_DRAINING:
+ case TRUNK_CONN_ACTIVE:
+ case TRUNK_CONN_FULL:
+ case TRUNK_CONN_INACTIVE:
+ case TRUNK_CONN_INACTIVE_DRAINING:
trunk_connection_remove(tconn);
break;
default:
- CONN_BAD_STATE_TRANSITION(FR_TRUNK_CONN_DRAINING);
+ CONN_BAD_STATE_TRANSITION(TRUNK_CONN_DRAINING);
}
fr_dlist_insert_head(&trunk->draining, tconn);
- CONN_STATE_TRANSITION(FR_TRUNK_CONN_DRAINING, INFO);
+ CONN_STATE_TRANSITION(TRUNK_CONN_DRAINING, INFO);
/*
* Immediately re-enqueue all pending
* requests, so the connection is drained
* quicker.
*/
- trunk_connection_requests_requeue(tconn, FR_TRUNK_REQUEST_STATE_PENDING, 0, false);
+ trunk_connection_requests_requeue(tconn, TRUNK_REQUEST_STATE_PENDING, 0, false);
}
/** Transition a connection to the draining-to-reconnect state
* Removes the connection from the active heap so it won't be assigned any new
* connections.
*/
-static void trunk_connection_enter_draining_to_free(fr_trunk_connection_t *tconn)
+static void trunk_connection_enter_draining_to_free(trunk_connection_t *tconn)
{
- fr_trunk_t *trunk = tconn->pub.trunk;
+ trunk_t *trunk = tconn->pub.trunk;
if (tconn->lifetime_ev) fr_event_timer_delete(&tconn->lifetime_ev);
switch (tconn->pub.state) {
- case FR_TRUNK_CONN_ACTIVE:
- case FR_TRUNK_CONN_FULL:
- case FR_TRUNK_CONN_INACTIVE:
- case FR_TRUNK_CONN_INACTIVE_DRAINING:
- case FR_TRUNK_CONN_DRAINING:
+ case TRUNK_CONN_ACTIVE:
+ case TRUNK_CONN_FULL:
+ case TRUNK_CONN_INACTIVE:
+ case TRUNK_CONN_INACTIVE_DRAINING:
+ case TRUNK_CONN_DRAINING:
trunk_connection_remove(tconn);
break;
default:
- CONN_BAD_STATE_TRANSITION(FR_TRUNK_CONN_DRAINING_TO_FREE);
+ CONN_BAD_STATE_TRANSITION(TRUNK_CONN_DRAINING_TO_FREE);
}
fr_dlist_insert_head(&trunk->draining_to_free, tconn);
- CONN_STATE_TRANSITION(FR_TRUNK_CONN_DRAINING_TO_FREE, INFO);
+ CONN_STATE_TRANSITION(TRUNK_CONN_DRAINING_TO_FREE, INFO);
/*
* Immediately re-enqueue all pending
* requests, so the connection is drained
* quicker.
*/
- trunk_connection_requests_requeue(tconn, FR_TRUNK_REQUEST_STATE_PENDING, 0, false);
+ trunk_connection_requests_requeue(tconn, TRUNK_REQUEST_STATE_PENDING, 0, false);
}
* This should only be called on a connection which is in the full state,
* inactive state, draining state or connecting state.
*/
-static void trunk_connection_enter_active(fr_trunk_connection_t *tconn)
+static void trunk_connection_enter_active(trunk_connection_t *tconn)
{
- fr_trunk_t *trunk = tconn->pub.trunk;
+ trunk_t *trunk = tconn->pub.trunk;
int ret;
switch (tconn->pub.state) {
- case FR_TRUNK_CONN_FULL:
- case FR_TRUNK_CONN_INACTIVE:
- case FR_TRUNK_CONN_INACTIVE_DRAINING:
- case FR_TRUNK_CONN_DRAINING:
+ case TRUNK_CONN_FULL:
+ case TRUNK_CONN_INACTIVE:
+ case TRUNK_CONN_INACTIVE_DRAINING:
+ case TRUNK_CONN_DRAINING:
trunk_connection_remove(tconn);
break;
- case FR_TRUNK_CONN_CONNECTING:
+ case TRUNK_CONN_CONNECTING:
trunk_connection_remove(tconn);
- fr_assert(fr_trunk_request_count_by_connection(tconn, FR_TRUNK_REQUEST_STATE_ALL) == 0);
+ fr_assert(trunk_request_count_by_connection(tconn, TRUNK_REQUEST_STATE_ALL) == 0);
break;
default:
- CONN_BAD_STATE_TRANSITION(FR_TRUNK_CONN_ACTIVE);
+ CONN_BAD_STATE_TRANSITION(TRUNK_CONN_ACTIVE);
}
ret = fr_minmax_heap_insert(trunk->active, tconn); /* re-insert into the active heap*/
return;
}
- CONN_STATE_TRANSITION(FR_TRUNK_CONN_ACTIVE, DEBUG2);
+ CONN_STATE_TRANSITION(TRUNK_CONN_ACTIVE, DEBUG2);
/*
* Reorder the connections
* @param[in] conn The connection which changes state.
* @param[in] prev The connection is was in.
* @param[in] state The connection is now in.
- * @param[in] uctx The fr_trunk_connection_t wrapping the connection.
+ * @param[in] uctx The trunk_connection_t wrapping the connection.
*/
-static void _trunk_connection_on_init(UNUSED fr_connection_t *conn,
- UNUSED fr_connection_state_t prev,
- UNUSED fr_connection_state_t state,
+static void _trunk_connection_on_init(UNUSED connection_t *conn,
+ UNUSED connection_state_t prev,
+ UNUSED connection_state_t state,
void *uctx)
{
- fr_trunk_connection_t *tconn = talloc_get_type_abort(uctx, fr_trunk_connection_t);
- fr_trunk_t *trunk = tconn->pub.trunk;
+ trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
+ trunk_t *trunk = tconn->pub.trunk;
switch (tconn->pub.state) {
- case FR_TRUNK_CONN_HALTED:
+ case TRUNK_CONN_HALTED:
break;
- case FR_TRUNK_CONN_CLOSED:
+ case TRUNK_CONN_CLOSED:
trunk_connection_remove(tconn);
break;
default:
- CONN_BAD_STATE_TRANSITION(FR_TRUNK_CONN_INIT);
+ CONN_BAD_STATE_TRANSITION(TRUNK_CONN_INIT);
}
fr_dlist_insert_head(&trunk->init, tconn);
- CONN_STATE_TRANSITION(FR_TRUNK_CONN_INIT, DEBUG2);
+ CONN_STATE_TRANSITION(TRUNK_CONN_INIT, DEBUG2);
}
/** Connection transitioned to the connecting state
* @param[in] conn The connection which changes state.
* @param[in] prev The connection is was in.
* @param[in] state The connection is now in.
- * @param[in] uctx The fr_trunk_connection_t wrapping the connection.
+ * @param[in] uctx The trunk_connection_t wrapping the connection.
*/
-static void _trunk_connection_on_connecting(UNUSED fr_connection_t *conn,
- UNUSED fr_connection_state_t prev,
- UNUSED fr_connection_state_t state,
+static void _trunk_connection_on_connecting(UNUSED connection_t *conn,
+ UNUSED connection_state_t prev,
+ UNUSED connection_state_t state,
void *uctx)
{
- fr_trunk_connection_t *tconn = talloc_get_type_abort(uctx, fr_trunk_connection_t);
- fr_trunk_t *trunk = tconn->pub.trunk;
+ trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
+ trunk_t *trunk = tconn->pub.trunk;
switch (tconn->pub.state) {
- case FR_TRUNK_CONN_INIT:
- case FR_TRUNK_CONN_CLOSED:
+ case TRUNK_CONN_INIT:
+ case TRUNK_CONN_CLOSED:
trunk_connection_remove(tconn);
break;
default:
- CONN_BAD_STATE_TRANSITION(FR_TRUNK_CONN_CONNECTING);
+ CONN_BAD_STATE_TRANSITION(TRUNK_CONN_CONNECTING);
}
/*
* connecting state, it should have
* no requests associated with it.
*/
- fr_assert(fr_trunk_request_count_by_connection(tconn, FR_TRUNK_REQUEST_STATE_ALL) == 0);
+ fr_assert(trunk_request_count_by_connection(tconn, TRUNK_REQUEST_STATE_ALL) == 0);
fr_dlist_insert_head(&trunk->connecting, tconn); /* MUST remain a head insertion for reconnect logic */
- CONN_STATE_TRANSITION(FR_TRUNK_CONN_CONNECTING, INFO);
+ CONN_STATE_TRANSITION(TRUNK_CONN_CONNECTING, INFO);
}
/** Connection transitioned to the shutdown state
* @param[in] conn The connection which changes state.
* @param[in] prev The connection is was in.
* @param[in] state The connection is now in.
- * @param[in] uctx The fr_trunk_connection_t wrapping the connection.
+ * @param[in] uctx The trunk_connection_t wrapping the connection.
*/
-static void _trunk_connection_on_shutdown(UNUSED fr_connection_t *conn,
- UNUSED fr_connection_state_t prev,
- UNUSED fr_connection_state_t state,
+static void _trunk_connection_on_shutdown(UNUSED connection_t *conn,
+ UNUSED connection_state_t prev,
+ UNUSED connection_state_t state,
void *uctx)
{
- fr_trunk_connection_t *tconn = talloc_get_type_abort(uctx, fr_trunk_connection_t);
+ trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
switch (tconn->pub.state) {
- case FR_TRUNK_CONN_DRAINING_TO_FREE: /* Do Nothing */
+ case TRUNK_CONN_DRAINING_TO_FREE: /* Do Nothing */
return;
- case FR_TRUNK_CONN_ACTIVE: /* Transition to draining-to-free */
- case FR_TRUNK_CONN_FULL:
- case FR_TRUNK_CONN_INACTIVE:
- case FR_TRUNK_CONN_INACTIVE_DRAINING:
- case FR_TRUNK_CONN_DRAINING:
+ case TRUNK_CONN_ACTIVE: /* Transition to draining-to-free */
+ case TRUNK_CONN_FULL:
+ case TRUNK_CONN_INACTIVE:
+ case TRUNK_CONN_INACTIVE_DRAINING:
+ case TRUNK_CONN_DRAINING:
break;
- case FR_TRUNK_CONN_INIT:
- case FR_TRUNK_CONN_CONNECTING:
- case FR_TRUNK_CONN_CLOSED:
- case FR_TRUNK_CONN_HALTED:
- CONN_BAD_STATE_TRANSITION(FR_TRUNK_CONN_DRAINING_TO_FREE);
+ case TRUNK_CONN_INIT:
+ case TRUNK_CONN_CONNECTING:
+ case TRUNK_CONN_CLOSED:
+ case TRUNK_CONN_HALTED:
+ CONN_BAD_STATE_TRANSITION(TRUNK_CONN_DRAINING_TO_FREE);
}
trunk_connection_enter_draining_to_free(tconn);
*/
static void _trunk_connection_lifetime_expire(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
{
- fr_trunk_connection_t *tconn = talloc_get_type_abort(uctx, fr_trunk_connection_t);
+ trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
trunk_connection_enter_draining_to_free(tconn);
}
* @param[in] conn The connection which changes state.
* @param[in] prev The connection is was in.
* @param[in] state The connection is now in.
- * @param[in] uctx The fr_trunk_connection_t wrapping the connection.
+ * @param[in] uctx The trunk_connection_t wrapping the connection.
*/
-static void _trunk_connection_on_connected(UNUSED fr_connection_t *conn,
- UNUSED fr_connection_state_t prev,
- UNUSED fr_connection_state_t state,
+static void _trunk_connection_on_connected(UNUSED connection_t *conn,
+ UNUSED connection_state_t prev,
+ UNUSED connection_state_t state,
void *uctx)
{
- fr_trunk_connection_t *tconn = talloc_get_type_abort(uctx, fr_trunk_connection_t);
- fr_trunk_t *trunk = tconn->pub.trunk;
+ trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
+ trunk_t *trunk = tconn->pub.trunk;
/*
* If a connection was just connected,
* it should have no requests associated
* with it.
*/
- fr_assert(fr_trunk_request_count_by_connection(tconn, FR_TRUNK_REQUEST_STATE_ALL) == 0);
+ fr_assert(trunk_request_count_by_connection(tconn, TRUNK_REQUEST_STATE_ALL) == 0);
/*
* Set here, as the active state can
if (fr_event_timer_in(tconn, trunk->el, &tconn->lifetime_ev,
trunk->conf.lifetime, _trunk_connection_lifetime_expire, tconn) < 0) {
PERROR("Failed inserting connection reconnection timer event, halting connection");
- fr_connection_signal_shutdown(tconn->pub.conn);
+ connection_signal_shutdown(tconn->pub.conn);
return;
}
}
* @param[in] conn The connection which changes state.
* @param[in] prev The connection is was in.
* @param[in] state The connection is now in.
- * @param[in] uctx The fr_trunk_connection_t wrapping the connection.
+ * @param[in] uctx The trunk_connection_t wrapping the connection.
*/
-static void _trunk_connection_on_closed(UNUSED fr_connection_t *conn,
- UNUSED fr_connection_state_t prev,
- UNUSED fr_connection_state_t state,
+static void _trunk_connection_on_closed(UNUSED connection_t *conn,
+ UNUSED connection_state_t prev,
+ UNUSED connection_state_t state,
void *uctx)
{
- fr_trunk_connection_t *tconn = talloc_get_type_abort(uctx, fr_trunk_connection_t);
- fr_trunk_t *trunk = tconn->pub.trunk;
+ trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
+ trunk_t *trunk = tconn->pub.trunk;
bool need_requeue = false;
switch (tconn->pub.state) {
- case FR_TRUNK_CONN_ACTIVE:
- case FR_TRUNK_CONN_FULL:
- case FR_TRUNK_CONN_INACTIVE:
- case FR_TRUNK_CONN_INACTIVE_DRAINING:
- case FR_TRUNK_CONN_DRAINING:
- case FR_TRUNK_CONN_DRAINING_TO_FREE:
+ case TRUNK_CONN_ACTIVE:
+ case TRUNK_CONN_FULL:
+ case TRUNK_CONN_INACTIVE:
+ case TRUNK_CONN_INACTIVE_DRAINING:
+ case TRUNK_CONN_DRAINING:
+ case TRUNK_CONN_DRAINING_TO_FREE:
need_requeue = true;
trunk_connection_remove(tconn);
break;
- case FR_TRUNK_CONN_INIT: /* Initialisation failed */
- case FR_TRUNK_CONN_CONNECTING:
+ case TRUNK_CONN_INIT: /* Initialisation failed */
+ case TRUNK_CONN_CONNECTING:
trunk_connection_remove(tconn);
- fr_assert(fr_trunk_request_count_by_connection(tconn, FR_TRUNK_REQUEST_STATE_ALL) == 0);
+ fr_assert(trunk_request_count_by_connection(tconn, TRUNK_REQUEST_STATE_ALL) == 0);
break;
- case FR_TRUNK_CONN_CLOSED:
- case FR_TRUNK_CONN_HALTED: /* Can't move backwards? */
- CONN_BAD_STATE_TRANSITION(FR_TRUNK_CONN_CLOSED);
+ case TRUNK_CONN_CLOSED:
+ case TRUNK_CONN_HALTED: /* Can't move backwards? */
+ CONN_BAD_STATE_TRANSITION(TRUNK_CONN_CLOSED);
}
fr_dlist_insert_head(&trunk->closed, tconn); /* MUST remain a head insertion for reconnect logic */
- CONN_STATE_TRANSITION(FR_TRUNK_CONN_CLOSED, INFO);
+ CONN_STATE_TRANSITION(TRUNK_CONN_CLOSED, INFO);
/*
* Now *AFTER* the connection has been
* removed from the active, pool
* re-enqueue the requests.
*/
- if (need_requeue) trunk_connection_requests_requeue(tconn, FR_TRUNK_REQUEST_STATE_ALL, 0, true);
+ if (need_requeue) trunk_connection_requests_requeue(tconn, TRUNK_REQUEST_STATE_ALL, 0, true);
/*
* There should be no requests left on this
* connection. They should have all been
* moved off or failed.
*/
- fr_assert(fr_trunk_request_count_by_connection(tconn, FR_TRUNK_REQUEST_STATE_ALL) == 0);
+ fr_assert(trunk_request_count_by_connection(tconn, TRUNK_REQUEST_STATE_ALL) == 0);
/*
* Clear statistics and flags
* @param[in] conn The connection which changes state.
* @param[in] prev The connection is was in.
* @param[in] state The connection is now in.
- * @param[in] uctx The fr_trunk_connection_t wrapping the connection.
+ * @param[in] uctx The trunk_connection_t wrapping the connection.
*/
-static void _trunk_connection_on_failed(fr_connection_t *conn,
- fr_connection_state_t prev,
- fr_connection_state_t state,
+static void _trunk_connection_on_failed(connection_t *conn,
+ connection_state_t prev,
+ connection_state_t state,
void *uctx)
{
- fr_trunk_connection_t *tconn = talloc_get_type_abort(uctx, fr_trunk_connection_t);
- fr_trunk_t *trunk = tconn->pub.trunk;
+ trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
+ trunk_t *trunk = tconn->pub.trunk;
/*
* Need to set this first as it
* INIT -> INIT transition which triggers
* an assert.
*/
- if (prev == FR_CONNECTION_STATE_INIT) _trunk_connection_on_closed(conn, prev, state, uctx);
+	if (prev == CONNECTION_STATE_INIT) _trunk_connection_on_closed(conn, prev, state, uctx);
/*
* See what the state of the trunk is
* future, then fail all the requests in the
* trunk backlog.
*/
- if ((state == FR_CONNECTION_STATE_CONNECTED) &&
- (fr_trunk_connection_count_by_state(trunk,
- (FR_TRUNK_CONN_ACTIVE |
- FR_TRUNK_CONN_FULL |
- FR_TRUNK_CONN_DRAINING)) == 0)) trunk_backlog_drain(trunk);
+	if ((state == CONNECTION_STATE_CONNECTED) &&
+ (trunk_connection_count_by_state(trunk,
+ (TRUNK_CONN_ACTIVE |
+ TRUNK_CONN_FULL |
+ TRUNK_CONN_DRAINING)) == 0)) trunk_backlog_drain(trunk);
}
/** Connection transitioned to the halted state
* Remove the connection remove all lists, as it's likely about to be freed.
*
* Setting the trunk back to the init state ensures that if the code is ever
- * refactored and #fr_connection_signal_reconnect is used after a connection
+ * refactored and #connection_signal_reconnect is used after a connection
* is halted, then everything is maintained in a valid state.
*
* @note This function is only called from the connection API as a watcher.
* @param[in] conn The connection which changes state.
* @param[in] prev The connection is was in.
* @param[in] state The connection is now in.
- * @param[in] uctx The fr_trunk_connection_t wrapping the connection.
+ * @param[in] uctx The trunk_connection_t wrapping the connection.
*/
-static void _trunk_connection_on_halted(UNUSED fr_connection_t *conn,
- UNUSED fr_connection_state_t prev,
- UNUSED fr_connection_state_t state,
+static void _trunk_connection_on_halted(UNUSED connection_t *conn,
+ UNUSED connection_state_t prev,
+ UNUSED connection_state_t state,
void *uctx)
{
- fr_trunk_connection_t *tconn = talloc_get_type_abort(uctx, fr_trunk_connection_t);
- fr_trunk_t *trunk = tconn->pub.trunk;
+ trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
+ trunk_t *trunk = tconn->pub.trunk;
switch (tconn->pub.state) {
- case FR_TRUNK_CONN_INIT:
- case FR_TRUNK_CONN_CLOSED:
+ case TRUNK_CONN_INIT:
+ case TRUNK_CONN_CLOSED:
trunk_connection_remove(tconn);
break;
default:
- CONN_BAD_STATE_TRANSITION(FR_TRUNK_CONN_HALTED);
+ CONN_BAD_STATE_TRANSITION(TRUNK_CONN_HALTED);
}
/*
* It began life in the halted state,
* and will end life in the halted state.
*/
- CONN_STATE_TRANSITION(FR_TRUNK_CONN_HALTED, DEBUG2);
+ CONN_STATE_TRANSITION(TRUNK_CONN_HALTED, DEBUG2);
/*
* There should be no requests left on this
* connection. They should have all been
* moved off or failed.
*/
- fr_assert(fr_trunk_request_count_by_connection(tconn, FR_TRUNK_REQUEST_STATE_ALL) == 0);
+ fr_assert(trunk_request_count_by_connection(tconn, TRUNK_REQUEST_STATE_ALL) == 0);
/*
* And free the connection...
*
* Enforces orderly free order of children of the tconn
*/
-static int _trunk_connection_free(fr_trunk_connection_t *tconn)
+static int _trunk_connection_free(trunk_connection_t *tconn)
{
- fr_assert(tconn->pub.state == FR_TRUNK_CONN_HALTED);
+ fr_assert(tconn->pub.state == TRUNK_CONN_HALTED);
fr_assert(!fr_dlist_entry_in_list(&tconn->entry)); /* Should not be in a list */
/*
*/
if (tconn->pub.trunk->freeing) {
fr_dlist_head_t to_fail;
- fr_trunk_request_t *treq = NULL;
+ trunk_request_t *treq = NULL;
- fr_dlist_talloc_init(&to_fail, fr_trunk_request_t, entry);
+ fr_dlist_talloc_init(&to_fail, trunk_request_t, entry);
/*
* Remove requests from this connection
*/
- trunk_connection_requests_dequeue(&to_fail, tconn, FR_TRUNK_REQUEST_STATE_ALL, 0);
+ trunk_connection_requests_dequeue(&to_fail, tconn, TRUNK_REQUEST_STATE_ALL, 0);
while ((treq = fr_dlist_next(&to_fail, treq))) {
- fr_trunk_request_t *prev;
+ trunk_request_t *prev;
prev = fr_dlist_remove(&to_fail, treq);
trunk_request_enter_failed(treq);
* as it processes its backlog of state changes,
* as we are about to be freed.
*/
- fr_connection_del_watch_pre(tconn->pub.conn, FR_CONNECTION_STATE_INIT, _trunk_connection_on_init);
- fr_connection_del_watch_post(tconn->pub.conn, FR_CONNECTION_STATE_CONNECTING, _trunk_connection_on_connecting);
- fr_connection_del_watch_post(tconn->pub.conn, FR_CONNECTION_STATE_CONNECTED, _trunk_connection_on_connected);
- fr_connection_del_watch_pre(tconn->pub.conn, FR_CONNECTION_STATE_CLOSED, _trunk_connection_on_closed);
- fr_connection_del_watch_post(tconn->pub.conn, FR_CONNECTION_STATE_SHUTDOWN, _trunk_connection_on_shutdown);
- fr_connection_del_watch_pre(tconn->pub.conn, FR_CONNECTION_STATE_FAILED, _trunk_connection_on_failed);
- fr_connection_del_watch_post(tconn->pub.conn, FR_CONNECTION_STATE_HALTED, _trunk_connection_on_halted);
+	connection_del_watch_pre(tconn->pub.conn, CONNECTION_STATE_INIT, _trunk_connection_on_init);
+	connection_del_watch_post(tconn->pub.conn, CONNECTION_STATE_CONNECTING, _trunk_connection_on_connecting);
+	connection_del_watch_post(tconn->pub.conn, CONNECTION_STATE_CONNECTED, _trunk_connection_on_connected);
+	connection_del_watch_pre(tconn->pub.conn, CONNECTION_STATE_CLOSED, _trunk_connection_on_closed);
+	connection_del_watch_post(tconn->pub.conn, CONNECTION_STATE_SHUTDOWN, _trunk_connection_on_shutdown);
+	connection_del_watch_pre(tconn->pub.conn, CONNECTION_STATE_FAILED, _trunk_connection_on_failed);
+	connection_del_watch_post(tconn->pub.conn, CONNECTION_STATE_HALTED, _trunk_connection_on_halted);
/*
* This may return -1, indicating the free was deferred
/** Attempt to spawn a new connection
*
- * Calls the API client's alloc() callback to create a new fr_connection_t,
+ * Calls the API client's alloc() callback to create a new connection_t,
* then inserts the connection into the 'connecting' list.
*
* @param[in] trunk to spawn connection in.
* @param[in] now The current time.
*/
-static int trunk_connection_spawn(fr_trunk_t *trunk, fr_time_t now)
+static int trunk_connection_spawn(trunk_t *trunk, fr_time_t now)
{
- fr_trunk_connection_t *tconn;
+ trunk_connection_t *tconn;
/*
* Call the API client's callback to create
- * a new fr_connection_t.
+ * a new connection_t.
*/
- MEM(tconn = talloc_zero(trunk, fr_trunk_connection_t));
+ MEM(tconn = talloc_zero(trunk, trunk_connection_t));
tconn->pub.trunk = trunk;
- tconn->pub.state = FR_TRUNK_CONN_HALTED; /* All connections start in the halted state */
+ tconn->pub.state = TRUNK_CONN_HALTED; /* All connections start in the halted state */
/*
- * Allocate a new fr_connection_t or fail.
+ * Allocate a new connection_t or fail.
*/
DO_CONNECTION_ALLOC(tconn);
- MEM(tconn->pending = fr_heap_talloc_alloc(tconn, _trunk_request_prioritise, fr_trunk_request_t, heap_id, 0));
- fr_dlist_talloc_init(&tconn->sent, fr_trunk_request_t, entry);
- fr_dlist_talloc_init(&tconn->idle, fr_trunk_request_t, entry);
- fr_dlist_talloc_init(&tconn->cancel, fr_trunk_request_t, entry);
- fr_dlist_talloc_init(&tconn->cancel_sent, fr_trunk_request_t, entry);
+ MEM(tconn->pending = fr_heap_talloc_alloc(tconn, _trunk_request_prioritise, trunk_request_t, heap_id, 0));
+ fr_dlist_talloc_init(&tconn->sent, trunk_request_t, entry);
+ fr_dlist_talloc_init(&tconn->idle, trunk_request_t, entry);
+ fr_dlist_talloc_init(&tconn->cancel, trunk_request_t, entry);
+ fr_dlist_talloc_init(&tconn->cancel_sent, trunk_request_t, entry);
/*
* OK, we have the connection, now setup watch
* between the different lists in the trunk
* with minimum extra code.
*/
-	fr_connection_add_watch_pre(tconn->pub.conn, FR_CONNECTION_STATE_INIT,
+	connection_add_watch_pre(tconn->pub.conn, CONNECTION_STATE_INIT,
				    _trunk_connection_on_init, false, tconn);	/* Before init() has been called */
-	fr_connection_add_watch_post(tconn->pub.conn, FR_CONNECTION_STATE_CONNECTING,
+	connection_add_watch_post(tconn->pub.conn, CONNECTION_STATE_CONNECTING,
				     _trunk_connection_on_connecting, false, tconn);	/* After init() has been called */
-	fr_connection_add_watch_post(tconn->pub.conn, FR_CONNECTION_STATE_CONNECTED,
+	connection_add_watch_post(tconn->pub.conn, CONNECTION_STATE_CONNECTED,
				     _trunk_connection_on_connected, false, tconn);	/* After open() has been called */
-	fr_connection_add_watch_pre(tconn->pub.conn, FR_CONNECTION_STATE_CLOSED,
+	connection_add_watch_pre(tconn->pub.conn, CONNECTION_STATE_CLOSED,
				    _trunk_connection_on_closed, false, tconn);	/* Before close() has been called */
-	fr_connection_add_watch_pre(tconn->pub.conn, FR_CONNECTION_STATE_FAILED,
+	connection_add_watch_pre(tconn->pub.conn, CONNECTION_STATE_FAILED,
				    _trunk_connection_on_failed, false, tconn);	/* Before failed() has been called */
-	fr_connection_add_watch_post(tconn->pub.conn, FR_CONNECTION_STATE_SHUTDOWN,
+	connection_add_watch_post(tconn->pub.conn, CONNECTION_STATE_SHUTDOWN,
				     _trunk_connection_on_shutdown, false, tconn);	/* After shutdown() has been called */
-	fr_connection_add_watch_post(tconn->pub.conn, FR_CONNECTION_STATE_HALTED,
+	connection_add_watch_post(tconn->pub.conn, CONNECTION_STATE_HALTED,
				     _trunk_connection_on_halted, false, tconn);	/* About to be freed */
talloc_set_destructor(tconn, _trunk_connection_free);
- fr_connection_signal_init(tconn->pub.conn); /* annnnd GO! */
+ connection_signal_init(tconn->pub.conn); /* annnnd GO! */
trunk->pub.last_open = now;
* One of these signalling functions must be called after the request
* has been popped:
*
- * - #fr_trunk_request_signal_cancel_sent
+ * - #trunk_request_signal_cancel_sent
* The remote datastore has been informed, but we need to wait for acknowledgement.
- * The #fr_trunk_request_demux_t callback must handle the acks calling
- * #fr_trunk_request_signal_cancel_complete when an ack is received.
+ * The #trunk_request_demux_t callback must handle the acks calling
+ * #trunk_request_signal_cancel_complete when an ack is received.
*
- * - #fr_trunk_request_signal_cancel_complete
+ * - #trunk_request_signal_cancel_complete
* The request was cancelled and we don't need to wait, clean it up immediately.
*
* @param[out] treq_out to process
* memory or requests associated with the connection.
* - -2 if called outside of the cancel muxer.
*/
-int fr_trunk_connection_pop_cancellation(fr_trunk_request_t **treq_out, fr_trunk_connection_t *tconn)
+int trunk_connection_pop_cancellation(trunk_request_t **treq_out, trunk_connection_t *tconn)
{
- if (unlikely(tconn->pub.state == FR_TRUNK_CONN_HALTED)) return -1;
+ if (unlikely(tconn->pub.state == TRUNK_CONN_HALTED)) return -1;
if (!fr_cond_assert_msg(IN_REQUEST_CANCEL_MUX(tconn->pub.trunk),
"%s can only be called from within request_cancel_mux handler",
*
* One of these signalling functions must be used after the request has been popped:
*
- * - #fr_trunk_request_signal_complete
+ * - #trunk_request_signal_complete
* The request was completed. Either we got a synchronous response, or we knew the
* response without contacting an external server (cache).
*
- * - #fr_trunk_request_signal_fail
+ * - #trunk_request_signal_fail
* Failed muxing the request due to a permanent issue, i.e. an invalid request.
*
- * - #fr_trunk_request_signal_partial
+ * - #trunk_request_signal_partial
* Wrote part of a request. This request will be returned on the next call to this
* function so that the request_mux function can finish writing it. Only useful
* for stream type connections. Datagram type connections cannot have partial
* writes.
*
- * - #fr_trunk_request_signal_sent Successfully sent a request.
+ * - #trunk_request_signal_sent Successfully sent a request.
*
* @param[out] treq_out to process
* @param[in] tconn to pop a request from.
* memory or requests associated with the connection.
* - -2 if called outside of the muxer.
*/
-int fr_trunk_connection_pop_request(fr_trunk_request_t **treq_out, fr_trunk_connection_t *tconn)
+int trunk_connection_pop_request(trunk_request_t **treq_out, trunk_connection_t *tconn)
{
- if (unlikely(tconn->pub.state == FR_TRUNK_CONN_HALTED)) return -1;
+ if (unlikely(tconn->pub.state == TRUNK_CONN_HALTED)) return -1;
if (!fr_cond_assert_msg(IN_REQUEST_MUX(tconn->pub.trunk),
"%s can only be called from within request_mux handler",
*
* @param[in] tconn to signal.
*/
-void fr_trunk_connection_signal_writable(fr_trunk_connection_t *tconn)
+void trunk_connection_signal_writable(trunk_connection_t *tconn)
{
- fr_trunk_t *trunk = tconn->pub.trunk;
+ trunk_t *trunk = tconn->pub.trunk;
if (!fr_cond_assert_msg(!IN_HANDLER(tconn->pub.trunk),
"%s cannot be called within a handler", __FUNCTION__)) return;
*
* @param[in] tconn to signal.
*/
-void fr_trunk_connection_signal_readable(fr_trunk_connection_t *tconn)
+void trunk_connection_signal_readable(trunk_connection_t *tconn)
{
- fr_trunk_t *trunk = tconn->pub.trunk;
+ trunk_t *trunk = tconn->pub.trunk;
if (!fr_cond_assert_msg(!IN_HANDLER(tconn->pub.trunk),
"%s cannot be called within a handler", __FUNCTION__)) return;
*
* @param[in] tconn to signal.
*/
-void fr_trunk_connection_signal_inactive(fr_trunk_connection_t *tconn)
+void trunk_connection_signal_inactive(trunk_connection_t *tconn)
{
/* Can be called anywhere */
switch (tconn->pub.state) {
- case FR_TRUNK_CONN_ACTIVE:
- case FR_TRUNK_CONN_FULL:
+ case TRUNK_CONN_ACTIVE:
+ case TRUNK_CONN_FULL:
trunk_connection_enter_inactive(tconn);
break;
- case FR_TRUNK_CONN_DRAINING:
+ case TRUNK_CONN_DRAINING:
trunk_connection_enter_inactive_draining(tconn);
break;
*
* @param[in] tconn to signal.
*/
-void fr_trunk_connection_signal_active(fr_trunk_connection_t *tconn)
+void trunk_connection_signal_active(trunk_connection_t *tconn)
{
switch (tconn->pub.state) {
- case FR_TRUNK_CONN_FULL:
+ case TRUNK_CONN_FULL:
trunk_connection_auto_unfull(tconn); /* Mark as active if it should be active */
break;
- case FR_TRUNK_CONN_INACTIVE:
+ case TRUNK_CONN_INACTIVE:
/*
* Do the appropriate state transition based on
* how many requests the trunk connection is
* the connection back to the normal
* draining state.
*/
- case FR_TRUNK_CONN_INACTIVE_DRAINING: /* Only an external signal can trigger this transition */
+ case TRUNK_CONN_INACTIVE_DRAINING: /* Only an external signal can trigger this transition */
trunk_connection_enter_draining(tconn);
break;
* @param[in] tconn to signal.
* @param[in] reason the connection is being reconnected.
*/
-void fr_trunk_connection_signal_reconnect(fr_trunk_connection_t *tconn, fr_connection_reason_t reason)
+void trunk_connection_signal_reconnect(trunk_connection_t *tconn, connection_reason_t reason)
{
- fr_connection_signal_reconnect(tconn->pub.conn, reason);
+ connection_signal_reconnect(tconn->pub.conn, reason);
}
/** Standard I/O read function
* @param[in] flags describing the read event.
* @param[in] uctx The trunk connection handle (tconn).
*/
-void fr_trunk_connection_callback_readable(UNUSED fr_event_list_t *el, UNUSED int fd, UNUSED int flags, void *uctx)
+void trunk_connection_callback_readable(UNUSED fr_event_list_t *el, UNUSED int fd, UNUSED int flags, void *uctx)
{
- fr_trunk_connection_t *tconn = talloc_get_type_abort(uctx, fr_trunk_connection_t);
+ trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
- fr_trunk_connection_signal_readable(tconn);
+ trunk_connection_signal_readable(tconn);
}
/** Standard I/O write function
* @param[in] flags describing the write event.
* @param[in] uctx The trunk connection handle (tcon).
*/
-void fr_trunk_connection_callback_writable(UNUSED fr_event_list_t *el, UNUSED int fd, UNUSED int flags, void *uctx)
+void trunk_connection_callback_writable(UNUSED fr_event_list_t *el, UNUSED int fd, UNUSED int flags, void *uctx)
{
- fr_trunk_connection_t *tconn = talloc_get_type_abort(uctx, fr_trunk_connection_t);
+ trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
- fr_trunk_connection_signal_writable(tconn);
+ trunk_connection_signal_writable(tconn);
}
* - True if trunk connection is in a particular state.
* - False if trunk connection is not in a particular state.
*/
-bool fr_trunk_connection_in_state(fr_trunk_connection_t *tconn, int state)
+bool trunk_connection_in_state(trunk_connection_t *tconn, int state)
{
return (bool)(tconn->pub.state & state);
}
* @param[in] trunk containing connections we want to close.
* @param[in] head of list of connections to examine.
*/
-static void trunk_connection_close_if_empty(fr_trunk_t *trunk, fr_dlist_head_t *head)
+static void trunk_connection_close_if_empty(trunk_t *trunk, fr_dlist_head_t *head)
{
- fr_trunk_connection_t *tconn = NULL;
+ trunk_connection_t *tconn = NULL;
while ((tconn = fr_dlist_next(head, tconn))) {
- fr_trunk_connection_t *prev;
+ trunk_connection_t *prev;
- if (fr_trunk_request_count_by_connection(tconn, FR_TRUNK_REQUEST_STATE_ALL) != 0) continue;
+ if (trunk_request_count_by_connection(tconn, TRUNK_REQUEST_STATE_ALL) != 0) continue;
prev = fr_dlist_prev(head, tconn);
DEBUG3("Closing %s connection with no requests",
- fr_table_str_by_value(fr_trunk_connection_states, tconn->pub.state, "<INVALID>"));
+ fr_table_str_by_value(trunk_connection_states, tconn->pub.state, "<INVALID>"));
/*
* Close the connection as gracefully
* as possible by signalling it should
* complete at which point we'll be informed
* and free our tconn wrapper.
*/
- fr_connection_signal_shutdown(tconn->pub.conn);
+ connection_signal_shutdown(tconn->pub.conn);
tconn = prev;
}
}
*
* @param[in] trunk The trunk to rebalance.
*/
-static void trunk_rebalance(fr_trunk_t *trunk)
+static void trunk_rebalance(trunk_t *trunk)
{
- fr_trunk_connection_t *head;
+ trunk_connection_t *head;
head = fr_minmax_heap_min_peek(trunk->active);
*/
while ((fr_minmax_heap_min_peek(trunk->active) == head) &&
trunk_connection_requests_requeue(fr_minmax_heap_max_peek(trunk->active),
- FR_TRUNK_REQUEST_STATE_PENDING, 1, false));
+ TRUNK_REQUEST_STATE_PENDING, 1, false));
}
/** Implements the algorithm we use to manage requests per connection levels
* - Return if we last closed a connection within 'closed_delay'.
* - Otherwise we move a connection to draining state.
*/
-static void trunk_manage(fr_trunk_t *trunk, fr_time_t now)
+static void trunk_manage(trunk_t *trunk, fr_time_t now)
{
- fr_trunk_connection_t *tconn = NULL;
- fr_trunk_request_t *treq;
+ trunk_connection_t *tconn = NULL;
+ trunk_request_t *treq;
uint32_t average = 0;
uint32_t req_count;
uint16_t conn_count;
- fr_trunk_state_t new_state;
+ trunk_state_t new_state;
DEBUG4("Managing trunk");
/*
* Update the state of the trunk
*/
- if (fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_ACTIVE)) {
- new_state = FR_TRUNK_STATE_ACTIVE;
+ if (trunk_connection_count_by_state(trunk, TRUNK_CONN_ACTIVE)) {
+ new_state = TRUNK_STATE_ACTIVE;
} else {
/*
* INIT / CONNECTING / FULL mean connections will become active
* so the trunk is PENDING
*/
- new_state = fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_INIT |
- FR_TRUNK_CONN_CONNECTING |
- FR_TRUNK_CONN_FULL) ?
- FR_TRUNK_STATE_PENDING : FR_TRUNK_STATE_IDLE;
+ new_state = trunk_connection_count_by_state(trunk, TRUNK_CONN_INIT |
+ TRUNK_CONN_CONNECTING |
+ TRUNK_CONN_FULL) ?
+ TRUNK_STATE_PENDING : TRUNK_STATE_IDLE;
}
if (new_state != trunk->pub.state) TRUNK_STATE_TRANSITION(new_state);
* unavailable.
*/
if ((trunk->conf.connecting > 0) &&
- (fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_CONNECTING) >=
+ (trunk_connection_count_by_state(trunk, TRUNK_CONN_CONNECTING) >=
trunk->conf.connecting)) {
DEBUG4("Not opening connection - Too many (%u) connections in the connecting state",
trunk->conf.connecting);
* If the connection has no requests associated
* with it then immediately free.
*/
- if (fr_trunk_request_count_by_connection(tconn, FR_TRUNK_REQUEST_STATE_ALL) == 0) {
- fr_connection_signal_halt(tconn->pub.conn); /* Also frees the tconn */
+ if (trunk_request_count_by_connection(tconn, TRUNK_REQUEST_STATE_ALL) == 0) {
+ connection_signal_halt(tconn->pub.conn); /* Also frees the tconn */
} else {
trunk_connection_enter_inactive_draining(tconn);
}
* of requests decreases.
*/
} else if ((tconn = fr_dlist_tail(&trunk->connecting))) {
- fr_connection_signal_halt(tconn->pub.conn); /* Also frees the tconn */
+ connection_signal_halt(tconn->pub.conn); /* Also frees the tconn */
/*
* Finally if there are no "connecting"
* If the connection has no requests associated
* with it then immediately free.
*/
- if (fr_trunk_request_count_by_connection(tconn, FR_TRUNK_REQUEST_STATE_ALL) == 0) {
- fr_connection_signal_halt(tconn->pub.conn); /* Also frees the tconn */
+ if (trunk_request_count_by_connection(tconn, TRUNK_REQUEST_STATE_ALL) == 0) {
+ connection_signal_halt(tconn->pub.conn); /* Also frees the tconn */
} else {
trunk_connection_enter_draining(tconn);
}
*/
static void _trunk_timer(fr_event_list_t *el, fr_time_t now, void *uctx)
{
- fr_trunk_t *trunk = talloc_get_type_abort(uctx, fr_trunk_t);
+ trunk_t *trunk = talloc_get_type_abort(uctx, trunk_t);
trunk_manage(trunk, now);
* @param[in] req_state One or more request states or'd together.
* @return The number of requests in a particular state, on connection in a particular state.
*/
-uint64_t fr_trunk_request_count_by_state(fr_trunk_t *trunk, int conn_state, int req_state)
+uint64_t trunk_request_count_by_state(trunk_t *trunk, int conn_state, int req_state)
{
uint64_t count = 0;
- fr_trunk_connection_t *tconn = NULL;
+ trunk_connection_t *tconn = NULL;
fr_minmax_heap_iter_t iter;
#define COUNT_BY_STATE(_state, _list) \
if (conn_state & (_state)) { \
tconn = NULL; \
while ((tconn = fr_dlist_next(&trunk->_list, tconn))) { \
- count += fr_trunk_request_count_by_connection(tconn, req_state); \
+ count += trunk_request_count_by_connection(tconn, req_state); \
} \
} \
} while (0)
- if (conn_state & FR_TRUNK_CONN_ACTIVE) {
+ if (conn_state & TRUNK_CONN_ACTIVE) {
for (tconn = fr_minmax_heap_iter_init(trunk->active, &iter);
tconn;
tconn = fr_minmax_heap_iter_next(trunk->active, &iter)) {
- count += fr_trunk_request_count_by_connection(tconn, req_state);
+ count += trunk_request_count_by_connection(tconn, req_state);
}
}
- COUNT_BY_STATE(FR_TRUNK_CONN_FULL, full);
- COUNT_BY_STATE(FR_TRUNK_CONN_INACTIVE, inactive);
- COUNT_BY_STATE(FR_TRUNK_CONN_INACTIVE_DRAINING, inactive_draining);
- COUNT_BY_STATE(FR_TRUNK_CONN_DRAINING, draining);
- COUNT_BY_STATE(FR_TRUNK_CONN_DRAINING_TO_FREE, draining_to_free);
+ COUNT_BY_STATE(TRUNK_CONN_FULL, full);
+ COUNT_BY_STATE(TRUNK_CONN_INACTIVE, inactive);
+ COUNT_BY_STATE(TRUNK_CONN_INACTIVE_DRAINING, inactive_draining);
+ COUNT_BY_STATE(TRUNK_CONN_DRAINING, draining);
+ COUNT_BY_STATE(TRUNK_CONN_DRAINING_TO_FREE, draining_to_free);
- if (req_state & FR_TRUNK_REQUEST_STATE_BACKLOG) count += fr_heap_num_elements(trunk->backlog);
+ if (req_state & TRUNK_REQUEST_STATE_BACKLOG) count += fr_heap_num_elements(trunk->backlog);
return count;
}
* - The average number of requests per connection.
*/
static uint64_t trunk_requests_per_connection(uint16_t *conn_count_out, uint32_t *req_count_out,
- fr_trunk_t *trunk, fr_time_t now,
+ trunk_t *trunk, fr_time_t now,
NDEBUG_UNUSED bool verify)
{
uint32_t req_count = 0;
* request to connection ratio, so that we can preemptively spawn
* new connections.
*
- * In the case of FR_TRUNK_CONN_DRAINING | FR_TRUNK_CONN_INACTIVE_DRAINING
+ * In the case of TRUNK_CONN_DRAINING | TRUNK_CONN_INACTIVE_DRAINING
* the trunk management code has enough hysteresis to not
* immediately reactivate the connection.
*
* talking to, or misconfigured firewalls which are trashing
* TCP/UDP connection states.
*/
- conn_count = fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_ALL ^
- (FR_TRUNK_CONN_DRAINING |
- FR_TRUNK_CONN_INACTIVE_DRAINING |
- FR_TRUNK_CONN_DRAINING_TO_FREE));
+ conn_count = trunk_connection_count_by_state(trunk, TRUNK_CONN_ALL ^
+ (TRUNK_CONN_DRAINING |
+ TRUNK_CONN_INACTIVE_DRAINING |
+ TRUNK_CONN_DRAINING_TO_FREE));
/*
* Requests on all connections
*/
- req_count = fr_trunk_request_count_by_state(trunk,
- FR_TRUNK_CONN_ALL ^
- FR_TRUNK_CONN_DRAINING_TO_FREE, FR_TRUNK_REQUEST_STATE_ALL);
+ req_count = trunk_request_count_by_state(trunk,
+ TRUNK_CONN_ALL ^
+ TRUNK_CONN_DRAINING_TO_FREE, TRUNK_REQUEST_STATE_ALL);
/*
* No connections, but we do have requests
*
* @param[in] trunk To drain backlog requests for.
*/
-static void trunk_backlog_drain(fr_trunk_t *trunk)
+static void trunk_backlog_drain(trunk_t *trunk)
{
- fr_trunk_request_t *treq;
+ trunk_request_t *treq;
if (fr_heap_num_elements(trunk->backlog) == 0) return;
*/
while ((treq = fr_heap_peek(trunk->backlog))) {
switch (trunk_request_enqueue_existing(treq)) {
- case FR_TRUNK_ENQUEUE_OK:
+ case TRUNK_ENQUEUE_OK:
continue;
/*
* Signal to stop
*/
- case FR_TRUNK_ENQUEUE_IN_BACKLOG:
+ case TRUNK_ENQUEUE_IN_BACKLOG:
break;
/*
* which will free it and
* re-enliven the yielded request.
*/
- case FR_TRUNK_ENQUEUE_DST_UNAVAILABLE:
- case FR_TRUNK_ENQUEUE_FAIL:
+ case TRUNK_ENQUEUE_DST_UNAVAILABLE:
+ case TRUNK_ENQUEUE_FAIL:
trunk_request_enter_failed(treq);
continue;
- case FR_TRUNK_ENQUEUE_NO_CAPACITY:
+ case TRUNK_ENQUEUE_NO_CAPACITY:
fr_assert(fr_minmax_heap_num_elements(trunk->active) == 0);
return;
}
* @param[in] states One or more states or'd together.
* @param[in] reason Why the connections are being signalled to reconnect.
*/
-void fr_trunk_reconnect(fr_trunk_t *trunk, int states, fr_connection_reason_t reason)
+void trunk_reconnect(trunk_t *trunk, int states, connection_reason_t reason)
{
#define RECONNECT_BY_STATE(_state, _list) \
if (states & (_state)) { \
size_t i; \
for (i = fr_dlist_num_elements(&trunk->_list); i > 0; i--) { \
- fr_connection_signal_reconnect(((fr_trunk_connection_t *)fr_dlist_tail(&trunk->_list))->pub.conn, reason); \
+ connection_signal_reconnect(((trunk_connection_t *)fr_dlist_tail(&trunk->_list))->pub.conn, reason); \
} \
} \
} while (0)
* loop, as we iterate over the list
* again and again.
*/
- RECONNECT_BY_STATE(FR_TRUNK_CONN_CONNECTING, connecting);
+ RECONNECT_BY_STATE(TRUNK_CONN_CONNECTING, connecting);
- if (states & FR_TRUNK_CONN_ACTIVE) {
- fr_trunk_connection_t *tconn;
- while ((tconn = fr_minmax_heap_min_peek(trunk->active))) fr_connection_signal_reconnect(tconn->pub.conn, reason);
+ if (states & TRUNK_CONN_ACTIVE) {
+ trunk_connection_t *tconn;
+ while ((tconn = fr_minmax_heap_min_peek(trunk->active))) connection_signal_reconnect(tconn->pub.conn, reason);
}
- RECONNECT_BY_STATE(FR_TRUNK_CONN_INIT, init);
- RECONNECT_BY_STATE(FR_TRUNK_CONN_FULL, full);
- RECONNECT_BY_STATE(FR_TRUNK_CONN_INACTIVE, inactive);
- RECONNECT_BY_STATE(FR_TRUNK_CONN_INACTIVE_DRAINING, inactive_draining);
- RECONNECT_BY_STATE(FR_TRUNK_CONN_CLOSED, closed);
- RECONNECT_BY_STATE(FR_TRUNK_CONN_DRAINING, draining);
- RECONNECT_BY_STATE(FR_TRUNK_CONN_DRAINING_TO_FREE, draining_to_free);
+ RECONNECT_BY_STATE(TRUNK_CONN_INIT, init);
+ RECONNECT_BY_STATE(TRUNK_CONN_FULL, full);
+ RECONNECT_BY_STATE(TRUNK_CONN_INACTIVE, inactive);
+ RECONNECT_BY_STATE(TRUNK_CONN_INACTIVE_DRAINING, inactive_draining);
+ RECONNECT_BY_STATE(TRUNK_CONN_CLOSED, closed);
+ RECONNECT_BY_STATE(TRUNK_CONN_DRAINING, draining);
+ RECONNECT_BY_STATE(TRUNK_CONN_DRAINING_TO_FREE, draining_to_free);
}
/** Start the trunk running
*
*/
-int fr_trunk_start(fr_trunk_t *trunk)
+int trunk_start(trunk_t *trunk)
{
uint16_t i;
/** Allow the trunk to open and close connections in response to load
*
*/
-void fr_trunk_connection_manage_start(fr_trunk_t *trunk)
+void trunk_connection_manage_start(trunk_t *trunk)
{
if (!trunk->started || trunk->managing_connections) return;
/** Stop the trunk from opening and closing connections in response to load
*
*/
-void fr_trunk_connection_manage_stop(fr_trunk_t *trunk)
+void trunk_connection_manage_stop(trunk_t *trunk)
{
if (!trunk->started || !trunk->managing_connections) return;
/** Schedule a trunk management event for the next time the event loop is executed
*/
-int fr_trunk_connection_manage_schedule(fr_trunk_t *trunk)
+int trunk_connection_manage_schedule(trunk_t *trunk)
{
if (!trunk->started || !trunk->managing_connections) return 0;
*/
static int8_t _trunk_connection_order_by_shortest_queue(void const *one, void const *two)
{
- fr_trunk_connection_t const *a = talloc_get_type_abort_const(one, fr_trunk_connection_t);
- fr_trunk_connection_t const *b = talloc_get_type_abort_const(two, fr_trunk_connection_t);
+ trunk_connection_t const *a = talloc_get_type_abort_const(one, trunk_connection_t);
+ trunk_connection_t const *b = talloc_get_type_abort_const(two, trunk_connection_t);
- uint32_t a_count = fr_trunk_request_count_by_connection(a, FR_TRUNK_REQUEST_STATE_ALL);
- uint32_t b_count = fr_trunk_request_count_by_connection(b, FR_TRUNK_REQUEST_STATE_ALL);
+ uint32_t a_count = trunk_request_count_by_connection(a, TRUNK_REQUEST_STATE_ALL);
+ uint32_t b_count = trunk_request_count_by_connection(b, TRUNK_REQUEST_STATE_ALL);
/*
* Add a fudge factor of 1 to reduce spurious rebalancing
/** Free a trunk, gracefully closing all connections.
*
*/
-static int _trunk_free(fr_trunk_t *trunk)
+static int _trunk_free(trunk_t *trunk)
{
- fr_trunk_connection_t *tconn;
- fr_trunk_request_t *treq;
- fr_trunk_watch_entry_t *watch;
+ trunk_connection_t *tconn;
+ trunk_request_t *treq;
+ trunk_watch_entry_t *watch;
size_t i;
DEBUG4("Trunk free %p", trunk);
* Each time a connection is freed it removes itself from the list
* its in, which means the head should keep advancing automatically.
*/
- while ((tconn = fr_minmax_heap_min_peek(trunk->active))) fr_connection_signal_halt(tconn->pub.conn);
- while ((tconn = fr_dlist_head(&trunk->init))) fr_connection_signal_halt(tconn->pub.conn);
- while ((tconn = fr_dlist_head(&trunk->connecting))) fr_connection_signal_halt(tconn->pub.conn);
- while ((tconn = fr_dlist_head(&trunk->full))) fr_connection_signal_halt(tconn->pub.conn);
- while ((tconn = fr_dlist_head(&trunk->inactive))) fr_connection_signal_halt(tconn->pub.conn);
- while ((tconn = fr_dlist_head(&trunk->inactive_draining))) fr_connection_signal_halt(tconn->pub.conn);
- while ((tconn = fr_dlist_head(&trunk->closed))) fr_connection_signal_halt(tconn->pub.conn);
- while ((tconn = fr_dlist_head(&trunk->draining))) fr_connection_signal_halt(tconn->pub.conn);
- while ((tconn = fr_dlist_head(&trunk->draining_to_free))) fr_connection_signal_halt(tconn->pub.conn);
+ while ((tconn = fr_minmax_heap_min_peek(trunk->active))) connection_signal_halt(tconn->pub.conn);
+ while ((tconn = fr_dlist_head(&trunk->init))) connection_signal_halt(tconn->pub.conn);
+ while ((tconn = fr_dlist_head(&trunk->connecting))) connection_signal_halt(tconn->pub.conn);
+ while ((tconn = fr_dlist_head(&trunk->full))) connection_signal_halt(tconn->pub.conn);
+ while ((tconn = fr_dlist_head(&trunk->inactive))) connection_signal_halt(tconn->pub.conn);
+ while ((tconn = fr_dlist_head(&trunk->inactive_draining))) connection_signal_halt(tconn->pub.conn);
+ while ((tconn = fr_dlist_head(&trunk->closed))) connection_signal_halt(tconn->pub.conn);
+ while ((tconn = fr_dlist_head(&trunk->draining))) connection_signal_halt(tconn->pub.conn);
+ while ((tconn = fr_dlist_head(&trunk->draining_to_free))) connection_signal_halt(tconn->pub.conn);
/*
* Process any deferred connection frees
*
* This function should be called first to allocate a new trunk connection.
*
- * After the trunk has been allocated, #fr_trunk_request_alloc and
- * #fr_trunk_request_enqueue should be used to allocate memory for trunk
+ * After the trunk has been allocated, #trunk_request_alloc and
+ * #trunk_request_enqueue should be used to allocate memory for trunk
* requests, and pass a preq (protocol request) to the trunk for
* processing.
*
* The trunk will then asynchronously process the request, writing the result
- * to a specified rctx. See #fr_trunk_request_enqueue for more details.
+ * to a specified rctx. See #trunk_request_enqueue for more details.
*
* @note Trunks may not be shared between multiple threads under any circumstances.
*
* - New trunk handle on success.
* - NULL on error.
*/
-fr_trunk_t *fr_trunk_alloc(TALLOC_CTX *ctx, fr_event_list_t *el,
- fr_trunk_io_funcs_t const *funcs, fr_trunk_conf_t const *conf,
+trunk_t *trunk_alloc(TALLOC_CTX *ctx, fr_event_list_t *el,
+ trunk_io_funcs_t const *funcs, trunk_conf_t const *conf,
char const *log_prefix, void const *uctx, bool delay_start)
{
- fr_trunk_t *trunk;
+ trunk_t *trunk;
size_t i;
/*
*/
if (!fr_cond_assert(funcs->connection_alloc)) return NULL;
- MEM(trunk = talloc_zero(ctx, fr_trunk_t));
+ MEM(trunk = talloc_zero(ctx, trunk_t));
trunk->el = el;
trunk->log_prefix = talloc_strdup(trunk, log_prefix);
/*
* Unused request list...
*/
- fr_dlist_talloc_init(&trunk->free_requests, fr_trunk_request_t, entry);
+ fr_dlist_talloc_init(&trunk->free_requests, trunk_request_t, entry);
/*
* Request backlog queue
*/
MEM(trunk->backlog = fr_heap_talloc_alloc(trunk, _trunk_request_prioritise,
- fr_trunk_request_t, heap_id, 0));
+ trunk_request_t, heap_id, 0));
/*
* Connection queues and trees
*/
MEM(trunk->active = fr_minmax_heap_talloc_alloc(trunk, trunk->funcs.connection_prioritise,
- fr_trunk_connection_t, heap_id, 0));
- fr_dlist_talloc_init(&trunk->init, fr_trunk_connection_t, entry);
- fr_dlist_talloc_init(&trunk->connecting, fr_trunk_connection_t, entry);
- fr_dlist_talloc_init(&trunk->full, fr_trunk_connection_t, entry);
- fr_dlist_talloc_init(&trunk->inactive, fr_trunk_connection_t, entry);
- fr_dlist_talloc_init(&trunk->inactive_draining, fr_trunk_connection_t, entry);
- fr_dlist_talloc_init(&trunk->closed, fr_trunk_connection_t, entry);
- fr_dlist_talloc_init(&trunk->draining, fr_trunk_connection_t, entry);
- fr_dlist_talloc_init(&trunk->draining_to_free, fr_trunk_connection_t, entry);
- fr_dlist_talloc_init(&trunk->to_free, fr_trunk_connection_t, entry);
+ trunk_connection_t, heap_id, 0));
+ fr_dlist_talloc_init(&trunk->init, trunk_connection_t, entry);
+ fr_dlist_talloc_init(&trunk->connecting, trunk_connection_t, entry);
+ fr_dlist_talloc_init(&trunk->full, trunk_connection_t, entry);
+ fr_dlist_talloc_init(&trunk->inactive, trunk_connection_t, entry);
+ fr_dlist_talloc_init(&trunk->inactive_draining, trunk_connection_t, entry);
+ fr_dlist_talloc_init(&trunk->closed, trunk_connection_t, entry);
+ fr_dlist_talloc_init(&trunk->draining, trunk_connection_t, entry);
+ fr_dlist_talloc_init(&trunk->draining_to_free, trunk_connection_t, entry);
+ fr_dlist_talloc_init(&trunk->to_free, trunk_connection_t, entry);
/*
* Watch lists
*/
for (i = 0; i < NUM_ELEMENTS(trunk->watch); i++) {
- fr_dlist_talloc_init(&trunk->watch[i], fr_trunk_watch_entry_t, entry);
+ fr_dlist_talloc_init(&trunk->watch[i], trunk_watch_entry_t, entry);
}
DEBUG4("Trunk allocated %p", trunk);
if (!delay_start) {
- if (fr_trunk_start(trunk) < 0) {
+ if (trunk_start(trunk) < 0) {
talloc_free(trunk);
return NULL;
}
* down with the ancestral values, but that breaks the foo_verify() API. Each foo_verify() will only verify the
* foo's children.
*/
-void fr_trunk_verify(char const *file, int line, fr_trunk_t *trunk)
+void trunk_verify(char const *file, int line, trunk_t *trunk)
{
- fr_fatal_assert_msg(trunk, "CONSISTENCY CHECK FAILED %s[%i]: fr_trunk_t pointer was NULL", file, line);
- (void) talloc_get_type_abort(trunk, fr_trunk_t);
+ fr_fatal_assert_msg(trunk, "CONSISTENCY CHECK FAILED %s[%i]: trunk_t pointer was NULL", file, line);
+ (void) talloc_get_type_abort(trunk, trunk_t);
for (size_t i = 0; i < NUM_ELEMENTS(trunk->watch); i++) {
_fr_dlist_verify(file, line, &trunk->watch[i]);
#define TCONN_DLIST_VERIFY(_dlist, _state) \
do { \
_fr_dlist_verify(file, line, &(trunk->_dlist)); \
- fr_dlist_foreach(&(trunk->_dlist), fr_trunk_connection_t, tconn) { \
- fr_trunk_connection_verify(file, line, tconn); \
+ fr_dlist_foreach(&(trunk->_dlist), trunk_connection_t, tconn) { \
+ trunk_connection_verify(file, line, tconn); \
TRUNK_TCONN_CHECKS(tconn, _state); \
} \
} while (0)
#define TCONN_MINMAX_HEAP_VERIFY(_heap, _state) \
do {\
fr_minmax_heap_verify(file, line, trunk->_heap); \
- fr_minmax_heap_foreach(trunk->_heap, fr_trunk_connection_t, tconn) { \
- fr_trunk_connection_verify(file, line, tconn); \
+ fr_minmax_heap_foreach(trunk->_heap, trunk_connection_t, tconn) { \
+ trunk_connection_verify(file, line, tconn); \
TRUNK_TCONN_CHECKS(tconn, _state); \
}} \
} while (0)
fr_dlist_verify(&(trunk->free_requests));
FR_HEAP_VERIFY(trunk->backlog);
- TCONN_DLIST_VERIFY(init, FR_TRUNK_CONN_INIT);
- TCONN_DLIST_VERIFY(connecting, FR_TRUNK_CONN_CONNECTING);
- TCONN_MINMAX_HEAP_VERIFY(active, FR_TRUNK_CONN_ACTIVE);
- TCONN_DLIST_VERIFY(full, FR_TRUNK_CONN_FULL);
- TCONN_DLIST_VERIFY(inactive, FR_TRUNK_CONN_INACTIVE);
- TCONN_DLIST_VERIFY(inactive_draining, FR_TRUNK_CONN_INACTIVE_DRAINING);
+ TCONN_DLIST_VERIFY(init, TRUNK_CONN_INIT);
+ TCONN_DLIST_VERIFY(connecting, TRUNK_CONN_CONNECTING);
+ TCONN_MINMAX_HEAP_VERIFY(active, TRUNK_CONN_ACTIVE);
+ TCONN_DLIST_VERIFY(full, TRUNK_CONN_FULL);
+ TCONN_DLIST_VERIFY(inactive, TRUNK_CONN_INACTIVE);
+ TCONN_DLIST_VERIFY(inactive_draining, TRUNK_CONN_INACTIVE_DRAINING);
/* TCONN_DLIST_VERIFY(failed, ???); */
- TCONN_DLIST_VERIFY(closed, FR_TRUNK_CONN_CLOSED);
- TCONN_DLIST_VERIFY(draining, FR_TRUNK_CONN_DRAINING);
- TCONN_DLIST_VERIFY(draining_to_free, FR_TRUNK_CONN_DRAINING_TO_FREE);
- TCONN_DLIST_VERIFY(to_free, FR_TRUNK_CONN_HALTED);
+ TCONN_DLIST_VERIFY(closed, TRUNK_CONN_CLOSED);
+ TCONN_DLIST_VERIFY(draining, TRUNK_CONN_DRAINING);
+ TCONN_DLIST_VERIFY(draining_to_free, TRUNK_CONN_DRAINING_TO_FREE);
+ TCONN_DLIST_VERIFY(to_free, TRUNK_CONN_HALTED);
}
-void fr_trunk_connection_verify(char const *file, int line, fr_trunk_connection_t *tconn)
+void trunk_connection_verify(char const *file, int line, trunk_connection_t *tconn)
{
- fr_fatal_assert_msg(tconn, "CONSISTENCY CHECK FAILED %s[%i]: fr_trunk_connection_t pointer was NULL", file, line);
- (void) talloc_get_type_abort(tconn, fr_trunk_connection_t);
+ fr_fatal_assert_msg(tconn, "CONSISTENCY CHECK FAILED %s[%i]: trunk_connection_t pointer was NULL", file, line);
+ (void) talloc_get_type_abort(tconn, trunk_connection_t);
- (void) talloc_get_type_abort(tconn->pub.trunk, fr_trunk_t);
+ (void) talloc_get_type_abort(tconn->pub.trunk, trunk_t);
/*
* shouldn't be both in heap and on list--but it doesn't look like moves
#define TREQ_DLIST_VERIFY(_dlist, _state) \
do { \
_fr_dlist_verify(file, line, &(tconn->_dlist)); \
- fr_dlist_foreach(&(tconn->_dlist), fr_trunk_request_t, treq) { \
- fr_trunk_request_verify(file, line, treq); \
+ fr_dlist_foreach(&(tconn->_dlist), trunk_request_t, treq) { \
+ trunk_request_verify(file, line, treq); \
TCONN_TREQ_CHECKS(treq, _state); \
} \
} while (0)
do { \
fr_heap_iter_t _iter; \
fr_heap_verify(file, line, tconn->_heap); \
- for (fr_trunk_request_t *treq = fr_heap_iter_init(tconn->_heap, &_iter); \
+ for (trunk_request_t *treq = fr_heap_iter_init(tconn->_heap, &_iter); \
treq; \
treq = fr_heap_iter_next(tconn->_heap, &_iter)) { \
- fr_trunk_request_verify(file, line, treq); \
+ trunk_request_verify(file, line, treq); \
TCONN_TREQ_CHECKS(treq, _state); \
} \
} while (0)
#define TREQ_OPTION_VERIFY(_option, _state) \
do { \
if (tconn->_option) { \
- fr_trunk_request_verify(file, line, tconn->_option); \
+ trunk_request_verify(file, line, tconn->_option); \
TCONN_TREQ_CHECKS(tconn->_option, _state); \
} \
} while (0)
/* verify associated requests */
- TREQ_HEAP_VERIFY(pending, FR_TRUNK_REQUEST_STATE_PENDING);
- TREQ_DLIST_VERIFY(sent, FR_TRUNK_REQUEST_STATE_SENT);
- TREQ_DLIST_VERIFY(cancel, FR_TRUNK_REQUEST_STATE_CANCEL);
- TREQ_DLIST_VERIFY(cancel_sent, FR_TRUNK_REQUEST_STATE_CANCEL_SENT);
- TREQ_OPTION_VERIFY(partial, FR_TRUNK_REQUEST_STATE_PARTIAL);
- TREQ_OPTION_VERIFY(cancel_partial, FR_TRUNK_REQUEST_STATE_CANCEL_PARTIAL);
+ TREQ_HEAP_VERIFY(pending, TRUNK_REQUEST_STATE_PENDING);
+ TREQ_DLIST_VERIFY(sent, TRUNK_REQUEST_STATE_SENT);
+ TREQ_DLIST_VERIFY(cancel, TRUNK_REQUEST_STATE_CANCEL);
+ TREQ_DLIST_VERIFY(cancel_sent, TRUNK_REQUEST_STATE_CANCEL_SENT);
+ TREQ_OPTION_VERIFY(partial, TRUNK_REQUEST_STATE_PARTIAL);
+ TREQ_OPTION_VERIFY(cancel_partial, TRUNK_REQUEST_STATE_CANCEL_PARTIAL);
}
-void fr_trunk_request_verify(char const *file, int line, fr_trunk_request_t *treq)
+void trunk_request_verify(char const *file, int line, trunk_request_t *treq)
{
- fr_fatal_assert_msg(treq, "CONSISTENCY CHECK FAILED %s[%i]: fr_trunk_request_t pointer was NULL", file, line);
- (void) talloc_get_type_abort(treq, fr_trunk_request_t);
+ fr_fatal_assert_msg(treq, "CONSISTENCY CHECK FAILED %s[%i]: trunk_request_t pointer was NULL", file, line);
+ (void) talloc_get_type_abort(treq, trunk_request_t);
#ifdef WITH_VERIFY_PTR
if (treq->pub.request) request_verify(file, line, treq->pub.request);
}
-bool fr_trunk_search(fr_trunk_t *trunk, void *ptr)
+bool trunk_search(trunk_t *trunk, void *ptr)
{
#define TCONN_DLIST_SEARCH(_dlist) \
do { \
- fr_dlist_foreach(&(trunk->_dlist), fr_trunk_connection_t, tconn) { \
+ fr_dlist_foreach(&(trunk->_dlist), trunk_connection_t, tconn) { \
if (ptr == tconn) { \
- fr_fprintf(stderr, "fr_trunk_search: tconn %p on " #_dlist "\n", ptr); \
+ fr_fprintf(stderr, "trunk_search: tconn %p on " #_dlist "\n", ptr); \
return true; \
} \
- if (fr_trunk_connection_search(tconn, ptr)) { \
+ if (trunk_connection_search(tconn, ptr)) { \
fr_fprintf(stderr, " in tconn %p on " #_dlist "\n", tconn); \
return true; \
} \
#define TCONN_MINMAX_HEAP_SEARCH(_heap) \
do { \
- fr_minmax_heap_foreach(trunk->_heap, fr_trunk_connection_t, tconn) { \
+ fr_minmax_heap_foreach(trunk->_heap, trunk_connection_t, tconn) { \
if (ptr == tconn) { \
- fr_fprintf(stderr, "fr_trunk_search: tconn %p on " #_heap "\n", ptr); \
+ fr_fprintf(stderr, "trunk_search: tconn %p on " #_heap "\n", ptr); \
return true; \
} \
- if (fr_trunk_connection_search(tconn, ptr)) { \
+ if (trunk_connection_search(tconn, ptr)) { \
fr_fprintf(stderr, " on tconn %p on " #_heap "\n", tconn); \
return true; \
} \
return false;
}
-bool fr_trunk_connection_search(fr_trunk_connection_t *tconn, void *ptr)
+bool trunk_connection_search(trunk_connection_t *tconn, void *ptr)
{
#define TREQ_DLIST_SEARCH(_dlist) \
do { \
- fr_dlist_foreach(&(tconn->_dlist), fr_trunk_request_t, treq) { \
+ fr_dlist_foreach(&(tconn->_dlist), trunk_request_t, treq) { \
if (ptr == treq) { \
- fr_fprintf(stderr, "fr_trunk_search: treq %p on " #_dlist "\n", ptr); \
+ fr_fprintf(stderr, "trunk_search: treq %p on " #_dlist "\n", ptr); \
return true; \
} \
- if (fr_trunk_request_search(treq, ptr)) { \
- fr_fprintf(stderr, "fr_trunk_search: preq %p found on " #_dlist, ptr); \
+ if (trunk_request_search(treq, ptr)) { \
+ fr_fprintf(stderr, "trunk_search: preq %p found on " #_dlist, ptr); \
return true; \
} \
} \
#define TREQ_HEAP_SEARCH(_heap) \
do { \
fr_heap_iter_t _iter; \
- for (fr_trunk_request_t *treq = fr_heap_iter_init(tconn->_heap, &_iter); \
+ for (trunk_request_t *treq = fr_heap_iter_init(tconn->_heap, &_iter); \
treq; \
treq = fr_heap_iter_next(tconn->_heap, &_iter)) { \
if (ptr == treq) { \
- fr_fprintf(stderr, "fr_trunk_search: treq %p in " #_heap "\n", ptr); \
+ fr_fprintf(stderr, "trunk_search: treq %p in " #_heap "\n", ptr); \
return true; \
} \
- if (fr_trunk_request_search(treq, ptr)) { \
- fr_fprintf(stderr, "fr_trunk_search: preq %p found in " #_heap, ptr); \
+ if (trunk_request_search(treq, ptr)) { \
+ fr_fprintf(stderr, "trunk_search: preq %p found in " #_heap, ptr); \
return true; \
} \
} \
do { \
if (tconn->_option) { \
if (ptr == tconn->_option) { \
- fr_fprintf(stderr, "fr_trunk_search: treq %p is " #_option "\n", ptr); \
+ fr_fprintf(stderr, "trunk_search: treq %p is " #_option "\n", ptr); \
return true; \
} \
- if (fr_trunk_request_search(tconn->_option, ptr)) { \
- fr_fprintf(stderr, "fr_trunk_search: preq %p found in " #_option, ptr); \
+ if (trunk_request_search(tconn->_option, ptr)) { \
+ fr_fprintf(stderr, "trunk_search: preq %p found in " #_option, ptr); \
return true; \
} \
} \
return false;
}
-bool fr_trunk_request_search(fr_trunk_request_t *treq, void *ptr)
+bool trunk_request_search(trunk_request_t *treq, void *ptr)
{
return treq->pub.preq == ptr;
}
# error _CONST can only be defined in the local header
#endif
#ifndef _TRUNK_PRIVATE
-typedef struct fr_trunk_request_pub_s fr_trunk_request_t;
-typedef struct fr_trunk_connection_pub_s fr_trunk_connection_t;
-typedef struct fr_trunk_pub_s fr_trunk_t;
+typedef struct trunk_request_pub_s trunk_request_t;
+typedef struct trunk_connection_pub_s trunk_connection_t;
+typedef struct trunk_pub_s trunk_t;
# define _CONST const
#else
# define _CONST
*
*/
typedef enum {
- FR_TRUNK_CANCEL_REASON_NONE = 0, //!< Request has not been cancelled.
- FR_TRUNK_CANCEL_REASON_SIGNAL, //!< Request cancelled due to a signal.
- FR_TRUNK_CANCEL_REASON_MOVE, //!< Request cancelled because it's being moved.
- FR_TRUNK_CANCEL_REASON_REQUEUE //!< A previously sent request is being requeued.
-} fr_trunk_cancel_reason_t;
+ TRUNK_CANCEL_REASON_NONE = 0, //!< Request has not been cancelled.
+ TRUNK_CANCEL_REASON_SIGNAL, //!< Request cancelled due to a signal.
+ TRUNK_CANCEL_REASON_MOVE, //!< Request cancelled because it's being moved.
+ TRUNK_CANCEL_REASON_REQUEUE //!< A previously sent request is being requeued.
+} trunk_cancel_reason_t;
typedef enum {
- FR_TRUNK_STATE_IDLE = 0, //!< Trunk has no connections
- FR_TRUNK_STATE_ACTIVE, //!< Trunk has active connections
- FR_TRUNK_STATE_PENDING, //!< Trunk has connections, but none are active
- FR_TRUNK_STATE_MAX
-} fr_trunk_state_t;
+ TRUNK_STATE_IDLE = 0, //!< Trunk has no connections
+ TRUNK_STATE_ACTIVE, //!< Trunk has active connections
+ TRUNK_STATE_PENDING, //!< Trunk has connections, but none are active
+ TRUNK_STATE_MAX
+} trunk_state_t;
/** What type of I/O events the trunk connection is currently interested in receiving
*
*/
typedef enum {
- FR_TRUNK_CONN_EVENT_NONE = 0x00, //!< Don't notify the trunk on connection state
+ TRUNK_CONN_EVENT_NONE = 0x00, //!< Don't notify the trunk on connection state
///< changes.
- FR_TRUNK_CONN_EVENT_READ = 0x01, //!< Trunk should be notified if a connection is
+ TRUNK_CONN_EVENT_READ = 0x01, //!< Trunk should be notified if a connection is
///< readable.
- FR_TRUNK_CONN_EVENT_WRITE = 0x02, //!< Trunk should be notified if a connection is
+ TRUNK_CONN_EVENT_WRITE = 0x02, //!< Trunk should be notified if a connection is
///< writable.
- FR_TRUNK_CONN_EVENT_BOTH = 0x03, //!< Trunk should be notified if a connection is
+ TRUNK_CONN_EVENT_BOTH = 0x03, //!< Trunk should be notified if a connection is
///< readable or writable.
-} fr_trunk_connection_event_t;
+} trunk_connection_event_t;
/** Used for sanity checks and to track which list the connection is in
*
*/
typedef enum {
- FR_TRUNK_CONN_HALTED = 0x0000, //!< Halted, ready to be freed.
- FR_TRUNK_CONN_INIT = 0x0001, //!< In the initial state.
- FR_TRUNK_CONN_CONNECTING = 0x0002, //!< Connection is connecting.
- FR_TRUNK_CONN_ACTIVE = 0x0004, //!< Connection is connected and ready to service requests.
+ TRUNK_CONN_HALTED = 0x0000, //!< Halted, ready to be freed.
+ TRUNK_CONN_INIT = 0x0001, //!< In the initial state.
+ TRUNK_CONN_CONNECTING = 0x0002, //!< Connection is connecting.
+ TRUNK_CONN_ACTIVE = 0x0004, //!< Connection is connected and ready to service requests.
///< This is active and not 'connected', because a connection
///< can be 'connected' and 'full' or 'connected' and 'active'.
- FR_TRUNK_CONN_CLOSED = 0x0008, //!< Connection was closed, either explicitly or due to failure.
- FR_TRUNK_CONN_FULL = 0x0010, //!< Connection is full and can't accept any more requests.
- FR_TRUNK_CONN_INACTIVE = 0x0020, //!< Connection is inactive and can't accept any more requests.
- FR_TRUNK_CONN_INACTIVE_DRAINING = 0x0040, //!< Connection is inactive, can't accept any more requests,
+ TRUNK_CONN_CLOSED = 0x0008, //!< Connection was closed, either explicitly or due to failure.
+ TRUNK_CONN_FULL = 0x0010, //!< Connection is full and can't accept any more requests.
+ TRUNK_CONN_INACTIVE = 0x0020, //!< Connection is inactive and can't accept any more requests.
+ TRUNK_CONN_INACTIVE_DRAINING = 0x0040, //!< Connection is inactive, can't accept any more requests,
///< and will be closed once it has no more outstanding
///< requests. Connections in this state can transition to
- ///< #FR_TRUNK_CONN_DRAINING.
- FR_TRUNK_CONN_DRAINING = 0x0080, //!< Connection will be closed once it has no more outstanding
+ ///< #TRUNK_CONN_DRAINING.
+ TRUNK_CONN_DRAINING = 0x0080, //!< Connection will be closed once it has no more outstanding
///< requests, if it's not reactivated.
- FR_TRUNK_CONN_DRAINING_TO_FREE = 0x0100, //!< Connection will be closed once it has no more outstanding
+ TRUNK_CONN_DRAINING_TO_FREE = 0x0100, //!< Connection will be closed once it has no more outstanding
///< requests.
-} fr_trunk_connection_state_t;
+} trunk_connection_state_t;
/** All connection states
*
*/
-#define FR_TRUNK_CONN_ALL \
+#define TRUNK_CONN_ALL \
(\
- FR_TRUNK_CONN_INIT | \
- FR_TRUNK_CONN_CONNECTING | \
- FR_TRUNK_CONN_ACTIVE | \
- FR_TRUNK_CONN_CLOSED | \
- FR_TRUNK_CONN_FULL | \
- FR_TRUNK_CONN_INACTIVE | \
- FR_TRUNK_CONN_DRAINING | \
- FR_TRUNK_CONN_DRAINING_TO_FREE \
+ TRUNK_CONN_INIT | \
+ TRUNK_CONN_CONNECTING | \
+ TRUNK_CONN_ACTIVE | \
+ TRUNK_CONN_CLOSED | \
+ TRUNK_CONN_FULL | \
+ TRUNK_CONN_INACTIVE | \
+ TRUNK_CONN_DRAINING | \
+ TRUNK_CONN_DRAINING_TO_FREE \
)
/** States where the connection may potentially be used to send requests
*
*/
-#define FR_TRUNK_CONN_SERVICEABLE \
+#define TRUNK_CONN_SERVICEABLE \
(\
- FR_TRUNK_CONN_ACTIVE | \
- FR_TRUNK_CONN_INACTIVE | \
- FR_TRUNK_CONN_DRAINING | \
- FR_TRUNK_CONN_INACTIVE_DRAINING | \
- FR_TRUNK_CONN_DRAINING_TO_FREE \
+ TRUNK_CONN_ACTIVE | \
+ TRUNK_CONN_INACTIVE | \
+ TRUNK_CONN_DRAINING | \
+ TRUNK_CONN_INACTIVE_DRAINING | \
+ TRUNK_CONN_DRAINING_TO_FREE \
)
/** States where the connection may be processing requests
*
*/
-#define FR_TRUNK_CONN_PROCESSING \
+#define TRUNK_CONN_PROCESSING \
(\
- FR_TRUNK_CONN_ACTIVE | \
- FR_TRUNK_CONN_FULL | \
- FR_TRUNK_CONN_INACTIVE | \
- FR_TRUNK_CONN_DRAINING | \
- FR_TRUNK_CONN_INACTIVE_DRAINING | \
- FR_TRUNK_CONN_DRAINING_TO_FREE \
+ TRUNK_CONN_ACTIVE | \
+ TRUNK_CONN_FULL | \
+ TRUNK_CONN_INACTIVE | \
+ TRUNK_CONN_DRAINING | \
+ TRUNK_CONN_INACTIVE_DRAINING | \
+ TRUNK_CONN_DRAINING_TO_FREE \
)
typedef enum {
- FR_TRUNK_ENQUEUE_IN_BACKLOG = 1, //!< Request should be enqueued in backlog
- FR_TRUNK_ENQUEUE_OK = 0, //!< Operation was successful.
- FR_TRUNK_ENQUEUE_NO_CAPACITY = -1, //!< At maximum number of connections,
+ TRUNK_ENQUEUE_IN_BACKLOG = 1, //!< Request should be enqueued in backlog
+ TRUNK_ENQUEUE_OK = 0, //!< Operation was successful.
+ TRUNK_ENQUEUE_NO_CAPACITY = -1, //!< At maximum number of connections,
///< and no connection has capacity.
- FR_TRUNK_ENQUEUE_DST_UNAVAILABLE = -2, //!< Destination is down.
- FR_TRUNK_ENQUEUE_FAIL = -3 //!< General failure.
-} fr_trunk_enqueue_t;
+ TRUNK_ENQUEUE_DST_UNAVAILABLE = -2, //!< Destination is down.
+ TRUNK_ENQUEUE_FAIL = -3 //!< General failure.
+} trunk_enqueue_t;
/** Used for sanity checks and to simplify freeing
*
* Allows us to track which
*/
typedef enum {
- FR_TRUNK_REQUEST_STATE_INIT = 0x0000, //!< Initial state. Requests in this state
+ TRUNK_REQUEST_STATE_INIT = 0x0000, //!< Initial state. Requests in this state
///< were never assigned, and the request_t should
///< not have been yielded.
- FR_TRUNK_REQUEST_STATE_UNASSIGNED = 0x0001, //!< Transition state - Request currently
+ TRUNK_REQUEST_STATE_UNASSIGNED = 0x0001, //!< Transition state - Request currently
///< not assigned to any connection.
- FR_TRUNK_REQUEST_STATE_BACKLOG = 0x0002, //!< In the backlog.
- FR_TRUNK_REQUEST_STATE_PENDING = 0x0004, //!< In the queue of a connection
+ TRUNK_REQUEST_STATE_BACKLOG = 0x0002, //!< In the backlog.
+ TRUNK_REQUEST_STATE_PENDING = 0x0004, //!< In the queue of a connection
///< and is pending writing.
- FR_TRUNK_REQUEST_STATE_PARTIAL = 0x0008, //!< Some of the request was written to the socket,
+ TRUNK_REQUEST_STATE_PARTIAL = 0x0008, //!< Some of the request was written to the socket,
///< more of it should be written later.
- FR_TRUNK_REQUEST_STATE_SENT = 0x0010, //!< Was written to a socket. Waiting for a response.
- FR_TRUNK_REQUEST_STATE_COMPLETE = 0x0020, //!< The request is complete.
- FR_TRUNK_REQUEST_STATE_FAILED = 0x0040, //!< The request failed.
- FR_TRUNK_REQUEST_STATE_CANCEL = 0x0080, //!< A request on a particular socket was cancel.
- FR_TRUNK_REQUEST_STATE_CANCEL_SENT = 0x0100, //!< We've informed the remote server that
+ TRUNK_REQUEST_STATE_SENT = 0x0010, //!< Was written to a socket. Waiting for a response.
+ TRUNK_REQUEST_STATE_COMPLETE = 0x0020, //!< The request is complete.
+ TRUNK_REQUEST_STATE_FAILED = 0x0040, //!< The request failed.
+ TRUNK_REQUEST_STATE_CANCEL = 0x0080, //!< A request on a particular socket was cancelled.
+ TRUNK_REQUEST_STATE_CANCEL_SENT = 0x0100, //!< We've informed the remote server that
///< the request has been cancelled.
- FR_TRUNK_REQUEST_STATE_CANCEL_PARTIAL = 0x0200, //!< We partially wrote a cancellation request.
- FR_TRUNK_REQUEST_STATE_CANCEL_COMPLETE = 0x0400, //!< Remote server has acknowledged our cancellation.
+ TRUNK_REQUEST_STATE_CANCEL_PARTIAL = 0x0200, //!< We partially wrote a cancellation request.
+ TRUNK_REQUEST_STATE_CANCEL_COMPLETE = 0x0400, //!< Remote server has acknowledged our cancellation.
- FR_TRUNK_REQUEST_STATE_IDLE = 0x0800, //!< Request has been written, needs to persist, but we
+ TRUNK_REQUEST_STATE_IDLE = 0x0800, //!< Request has been written, needs to persist, but we
///< are not currently waiting for any response.
-} fr_trunk_request_state_t;
+} trunk_request_state_t;
/** All request states
*
*/
-#define FR_TRUNK_REQUEST_STATE_ALL \
+#define TRUNK_REQUEST_STATE_ALL \
(\
- FR_TRUNK_REQUEST_STATE_BACKLOG | \
- FR_TRUNK_REQUEST_STATE_PENDING | \
- FR_TRUNK_REQUEST_STATE_PARTIAL | \
- FR_TRUNK_REQUEST_STATE_SENT | \
- FR_TRUNK_REQUEST_STATE_COMPLETE | \
- FR_TRUNK_REQUEST_STATE_FAILED | \
- FR_TRUNK_REQUEST_STATE_CANCEL | \
- FR_TRUNK_REQUEST_STATE_CANCEL_PARTIAL | \
- FR_TRUNK_REQUEST_STATE_CANCEL_SENT | \
- FR_TRUNK_REQUEST_STATE_CANCEL_COMPLETE | \
- FR_TRUNK_REQUEST_STATE_IDLE \
+ TRUNK_REQUEST_STATE_BACKLOG | \
+ TRUNK_REQUEST_STATE_PENDING | \
+ TRUNK_REQUEST_STATE_PARTIAL | \
+ TRUNK_REQUEST_STATE_SENT | \
+ TRUNK_REQUEST_STATE_COMPLETE | \
+ TRUNK_REQUEST_STATE_FAILED | \
+ TRUNK_REQUEST_STATE_CANCEL | \
+ TRUNK_REQUEST_STATE_CANCEL_PARTIAL | \
+ TRUNK_REQUEST_STATE_CANCEL_SENT | \
+ TRUNK_REQUEST_STATE_CANCEL_COMPLETE | \
+ TRUNK_REQUEST_STATE_IDLE \
)
/** All requests in various cancellation states
*
*/
-#define FR_TRUNK_REQUEST_STATE_CANCEL_ALL \
+#define TRUNK_REQUEST_STATE_CANCEL_ALL \
(\
- FR_TRUNK_REQUEST_STATE_CANCEL | \
- FR_TRUNK_REQUEST_STATE_CANCEL_PARTIAL | \
- FR_TRUNK_REQUEST_STATE_CANCEL_SENT | \
- FR_TRUNK_REQUEST_STATE_CANCEL_COMPLETE \
+ TRUNK_REQUEST_STATE_CANCEL | \
+ TRUNK_REQUEST_STATE_CANCEL_PARTIAL | \
+ TRUNK_REQUEST_STATE_CANCEL_SENT | \
+ TRUNK_REQUEST_STATE_CANCEL_COMPLETE \
)
/** Common configuration parameters for a trunk
*
*/
typedef struct {
- fr_connection_conf_t const *conn_conf; //!< Connection configuration.
+ connection_conf_t const *conn_conf; //!< Connection configuration.
uint16_t start; //!< How many connections to start.
///< used to implement the connection can always receive
///< and buffer new requests irrespective of the state
///< of the underlying socket.
- ///< If this is true, #fr_trunk_connection_signal_writable
+ ///< If this is true, #trunk_connection_signal_writable
///< does not need to be called, and requests will be
///< enqueued as soon as they're received.
bool backlog_on_failed_conn; //!< Assign requests to the backlog when there are no
//!< available connections and the last connection event
//!< was a failure, instead of failing them immediately.
-} fr_trunk_conf_t;
+} trunk_conf_t;
/** Public fields for the trunk
*
* Though these fields are public, they should _NOT_ be modified by clients of
* the trunk API.
*/
-struct fr_trunk_pub_s {
+struct trunk_pub_s {
/** @name Last time an event occurred
* @{
*/
bool _CONST triggers; //!< do we run the triggers?
- fr_trunk_state_t _CONST state; //!< Current state of the trunk.
+ trunk_state_t _CONST state; //!< Current state of the trunk.
};
/** Public fields for the trunk request
* Though these fields are public, they should _NOT_ be modified by clients of
* the trunk API.
*/
-struct fr_trunk_request_pub_s {
- fr_trunk_request_state_t _CONST state; //!< Which list the request is now located in.
+struct trunk_request_pub_s {
+ trunk_request_state_t _CONST state; //!< Which list the request is now located in.
- fr_trunk_t * _CONST trunk; //!< Trunk this request belongs to.
+ trunk_t * _CONST trunk; //!< Trunk this request belongs to.
- fr_trunk_connection_t * _CONST tconn; //!< Connection this request belongs to.
+ trunk_connection_t * _CONST tconn; //!< Connection this request belongs to.
void * _CONST preq; //!< Data for the muxer to write to the connection.
* Though these fields are public, they should _NOT_ be modified by clients of
* the trunk API.
*/
-struct fr_trunk_connection_pub_s {
- fr_trunk_connection_state_t _CONST state; //!< What state the connection is in.
+struct trunk_connection_pub_s {
+ trunk_connection_state_t _CONST state; //!< What state the connection is in.
- fr_connection_t * _CONST conn; //!< The underlying connection.
+ connection_t * _CONST conn; //!< The underlying connection.
- fr_trunk_t * _CONST trunk; //!< Trunk this connection belongs to.
+ trunk_t * _CONST trunk; //!< Trunk this connection belongs to.
};
#ifndef TRUNK_TESTS
-/** Config parser definitions to populate a fr_trunk_conf_t
+/** Config parser definitions to populate a trunk_conf_t
*
*/
-extern conf_parser_t const fr_trunk_config[];
+extern conf_parser_t const trunk_config[];
#endif
/** Allocate a new connection for the trunk
* and closing connections.
*
* When creating new connections, this callback is used to allocate and configure
- * a new #fr_connection_t, this #fr_connection_t and the fr_connection API is how the
+ * a new #connection_t, this #connection_t and the connection API is how the
* trunk signals the underlying connection that it should start, reconnect, and halt (stop).
*
* The trunk must be informed when the underlying connection is readable, and,
* if `always_writable == false`, when the connection is writable.
*
* When the connection is readable, a read I/O handler installed by the init()
- * callback of the #fr_connection_t must either:
+ * callback of the #connection_t must either:
*
- * - If there's no underlying I/O library, call `fr_trunk_connection_signal_readable(tconn)`
+ * - If there's no underlying I/O library, call `trunk_connection_signal_readable(tconn)`
* immediately, relying on the trunk demux callback to perform decoding and demuxing.
* - If there is an underlying I/O library, feed any incoming data to that library and
- * then call #fr_trunk_connection_signal_readable if the underlying I/O library
+ * then call #trunk_connection_signal_readable if the underlying I/O library
* indicates complete responses are ready for processing.
*
* When the connection is writable a write I/O handler installed by the open() callback
- * of the #fr_connection_t must either:
+ * of the #connection_t must either:
*
* - If `always_writable == true` - Inform the underlying I/O library that the connection
* is writable. The trunk API does not need to be informed as it will immediately pass
* through any enqueued requests to the I/O library.
* - If `always_writable == false` and there's an underlying I/O library,
- * call `fr_trunk_connection_signal_writable(tconn)` to allow the trunk mux callback
+ * call `trunk_connection_signal_writable(tconn)` to allow the trunk mux callback
* to pass requests to the underlying I/O library and (optionally) signal the I/O library
* that the connection is writable.
* - If `always_writable == false` and there's no underlying I/O library,
- * call `fr_trunk_connection_signal_writable(tconn)` to allow the trunk mux callback
+ * call `trunk_connection_signal_writable(tconn)` to allow the trunk mux callback
* to encode and write requests to a socket.
*
* @param[in] tconn The trunk connection this connection will be bound to.
- * Should be used as the context for any #fr_connection_t
+ * Should be used as the context for any #connection_t
* allocated.
* @param[in] el The event list to use for I/O and timer events.
- * @param[in] conf Configuration of the #fr_connection_t.
+ * @param[in] conf Configuration of the #connection_t.
* @param[in] log_prefix What to prefix connection log messages with.
- * @param[in] uctx User context data passed to #fr_trunk_alloc.
+ * @param[in] uctx User context data passed to #trunk_alloc.
* @return
- * - A new fr_connection_t on success (should be in the halted state - the default).
+ * - A new connection_t on success (should be in the halted state - the default).
* - NULL on error.
*/
-typedef fr_connection_t *(*fr_trunk_connection_alloc_t)(fr_trunk_connection_t *tconn, fr_event_list_t *el,
- fr_connection_conf_t const *conf,
+typedef connection_t *(*trunk_connection_alloc_t)(trunk_connection_t *tconn, fr_event_list_t *el,
+ connection_conf_t const *conf,
char const *log_prefix, void *uctx);
/** Inform the trunk API client which I/O events the trunk wants to receive
* I/O handlers installed by this callback should call one or more of the following
* functions to signal that an I/O event has occurred:
*
- * - fr_trunk_connection_signal_writable - Connection is now writable.
- * - fr_trunk_connection_signal_readable - Connection is now readable.
- * - fr_trunk_connection_signal_inactive - Connection is full or congested.
- * - fr_trunk_connection_signal_active - Connection is no longer full or congested.
- * - fr_trunk_connection_signal_reconnect - Connection is inviable and should be reconnected.
+ * - trunk_connection_signal_writable - Connection is now writable.
+ * - trunk_connection_signal_readable - Connection is now readable.
+ * - trunk_connection_signal_inactive - Connection is full or congested.
+ * - trunk_connection_signal_active - Connection is no longer full or congested.
+ * - trunk_connection_signal_reconnect - Connection is inviable and should be reconnected.
*
* @param[in] tconn That should be notified of I/O events.
- * @param[in] conn The #fr_connection_t bound to the tconn.
+ * @param[in] conn The #connection_t bound to the tconn.
* Use conn->h to access the
* connection handle or file descriptor.
* @param[in] el to insert I/O events into.
* @param[in] notify_on I/O events to signal the trunk connection on.
- * @param[in] uctx User context data passed to #fr_trunk_alloc.
+ * @param[in] uctx User context data passed to #trunk_alloc.
*/
-typedef void (*fr_trunk_connection_notify_t)(fr_trunk_connection_t *tconn, fr_connection_t *conn,
+typedef void (*trunk_connection_notify_t)(trunk_connection_t *tconn, connection_t *conn,
fr_event_list_t *el,
- fr_trunk_connection_event_t notify_on, void *uctx);
+ trunk_connection_event_t notify_on, void *uctx);
/** Multiplex one or more requests into a single connection
*
* This callback should:
*
* - Pop one or more requests from the trunk connection's pending queue using
- * #fr_trunk_connection_pop_request.
+ * #trunk_connection_pop_request.
* - Serialize the protocol request data contained within the trunk request's (treq's)
- * pctx, writing it to the provided #fr_connection_t (or underlying connection handle).
+ * pctx, writing it to the provided #connection_t (or underlying connection handle).
* - Insert the provided treq
- * into a tracking structure associated with the #fr_connection_t or uctx.
+ * into a tracking structure associated with the #connection_t or uctx.
* This tracking structure will be used later in the trunk demux callback to match
* protocol requests with protocol responses.
*
* If working at the socket level and a write on a file descriptor indicates
* less data was written than was needed, the trunk API client should track the
* amount of data written in the protocol request (preq), and should call
- * `fr_trunk_request_signal_partial(treq)`.
- * #fr_trunk_request_signal_partial will move the request out of the pending
+ * `trunk_request_signal_partial(treq)`.
+ * #trunk_request_signal_partial will move the request out of the pending
* queue, and store it in the partial slot of the trunk connection.
- * The next time #fr_trunk_connection_pop_request is called, the partially written
+ * The next time #trunk_connection_pop_request is called, the partially written
* treq will be returned first. The API client should continue writing the partially
* written request to the socket.
*
- * After calling #fr_trunk_request_signal_partial this callback *MUST NOT*
- * call #fr_trunk_connection_pop_request again, and should immediately return.
+ * After calling #trunk_request_signal_partial this callback *MUST NOT*
+ * call #trunk_connection_pop_request again, and should immediately return.
*
* If the request can't be written to the connection because it the connection
* has become unusable, this callback should call
- * `fr_connection_signal_reconnect(conn)` to notify the connection API that the
+ * `connection_signal_reconnect(conn)` to notify the connection API that the
* connection is unusable. The current request will either fail, or be
* re-enqueued depending on the trunk configuration.
*
- * After calling #fr_connection_signal_reconnect this callback *MUST NOT*
- * call #fr_trunk_connection_pop_request again, and should immediately return.
+ * After calling #connection_signal_reconnect this callback *MUST NOT*
+ * call #trunk_connection_pop_request again, and should immediately return.
*
* If the protocol request data can't be written to the connection because the
* data is invalid or because some other error occurred, this callback should
- * call `fr_trunk_request_signal_fail(treq)`, this callback may then continue
+ * call `trunk_request_signal_fail(treq)`, this callback may then continue
* popping/processing other requests.
*
* @param[in] el For timer management.
* @param[in] conn Connection to write the request to.
* Use conn->h to access the
* connection handle or file descriptor.
- * @param[in] uctx User context data passed to #fr_trunk_alloc.
+ * @param[in] uctx User context data passed to #trunk_alloc.
*/
-typedef void (*fr_trunk_request_mux_t)(fr_event_list_t *el,
- fr_trunk_connection_t *tconn, fr_connection_t *conn, void *uctx);
+typedef void (*trunk_request_mux_t)(fr_event_list_t *el,
+ trunk_connection_t *tconn, connection_t *conn, void *uctx);
/** Demultiplex on or more responses, reading them from a connection, decoding them, and matching them with their requests
*
*
* - If an underlying I/O library is used, request complete responses from
* the I/O library, and match the responses with a treq (trunk request)
- * using a tracking structure associated with the #fr_connection_t or uctx.
- * - If no underlying I/O library is used, read responses from the #fr_connection_t,
+ * using a tracking structure associated with the #connection_t or uctx.
+ * - If no underlying I/O library is used, read responses from the #connection_t,
* decode those responses, and match those responses with a treq using a tracking
- * structure associated with the #fr_connection_t or uctx.
+ * structure associated with the #connection_t or uctx.
*
* The result (positive or negative), should be written to the rctx structure.
*
- * #fr_trunk_request_signal_complete should be used to inform the trunk
+ * #trunk_request_signal_complete should be used to inform the trunk
* that the request is now complete.
*
* If a connection appears to have become unusable, this callback should call
- * #fr_connection_signal_reconnect and immediately return. The current
+ * #connection_signal_reconnect and immediately return. The current
* treq will either fail, or be re-enqueued depending on the trunk configuration.
*
- * #fr_trunk_request_signal_fail should *NOT* be called as this function is only
+ * #trunk_request_signal_fail should *NOT* be called as this function is only
* used for reporting failures at an I/O layer level not failures of queries or
* external services.
*
* @param[in] conn Connection to read the request from.
* Use conn->h to access the
* connection handle or file descriptor.
- * @param[in] uctx User context data passed to #fr_trunk_alloc.
+ * @param[in] uctx User context data passed to #trunk_alloc.
*/
-typedef void (*fr_trunk_request_demux_t)(fr_event_list_t *el,
- fr_trunk_connection_t *tconn, fr_connection_t *conn, void *uctx);
+typedef void (*trunk_request_demux_t)(fr_event_list_t *el,
+ trunk_connection_t *tconn, connection_t *conn, void *uctx);
/** Inform a remote service like a datastore that a request should be cancelled
*
* This callback will be called any time there are one or more requests to be
- * cancelled and a #fr_connection_t is writable, or as soon as a request is
+ * cancelled and a #connection_t is writable, or as soon as a request is
* cancelled if `always_writable == true`.
*
- * For efficiency, this callback should call #fr_trunk_connection_pop_cancellation
+ * For efficiency, this callback should call #trunk_connection_pop_cancellation
* multiple times, and process all outstanding cancellation requests.
*
* If the response (cancel ACK) from the remote service needs to be tracked,
* then the treq should be inserted into a tracking tree shared with the demuxer,
- * and #fr_trunk_request_signal_cancel_sent should be called to move the treq into
+ * and #trunk_request_signal_cancel_sent should be called to move the treq into
* the cancel_sent state.
*
* As with the main mux callback, if a cancellation request is partially written
- * #fr_trunk_request_signal_cancel_partial should be called, and the amount
+ * #trunk_request_signal_cancel_partial should be called, and the amount
* of data written should be tracked in the preq (protocol request).
*
* When the demuxer finds a matching (cancel ACK) response, the demuxer should
* remove the entry from the tracking tree and call
- * #fr_trunk_request_signal_cancel_complete.
+ * #trunk_request_signal_cancel_complete.
*
* @param[in] el To insert any timers into.
*
* @param[in] conn Connection to write the request to.
* Use conn->h to access the
* connection handle or file descriptor.
- * @param[in] uctx User context data passed to #fr_trunk_alloc.
+ * @param[in] uctx User context data passed to #trunk_alloc.
*/
-typedef void (*fr_trunk_request_cancel_mux_t)(fr_event_list_t *el,
- fr_trunk_connection_t *tconn, fr_connection_t *conn, void *uctx);
+typedef void (*trunk_request_cancel_mux_t)(fr_event_list_t *el,
+ trunk_connection_t *tconn, connection_t *conn, void *uctx);
/** Remove an outstanding "sent" request from a tracking/matching structure
*
- * If the treq (trunk request) is in the FR_TRUNK_REQUEST_STATE_PARTIAL or
- * FR_TRUNK_REQUEST_STATE_SENT states, this callback will be called prior
+ * If the treq (trunk request) is in the TRUNK_REQUEST_STATE_PARTIAL or
+ * TRUNK_REQUEST_STATE_SENT states, this callback will be called prior
* to moving the treq to a new connection, requeueing the treq or freeing
* the treq.
*
* The treq, and any associated resources, should be
* removed from the the matching structure associated with the
- * #fr_connection_t or uctx.
+ * #connection_t or uctx.
*
* Which resources should be freed depends on the cancellation reason:
*
- * - FR_TRUNK_CANCEL_REASON_REQUEUE - If an encoded request can be
+ * - TRUNK_CANCEL_REASON_REQUEUE - If an encoded request can be
* reused, then it should be kept, otherwise it should be freed.
* Any resources like ID allocations bound to that request should
* also be freed.
- * #fr_trunk_request_conn_release_t callback will not be called in this
+ * #trunk_request_conn_release_t callback will not be called in this
* instance and cannot be used as an alternative.
- * - FR_TRUNK_CANCEL_REASON_MOVE - If an encoded request can be reused
+ * - TRUNK_CANCEL_REASON_MOVE - If an encoded request can be reused
* it should be kept. The trunk mux callback should be aware that
* an encoded request may already be associated with a preq and use
* that instead of re-encoding the preq.
* If the encoded request cannot be reused it should be freed, and
* any fields in the preq that were modified during the last mux call
* (other than perhaps counters) should be reset to their initial values.
- * Alternatively the #fr_trunk_request_conn_release_t callback can be used for
+ * Alternatively the #trunk_request_conn_release_t callback can be used for
* the same purpose, as that will be called before the request is moved.
- * - FR_TRUNK_CANCEL_REASON_SIGNAL - The encoded request and any I/O library
+ * - TRUNK_CANCEL_REASON_SIGNAL - The encoded request and any I/O library
* request handled may be freed though that may (optionally) be left to
- * another callback like #fr_trunk_request_conn_release_t, as that will be
+ * another callback like #trunk_request_conn_release_t, as that will be
* called as the treq is removed from the conn.
- * Note that the #fr_trunk_request_complete_t and
- * #fr_trunk_request_fail_t callbacks will not be called in this
+ * Note that the #trunk_request_complete_t and
+ * #trunk_request_fail_t callbacks will not be called in this
* instance.
*
* After this callback is complete one of several actions will be taken:
*
- * - If the cancellation reason was FR_TRUNK_CANCEL_REASON_REQUEUE the
+ * - If the cancellation reason was TRUNK_CANCEL_REASON_REQUEUE the
* treq will be placed back into the pending list of the connection it
* was previously associated with.
- * - If the cancellation reason was FR_TRUNK_CANCEL_REASON_MOVE, the treq
+ * - If the cancellation reason was TRUNK_CANCEL_REASON_MOVE, the treq
* will move to the unassigned state, and then either be placed in the
* trunk backlog, or immediately enqueued on another trunk connection.
- * - If the reason was FR_TRUNK_CANCEL_SIGNAL
+ * - If the reason was TRUNK_CANCEL_REASON_SIGNAL
* - ...and a request_cancel_mux callback was provided, the
* the request_cancel_mux callback will be called when the connection
* is next writable (or immediately if `always_writable == true`) and
* - ...and no request_cancel_mux callback was provided, the
* treq will enter the unassigned state and then be freed.
*
- * @note FR_TRUNK_CANCEL_REASON_MOVE will only be set if the underlying
+ * @note TRUNK_CANCEL_REASON_MOVE will only be set if the underlying
* connection is bad. A 'sent' treq will never be moved due to load
* balancing.
*
* @param[in] conn to remove request from.
* @param[in] preq_to_reset Preq to reset.
* @param[in] reason Why the request was cancelled.
- * @param[in] uctx User context data passed to #fr_trunk_alloc.
+ * @param[in] uctx User context data passed to #trunk_alloc.
*/
-typedef void (*fr_trunk_request_cancel_t)(fr_connection_t *conn, void *preq_to_reset,
- fr_trunk_cancel_reason_t reason, void *uctx);
+typedef void (*trunk_request_cancel_t)(connection_t *conn, void *preq_to_reset,
+ trunk_cancel_reason_t reason, void *uctx);
/** Free connection specific resources from a treq, as the treq is being removed from a connection
*
* @param[in] conn request will be removed from.
* @param[in] preq_to_reset Preq to remove connection specified resources
* from.
- * @param[in] uctx User context data passed to #fr_trunk_alloc.
+ * @param[in] uctx User context data passed to #trunk_alloc.
*/
-typedef void (*fr_trunk_request_conn_release_t)(fr_connection_t *conn, void *preq_to_reset,
+typedef void (*trunk_request_conn_release_t)(connection_t *conn, void *preq_to_reset,
void *uctx);
/** Write a successful result to the rctx so that the trunk API client is aware of the result
*
* After this callback is complete, the request_free callback will be called if provided.
*/
-typedef void (*fr_trunk_request_complete_t)(request_t *request, void *preq, void *rctx, void *uctx);
+typedef void (*trunk_request_complete_t)(request_t *request, void *preq, void *rctx, void *uctx);
/** Write a failure result to the rctx so that the trunk API client is aware that the request failed
*
*
* After this callback is complete, the request_free callback will be called if provided.
*/
-typedef void (*fr_trunk_request_fail_t)(request_t *request, void *preq, void *rctx,
- fr_trunk_request_state_t state, void *uctx);
+typedef void (*trunk_request_fail_t)(request_t *request, void *preq, void *rctx,
+ trunk_request_state_t state, void *uctx);
/** Free resources associated with a trunk request
*
*
* @param[in] request to mark as runnable if no further processing is required.
* @param[in] preq_to_free As per the name.
- * @param[in] uctx User context data passed to #fr_trunk_alloc.
+ * @param[in] uctx User context data passed to #trunk_alloc.
*/
-typedef void (*fr_trunk_request_free_t)(request_t *request, void *preq_to_free, void *uctx);
+typedef void (*trunk_request_free_t)(request_t *request, void *preq_to_free, void *uctx);
/** Receive a notification when a trunk enters a particular state
*
* @param[in] trunk Being watched.
* @param[in] prev State we came from.
* @param[in] state State that was entered (the current state)
- * @param[in] uctx that was passed to fr_trunk_add_watch_*.
+ * @param[in] uctx that was passed to trunk_add_watch_*.
*/
-typedef void(*fr_trunk_watch_t)(fr_trunk_t *trunk,
- fr_trunk_state_t prev, fr_trunk_state_t state, void *uctx);
+typedef void(*trunk_watch_t)(trunk_t *trunk,
+ trunk_state_t prev, trunk_state_t state, void *uctx);
-typedef struct fr_trunk_watch_entry_s fr_trunk_watch_entry_t;
+typedef struct trunk_watch_entry_s trunk_watch_entry_t;
-/** I/O functions to pass to fr_trunk_alloc
+/** I/O functions to pass to trunk_alloc
*
*/
typedef struct {
- fr_trunk_connection_alloc_t connection_alloc; //!< Allocate a new fr_connection_t.
+ trunk_connection_alloc_t connection_alloc; //!< Allocate a new connection_t.
- fr_trunk_connection_notify_t connection_notify; //!< Update the I/O event registrations for
+ trunk_connection_notify_t connection_notify; //!< Update the I/O event registrations for
fr_heap_cmp_t connection_prioritise; //!< Ordering function for connections.
fr_heap_cmp_t request_prioritise; //!< Ordering function for requests. Controls
///< where in the outbound queues they're inserted.
- fr_trunk_request_mux_t request_mux; ///!< Write one or more requests to a connection.
+	trunk_request_mux_t	request_mux;		//!< Write one or more requests to a connection.
- fr_trunk_request_demux_t request_demux; ///!< Read one or more requests from a connection.
+	trunk_request_demux_t	request_demux;		//!< Read one or more requests from a connection.
- fr_trunk_request_cancel_mux_t request_cancel_mux; //!< Inform an external resource that we no longer
+ trunk_request_cancel_mux_t request_cancel_mux; //!< Inform an external resource that we no longer
///< care about the result of any queries we
///< issued for this request.
- fr_trunk_request_cancel_t request_cancel; //!< Request should be removed from tracking
+ trunk_request_cancel_t request_cancel; //!< Request should be removed from tracking
///< and should be reset to its initial state.
- fr_trunk_request_conn_release_t request_conn_release; //!< Any connection specific resources should be
+ trunk_request_conn_release_t request_conn_release; //!< Any connection specific resources should be
///< removed from the treq as it's about to be
///< moved or freed.
- fr_trunk_request_complete_t request_complete; //!< Request is complete, interpret the response
+ trunk_request_complete_t request_complete; //!< Request is complete, interpret the response
///< contained in preq.
- fr_trunk_request_fail_t request_fail; //!< Request failed, write out a canned response.
+ trunk_request_fail_t request_fail; //!< Request failed, write out a canned response.
- fr_trunk_request_free_t request_free; //!< Free the preq and any resources it holds and
+ trunk_request_free_t request_free; //!< Free the preq and any resources it holds and
///< provide a chance to mark the request as runnable.
-} fr_trunk_io_funcs_t;
+} trunk_io_funcs_t;
/** @name Statistics
* @{
*/
-uint16_t fr_trunk_connection_count_by_state(fr_trunk_t *trunk, int conn_state) CC_HINT(nonnull);
+uint16_t trunk_connection_count_by_state(trunk_t *trunk, int conn_state) CC_HINT(nonnull);
-uint32_t fr_trunk_request_count_by_connection(fr_trunk_connection_t const *tconn, int req_state) CC_HINT(nonnull);
+uint32_t trunk_request_count_by_connection(trunk_connection_t const *tconn, int req_state) CC_HINT(nonnull);
-uint64_t fr_trunk_request_count_by_state(fr_trunk_t *trunk, int conn_state, int req_state) CC_HINT(nonnull);
+uint64_t trunk_request_count_by_state(trunk_t *trunk, int conn_state, int req_state) CC_HINT(nonnull);
/** @} */
/** @name Request state signalling
* @{
*/
-void fr_trunk_request_signal_partial(fr_trunk_request_t *treq) CC_HINT(nonnull);
+void trunk_request_signal_partial(trunk_request_t *treq) CC_HINT(nonnull);
-void fr_trunk_request_signal_sent(fr_trunk_request_t *treq) CC_HINT(nonnull);
+void trunk_request_signal_sent(trunk_request_t *treq) CC_HINT(nonnull);
-void fr_trunk_request_signal_idle(fr_trunk_request_t *treq) CC_HINT(nonnull);
+void trunk_request_signal_idle(trunk_request_t *treq) CC_HINT(nonnull);
-void fr_trunk_request_signal_complete(fr_trunk_request_t *treq) CC_HINT(nonnull);
+void trunk_request_signal_complete(trunk_request_t *treq) CC_HINT(nonnull);
-void fr_trunk_request_signal_fail(fr_trunk_request_t *treq) CC_HINT(nonnull);
+void trunk_request_signal_fail(trunk_request_t *treq) CC_HINT(nonnull);
-void fr_trunk_request_signal_cancel(fr_trunk_request_t *treq) CC_HINT(nonnull);
+void trunk_request_signal_cancel(trunk_request_t *treq) CC_HINT(nonnull);
-void fr_trunk_request_signal_cancel_partial(fr_trunk_request_t *treq) CC_HINT(nonnull);
+void trunk_request_signal_cancel_partial(trunk_request_t *treq) CC_HINT(nonnull);
-void fr_trunk_request_signal_cancel_sent(fr_trunk_request_t *treq) CC_HINT(nonnull);
+void trunk_request_signal_cancel_sent(trunk_request_t *treq) CC_HINT(nonnull);
-void fr_trunk_request_signal_cancel_complete(fr_trunk_request_t *treq) CC_HINT(nonnull);
+void trunk_request_signal_cancel_complete(trunk_request_t *treq) CC_HINT(nonnull);
/** @} */
/** @name (R)enqueue and alloc requests
* @{
*/
-uint64_t fr_trunk_connection_requests_requeue(fr_trunk_connection_t *tconn, int states, uint64_t max,
+uint64_t trunk_connection_requests_requeue(trunk_connection_t *tconn, int states, uint64_t max,
bool fail_bound) CC_HINT(nonnull);
-void fr_trunk_request_free(fr_trunk_request_t **treq);
+void trunk_request_free(trunk_request_t **treq);
-fr_trunk_request_t *fr_trunk_request_alloc(fr_trunk_t *trunk, request_t *request) CC_HINT(nonnull(1));
+trunk_request_t *trunk_request_alloc(trunk_t *trunk, request_t *request) CC_HINT(nonnull(1));
-fr_trunk_enqueue_t fr_trunk_request_enqueue(fr_trunk_request_t **treq, fr_trunk_t *trunk, request_t *request,
+trunk_enqueue_t trunk_request_enqueue(trunk_request_t **treq, trunk_t *trunk, request_t *request,
void *preq, void *rctx) CC_HINT(nonnull(2));
-fr_trunk_enqueue_t fr_trunk_request_requeue(fr_trunk_request_t *treq) CC_HINT(nonnull);
+trunk_enqueue_t trunk_request_requeue(trunk_request_t *treq) CC_HINT(nonnull);
-fr_trunk_enqueue_t fr_trunk_request_enqueue_on_conn(fr_trunk_request_t **treq_out, fr_trunk_connection_t *tconn,
+trunk_enqueue_t trunk_request_enqueue_on_conn(trunk_request_t **treq_out, trunk_connection_t *tconn,
request_t *request, void *preq, void *rctx,
bool ignore_limits) CC_HINT(nonnull(2));
#ifndef NDEBUG
-void fr_trunk_request_state_log(fr_log_t const *log, fr_log_type_t log_type, char const *file, int line,
- fr_trunk_request_t const *treq);
+void trunk_request_state_log(fr_log_t const *log, fr_log_type_t log_type, char const *file, int line,
+ trunk_request_t const *treq);
#endif
/** @} */
/** @name Dequeue protocol requests and cancellations
* @{
*/
-int fr_trunk_connection_pop_cancellation(fr_trunk_request_t **treq_out, fr_trunk_connection_t *tconn);
+int trunk_connection_pop_cancellation(trunk_request_t **treq_out, trunk_connection_t *tconn);
-int fr_trunk_connection_pop_request(fr_trunk_request_t **treq_out, fr_trunk_connection_t *tconn);
+int trunk_connection_pop_request(trunk_request_t **treq_out, trunk_connection_t *tconn);
/** @} */
/** @name Connection state signalling
* - writable - The connection is writable (the muxer will be called).
* - readable - The connection is readable (the demuxer will be called).
* - reconnect - The connection is likely bad and should be reconnected.
- * If the code signalling has access to the conn, fr_connection_signal_reconnect
- * can be used instead of fr_trunk_connection_signal_reconnect.
+ * If the code signalling has access to the conn, connection_signal_reconnect
+ * can be used instead of trunk_connection_signal_reconnect.
*
* The following states are signalled to control whether a connection may be
* assigned new requests:
* max_req_per_conn.
*
* For other connection states the trunk API should not be signalled directly.
- * It will be informed by "watch" callbacks inserted into the #fr_connection_t as
+ * It will be informed by "watch" callbacks inserted into the #connection_t as
* to when the connection changes state.
*
- * #fr_trunk_connection_signal_active does not need to be called in any of the
- * #fr_connection_t state callbacks. It is only used to activate a connection
+ * #trunk_connection_signal_active does not need to be called in any of the
+ * #connection_t state callbacks. It is only used to activate a connection
* which has been previously marked inactive using
- * #fr_trunk_connection_signal_inactive.
+ * #trunk_connection_signal_inactive.
*
- * If #fr_trunk_connection_signal_inactive is being used to remove a congested
+ * If #trunk_connection_signal_inactive is being used to remove a congested
* connection from the active list (i.e. on receipt of an explicit protocol level
- * congestion notification), consider calling #fr_trunk_connection_requests_requeue
- * with the FR_TRUNK_REQUEST_STATE_PENDING state to redistribute that connection's
+ * congestion notification), consider calling #trunk_connection_requests_requeue
+ * with the TRUNK_REQUEST_STATE_PENDING state to redistribute that connection's
* backlog to other connections in the trunk.
*
* @{
*/
-void fr_trunk_connection_signal_writable(fr_trunk_connection_t *tconn) CC_HINT(nonnull);
+void trunk_connection_signal_writable(trunk_connection_t *tconn) CC_HINT(nonnull);
-void fr_trunk_connection_signal_readable(fr_trunk_connection_t *tconn) CC_HINT(nonnull);
+void trunk_connection_signal_readable(trunk_connection_t *tconn) CC_HINT(nonnull);
-void fr_trunk_connection_signal_inactive(fr_trunk_connection_t *tconn) CC_HINT(nonnull);
+void trunk_connection_signal_inactive(trunk_connection_t *tconn) CC_HINT(nonnull);
-void fr_trunk_connection_signal_active(fr_trunk_connection_t *tconn) CC_HINT(nonnull);
+void trunk_connection_signal_active(trunk_connection_t *tconn) CC_HINT(nonnull);
-void fr_trunk_connection_signal_reconnect(fr_trunk_connection_t *tconn, fr_connection_reason_t reason) CC_HINT(nonnull);
+void trunk_connection_signal_reconnect(trunk_connection_t *tconn, connection_reason_t reason) CC_HINT(nonnull);
-bool fr_trunk_connection_in_state(fr_trunk_connection_t *tconn, int state);
+bool trunk_connection_in_state(trunk_connection_t *tconn, int state);
/** @} */
/** @name Connection Callbacks
* @{
*/
-void fr_trunk_connection_callback_writable(fr_event_list_t *el, int fd, int flags, void *uctx);
+void trunk_connection_callback_writable(fr_event_list_t *el, int fd, int flags, void *uctx);
-void fr_trunk_connection_callback_readable(fr_event_list_t *el, int fd, int flags, void *uctx);
+void trunk_connection_callback_readable(fr_event_list_t *el, int fd, int flags, void *uctx);
/** @} */
/** @name Connection management
* @{
*/
-void fr_trunk_reconnect(fr_trunk_t *trunk, int state, fr_connection_reason_t reason) CC_HINT(nonnull);
+void trunk_reconnect(trunk_t *trunk, int state, connection_reason_t reason) CC_HINT(nonnull);
/** @} */
/** @name Trunk allocation
* @{
*/
-int fr_trunk_start(fr_trunk_t *trunk) CC_HINT(nonnull);
+int trunk_start(trunk_t *trunk) CC_HINT(nonnull);
-void fr_trunk_connection_manage_start(fr_trunk_t *trunk) CC_HINT(nonnull);
+void trunk_connection_manage_start(trunk_t *trunk) CC_HINT(nonnull);
-void fr_trunk_connection_manage_stop(fr_trunk_t *trunk) CC_HINT(nonnull);
+void trunk_connection_manage_stop(trunk_t *trunk) CC_HINT(nonnull);
-int fr_trunk_connection_manage_schedule(fr_trunk_t *trunk) CC_HINT(nonnull);
+int trunk_connection_manage_schedule(trunk_t *trunk) CC_HINT(nonnull);
-fr_trunk_t *fr_trunk_alloc(TALLOC_CTX *ctx, fr_event_list_t *el,
- fr_trunk_io_funcs_t const *funcs, fr_trunk_conf_t const *conf,
+trunk_t *trunk_alloc(TALLOC_CTX *ctx, fr_event_list_t *el,
+ trunk_io_funcs_t const *funcs, trunk_conf_t const *conf,
char const *log_prefix, void const *uctx, bool delay_start) CC_HINT(nonnull(2, 3, 4));
/** @} */
/** @name Watchers
* @{
*/
-fr_trunk_watch_entry_t *fr_trunk_add_watch(fr_trunk_t *trunk, fr_trunk_state_t state,
- fr_trunk_watch_t watch, bool oneshot, void const *uctx) CC_HINT(nonnull(1));
+trunk_watch_entry_t *trunk_add_watch(trunk_t *trunk, trunk_state_t state,
+ trunk_watch_t watch, bool oneshot, void const *uctx) CC_HINT(nonnull(1));
-int fr_trunk_del_watch(fr_trunk_t *trunk, fr_trunk_state_t state, fr_trunk_watch_t watch);
+int trunk_del_watch(trunk_t *trunk, trunk_state_t state, trunk_watch_t watch);
/** @} */
#ifndef TALLOC_GET_TYPE_ABORT_NOOP
-void CC_HINT(nonnull(1)) fr_trunk_verify(char const *file, int line, fr_trunk_t *trunk);
-void CC_HINT(nonnull(1)) fr_trunk_connection_verify(char const *file, int line, fr_trunk_connection_t *tconn);
-void CC_HINT(nonnull(1)) fr_trunk_request_verify(char const *file, int line, fr_trunk_request_t *treq);
+void CC_HINT(nonnull(1)) trunk_verify(char const *file, int line, trunk_t *trunk);
+void CC_HINT(nonnull(1)) trunk_connection_verify(char const *file, int line, trunk_connection_t *tconn);
+void CC_HINT(nonnull(1)) trunk_request_verify(char const *file, int line, trunk_request_t *treq);
-# define FR_TRUNK_VERIFY(_trunk) fr_trunk_verify(__FILE__, __LINE__, _trunk)
-# define FR_TRUNK_CONNECTION_VERIFY(_tconn) fr_trunk_connection_verify(__FILE__, __LINE__, _tconn)
-# define FR_TRUNK_REQUEST_VERIFY(_treq) fr_trunk_request_verify(__FILE__, __LINE__, _treq)
+# define TRUNK_VERIFY(_trunk) trunk_verify(__FILE__, __LINE__, _trunk)
+# define TRUNK_CONNECTION_VERIFY(_tconn) trunk_connection_verify(__FILE__, __LINE__, _tconn)
+# define TRUNK_REQUEST_VERIFY(_treq) trunk_request_verify(__FILE__, __LINE__, _treq)
#elif !defined(NDEBUG)
-# define FR_TRUNK_VERIFY(_trunk) fr_assert(_trunk)
-# define FR_TRUNK_CONNECTION_VERIFY(_tconn) fr_assert(_tconn)
-# define FR_TRUNK_REQUEST_VERIFY(_treq) fr_assert(_treq)
+# define TRUNK_VERIFY(_trunk) fr_assert(_trunk)
+# define TRUNK_CONNECTION_VERIFY(_tconn) fr_assert(_tconn)
+# define TRUNK_REQUEST_VERIFY(_treq) fr_assert(_treq)
#else
-# define FR_TRUNK_VERIFY(_trunk)
-# define FR_TRUNK_CONNECTION_VERIFY(_tconn)
-# define FR_TRUNK_REQUEST_VERIFY(_treq)
+# define TRUNK_VERIFY(_trunk)
+# define TRUNK_CONNECTION_VERIFY(_tconn)
+# define TRUNK_REQUEST_VERIFY(_treq)
#endif
-bool fr_trunk_search(fr_trunk_t *trunk, void *ptr);
-bool fr_trunk_connection_search(fr_trunk_connection_t *tconn, void *ptr);
-bool fr_trunk_request_search(fr_trunk_request_t *treq, void *ptr);
+bool trunk_search(trunk_t *trunk, void *ptr);
+bool trunk_connection_search(trunk_connection_t *tconn, void *ptr);
+bool trunk_request_search(trunk_request_t *treq, void *ptr);
#undef _CONST
*
* Copyright 2020 Arran Cudbard-Bell (a.cudbardb@freeradius.org)
*/
-digraph fr_trunk_connection_t_states {
- label = "fr_trunk_connection_t states"
+digraph trunk_connection_t_states {
+ label = "trunk_connection_t states"
condition_key [
shape=plaintext
label=<
* Copyright 2023 Network RADIUS SARL (legal@networkradius.com)
*/
- digraph fr_trunk_request_t_states {
- label = "fr_trunk_request_t states"
+ digraph trunk_request_t_states {
+ label = "trunk_request_t states"
condition_key [
shape=plaintext
label=<
<table border='0'>
<tr><td align='left'>[ no_cancel_mux ]</td><td align='left'> [ !treq->pub.trunk->funcs.request_cancel_mux ]</td></tr>
- <tr><td align='left'>[ enqueue_backlog ]</td><td align='left'> { trunk_request_check_enqueue() == FR_TRUNK_ENQUEUE_IN_BACKLOG ]</td></tr>
- <tr><td align='left'>[ enqueue_pending ]</td><td align='left'> { trunk_request_check_enqueue() == FR_TRUNK_ENQUEUE_PENDING ]</td></tr>
+	<tr><td align='left'>[ enqueue_backlog ]</td><td align='left'> [ trunk_request_check_enqueue() == TRUNK_ENQUEUE_IN_BACKLOG ]</td></tr>
+	<tr><td align='left'>[ enqueue_pending ]</td><td align='left'> [ trunk_request_check_enqueue() == TRUNK_ENQUEUE_PENDING ]</td></tr>
<tr><td align='left'>[ on_connection ]</td><td align='left'> { treq->pub.tconn == tconn ]</td></tr>
<tr><td align='left'>[ not_too_many ]</td><td align='left'> { count < max ]</td></tr>
<tr><td align='left'>[ in_states ]</td><td align='left'> { treq->pub.state & states ]</td></tr>
{backlog, pending, cancel, cancel_partial, cancel_sent} -> unassigned [ label = "trunk_connection_requests_dequeue(); [ dequeueable ]" ]
- cancel -> cancel_partial [ label = "fr_trunk_request_signal_cancel_partial()" ]
+ cancel -> cancel_partial [ label = "trunk_request_signal_cancel_partial()" ]
{cancel, cancel_partial} -> cancel_sent [ label = "trunk_request_signal_sent()" ]
- {cancel, cancel_sent} -> cancel_complete [ label = "fr_trunk_request_signal_cancel_complete(()" ]
+	{cancel, cancel_sent} -> cancel_complete [ label = "trunk_request_signal_cancel_complete()" ]
- init -> backlog [ label = "fr_trunk_request_enqueue(); [ enqueue_backlog ]" ]
- init -> pending [ label = "fr_trunk_request_enqueue(); [ enqueue_pending ]" ]
+ init -> backlog [ label = "trunk_request_enqueue(); [ enqueue_backlog ]" ]
+ init -> pending [ label = "trunk_request_enqueue(); [ enqueue_pending ]" ]
- {pending, partial} -> sent [ label = "fr_trunk_request_signal_sent()" ]
+ {pending, partial} -> sent [ label = "trunk_request_signal_sent()" ]
- {partial, sent} -> cancel [label = "fr_trunk_request_requeue()", style = dashed, color = red]
+ {partial, sent} -> cancel [label = "trunk_request_requeue()", style = dashed, color = red]
cancel -> pending [style = dashed, color = red]
- {partial, sent} -> cancel [label = "fr_trunk_request_signal_cancel()", style = dashed, color = blue]
+ {partial, sent} -> cancel [label = "trunk_request_signal_cancel()", style = dashed, color = blue]
cancel -> unassigned [label = "no_cancel_mux", style = dashed, color = blue]
- {sent, pending} -> complete [ label = "fr_trunk_request_signal_complete()" ]
+ {sent, pending} -> complete [ label = "trunk_request_signal_complete()" ]
- pending -> partial [ label = "fr_trunk_request_signal_partial()" ]
+ pending -> partial [ label = "trunk_request_signal_partial()" ]
}
//#include <gperftools/profiler.h>
typedef struct {
- fr_trunk_request_t *treq; //!< Trunk request.
+ trunk_request_t *treq; //!< Trunk request.
bool cancelled; //!< Seen by the cancelled callback.
bool completed; //!< Seen by the complete callback.
bool failed; //!< Seen by the failed callback.
#define DEBUG_LVL_SET if (acutest_verbose_level_ >= 3) fr_debug_lvl = L_DBG_LVL_4 + 1
-static void test_mux(UNUSED fr_event_list_t *el, fr_trunk_connection_t *tconn, fr_connection_t *conn, UNUSED void *uctx)
+static void test_mux(UNUSED fr_event_list_t *el, trunk_connection_t *tconn, connection_t *conn, UNUSED void *uctx)
{
- fr_trunk_request_t *treq;
+ trunk_request_t *treq;
size_t count = 0;
int fd = *(talloc_get_type_abort(conn->h, int));
ssize_t slen;
- while (fr_trunk_connection_pop_request(&treq, tconn) == 0) {
+ while (trunk_connection_pop_request(&treq, tconn) == 0) {
test_proto_request_t *preq = treq->pub.preq;
count++;
* Simulate a partial write
*/
if (preq && preq->signal_partial) {
- fr_trunk_request_signal_partial(treq);
+ trunk_request_signal_partial(treq);
preq->signal_partial = false;
break;
}
if (slen == 0) return;
if (slen < (ssize_t)sizeof(preq)) abort();
- fr_trunk_request_signal_sent(treq);
+ trunk_request_signal_sent(treq);
}
TEST_CHECK(count > 0);
}
-static void test_cancel_mux(UNUSED fr_event_list_t *el, fr_trunk_connection_t *tconn, fr_connection_t *conn, UNUSED void *uctx)
+static void test_cancel_mux(UNUSED fr_event_list_t *el, trunk_connection_t *tconn, connection_t *conn, UNUSED void *uctx)
{
- fr_trunk_request_t *treq;
+ trunk_request_t *treq;
size_t count = 0;
int fd = *(talloc_get_type_abort(conn->h, int));
ssize_t slen;
/*
* For cancellation we just do
*/
- while ((fr_trunk_connection_pop_cancellation(&treq, tconn) == 0)) {
+ while ((trunk_connection_pop_cancellation(&treq, tconn) == 0)) {
test_proto_request_t *preq = treq->pub.preq;
count++;
* Simulate a partial cancel write
*/
if (preq && preq->signal_cancel_partial) {
- fr_trunk_request_signal_cancel_partial(treq);
+ trunk_request_signal_cancel_partial(treq);
preq->signal_cancel_partial = false;
break;
}
if (slen == 0) return;
if (slen < (ssize_t)sizeof(preq)) abort();
- fr_trunk_request_signal_cancel_sent(treq);
+ trunk_request_signal_cancel_sent(treq);
}
TEST_CHECK(count > 0);
}
-static void test_demux(UNUSED fr_event_list_t *el, UNUSED fr_trunk_connection_t *tconn, fr_connection_t *conn, UNUSED void *uctx)
+static void test_demux(UNUSED fr_event_list_t *el, UNUSED trunk_connection_t *tconn, connection_t *conn, UNUSED void *uctx)
{
int fd = *(talloc_get_type_abort(conn->h, int));
test_proto_request_t *preq;
* Demuxer can handle both normal requests and cancelled ones
*/
switch (preq->treq->pub.state) {
- case FR_TRUNK_REQUEST_STATE_CANCEL:
+ case TRUNK_REQUEST_STATE_CANCEL:
break; /* Hack - just ignore it */
- case FR_TRUNK_REQUEST_STATE_CANCEL_SENT:
+ case TRUNK_REQUEST_STATE_CANCEL_SENT:
/* coverity[tainted_data] */
- fr_trunk_request_signal_cancel_complete(preq->treq);
+ trunk_request_signal_cancel_complete(preq->treq);
break;
- case FR_TRUNK_REQUEST_STATE_SENT:
+ case TRUNK_REQUEST_STATE_SENT:
/* coverity[tainted_data] */
- fr_trunk_request_signal_complete(preq->treq);
+ trunk_request_signal_complete(preq->treq);
break;
default:
UNUSED int fd_errno, void *uctx)
{
- fr_trunk_connection_t *tconn = talloc_get_type_abort(uctx, fr_trunk_connection_t);
+ trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
- fr_trunk_connection_signal_reconnect(tconn, FR_CONNECTION_FAILED);
+	trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED);
}
static void _conn_io_read(UNUSED fr_event_list_t *el, UNUSED int fd, UNUSED int flags, void *uctx)
{
- fr_trunk_connection_t *tconn = talloc_get_type_abort(uctx, fr_trunk_connection_t);
- fr_trunk_connection_signal_readable(tconn);
+ trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
+ trunk_connection_signal_readable(tconn);
}
static void _conn_io_write(UNUSED fr_event_list_t *el, UNUSED int fd, UNUSED int flags, void *uctx)
{
- fr_trunk_connection_t *tconn = talloc_get_type_abort(uctx, fr_trunk_connection_t);
- fr_trunk_connection_signal_writable(tconn);
+ trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
+ trunk_connection_signal_writable(tconn);
}
-static void _conn_notify(fr_trunk_connection_t *tconn, fr_connection_t *conn,
+static void _conn_notify(trunk_connection_t *tconn, connection_t *conn,
fr_event_list_t *el,
- fr_trunk_connection_event_t notify_on, UNUSED void *uctx)
+ trunk_connection_event_t notify_on, UNUSED void *uctx)
{
int fd = *(talloc_get_type_abort(conn->h, int));
switch (notify_on) {
- case FR_TRUNK_CONN_EVENT_NONE:
+ case TRUNK_CONN_EVENT_NONE:
fr_event_fd_delete(el, fd, FR_EVENT_FILTER_IO);
break;
- case FR_TRUNK_CONN_EVENT_READ:
+ case TRUNK_CONN_EVENT_READ:
TEST_CHECK(fr_event_fd_insert(conn, NULL, el, fd, _conn_io_read, NULL, _conn_io_error, tconn) == 0);
break;
- case FR_TRUNK_CONN_EVENT_WRITE:
+ case TRUNK_CONN_EVENT_WRITE:
TEST_CHECK(fr_event_fd_insert(conn, NULL, el, fd, NULL, _conn_io_write, _conn_io_error, tconn) == 0);
break;
- case FR_TRUNK_CONN_EVENT_BOTH:
+ case TRUNK_CONN_EVENT_BOTH:
TEST_CHECK(fr_event_fd_insert(conn, NULL, el, fd, _conn_io_read, _conn_io_write, _conn_io_error, tconn) == 0);
break;
}
}
-static void test_request_cancel(UNUSED fr_connection_t *conn, void *preq,
- UNUSED fr_trunk_cancel_reason_t reason, void *uctx)
+static void test_request_cancel(UNUSED connection_t *conn, void *preq,
+ UNUSED trunk_cancel_reason_t reason, void *uctx)
{
test_proto_stats_t *stats = uctx;
test_proto_request_t *our_preq;
if (stats) stats->completed++;
}
-static void test_request_fail(UNUSED request_t *request, void *preq, UNUSED void *rctx, UNUSED fr_trunk_request_state_t state, void *uctx)
+static void test_request_fail(UNUSED request_t *request, void *preq, UNUSED void *rctx, UNUSED trunk_request_state_t state, void *uctx)
{
test_proto_stats_t *stats = uctx;
test_proto_request_t *our_preq;
/** Insert I/O handlers that loop any data back round
*
*/
-static fr_connection_state_t _conn_open(fr_event_list_t *el, void *h, UNUSED void *uctx)
+static connection_state_t _conn_open(fr_event_list_t *el, void *h, UNUSED void *uctx)
{
int *our_h = talloc_get_type_abort(h, int);
*/
TEST_CHECK(fr_event_fd_insert(our_h, NULL, el, our_h[1], _conn_io_loopback, NULL, NULL, our_h) == 0);
- return FR_CONNECTION_STATE_CONNECTED;
+	return CONNECTION_STATE_CONNECTED;
}
/** Allocate a basic socket pair
*
*/
-static fr_connection_state_t _conn_init(void **h_out, fr_connection_t *conn, UNUSED void *uctx)
+static connection_state_t _conn_init(void **h_out, connection_t *conn, UNUSED void *uctx)
{
int *h;
fr_nonblock(h[0]);
fr_nonblock(h[1]);
- fr_connection_signal_on_fd(conn, h[0]);
+ connection_signal_on_fd(conn, h[0]);
*h_out = h;
- return FR_CONNECTION_STATE_CONNECTING;
+	return CONNECTION_STATE_CONNECTING;
}
-static fr_connection_t *test_setup_socket_pair_connection_alloc(fr_trunk_connection_t *tconn,
+static connection_t *test_setup_socket_pair_connection_alloc(trunk_connection_t *tconn,
fr_event_list_t *el,
- fr_connection_conf_t const *conn_conf,
+ connection_conf_t const *conn_conf,
char const *log_prefix, UNUSED void *uctx)
{
- fr_connection_conf_t cstat;
+ connection_conf_t cstat;
if (!conn_conf) {
memset(&cstat, 0, sizeof(cstat));
conn_conf = &cstat;
}
- return fr_connection_alloc(tconn, el,
- &(fr_connection_funcs_t){
+ return connection_alloc(tconn, el,
+ &(connection_funcs_t){
.init = _conn_init,
.open = _conn_open,
.close = _conn_close
return CMP(preq_a->priority, preq_b->priority);
}
-static fr_trunk_t *test_setup_trunk(TALLOC_CTX *ctx, fr_event_list_t *el, fr_trunk_conf_t *conf, bool with_cancel_mux, void *uctx)
+static trunk_t *test_setup_trunk(TALLOC_CTX *ctx, fr_event_list_t *el, trunk_conf_t *conf, bool with_cancel_mux, void *uctx)
{
- fr_trunk_io_funcs_t io_funcs = {
+ trunk_io_funcs_t io_funcs = {
.connection_alloc = test_setup_socket_pair_connection_alloc,
.connection_notify = _conn_notify,
.request_prioritise = test_preq_cmp,
*/
if (with_cancel_mux) io_funcs.request_cancel_mux = test_cancel_mux;
- return fr_trunk_alloc(ctx, el, &io_funcs, conf, "test_socket_pair", uctx, false);
+ return trunk_alloc(ctx, el, &io_funcs, conf, "test_socket_pair", uctx, false);
}
static void test_socket_pair_alloc_then_free(void)
{
TALLOC_CTX *ctx = talloc_init_const("test");
- fr_trunk_t *trunk;
+ trunk_t *trunk;
fr_event_list_t *el;
int events;
- fr_trunk_conf_t conf = {
+ trunk_conf_t conf = {
.start = 2,
.min = 2
};
- fr_trunk_io_funcs_t io_funcs = {
+ trunk_io_funcs_t io_funcs = {
.connection_alloc = test_setup_socket_pair_connection_alloc,
.request_prioritise = fr_pointer_cmp,
};
fr_event_list_set_time_func(el, test_time);
- trunk = fr_trunk_alloc(ctx, el, &io_funcs, &conf, "test_socket_pair", NULL, false);
+ trunk = trunk_alloc(ctx, el, &io_funcs, &conf, "test_socket_pair", NULL, false);
TEST_CHECK(trunk != NULL);
if (!trunk) return;
- TEST_CHECK(fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_CONNECTING) == 2);
+ TEST_CHECK(trunk_connection_count_by_state(trunk, TRUNK_CONN_CONNECTING) == 2);
events = fr_event_corral(el, test_time_base, true);
TEST_CHECK(events == 2); /* Two I/O write events, no timers */
fr_event_service(el);
- TEST_CHECK(fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_ACTIVE) == 2);
+ TEST_CHECK(trunk_connection_count_by_state(trunk, TRUNK_CONN_ACTIVE) == 2);
events = fr_event_corral(el, test_time_base, false);
TEST_CHECK(events == 0); /* I/O events should have been cleared */
static void test_socket_pair_alloc_then_reconnect_then_free(void)
{
TALLOC_CTX *ctx = talloc_init_const("test");
- fr_trunk_t *trunk;
+ trunk_t *trunk;
fr_event_list_t *el;
int events;
- fr_trunk_conf_t conf = {
+ trunk_conf_t conf = {
.start = 2,
.min = 2,
- .conn_conf = &(fr_connection_conf_t){
+ .conn_conf = &(connection_conf_t){
.reconnection_delay = fr_time_delta_from_nsec(NSEC / 2)
}
};
- fr_trunk_io_funcs_t io_funcs = {
+ trunk_io_funcs_t io_funcs = {
.connection_alloc = test_setup_socket_pair_connection_alloc,
.request_prioritise = fr_pointer_cmp,
};
fr_event_list_set_time_func(el, test_time);
- trunk = fr_trunk_alloc(ctx, el, &io_funcs, &conf, "test_socket_pair", NULL, false);
+ trunk = trunk_alloc(ctx, el, &io_funcs, &conf, "test_socket_pair", NULL, false);
TEST_CHECK(trunk != NULL);
if (!trunk) return;
events = fr_event_corral(el, test_time_base, true);
TEST_CHECK(events == 2); /* Two I/O write events, no timers */
- TEST_CHECK(fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_CONNECTING) == 2);
+ TEST_CHECK(trunk_connection_count_by_state(trunk, TRUNK_CONN_CONNECTING) == 2);
fr_event_service(el);
- TEST_CHECK(fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_ACTIVE) == 2);
+ TEST_CHECK(trunk_connection_count_by_state(trunk, TRUNK_CONN_ACTIVE) == 2);
events = fr_event_corral(el, test_time_base, false);
TEST_CHECK(events == 0); /* I/O events should have been cleared */
TEST_MSG("Got %u events", events);
- fr_trunk_reconnect(trunk, FR_TRUNK_CONN_ACTIVE, FR_CONNECTION_FAILED);
+	trunk_reconnect(trunk, TRUNK_CONN_ACTIVE, CONNECTION_FAILED);
test_time_base = fr_time_add_time_delta(test_time_base, fr_time_delta_from_sec(2));
events = fr_event_corral(el, test_time_base, true);
TEST_MSG("Got %u events", events);
fr_event_service(el);
- TEST_CHECK(fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_CONNECTING) == 2);
+ TEST_CHECK(trunk_connection_count_by_state(trunk, TRUNK_CONN_CONNECTING) == 2);
events = fr_event_corral(el, test_time_base, true);
TEST_CHECK(events == 2); /* Two I/O write events, no timers */
fr_event_service(el);
- TEST_CHECK(fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_ACTIVE) == 2);
+ TEST_CHECK(trunk_connection_count_by_state(trunk, TRUNK_CONN_ACTIVE) == 2);
events = fr_event_corral(el, test_time_base, false);
TEST_CHECK(events == 0); /* I/O events should have been cleared */
talloc_free(ctx);
}
-static fr_connection_state_t _conn_init_no_signal(void **h_out, fr_connection_t *conn, UNUSED void *uctx)
+static connection_state_t _conn_init_no_signal(void **h_out, connection_t *conn, UNUSED void *uctx)
{
int *h;
socketpair(AF_UNIX, SOCK_STREAM, 0, h);
*h_out = h;
- return FR_CONNECTION_STATE_CONNECTING;
+	return CONNECTION_STATE_CONNECTING;
}
-static fr_connection_t *test_setup_socket_pair_1s_timeout_connection_alloc(fr_trunk_connection_t *tconn,
+static connection_t *test_setup_socket_pair_1s_timeout_connection_alloc(trunk_connection_t *tconn,
fr_event_list_t *el,
- UNUSED fr_connection_conf_t const *conf,
+ UNUSED connection_conf_t const *conf,
char const *log_prefix, void *uctx)
{
- return fr_connection_alloc(tconn, el,
- &(fr_connection_funcs_t){
+ return connection_alloc(tconn, el,
+ &(connection_funcs_t){
.init = _conn_init_no_signal,
.open = _conn_open,
.close = _conn_close
},
- &(fr_connection_conf_t){
+ &(connection_conf_t){
.connection_timeout = fr_time_delta_from_sec(1),
.reconnection_delay = fr_time_delta_from_sec(1)
},
static void test_socket_pair_alloc_then_connect_timeout(void)
{
TALLOC_CTX *ctx = talloc_init_const("test");
- fr_trunk_t *trunk;
+ trunk_t *trunk;
fr_event_list_t *el;
int events;
- fr_trunk_connection_t *tconn;
- fr_trunk_conf_t conf = {
+ trunk_connection_t *tconn;
+ trunk_conf_t conf = {
.start = 1,
.min = 1
};
- fr_trunk_io_funcs_t io_funcs = {
+ trunk_io_funcs_t io_funcs = {
.connection_alloc = test_setup_socket_pair_1s_timeout_connection_alloc,
.request_prioritise = fr_pointer_cmp,
};
fr_event_list_set_time_func(el, test_time);
- trunk = fr_trunk_alloc(ctx, el, &io_funcs, &conf, "test_socket_pair", NULL, false);
+ trunk = trunk_alloc(ctx, el, &io_funcs, &conf, "test_socket_pair", NULL, false);
TEST_CHECK(trunk != NULL);
if (!trunk) return;
TEST_CHECK(tconn != NULL);
if (tconn == NULL) return;
- TEST_CHECK(fr_connection_get_num_timed_out(tconn->pub.conn) == 0);
- TEST_CHECK(fr_connection_get_num_reconnected(tconn->pub.conn) == 0);
+ TEST_CHECK(connection_get_num_timed_out(tconn->pub.conn) == 0);
+ TEST_CHECK(connection_get_num_reconnected(tconn->pub.conn) == 0);
/*
* Timeout should now fire
/*
* Connection delay not implemented for timed out connections
*/
- TEST_CHECK(fr_connection_get_num_timed_out(tconn->pub.conn) == 1);
- TEST_CHECK(fr_connection_get_num_reconnected(tconn->pub.conn) == 1);
+ TEST_CHECK(connection_get_num_timed_out(tconn->pub.conn) == 1);
+ TEST_CHECK(connection_get_num_reconnected(tconn->pub.conn) == 1);
events = fr_event_corral(el, test_time_base, false);
TEST_CHECK(events == 0); /* I/O events should have been cleared */
talloc_free(ctx);
}
-static fr_connection_t *test_setup_socket_pair_1s_reconnection_delay_alloc(fr_trunk_connection_t *tconn,
+static connection_t *test_setup_socket_pair_1s_reconnection_delay_alloc(trunk_connection_t *tconn,
fr_event_list_t *el,
- UNUSED fr_connection_conf_t const *conn_conf,
+ UNUSED connection_conf_t const *conn_conf,
char const *log_prefix, void *uctx)
{
- return fr_connection_alloc(tconn, el,
- &(fr_connection_funcs_t){
+ return connection_alloc(tconn, el,
+ &(connection_funcs_t){
.init = _conn_init,
.open = _conn_open,
.close = _conn_close
},
- &(fr_connection_conf_t){
+ &(connection_conf_t){
.connection_timeout = fr_time_delta_from_sec(1),
.reconnection_delay = fr_time_delta_from_sec(1)
},
static void test_socket_pair_alloc_then_reconnect_check_delay(void)
{
TALLOC_CTX *ctx = talloc_init_const("test");
- fr_trunk_t *trunk;
+ trunk_t *trunk;
fr_event_list_t *el;
int events;
- fr_trunk_connection_t *tconn;
- fr_trunk_conf_t conf = {
+ trunk_connection_t *tconn;
+ trunk_conf_t conf = {
.start = 1,
.min = 1,
- .conn_conf = &(fr_connection_conf_t){
+ .conn_conf = &(connection_conf_t){
.reconnection_delay = fr_time_delta_from_sec(1),
.connection_timeout = fr_time_delta_from_sec(1)
}
};
- fr_trunk_io_funcs_t io_funcs = {
+ trunk_io_funcs_t io_funcs = {
.connection_alloc = test_setup_socket_pair_1s_reconnection_delay_alloc,
.request_prioritise = fr_pointer_cmp,
};
el = fr_event_list_alloc(ctx, NULL, NULL);
fr_event_list_set_time_func(el, test_time);
- trunk = fr_trunk_alloc(ctx, el, &io_funcs, &conf, "test_socket_pair", NULL, false);
+ trunk = trunk_alloc(ctx, el, &io_funcs, &conf, "test_socket_pair", NULL, false);
TEST_CHECK(trunk != NULL);
if (!trunk) return;
TEST_CHECK(tconn != NULL);
if (tconn == NULL) return;
- TEST_CHECK(fr_connection_get_num_timed_out(tconn->pub.conn) == 0);
- TEST_CHECK(fr_connection_get_num_reconnected(tconn->pub.conn) == 0);
+ TEST_CHECK(connection_get_num_timed_out(tconn->pub.conn) == 0);
+ TEST_CHECK(connection_get_num_reconnected(tconn->pub.conn) == 0);
/*
* Trigger reconnection
*/
- fr_connection_signal_reconnect(tconn->pub.conn, FR_CONNECTION_FAILED);
+ connection_signal_reconnect(tconn->pub.conn, CONNECTION_FAILED);
test_time_base = fr_time_add_time_delta(test_time_base, fr_time_delta_from_nsec(NSEC * 0.5));
events = fr_event_corral(el, test_time_base, false);
fr_event_service(el); /* Services the timer, which then triggers init */
- TEST_CHECK(fr_connection_get_num_timed_out(tconn->pub.conn) == 0);
- TEST_CHECK(fr_connection_get_num_reconnected(tconn->pub.conn) == 1);
+ TEST_CHECK(connection_get_num_timed_out(tconn->pub.conn) == 0);
+ TEST_CHECK(connection_get_num_reconnected(tconn->pub.conn) == 1);
events = fr_event_corral(el, test_time_base, true);
TEST_CHECK(events == 1); /* Should have a pending I/O event and a timer */
static void test_enqueue_basic(void)
{
TALLOC_CTX *ctx = talloc_init_const("test");
- fr_trunk_t *trunk;
+ trunk_t *trunk;
fr_event_list_t *el;
- fr_trunk_conf_t conf = {
+ trunk_conf_t conf = {
.start = 1,
.min = 1,
.manage_interval = fr_time_delta_from_nsec(NSEC * 0.5)
};
test_proto_request_t *preq;
- fr_trunk_request_t *treq = NULL;
- fr_trunk_enqueue_t rcode;
+ trunk_request_t *treq = NULL;
+ trunk_enqueue_t rcode;
DEBUG_LVL_SET;
* so the request should enter the
* backlog.
*/
- rcode = fr_trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
+ rcode = trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
preq->treq = treq;
- TEST_CHECK(rcode == FR_TRUNK_ENQUEUE_IN_BACKLOG);
+ TEST_CHECK(rcode == TRUNK_ENQUEUE_IN_BACKLOG);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_BACKLOG) == 1);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_BACKLOG) == 1);
/*
* Allow the connection to establish
fr_event_corral(el, test_time_base, false);
fr_event_service(el);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_BACKLOG) == 0);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_PENDING) == 1);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_BACKLOG) == 0);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_PENDING) == 1);
/*
* Should now be active and have a write event
* inserted into the event loop.
*/
- TEST_CHECK(fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_ACTIVE) == 1);
+ TEST_CHECK(trunk_connection_count_by_state(trunk, TRUNK_CONN_ACTIVE) == 1);
/*
* Trunk should be signalled the connection is
fr_event_corral(el, test_time_base, false);
fr_event_service(el);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_SENT) == 1);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_SENT) == 1);
/*
* Gives the loopback function a chance
static void test_enqueue_cancellation_points(void)
{
TALLOC_CTX *ctx = talloc_init_const("test");
- fr_trunk_t *trunk;
+ trunk_t *trunk;
fr_event_list_t *el;
- fr_trunk_conf_t conf = {
+ trunk_conf_t conf = {
.start = 1,
.min = 1,
.manage_interval = fr_time_delta_from_nsec(NSEC * 0.5)
};
test_proto_request_t *preq;
- fr_trunk_request_t *treq = NULL;
+ trunk_request_t *treq = NULL;
DEBUG_LVL_SET;
trunk = test_setup_trunk(ctx, el, &conf, false, NULL);
preq = talloc_zero(NULL, test_proto_request_t);
- fr_trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
+ trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
- TEST_CASE("cancellation via trunk free - FR_TRUNK_REQUEST_STATE_BACKLOG");
+ TEST_CASE("cancellation via trunk free - TRUNK_REQUEST_STATE_BACKLOG");
talloc_free(trunk);
TEST_CHECK(preq->completed == false);
TEST_CHECK(preq->failed == true);
TEST_CHECK(preq->freed == true);
talloc_free(preq);
- TEST_CASE("cancellation via signal - FR_TRUNK_REQUEST_STATE_BACKLOG");
+ TEST_CASE("cancellation via signal - TRUNK_REQUEST_STATE_BACKLOG");
trunk = test_setup_trunk(ctx, el, &conf, false, NULL);
preq = talloc_zero(NULL, test_proto_request_t);
treq = NULL;
- fr_trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
+ trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
preq->treq = treq;
- fr_trunk_request_signal_cancel(treq);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_ALL) == 0);
+ trunk_request_signal_cancel(treq);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_ALL) == 0);
TEST_CHECK(preq->completed == false);
TEST_CHECK(preq->failed == false); /* Request/rctx not guaranteed after signal, so can't call fail */
talloc_free(preq);
talloc_free(trunk);
- TEST_CASE("cancellation via trunk free - FR_TRUNK_REQUEST_STATE_PARTIAL");
+ TEST_CASE("cancellation via trunk free - TRUNK_REQUEST_STATE_PARTIAL");
trunk = test_setup_trunk(ctx, el, &conf, false, NULL);
preq = talloc_zero(NULL, test_proto_request_t);
preq->signal_partial = true;
treq = NULL;
- fr_trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
+ trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
preq->treq = treq;
fr_event_corral(el, test_time_base, false); /* Connect the connection */
fr_event_corral(el, test_time_base, false); /* Send the request */
fr_event_service(el);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_PARTIAL));
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_PARTIAL));
talloc_free(trunk);
TEST_CHECK(preq->freed == true);
talloc_free(preq);
- TEST_CASE("cancellation via signal - FR_TRUNK_REQUEST_STATE_PARTIAL");
+ TEST_CASE("cancellation via signal - TRUNK_REQUEST_STATE_PARTIAL");
trunk = test_setup_trunk(ctx, el, &conf, false, NULL);
preq = talloc_zero(NULL, test_proto_request_t);
preq->signal_partial = true;
treq = NULL;
- fr_trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
+ trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
preq->treq = treq;
fr_event_corral(el, test_time_base, false); /* Connect the connection */
fr_event_corral(el, test_time_base, false); /* Send the request */
fr_event_service(el);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_PARTIAL) == 1);
- fr_trunk_request_signal_cancel(treq);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_ALL) == 0);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_PARTIAL) == 1);
+ trunk_request_signal_cancel(treq);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_ALL) == 0);
TEST_CHECK(preq->completed == false);
TEST_CHECK(preq->failed == false); /* Request/rctx not guaranteed after signal, so can't call fail */
talloc_free(preq);
talloc_free(trunk);
- TEST_CASE("cancellation via trunk free - FR_TRUNK_REQUEST_STATE_SENT");
+ TEST_CASE("cancellation via trunk free - TRUNK_REQUEST_STATE_SENT");
trunk = test_setup_trunk(ctx, el, &conf, false, NULL);
preq = talloc_zero(NULL, test_proto_request_t);
treq = NULL;
- fr_trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
+ trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
preq->treq = treq;
fr_event_corral(el, test_time_base, false); /* Connect the connection */
fr_event_corral(el, test_time_base, false); /* Send the request */
fr_event_service(el);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_SENT) == 1);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_SENT) == 1);
talloc_free(trunk);
TEST_CHECK(preq->completed == false);
TEST_CHECK(preq->freed == true);
talloc_free(preq);
- TEST_CASE("cancellation via signal - FR_TRUNK_REQUEST_STATE_SENT");
+ TEST_CASE("cancellation via signal - TRUNK_REQUEST_STATE_SENT");
trunk = test_setup_trunk(ctx, el, &conf, false, NULL);
preq = talloc_zero(NULL, test_proto_request_t);
treq = NULL;
- fr_trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
+ trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
preq->treq = treq;
fr_event_corral(el, test_time_base, false); /* Connect the connection */
fr_event_corral(el, test_time_base, false); /* Send the request */
fr_event_service(el);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_SENT) == 1);
- fr_trunk_request_signal_cancel(treq);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_ALL) == 0);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_SENT) == 1);
+ trunk_request_signal_cancel(treq);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_ALL) == 0);
TEST_CHECK(preq->completed == false);
TEST_CHECK(preq->failed == false); /* Request/rctx not guaranteed after signal, so can't call fail */
talloc_free(preq);
talloc_free(trunk);
- TEST_CASE("cancellation via trunk free - FR_TRUNK_REQUEST_STATE_CANCEL_PARTIAL");
+ TEST_CASE("cancellation via trunk free - TRUNK_REQUEST_STATE_CANCEL_PARTIAL");
trunk = test_setup_trunk(ctx, el, &conf, true, NULL);
preq = talloc_zero(NULL, test_proto_request_t);
preq->signal_cancel_partial = true;
treq = NULL;
- fr_trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
+ trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
preq->treq = treq;
fr_event_corral(el, test_time_base, false); /* Connect the connection */
fr_event_corral(el, test_time_base, false); /* Send the request */
fr_event_service(el);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_SENT) == 1);
- fr_trunk_request_signal_cancel(treq);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_CANCEL) == 1);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_SENT) == 1);
+ trunk_request_signal_cancel(treq);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_CANCEL) == 1);
fr_event_corral(el, test_time_base, false); /* Send the cancellation request */
fr_event_service(el);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_CANCEL_PARTIAL) == 1);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_CANCEL_PARTIAL) == 1);
talloc_free(trunk);
TEST_CHECK(preq->freed == true);
talloc_free(preq);
- TEST_CASE("cancellation via trunk free - FR_TRUNK_REQUEST_STATE_CANCEL_SENT");
+ TEST_CASE("cancellation via trunk free - TRUNK_REQUEST_STATE_CANCEL_SENT");
trunk = test_setup_trunk(ctx, el, &conf, true, NULL);
preq = talloc_zero(NULL, test_proto_request_t);
treq = NULL;
- fr_trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
+ trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
preq->treq = treq;
fr_event_corral(el, test_time_base, false); /* Connect the connection */
fr_event_corral(el, test_time_base, false); /* Send the request */
fr_event_service(el);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_SENT) == 1);
- fr_trunk_request_signal_cancel(treq);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_CANCEL) == 1);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_SENT) == 1);
+ trunk_request_signal_cancel(treq);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_CANCEL) == 1);
fr_event_corral(el, test_time_base, false); /* Send the cancellation request */
fr_event_service(el);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_CANCEL_SENT) == 1);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_CANCEL_SENT) == 1);
talloc_free(trunk);
TEST_CHECK(preq->freed == true);
talloc_free(preq);
- TEST_CASE("trunk free after FR_TRUNK_REQUEST_STATE_CANCEL_COMPLETE");
+ TEST_CASE("trunk free after TRUNK_REQUEST_STATE_CANCEL_COMPLETE");
trunk = test_setup_trunk(ctx, el, &conf, true, NULL);
preq = talloc_zero(NULL, test_proto_request_t);
treq = NULL;
- fr_trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
+ trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
preq->treq = treq;
fr_event_corral(el, test_time_base, false); /* Connect the connection */
fr_event_corral(el, test_time_base, false); /* Send the request */
fr_event_service(el);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_SENT) == 1);
- fr_trunk_request_signal_cancel(treq);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_CANCEL) == 1);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_SENT) == 1);
+ trunk_request_signal_cancel(treq);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_CANCEL) == 1);
fr_event_corral(el, test_time_base, false); /* Send the cancellation request */
fr_event_service(el);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_CANCEL_SENT) == 1);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_CANCEL_SENT) == 1);
fr_event_corral(el, test_time_base, false); /* Loop the cancel request back round */
fr_event_service(el);
fr_event_corral(el, test_time_base, false); /* Read the cancel ACK (such that it is) */
fr_event_service(el);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_ALL) == 0);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_ALL) == 0);
talloc_free(trunk);
static void test_partial_to_complete_states(void)
{
TALLOC_CTX *ctx = talloc_init_const("test");
- fr_trunk_t *trunk;
+ trunk_t *trunk;
fr_event_list_t *el;
- fr_trunk_conf_t conf = {
+ trunk_conf_t conf = {
.start = 1,
.min = 1,
.manage_interval = fr_time_delta_from_nsec(NSEC * 0.5)
};
test_proto_request_t *preq;
- fr_trunk_request_t *treq = NULL;
+ trunk_request_t *treq = NULL;
DEBUG_LVL_SET;
preq->signal_partial = true;
preq->signal_cancel_partial = true;
- TEST_CASE("FR_TRUNK_REQUEST_STATE_PARTIAL -> FR_TRUNK_REQUEST_STATE_SENT");
+ TEST_CASE("TRUNK_REQUEST_STATE_PARTIAL -> TRUNK_REQUEST_STATE_SENT");
- fr_trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
+ trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
preq->treq = treq;
fr_event_corral(el, test_time_base, false); /* Connect the connection */
fr_event_corral(el, test_time_base, false); /* Send the request */
fr_event_service(el);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_PARTIAL) == 1);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_PARTIAL) == 1);
fr_event_corral(el, test_time_base, false); /* Complete the partial request */
fr_event_service(el);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_SENT) == 1);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_SENT) == 1);
- fr_trunk_request_signal_cancel(treq);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_CANCEL) == 1);
+ trunk_request_signal_cancel(treq);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_CANCEL) == 1);
- TEST_CASE("FR_TRUNK_REQUEST_STATE_CANCEL_PARTIAL -> FR_TRUNK_REQUEST_STATE_CANCEL_SENT");
+ TEST_CASE("TRUNK_REQUEST_STATE_CANCEL_PARTIAL -> TRUNK_REQUEST_STATE_CANCEL_SENT");
fr_event_corral(el, test_time_base, false); /* Send partial cancel request */
fr_event_service(el);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_CANCEL_PARTIAL) == 1);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_CANCEL_PARTIAL) == 1);
fr_event_corral(el, test_time_base, false); /* Complete the partial cancellation */
fr_event_service(el);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_CANCEL_SENT) == 1);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_CANCEL_SENT) == 1);
fr_event_corral(el, test_time_base, false); /* Loop the cancellation request back */
fr_event_service(el);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_ALL) == 0);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_ALL) == 0);
talloc_free(trunk);
static void test_requeue_on_reconnect(void)
{
TALLOC_CTX *ctx = talloc_init_const("test");
- fr_trunk_t *trunk;
+ trunk_t *trunk;
fr_event_list_t *el;
- fr_trunk_conf_t conf = {
+ trunk_conf_t conf = {
.start = 2,
.min = 2,
.manage_interval = fr_time_delta_from_nsec(NSEC * 0.5),
- .conn_conf = &(fr_connection_conf_t){
+ .conn_conf = &(connection_conf_t){
.reconnection_delay = fr_time_delta_from_nsec(NSEC / 10)
},
.backlog_on_failed_conn = true
};
test_proto_request_t *preq;
- fr_trunk_request_t *treq = NULL;
- fr_trunk_connection_t *tconn;
+ trunk_request_t *treq = NULL;
+ trunk_connection_t *tconn;
DEBUG_LVL_SET;
fr_talloc_fault_setup();
fr_event_corral(el, test_time_base, false); /* Connect the connection(s) */
fr_event_service(el);
- TEST_CASE("dequeue on reconnect - FR_TRUNK_REQUEST_STATE_PENDING");
+ TEST_CASE("dequeue on reconnect - TRUNK_REQUEST_STATE_PENDING");
- TEST_CHECK_LEN(fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_ACTIVE), 2);
+ TEST_CHECK_LEN(trunk_connection_count_by_state(trunk, TRUNK_CONN_ACTIVE), 2);
- fr_trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
+ trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
preq->treq = treq;
tconn = treq->pub.tconn; /* Store the conn the request was assigned to */
- TEST_CHECK_LEN(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_PENDING), 1);
+ TEST_CHECK_LEN(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_PENDING), 1);
- fr_trunk_connection_signal_reconnect(tconn, FR_CONNECTION_FAILED);
+ trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED);
/*
* Should be reassigned to the other connection
*/
TEST_CHECK(tconn != treq->pub.tconn);
- TEST_CHECK_LEN(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_PENDING), 1);
+ TEST_CHECK_LEN(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_PENDING), 1);
/*
* Should be reassigned to the backlog
*/
- fr_trunk_connection_signal_reconnect(treq->pub.tconn, FR_CONNECTION_FAILED);
- TEST_CHECK_LEN(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_BACKLOG), 1);
+ trunk_connection_signal_reconnect(treq->pub.tconn, CONNECTION_FAILED);
+ TEST_CHECK_LEN(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_BACKLOG), 1);
TEST_CHECK(!treq->pub.tconn);
- TEST_CASE("cancel on reconnect - FR_TRUNK_REQUEST_STATE_PARTIAL");
+ TEST_CASE("cancel on reconnect - TRUNK_REQUEST_STATE_PARTIAL");
/*
* Allow the connections to reconnect
* Request should now be assigned back to one of the reconnected
* connections.
*/
- TEST_CHECK_LEN(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_PENDING), 1);
+ TEST_CHECK_LEN(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_PENDING), 1);
TEST_CHECK(treq->pub.tconn != NULL);
test_time_base = fr_time_add_time_delta(test_time_base, fr_time_delta_from_sec(1));
fr_event_corral(el, test_time_base, false); /* Send the request (partially) */
fr_event_service(el);
- TEST_CHECK_LEN(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_PARTIAL), 1);
+ TEST_CHECK_LEN(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_PARTIAL), 1);
/*
* Reconnect the connection.
* then be re-assigned.
*/
tconn = treq->pub.tconn;
- fr_trunk_connection_signal_reconnect(treq->pub.tconn, FR_CONNECTION_FAILED);
+ trunk_connection_signal_reconnect(treq->pub.tconn, CONNECTION_FAILED);
TEST_CHECK(preq->completed == false);
TEST_CHECK(preq->failed == false);
preq->cancelled = false; /* Reset */
- TEST_CHECK_LEN(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_PENDING), 1);
+ TEST_CHECK_LEN(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_PENDING), 1);
TEST_CHECK(tconn != treq->pub.tconn); /* Ensure it moved */
- TEST_CASE("cancel on reconnect - FR_TRUNK_REQUEST_STATE_SENT");
+ TEST_CASE("cancel on reconnect - TRUNK_REQUEST_STATE_SENT");
/*
* Sent the request (fully)
*/
fr_event_corral(el, test_time_base, false); /* Send the request (partially) */
fr_event_service(el);
- TEST_CHECK_LEN(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_SENT), 1);
+ TEST_CHECK_LEN(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_SENT), 1);
tconn = treq->pub.tconn;
- fr_trunk_connection_signal_reconnect(treq->pub.tconn, FR_CONNECTION_FAILED);
+ trunk_connection_signal_reconnect(treq->pub.tconn, CONNECTION_FAILED);
- TEST_CHECK_LEN(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_PENDING), 1);
+ TEST_CHECK_LEN(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_PENDING), 1);
/*
* Allow the connections to reconnect
preq->cancelled = false; /* Reset */
- TEST_CASE("free on reconnect - FR_TRUNK_REQUEST_STATE_CANCEL");
+ TEST_CASE("free on reconnect - TRUNK_REQUEST_STATE_CANCEL");
/*
* Signal the request should be cancelled
*/
- fr_trunk_request_signal_cancel(treq);
- TEST_CHECK_LEN(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_CANCEL), 1);
+ trunk_request_signal_cancel(treq);
+ TEST_CHECK_LEN(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_CANCEL), 1);
/*
* Requests in the cancel state, are
* freed instead of being moved between
* connections.
*/
- fr_trunk_connection_signal_reconnect(tconn, FR_CONNECTION_FAILED); /* treq->pub.tconn, now invalid due to cancel */
+ trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED); /* treq->pub.tconn, now invalid due to cancel */
test_time_base = fr_time_add_time_delta(test_time_base, fr_time_delta_from_sec(1));
fr_event_corral(el, test_time_base, false);
fr_event_corral(el, test_time_base, false);
fr_event_service(el);
- TEST_CASE("free on reconnect - FR_TRUNK_REQUEST_STATE_CANCEL_PARTIAL");
+ TEST_CASE("free on reconnect - TRUNK_REQUEST_STATE_CANCEL_PARTIAL");
/*
* Queue up a new request, and get it to the cancel-partial state.
preq = talloc_zero(ctx, test_proto_request_t);
preq->signal_cancel_partial = true;
treq = NULL;
- fr_trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
+ trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
preq->treq = treq;
- TEST_CHECK_LEN(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_PENDING), 1);
+ TEST_CHECK_LEN(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_PENDING), 1);
/*
* Sent the request (fully)
fr_event_corral(el, test_time_base, false); /* Send the request (fully) */
fr_event_service(el);
- TEST_CHECK_LEN(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_SENT), 1);
- fr_trunk_request_signal_cancel(treq); /* Cancel the request */
+ TEST_CHECK_LEN(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_SENT), 1);
+ trunk_request_signal_cancel(treq); /* Cancel the request */
- TEST_CHECK_LEN(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_CANCEL), 1);
+ TEST_CHECK_LEN(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_CANCEL), 1);
/*
* Transition to cancel partial
fr_event_corral(el, test_time_base, false);
fr_event_service(el);
- TEST_CHECK_LEN(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_CANCEL_PARTIAL), 1);
+ TEST_CHECK_LEN(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_CANCEL_PARTIAL), 1);
/*
* Trigger a reconnection
*/
- fr_trunk_connection_signal_reconnect(treq->pub.tconn, FR_CONNECTION_FAILED);
+ trunk_connection_signal_reconnect(treq->pub.tconn, CONNECTION_FAILED);
test_time_base = fr_time_add_time_delta(test_time_base, fr_time_delta_from_sec(1));
fr_event_corral(el, test_time_base, false);
fr_event_corral(el, test_time_base, false);
fr_event_service(el);
- TEST_CASE("free on reconnect - FR_TRUNK_REQUEST_STATE_CANCEL_SENT");
+ TEST_CASE("free on reconnect - TRUNK_REQUEST_STATE_CANCEL_SENT");
/*
* Queue up a new request, and get it to the cancel-sent state.
*/
preq = talloc_zero(NULL, test_proto_request_t);
treq = NULL;
- fr_trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
+ trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
preq->treq = treq;
- TEST_CHECK_LEN(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_PENDING), 1);
+ TEST_CHECK_LEN(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_PENDING), 1);
/*
* Sent the request (fully)
fr_event_corral(el, test_time_base, false); /* Send the request (fully) */
fr_event_service(el);
- TEST_CHECK_LEN(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_SENT), 1);
- fr_trunk_request_signal_cancel(treq); /* Cancel the request */
+ TEST_CHECK_LEN(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_SENT), 1);
+ trunk_request_signal_cancel(treq); /* Cancel the request */
- TEST_CHECK_LEN(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_CANCEL), 1);
+ TEST_CHECK_LEN(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_CANCEL), 1);
/*
* Transition to cancel
fr_event_corral(el, test_time_base, false);
fr_event_service(el);
- TEST_CHECK_LEN(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_CANCEL_SENT), 1);
+ TEST_CHECK_LEN(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_CANCEL_SENT), 1);
/*
* Trigger a reconnection
*/
- fr_trunk_connection_signal_reconnect(treq->pub.tconn, FR_CONNECTION_FAILED);
+ trunk_connection_signal_reconnect(treq->pub.tconn, CONNECTION_FAILED);
test_time_base = fr_time_add_time_delta(test_time_base, fr_time_delta_from_sec(1));
fr_event_corral(el, test_time_base, false);
static void test_connection_start_on_enqueue(void)
{
TALLOC_CTX *ctx = talloc_init_const("test");
- fr_trunk_t *trunk;
+ trunk_t *trunk;
fr_event_list_t *el;
- fr_trunk_conf_t conf = {
+ trunk_conf_t conf = {
.start = 0,
.min = 0, /* No connections on start */
.manage_interval = fr_time_delta_from_nsec(NSEC * 0.5)
};
test_proto_request_t *preq;
- fr_trunk_request_t *treq_a = NULL, *treq_b = NULL, *treq_c = NULL;
+ trunk_request_t *treq_a = NULL, *treq_b = NULL, *treq_c = NULL;
DEBUG_LVL_SET;
preq = talloc_zero(NULL, test_proto_request_t);
TEST_CASE("C0 - Enqueue should spawn");
- fr_trunk_request_enqueue(&treq_a, trunk, NULL, preq, NULL);
+ trunk_request_enqueue(&treq_a, trunk, NULL, preq, NULL);
/*
* This causes the event associated with the request left on
fr_event_corral(el, test_time_base, false);
fr_event_service(el);
- TEST_CHECK(fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_CONNECTING) == 1);
+ TEST_CHECK(trunk_connection_count_by_state(trunk, TRUNK_CONN_CONNECTING) == 1);
TEST_CASE("C1 connecting, !max_req_per_conn - Enqueue MUST NOT spawn");
- fr_trunk_request_enqueue(&treq_b, trunk, NULL, preq, NULL);
+ trunk_request_enqueue(&treq_b, trunk, NULL, preq, NULL);
- TEST_CHECK(fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_CONNECTING) == 1);
+ TEST_CHECK(trunk_connection_count_by_state(trunk, TRUNK_CONN_CONNECTING) == 1);
/*
* Allow the connections to open
fr_event_corral(el, test_time_base, false);
fr_event_service(el);
- TEST_CHECK(fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_ACTIVE) == 1);
+ TEST_CHECK(trunk_connection_count_by_state(trunk, TRUNK_CONN_ACTIVE) == 1);
TEST_CASE("C1 active, !max_req_per_conn - Enqueue MUST NOT spawn");
- fr_trunk_request_enqueue(&treq_c, trunk, NULL, preq, NULL);
+ trunk_request_enqueue(&treq_c, trunk, NULL, preq, NULL);
- TEST_CHECK(fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_ACTIVE) == 1);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_PENDING) == 3);
+ TEST_CHECK(trunk_connection_count_by_state(trunk, TRUNK_CONN_ACTIVE) == 1);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_PENDING) == 3);
talloc_free(ctx);
talloc_free(preq);
static void test_connection_rebalance_requests(void)
{
TALLOC_CTX *ctx = talloc_init_const("test");
- fr_trunk_t *trunk;
+ trunk_t *trunk;
fr_event_list_t *el;
- fr_trunk_conf_t conf = {
+ trunk_conf_t conf = {
.start = 2,
.min = 2, /* No connections on start */
.manage_interval = fr_time_delta_from_nsec(NSEC * 0.5)
};
test_proto_request_t *preq;
- fr_trunk_connection_t *tconn;
- fr_trunk_request_t *treq_a = NULL, *treq_b = NULL, *treq_c = NULL;
+ trunk_connection_t *tconn;
+ trunk_request_t *treq_a = NULL, *treq_b = NULL, *treq_c = NULL;
DEBUG_LVL_SET;
tconn = fr_minmax_heap_min_peek(trunk->active);
TEST_CASE("C2 connected, R0 - Signal inactive");
- fr_trunk_connection_signal_inactive(tconn);
+ trunk_connection_signal_inactive(tconn);
- fr_trunk_request_enqueue(&treq_a, trunk, NULL, preq, NULL);
- fr_trunk_request_enqueue(&treq_b, trunk, NULL, preq, NULL);
- fr_trunk_request_enqueue(&treq_c, trunk, NULL, preq, NULL);
+ trunk_request_enqueue(&treq_a, trunk, NULL, preq, NULL);
+ trunk_request_enqueue(&treq_b, trunk, NULL, preq, NULL);
+ trunk_request_enqueue(&treq_c, trunk, NULL, preq, NULL);
TEST_CASE("C1 connected, C2 inactive, R3 - Enqueued");
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_PENDING) == 3);
- TEST_CHECK(fr_trunk_request_count_by_connection(tconn, FR_TRUNK_REQUEST_STATE_ALL) == 0);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_PENDING) == 3);
+ TEST_CHECK(trunk_request_count_by_connection(tconn, TRUNK_REQUEST_STATE_ALL) == 0);
/*
* Now mark the previous connection as
* one of the requests.
*/
TEST_CASE("C2 active, R3 - Signal active, should balance");
- fr_trunk_connection_signal_active(tconn);
+ trunk_connection_signal_active(tconn);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_PENDING) == 3);
- TEST_CHECK(fr_trunk_request_count_by_connection(tconn, FR_TRUNK_REQUEST_STATE_ALL) >= 1);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_PENDING) == 3);
+ TEST_CHECK(trunk_request_count_by_connection(tconn, TRUNK_REQUEST_STATE_ALL) >= 1);
talloc_free(ctx);
talloc_free(preq);
#define ALLOC_REQ(_id) \
do { \
- treq_##_id = fr_trunk_request_alloc(trunk, NULL); \
+ treq_##_id = trunk_request_alloc(trunk, NULL); \
preq_##_id = talloc_zero(ctx, test_proto_request_t); \
preq_##_id->treq = treq_##_id; \
preq_##_id->priority = next_prio++; \
static void test_connection_levels_max(void)
{
TALLOC_CTX *ctx = talloc_init_const("test");
- fr_trunk_t *trunk;
+ trunk_t *trunk;
fr_event_list_t *el;
- fr_trunk_conf_t conf = {
+ trunk_conf_t conf = {
.start = 0, /* No connections on start */
.min = 0,
.max = 2,
.manage_interval = fr_time_delta_from_nsec(NSEC * 0.5)
};
test_proto_request_t *preq_a, *preq_b, *preq_c, *preq_d, *preq_e;
- fr_trunk_request_t *treq_a = NULL, *treq_b = NULL, *treq_c = NULL, *treq_d = NULL, *treq_e = NULL;
+ trunk_request_t *treq_a = NULL, *treq_b = NULL, *treq_c = NULL, *treq_d = NULL, *treq_e = NULL;
int next_prio = 0;
DEBUG_LVL_SET;
test_time_base = fr_time_add_time_delta(test_time_base, fr_time_delta_from_nsec(NSEC * 0.5));
trunk = test_setup_trunk(ctx, el, &conf, true, NULL);
- FR_TRUNK_VERIFY(trunk);
+ TRUNK_VERIFY(trunk);
/*
* Queuing a request should start a connection.
*/
TEST_CASE("C0, R1 - Enqueue should spawn");
ALLOC_REQ(a);
- TEST_CHECK(fr_trunk_request_enqueue(&treq_a, trunk, NULL, preq_a, NULL) == FR_TRUNK_ENQUEUE_IN_BACKLOG);
- FR_TRUNK_VERIFY(trunk);
+ TEST_CHECK(trunk_request_enqueue(&treq_a, trunk, NULL, preq_a, NULL) == TRUNK_ENQUEUE_IN_BACKLOG);
+ TRUNK_VERIFY(trunk);
/*
* Like test_connection_start_on_enqueue(), you have to process the backlog
fr_event_corral(el, test_time_base, false);
fr_event_service(el);
- TEST_CHECK_LEN(fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_CONNECTING), 1);
- FR_TRUNK_VERIFY(trunk);
+ TEST_CHECK_LEN(trunk_connection_count_by_state(trunk, TRUNK_CONN_CONNECTING), 1);
+ TRUNK_VERIFY(trunk);
/*
* Queuing another request should *NOT* start another connection
*/
TEST_CASE("C1 connecting, R2 - MUST NOT spawn");
ALLOC_REQ(b);
- TEST_CHECK(fr_trunk_request_enqueue(&treq_b, trunk, NULL, preq_b, NULL) == FR_TRUNK_ENQUEUE_IN_BACKLOG);
- TEST_CHECK_LEN(fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_CONNECTING), 1);
- FR_TRUNK_VERIFY(trunk);
+ TEST_CHECK(trunk_request_enqueue(&treq_b, trunk, NULL, preq_b, NULL) == TRUNK_ENQUEUE_IN_BACKLOG);
+ TEST_CHECK_LEN(trunk_connection_count_by_state(trunk, TRUNK_CONN_CONNECTING), 1);
+ TRUNK_VERIFY(trunk);
TEST_CASE("C1 connecting, R3 - MUST NOT spawn");
ALLOC_REQ(c);
- TEST_CHECK(fr_trunk_request_enqueue(&treq_c, trunk, NULL, preq_c, NULL) == FR_TRUNK_ENQUEUE_IN_BACKLOG);
- TEST_CHECK_LEN(fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_CONNECTING), 1);
- FR_TRUNK_VERIFY(trunk);
+ TEST_CHECK(trunk_request_enqueue(&treq_c, trunk, NULL, preq_c, NULL) == TRUNK_ENQUEUE_IN_BACKLOG);
+ TEST_CHECK_LEN(trunk_connection_count_by_state(trunk, TRUNK_CONN_CONNECTING), 1);
+ TRUNK_VERIFY(trunk);
TEST_CASE("C1 connecting, R4 - MUST NOT spawn");
ALLOC_REQ(d);
- TEST_CHECK(fr_trunk_request_enqueue(&treq_d, trunk, NULL, preq_d, NULL) == FR_TRUNK_ENQUEUE_IN_BACKLOG);
- TEST_CHECK_LEN(fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_CONNECTING), 1);
- FR_TRUNK_VERIFY(trunk);
+ TEST_CHECK(trunk_request_enqueue(&treq_d, trunk, NULL, preq_d, NULL) == TRUNK_ENQUEUE_IN_BACKLOG);
+ TEST_CHECK_LEN(trunk_connection_count_by_state(trunk, TRUNK_CONN_CONNECTING), 1);
+ TRUNK_VERIFY(trunk);
TEST_CASE("C1 connecting, R5 - MUST NOT spawn, NO CAPACITY");
ALLOC_REQ(e);
- TEST_CHECK(fr_trunk_request_enqueue(&treq_e, trunk, NULL, preq_e, NULL) == FR_TRUNK_ENQUEUE_NO_CAPACITY);
- TEST_CHECK_LEN(fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_CONNECTING), 1);
- FR_TRUNK_VERIFY(trunk);
+ TEST_CHECK(trunk_request_enqueue(&treq_e, trunk, NULL, preq_e, NULL) == TRUNK_ENQUEUE_NO_CAPACITY);
+ TEST_CHECK_LEN(trunk_connection_count_by_state(trunk, TRUNK_CONN_CONNECTING), 1);
+ TRUNK_VERIFY(trunk);
/*
* Allowing connection to open
fr_event_service(el);
TEST_CASE("C1 active, R4 - Check pending 2");
- TEST_CHECK_LEN(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_PENDING), 2);
- TEST_CHECK_LEN(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_BACKLOG), 2);
- FR_TRUNK_VERIFY(trunk);
+ TEST_CHECK_LEN(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_PENDING), 2);
+ TEST_CHECK_LEN(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_BACKLOG), 2);
+ TRUNK_VERIFY(trunk);
/*
* Sending requests
fr_event_service(el);
TEST_CASE("C1 active, R4 - Check sent 2");
- TEST_CHECK_LEN(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_SENT), 2);
- FR_TRUNK_VERIFY(trunk);
+ TEST_CHECK_LEN(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_SENT), 2);
+ TRUNK_VERIFY(trunk);
/*
* Looping I/O
TEST_CHECK(preq_b->cancelled == false);
TEST_CHECK(preq_b->freed == true);
- TEST_CHECK_LEN(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_PENDING), 2);
- TEST_CHECK_LEN(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_BACKLOG), 0);
- FR_TRUNK_VERIFY(trunk);
+ TEST_CHECK_LEN(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_PENDING), 2);
+ TEST_CHECK_LEN(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_BACKLOG), 0);
+ TRUNK_VERIFY(trunk);
TEST_CASE("C1 active, R0 - Check complete 2, pending 0");
TEST_CHECK(preq_d->cancelled == false);
TEST_CHECK(preq_d->freed == true);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_ALL) == 0);
- FR_TRUNK_VERIFY(trunk);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_ALL) == 0);
+ TRUNK_VERIFY(trunk);
talloc_free(trunk);
talloc_free(ctx);
static void test_connection_levels_alternating_edges(void)
{
TALLOC_CTX *ctx = talloc_init_const("test");
- fr_trunk_t *trunk;
+ trunk_t *trunk;
fr_event_list_t *el;
- fr_trunk_conf_t conf = {
+ trunk_conf_t conf = {
.start = 0, /* No connections on start */
.min = 0,
.max = 0,
};
test_proto_request_t *preq_a, *preq_b, *preq_c;
- fr_trunk_request_t *treq_a = NULL, *treq_b = NULL, *treq_c = NULL;
+ trunk_request_t *treq_a = NULL, *treq_b = NULL, *treq_c = NULL;
test_proto_stats_t stats;
int next_prio = 0;
*/
TEST_CASE("C0, R1 - Enqueue should spawn");
ALLOC_REQ(a);
- TEST_CHECK(fr_trunk_request_enqueue(&treq_a, trunk, NULL, preq_a, NULL) == FR_TRUNK_ENQUEUE_IN_BACKLOG);
+ TEST_CHECK(trunk_request_enqueue(&treq_a, trunk, NULL, preq_a, NULL) == TRUNK_ENQUEUE_IN_BACKLOG);
/*
* Processing the event associated with the backlog creates
fr_event_corral(el, test_time_base, false);
fr_event_service(el);
- TEST_CHECK(fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_CONNECTING) == 1);
+ TEST_CHECK(trunk_connection_count_by_state(trunk, TRUNK_CONN_CONNECTING) == 1);
TEST_CASE("C1 connecting, R2 - MUST NOT spawn");
ALLOC_REQ(b);
- TEST_CHECK(fr_trunk_request_enqueue(&treq_b, trunk, NULL, preq_b, NULL) == FR_TRUNK_ENQUEUE_IN_BACKLOG);
- TEST_CHECK(fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_CONNECTING) == 1);
+ TEST_CHECK(trunk_request_enqueue(&treq_b, trunk, NULL, preq_b, NULL) == TRUNK_ENQUEUE_IN_BACKLOG);
+ TEST_CHECK(trunk_connection_count_by_state(trunk, TRUNK_CONN_CONNECTING) == 1);
test_time_base = fr_time_add_time_delta(test_time_base, fr_time_delta_from_sec(1));
/*
fr_event_corral(el, test_time_base, false);
fr_event_service(el);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_PENDING) == 2);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_PENDING) == 2);
TEST_CASE("C1 connected, R3 - should spawn");
ALLOC_REQ(c);
- TEST_CHECK(fr_trunk_request_enqueue(&treq_c, trunk, NULL, preq_c, NULL) == FR_TRUNK_ENQUEUE_OK);
+ TEST_CHECK(trunk_request_enqueue(&treq_c, trunk, NULL, preq_c, NULL) == TRUNK_ENQUEUE_OK);
test_time_base = fr_time_add_time_delta(test_time_base, fr_time_delta_from_sec(1));
fr_event_corral(el, test_time_base, false);
fr_event_service(el);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_SENT) == 3);
- TEST_CHECK(fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_ACTIVE) == 1);
- TEST_CHECK(fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_CONNECTING) == 1);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_SENT) == 3);
+ TEST_CHECK(trunk_connection_count_by_state(trunk, TRUNK_CONN_ACTIVE) == 1);
+ TEST_CHECK(trunk_connection_count_by_state(trunk, TRUNK_CONN_CONNECTING) == 1);
/*
* Complete requests
test_time_base = fr_time_add_time_delta(test_time_base, fr_time_delta_from_sec(1));
TEST_CASE("C1 connected, C2 connecting, R2 - MUST NOT spawn");
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_ALL) == 3);
- TEST_CHECK(fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_ACTIVE) == 2);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_ALL) == 3);
+ TEST_CHECK(trunk_connection_count_by_state(trunk, TRUNK_CONN_ACTIVE) == 2);
/*
* Finish the last request, should close one connection
test_time_base = fr_time_add_time_delta(test_time_base, fr_time_delta_from_sec(1));
TEST_CASE("C1 connected, R0");
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_ALL) == 0);
- TEST_CHECK(fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_ACTIVE) == 1);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_ALL) == 0);
+ TEST_CHECK(trunk_connection_count_by_state(trunk, TRUNK_CONN_ACTIVE) == 1);
/*
* Requests now done, should close another connection
test_time_base = fr_time_add_time_delta(test_time_base, fr_time_delta_from_sec(1));
TEST_CASE("C0, R0");
- TEST_CHECK(fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_ACTIVE) == 0);
+ TEST_CHECK(trunk_connection_count_by_state(trunk, TRUNK_CONN_ACTIVE) == 0);
TEST_CHECK(stats.completed == 3);
TEST_CHECK(stats.failed == 0);
*/
TEST_CASE("C0, R1 - Enqueue should spawn");
ALLOC_REQ(a);
- TEST_CHECK(fr_trunk_request_enqueue(&treq_a, trunk, NULL, preq_a, NULL) == FR_TRUNK_ENQUEUE_IN_BACKLOG);
+ TEST_CHECK(trunk_request_enqueue(&treq_a, trunk, NULL, preq_a, NULL) == TRUNK_ENQUEUE_IN_BACKLOG);
/*
* ...once the event associated with the backlogged request is handled.
fr_event_corral(el, test_time_base, false);
fr_event_service(el);
- TEST_CHECK(fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_CONNECTING) == 1);
+ TEST_CHECK(trunk_connection_count_by_state(trunk, TRUNK_CONN_CONNECTING) == 1);
TEST_CASE("C1 connecting, R2 - MUST NOT spawn");
ALLOC_REQ(b);
- TEST_CHECK(fr_trunk_request_enqueue(&treq_b, trunk, NULL, preq_b, NULL) == FR_TRUNK_ENQUEUE_IN_BACKLOG);
- TEST_CHECK(fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_CONNECTING) == 1);
+ TEST_CHECK(trunk_request_enqueue(&treq_b, trunk, NULL, preq_b, NULL) == TRUNK_ENQUEUE_IN_BACKLOG);
+ TEST_CHECK(trunk_connection_count_by_state(trunk, TRUNK_CONN_CONNECTING) == 1);
test_time_base = fr_time_add_time_delta(test_time_base, fr_time_delta_from_sec(1));
/*
fr_event_corral(el, test_time_base, false);
fr_event_service(el);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_PENDING) == 2);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_PENDING) == 2);
TEST_CASE("C1 connected, R3 - should spawn");
ALLOC_REQ(c);
- TEST_CHECK(fr_trunk_request_enqueue(&treq_c, trunk, NULL, preq_c, NULL) == FR_TRUNK_ENQUEUE_OK);
+ TEST_CHECK(trunk_request_enqueue(&treq_c, trunk, NULL, preq_c, NULL) == TRUNK_ENQUEUE_OK);
test_time_base = fr_time_add_time_delta(test_time_base, fr_time_delta_from_sec(1));
fr_event_corral(el, test_time_base, false);
fr_event_service(el);
- TEST_CHECK(fr_trunk_request_count_by_state(trunk, FR_TRUNK_CONN_ALL, FR_TRUNK_REQUEST_STATE_SENT) == 3);
- TEST_CHECK(fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_ACTIVE) == 1);
- TEST_CHECK(fr_trunk_connection_count_by_state(trunk, FR_TRUNK_CONN_CONNECTING) == 1);
+ TEST_CHECK(trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_SENT) == 3);
+ TEST_CHECK(trunk_connection_count_by_state(trunk, TRUNK_CONN_ACTIVE) == 1);
+ TEST_CHECK(trunk_connection_count_by_state(trunk, TRUNK_CONN_CONNECTING) == 1);
talloc_free(trunk);
talloc_free(ctx);
static void test_enqueue_and_io_speed(void)
{
TALLOC_CTX *ctx = talloc_init_const("test");
- fr_trunk_t *trunk;
+ trunk_t *trunk;
fr_event_list_t *el;
int events;
- fr_trunk_conf_t conf = {
+ trunk_conf_t conf = {
.start = 1,
.min = 1,
.max = 0,
size_t i = 0, requests = 100000;
fr_time_t enqueue_start, enqueue_stop, io_start, io_stop;
fr_time_delta_t enqueue_time, io_time, total_time;
- fr_trunk_request_t **treq_array;
+ trunk_request_t **treq_array;
test_proto_request_t **preq_array;
test_proto_stats_t stats;
* When the server's running, this does represent
* close to what we'd have as a steady state.
*/
- MEM(treq_array = talloc_array(ctx, fr_trunk_request_t *, requests));
- for (i = 0; i < requests; i++) treq_array[i] = fr_trunk_request_alloc(trunk, NULL);
- for (i = 0; i < requests; i++) fr_trunk_request_free(&treq_array[i]);
+ MEM(treq_array = talloc_array(ctx, trunk_request_t *, requests));
+ for (i = 0; i < requests; i++) treq_array[i] = trunk_request_alloc(trunk, NULL);
+ for (i = 0; i < requests; i++) trunk_request_free(&treq_array[i]);
MEM(preq_array = talloc_array(ctx, test_proto_request_t *, requests));
enqueue_start = fr_time();
// ProfilerStart(getenv("FR_PROFILE"));
for (i = 0; i < requests; i++) {
- fr_trunk_request_t *treq;
+ trunk_request_t *treq;
test_proto_request_t *preq = NULL;
- treq = fr_trunk_request_alloc(trunk, NULL);
+ treq = trunk_request_alloc(trunk, NULL);
preq = talloc_zero(treq, test_proto_request_t);
preq->treq = treq;
- fr_trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
+ trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
}
enqueue_stop = fr_time();
enqueue_time = fr_time_sub(enqueue_stop, enqueue_start);
typedef struct {
fr_listen_t *main_listen;
fr_listen_t *child_listen;
- fr_connection_t *conn;
+ connection_t *conn;
int msgid;
} proto_ldap_dir_ctx;
return 0;
}
-static void _proto_ldap_socket_init(fr_connection_t *conn, UNUSED fr_connection_state_t prev,
- UNUSED fr_connection_state_t state, void *uctx);
+static void _proto_ldap_socket_init(connection_t *conn, UNUSED connection_state_t prev,
+ UNUSED connection_state_t state, void *uctx);
-static void _proto_ldap_socket_open_connected(fr_connection_t *conn, UNUSED fr_connection_state_t prev,
- UNUSED fr_connection_state_t state, void *uctx);
+static void _proto_ldap_socket_open_connected(connection_t *conn, UNUSED connection_state_t prev,
+ UNUSED connection_state_t state, void *uctx);
/** Attempt to (re)initialise a connection
*
/*
* Add watch functions on the LDAP connection
*/
- fr_connection_add_watch_post(thread->conn, FR_CONNECTION_STATE_INIT,
+	connection_add_watch_post(thread->conn, CONNECTION_STATE_INIT,
_proto_ldap_socket_init, true, thread);
- fr_connection_add_watch_post(thread->conn, FR_CONNECTION_STATE_CONNECTED,
+	connection_add_watch_post(thread->conn, CONNECTION_STATE_CONNECTED,
_proto_ldap_socket_open_connected, true, thread);
/*
* Signal the connection to start
*/
- fr_connection_signal_init(thread->conn);
+ connection_signal_init(thread->conn);
return;
}
{
proto_ldap_sync_ldap_thread_t *thread = talloc_get_type_abort(li->thread_instance, proto_ldap_sync_ldap_thread_t);
- fr_connection_signal_shutdown(thread->conn);
+ connection_signal_shutdown(thread->conn);
return 0;
}
error:
talloc_free(dir_ctx);
if (local) talloc_free(local);
- fr_connection_signal_reconnect(ldap_conn->conn, FR_CONNECTION_FAILED);
+	connection_signal_reconnect(ldap_conn->conn, CONNECTION_FAILED);
return;
}
if (ldap_conn->directory->sync_type == FR_LDAP_SYNC_NONE) {
ERROR("LDAP sync configured for directory which does not support any suitable control");
talloc_free(dir_ctx);
- fr_connection_signal_halt(ldap_conn->conn);
+ connection_signal_halt(ldap_conn->conn);
return;
}
*
* Called as a watch function when the LDAP connection enters the INIT state
*/
-static void _proto_ldap_socket_init(fr_connection_t *conn, UNUSED fr_connection_state_t prev,
- UNUSED fr_connection_state_t state, void *uctx)
+static void _proto_ldap_socket_init(connection_t *conn, UNUSED connection_state_t prev,
+ UNUSED connection_state_t state, void *uctx)
{
proto_ldap_sync_ldap_thread_t *thread = talloc_get_type_abort(uctx, proto_ldap_sync_ldap_thread_t);
fr_listen_t *li;
*
* Schedules re-start of the connection if appropriate
*/
-static void _proto_ldap_socket_closed(UNUSED fr_connection_t *conn, fr_connection_state_t prev,
- UNUSED fr_connection_state_t state, void *uctx)
+static void _proto_ldap_socket_closed(UNUSED connection_t *conn, connection_state_t prev,
+ UNUSED connection_state_t state, void *uctx)
{
fr_listen_t *listen = talloc_get_type_abort(uctx, fr_listen_t);
proto_ldap_sync_ldap_thread_t *thread = talloc_get_type_abort(listen->thread_instance, proto_ldap_sync_ldap_thread_t);
if (fr_event_loop_exiting(thread->el)) return;
- if (prev == FR_CONNECTION_STATE_CONNECTED) {
+	if (prev == CONNECTION_STATE_CONNECTED) {
ERROR("LDAP connection closed. Scheduling restart in %pVs",
fr_box_time_delta(inst->handle_config.reconnection_delay));
if (fr_event_timer_in(thread, thread->el, &thread->conn_retry_ev,
* There are three different forms of LDAP sync/persistent search - so we need
* to know what we're dealing with, and whether the relevant options have been enabled.
*/
-static void _proto_ldap_socket_open_connected(fr_connection_t *conn, UNUSED fr_connection_state_t prev,
- UNUSED fr_connection_state_t state, void *uctx)
+static void _proto_ldap_socket_open_connected(connection_t *conn, UNUSED connection_state_t prev,
+ UNUSED connection_state_t state, void *uctx)
{
proto_ldap_sync_ldap_thread_t *thread = talloc_get_type_abort(uctx, proto_ldap_sync_ldap_thread_t);
fr_listen_t *listen = talloc_get_type_abort(thread->parent, fr_listen_t);
/*
* Add a watch to catch closed LDAP connections
*/
- fr_connection_add_watch_post(thread->conn, FR_CONNECTION_STATE_CLOSED,
+	connection_add_watch_post(thread->conn, CONNECTION_STATE_CLOSED,
_proto_ldap_socket_closed, true, listen);
}
fr_event_timer_t const *conn_retry_ev; //!< When to retry re-establishing the conn.
- fr_connection_t *conn; //!< Our connection to the LDAP directory.
+ connection_t *conn; //!< Our connection to the LDAP directory.
} proto_ldap_sync_ldap_thread_t;
typedef enum {
--- /dev/null
+# rlm_cache_htrie
+## Metadata
+<dl>
+ <dt>category</dt><dd>datastore</dd>
+</dl>
+
+## Summary
+Stores cache entries in a process-local, non-persistent lookup structure. This structure will be either a hash table, an rbtree, or a prefix tree.
+
+It is a submodule of rlm_cache and cannot be used on its own.
--- /dev/null
+/*
+ * This program is is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+ */
+
+/**
+ * $Id$
+ * @file rlm_cache_htrie.c
+ * @brief Simple htrie based cache.
+ *
+ * @copyright 2024 Arran Cudbard-Bell <a.cudbardb@freeradius.org>
+ * @copyright 2014 The FreeRADIUS server project
+ */
+#include <freeradius-devel/server/base.h>
+#include <freeradius-devel/util/heap.h>
+#include <freeradius-devel/util/debug.h>
+#include <freeradius-devel/util/value.h>
+#include <freeradius-devel/util/htrie.h>
+#include "../../rlm_cache.h"
+#include "lib/server/cf_parse.h"
+#include "lib/server/tmpl.h"
+#include "lib/util/types.h"
+
+static int cf_htrie_type_parse(TALLOC_CTX *ctx, void *out, void *parent, CONF_ITEM *ci, conf_parser_t const *rule);
+static int cf_htrie_key_parse(TALLOC_CTX *ctx, void *out, tmpl_rules_t const *t_rules, CONF_ITEM *ci,
+ void const *data, UNUSED call_env_parser_t const *rule);
+
+typedef struct {
+	fr_htrie_t		*cache;		//!< Tree for looking up cache keys.
+	fr_heap_t		*heap;		//!< For managing entry expiry.
+
+	fr_type_t		ktype;		//!< When htrie is "auto", we use this type to decide
+						///< what type of tree to use.
+
+	fr_htrie_type_t		htype;		//!< The htrie type we'll be using
+	bool			htrie_auto;	//!< Whether the user wanted to automatically configure
+						///< the htrie.
+
+	pthread_mutex_t		mutex;		//!< Protect the tree from multiple readers/writers.
+} rlm_cache_htrie_t;
+
+/** Wrapper around the common cache entry, adding the heap index needed for expiry tracking */
+typedef struct {
+	rlm_cache_entry_t	fields;		//!< Entry data.  Must be first so the struct can be
+						///< cast to/from rlm_cache_entry_t.
+	fr_heap_index_t		heap_id;	//!< Offset used for expiry heap.
+} rlm_cache_htrie_entry_t;
+
+/* "type" defaults to "auto", in which case cf_htrie_key_parse() picks the htrie type from the key type */
+static conf_parser_t driver_config[] = {
+	{ FR_CONF_OFFSET("type", rlm_cache_htrie_t, htype), .dflt = "auto",
+	  .func = cf_htrie_type_parse,
+	  .uctx = &(cf_table_parse_ctx_t){ .table = fr_htrie_type_table, .len = &fr_htrie_type_table_len } },
+	CONF_PARSER_TERMINATOR
+};
+
+/** Custom htrie type parsing function
+ *
+ * Sets a bool, so we know if the original type was "auto", so we can continually re-evaluate
+ * the htrie type based on the key type.
+ */
+int cf_htrie_type_parse(TALLOC_CTX *ctx, void *out, void *parent, CONF_ITEM *ci, conf_parser_t const *rule)
+{
+	rlm_cache_htrie_t *inst = talloc_get_type_abort(parent, rlm_cache_htrie_t);
+	int ret;
+
+	/* Delegate the actual string -> enum lookup to the standard table parser */
+	ret = cf_table_parse_int(ctx, out, parent, ci, rule);
+	if (unlikely(ret < 0)) return ret;
+
+	/*
+	 *	Record this now, so when we overwrite this
+	 *	value later, we know to keep checking the
+	 *	htrie type value for consistency.
+	 */
+	if (*(int *)out == FR_HTRIE_AUTO) inst->htrie_auto = true;
+
+	return 0;
+}
+
+/** Custom key parsing function for checking compatibility of key types
+ *
+ * This function does two things:
+ * - It selects a htrie type based on the key type.
+ * - It checks that all keys are compatible with each other.
+ */
+static int cf_htrie_key_parse(TALLOC_CTX *ctx, void *out, tmpl_rules_t const *t_rules, CONF_ITEM *ci,
+			      void const *data, UNUSED call_env_parser_t const *rule)
+{
+	rlm_cache_htrie_t *inst = talloc_get_type_abort_const(data, rlm_cache_htrie_t);
+	tmpl_t *key_tmpl;
+	fr_type_t our_ktype, old_ktype;
+
+	/*
+	 *	Call the standard pair parsing function
+	 */
+	if (unlikely(call_env_parse_pair(ctx, &key_tmpl, t_rules, ci, data, rule) < 0)) return -1;
+
+	/*
+	 *	Write the parsed tmpl out immediately.  Previously this was
+	 *	only done on the final fall-through path, so the early
+	 *	"return 0" below left *out unpopulated.
+	 */
+	*(void **)out = key_tmpl;
+
+	our_ktype = tmpl_expanded_type(key_tmpl);
+
+	/*
+	 *	We need the user to tell us what the key type is for ambiguous expansions
+	 */
+	if (fr_type_is_void(our_ktype)) {
+		cf_log_err(ci, "Key type is unspecified. Add a cast to set a specific type");
+		return -1;
+	}
+
+	/*
+	 *	If we don't have a key type already, then just set it to the first key type we see
+	 */
+	if (fr_type_is_void(inst->ktype)) {
+		inst->ktype = our_ktype;
+	/*
+	 *	Check if we can cast this key type, to the key type we've already seen
+	 */
+	} else if (!fr_type_cast(our_ktype, inst->ktype)) {
+		cf_log_err(ci, "Incompatible key types '%s' and '%s', cast to a more broadly compatible "
+			   "type such as 'string'", fr_type_to_str(inst->ktype), fr_type_to_str(our_ktype));
+		return -1;
+	}
+
+	/*
+	 *	See if we should promote inst->ktype
+	 */
+	old_ktype = inst->ktype;
+	inst->ktype = fr_type_promote(inst->ktype, our_ktype);
+	fr_assert(!fr_type_is_void(inst->ktype));
+
+	/*
+	 *	If we're not automatically determining the htrie type,
+	 *	or the ktype hasn't changed, then don't bother figuring
+	 *	out the htrie type.
+	 */
+	if (!inst->htrie_auto || (old_ktype == inst->ktype)) return 0;
+
+	/*
+	 *	We need to figure out the htrie type based on the key type
+	 */
+	inst->htype = fr_htrie_hint(inst->ktype);
+	if (inst->htype == FR_HTRIE_INVALID) {
+		cf_log_err(ci, "Invalid data type '%s' for htrie key. "
+			   "Cast to another type, or manually specify 'type'", fr_type_to_str(inst->ktype));
+		return -1;
+	}
+
+	cf_log_info(ci, "Automatically setting htrie type to '%s' based on key type '%s'",
+		    fr_htrie_type_to_str(inst->htype), fr_type_to_str(inst->ktype));
+
+	return 0;
+}
+
+/** Compare two entries by expiry time
+ *
+ * There may be multiple entries with the same expiry time.
+ */
+static int8_t cache_heap_cmp(void const *one, void const *two)
+{
+	rlm_cache_entry_t const *a = one, *b = two;
+
+	/* Returns -1/0/1 so the heap orders entries soonest-to-expire first */
+	return fr_unix_time_cmp(a->expires, b->expires);
+}
+
+/** Custom allocation function for the driver
+ *
+ * Allows allocation of cache entry structures with additional fields.
+ *
+ * @copydetails cache_entry_alloc_t
+ */
+static rlm_cache_entry_t *cache_entry_alloc(UNUSED rlm_cache_config_t const *config, UNUSED void *instance,
+					    request_t *request)
+{
+	rlm_cache_htrie_entry_t *c;
+
+	/* Entry is allocated in the NULL ctx; ownership passes to the caller / the htrie on insert */
+	c = talloc_zero(NULL, rlm_cache_htrie_entry_t);
+	if (!c) {
+		RERROR("Failed allocating cache entry");
+		return NULL;
+	}
+
+	/* rlm_cache_entry_t is the first member, so this cast is safe */
+	return (rlm_cache_entry_t *)c;
+}
+
+/** Locate a cache entry
+ *
+ * @note handle not used except for sanity checks.
+ *
+ * @copydetails cache_entry_find_t
+ */
+static cache_status_t cache_entry_find(rlm_cache_entry_t **out,
+				       UNUSED rlm_cache_config_t const *config, void *instance,
+				       request_t *request, UNUSED void *handle, fr_value_box_t const *key)
+{
+	rlm_cache_htrie_t *driver = talloc_get_type_abort(instance, rlm_cache_htrie_t);
+	rlm_cache_entry_t find = {};
+
+	rlm_cache_entry_t *c;
+
+	fr_assert(driver->cache);
+
+	/*
+	 *	Clear out old entries
+	 *
+	 *	Only the single soonest-to-expire entry is reaped per lookup,
+	 *	amortising expiry work across find calls.
+	 */
+	c = fr_heap_peek(driver->heap);
+	if (c && (fr_unix_time_lt(c->expires, fr_time_to_unix_time(request->packet->timestamp)))) {
+		fr_heap_extract(&driver->heap, c);
+		fr_htrie_delete(driver->cache, c);
+		talloc_free(c);
+	}
+
+	/* Shallow copy: find.key borrows the caller's buffers, so nothing to free afterwards */
+	fr_value_box_copy_shallow(NULL, &find.key, key);
+
+	/*
+	 *	Is there an entry for this key?
+	 */
+	c = fr_htrie_find(driver->cache, &find);
+	if (!c) {
+		*out = NULL;
+		return CACHE_MISS;
+	}
+	*out = c;
+
+	return CACHE_OK;
+}
+
+/** Free an entry and remove it from the data store
+ *
+ * @note handle not used except for sanity checks.
+ *
+ * @copydetails cache_entry_expire_t
+ */
+static cache_status_t cache_entry_expire(UNUSED rlm_cache_config_t const *config, void *instance,
+					 request_t *request, UNUSED void *handle,
+					 fr_value_box_t const *key)
+{
+	rlm_cache_htrie_t *driver = talloc_get_type_abort(instance, rlm_cache_htrie_t);
+	rlm_cache_entry_t find = {};
+	rlm_cache_entry_t *c;
+
+	if (!request) return CACHE_ERROR;
+
+	/* Shallow copy: find.key borrows the caller's buffers, so nothing to free afterwards */
+	fr_value_box_copy_shallow(NULL, &find.key, key);
+
+	c = fr_htrie_find(driver->cache, &find);
+	if (!c) return CACHE_MISS;
+
+	/* Remove from both containers before freeing, so neither holds a dangling pointer */
+	fr_heap_extract(&driver->heap, c);
+	fr_htrie_delete(driver->cache, c);
+	talloc_free(c);
+
+	return CACHE_OK;
+}
+
+/** Insert a new entry into the data store
+ *
+ * @note handle not used except for sanity checks.
+ *
+ * @copydetails cache_entry_insert_t
+ */
+static cache_status_t cache_entry_insert(rlm_cache_config_t const *config, void *instance,
+					 request_t *request, void *handle,
+					 rlm_cache_entry_t const *c)
+{
+	cache_status_t status;
+
+	rlm_cache_htrie_t *driver = talloc_get_type_abort(instance, rlm_cache_htrie_t);
+
+	fr_assert(handle == request);
+
+	if (!request) return CACHE_ERROR;
+
+	/*
+	 *	Allow overwriting
+	 *
+	 *	If insert fails it's assumed an entry with the same key
+	 *	already exists; expire it, then retry the insert once.
+	 */
+	if (!fr_htrie_insert(driver->cache, c)) {
+		status = cache_entry_expire(config, instance, request, handle, &c->key);
+		if ((status != CACHE_OK) && !fr_cond_assert(0)) return CACHE_ERROR;
+
+		if (!fr_htrie_insert(driver->cache, c)) {
+			RERROR("Failed adding entry");
+
+			return CACHE_ERROR;
+		}
+	}
+
+	/* Track the entry for expiry; undo the htrie insert if the heap insert fails */
+	if (fr_heap_insert(&driver->heap, UNCONST(rlm_cache_entry_t *, c)) < 0) {
+		fr_htrie_delete(driver->cache, c);
+		RERROR("Failed adding entry to expiry heap");
+
+		return CACHE_ERROR;
+	}
+
+	return CACHE_OK;
+}
+
+/** Update the TTL of an entry
+ *
+ * @note handle not used except for sanity checks.
+ *
+ * @copydetails cache_entry_set_ttl_t
+ */
+static cache_status_t cache_entry_set_ttl(UNUSED rlm_cache_config_t const *config, void *instance,
+					  request_t *request, UNUSED void *handle,
+					  rlm_cache_entry_t *c)
+{
+	rlm_cache_htrie_t *driver = talloc_get_type_abort(instance, rlm_cache_htrie_t);
+
+#ifdef NDEBUG
+	/* In debug builds fr_cond_assert below aborts anyway; this guard only matters in release builds */
+	if (!request) return CACHE_ERROR;
+#endif
+
+	/* Entry must be re-positioned in the heap: extract with the old expiry, re-insert with the new */
+	if (!fr_cond_assert(fr_heap_extract(&driver->heap, c) == 0)) {
+		RERROR("Entry not in heap");
+		return CACHE_ERROR;
+	}
+
+	if (fr_heap_insert(&driver->heap, c) < 0) {
+		fr_htrie_delete(driver->cache, c);	/* make sure we don't leak entries... */
+		RERROR("Failed updating entry TTL.  Entry was forcefully expired");
+		return CACHE_ERROR;
+	}
+	return CACHE_OK;
+}
+
+/** Return the number of entries in the cache
+ *
+ * @note handle not used except for sanity checks.
+ *
+ * @copydetails cache_entry_count_t
+ */
+static uint64_t cache_entry_count(UNUSED rlm_cache_config_t const *config, void *instance,
+				  request_t *request, UNUSED void *handle)
+{
+	rlm_cache_htrie_t *driver = talloc_get_type_abort(instance, rlm_cache_htrie_t);
+
+	/*
+	 *	NOTE(review): CACHE_ERROR is an enum value being returned from a
+	 *	uint64_t function; if it's negative it will wrap to a huge count.
+	 *	Confirm callers treat this sentinel correctly.
+	 */
+	if (!request) return CACHE_ERROR;
+
+	return fr_htrie_num_elements(driver->cache);
+}
+
+/** Lock the htrie
+ *
+ * @note handle not used except for sanity checks.
+ *
+ * @copydetails cache_acquire_t
+ */
+static int cache_acquire(void **handle, UNUSED rlm_cache_config_t const *config, void *instance,
+			 request_t *request)
+{
+	rlm_cache_htrie_t *driver = talloc_get_type_abort(instance, rlm_cache_htrie_t);
+
+	/* Serialises all access to the htrie and expiry heap until cache_release() */
+	pthread_mutex_lock(&driver->mutex);
+
+	*handle = request;		/* handle is unused, this is just for sanity checking */
+
+	RDEBUG3("Mutex acquired");
+
+	return 0;
+}
+
+/** Release an entry unlocking any mutexes
+ *
+ * @note handle not used except for sanity checks.
+ *
+ * @copydetails cache_release_t
+ */
+static void cache_release(UNUSED rlm_cache_config_t const *config, void *instance, request_t *request,
+			  UNUSED rlm_cache_handle_t *handle)
+{
+	rlm_cache_htrie_t *driver = talloc_get_type_abort(instance, rlm_cache_htrie_t);
+
+	/* Pairs with the lock taken in cache_acquire() */
+	pthread_mutex_unlock(&driver->mutex);
+
+	RDEBUG3("Mutex released");
+}
+
+/** Cleanup a cache_htrie instance
+ *
+ */
+static int mod_detach(module_detach_ctx_t const *mctx)
+{
+	rlm_cache_htrie_t *driver = talloc_get_type_abort(mctx->mi->data, rlm_cache_htrie_t);
+
+	/*
+	 *	Free all remaining entries, deleting each from the tree as we go.
+	 *
+	 *	NOTE(review): this uses the rb-tree iterator directly on an
+	 *	fr_htrie_t - presumably this relies on the underlying structure
+	 *	being an rbtree (or on layout compatibility).  Confirm this is
+	 *	valid for the hash and trie backed htrie types too.
+	 */
+	if (driver->cache) {
+		fr_rb_iter_inorder_t iter;
+		void *data;
+
+		for (data = fr_rb_iter_init_inorder(&iter, driver->cache);
+		     data;
+		     data = fr_rb_iter_next_inorder(&iter)) {
+			fr_rb_iter_delete_inorder(&iter);
+			talloc_free(data);
+		}
+	}
+
+	pthread_mutex_destroy(&driver->mutex);
+
+	return 0;
+}
+
+/** Create a new cache_htrie instance
+ *
+ * @param[in] mctx Data required for instantiation.
+ * @return
+ * - 0 on success.
+ * - -1 on failure.
+ */
+static int mod_instantiate(module_inst_ctx_t const *mctx)
+{
+	rlm_cache_htrie_t *driver = talloc_get_type_abort(mctx->mi->data, rlm_cache_htrie_t);
+	int ret;
+
+	/*
+	 *	The cache lookup structure (hash, rbtree or trie, per htype).
+	 */
+	driver->cache = fr_htrie_alloc(driver, driver->htype,
+				       (fr_hash_t)fr_value_box_hash,
+				       (fr_cmp_t)fr_value_box_cmp,
+				       (fr_trie_key_t)fr_value_box_to_key, NULL);
+	if (!driver->cache) {
+		PERROR("Failed to create cache");
+		return -1;
+	}
+
+	/*
+	 *	The heap of entries to expire.
+	 */
+	driver->heap = fr_heap_talloc_alloc(driver, cache_heap_cmp, rlm_cache_htrie_entry_t, heap_id, 0);
+	if (!driver->heap) {
+		ERROR("Failed to create heap for the cache");
+		return -1;
+	}
+
+	/*
+	 *	pthread_mutex_init() returns a positive errno-style code on
+	 *	failure (never a negative value), so the check must be != 0.
+	 *	The previous "< 0" comparison silently ignored init failures.
+	 */
+	if ((ret = pthread_mutex_init(&driver->mutex, NULL)) != 0) {
+		ERROR("Failed initializing mutex: %s", fr_syserror(ret));
+		return -1;
+	}
+
+	return 0;
+}
+
+extern rlm_cache_driver_t rlm_cache_htrie;
+
+/** Driver entry points exported to rlm_cache */
+rlm_cache_driver_t rlm_cache_htrie = {
+	.common = {
+		.magic		= MODULE_MAGIC_INIT,
+		.name		= "cache_htrie",
+		.config		= driver_config,
+		.instantiate	= mod_instantiate,
+		.detach		= mod_detach,
+		.inst_size	= sizeof(rlm_cache_htrie_t),
+		.inst_type	= "rlm_cache_htrie_t",
+	},
+	.alloc		= cache_entry_alloc,
+
+	.find		= cache_entry_find,
+	.insert		= cache_entry_insert,
+	.expire		= cache_entry_expire,
+	.set_ttl	= cache_entry_set_ttl,
+	.count		= cache_entry_count,
+
+	.acquire	= cache_acquire,
+	.release	= cache_release,
+
+	.key_parse	= cf_htrie_key_parse
+};
*/
if (!group_ctx->query || !(group_ctx->query->treq)) return;
- fr_trunk_request_signal_cancel(group_ctx->query->treq);
+ trunk_request_signal_cancel(group_ctx->query->treq);
}
/** Convert multiple group names into a DNs
*/
if (!group_ctx->query || !group_ctx->query->treq) return;
- fr_trunk_request_signal_cancel(group_ctx->query->treq);
+ trunk_request_signal_cancel(group_ctx->query->treq);
}
/** Process the results of a group object lookup.
if (!group_ctx->query || !group_ctx->query->treq) return;
- fr_trunk_request_signal_cancel(group_ctx->query->treq);
+ trunk_request_signal_cancel(group_ctx->query->treq);
}
/** Initiate a user lookup to check membership.
if (!profile_ctx->query || !profile_ctx->query->treq) return;
- fr_trunk_request_signal_cancel(profile_ctx->query->treq);
+ trunk_request_signal_cancel(profile_ctx->query->treq);
}
/** Search for and apply an LDAP profile
scope, filter,
expanded->attrs, NULL, NULL);
}
-
{ FR_CONF_POINTER("profile", 0, CONF_FLAG_SUBSECTION, NULL), .subcs = (void const *) profile_config },
- { FR_CONF_OFFSET_SUBSECTION("pool", 0, rlm_ldap_t, trunk_conf, fr_trunk_config ) },
+ { FR_CONF_OFFSET_SUBSECTION("pool", 0, rlm_ldap_t, trunk_conf, trunk_config ) },
- { FR_CONF_OFFSET_SUBSECTION("bind_pool", 0, rlm_ldap_t, bind_trunk_conf, fr_trunk_config ) },
+ { FR_CONF_OFFSET_SUBSECTION("bind_pool", 0, rlm_ldap_t, bind_trunk_conf, trunk_config ) },
CONF_PARSER_TERMINATOR
};
static void ldap_query_timeout(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
{
fr_ldap_query_t *query = talloc_get_type_abort(uctx, fr_ldap_query_t);
- fr_trunk_request_t *treq;
+ trunk_request_t *treq;
request_t *request;
/*
*/
if (!query->treq) return;
- treq = talloc_get_type_abort(query->treq, fr_trunk_request_t);
+ treq = talloc_get_type_abort(query->treq, trunk_request_t);
request = treq->request;
ROPTIONAL(RERROR, ERROR, "Timeout waiting for LDAP query");
- fr_trunk_request_signal_cancel(query->treq);
+ trunk_request_signal_cancel(query->treq);
query->ret = LDAP_RESULT_TIMEOUT;
unlang_interpret_mark_runnable(request);
RDEBUG2("Forcefully cancelling pending LDAP query");
- fr_trunk_request_signal_cancel(query->treq);
+ trunk_request_signal_cancel(query->treq);
}
/*
goto query_error;
}
- switch (fr_trunk_request_enqueue(&query->treq, ttrunk->trunk, request, query, NULL)) {
- case FR_TRUNK_ENQUEUE_OK:
- case FR_TRUNK_ENQUEUE_IN_BACKLOG:
+ switch (trunk_request_enqueue(&query->treq, ttrunk->trunk, request, query, NULL)) {
+ case TRUNK_ENQUEUE_OK:
+ case TRUNK_ENQUEUE_IN_BACKLOG:
break;
default:
if (fr_event_timer_in(query, unlang_interpret_event_list(request), &query->ev, handle_config->res_timeout,
ldap_query_timeout, query) < 0) {
REDEBUG("Unable to set timeout for LDAP query");
- fr_trunk_request_signal_cancel(query->treq);
+ trunk_request_signal_cancel(query->treq);
goto query_error;
}
if (!xlat_ctx->query || !xlat_ctx->query->treq) return;
- fr_trunk_request_signal_cancel(xlat_ctx->query->treq);
+ trunk_request_signal_cancel(xlat_ctx->query->treq);
}
#define REPEAT_LDAP_MEMBEROF_XLAT_RESULTS \
{
ldap_autz_ctx_t *autz_ctx = talloc_get_type_abort(uctx, ldap_autz_ctx_t);
- if (autz_ctx->query && autz_ctx->query->treq) fr_trunk_request_signal_cancel(autz_ctx->query->treq);
+ if (autz_ctx->query && autz_ctx->query->treq) trunk_request_signal_cancel(autz_ctx->query->treq);
}
/** Ensure authorization context is properly cleared up
if (!usermod_ctx->query || !usermod_ctx->query->treq) return;
- fr_trunk_request_signal_cancel(usermod_ctx->query->treq);
+ trunk_request_signal_cancel(usermod_ctx->query->treq);
}
/** Handle results of user modification.
#endif
fr_ldap_config_t handle_config; //!< Connection configuration instance.
- fr_trunk_conf_t trunk_conf; //!< Trunk configuration
- fr_trunk_conf_t bind_trunk_conf; //!< Trunk configuration for trunk used for bind auths
+ trunk_conf_t trunk_conf; //!< Trunk configuration
+ trunk_conf_t bind_trunk_conf; //!< Trunk configuration for trunk used for bind auths
module_instance_t const *mi; //!< Module instance data for thread lookups.
} rlm_ldap_t;
*/
if (!user_ctx->query || !user_ctx->query->treq) return;
- fr_trunk_request_signal_cancel(user_ctx->query->treq);
+ trunk_request_signal_cancel(user_ctx->query->treq);
}
/** Initiate asynchronous retrieval of the DN of a user object
typedef struct {
rlm_logtee_t const *inst; //!< Instance of logtee.
fr_event_list_t *el; //!< This thread's event list.
- fr_connection_t *conn; //!< Connection to our log destination.
+ connection_t *conn; //!< Connection to our log destination.
fr_fring_t *fring; //!< Circular buffer used to batch up messages.
/*
* Something bad happened... Fix it...
*/
- fr_connection_signal_reconnect(t->conn, FR_CONNECTION_FAILED);
+ connection_signal_reconnect(t->conn, CONNECTION_FAILED);
}
/** Drain any data we received
case ETIMEDOUT:
case EIO:
case ENXIO:
- fr_connection_signal_reconnect(t->conn, FR_CONNECTION_FAILED);
+ connection_signal_reconnect(t->conn, CONNECTION_FAILED);
return;
/*
case ENXIO:
case EPIPE:
case ENETDOWN:
- fr_connection_signal_reconnect(t->conn, FR_CONNECTION_FAILED);
+ connection_signal_reconnect(t->conn, CONNECTION_FAILED);
return;
/*
/** Process notification that fd is open
*
*/
-static fr_connection_state_t _logtee_conn_open(UNUSED fr_event_list_t *el, UNUSED void *h, void *uctx)
+static connection_state_t _logtee_conn_open(UNUSED fr_event_list_t *el, UNUSED void *h, void *uctx)
{
rlm_logtee_thread_t *t = talloc_get_type_abort(uctx, rlm_logtee_thread_t);
logtee_fd_idle(t);
}
- return FR_CONNECTION_STATE_CONNECTED;
+ return CONNECTION_STATE_CONNECTED;
}
/** Initialise a new outbound connection
* @param[in] conn being initialised.
* @param[in] uctx A #rlm_logtee_thread_t.
*/
-static fr_connection_state_t _logtee_conn_init(void **h_out, fr_connection_t *conn, void *uctx)
+static connection_state_t _logtee_conn_init(void **h_out, connection_t *conn, void *uctx)
{
rlm_logtee_thread_t *t = talloc_get_type_abort(uctx, rlm_logtee_thread_t);
rlm_logtee_t const *inst = t->inst;
case LOGTEE_DST_UNIX:
DEBUG2("Opening UNIX socket at \"%s\"", inst->unix_sock.path);
fd = fr_socket_client_unix(inst->unix_sock.path, true);
- if (fd < 0) return FR_CONNECTION_STATE_FAILED;
+ if (fd < 0) return CONNECTION_STATE_FAILED;
break;
case LOGTEE_DST_TCP:
DEBUG2("Opening TCP connection to %pV:%u",
fr_box_ipaddr(inst->tcp.dst_ipaddr), inst->tcp.port);
fd = fr_socket_client_tcp(NULL, NULL, &inst->tcp.dst_ipaddr, inst->tcp.port, true);
- if (fd < 0) return FR_CONNECTION_STATE_FAILED;
+ if (fd < 0) return connection_STATE_FAILED;
break;
case LOGTEE_DST_UDP:
DEBUG2("Opening UDP connection to %pV:%u",
fr_box_ipaddr(inst->udp.dst_ipaddr), inst->udp.port);
fd = fr_socket_client_udp(NULL, NULL, NULL, &inst->udp.dst_ipaddr, inst->udp.port, true);
- if (fd < 0) return FR_CONNECTION_STATE_FAILED;
+ if (fd < 0) return connection_STATE_FAILED;
break;
/*
case LOGTEE_DST_INVALID:
case LOGTEE_DST_FILE:
fr_assert(0);
- return FR_CONNECTION_STATE_FAILED;
+ return connection_STATE_FAILED;
}
/*
*fd_s = fd;
*h_out = fd_s;
- fr_connection_signal_on_fd(conn, fd);
+ connection_signal_on_fd(conn, fd);
- return FR_CONNECTION_STATE_CONNECTING;
+ return CONNECTION_STATE_CONNECTING;
}
/** Logging callback to write log messages to a destination
/*
* This opens the outbound connection
*/
- t->conn = fr_connection_alloc(t, t->el,
- &(fr_connection_funcs_t){
+ t->conn = connection_alloc(t, t->el,
+ &(connection_funcs_t){
.init = _logtee_conn_init,
.open = _logtee_conn_open,
.close = _logtee_conn_close
},
- &(fr_connection_conf_t){
+ &(connection_conf_t){
.connection_timeout = inst->connection_timeout,
.reconnection_delay = inst->reconnection_delay
},
inst->name, t);
if (t->conn == NULL) return -1;
- fr_connection_signal_init(t->conn);
+ connection_signal_init(t->conn);
return 0;
}
{ FR_CONF_OFFSET("revive_interval", rlm_radius_t, revive_interval) },
- { FR_CONF_OFFSET_SUBSECTION("pool", 0, rlm_radius_t, trunk_conf, fr_trunk_config ) },
+ { FR_CONF_OFFSET_SUBSECTION("pool", 0, rlm_radius_t, trunk_conf, trunk_config ) },
CONF_PARSER_TERMINATOR
};
bool allowed[FR_RADIUS_CODE_MAX];
fr_retry_config_t retry[FR_RADIUS_CODE_MAX];
- fr_trunk_conf_t trunk_conf; //!< trunk configuration
+ trunk_conf_t trunk_conf; //!< trunk configuration
};
/** Enqueue a request_t to an IO submodule
bool send_buff_is_set; //!< Whether we were provided with a send_buf
bool replicate; //!< Copied from parent->replicate
- fr_trunk_conf_t trunk_conf; //!< trunk configuration
+ trunk_conf_t trunk_conf; //!< trunk configuration
} rlm_radius_udp_t;
typedef struct {
rlm_radius_udp_t const *inst; //!< our instance
- fr_trunk_t *trunk; //!< trunk handler
+ trunk_t *trunk; //!< trunk handler
} udp_thread_t;
typedef struct {
- fr_trunk_request_t *treq;
+ trunk_request_t *treq;
rlm_rcode_t rcode; //!< from the transport
} udp_result_t;
typedef struct {
struct iovec out; //!< Describes buffer to send.
- fr_trunk_request_t *treq; //!< Used for signalling.
+ trunk_request_t *treq; //!< Used for signalling.
} udp_coalesced_t;
/** Track the handle, which is tightly correlated with the FD
fr_log(log, log_type, file, line, "request %s, allocated %s:%u", request->name,
request->alloc_file, request->alloc_line);
- fr_trunk_request_state_log(log, log_type, file, line, talloc_get_type_abort(te->uctx, fr_trunk_request_t));
+ trunk_request_state_log(log, log_type, file, line, talloc_get_type_abort(te->uctx, trunk_request_t));
}
#endif
*/
static void conn_error_status_check(UNUSED fr_event_list_t *el, UNUSED int fd, UNUSED int flags, int fd_errno, void *uctx)
{
- fr_connection_t *conn = talloc_get_type_abort(uctx, fr_connection_t);
+ connection_t *conn = talloc_get_type_abort(uctx, connection_t);
udp_handle_t *h;
/*
* Connection must be in the connecting state when this fires
*/
- fr_assert(conn->state == FR_CONNECTION_STATE_CONNECTING);
+ fr_assert(conn->state == CONNECTION_STATE_CONNECTING);
h = talloc_get_type_abort(conn->h, udp_handle_t);
ERROR("%s - Connection %s failed: %s", h->module_name, h->name, fr_syserror(fd_errno));
- fr_connection_signal_reconnect(conn, FR_CONNECTION_FAILED);
+ connection_signal_reconnect(conn, CONNECTION_FAILED);
}
/** Status check timer when opening the connection for the first time.
*/
static void conn_status_check_timeout(fr_event_list_t *el, fr_time_t now, void *uctx)
{
- fr_connection_t *conn = talloc_get_type_abort(uctx, fr_connection_t);
+ connection_t *conn = talloc_get_type_abort(uctx, connection_t);
udp_handle_t *h;
udp_request_t *u;
/*
* Connection must be in the connecting state when this fires
*/
- fr_assert(conn->state == FR_CONNECTION_STATE_CONNECTING);
+ fr_assert(conn->state == CONNECTION_STATE_CONNECTING);
h = talloc_get_type_abort(conn->h, udp_handle_t);
u = h->status_u;
DEBUG("%s - Reached maximum_retransmit_count (%u > %u), failing status checks",
h->module_name, u->retry.count, u->retry.config->mrc);
fail:
- fr_connection_signal_reconnect(conn, FR_CONNECTION_FAILED);
+ connection_signal_reconnect(conn, CONNECTION_FAILED);
return;
case FR_RETRY_CONTINUE:
if (fr_event_fd_insert(h, NULL, el, h->fd, conn_writable_status_check, NULL,
conn_error_status_check, conn) < 0) {
PERROR("%s - Failed inserting FD event", h->module_name);
- fr_connection_signal_reconnect(conn, FR_CONNECTION_FAILED);
+ connection_signal_reconnect(conn, CONNECTION_FAILED);
}
return;
}
*/
static void conn_status_check_again(fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
{
- fr_connection_t *conn = talloc_get_type_abort(uctx, fr_connection_t);
+ connection_t *conn = talloc_get_type_abort(uctx, connection_t);
udp_handle_t *h = talloc_get_type_abort(conn->h, udp_handle_t);
if (fr_event_fd_insert(h, NULL, el, h->fd, conn_writable_status_check, NULL, conn_error_status_check, conn) < 0) {
PERROR("%s - Failed inserting FD event", h->module_name);
- fr_connection_signal_reconnect(conn, FR_CONNECTION_FAILED);
+ connection_signal_reconnect(conn, CONNECTION_FAILED);
}
}
*/
static void conn_readable_status_check(fr_event_list_t *el, UNUSED int fd, UNUSED int flags, void *uctx)
{
- fr_connection_t *conn = talloc_get_type_abort(uctx, fr_connection_t);
+ connection_t *conn = talloc_get_type_abort(uctx, connection_t);
udp_handle_t *h = talloc_get_type_abort(conn->h, udp_handle_t);
- fr_trunk_t *trunk = h->thread->trunk;
+ trunk_t *trunk = h->thread->trunk;
rlm_radius_t const *inst = h->inst->parent;
udp_request_t *u = h->status_u;
ssize_t slen;
ERROR("%s - Failed reading response from socket: %s",
h->module_name, fr_syserror(errno));
- fr_connection_signal_reconnect(conn, FR_CONNECTION_FAILED);
+ connection_signal_reconnect(conn, CONNECTION_FAILED);
return;
}
* Set the timer for the next retransmit.
*/
if (fr_event_timer_at(h, el, &u->ev, u->retry.next, conn_status_check_again, conn) < 0) {
- fr_connection_signal_reconnect(conn, FR_CONNECTION_FAILED);
+ connection_signal_reconnect(conn, CONNECTION_FAILED);
}
return;
}
DEBUG("%s - Connection open - %s", h->module_name, h->name);
- fr_connection_signal_connected(conn);
+ connection_signal_connected(conn);
}
/** Send our status-check packet as soon as the connection becomes writable
*/
static void conn_writable_status_check(fr_event_list_t *el, UNUSED int fd, UNUSED int flags, void *uctx)
{
- fr_connection_t *conn = talloc_get_type_abort(uctx, fr_connection_t);
+ connection_t *conn = talloc_get_type_abort(uctx, connection_t);
udp_handle_t *h = talloc_get_type_abort(conn->h, udp_handle_t);
udp_request_t *u = h->status_u;
ssize_t slen;
if (encode(h->inst, h->status_request, u, u->id) < 0) {
fail:
- fr_connection_signal_reconnect(conn, FR_CONNECTION_FAILED);
+ connection_signal_reconnect(conn, CONNECTION_FAILED);
return;
}
DEBUG3("Encoded packet");
* @param[in] conn to initialise.
* @param[in] uctx A #udp_thread_t
*/
-static fr_connection_state_t conn_init(void **h_out, fr_connection_t *conn, void *uctx)
+static connection_state_t conn_init(void **h_out, connection_t *conn, void *uctx)
{
int fd;
udp_handle_t *h;
PERROR("%s - Failed opening socket", h->module_name);
fail:
talloc_free(h);
- return FR_CONNECTION_STATE_FAILED;
+ return connection_STATE_FAILED;
}
/*
* as open as soon as it becomes writable.
*/
} else {
- fr_connection_signal_on_fd(conn, fd);
+ connection_signal_on_fd(conn, fd);
}
*h_out = h;
// i.e. histograms (or hyperloglog) of packets, so we can see
// which connections / home servers are fast / slow.
- return FR_CONNECTION_STATE_CONNECTING;
+ return connection_STATE_CONNECTING;
}
/** Shutdown/close a file descriptor
* @param[in] state the connection was in when it failed.
* @param[in] uctx UNUSED.
*/
-static fr_connection_state_t conn_failed(void *handle, fr_connection_state_t state, UNUSED void *uctx)
+static connection_state_t conn_failed(void *handle, connection_state_t state, UNUSED void *uctx)
{
switch (state) {
/*
* we need to handle any outstanding packets and
* timer events before reconnecting.
*/
- case FR_CONNECTION_STATE_CONNECTED:
+ case CONNECTION_STATE_CONNECTED:
{
udp_handle_t *h = talloc_get_type_abort(handle, udp_handle_t); /* h only available if connected */
break;
}
- return FR_CONNECTION_STATE_INIT;
+ return connection_STATE_INIT;
}
-static fr_connection_t *thread_conn_alloc(fr_trunk_connection_t *tconn, fr_event_list_t *el,
- fr_connection_conf_t const *conf,
+static connection_t *thread_conn_alloc(trunk_connection_t *tconn, fr_event_list_t *el,
+ connection_conf_t const *conf,
char const *log_prefix, void *uctx)
{
- fr_connection_t *conn;
+ connection_t *conn;
udp_thread_t *thread = talloc_get_type_abort(uctx, udp_thread_t);
- conn = fr_connection_alloc(tconn, el,
- &(fr_connection_funcs_t){
+ conn = connection_alloc(tconn, el,
+ &(connection_funcs_t){
.init = conn_init,
.close = conn_close,
.failed = conn_failed
*/
static void conn_discard(UNUSED fr_event_list_t *el, int fd, UNUSED int flags, void *uctx)
{
- fr_trunk_connection_t *tconn = talloc_get_type_abort(uctx, fr_trunk_connection_t);
+ trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
udp_handle_t *h = talloc_get_type_abort(tconn->conn->h, udp_handle_t);
uint8_t buffer[4096];
ssize_t slen;
case ENOTCONN:
case ETIMEDOUT:
ERROR("%s - Failed draining socket: %s", h->module_name, fr_syserror(errno));
- fr_trunk_connection_signal_reconnect(tconn, FR_CONNECTION_FAILED);
+ trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED);
break;
default:
*/
static void conn_error(UNUSED fr_event_list_t *el, UNUSED int fd, UNUSED int flags, int fd_errno, void *uctx)
{
- fr_trunk_connection_t *tconn = talloc_get_type_abort(uctx, fr_trunk_connection_t);
- fr_connection_t *conn = tconn->conn;
+ trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
+ connection_t *conn = tconn->conn;
udp_handle_t *h = talloc_get_type_abort(conn->h, udp_handle_t);
ERROR("%s - Connection %s failed: %s", h->module_name, h->name, fr_syserror(fd_errno));
- fr_connection_signal_reconnect(conn, FR_CONNECTION_FAILED);
+ connection_signal_reconnect(conn, CONNECTION_FAILED);
}
-static void thread_conn_notify(fr_trunk_connection_t *tconn, fr_connection_t *conn,
+static void thread_conn_notify(trunk_connection_t *tconn, connection_t *conn,
fr_event_list_t *el,
- fr_trunk_connection_event_t notify_on, UNUSED void *uctx)
+ trunk_connection_event_t notify_on, UNUSED void *uctx)
{
udp_handle_t *h = talloc_get_type_abort(conn->h, udp_handle_t);
fr_event_fd_cb_t read_fn = NULL;
* of letting the packets sit in the UDP receive
* queue.
*/
- case FR_TRUNK_CONN_EVENT_NONE:
+ case TRUNK_CONN_EVENT_NONE:
read_fn = conn_discard;
break;
- case FR_TRUNK_CONN_EVENT_READ:
- read_fn = fr_trunk_connection_callback_readable;
+ case TRUNK_CONN_EVENT_READ:
+ read_fn = trunk_connection_callback_readable;
break;
- case FR_TRUNK_CONN_EVENT_WRITE:
- write_fn = fr_trunk_connection_callback_writable;
+ case TRUNK_CONN_EVENT_WRITE:
+ write_fn = trunk_connection_callback_writable;
break;
- case FR_TRUNK_CONN_EVENT_BOTH:
- read_fn = fr_trunk_connection_callback_readable;
- write_fn = fr_trunk_connection_callback_writable;
+ case TRUNK_CONN_EVENT_BOTH:
+ read_fn = trunk_connection_callback_readable;
+ write_fn = trunk_connection_callback_writable;
break;
}
/*
* May free the connection!
*/
- fr_trunk_connection_signal_reconnect(tconn, FR_CONNECTION_FAILED);
+ trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED);
}
}
/** A special version of the trunk/event loop glue function which always discards incoming data
*
*/
-static void thread_conn_notify_replicate(fr_trunk_connection_t *tconn, fr_connection_t *conn,
+static void thread_conn_notify_replicate(trunk_connection_t *tconn, connection_t *conn,
fr_event_list_t *el,
- fr_trunk_connection_event_t notify_on, UNUSED void *uctx)
+ trunk_connection_event_t notify_on, UNUSED void *uctx)
{
udp_handle_t *h = talloc_get_type_abort(conn->h, udp_handle_t);
fr_event_fd_cb_t read_fn = NULL;
fr_event_fd_cb_t write_fn = NULL;
switch (notify_on) {
- case FR_TRUNK_CONN_EVENT_NONE:
+ case TRUNK_CONN_EVENT_NONE:
read_fn = conn_discard;
write_fn = NULL;
break;
- case FR_TRUNK_CONN_EVENT_READ:
+ case TRUNK_CONN_EVENT_READ:
read_fn = conn_discard;
break;
- case FR_TRUNK_CONN_EVENT_BOTH:
- case FR_TRUNK_CONN_EVENT_WRITE:
+ case TRUNK_CONN_EVENT_BOTH:
+ case TRUNK_CONN_EVENT_WRITE:
read_fn = conn_discard;
- write_fn = fr_trunk_connection_callback_writable;
+ write_fn = trunk_connection_callback_writable;
break;
}
/*
* May free the connection!
*/
- fr_trunk_connection_signal_reconnect(tconn, FR_CONNECTION_FAILED);
+ trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED);
}
}
*/
static void revive_timeout(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
{
- fr_trunk_connection_t *tconn = talloc_get_type_abort(uctx, fr_trunk_connection_t);
+ trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
udp_handle_t *h = talloc_get_type_abort(tconn->conn->h, udp_handle_t);
INFO("%s - Reviving connection %s", h->module_name, h->name);
- fr_trunk_connection_signal_reconnect(tconn, FR_CONNECTION_FAILED);
+ trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED);
}
/** Mark a connection dead after "zombie_interval"
*/
static void zombie_timeout(fr_event_list_t *el, fr_time_t now, void *uctx)
{
- fr_trunk_connection_t *tconn = talloc_get_type_abort(uctx, fr_trunk_connection_t);
+ trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
udp_handle_t *h = talloc_get_type_abort(tconn->conn->h, udp_handle_t);
INFO("%s - No replies during 'zombie_period', marking connection %s as dead", h->module_name, h->name);
* Don't use this connection, and re-queue all of its
* requests onto other connections.
*/
- fr_trunk_connection_signal_inactive(tconn);
- (void) fr_trunk_connection_requests_requeue(tconn, FR_TRUNK_REQUEST_STATE_ALL, 0, false);
+ trunk_connection_signal_inactive(tconn);
+ (void) trunk_connection_requests_requeue(tconn, TRUNK_REQUEST_STATE_ALL, 0, false);
/*
* We do have status checks. Try to reconnect the
* then the connection will be marked "alive"
*/
if (h->inst->parent->status_check) {
- fr_trunk_connection_signal_reconnect(tconn, FR_CONNECTION_FAILED);
+ trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED);
return;
}
if (fr_event_timer_at(h, el, &h->zombie_ev,
fr_time_add(now, h->inst->parent->revive_interval), revive_timeout, tconn) < 0) {
ERROR("Failed inserting revive timeout for connection");
- fr_trunk_connection_signal_reconnect(tconn, FR_CONNECTION_FAILED);
+ trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED);
}
}
* - true if the connection is zombie.
* - false if the connection is not zombie.
*/
-static bool check_for_zombie(fr_event_list_t *el, fr_trunk_connection_t *tconn, fr_time_t now, fr_time_t last_sent)
+static bool check_for_zombie(fr_event_list_t *el, trunk_connection_t *tconn, fr_time_t now, fr_time_t last_sent)
{
udp_handle_t *h = talloc_get_type_abort(tconn->conn->h, udp_handle_t);
h->status_u->retry.start = fr_time_wrap(0);
h->status_r->treq = NULL;
- if (fr_trunk_request_enqueue_on_conn(&h->status_r->treq, tconn, h->status_request,
- h->status_u, h->status_r, true) != FR_TRUNK_ENQUEUE_OK) {
- fr_trunk_connection_signal_reconnect(tconn, FR_CONNECTION_FAILED);
+ if (trunk_request_enqueue_on_conn(&h->status_r->treq, tconn, h->status_request,
+ h->status_u, h->status_r, true) != TRUNK_ENQUEUE_OK) {
+ trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED);
}
} else {
if (fr_event_timer_at(h, el, &h->zombie_ev, fr_time_add(now, h->inst->parent->zombie_period),
zombie_timeout, tconn) < 0) {
ERROR("Failed inserting zombie timeout for connection");
- fr_trunk_connection_signal_reconnect(tconn, FR_CONNECTION_FAILED);
+ trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED);
}
}
*/
static void request_timeout(fr_event_list_t *el, fr_time_t now, void *uctx)
{
- fr_trunk_request_t *treq = talloc_get_type_abort(uctx, fr_trunk_request_t);
+ trunk_request_t *treq = talloc_get_type_abort(uctx, trunk_request_t);
udp_request_t *u = talloc_get_type_abort(treq->preq, udp_request_t);
udp_result_t *r = talloc_get_type_abort(treq->rctx, udp_result_t);
- fr_trunk_connection_t *tconn = treq->tconn;
+ trunk_connection_t *tconn = treq->tconn;
- fr_assert(treq->state == FR_TRUNK_REQUEST_STATE_SENT); /* No other states should be timing out */
+ fr_assert(treq->state == TRUNK_REQUEST_STATE_SENT); /* No other states should be timing out */
fr_assert(treq->preq); /* Must still have a protocol request */
fr_assert(u->rr);
fr_assert(tconn);
r->rcode = RLM_MODULE_FAIL;
- fr_trunk_request_signal_complete(treq);
+ trunk_request_signal_complete(treq);
fr_assert(!u->status_check);
*/
static void request_retry(fr_event_list_t *el, fr_time_t now, void *uctx)
{
- fr_trunk_request_t *treq = talloc_get_type_abort(uctx, fr_trunk_request_t);
+ trunk_request_t *treq = talloc_get_type_abort(uctx, trunk_request_t);
udp_request_t *u = talloc_get_type_abort(treq->preq, udp_request_t);
udp_result_t *r = talloc_get_type_abort(treq->rctx, udp_result_t);
request_t *request = treq->request;
- fr_trunk_connection_t *tconn = treq->tconn;
+ trunk_connection_t *tconn = treq->tconn;
- fr_assert(treq->state == FR_TRUNK_REQUEST_STATE_SENT); /* No other states should be timing out */
+ fr_assert(treq->state == TRUNK_REQUEST_STATE_SENT); /* No other states should be timing out */
fr_assert(treq->preq); /* Must still have a protocol request */
fr_assert(u->rr);
fr_assert(tconn);
* time, and still run the timers.
*/
case FR_RETRY_CONTINUE:
- fr_trunk_request_requeue(treq);
+ trunk_request_requeue(treq);
return;
case FR_RETRY_MRD:
}
r->rcode = RLM_MODULE_FAIL;
- fr_trunk_request_signal_complete(treq);
+ trunk_request_signal_complete(treq);
check_for_zombie(el, tconn, now, u->retry.start);
}
static void status_check_retry(UNUSED fr_event_list_t *el, fr_time_t now, void *uctx)
{
- fr_trunk_request_t *treq = talloc_get_type_abort(uctx, fr_trunk_request_t);
+ trunk_request_t *treq = talloc_get_type_abort(uctx, trunk_request_t);
udp_handle_t *h;
udp_request_t *u = talloc_get_type_abort(treq->preq, udp_request_t);
udp_result_t *r = talloc_get_type_abort(treq->rctx, udp_result_t);
request_t *request = treq->request;
- fr_trunk_connection_t *tconn = treq->tconn;
+ trunk_connection_t *tconn = treq->tconn;
- fr_assert(treq->state == FR_TRUNK_REQUEST_STATE_SENT); /* No other states should be timing out */
+ fr_assert(treq->state == TRUNK_REQUEST_STATE_SENT); /* No other states should be timing out */
fr_assert(treq->preq); /* Must still have a protocol request */
fr_assert(u->rr);
fr_assert(tconn);
* time, and still run the timers.
*/
case FR_RETRY_CONTINUE:
- fr_trunk_request_requeue(treq);
+ trunk_request_requeue(treq);
return;
case FR_RETRY_MRD:
}
r->rcode = RLM_MODULE_FAIL;
- fr_trunk_request_signal_complete(treq);
+ trunk_request_signal_complete(treq);
WARN("%s - No response to status check, marking connection as dead - %s", h->module_name, h->name);
* connection.
*/
h->status_checking = false;
- fr_trunk_connection_signal_reconnect(tconn, FR_CONNECTION_FAILED);
+ trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED);
}
static void request_mux(fr_event_list_t *el,
- fr_trunk_connection_t *tconn, fr_connection_t *conn, UNUSED void *uctx)
+ trunk_connection_t *tconn, connection_t *conn, UNUSED void *uctx)
{
udp_handle_t *h = talloc_get_type_abort(conn->h, udp_handle_t);
rlm_radius_udp_t const *inst = h->inst;
* for transmission with sendmmsg.
*/
for (i = 0, queued = 0; (i < inst->max_send_coalesce) && (total_len < h->send_buff_actual); i++) {
- fr_trunk_request_t *treq;
+ trunk_request_t *treq;
udp_request_t *u;
request_t *request;
- if (unlikely(fr_trunk_connection_pop_request(&treq, tconn) < 0)) return;
+ if (unlikely(trunk_connection_pop_request(&treq, tconn) < 0)) return;
/*
* No more requests to send
*/
if (!treq) break;
- fr_assert((treq->state == FR_TRUNK_REQUEST_STATE_PENDING) ||
- (treq->state == FR_TRUNK_REQUEST_STATE_PARTIAL));
+ fr_assert((treq->state == TRUNK_REQUEST_STATE_PENDING) ||
+ (treq->state == TRUNK_REQUEST_STATE_PARTIAL));
request = treq->request;
u = talloc_get_type_abort(treq->preq, udp_request_t);
h->tt, udp_tracking_entry_log);
#endif
fr_assert_fail("Tracking entry allocation failed: %s", fr_strerror());
- fr_trunk_request_signal_fail(treq);
+ trunk_request_signal_fail(treq);
continue;
}
u->id = u->rr->id;
*/
udp_request_reset(u);
if (u->ev) (void) fr_event_timer_delete(&u->ev);
- fr_trunk_request_signal_fail(treq);
+ trunk_request_signal_fail(treq);
continue;
}
RHEXDUMP3(u->packet, u->packet_len, "Encoded packet");
* been sent, but it's the only way to get at the
* next entry in the heap.
*/
- fr_trunk_request_signal_sent(treq);
+ trunk_request_signal_sent(treq);
queued++;
}
if (queued == 0) return; /* No work */
case EMSGSIZE: /* Packet size exceeds max size allowed on socket */
ERROR("%s - Failed sending data over connection %s: %s",
h->module_name, h->name, fr_syserror(errno));
- fr_trunk_request_signal_fail(h->coalesced[0].treq);
+ trunk_request_signal_fail(h->coalesced[0].treq);
sent = 1;
break;
default:
ERROR("%s - Failed sending data over connection %s: %s",
h->module_name, h->name, fr_syserror(errno));
- fr_trunk_connection_signal_reconnect(tconn, FR_CONNECTION_FAILED);
+ trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED);
return;
}
}
* start the request timer.
*/
for (i = 0; i < sent; i++) {
- fr_trunk_request_t *treq = h->coalesced[i].treq;
+ trunk_request_t *treq = h->coalesced[i].treq;
udp_request_t *u;
request_t *request;
char const *action;
*/
fr_assert((size_t)h->mmsgvec[i].msg_len == h->mmsgvec[i].msg_hdr.msg_iov->iov_len);
- fr_assert(treq->state == FR_TRUNK_REQUEST_STATE_SENT);
+ fr_assert(treq->state == TRUNK_REQUEST_STATE_SENT);
request = treq->request;
u = talloc_get_type_abort(treq->preq, udp_request_t);
if (fr_event_timer_at(u, el, &u->ev, u->retry.next, status_check_retry, treq) < 0) {
RERROR("Failed inserting retransmit timeout for connection");
- fr_trunk_request_signal_fail(treq);
+ trunk_request_signal_fail(treq);
continue;
}
if (fr_event_timer_at(u, el, &u->ev, u->retry.next, request_retry, treq) < 0) {
RERROR("Failed inserting retransmit timeout for connection");
- fr_trunk_request_signal_fail(treq);
+ trunk_request_signal_fail(treq);
continue;
}
fr_time_add(u->retry.start, h->inst->parent->response_window),
request_timeout, treq) < 0) {
RERROR("Failed inserting timeout for connection");
- fr_trunk_request_signal_fail(treq);
+ trunk_request_signal_fail(treq);
continue;
}
* The cancel logic runs as per-normal and cleans up
* the request ready for sending again...
*/
- for (i = sent; i < queued; i++) fr_trunk_request_requeue(h->coalesced[i].treq);
+ for (i = sent; i < queued; i++) trunk_request_requeue(h->coalesced[i].treq);
}
static void request_mux_replicate(UNUSED fr_event_list_t *el,
- fr_trunk_connection_t *tconn, fr_connection_t *conn, UNUSED void *uctx)
+ trunk_connection_t *tconn, connection_t *conn, UNUSED void *uctx)
{
udp_handle_t *h = talloc_get_type_abort(conn->h, udp_handle_t);
rlm_radius_udp_t const *inst = h->inst;
size_t total_len = 0;
for (i = 0, queued = 0; (i < inst->max_send_coalesce) && (total_len < h->send_buff_actual); i++) {
- fr_trunk_request_t *treq;
+ trunk_request_t *treq;
udp_request_t *u;
request_t *request;
- if (unlikely(fr_trunk_connection_pop_request(&treq, tconn) < 0)) return;
+ if (unlikely(trunk_connection_pop_request(&treq, tconn) < 0)) return;
/*
* No more requests to send
*/
if (!treq) break;
- fr_assert((treq->state == FR_TRUNK_REQUEST_STATE_PENDING) ||
- (treq->state == FR_TRUNK_REQUEST_STATE_PARTIAL));
+ fr_assert((treq->state == TRUNK_REQUEST_STATE_PENDING) ||
+ (treq->state == TRUNK_REQUEST_STATE_PARTIAL));
request = treq->request;
u = talloc_get_type_abort(treq->preq, udp_request_t);
u->id = h->last_id++;
if (encode(h->inst, request, u, u->id) < 0) {
- fr_trunk_request_signal_fail(treq);
+ trunk_request_signal_fail(treq);
continue;
}
}
*/
total_len += u->packet_len;
- fr_trunk_request_signal_sent(treq);
+ trunk_request_signal_sent(treq);
queued++;
}
if (queued == 0) return; /* No work */
case EMSGSIZE: /* Packet size exceeds max size allowed on socket */
ERROR("%s - Failed sending data over connection %s: %s",
h->module_name, h->name, fr_syserror(errno));
- fr_trunk_request_signal_fail(h->coalesced[0].treq);
+ trunk_request_signal_fail(h->coalesced[0].treq);
sent = 1;
break;
default:
ERROR("%s - Failed sending data over connection %s: %s",
h->module_name, h->name, fr_syserror(errno));
- fr_trunk_connection_signal_reconnect(tconn, FR_CONNECTION_FAILED);
+ trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED);
return;
}
}
for (i = 0; i < sent; i++) {
- fr_trunk_request_t *treq = h->coalesced[i].treq;
+ trunk_request_t *treq = h->coalesced[i].treq;
udp_result_t *r = talloc_get_type_abort(treq->rctx, udp_result_t);
/*
fr_assert((size_t)h->mmsgvec[i].msg_len == h->mmsgvec[i].msg_hdr.msg_iov->iov_len);
r->rcode = RLM_MODULE_OK;
- fr_trunk_request_signal_complete(treq);
+ trunk_request_signal_complete(treq);
}
- for (i = sent; i < queued; i++) fr_trunk_request_requeue(h->coalesced[i].treq);
+ for (i = sent; i < queued; i++) trunk_request_requeue(h->coalesced[i].treq);
}
/** Deal with Protocol-Error replies, and possible negotiation
*/
static void status_check_next(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
{
- fr_trunk_connection_t *tconn = talloc_get_type_abort(uctx, fr_trunk_connection_t);
+ trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
udp_handle_t *h = talloc_get_type_abort(tconn->conn->h, udp_handle_t);
- if (fr_trunk_request_enqueue_on_conn(&h->status_r->treq, tconn, h->status_request,
- h->status_u, h->status_r, true) != FR_TRUNK_ENQUEUE_OK) {
- fr_trunk_connection_signal_reconnect(tconn, FR_CONNECTION_FAILED);
+ if (trunk_request_enqueue_on_conn(&h->status_r->treq, tconn, h->status_request,
+ h->status_u, h->status_r, true) != TRUNK_ENQUEUE_OK) {
+ trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED);
}
}
/** Deal with replies replies to status checks and possible negotiation
*
*/
-static void status_check_reply(fr_trunk_request_t *treq, fr_time_t now)
+static void status_check_reply(trunk_request_t *treq, fr_time_t now)
{
udp_handle_t *h = talloc_get_type_abort(treq->tconn->conn->h, udp_handle_t);
rlm_radius_t const *inst = h->inst->parent;
* Set the timer for the next retransmit.
*/
if (fr_event_timer_at(h, h->thread->el, &u->ev, u->retry.next, status_check_next, treq->tconn) < 0) {
- fr_trunk_connection_signal_reconnect(treq->tconn, FR_CONNECTION_FAILED);
+ trunk_connection_signal_reconnect(treq->tconn, CONNECTION_FAILED);
}
return;
}
* also frees u->ev.
*/
status_check_reset(h, u);
- fr_trunk_connection_signal_active(treq->tconn);
+ trunk_connection_signal_active(treq->tconn);
}
-static void request_demux(UNUSED fr_event_list_t *el, fr_trunk_connection_t *tconn, fr_connection_t *conn, UNUSED void *uctx)
+static void request_demux(UNUSED fr_event_list_t *el, trunk_connection_t *tconn, connection_t *conn, UNUSED void *uctx)
{
udp_handle_t *h = talloc_get_type_abort(conn->h, udp_handle_t);
while (true) {
ssize_t slen;
- fr_trunk_request_t *treq;
+ trunk_request_t *treq;
request_t *request;
udp_request_t *u;
udp_result_t *r;
ERROR("%s - Failed reading response from socket: %s",
h->module_name, fr_syserror(errno));
- fr_trunk_connection_signal_reconnect(tconn, FR_CONNECTION_FAILED);
+ trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED);
return;
}
continue;
}
- treq = talloc_get_type_abort(rr->uctx, fr_trunk_request_t);
+ treq = talloc_get_type_abort(rr->uctx, trunk_request_t);
request = treq->request;
fr_assert(request != NULL);
u = talloc_get_type_abort(treq->preq, udp_request_t);
if (u == h->status_u) {
fr_pair_list_free(&reply); /* Probably want to pass this to status_check_reply? */
status_check_reply(treq, now);
- fr_trunk_request_signal_complete(treq);
+ trunk_request_signal_complete(treq);
continue;
}
treq->request->reply->code = code;
r->rcode = radius_code_to_rcode[code];
fr_pair_list_append(&request->reply_pairs, &reply);
- fr_trunk_request_signal_complete(treq);
+ trunk_request_signal_complete(treq);
}
}
*
* Frees encoded packets if the request is being moved to a new connection
*/
-static void request_cancel(UNUSED fr_connection_t *conn, void *preq_to_reset,
- fr_trunk_cancel_reason_t reason, UNUSED void *uctx)
+static void request_cancel(UNUSED connection_t *conn, void *preq_to_reset,
+ trunk_cancel_reason_t reason, UNUSED void *uctx)
{
udp_request_t *u = talloc_get_type_abort(preq_to_reset, udp_request_t);
* connection due to timeout or DUP signal. We
* keep the same packet to avoid re-encoding it.
*/
- if (reason == FR_TRUNK_CANCEL_REASON_REQUEUE) {
+ if (reason == TRUNK_CANCEL_REASON_REQUEUE) {
/*
* Delete the request_timeout
*
/** Clear out anything associated with the handle from the request
*
*/
-static void request_conn_release(fr_connection_t *conn, void *preq_to_reset, UNUSED void *uctx)
+static void request_conn_release(connection_t *conn, void *preq_to_reset, UNUSED void *uctx)
{
udp_request_t *u = talloc_get_type_abort(preq_to_reset, udp_request_t);
udp_handle_t *h = talloc_get_type_abort(conn->h, udp_handle_t);
/** Clear out anything associated with the handle from the request
*
*/
-static void request_conn_release_replicate(UNUSED fr_connection_t *conn, void *preq_to_reset, UNUSED void *uctx)
+static void request_conn_release_replicate(UNUSED connection_t *conn, void *preq_to_reset, UNUSED void *uctx)
{
udp_request_t *u = talloc_get_type_abort(preq_to_reset, udp_request_t);
*
*/
static void request_fail(request_t *request, void *preq, void *rctx,
- NDEBUG_UNUSED fr_trunk_request_state_t state, UNUSED void *uctx)
+ NDEBUG_UNUSED trunk_request_state_t state, UNUSED void *uctx)
{
udp_result_t *r = talloc_get_type_abort(rctx, udp_result_t);
udp_request_t *u = talloc_get_type_abort(preq, udp_request_t);
fr_assert(!u->rr && !u->packet && fr_pair_list_empty(&u->extra) && !u->ev); /* Dealt with by request_conn_release */
- fr_assert(state != FR_TRUNK_REQUEST_STATE_INIT);
+ fr_assert(state != TRUNK_REQUEST_STATE_INIT);
if (u->status_check) return;
* trunk so it can clean up the treq.
*/
case FR_SIGNAL_CANCEL:
- fr_trunk_request_signal_cancel(r->treq);
+ trunk_request_signal_cancel(r->treq);
r->treq = NULL;
talloc_free(r); /* Should be freed soon anyway, but better to be explicit */
return;
* connection is dead, then a callback will move
* this request to a new connection.
*/
- fr_trunk_request_requeue(r->treq);
+ trunk_request_requeue(r->treq);
return;
default:
*/
static int _udp_result_free(udp_result_t *r)
{
- fr_trunk_request_t *treq;
+ trunk_request_t *treq;
udp_request_t *u;
if (!r->treq) return 0;
- treq = talloc_get_type_abort(r->treq, fr_trunk_request_t);
+ treq = talloc_get_type_abort(r->treq, trunk_request_t);
u = talloc_get_type_abort(treq->preq, udp_request_t);
fr_assert_msg(!u->ev, "udp_result_t freed with active timer");
udp_thread_t *t = talloc_get_type_abort(thread, udp_thread_t);
udp_result_t *r;
udp_request_t *u;
- fr_trunk_request_t *treq;
+ trunk_request_t *treq;
fr_assert(request->packet->code > 0);
fr_assert(request->packet->code < FR_RADIUS_CODE_MAX);
RETURN_MODULE_NOOP;
}
- treq = fr_trunk_request_alloc(t->trunk, request);
+ treq = trunk_request_alloc(t->trunk, request);
if (!treq) RETURN_MODULE_FAIL;
MEM(r = talloc_zero(request, udp_result_t));
pair_delete_request(attr_message_authenticator);
}
- switch(fr_trunk_request_enqueue(&treq, t->trunk, request, u, r)) {
- case FR_TRUNK_ENQUEUE_OK:
- case FR_TRUNK_ENQUEUE_IN_BACKLOG:
+ switch(trunk_request_enqueue(&treq, t->trunk, request, u, r)) {
+ case TRUNK_ENQUEUE_OK:
+ case TRUNK_ENQUEUE_IN_BACKLOG:
break;
- case FR_TRUNK_ENQUEUE_NO_CAPACITY:
+ case TRUNK_ENQUEUE_NO_CAPACITY:
REDEBUG("Unable to queue packet - connections at maximum capacity");
fail:
fr_assert(!u->rr && !u->packet); /* Should not have been fed to the muxer */
- fr_trunk_request_free(&treq); /* Return to the free list */
+ trunk_request_free(&treq); /* Return to the free list */
talloc_free(r);
RETURN_MODULE_FAIL;
- case FR_TRUNK_ENQUEUE_DST_UNAVAILABLE:
+ case TRUNK_ENQUEUE_DST_UNAVAILABLE:
REDEBUG("All destinations are down - cannot send packet");
goto fail;
- case FR_TRUNK_ENQUEUE_FAIL:
+ case TRUNK_ENQUEUE_FAIL:
REDEBUG("Unable to queue packet");
goto fail;
}
rlm_radius_udp_t *inst = talloc_get_type_abort(mctx->mi->data, rlm_radius_udp_t);
udp_thread_t *thread = talloc_get_type_abort(mctx->thread, udp_thread_t);
- static fr_trunk_io_funcs_t io_funcs = {
+ static trunk_io_funcs_t io_funcs = {
.connection_alloc = thread_conn_alloc,
.connection_notify = thread_conn_notify,
.request_prioritise = request_prioritise,
.request_free = request_free
};
- static fr_trunk_io_funcs_t io_funcs_replicate = {
+ static trunk_io_funcs_t io_funcs_replicate = {
.connection_alloc = thread_conn_alloc,
.connection_notify = thread_conn_notify_replicate,
.request_prioritise = request_prioritise,
thread->el = mctx->el;
thread->inst = inst;
- thread->trunk = fr_trunk_alloc(thread, mctx->el, inst->replicate ? &io_funcs_replicate : &io_funcs,
+ thread->trunk = trunk_alloc(thread, mctx->el, inst->replicate ? &io_funcs_replicate : &io_funcs,
&inst->trunk_conf, inst->parent->name, thread, false);
if (!thread->trunk) return -1;
MYSQL db; //!< Structure representing connection details.
MYSQL *sock; //!< Connection details as returned by connection init functions.
MYSQL_RES *result; //!< Result from most recent query.
- fr_connection_t *conn; //!< Generic connection structure for this connection.
+ connection_t *conn; //!< Generic connection structure for this connection.
int fd; //!< fd for this connection's I/O events.
fr_sql_query_t *query_ctx; //!< Current query running on this connection.
int status; //!< returned by the most recent non-blocking function call.
connected:
if (!c->sock) {
ERROR("MySQL error: %s", mysql_error(&c->db));
- fr_connection_signal_reconnect(c->conn, FR_CONNECTION_FAILED);
+ connection_signal_reconnect(c->conn, CONNECTION_FAILED);
return;
}
mysql_get_host_info(c->sock),
mysql_get_server_info(c->sock), mysql_get_proto_info(c->sock));
- fr_connection_signal_connected(c->conn);
+ connection_signal_connected(c->conn);
}
-static fr_connection_state_t _sql_connection_init(void **h, fr_connection_t *conn, void *uctx)
+static connection_state_t _sql_connection_init(void **h, connection_t *conn, void *uctx)
{
rlm_sql_t const *sql = talloc_get_type_abort_const(uctx, rlm_sql_t);
rlm_sql_mysql_t const *inst = talloc_get_type_abort(sql->driver_submodule->data, rlm_sql_mysql_t);
ERROR("MySQL error: %s", mysql_error(&c->db));
error:
talloc_free(c);
- return FR_CONNECTION_STATE_FAILED;
+ return CONNECTION_STATE_FAILED;
}
if (c->status == 0) {
DEBUG2("Connected to database '%s' on %s, server version %s, protocol version %i",
config->sql_db, mysql_get_host_info(c->sock),
mysql_get_server_info(c->sock), mysql_get_proto_info(c->sock));
- fr_connection_signal_connected(c->conn);
- return FR_CONNECTION_STATE_CONNECTING;
+ connection_signal_connected(c->conn);
+ return CONNECTION_STATE_CONNECTING;
}
if (fr_event_fd_insert(c, NULL, c->conn->el, c->fd,
*h = c;
- return FR_CONNECTION_STATE_CONNECTING;
+ return CONNECTION_STATE_CONNECTING;
}
static void _sql_connection_close(fr_event_list_t *el, void *h, UNUSED void *uctx)
* If the query is not in a state which would return results, then do nothing.
*/
if (query_ctx->treq && !(query_ctx->treq->state &
- (FR_TRUNK_REQUEST_STATE_SENT | FR_TRUNK_REQUEST_STATE_IDLE | FR_TRUNK_REQUEST_STATE_COMPLETE))) return RLM_SQL_OK;
+ (TRUNK_REQUEST_STATE_SENT | TRUNK_REQUEST_STATE_IDLE | TRUNK_REQUEST_STATE_COMPLETE))) return RLM_SQL_OK;
/*
* If the connection doesn't exist there's nothing to do
/*
* If the connection is not active, then all that we can do is free any stored results
*/
- if (query_ctx->tconn->conn->state != FR_CONNECTION_STATE_CONNECTED) {
+ if (query_ctx->tconn->conn->state != CONNECTION_STATE_CONNECTED) {
sql_free_result(query_ctx, config);
return RLM_SQL_OK;
}
static size_t sql_escape_func(UNUSED request_t *request, char *out, size_t outlen, char const *in, void *arg)
{
size_t inlen;
- fr_connection_t *c = talloc_get_type_abort(arg, fr_connection_t);
+ connection_t *c = talloc_get_type_abort(arg, connection_t);
rlm_sql_mysql_conn_t *conn = talloc_get_type_abort(c->h, rlm_sql_mysql_conn_t);
/* Check for potential buffer overflow */
static void sql_conn_writable(UNUSED fr_event_list_t *el, UNUSED int fd, UNUSED int flags, void *uctx)
{
- fr_trunk_connection_t *tconn = talloc_get_type_abort(uctx, fr_trunk_connection_t);
- fr_trunk_connection_signal_writable(tconn);
+ trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
+ trunk_connection_signal_writable(tconn);
}
static void sql_conn_readable(UNUSED fr_event_list_t *el, UNUSED int fd, UNUSED int flags, void *uctx)
{
- fr_trunk_connection_t *tconn = talloc_get_type_abort(uctx, fr_trunk_connection_t);
- fr_trunk_connection_signal_readable(tconn);
+ trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
+ trunk_connection_signal_readable(tconn);
}
static void sql_conn_error(UNUSED fr_event_list_t *el, UNUSED int fd, UNUSED int flags, int fd_errno, void *uctx)
{
- fr_trunk_connection_t *tconn = talloc_get_type_abort(uctx, fr_trunk_connection_t);
+ trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
ERROR("%s - Connection failed: %s", tconn->conn->name, fr_syserror(fd_errno));
- fr_connection_signal_reconnect(tconn->conn, FR_CONNECTION_FAILED);
+ connection_signal_reconnect(tconn->conn, CONNECTION_FAILED);
}
/** Allocate an SQL trunk connection
* @param[in] el Event list which will be used for I/O and timer events.
* @param[in] conn_conf Configuration of the connection.
* @param[in] log_prefix What to prefix log messages with.
- * @param[in] uctx User context passed to fr_trunk_alloc.
+ * @param[in] uctx User context passed to trunk_alloc.
*/
-static fr_connection_t *sql_trunk_connection_alloc(fr_trunk_connection_t *tconn, fr_event_list_t *el,
- fr_connection_conf_t const *conn_conf,
+static connection_t *sql_trunk_connection_alloc(trunk_connection_t *tconn, fr_event_list_t *el,
+ connection_conf_t const *conn_conf,
char const *log_prefix, void *uctx)
{
- fr_connection_t *conn;
+ connection_t *conn;
rlm_sql_thread_t *thread = talloc_get_type_abort(uctx, rlm_sql_thread_t);
- conn = fr_connection_alloc(tconn, el,
- &(fr_connection_funcs_t){
+ conn = connection_alloc(tconn, el,
+ &(connection_funcs_t){
.init = _sql_connection_init,
.close = _sql_connection_close
},
return conn;
}
-static void sql_trunk_connection_notify(fr_trunk_connection_t *tconn, fr_connection_t *conn,
+static void sql_trunk_connection_notify(trunk_connection_t *tconn, connection_t *conn,
fr_event_list_t *el,
- fr_trunk_connection_event_t notify_on, UNUSED void *uctx)
+ trunk_connection_event_t notify_on, UNUSED void *uctx)
{
rlm_sql_mysql_conn_t *sql_conn = talloc_get_type_abort(conn->h, rlm_sql_mysql_conn_t);
fr_event_fd_cb_t read_fn = NULL, write_fn = NULL;
switch (notify_on) {
- case FR_TRUNK_CONN_EVENT_NONE:
+ case TRUNK_CONN_EVENT_NONE:
fr_event_fd_delete(el, sql_conn->fd, FR_EVENT_FILTER_IO);
return;
- case FR_TRUNK_CONN_EVENT_READ:
+ case TRUNK_CONN_EVENT_READ:
read_fn = sql_conn_readable;
break;
- case FR_TRUNK_CONN_EVENT_WRITE:
+ case TRUNK_CONN_EVENT_WRITE:
write_fn = sql_conn_writable;
break;
- case FR_TRUNK_CONN_EVENT_BOTH:
+ case TRUNK_CONN_EVENT_BOTH:
read_fn = sql_conn_readable;
write_fn = sql_conn_writable;
break;
if (fr_event_fd_insert(sql_conn, NULL, el, sql_conn->fd, read_fn, write_fn, sql_conn_error, tconn) < 0) {
PERROR("Failed inserting FD event");
- fr_trunk_connection_signal_reconnect(tconn, FR_CONNECTION_FAILED);
+ trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED);
}
}
-static void sql_trunk_request_mux(UNUSED fr_event_list_t *el, fr_trunk_connection_t *tconn,
- fr_connection_t *conn, UNUSED void *uctx)
+static void sql_trunk_request_mux(UNUSED fr_event_list_t *el, trunk_connection_t *tconn,
+ connection_t *conn, UNUSED void *uctx)
{
rlm_sql_mysql_conn_t *sql_conn = talloc_get_type_abort(conn->h, rlm_sql_mysql_conn_t);
request_t *request;
- fr_trunk_request_t *treq;
+ trunk_request_t *treq;
fr_sql_query_t *query_ctx;
char const *info;
int err;
- if (fr_trunk_connection_pop_request(&treq, tconn) != 0) return;
+ if (trunk_connection_pop_request(&treq, tconn) != 0) return;
if (!treq) return;
query_ctx = talloc_get_type_abort(treq->preq, fr_sql_query_t);
ROPTIONAL(RDEBUG3, DEBUG3, "Waiting for IO");
query_ctx->status = SQL_QUERY_SUBMITTED;
sql_conn->query_ctx = query_ctx;
- fr_trunk_request_signal_sent(treq);
+ trunk_request_signal_sent(treq);
return;
}
default:
query_ctx->status = SQL_QUERY_FAILED;
- fr_trunk_request_signal_fail(treq);
+ trunk_request_signal_fail(treq);
return;
}
}
ROPTIONAL(RDEBUG3, DEBUG3, "Waiting for IO");
query_ctx->status = SQL_QUERY_FETCHING_RESULTS;
sql_conn->query_ctx = query_ctx;
- fr_trunk_request_signal_sent(treq);
+ trunk_request_signal_sent(treq);
return;
}
query_ctx->status = SQL_QUERY_RESULTS_FETCHED;
* The current request is not waiting for I/O so the request can run
*/
ROPTIONAL(RDEBUG3, DEBUG3, "Got immediate response");
- fr_trunk_request_signal_idle(treq);
+ trunk_request_signal_idle(treq);
if (request) unlang_interpret_mark_runnable(request);
}
-static void sql_trunk_request_demux(UNUSED fr_event_list_t *el, UNUSED fr_trunk_connection_t *tconn,
- fr_connection_t *conn, UNUSED void *uctx)
+static void sql_trunk_request_demux(UNUSED fr_event_list_t *el, UNUSED trunk_connection_t *tconn,
+ connection_t *conn, UNUSED void *uctx)
{
rlm_sql_mysql_conn_t *sql_conn = talloc_get_type_abort(conn->h, rlm_sql_mysql_conn_t);
fr_sql_query_t *query_ctx;
query_ctx->rcode = RLM_SQL_OK;
}
-static void sql_request_cancel(fr_connection_t *conn, void *preq, fr_trunk_cancel_reason_t reason,
+static void sql_request_cancel(connection_t *conn, void *preq, trunk_cancel_reason_t reason,
UNUSED void *uctx)
{
fr_sql_query_t *query_ctx = talloc_get_type_abort(preq, fr_sql_query_t);
rlm_sql_mysql_conn_t *sql_conn = talloc_get_type_abort(conn->h, rlm_sql_mysql_conn_t);
if (!query_ctx->treq) return;
- if (reason != FR_TRUNK_CANCEL_REASON_SIGNAL) return;
+ if (reason != TRUNK_CANCEL_REASON_SIGNAL) return;
if (sql_conn->query_ctx == query_ctx) sql_conn->query_ctx = NULL;
}
-static void sql_request_cancel_mux(UNUSED fr_event_list_t *el, fr_trunk_connection_t *tconn,
- fr_connection_t *conn, UNUSED void *uctx)
+static void sql_request_cancel_mux(UNUSED fr_event_list_t *el, trunk_connection_t *tconn,
+ connection_t *conn, UNUSED void *uctx)
{
- fr_trunk_request_t *treq;
+ trunk_request_t *treq;
/*
* The MariaDB non-blocking API doesn't have any cancellation functions -
* rather you are expected to close the connection.
*/
- if ((fr_trunk_connection_pop_cancellation(&treq, tconn)) == 0) {
- fr_trunk_request_signal_cancel_complete(treq);
- fr_connection_signal_reconnect(conn, FR_CONNECTION_FAILED);
+ if ((trunk_connection_pop_cancellation(&treq, tconn)) == 0) {
+ trunk_request_signal_cancel_complete(treq);
+ connection_signal_reconnect(conn, CONNECTION_FAILED);
}
}
static void sql_request_fail(request_t *request, void *preq, UNUSED void *rctx,
- UNUSED fr_trunk_request_state_t state, UNUSED void *uctx)
+ UNUSED trunk_request_state_t state, UNUSED void *uctx)
{
fr_sql_query_t *query_ctx = talloc_get_type_abort(preq, fr_sql_query_t);
if (query_ctx->rcode != RLM_SQL_OK) RETURN_MODULE_FAIL;
if (query_ctx->status == SQL_QUERY_RETURNED) {
- fr_trunk_request_requeue(query_ctx->treq);
+ trunk_request_requeue(query_ctx->treq);
if (unlang_function_repeat_set(request, sql_select_query_resume) < 0) {
query_ctx->rcode = RLM_SQL_ERROR;
static void *sql_escape_arg_alloc(TALLOC_CTX *ctx, fr_event_list_t *el, void *uctx)
{
rlm_sql_t const *inst = talloc_get_type_abort(uctx, rlm_sql_t);
- fr_connection_t *conn;
+ connection_t *conn;
- conn = fr_connection_alloc(ctx, el,
- &(fr_connection_funcs_t){
+ conn = connection_alloc(ctx, el,
+ &(connection_funcs_t){
.init = _sql_connection_init,
.close = _sql_connection_close,
},
return NULL;
}
- fr_connection_signal_init(conn);
+ connection_signal_init(conn);
return conn;
}
static void sql_escape_arg_free(void *uctx)
{
- fr_connection_t *conn = talloc_get_type_abort(uctx, fr_connection_t);
- fr_connection_signal_halt(conn);
+ connection_t *conn = talloc_get_type_abort(uctx, connection_t);
+ connection_signal_halt(conn);
}
/* Exported to rlm_sql */
request_t *request; //!< Request being processed.
rlm_rcode_t rcode; //!< Module return code.
rlm_sql_handle_t *handle; //!< Database connection handle in use for current authorization.
- fr_trunk_t *trunk; //!< Trunk connection for current authorization.
+ trunk_t *trunk; //!< Trunk connection for current authorization.
sql_autz_call_env_t *call_env; //!< Call environment data.
map_list_t check_tmp; //!< List to store check items before processing.
map_list_t reply_tmp; //!< List to store reply items before processing.
rlm_sql_t const *inst; //!< Module instance.
request_t *request; //!< Request being processed.
rlm_sql_handle_t *handle; //!< Database connection handle.
- fr_trunk_t *trunk; //!< Trunk connection for queries.
+ trunk_t *trunk; //!< Trunk connection for queries.
sql_redundant_call_env_t *call_env; //!< Call environment data.
size_t query_no; //!< Current query number.
fr_value_box_list_t query; //!< Where expanded query tmpl will be written.
RETURN_MODULE_OK;
}
-static unlang_action_t sql_get_grouplist(sql_group_ctx_t *group_ctx, rlm_sql_handle_t **handle, fr_trunk_t *trunk, request_t *request)
+static unlang_action_t sql_get_grouplist(sql_group_ctx_t *group_ctx, rlm_sql_handle_t **handle, trunk_t *trunk, request_t *request)
{
rlm_sql_t const *inst = group_ctx->inst;
*/
cs = cf_section_find(conf, "pool", NULL);
if (!cs) cs = cf_section_alloc(conf, conf, "pool", NULL);
- if (cf_section_rules_push(cs, fr_trunk_config) < 0) return -1;
+ if (cf_section_rules_push(cs, trunk_config) < 0) return -1;
if (cf_section_parse(&inst->config, &inst->config.trunk_conf, cs) < 0) return -1;
/*
if (!inst->driver->uses_trunks) return 0;
- t->trunk = fr_trunk_alloc(t, mctx->el, &inst->driver->trunk_io_funcs,
+ t->trunk = trunk_alloc(t, mctx->el, &inst->driver->trunk_io_funcs,
&inst->config.trunk_conf, inst->name, t, false);
if (!t->trunk) return -1;
char const *connect_query; //!< Query executed after establishing
//!< new connection.
- fr_trunk_conf_t trunk_conf; //!< Configuration for trunk connections.
+ trunk_conf_t trunk_conf; //!< Configuration for trunk connections.
} rlm_sql_config_t;
typedef struct sql_inst rlm_sql_t;
* Per-thread instance data structure
*/
typedef struct {
- fr_trunk_t *trunk; //!< Trunk connection for this thread.
+ trunk_t *trunk; //!< Trunk connection for this thread.
rlm_sql_t const *inst; //!< Module instance data.
void *sql_escape_arg; //!< Thread specific argument to be passed to escape function.
} rlm_sql_thread_t;
rlm_sql_t const *inst; //!< Module instance for this query.
request_t *request; //!< Request this query relates to.
rlm_sql_handle_t *handle; //!< Connection handle this query is being run on.
- fr_trunk_t *trunk; //!< Trunk this query is being run on.
- fr_trunk_connection_t *tconn; //!< Trunk connection this query is being run on.
- fr_trunk_request_t *treq; //!< Trunk request for this query.
+ trunk_t *trunk; //!< Trunk this query is being run on.
+ trunk_connection_t *tconn; //!< Trunk connection this query is being run on.
+ trunk_request_t *treq; //!< Trunk request for this query.
char const *query_str; //!< Query string to run.
fr_sql_query_type_t type; //!< Type of query.
fr_sql_query_status_t status; //!< Status of the query.
void (*sql_escape_arg_free)(void *uctx);
bool uses_trunks; //!< Transitional flag for drivers which use trunks.
- fr_trunk_io_funcs_t trunk_io_funcs; //!< Trunk callback functions for this driver.
+ trunk_io_funcs_t trunk_io_funcs; //!< Trunk callback functions for this driver.
} rlm_sql_driver_t;
struct sql_inst {
unlang_function_t query;
unlang_function_t select;
unlang_function_t fetch_row;
- fr_sql_query_t *(*query_alloc)(TALLOC_CTX *ctx, rlm_sql_t const *inst, request_t *request, rlm_sql_handle_t *handle, fr_trunk_t *trunk, char const *query_str, fr_sql_query_type_t type);
+ fr_sql_query_t *(*query_alloc)(TALLOC_CTX *ctx, rlm_sql_t const *inst, request_t *request, rlm_sql_handle_t *handle, trunk_t *trunk, char const *query_str, fr_sql_query_type_t type);
char const *name; //!< Module instance name.
fr_dict_attr_t const *group_da; //!< Group dictionary attribute.
};
void *sql_mod_conn_create(TALLOC_CTX *ctx, void *instance, fr_time_delta_t timeout);
-unlang_action_t sql_get_map_list(request_t *request, fr_sql_map_ctx_t *map_ctx, rlm_sql_handle_t **handle, fr_trunk_t *trunk);
+unlang_action_t sql_get_map_list(request_t *request, fr_sql_map_ctx_t *map_ctx, rlm_sql_handle_t **handle, trunk_t *trunk);
void rlm_sql_query_log(rlm_sql_t const *inst, char const *filename, char const *query) CC_HINT(nonnull);
unlang_action_t rlm_sql_select_query(rlm_rcode_t *p_result, UNUSED int *priority, request_t *request, void *uctx);
unlang_action_t rlm_sql_query(rlm_rcode_t *p_result, int *priority, request_t *request, void *uctx);
unlang_action_t rlm_sql_trunk_query(rlm_rcode_t *p_result, UNUSED int *priority, request_t *request, void *uctx);
unlang_action_t rlm_sql_fetch_row(rlm_rcode_t *p_result, UNUSED int *priority, request_t *request, void *uctx);
void rlm_sql_print_error(rlm_sql_t const *inst, request_t *request, fr_sql_query_t *query_ctx, bool force_debug);
-fr_sql_query_t *fr_sql_query_alloc(TALLOC_CTX *ctx, rlm_sql_t const *inst, request_t *request, rlm_sql_handle_t *handle, fr_trunk_t *trunk, char const *query_str, fr_sql_query_type_t type);
+fr_sql_query_t *fr_sql_query_alloc(TALLOC_CTX *ctx, rlm_sql_t const *inst, request_t *request, rlm_sql_handle_t *handle, trunk_t *trunk, char const *query_str, fr_sql_query_type_t type);
/*
* sql_state.c
} else {
(to_free->inst->driver->sql_finish_query)(to_free, &to_free->inst->config);
}
- if (to_free->treq) fr_trunk_request_signal_complete(to_free->treq);
+ if (to_free->treq) trunk_request_signal_complete(to_free->treq);
return 0;
}
*
*/
fr_sql_query_t *fr_sql_query_alloc(TALLOC_CTX *ctx, rlm_sql_t const *inst, request_t *request, rlm_sql_handle_t *handle,
- fr_trunk_t *trunk, char const *query_str, fr_sql_query_type_t type)
+ trunk_t *trunk, char const *query_str, fr_sql_query_type_t type)
{
fr_sql_query_t *query;
MEM(query = talloc(ctx, fr_sql_query_t));
*/
talloc_steal(query_ctx->treq, query_ctx);
- fr_trunk_request_signal_cancel(query_ctx->treq);
+ trunk_request_signal_cancel(query_ctx->treq);
query_ctx->treq = NULL;
}
unlang_action_t rlm_sql_trunk_query(rlm_rcode_t *p_result, UNUSED int *priority, request_t *request, void *uctx)
{
fr_sql_query_t *query_ctx = talloc_get_type_abort(uctx, fr_sql_query_t);
- fr_trunk_enqueue_t status;
+ trunk_enqueue_t status;
fr_assert(query_ctx->trunk);
* then this is part of an ongoing transaction and needs requeueing
* to submit on the same connection.
*/
- if (query_ctx->treq && query_ctx->treq->state != FR_TRUNK_REQUEST_STATE_INIT) {
- status = fr_trunk_request_requeue(query_ctx->treq);
+ if (query_ctx->treq && query_ctx->treq->state != TRUNK_REQUEST_STATE_INIT) {
+ status = trunk_request_requeue(query_ctx->treq);
} else {
- status = fr_trunk_request_enqueue(&query_ctx->treq, query_ctx->trunk, request, query_ctx, NULL);
+ status = trunk_request_enqueue(&query_ctx->treq, query_ctx->trunk, request, query_ctx, NULL);
}
switch (status) {
- case FR_TRUNK_ENQUEUE_OK:
- case FR_TRUNK_ENQUEUE_IN_BACKLOG:
+ case TRUNK_ENQUEUE_OK:
+ case TRUNK_ENQUEUE_IN_BACKLOG:
if (unlang_function_push(request, sql_trunk_query_start,
query_ctx->type == SQL_QUERY_SELECT ?
query_ctx->inst->driver->sql_select_query_resume :
*
*/
unlang_action_t sql_get_map_list(request_t *request, fr_sql_map_ctx_t *map_ctx, rlm_sql_handle_t **handle,
- fr_trunk_t *trunk)
+ trunk_t *trunk)
{
rlm_sql_t const *inst = map_ctx->inst;
ippool_alloc_status_t status; //!< Status of the allocation.
ippool_alloc_call_env_t *env; //!< Call environment for the allocation.
rlm_sql_handle_t *handle; //!< SQL handle being used for queries.
- fr_trunk_t *trunk; //!< Trunk connection for queries.
+ trunk_t *trunk; //!< Trunk connection for queries.
rlm_sql_t const *sql; //!< SQL module instance.
fr_value_box_list_t values; //!< Where to put the expanded queries ready for execution.
fr_value_box_t *query; //!< Current query being run.
{ FR_CONF_OFFSET("revive_interval", rlm_tacacs_t, revive_interval) },
- { FR_CONF_OFFSET_SUBSECTION("pool", 0, rlm_tacacs_t, trunk_conf, fr_trunk_config ) },
+ { FR_CONF_OFFSET_SUBSECTION("pool", 0, rlm_tacacs_t, trunk_conf, trunk_config ) },
{ FR_CONF_OFFSET_SUBSECTION("retry", 0, rlm_tacacs_t, retry, retry_config ) },
bool allowed[FR_TACACS_CODE_MAX];
- fr_trunk_conf_t trunk_conf; //!< trunk configuration
+ trunk_conf_t trunk_conf; //!< trunk configuration
};
/** Enqueue a request_t to an IO submodule
bool recv_buff_is_set; //!< Whether we were provided with a recv_buf
bool send_buff_is_set; //!< Whether we were provided with a send_buf
- fr_trunk_conf_t *trunk_conf; //!< trunk configuration
+ trunk_conf_t *trunk_conf; //!< trunk configuration
} rlm_tacacs_tcp_t;
typedef struct {
rlm_tacacs_tcp_t const *inst; //!< our instance
- fr_trunk_t *trunk; //!< trunk handler
+ trunk_t *trunk; //!< trunk handler
} udp_thread_t;
typedef struct {
- fr_trunk_request_t *treq;
+ trunk_request_t *treq;
rlm_rcode_t rcode; //!< from the transport
} udp_result_t;
int fd; //!< File descriptor.
- fr_trunk_request_t **coalesced; //!< Outbound coalesced requests.
+ trunk_request_t **coalesced; //!< Outbound coalesced requests.
size_t send_buff_actual; //!< What we believe the maximum SO_SNDBUF size to be.
///< We don't try and encode more packet data than this
int id; //!< starts at 1.
int active; //!< active packets
- fr_trunk_request_t *tracking[UINT8_MAX]; //!< all sequential!
+ trunk_request_t *tracking[UINT8_MAX]; //!< all sequential!
fr_time_t mrs_time; //!< Most recent sent time which had a reply.
fr_time_t last_reply; //!< When we last received a reply.
fr_event_timer_t const *zombie_ev; //!< Zombie timeout.
- fr_trunk_connection_t *tconn; //!< trunk connection
+ trunk_connection_t *tconn; //!< trunk connection
} udp_handle_t;
* Welcome to the insanity that is TACACS+.
*/
if ((h->active == 0) && (h->id > 255)) {
- fr_trunk_connection_signal_reconnect(h->tconn, FR_CONNECTION_EXPIRED);
+ trunk_connection_signal_reconnect(h->tconn, CONNECTION_EXPIRED);
}
}
* @param[in] conn to initialise.
* @param[in] uctx A #udp_thread_t
*/
-static fr_connection_state_t conn_init(void **h_out, fr_connection_t *conn, void *uctx)
+static connection_state_t conn_init(void **h_out, connection_t *conn, void *uctx)
{
int fd;
udp_handle_t *h;
/*
* Initialize the buffer of coalesced packets we're doing to write.
*/
- h->coalesced = talloc_zero_array(h, fr_trunk_request_t *, h->inst->max_send_coalesce);
+ h->coalesced = talloc_zero_array(h, trunk_request_t *, h->inst->max_send_coalesce);
/*
* Open the outgoing socket.
if (fd < 0) {
PERROR("%s - Failed opening socket", h->module_name);
talloc_free(h);
- return FR_CONNECTION_STATE_FAILED;
+ return CONNECTION_STATE_FAILED;
}
/*
* Signal the connection
* as open as soon as it becomes writable.
*/
- fr_connection_signal_on_fd(conn, fd);
+ connection_signal_on_fd(conn, fd);
*h_out = h;
// i.e. histograms (or hyperloglog) of packets, so we can see
// which connections / home servers are fast / slow.
- return FR_CONNECTION_STATE_CONNECTING;
+ return CONNECTION_STATE_CONNECTING;
}
/** Shutdown/close a file descriptor
}
-static fr_connection_t *thread_conn_alloc(fr_trunk_connection_t *tconn, fr_event_list_t *el,
- fr_connection_conf_t const *conf,
+static connection_t *thread_conn_alloc(trunk_connection_t *tconn, fr_event_list_t *el,
+ connection_conf_t const *conf,
char const *log_prefix, void *uctx)
{
- fr_connection_t *conn;
+ connection_t *conn;
udp_thread_t *thread = talloc_get_type_abort(uctx, udp_thread_t);
- conn = fr_connection_alloc(tconn, el,
- &(fr_connection_funcs_t){
+ conn = connection_alloc(tconn, el,
+ &(connection_funcs_t){
.init = conn_init,
.close = conn_close,
},
*/
static void conn_error(UNUSED fr_event_list_t *el, UNUSED int fd, UNUSED int flags, int fd_errno, void *uctx)
{
- fr_trunk_connection_t *tconn = talloc_get_type_abort(uctx, fr_trunk_connection_t);
- fr_connection_t *conn = tconn->conn;
+ trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
+ connection_t *conn = tconn->conn;
udp_handle_t *h = talloc_get_type_abort(conn->h, udp_handle_t);
ERROR("%s - Connection %s failed: %s", h->module_name, h->name, fr_syserror(fd_errno));
- fr_connection_signal_reconnect(conn, FR_CONNECTION_FAILED);
+ connection_signal_reconnect(conn, CONNECTION_FAILED);
}
-static void thread_conn_notify(fr_trunk_connection_t *tconn, fr_connection_t *conn,
+static void thread_conn_notify(trunk_connection_t *tconn, connection_t *conn,
fr_event_list_t *el,
- fr_trunk_connection_event_t notify_on, UNUSED void *uctx)
+ trunk_connection_event_t notify_on, UNUSED void *uctx)
{
udp_handle_t *h = talloc_get_type_abort(conn->h, udp_handle_t);
fr_event_fd_cb_t read_fn = NULL;
fr_event_fd_cb_t write_fn = NULL;
switch (notify_on) {
- case FR_TRUNK_CONN_EVENT_NONE:
+ case TRUNK_CONN_EVENT_NONE:
return;
- case FR_TRUNK_CONN_EVENT_READ:
- read_fn = fr_trunk_connection_callback_readable;
+ case TRUNK_CONN_EVENT_READ:
+ read_fn = trunk_connection_callback_readable;
break;
- case FR_TRUNK_CONN_EVENT_WRITE:
- write_fn = fr_trunk_connection_callback_writable;
+ case TRUNK_CONN_EVENT_WRITE:
+ write_fn = trunk_connection_callback_writable;
break;
- case FR_TRUNK_CONN_EVENT_BOTH:
- read_fn = fr_trunk_connection_callback_readable;
- write_fn = fr_trunk_connection_callback_writable;
+ case TRUNK_CONN_EVENT_BOTH:
+ read_fn = trunk_connection_callback_readable;
+ write_fn = trunk_connection_callback_writable;
break;
}
/*
* May free the connection!
*/
- fr_trunk_connection_signal_reconnect(tconn, FR_CONNECTION_FAILED);
+ trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED);
}
}
*/
static void revive_timeout(UNUSED fr_event_list_t *el, UNUSED fr_time_t now, void *uctx)
{
- fr_trunk_connection_t *tconn = talloc_get_type_abort(uctx, fr_trunk_connection_t);
+ trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
udp_handle_t *h = talloc_get_type_abort(tconn->conn->h, udp_handle_t);
INFO("%s - Reviving connection %s", h->module_name, h->name);
- fr_trunk_connection_signal_reconnect(tconn, FR_CONNECTION_FAILED);
+ trunk_connection_signal_reconnect(tconn, connection_FAILED);
}
/** Mark a connection dead after "zombie_interval"
*/
static void zombie_timeout(fr_event_list_t *el, fr_time_t now, void *uctx)
{
- fr_trunk_connection_t *tconn = talloc_get_type_abort(uctx, fr_trunk_connection_t);
+ trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
udp_handle_t *h = talloc_get_type_abort(tconn->conn->h, udp_handle_t);
INFO("%s - No replies during 'zombie_period', marking connection %s as dead", h->module_name, h->name);
* Don't use this connection, and re-queue all of its
* requests onto other connections.
*/
- fr_trunk_connection_signal_inactive(tconn);
- (void) fr_trunk_connection_requests_requeue(tconn, FR_TRUNK_REQUEST_STATE_ALL, 0, false);
+ trunk_connection_signal_inactive(tconn);
+ (void) trunk_connection_requests_requeue(tconn, TRUNK_REQUEST_STATE_ALL, 0, false);
/*
* Revive the connection after a time.
if (fr_event_timer_at(h, el, &h->zombie_ev,
fr_time_add(now, h->inst->parent->revive_interval), revive_timeout, h) < 0) {
ERROR("Failed inserting revive timeout for connection");
- fr_trunk_connection_signal_reconnect(tconn, FR_CONNECTION_FAILED);
+ trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED);
}
}
* - true if the connection is zombie.
* - false if the connection is not zombie.
*/
-static bool check_for_zombie(fr_event_list_t *el, fr_trunk_connection_t *tconn, fr_time_t now, fr_time_t last_sent)
+static bool check_for_zombie(fr_event_list_t *el, trunk_connection_t *tconn, fr_time_t now, fr_time_t last_sent)
{
udp_handle_t *h = talloc_get_type_abort(tconn->conn->h, udp_handle_t);
* packets on it.
*/
WARN("%s - Entering Zombie state - connection %s", h->module_name, h->name);
- fr_trunk_connection_signal_inactive(tconn);
+ trunk_connection_signal_inactive(tconn);
if (fr_event_timer_at(h, el, &h->zombie_ev, fr_time_add(now, h->inst->parent->zombie_period),
zombie_timeout, h) < 0) {
ERROR("Failed inserting zombie timeout for connection");
- fr_trunk_connection_signal_reconnect(tconn, FR_CONNECTION_FAILED);
+ trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED);
}
return true;
*/
static void request_retry(fr_event_list_t *el, fr_time_t now, void *uctx)
{
- fr_trunk_request_t *treq = talloc_get_type_abort(uctx, fr_trunk_request_t);
+ trunk_request_t *treq = talloc_get_type_abort(uctx, trunk_request_t);
udp_request_t *u = talloc_get_type_abort(treq->preq, udp_request_t);
udp_result_t *r = talloc_get_type_abort(treq->rctx, udp_result_t);
request_t *request = treq->request;
- fr_trunk_connection_t *tconn = treq->tconn;
+ trunk_connection_t *tconn = treq->tconn;
- fr_assert(treq->state == FR_TRUNK_REQUEST_STATE_SENT); /* No other states should be timing out */
+ fr_assert(treq->state == TRUNK_REQUEST_STATE_SENT); /* No other states should be timing out */
fr_assert(treq->preq); /* Must still have a protocol request */
fr_assert(tconn);
* time, and still run the timers.
*/
case FR_RETRY_CONTINUE:
- fr_trunk_request_requeue(treq);
+ trunk_request_requeue(treq);
return;
case FR_RETRY_MRD:
}
r->rcode = RLM_MODULE_FAIL;
- fr_trunk_request_signal_complete(treq);
+ trunk_request_signal_complete(treq);
check_for_zombie(el, tconn, now, u->retry.start);
}
static void request_mux(fr_event_list_t *el,
- fr_trunk_connection_t *tconn, fr_connection_t *conn, UNUSED void *uctx)
+ trunk_connection_t *tconn, connection_t *conn, UNUSED void *uctx)
{
udp_handle_t *h = talloc_get_type_abort(conn->h, udp_handle_t);
rlm_tacacs_tcp_t const *inst = h->inst;
* Encode multiple packets in preparation for transmission with write()
*/
for (i = 0, queued = 0; (i < inst->max_send_coalesce); i++) {
- fr_trunk_request_t *treq;
+ trunk_request_t *treq;
udp_request_t *u;
request_t *request;
- if (unlikely(fr_trunk_connection_pop_request(&treq, tconn) < 0)) return;
+ if (unlikely(trunk_connection_pop_request(&treq, tconn) < 0)) return;
/*
* No more requests to send
* buffer. However, the request MAY still be freed or timed out before we can write the
* data. As a result, we ignore the udp_request_t, and just keep writing the data.
*/
- if (treq->state == FR_TRUNK_REQUEST_STATE_PARTIAL) {
+ if (treq->state == TRUNK_REQUEST_STATE_PARTIAL) {
fr_assert(h->send.read == h->send.data);
fr_assert(h->send.write > h->send.read);
/*
* The request must still be pending.
*/
- fr_assert(treq->state == FR_TRUNK_REQUEST_STATE_PENDING);
+ fr_assert(treq->state == TRUNK_REQUEST_STATE_PENDING);
request = treq->request;
u = talloc_get_type_abort(treq->preq, udp_request_t);
* may not be called.
*/
udp_request_reset(h, u);
- fr_trunk_request_signal_fail(treq);
+ trunk_request_signal_fail(treq);
continue;
}
RHEXDUMP3(u->packet, u->packet_len, "Encoded packet");
* When we've received all replies (or timeouts), we'll close the connections.
*/
if (h->id > 255) {
- fr_trunk_connection_signal_inactive(tconn);
+ trunk_connection_signal_inactive(tconn);
}
next:
* been sent, but it's the only way to get at the
* next entry in the heap.
*/
- fr_trunk_request_signal_sent(treq);
+ trunk_request_signal_sent(treq);
queued++;
}
default:
ERROR("%s - Failed sending data over connection %s: %s",
h->module_name, h->name, fr_syserror(errno));
- fr_trunk_connection_signal_reconnect(tconn, FR_CONNECTION_FAILED);
+ trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED);
return;
}
}
* start the request timer.
*/
for (i = 0; i < queued; i++) {
- fr_trunk_request_t *treq = h->coalesced[i];
+ trunk_request_t *treq = h->coalesced[i];
udp_request_t *u;
request_t *request;
/*
* We *think* we sent this, but we might not had :(
*/
- fr_assert(treq->state == FR_TRUNK_REQUEST_STATE_SENT);
+ fr_assert(treq->state == TRUNK_REQUEST_STATE_SENT);
request = treq->request;
u = talloc_get_type_abort(treq->preq, udp_request_t);
if (fr_event_timer_at(u, el, &u->ev, u->retry.next, request_retry, treq) < 0) {
RERROR("Failed inserting retransmit timeout for connection");
- fr_trunk_request_signal_fail(treq);
+ trunk_request_signal_fail(treq);
}
/*
partial = h->send.data + left;
u->outstanding = true;
- fr_trunk_request_signal_partial(h->coalesced[i]);
+ trunk_request_signal_partial(h->coalesced[i]);
continue;
}
* The cancel logic runs as per-normal and cleans up
* the request ready for sending again...
*/
- fr_trunk_request_requeue(h->coalesced[i]);
+ trunk_request_requeue(h->coalesced[i]);
fr_assert(!u->outstanding); /* must have called udp_request_requeue() */
}
h->send.write = partial;
}
-static void request_demux(UNUSED fr_event_list_t *el, fr_trunk_connection_t *tconn, fr_connection_t *conn, UNUSED void *uctx)
+static void request_demux(UNUSED fr_event_list_t *el, trunk_connection_t *tconn, connection_t *conn, UNUSED void *uctx)
{
udp_handle_t *h = talloc_get_type_abort(conn->h, udp_handle_t);
bool do_read = true;
ssize_t slen;
size_t available, used, packet_len;
- fr_trunk_request_t *treq;
+ trunk_request_t *treq;
request_t *request;
udp_request_t *u;
udp_result_t *r;
ERROR("%s - Failed reading response from socket: %s",
h->module_name, fr_syserror(errno));
- fr_trunk_connection_signal_reconnect(tconn, FR_CONNECTION_FAILED);
+ trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED);
return;
}
if (packet_len > h->inst->max_packet_size) {
ERROR("%s - Packet is larger than max_packet_size",
h->module_name);
- fr_trunk_connection_signal_reconnect(tconn, FR_CONNECTION_FAILED);
+ trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED);
return;
}
continue;
}
- treq = talloc_get_type_abort(treq, fr_trunk_request_t);
+ treq = talloc_get_type_abort(treq, trunk_request_t);
request = treq->request;
fr_assert(request != NULL);
u = talloc_get_type_abort(treq->preq, udp_request_t);
slen = decode(request->reply_ctx, &reply, &code, h, request, u, h->recv.read, packet_len);
if (slen < 0) {
// @todo - give real decode error?
- fr_trunk_connection_signal_reconnect(tconn, FR_CONNECTION_FAILED);
+ trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED);
return;
}
h->recv.read += packet_len;
r->rcode = RLM_MODULE_OK;
// r->rcode = radius_code_to_rcode[code];
fr_pair_list_append(&request->reply_pairs, &reply);
- fr_trunk_request_signal_complete(treq);
+ trunk_request_signal_complete(treq);
}
}
*
* Frees encoded packets if the request is being moved to a new connection
*/
-static void request_cancel(fr_connection_t *conn, void *preq_to_reset,
- fr_trunk_cancel_reason_t reason, UNUSED void *uctx)
+static void request_cancel(connection_t *conn, void *preq_to_reset,
+ trunk_cancel_reason_t reason, UNUSED void *uctx)
{
udp_request_t *u = talloc_get_type_abort(preq_to_reset, udp_request_t);
* Request has been requeued on the same
* connection due to timeout or DUP signal.
*/
- if (reason == FR_TRUNK_CANCEL_REASON_REQUEUE) {
+ if (reason == TRUNK_CANCEL_REASON_REQUEUE) {
udp_handle_t *h = talloc_get_type_abort(conn->h, udp_handle_t);
udp_request_reset(h, u);
/** Clear out anything associated with the handle from the request
*
*/
-static void request_conn_release(fr_connection_t *conn, void *preq_to_reset, UNUSED void *uctx)
+static void request_conn_release(connection_t *conn, void *preq_to_reset, UNUSED void *uctx)
{
udp_request_t *u = talloc_get_type_abort(preq_to_reset, udp_request_t);
udp_handle_t *h = talloc_get_type_abort(conn->h, udp_handle_t);
*
*/
static void request_fail(request_t *request, NDEBUG_UNUSED void *preq, void *rctx,
- NDEBUG_UNUSED fr_trunk_request_state_t state, UNUSED void *uctx)
+ NDEBUG_UNUSED trunk_request_state_t state, UNUSED void *uctx)
{
udp_result_t *r = talloc_get_type_abort(rctx, udp_result_t);
#ifndef NDEBUG
fr_assert(!u->ev); /* Dealt with by request_conn_release */
- fr_assert(state != FR_TRUNK_REQUEST_STATE_INIT);
+ fr_assert(state != TRUNK_REQUEST_STATE_INIT);
r->rcode = RLM_MODULE_FAIL;
r->treq = NULL;
* trunk so it can clean up the treq.
*/
case FR_SIGNAL_CANCEL:
- fr_trunk_request_signal_cancel(r->treq);
+ trunk_request_signal_cancel(r->treq);
r->treq = NULL;
talloc_free(r); /* Should be freed soon anyway, but better to be explicit */
return;
* connection is dead, then a callback will move
* this request to a new connection.
*/
- fr_trunk_request_requeue(r->treq);
+ trunk_request_requeue(r->treq);
return;
default:
*/
static int _udp_result_free(udp_result_t *r)
{
- fr_trunk_request_t *treq;
+ trunk_request_t *treq;
udp_request_t *u;
if (!r->treq) return 0;
- treq = talloc_get_type_abort(r->treq, fr_trunk_request_t);
+ treq = talloc_get_type_abort(r->treq, trunk_request_t);
u = talloc_get_type_abort(treq->preq, udp_request_t);
fr_assert_msg(!u->ev, "udp_result_t freed with active timer");
udp_thread_t *t = talloc_get_type_abort(thread, udp_thread_t);
udp_result_t *r;
udp_request_t *u;
- fr_trunk_request_t *treq;
- fr_trunk_enqueue_t q;
+ trunk_request_t *treq;
+ trunk_enqueue_t q;
fr_assert(FR_TACACS_PACKET_CODE_VALID(request->packet->code));
- treq = fr_trunk_request_alloc(t->trunk, request);
+ treq = trunk_request_alloc(t->trunk, request);
if (!treq) RETURN_MODULE_FAIL;
MEM(r = talloc_zero(request, udp_result_t));
r->rcode = RLM_MODULE_FAIL;
- q = fr_trunk_request_enqueue(&treq, t->trunk, request, u, r);
+ q = trunk_request_enqueue(&treq, t->trunk, request, u, r);
if (q < 0) {
fr_assert(!u->packet); /* Should not have been fed to the muxer */
- fr_trunk_request_free(&treq); /* Return to the free list */
+ trunk_request_free(&treq); /* Return to the free list */
fail:
talloc_free(r);
RETURN_MODULE_FAIL;
/*
* All destinations are down.
*/
- if (q == FR_TRUNK_ENQUEUE_IN_BACKLOG) {
+ if (q == TRUNK_ENQUEUE_IN_BACKLOG) {
RDEBUG("All destinations are down - cannot send packet");
goto fail;
}
rlm_tacacs_tcp_t *inst = talloc_get_type_abort(mctx->mi->data, rlm_tacacs_tcp_t);
udp_thread_t *thread = talloc_get_type_abort(mctx->thread, udp_thread_t);
- static fr_trunk_io_funcs_t io_funcs = {
+ static trunk_io_funcs_t io_funcs = {
.connection_alloc = thread_conn_alloc,
.connection_notify = thread_conn_notify,
.request_prioritise = request_prioritise,
thread->el = mctx->el;
thread->inst = inst;
- thread->trunk = fr_trunk_alloc(thread, mctx->el, &io_funcs,
+ thread->trunk = trunk_alloc(thread, mctx->el, &io_funcs,
inst->trunk_conf, inst->parent->name, thread, false);
if (!thread->trunk) return -1;