This is similar to the changes made to modules.
These changes also switch the xlat instance and thread instance trees from rbtrees to heaps, which allows O(1) lookup of thread instance data and is significantly more efficient than the old code.
This commit is large because it changes the function signature of every xlat function, and removes a large amount of module boilerplate that dealt with copying module instance and thread instance data around.
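For illustration, a minimal sketch of an xlat callback ported to the new calling convention (my_xlat and my_xlat_inst_t are hypothetical names); instance, thread-instance and resume data are now all reached through the single xlat_ctx_t argument:

static xlat_action_t my_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
			     xlat_ctx_t const *xctx,
			     request_t *request, fr_value_box_list_t *in)
{
	my_xlat_inst_t const *inst = xctx->inst;	/* was the separate xlat_inst argument */

	/* ... append fr_value_box_t results to 'out' as before ... */

	return XLAT_ACTION_DONE;
}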
static int thread_instantiate(TALLOC_CTX *ctx, fr_event_list_t *el, UNUSED void *uctx)
{
if (modules_thread_instantiate(ctx, el) < 0) return -1;
- if (xlat_thread_instantiate(ctx) < 0) return -1;
+ if (xlat_thread_instantiate(ctx, el) < 0) return -1;
#ifdef WITH_TLS
if (fr_openssl_thread_init(main_config->openssl_async_pool_init,
main_config->openssl_async_pool_max) < 0) return -1;
} command_entry_t;
static xlat_action_t xlat_test(UNUSED TALLOC_CTX *ctx, UNUSED fr_dcursor_t *out,
- UNUSED request_t *request, UNUSED void const *mod_inst,
- UNUSED void *xlat_thread_inst, UNUSED fr_value_box_list_t *in)
+ UNUSED xlat_ctx_t const *xctx, UNUSED request_t *request,
+ UNUSED fr_value_box_list_t *in)
{
return XLAT_ACTION_DONE;
}
/*
* Read a file composed of xlats and expected results
*/
-static bool do_xlats(char const *filename, FILE *fp)
+static bool do_xlats(fr_event_list_t *el, char const *filename, FILE *fp)
{
int lineno = 0;
ssize_t len;
xlat_exp_t *head = NULL;
fr_sbuff_parse_rules_t p_rules = { .escapes = &fr_value_unescape_double };
- slen = xlat_tokenize_ephemeral(xlat_ctx, &head, NULL,
+ slen = xlat_tokenize_ephemeral(xlat_ctx, &head, el, NULL,
&FR_SBUFF_IN(fmt, talloc_array_length(fmt) - 1), &p_rules, NULL);
if (slen <= 0) {
talloc_free(xlat_ctx);
* Simulate thread specific instantiation
*/
if (modules_thread_instantiate(thread_ctx, el) < 0) EXIT_WITH_FAILURE;
- if (xlat_thread_instantiate(thread_ctx) < 0) EXIT_WITH_FAILURE;
+ if (xlat_thread_instantiate(thread_ctx, el) < 0) EXIT_WITH_FAILURE;
unlang_thread_instantiate(thread_ctx);
/*
* For simplicity, read xlat's.
*/
if (xlat_only) {
- if (!do_xlats(input_file, fp)) ret = EXIT_FAILURE;
+ if (!do_xlats(el, input_file, fp)) ret = EXIT_FAILURE;
if (input_file) fclose(fp);
goto cleanup;
}
*
* @ingroup xlat_functions
*/
-static xlat_action_t aka_sim_xlat_id_method_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
+static xlat_action_t aka_sim_xlat_id_method_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request,
fr_value_box_list_t *in)
{
char const *method;
*
* @ingroup xlat_functions
*/
-static xlat_action_t aka_sim_xlat_id_type_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t aka_sim_xlat_id_type_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
char const *type;
fr_aka_sim_id_type_t type_hint;
*
* @ingroup xlat_functions
*/
-static xlat_action_t aka_sim_id_3gpp_temporary_id_key_index_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst,
- UNUSED void *xlat_thread_inst, fr_value_box_list_t *in)
+static xlat_action_t aka_sim_id_3gpp_temporary_id_key_index_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
fr_value_box_t *id = fr_dlist_head(in);
fr_value_box_t *vb;
*
* @ingroup xlat_functions
*/
-static xlat_action_t aka_sim_3gpp_temporary_id_decrypt_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst,
- UNUSED void *xlat_thread_inst, fr_value_box_list_t *in)
+static xlat_action_t aka_sim_3gpp_temporary_id_decrypt_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
uint8_t tag;
char out_tag = '\0', *buff;
*
* @ingroup xlat_functions
*/
-static xlat_action_t aka_sim_3gpp_temporary_id_encrypt_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst,
- UNUSED void *xlat_thread_inst, fr_value_box_list_t *in)
+static xlat_action_t aka_sim_3gpp_temporary_id_encrypt_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
char encrypted[AKA_SIM_3GPP_PSEUDONYM_LEN + 1];
uint8_t tag = 0;
*
* @ingroup xlat_functions
*/
-static xlat_action_t xlat_config(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t xlat_config(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
char const *value;
CONF_PAIR *cp;
///< Instantiated xlats are not added to the global
///< trees, regexes are not JIT'd.
+ fr_event_list_t *runtime_el; //!< The eventlist to use for runtime instantiation
+ ///< of xlats.
+
tmpl_attr_prefix_t prefix; //!< Whether the attribute reference requires
///< a prefix.
};
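Callers which tokenize xlats at runtime now have to supply the event list both in the tmpl rules and to the tokenizer. A rough sketch (where el comes from depends on the caller; fmt and ctx are assumed to be in scope):

	xlat_exp_t	*head = NULL;
	xlat_flags_t	flags = { 0 };
	tmpl_rules_t	t_rules = {
		.at_runtime = true,
		.runtime_el = el	/* fr_event_list_t used for ephemeral instantiation */
	};
	ssize_t		slen;

	slen = xlat_tokenize_ephemeral(ctx, &head, el, &flags,
				       &FR_SBUFF_IN(fmt, strlen(fmt)), NULL, &t_rules);
	if (slen <= 0) return -1;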
if (!t_rules->at_runtime) {
slen = xlat_tokenize(vpt, &head, &flags, &our_in, p_rules, t_rules);
} else {
- slen = xlat_tokenize_ephemeral(vpt, &head, &flags, &our_in, p_rules, t_rules);
+ slen = xlat_tokenize_ephemeral(vpt, &head,
+ t_rules->runtime_el, &flags, &our_in, p_rules, t_rules);
}
if (!head) return slen;
if (!t_rules->at_runtime) {
slen = xlat_tokenize(vpt, &head, &flags, &our_in, p_rules, t_rules);
} else {
- slen = xlat_tokenize_ephemeral(vpt, &head, &flags, &our_in, p_rules, t_rules);
+ slen = xlat_tokenize_ephemeral(vpt, &head, t_rules->runtime_el,
+ &flags, &our_in, p_rules, t_rules);
}
if (!head) return slen;
/** Retrieve attributes from a special trigger list
*
*/
-xlat_action_t trigger_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+xlat_action_t trigger_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
fr_pair_list_t *head = NULL;
fr_dict_attr_t const *da;
RDEBUG("Running trigger \"%s\"", trigger->command);
- if (unlang_xlat_push(request, &trigger->args, request,
+ if (unlang_xlat_push(request, NULL, &trigger->args, request,
trigger->xlat, UNLANG_SUB_FRAME) < 0) RETURN_MODULE_FAIL;
return UNLANG_ACTION_PUSHED_CHILD;
extern xlat_arg_parser_t const trigger_xlat_args[];
-xlat_action_t trigger_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in);
+xlat_action_t trigger_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in);
int trigger_exec_init(CONF_SECTION const *cs);
#endif
} unlang_frame_state_foreach_t;
-static xlat_action_t unlang_foreach_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, void *xlat_thread_inst,
- UNUSED fr_value_box_list_t *in);
+static xlat_action_t unlang_foreach_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ xlat_ctx_t const *xctx,
+ request_t *request, UNUSED fr_value_box_list_t *in);
#define FOREACH_REQUEST_DATA (void *)unlang_foreach_xlat
*
* @ingroup xlat_functions
*/
-static xlat_action_t unlang_foreach_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- void const *xlat_inst, UNUSED void *xlat_thread_inst,
- UNUSED fr_value_box_list_t *in)
+static xlat_action_t unlang_foreach_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ xlat_ctx_t const *xctx,
+ request_t *request, UNUSED fr_value_box_list_t *in)
{
fr_pair_t **pvp;
- int const *inst = xlat_inst;
+ int const *inst = xctx->inst;
fr_value_box_t *vb;
pvp = (fr_pair_t **) request_data_reference(request, FOREACH_REQUEST_DATA, *inst);
*
* @ingroup xlat_functions
*/
-static xlat_action_t unlang_interpret_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t unlang_interpret_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
unlang_stack_t *stack = request->stack;
int depth = stack->depth;
return UNLANG_ACTION_PUSHED_CHILD;
case TMPL_TYPE_XLAT:
- if (unlang_xlat_push(update_state, &update_state->lhs_result,
+ if (unlang_xlat_push(update_state, NULL, &update_state->lhs_result,
request, tmpl_xlat(map->lhs), false) < 0) {
*p_result = RLM_MODULE_FAIL;
return UNLANG_ACTION_STOP_PROCESSING;
return UNLANG_ACTION_PUSHED_CHILD;
case TMPL_TYPE_XLAT:
- if (unlang_xlat_push(update_state, &update_state->rhs_result,
+ if (unlang_xlat_push(update_state, NULL, &update_state->rhs_result,
request, tmpl_xlat(map->rhs), false) < 0) {
*p_result = RLM_MODULE_FAIL;
return UNLANG_ACTION_STOP_PROCESSING;
return UNLANG_ACTION_PUSHED_CHILD;
case TMPL_TYPE_XLAT:
- if (unlang_xlat_push(map_proc_state, &map_proc_state->src_result,
+ if (unlang_xlat_push(map_proc_state, NULL, &map_proc_state->src_result,
request, tmpl_xlat(inst->src), false) < 0) {
*p_result = RLM_MODULE_FAIL;
return UNLANG_ACTION_STOP_PROCESSING;
dl_module_inst_t *dl_inst; //!< Module instance to pass to callbacks.
///< Use dl_inst->data to get instance data.
void *thread; //!< Thread specific module instance.
- void const *ctx; //!< ctx data to pass to callbacks.
+ void const *rctx; //!< rctx data to pass to callbacks.
fr_event_timer_t const *ev; //!< Event in this worker's event heap.
} unlang_module_event_t;
fr_assert(ev->fd == fd);
- ev->fd_read(MODULE_CTX(ev->dl_inst, ev->thread, UNCONST(void *, ev->ctx)), ev->request, fd);
+ ev->fd_read(MODULE_CTX(ev->dl_inst, ev->thread, UNCONST(void *, ev->rctx)), ev->request, fd);
}
/** Frees an unlang event, removing it from the request's event loop
*/
static int _unlang_event_free(unlang_module_event_t *ev)
{
- if (ev->request) (void) request_data_get(ev->request, ev->ctx, UNLANG_TYPE_MODULE);
+ if (ev->request) (void) request_data_get(ev->request, ev->rctx, UNLANG_TYPE_MODULE);
if (ev->ev) {
(void) fr_event_timer_delete(&(ev->ev));
{
unlang_module_event_t *ev = talloc_get_type_abort(ctx, unlang_module_event_t);
- ev->timeout(MODULE_CTX(ev->dl_inst, ev->thread, UNCONST(void *, ev->ctx)), ev->request, now);
+ ev->timeout(MODULE_CTX(ev->dl_inst, ev->thread, UNCONST(void *, ev->rctx)), ev->request, now);
talloc_free(ev);
}
*
* param[in] request the current request.
* param[in] callback to call.
- * param[in] ctx for the callback.
+ * param[in] rctx to pass to the callback.
* param[in] timeout when to call the timeout (i.e. now + timeout).
* @return
* - 0 on success.
* - <0 on error.
*/
int unlang_module_timeout_add(request_t *request, unlang_module_timeout_t callback,
- void const *ctx, fr_time_t when)
+ void const *rctx, fr_time_t when)
{
unlang_stack_t *stack = request->stack;
unlang_stack_frame_t *frame = &stack->frame[stack->depth];
unlang_module_event_t *ev;
unlang_module_t *mc;
- unlang_frame_state_module_t *state = talloc_get_type_abort(frame->state,
- unlang_frame_state_module_t);
+ unlang_frame_state_module_t *state = talloc_get_type_abort(frame->state, unlang_frame_state_module_t);
fr_assert(stack->depth > 0);
fr_assert(frame->instruction->type == UNLANG_TYPE_MODULE);
.timeout = callback,
.dl_inst = mc->instance->dl_inst,
.thread = state->thread,
- .ctx = ctx
+ .rctx = rctx
};
if (fr_event_timer_at(request, unlang_interpret_event_list(request), &ev->ev,
return -1;
}
- (void) request_data_talloc_add(request, ctx, UNLANG_TYPE_MODULE, unlang_module_event_t, ev, true, false, false);
+ (void) request_data_talloc_add(request, rctx, UNLANG_TYPE_MODULE, unlang_module_event_t, ev, true, false, false);
talloc_set_destructor(ev, _unlang_event_free);
unlang_module_event_t *ev = talloc_get_type_abort(ctx, unlang_module_event_t);
fr_assert(ev->fd == fd);
- ev->fd_write(MODULE_CTX(ev->dl_inst, ev->thread, UNCONST(void *, ev->ctx)), ev->request, fd);
+ ev->fd_write(MODULE_CTX(ev->dl_inst, ev->thread, UNCONST(void *, ev->rctx)), ev->request, fd);
}
/** Call the callback registered for an I/O error event
fr_assert(ev->fd == fd);
- ev->fd_error(MODULE_CTX(ev->dl_inst, ev->thread, UNCONST(void *, ev->ctx)), ev->request, fd);
+ ev->fd_error(MODULE_CTX(ev->dl_inst, ev->thread, UNCONST(void *, ev->rctx)), ev->request, fd);
}
* a suspended state.
* @param[in] error callback. If the fd enters an error state. Should cleanup any
* handles wrapping the file descriptor, and any outstanding requests.
- * @param[in] ctx for the callback.
+ * @param[in] rctx for the callback.
* @param[in] fd to watch.
* @return
* - 0 on success.
unlang_module_fd_event_t read,
unlang_module_fd_event_t write,
unlang_module_fd_event_t error,
- void const *ctx, int fd)
+ void const *rctx, int fd)
{
unlang_stack_t *stack = request->stack;
unlang_stack_frame_t *frame = &stack->frame[stack->depth];
ev->fd_error = error;
ev->dl_inst = mc->instance->dl_inst;
ev->thread = state->thread;
- ev->ctx = ctx;
+ ev->rctx = rctx;
/*
* Register for events on the file descriptor
return -1;
}
- (void) request_data_talloc_add(request, ctx, fd, unlang_module_event_t, ev, true, false, false);
+ (void) request_data_talloc_add(request, rctx, fd, unlang_module_event_t, ev, true, false, false);
talloc_set_destructor(ev, _unlang_event_free);
return 0;
* will then call the module resumption frame, allowing the module to continue execution.
*
* @param[in] ctx To allocate talloc value boxes and values in.
+ * @param[out] p_success Whether xlat evaluation was successful.
* @param[out] out Where to write the result of the expansion.
* @param[in] request The current request.
* @param[in] exp XLAT expansion to evaluate.
* @return
* - UNLANG_ACTION_YIELD
*/
-unlang_action_t unlang_module_yield_to_xlat(TALLOC_CTX *ctx, fr_value_box_list_t *out,
+unlang_action_t unlang_module_yield_to_xlat(TALLOC_CTX *ctx, bool *p_success, fr_value_box_list_t *out,
request_t *request, xlat_exp_t const *exp,
unlang_module_resume_t resume,
unlang_module_signal_t signal, void *rctx)
/*
* Push the xlat function
*/
- if (unlang_xlat_push(ctx, out, request, exp, false) < 0) return UNLANG_ACTION_STOP_PROCESSING;
+ if (unlang_xlat_push(ctx, p_success, out, request, exp, false) < 0) return UNLANG_ACTION_STOP_PROCESSING;
return UNLANG_ACTION_PUSHED_CHILD;
}
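A module yielding to an xlat can now pass an optional bool to capture whether the expansion succeeded; a sketch of the call (the rctx fields, inst->xlat, mod_resume and mod_signal are hypothetical):

	return unlang_module_yield_to_xlat(rctx, &rctx->xlat_ok, &rctx->result,
					   request, inst->xlat,
					   mod_resume, mod_signal, rctx);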
typedef void (*unlang_module_signal_t)(module_ctx_t const *mctx, request_t *request, fr_state_signal_t action);
int unlang_module_timeout_add(request_t *request, unlang_module_timeout_t callback,
- void const *ctx, fr_time_t when);
+ void const *rctx, fr_time_t when);
int unlang_module_timeout_delete(request_t *request, void const *ctx);
unlang_module_resume_t resume,
unlang_module_signal_t signal, void *rctx);
-unlang_action_t unlang_module_yield_to_xlat(TALLOC_CTX *ctx, fr_value_box_list_t *out,
+unlang_action_t unlang_module_yield_to_xlat(TALLOC_CTX *ctx, bool *p_success, fr_value_box_list_t *out,
request_t *request, xlat_exp_t const *xlat,
unlang_module_resume_t resume,
unlang_module_signal_t signal, void *rctx);
* text in the configuration files.
*/
frame_repeat(frame, unlang_tmpl_exec_nowait_resume);
- if (unlang_xlat_push(state->ctx, &state->box, request, tmpl_xlat(ut->tmpl), false) < 0) {
+ if (unlang_xlat_push(state->ctx, NULL, &state->box, request, tmpl_xlat(ut->tmpl), false) < 0) {
*p_result = RLM_MODULE_FAIL;
return UNLANG_ACTION_STOP_PROCESSING;
}
*/
if (ut->tmpl->type == TMPL_TYPE_XLAT) {
frame_repeat(frame, unlang_tmpl_resume);
- if (unlang_xlat_push(state->ctx, &state->box, request, tmpl_xlat(ut->tmpl), false) < 0) {
+ if (unlang_xlat_push(state->ctx, NULL, &state->box, request, tmpl_xlat(ut->tmpl), false) < 0) {
*p_result = RLM_MODULE_FAIL;
return UNLANG_ACTION_STOP_PROCESSING;
}
* Expand the arguments to the program we're executing.
*/
frame_repeat(frame, unlang_tmpl_exec_wait_resume);
- if (unlang_xlat_push(state->ctx, &state->box, request, xlat, false) < 0) {
+ if (unlang_xlat_push(state->ctx, NULL, &state->box, request, xlat, false) < 0) {
*p_result = RLM_MODULE_FAIL;
return UNLANG_ACTION_STOP_PROCESSING;
}
///< expansion.
bool alternate; //!< record which alternate branch we
///< previously took.
- xlat_func_resume_t resume; //!< called on resume
+ xlat_func_t resume; //!< called on resume
xlat_func_signal_t signal; //!< called on signal
void *rctx; //!< for resume / signal
+
+ bool *success; //!< If set, where to record the result
+ ///< of the execution.
} unlang_frame_state_xlat_t;
/** Wrap an #fr_event_timer_t providing data needed for unlang events
*
*/
typedef struct {
- request_t *request; //!< Request this event pertains to.
- int fd; //!< File descriptor to wait on.
- fr_unlang_xlat_timeout_t timeout; //!< Function to call on timeout.
- fr_unlang_xlat_fd_event_t fd_read; //!< Function to call when FD is readable.
- fr_unlang_xlat_fd_event_t fd_write; //!< Function to call when FD is writable.
- fr_unlang_xlat_fd_event_t fd_error; //!< Function to call when FD has errored.
- void const *inst; //!< Module instance to pass to callbacks.
- void *thread; //!< Thread specific xlat instance.
- void const *ctx; //!< ctx data to pass to callbacks.
- fr_event_timer_t const *ev; //!< Event in this worker's event heap.
+ request_t *request; //!< Request this event pertains to.
+ int fd; //!< File descriptor to wait on.
+ fr_unlang_xlat_timeout_t timeout; //!< Function to call on timeout.
+ fr_unlang_xlat_fd_event_t fd_read; //!< Function to call when FD is readable.
+ fr_unlang_xlat_fd_event_t fd_write; //!< Function to call when FD is writable.
+ fr_unlang_xlat_fd_event_t fd_error; //!< Function to call when FD has errored.
+ xlat_inst_t *inst; //!< xlat instance data.
+ xlat_thread_inst_t *thread; //!< Thread specific xlat instance.
+ void const *rctx; //!< rctx data to pass to callbacks.
+ fr_event_timer_t const *ev; //!< Event in this worker's event heap.
} unlang_xlat_event_t;
/** Frees an unlang event, removing it from the request's event loop
*
* @param[in] el the event timer was inserted into.
* @param[in] now The current time, as held by the event_list.
- * @param[in] ctx unlang_module_event_t structure holding callbacks.
+ * @param[in] uctx unlang_module_event_t structure holding callbacks.
*
*/
-static void unlang_xlat_event_timeout_handler(UNUSED fr_event_list_t *el, fr_time_t now, void *ctx)
+static void unlang_xlat_event_timeout_handler(UNUSED fr_event_list_t *el, fr_time_t now, void *uctx)
{
- unlang_xlat_event_t *ev = talloc_get_type_abort(ctx, unlang_xlat_event_t);
-
- void *mutable_ctx;
- void *mutable_inst;
-
- memcpy(&mutable_ctx, &ev->ctx, sizeof(mutable_ctx));
- memcpy(&mutable_inst, &ev->inst, sizeof(mutable_inst));
+ unlang_xlat_event_t *ev = talloc_get_type_abort(uctx, unlang_xlat_event_t);
- ev->timeout(ev->request, mutable_inst, ev->thread, mutable_ctx, now);
+ /*
+ * If the timeout's fired then the xlat must necessarily
+ * be yielded, so it's fine to pass in its rctx.
+ *
+ * It should be able to free the rctx if it wants to.
+ * We never free it explicitly, and instead rely on
+ * talloc parenting.
+ */
+ ev->timeout(XLAT_CTX(ev->inst->data,
+ ev->thread->data,
+ ev->thread->mctx,
+ UNCONST(void *, ev->rctx)),
+ ev->request, now);
/* Remove old references from the request */
talloc_free(ev);
*
* @param[in] request the request
* @param[in] callback to run when the timeout hits
- * @param[in] ctx passed to the callback
+ * @param[in] rctx passed to the callback
* @param[in] when when the timeout fires
* @return
* - <0 on error
* - 0 on success
*/
-int unlang_xlat_event_timeout_add(request_t *request, fr_unlang_xlat_timeout_t callback,
- void const *ctx, fr_time_t when)
+int unlang_xlat_timeout_add(request_t *request,
+ fr_unlang_xlat_timeout_t callback, void const *rctx, fr_time_t when)
{
unlang_stack_t *stack = request->stack;
unlang_stack_frame_t *frame = &stack->frame[stack->depth];
if (!state->event_ctx) MEM(state->event_ctx = talloc_zero(state, bool));
ev = talloc_zero(state->event_ctx, unlang_xlat_event_t);
- if (!ev) return -1;
+ if (unlikely(!ev)) return -1;
ev->request = request;
ev->fd = -1;
ev->timeout = callback;
ev->inst = state->exp->call.inst;
ev->thread = xlat_thread_instance_find(state->exp);
- ev->ctx = ctx;
+ ev->rctx = rctx;
if (fr_event_timer_at(request, unlang_interpret_event_list(request),
&ev->ev, when, unlang_xlat_event_timeout_handler, ev) < 0) {
/** Push a pre-compiled xlat onto the stack for evaluation
*
* @param[in] ctx To allocate value boxes and values in.
+ * @param[out] p_success If set, and execution succeeds, true will be written
+ * here. If execution fails, false will be written.
* @param[out] out Where to write the result of the expansion.
* @param[in] request to push xlat onto.
* @param[in] exp node to evaluate.
* - 0 on success.
* - -1 on failure.
*/
-int unlang_xlat_push(TALLOC_CTX *ctx, fr_value_box_list_t *out,
+int unlang_xlat_push(TALLOC_CTX *ctx, bool *p_success, fr_value_box_list_t *out,
request_t *request, xlat_exp_t const *exp, bool top_frame)
{
/** Static instruction for performing xlat evaluations
/*
* Push a new xlat eval frame onto the stack
*/
- if (unlang_interpret_push(request, &xlat_instruction, RLM_MODULE_NOT_SET, UNLANG_NEXT_STOP, top_frame) < 0) {
- return -1;
- }
+ if (unlang_interpret_push(request, &xlat_instruction,
+ RLM_MODULE_NOT_SET, UNLANG_NEXT_STOP, top_frame) < 0) return -1;
frame = &stack->frame[stack->depth];
/*
*/
MEM(frame->state = state = talloc_zero(stack, unlang_frame_state_xlat_t));
state->exp = talloc_get_type_abort_const(exp, xlat_exp_t); /* Ensure the node is valid */
+ state->success = p_success;
+ state->ctx = ctx;
+ /*
+ * Initialise the input and output lists
+ */
fr_dcursor_init(&state->values, out);
fr_value_box_list_init(&state->rhead);
- state->ctx = ctx;
-
return 0;
}
* multiple sibling nodes.
*/
fr_dlist_talloc_free(&state->rhead);
- if (unlang_xlat_push(state->ctx, &state->rhead, request, child, false) < 0) {
+ if (unlang_xlat_push(state->ctx, state->success, &state->rhead, request, child, false) < 0) {
*p_result = RLM_MODULE_FAIL;
return UNLANG_ACTION_STOP_PROCESSING;
}
return UNLANG_ACTION_PUSHED_CHILD;
+ case XLAT_ACTION_PUSH_UNLANG:
+ repeatable_set(frame); /* Call the xlat code on the way back down */
+ return UNLANG_ACTION_PUSHED_CHILD;
+
case XLAT_ACTION_YIELD:
if (!state->resume) {
RWDEBUG("Missing call to unlang_xlat_yield()");
return UNLANG_ACTION_YIELD;
case XLAT_ACTION_DONE:
+ if (state->success) *state->success = true;
*p_result = RLM_MODULE_OK;
return UNLANG_ACTION_CALCULATE_RESULT;
case XLAT_ACTION_FAIL:
fail:
+ if (state->success) *state->success = false;
*p_result = RLM_MODULE_FAIL;
return UNLANG_ACTION_CALCULATE_RESULT;
* multiple sibling nodes.
*/
fr_dlist_talloc_free(&state->rhead);
- if (unlang_xlat_push(state->ctx, &state->rhead, request, child, false) < 0) {
+ if (unlang_xlat_push(state->ctx, state->success, &state->rhead, request, child, false) < 0) {
*p_result = RLM_MODULE_FAIL;
return UNLANG_ACTION_STOP_PROCESSING;
}
return UNLANG_ACTION_PUSHED_CHILD;
+ case XLAT_ACTION_PUSH_UNLANG:
+ repeatable_set(frame); /* Call the xlat code on the way back down */
+ return UNLANG_ACTION_PUSHED_CHILD;
+
case XLAT_ACTION_YIELD:
if (!state->resume) {
RWDEBUG("Missing call to unlang_xlat_yield()");
return UNLANG_ACTION_YIELD;
case XLAT_ACTION_DONE:
+ if (state->success) *state->success = true;
*p_result = RLM_MODULE_OK;
return UNLANG_ACTION_CALCULATE_RESULT;
case XLAT_ACTION_FAIL:
fail:
+ if (state->success) *state->success = false;
*p_result = RLM_MODULE_FAIL;
return UNLANG_ACTION_CALCULATE_RESULT;
*p_result = RLM_MODULE_OK;
return UNLANG_ACTION_CALCULATE_RESULT;
+ case XLAT_ACTION_PUSH_UNLANG:
+ repeatable_set(frame);
+ return UNLANG_ACTION_PUSHED_CHILD;
+
case XLAT_ACTION_PUSH_CHILD:
fr_assert(0);
FALL_THROUGH;
* @return always returns XLAT_ACTION_YIELD
*/
xlat_action_t unlang_xlat_yield(request_t *request,
- xlat_func_resume_t resume, xlat_func_signal_t signal,
+ xlat_func_t resume, xlat_func_signal_t signal,
void *rctx)
{
unlang_stack_t *stack = request->stack;
*/
typedef enum {
XLAT_ACTION_PUSH_CHILD = 1, //!< A deeper level of nesting needs to be evaluated.
+ XLAT_ACTION_PUSH_UNLANG, //!< An xlat function pushed an unlang frame onto the unlang stack.
+ ///< This frame needs to be evaluated, and then we need to call
+ ///< the xlat's resume function.
XLAT_ACTION_YIELD, //!< An xlat function pushed a resume frame onto the stack.
XLAT_ACTION_DONE, //!< We're done evaluating this level of nesting.
XLAT_ACTION_FAIL //!< An xlat function failed.
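The pattern (used by the reworked redundant xlats below) is roughly: register a resume function, push the child expansion, then return the new action. A sketch, where my_resume, child and the rctx type are illustrative:

	if (unlang_xlat_yield(request, my_resume, NULL, rctx) != XLAT_ACTION_YIELD) return XLAT_ACTION_FAIL;

	if (unlang_xlat_push(ctx, &rctx->last_success, out->dlist,
			     request, child, UNLANG_SUB_FRAME) < 0) return XLAT_ACTION_FAIL;

	return XLAT_ACTION_PUSH_UNLANG;	/* evaluate the pushed frame, then call my_resume() */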
*
*/
struct xlat_inst {
- fr_rb_node_t inst_node; //!< Entry in rbtree of thread instances.
+ fr_heap_index_t idx; //!< Entry in heap of xlat instances.
+ ///< Identical indexes are used for
+ ///< global instance data and thread-specific
+ ///< instance data.
- xlat_exp_t const *node; //!< Node this data relates to.
+ xlat_exp_t *node; //!< Node this data relates to.
void *data; //!< xlat node specific instance data.
};
*
*/
struct xlat_thread_inst {
- fr_rb_node_t inst_node; //!< Entry in rbtree of thread instances.
+ fr_heap_index_t idx; //!< Entry in heap of xlat thread instances.
+ ///< Identical indexes are used for
+ ///< global instance data and thread-specific
+ ///< instance data.
+
+ fr_event_list_t *el; //!< Event list associated with this thread.
xlat_exp_t const *node; //!< Node this data relates to.
void *data; //!< Thread specific instance data.
+ module_ctx_t const *mctx; //!< A synthesised module calling ctx containing
+ ///< module global and thread instance data.
+
uint64_t total_calls; //! total number of times we've been called
uint64_t active_callers; //! number of active callers. i.e. number of current yields
};
*
*/
typedef struct {
- bool needs_resolving; //!< Needs pass2 resolution.
+ bool needs_resolving;//!< Needs pass2 resolution.
bool needs_async; //!< Node and all child nodes are guaranteed to not
///< require asynchronous expansion.
bool pure; //!< has no external side effects
* @note The callback is automatically removed on unlang_interpret_mark_runnable(), i.e. if an event
* on a registered FD occurs before the timeout event fires.
*
+ * @param[in] xctx xlat calling ctx. Contains all instance data.
* @param[in] request the request.
- * @param[in] xlat_inst the xlat instance.
- * @param[in] xlat_thread_inst data specific to this xlat instance.
- * @param[in] rctx Resume ctx provided when the xlat last yielded.
* @param[in] fired the time the timeout event actually fired.
*/
-typedef void (*fr_unlang_xlat_timeout_t)(request_t *request, void *xlat_inst,
- void *xlat_thread_inst, void *rctx, fr_time_t fired);
+typedef void (*fr_unlang_xlat_timeout_t)(xlat_ctx_t const *xctx, request_t *request, fr_time_t fired);
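A timeout callback registered with unlang_xlat_timeout_add() now receives its instance data and the rctx through the same structure. A sketch (my_timeout, my_rctx_t and the timed_out flag are hypothetical):

static void my_timeout(xlat_ctx_t const *xctx, request_t *request, UNUSED fr_time_t fired)
{
	my_rctx_t *rctx = talloc_get_type_abort(xctx->rctx, my_rctx_t);	/* rctx given to unlang_xlat_timeout_add() */

	rctx->timed_out = true;
	unlang_interpret_mark_runnable(request);	/* let the yielded xlat resume */
}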
/** A callback when the FD is ready for reading
*
*
* @note The callback is automatically removed on unlang_interpret_mark_runnable(), so
*
+ * @param[in] xctx xlat calling ctx. Contains all instance data.
* @param[in] request the current request.
- * @param[in] xlat_inst the xlat instance.
- * @param[in] xlat_thread_inst data specific to this xlat instance.
- * @param[in] rctx Resume ctx provided when the xlat last yielded.
* @param[in] fd the file descriptor.
*/
-typedef void (*fr_unlang_xlat_fd_event_t)(request_t *request, void *xlat_inst,
- void *xlat_thread_inst, void *rctx, int fd);
+typedef void (*fr_unlang_xlat_fd_event_t)(xlat_ctx_t const *xctx, request_t *request, int fd);
/** xlat callback function
*
* @param[in] ctx to allocate any fr_value_box_t in.
* @param[out] out Where to append #fr_value_box_t containing the output of
* this function.
+ * @param[in] xctx xlat calling ctx. Contains all instance data and the resume
+ * ctx if this function is being resumed.
* @param[in] request The current request.
- * @param[in] xlat_inst Global xlat instance.
- * @param[in] xlat_thread_inst Thread specific xlat instance.
* @param[in] in Input arguments.
* @return
* - XLAT_ACTION_YIELD xlat function is waiting on an I/O event and
* - XLAT_ACTION_FAIL the xlat function failed.
*/
typedef xlat_action_t (*xlat_func_t)(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, void const *xlat_inst, void *xlat_thread_inst,
- fr_value_box_list_t *in);
-
-/** xlat callback resumption function
- *
- * Ingests a list of value boxes as arguments, with arguments delimited by spaces.
- *
- * @param[in] ctx to allocate any fr_value_box_t in.
- * @param[out] out Where to append #fr_value_box_t containing the output of
- * this function.
- * @param[in] request The current request.
- * @param[in] xlat_inst Global xlat instance.
- * @param[in] xlat_thread_inst Thread specific xlat instance.
- * @param[in] in Input arguments.
- * @param[in] rctx Resume ctx provided when the xlat last yielded.
- * @return
- * - XLAT_ACTION_YIELD xlat function is waiting on an I/O event and
- * has pushed a resumption function onto the stack.
- * - XLAT_ACTION_DONE xlat function completed. This does not necessarily
- * mean it turned a result.
- * - XLAT_ACTION_FAIL the xlat function failed.
- */
-typedef xlat_action_t (*xlat_func_resume_t)(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, void const *xlat_inst, void *xlat_thread_inst,
- fr_value_box_list_t *in, void *rctx);
+ xlat_ctx_t const *xctx, request_t *request, fr_value_box_list_t *in);
/** A callback when the request gets a fr_state_signal_t.
*
* @note The callback is automatically removed on unlang_interpret_mark_runnable().
*
* @param[in] request The current request.
- * @param[in] xlat_inst the xlat instance.
- * @param[in] xlat_thread_inst data specific to this xlat instance.
- * @param[in] rctx Resume ctx provided when the xlat last yielded.
+ * @param[in] xctx xlat calling ctx. Contains all instance data.
* @param[in] action which is signalling the request.
*/
-typedef void (*xlat_func_signal_t)(request_t *request, void *xlat_inst, void *xlat_thread_inst,
- void *rctx, fr_state_signal_t action);
+typedef void (*xlat_func_signal_t)(xlat_ctx_t const *xctx, request_t *request, fr_state_signal_t action);
/** Allocate new instance data for an xlat instance
*
- * @param[out] xlat_inst Structure to populate. Allocated by #map_proc_instantiate.
- * @param[in] exp Tokenized expression to use in expansion.
- * @param[in] uctx passed to the registration function.
+ * @param[in] xctx instantiate/detach calling ctx.
+
* @return
* - 0 on success.
* - -1 on failure.
*/
-typedef int (*xlat_instantiate_t)(void *xlat_inst, xlat_exp_t const *exp, void *uctx);
+typedef int (*xlat_instantiate_t)(xlat_inst_ctx_t const *xctx);
/** Allocate new thread instance data for an xlat instance
*
- * @param[in] xlat_inst Previously instantiated xlat instance.
- * @param[out] xlat_thread_inst Thread specific structure to populate.
- * Allocated by #map_proc_instantiate.
- * @param[in] exp Tokenized expression to use in expansion.
- * @param[in] uctx passed to the registration function.
+ * @param[in] xctx thread instantiate/detach ctx.
* @return
* - 0 on success.
* - -1 on failure.
*/
-typedef int (*xlat_thread_instantiate_t)(void *xlat_inst, void *xlat_thread_inst,
- xlat_exp_t const *exp, void *uctx);
+typedef int (*xlat_thread_instantiate_t)(xlat_thread_inst_ctx_t const *xctx);
/** xlat detach callback
*
* Detach should close all handles associated with the xlat instance, and
* free any memory allocated during instantiate.
*
- * @param[in] xlat_inst to free.
- * @param[in] uctx passed to the xlat registration function.
+ * @param[in] xctx instantiate/detach calling ctx.
* @return
* - 0 on success.
* - -1 if detach failed.
*/
-typedef int (*xlat_detach_t)(void *xlat_inst, void *uctx);
+typedef int (*xlat_detach_t)(xlat_inst_ctx_t const *xctx);
/** xlat thread detach callback
*
* Detach should close all handles associated with the xlat instance, and
* free any memory allocated during instantiate.
*
- * @param[in] xlat_thread_inst to free.
- * @param[in] uctx passed to the xlat registration function.
+ * @param[in] xctx thread instantiate/detach calling ctx.
* @return
* - 0 on success.
* - -1 if detach failed.
*/
-typedef int (*xlat_thread_detach_t)(void *xlat_thread_inst, void *uctx);
+typedef int (*xlat_thread_detach_t)(xlat_thread_inst_ctx_t const *xctx);
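The instantiate/detach callbacks likewise collapse to a single ctx argument. A sketch of an instantiate callback under the new typedef (the my_* types and fields are hypothetical):

static int my_xlat_instantiate(xlat_inst_ctx_t const *xctx)
{
	my_xlat_inst_t	*inst = talloc_get_type_abort(xctx->inst, my_xlat_inst_t);	/* pre-allocated instance data */
	my_uctx_t	*uctx = talloc_get_type_abort(xctx->uctx, my_uctx_t);		/* uctx from registration */

	inst->conf = uctx;

	return 0;
}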
/** legacy xlat callback function
*
typedef size_t (*xlat_escape_legacy_t)(request_t *request, char *out, size_t outlen, char const *in, void *arg);
-
-
int xlat_fmt_get_vp(fr_pair_t **out, request_t *request, char const *name);
ssize_t xlat_eval(char *out, size_t outlen, request_t *request, char const *fmt, xlat_escape_legacy_t escape,
ssize_t xlat_aeval(TALLOC_CTX *ctx, char **out, request_t *request,
char const *fmt, xlat_escape_legacy_t escape, void const *escape_ctx)
- CC_HINT(nonnull (2, 3, 4));
+ CC_HINT(nonnull(2, 3, 4));
ssize_t xlat_aeval_compiled(TALLOC_CTX *ctx, char **out, request_t *request,
xlat_exp_t const *xlat, xlat_escape_legacy_t escape, void const *escape_ctx)
bool xlat_async_required(xlat_exp_t const *xlat);
-ssize_t xlat_tokenize_ephemeral(TALLOC_CTX *ctx, xlat_exp_t **head, xlat_flags_t *flags,
- fr_sbuff_t *in,
+ssize_t xlat_tokenize_ephemeral(TALLOC_CTX *ctx, xlat_exp_t **head,
+ fr_event_list_t *el,
+ xlat_flags_t *flags, fr_sbuff_t *in,
fr_sbuff_parse_rules_t const *p_rules, tmpl_rules_t const *t_rules);
ssize_t xlat_tokenize_argv(TALLOC_CTX *ctx, xlat_exp_t **head, xlat_flags_t *flags, fr_sbuff_t *in,
/*
* xlat_inst.c
*/
-int xlat_instantiate_ephemeral(xlat_exp_t *root);
+int xlat_instantiate_ephemeral(xlat_exp_t *root, fr_event_list_t *el) CC_HINT(nonnull(1));
xlat_thread_inst_t *xlat_thread_instance_find(xlat_exp_t const *node);
-int xlat_thread_instantiate(TALLOC_CTX *ctx);
+int xlat_thread_instantiate(TALLOC_CTX *ctx, fr_event_list_t *el);
int xlat_instantiate(void);
/*
* xlat.c
*/
-int unlang_xlat_event_timeout_add(request_t *request, fr_unlang_xlat_timeout_t callback,
- void const *ctx, fr_time_t when);
+int unlang_xlat_timeout_add(request_t *request, fr_unlang_xlat_timeout_t callback,
+ void const *rctx, fr_time_t when);
-int unlang_xlat_push(TALLOC_CTX *ctx, fr_value_box_list_t *out,
+int unlang_xlat_push(TALLOC_CTX *ctx, bool *p_success, fr_value_box_list_t *out,
request_t *request, xlat_exp_t const *exp, bool top_frame)
CC_HINT(warn_unused_result);
xlat_action_t unlang_xlat_yield(request_t *request,
- xlat_func_resume_t callback, xlat_func_signal_t signal,
+ xlat_func_t callback, xlat_func_signal_t signal,
void *rctx);
#ifdef __cplusplus
}
if (!xlat_root) return NULL;
- if (inlen < 0) {
- return fr_rb_find(xlat_root, &(xlat_t){ .name = in });
- }
+ if (inlen < 0) return fr_rb_find(xlat_root, &(xlat_t){ .name = in });
if ((size_t) inlen >= sizeof(buffer)) return NULL;
return xlat_register_module(ctx, NULL, name, func, flags);
}
+#if 0
+/** Compare two argument entries to see if they're equivalent
+ *
+ * @note Does not check escape function or uctx pointers.
+ *
+ * @param[in] a First argument structure.
+ * @param[in] b Second argument structure.
+ * @return
+ * - 1 if a > b
+ * - 0 if a == b
+ * - -1 if a < b
+ */
+static int xlat_arg_cmp_no_escape(xlat_arg_parser_t const *a, xlat_arg_parser_t const *b)
+{
+ int8_t ret;
+
+ ret = CMP(a->required, b->required);
+ if (ret != 0) return ret;
+
+ ret = CMP(a->concat, b->concat);
+ if (ret != 0) return ret;
+
+ ret = CMP(a->single, b->single);
+ if (ret != 0) return ret;
+
+ ret = CMP(a->variadic, b->variadic);
+ if (ret != 0) return ret;
+
+ ret = CMP(a->always_escape, b->always_escape);
+ if (ret != 0) return ret;
+
+ return CMP(a->type, b->type);
+}
+
+/** Compare two argument lists to see if they're equivalent
+ *
+ * @note Does not check escape function or uctx pointers.
+ *
+ * @param[in] a First argument structure.
+ * @param[in] b Second argument structure.
+ * @return
+ * - 1 if a > b
+ * - 0 if a == b
+ * - -1 if a < b
+ */
+static int xlat_arg_cmp_list_no_escape(xlat_arg_parser_t const a[], xlat_arg_parser_t const b[])
+{
+ xlat_arg_parser_t const *arg_a_p;
+ xlat_arg_parser_t const *arg_b_p;
+
+ for (arg_a_p = a, arg_b_p = b;
+ (arg_a_p->type != FR_TYPE_NULL) && (arg_b_p->type != FR_TYPE_NULL);
+ arg_a_p++, arg_b_p++) {
+ int8_t ret;
+
+ ret = xlat_arg_cmp_no_escape(arg_a_p, arg_b_p);
+ if (ret != 0) return ret;
+ }
+
+ return CMP(arg_a_p, arg_b_p); /* Check we ended at the same point */
+}
+#endif
+
/** Verify xlat arg specifications are valid
*
* @param[in] arg specification to validate.
/*
* Internal redundant handler for xlats
*/
-typedef enum xlat_redundant_type_t {
- XLAT_REDUNDANT_INVALID = 0,
- XLAT_REDUNDANT,
- XLAT_LOAD_BALANCE,
- XLAT_REDUNDANT_LOAD_BALANCE,
+typedef enum {
+ XLAT_REDUNDANT_INVALID = 0, //!< Not a valid redundant type.
+ XLAT_REDUNDANT, //!< Use the first xlat function first, then
+ ///< go through in sequence, using the next
+ ///< function after each failure.
+
+ XLAT_LOAD_BALANCE, //!< Pick a random xlat, and if that fails
+ ///< then the call as a whole fails.
+
+ XLAT_REDUNDANT_LOAD_BALANCE, //!< Pick a random xlat to start, then fail
+ ///< over to the other xlats in the redundant
+ ///< group.
} xlat_redundant_type_t;
typedef struct {
- xlat_redundant_type_t type;
- uint32_t count;
- CONF_SECTION const *cs;
+ fr_dlist_t entry; //!< Entry in the redundant function list.
+ xlat_t *func; //!< Resolved xlat function.
+} xlat_redundant_func_t;
+
+typedef struct {
+ xlat_redundant_type_t type; //!< Type of redundant xlat expression.
+ fr_dlist_head_t funcs; //!< List of redundant xlat functions.
+ CONF_SECTION *cs; //!< Config section this redundant xlat list was created from.
} xlat_redundant_t;
+typedef struct {
+ xlat_redundant_t *xr; //!< Information about the redundant xlat.
+ xlat_exp_t **ex; //!< Array of xlat expressions created by
+ ///< tokenizing the arguments to the redundant
+ ///< xlat, then duplicating them multiple times,
+ ///< one for each xlat function that may be called.
+} xlat_redundant_inst_t;
-/** Make module instance available to xlats
+typedef struct {
+ bool last_success; //!< Did the last call succeed?
+
+ xlat_exp_t **first; //!< First function called.
+ ///< Used for redundant-load-balance.
+ xlat_exp_t **current; //!< Last function called, used for redundant xlats.
+} xlat_redundant_rctx_t;
+
+/** Pass back the result from a single redundant child call
*
*/
-static int xlat_redundant_instantiate(void *xlat_inst, UNUSED xlat_exp_t const *exp, void *uctx)
+static xlat_action_t xlat_redundant_resume(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ xlat_ctx_t const *xctx,
+ request_t *request, UNUSED fr_value_box_list_t *in)
{
- *((void **)xlat_inst) = talloc_get_type_abort(uctx, xlat_redundant_t);
- return 0;
-}
+ xlat_redundant_inst_t const *xri = talloc_get_type_abort_const(xctx->inst, xlat_redundant_inst_t);
+ xlat_redundant_rctx_t *rctx = talloc_get_type_abort(xctx->rctx, xlat_redundant_rctx_t);
+ xlat_action_t xa = XLAT_ACTION_DONE;
+
+ if (rctx->last_success) {
+ done:
+ talloc_free(rctx);
+ return xa;
+ }
+ /*
+ * We're at the end, loop back to the start
+ */
+ if (++rctx->current >= (xri->ex + talloc_array_length(xri->ex))) rctx->current = xri->ex;
+
+ /*
+ * We're back to the first one we tried, fail...
+ */
+ if (rctx->current == rctx->first) {
+ error:
+ xa = XLAT_ACTION_FAIL;
+ goto done;
+ }
+
+ if (unlang_xlat_yield(request, xlat_redundant_resume, NULL, rctx) != XLAT_ACTION_YIELD) goto error;
+
+ /*
+ * Push the next child...
+ */
+ if (unlang_xlat_push(ctx, &rctx->last_success, out->dlist,
+ request, *rctx->current, UNLANG_SUB_FRAME) < 0) goto error;
+
+ return XLAT_ACTION_PUSH_UNLANG;
+}
-/** xlat "redundant" processing
+/** Pass back the result from a single redundant child call
*
- * Processes xlat calls for modules defined in "redundant"
- * sub-sections of the instantiate configuration.
+ */
+static xlat_action_t xlat_load_balance_resume(UNUSED TALLOC_CTX *ctx, UNUSED fr_dcursor_t *out,
+ xlat_ctx_t const *xctx,
+ UNUSED request_t *request, UNUSED fr_value_box_list_t *in)
+{
+ xlat_redundant_rctx_t *rctx = talloc_get_type_abort(xctx->rctx, xlat_redundant_rctx_t);
+ xlat_action_t xa = rctx->last_success ? XLAT_ACTION_DONE : XLAT_ACTION_FAIL;
+
+ talloc_free(rctx);
+
+ return xa;
+}
+
+/** xlat "redundant", "load-balance" and "redundant-load-balance" processing
*
* @ingroup xlat_functions
*/
static xlat_action_t xlat_redundant(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, void const *xlat_inst,
- UNUSED void *xlat_thread_inst, fr_value_box_list_t *in)
+ xlat_ctx_t const *xctx,
+ request_t *request, UNUSED fr_value_box_list_t *in)
{
- xlat_redundant_t const *xr;
- CONF_ITEM *ci;
- char const *name;
- xlat_t *xlat;
+ xlat_redundant_inst_t const *xri = talloc_get_type_abort_const(xctx->inst, xlat_redundant_inst_t);
+ xlat_redundant_rctx_t *rctx;
- memcpy(&xr, xlat_inst, sizeof(xr));
- xr = talloc_get_type_abort_const(xr, xlat_redundant_t);
-
- fr_assert(xr->type == XLAT_REDUNDANT);
+ MEM(rctx = talloc_zero(unlang_interpret_frame_talloc_ctx(request), xlat_redundant_rctx_t));
+ switch (xri->xr->type) {
/*
- * Pick the first xlat which succeeds
+ * Run through each of the redundant functions sequentially
+ * starting at the first.
*/
- for (ci = cf_item_next(xr->cs, NULL);
- ci != NULL;
- ci = cf_item_next(xr->cs, ci)) {
- if (!cf_item_is_pair(ci)) continue;
+ case XLAT_REDUNDANT:
+ rctx->current = rctx->first = xri->ex; /* First element first */
+ if (unlang_xlat_yield(request, xlat_redundant_resume, NULL, rctx) != XLAT_ACTION_YIELD) {
+ error:
+ talloc_free(rctx);
+ return XLAT_ACTION_FAIL;
+ }
+ break;
- name = cf_pair_attr(cf_item_to_pair(ci));
- fr_assert(name != NULL);
+ /*
+ * Run a single random redundant function.
+ */
+ case XLAT_LOAD_BALANCE:
+ rctx->current = rctx->first = &xri->ex[(size_t)fr_rand() & (talloc_array_length(xri->ex) - 1)]; /* Random start */
+ if (unlang_xlat_yield(request, xlat_load_balance_resume, NULL, rctx) != XLAT_ACTION_YIELD) goto error;
+ break;
- /*
- * @todo - cache these in a fixed size array in
- * the xlat_inst, which should save some run-time
- * cost.
- */
- xlat = xlat_func_find(name, -1);
- if (!xlat) continue;
+ /*
+ * Run through each of the redundant functions sequentially
+ * starting at a random element.
+ */
+ case XLAT_REDUNDANT_LOAD_BALANCE:
+ rctx->current = rctx->first = &xri->ex[(size_t)fr_rand() & (talloc_array_length(xri->ex) - 1)]; /* Random start */
+ if (unlang_xlat_yield(request, xlat_redundant_resume, NULL, rctx) != XLAT_ACTION_YIELD) goto error;
+ break;
- return xlat->func.async(ctx, out, request, xlat->mod_inst, xlat->thread_uctx, in);
+ default:
+ fr_assert(0);
}
- /*
- * Everything failed. Don't modify the output. Just return failure.
- */
- return XLAT_ACTION_FAIL;
+ if (unlang_xlat_push(ctx, &rctx->last_success, out->dlist,
+ request, *rctx->current, UNLANG_SUB_FRAME) < 0) return XLAT_ACTION_FAIL;
+
+ return XLAT_ACTION_PUSH_UNLANG;
}
-/** xlat "load-balance" processing
- *
- * Processes xlat calls for modules defined in "load-balance"
- * sub-sections of the instantiate configuration.
+/** Allocate additional nodes for evaluation
*
- * @ingroup xlat_functions
*/
-static xlat_action_t xlat_load_balance(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, void const *xlat_inst,
- UNUSED void *xlat_thread_inst, fr_value_box_list_t *in)
+static int xlat_redundant_instantiate(xlat_inst_ctx_t const *xctx)
{
- uint32_t count = 0;
- xlat_redundant_t const *xr;
- CONF_ITEM *ci;
- CONF_ITEM *found = NULL;
- char const *name;
- xlat_t *xlat;
+ xlat_redundant_t *xr = talloc_get_type_abort(xctx->uctx, xlat_redundant_t);
+ xlat_redundant_inst_t *xri = talloc_get_type_abort(xctx->inst, xlat_redundant_inst_t);
+ unsigned int num = 0;
+ xlat_redundant_func_t const *head;
- memcpy(&xr, xlat_inst, sizeof(xr));
- xr = talloc_get_type_abort_const(xr, xlat_redundant_t);
+ MEM(xri->ex = talloc_array(xri, xlat_exp_t *, fr_dlist_num_elements(&xr->funcs)));
+ xri->xr = xr;
+
+ head = talloc_get_type_abort(fr_dlist_head(&xr->funcs), xlat_redundant_func_t);
/*
- * Choose a child at random.
+ * Check the calling style matches the first
+ * function.
+ *
+ * We do this here as the redundant xlat
+ * itself can't have an input type or
+ * defined arguments;
*/
- for (ci = cf_item_next(xr->cs, NULL);
- ci != NULL;
- ci = cf_item_next(xr->cs, ci)) {
- if (!cf_item_is_pair(ci)) continue;
- count++;
+ switch (xctx->ex->call.input_type) {
+ case XLAT_INPUT_UNPROCESSED:
+ break;
- /*
- * Replace the previously found one with a random
- * new one.
- */
- if ((count * (fr_rand() & 0xffff)) < (uint32_t) 0x10000) {
- found = ci;
+ case XLAT_INPUT_MONO:
+ if (head->func->input_type == XLAT_INPUT_ARGS) {
+ PERROR("Expansion function \"%s\" takes defined arguments and should "
+ "be called using %%(func:args) syntax",
+ xctx->ex->call.func->name);
+ return -1;
+
+ }
+ break;
+
+ case XLAT_INPUT_ARGS:
+ if (head->func->input_type == XLAT_INPUT_MONO) {
+ PERROR("Expansion function \"%s\" should be called using %%{func:arg} syntax",
+ xctx->ex->call.func->name);
+ return -1;
}
+ break;
}
/*
- * Plain load balancing: do one child, and only one child.
+ * For each function, create the appropriate xlat
+ * node, and duplicate the child arguments.
*/
- if (xr->type == XLAT_LOAD_BALANCE) {
- name = cf_pair_attr(cf_item_to_pair(found));
- fr_assert(name != NULL);
+ fr_dlist_foreach(&xr->funcs, xlat_redundant_func_t, xrf) {
+ xlat_exp_t *node;
/*
- * @todo - cache these in a fixed size array in
- * the xlat_inst, which should save some run-time
- * cost.
+ * We have to do this here as it only
+ * becomes an error when the user tries
+ * to use the redundant xlat.
*/
- xlat = xlat_func_find(name, -1);
- if (!xlat) return XLAT_ACTION_FAIL;
-
- return xlat->func.async(ctx, out, request, xlat->mod_inst, xlat->thread_uctx, in);
- }
+ if (head->func->input_type != xrf->func->input_type) {
+ cf_log_err(xr->cs, "Expansion functions \"%s\" and \"%s\" use different argument styles "
+ "cannot be used in the same redundant section", head->func->name, xrf->func->name);
+ error:
+ talloc_free(xri->ex);
+ return -1;
+ }
- fr_assert(xr->type == XLAT_REDUNDANT_LOAD_BALANCE);
+ /*
+ * We pass the current arguments in
+ * so that the instantiation functions
+ * for the new node can operate
+ * correctly.
+ */
+ MEM(node = xlat_exp_func_alloc(xri->ex, xrf->func, xctx->ex->child));
- /*
- * Try the random one we found. If it fails, keep going
- * through the rest of the children.
- */
- ci = found;
- do {
- name = cf_pair_attr(cf_item_to_pair(ci));
- fr_assert(name != NULL);
+ switch (xrf->func->input_type) {
+ case XLAT_INPUT_UNPROCESSED:
+ break;
- xlat = xlat_func_find(name, -1);
- if (xlat) {
- xlat_action_t xa;
+ case XLAT_INPUT_MONO:
+ if (xlat_validate_function_mono(node) < 0) {
+ PERROR("Invalid arguments for redundant expansion function \"%s\"",
+ xrf->func->name);
+ goto error;
+ }
+ break;
- /*
- * The function shouldn't muck with the
- * output list, unless it succeeds.
- */
- xa = xlat->func.async(ctx, out, request, xlat->mod_inst, xlat->thread_uctx, in);
- if (xa != XLAT_ACTION_FAIL) return xa;
+ case XLAT_INPUT_ARGS:
+ if (xlat_validate_function_args(node) < 0) {
+ PERROR("Invalid arguments for redundant expansion function \"%s\"",
+ xrf->func->name);
+ goto error;
+ }
+ break;
}
/*
- * Go to the next one, wrapping around at the end.
+ * Add the xlat function (and any children)
+ * to the end of the instantiation list so
+ * they'll get called at some point after
+ * we return.
*/
- ci = cf_item_next(xr->cs, ci);
- if (!ci) ci = cf_item_next(xr->cs, NULL);
- } while (ci != found);
+ xlat_bootstrap(node);
+ xri->ex[num++] = node;
+ }
- return XLAT_ACTION_FAIL;
-}
+ /*
+ * Free the original argument nodes so they're
+ * not evaluated when the redundant xlat is called.
+ *
+ * We need to re-evaluate the arguments for each
+ * redundant function call we perform.
+ *
+ * The xlat_exp_func_alloc call above associates
+ * a copy of the original arguments with each
+ * function that's called.
+ */
+ xlat_exp_free(&xctx->ex->child);
+ return 0;
+}
/** Registers a redundant xlat
*
*/
int xlat_register_redundant(CONF_SECTION *cs)
{
- char const *name1, *name2;
- xlat_redundant_t *xr;
- xlat_t const *xlat, *old = NULL;
- CONF_ITEM *ci = NULL;
- xlat_func_t func;
+ static fr_table_num_sorted_t const xlat_redundant_type_table[] = {
+ { L("load-balance"), XLAT_LOAD_BALANCE },
+ { L("redundant"), XLAT_REDUNDANT },
+ { L("redundant-load-balance"), XLAT_REDUNDANT_LOAD_BALANCE },
+ };
+ static size_t xlat_redundant_type_table_len = NUM_ELEMENTS(xlat_redundant_type_table);
- name1 = cf_section_name1(cs);
- name2 = cf_section_name2(cs);
+ char const *name1, *name2;
+ xlat_redundant_type_t xr_type;
+ xlat_redundant_t *xr;
+ xlat_flags_t flags = {
+ .pure = true /* Gets removed by the merge function */
+ };
- if (xlat_func_find(name2, -1)) {
- cf_log_err(cs, "An expansion is already registered for this name");
+ xlat_t const *xlat;
+ CONF_ITEM *ci = NULL;
+
+ name1 = cf_section_name1(cs);
+ xr_type = fr_table_value_by_str(xlat_redundant_type_table, name1, XLAT_REDUNDANT_INVALID);
+ if (unlikely(xr_type == XLAT_REDUNDANT_INVALID)) {
+ cf_log_err(cs, "Invalid redundant section verb \"%s\"", name1);
return -1;
}
- MEM(xr = talloc_zero(cs, xlat_redundant_t));
-
- if (strcmp(name1, "redundant") == 0) {
- xr->type = XLAT_REDUNDANT;
- func = xlat_redundant;
-
- } else if (strcmp(name1, "redundant-load-balance") == 0) {
- xr->type = XLAT_REDUNDANT_LOAD_BALANCE;
- func = xlat_load_balance;
-
- } else if (strcmp(name1, "load-balance") == 0) {
- xr->type = XLAT_LOAD_BALANCE;
- func = xlat_load_balance;
-
- } else {
- fr_assert(0);
- cf_log_err(cs, "Invalid redundant section verb");
+ name2 = cf_section_name2(cs);
+ if (xlat_func_find(name2, talloc_array_length(name2) - 1)) {
+ cf_log_err(cs, "An expansion is already registered for this name");
return -1;
}
+ MEM(xr = talloc_zero(cs, xlat_redundant_t));
+ xr->type = xr_type;
xr->cs = cs;
+ fr_dlist_talloc_init(&xr->funcs, xlat_redundant_func_t, entry);
/*
* Count the number of children for load-balance, and
* also find out a little bit more about the old xlats.
+ *
+ * These are just preemptive checks, the majority of
+ * the work is done when a redundant xlat is
+ * instantiated. There we create an xlat node for
+ * each of the children of the section.
*/
while ((ci = cf_item_next(cs, ci))) {
- char const *attr;
+ xlat_redundant_func_t *xrf;
+ xlat_t *mod_func = NULL;
+ char const *mod_func_name;
if (!cf_item_is_pair(ci)) continue;
- attr = cf_pair_attr(cf_item_to_pair(ci));
+ mod_func_name = cf_pair_attr(cf_item_to_pair(ci));
/*
* This is ok, it just means the module
* doesn't have an xlat method.
+ *
+ * If there are ordering issues we could
+ * move this check to the instantiation
+ * function.
*/
- old = xlat_func_find(attr, -1);
- if (!old) {
+ mod_func = xlat_func_find(mod_func_name, talloc_array_length(mod_func_name) - 1);
+ if (!mod_func) {
talloc_free(xr);
return 1;
}
- xr->count++;
+ MEM(xrf = talloc_zero(xr, xlat_redundant_func_t));
+ xrf->func = mod_func;
+ fr_dlist_insert_tail(&xr->funcs, xrf);
+
+ /*
+ * Figure out pure status. If any of
+ * the children are un-pure then the
+ * whole redundant xlat is un-pure,
+ * same with async.
+ */
+ xlat_flags_merge(&flags, &mod_func->flags);
}
/*
- * At least one "old" xlat has to exist. Look at it in
- * order to find out which arguments we need to pass to
- * xlat_register()
+ * At least one module xlat has to exist.
*/
- if (!old) return 1;
+ if (!fr_dlist_num_elements(&xr->funcs)) {
+ talloc_free(xr);
+ return 1;
+ }
- xlat = xlat_register(NULL, name2, func, &old->flags);
+ xlat = xlat_register(NULL, name2, xlat_redundant, &flags);
if (!xlat) {
- ERROR("Registering xlat for load-balance section failed");
+ ERROR("Registering xlat for %s section failed",
+ fr_table_str_by_value(xlat_redundant_type_table, xr->type, "<INVALID>"));
talloc_free(xr);
return -1;
}
-
- xlat_async_instantiate_set(xlat, xlat_redundant_instantiate, xlat_redundant_t *, NULL, xr);
+ xlat_async_instantiate_set(xlat, xlat_redundant_instantiate, xlat_redundant_inst_t, NULL, xr);
return 0;
}
-
-
/*
* Regular xlat functions
*/
-
-
static xlat_arg_parser_t const xlat_func_debug_args[] = {
{ .single = true, .type = FR_TYPE_INT8 },
XLAT_ARG_PARSER_TERMINATOR
* @ingroup xlat_functions
*/
static xlat_action_t xlat_func_debug(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, UNUSED void const *xlat_inst,
- UNUSED void *xlat_thread_inst, fr_value_box_list_t *in)
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
int level = 0;
fr_value_box_t *vb;
* @ingroup xlat_functions
*/
static xlat_action_t xlat_func_debug_attr(UNUSED TALLOC_CTX *ctx, UNUSED fr_dcursor_t *out,
- request_t *request, UNUSED void const *xlat_inst,
- UNUSED void *xlat_thread_inst, fr_value_box_list_t *in)
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
fr_pair_t *vp;
fr_dcursor_t cursor;
* @ingroup xlat_functions
*/
static xlat_action_t xlat_func_explode(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, UNUSED void const *xlat_inst,
- UNUSED void *xlat_thread_inst, fr_value_box_list_t *in)
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
fr_value_box_t *strings = fr_dlist_head(in);
fr_value_box_list_t *list = &strings->vb_group;
* @ingroup xlat_functions
*/
static xlat_action_t xlat_func_integer(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, UNUSED void const *xlat_inst,
- UNUSED void *xlat_thread_inst, fr_value_box_list_t *in)
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
fr_value_box_t *in_vb = fr_dlist_head(in);
char const *p;
*
* @ingroup xlat_functions
*/
-static xlat_action_t xlat_func_map(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t xlat_func_map(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
map_t *map = NULL;
int ret;
*
* @ingroup xlat_functions
*/
-static xlat_action_t xlat_func_next_time(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t xlat_func_next_time(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
long num;
* @ingroup xlat_functions
*/
static xlat_action_t xlat_func_lpad(UNUSED TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, UNUSED void const *xlat_inst,
- UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *args)
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *args)
{
fr_value_box_t *values = fr_dlist_head(args);
fr_value_box_list_t *list = &values->vb_group;
* @ingroup xlat_functions
*/
static xlat_action_t xlat_func_rpad(UNUSED TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, UNUSED void const *xlat_inst,
- UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *args)
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *args)
{
fr_value_box_t *values = fr_dlist_head(args);
fr_value_box_list_t *list = &values->vb_group;
* @ingroup xlat_functions
*/
static xlat_action_t xlat_func_base64_encode(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, UNUSED void const *xlat_inst,
- UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *args)
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *args)
{
size_t alen;
ssize_t elen;
* @ingroup xlat_functions
*/
static xlat_action_t xlat_func_base64_decode(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, UNUSED void const *xlat_inst,
- UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *args)
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *args)
{
size_t alen;
ssize_t declen = 0;
* @ingroup xlat_functions
*/
static xlat_action_t xlat_func_bin(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
fr_value_box_t *result;
char const *p, *end;
* @ingroup xlat_functions
*/
static xlat_action_t xlat_func_concat(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
fr_value_box_t *result;
fr_value_box_t *list = fr_dlist_head(in);
*
* @ingroup xlat_functions
*/
-static xlat_action_t xlat_func_hex(UNUSED TALLOC_CTX *ctx, fr_dcursor_t *out, UNUSED request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t xlat_func_hex(UNUSED TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ UNUSED request_t *request, fr_value_box_list_t *in)
{
char *new_buff;
fr_value_box_t *bin = fr_dlist_pop_head(in); /* First argument */
*
* @ingroup xlat_functions
*/
-static xlat_action_t xlat_func_hmac_md5(TALLOC_CTX *ctx, fr_dcursor_t *out, UNUSED request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t xlat_func_hmac_md5(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ UNUSED request_t *request, fr_value_box_list_t *in)
{
uint8_t digest[MD5_DIGEST_LENGTH];
return xlat_hmac(ctx, out, in, digest, MD5_DIGEST_LENGTH, HMAC_MD5);
*
* @ingroup xlat_functions
*/
-static xlat_action_t xlat_func_hmac_sha1(TALLOC_CTX *ctx, fr_dcursor_t *out, UNUSED request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t xlat_func_hmac_sha1(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ UNUSED request_t *request, fr_value_box_list_t *in)
{
uint8_t digest[SHA1_DIGEST_LENGTH];
return xlat_hmac(ctx, out, in, digest, SHA1_DIGEST_LENGTH, HMAC_SHA1);
*
*/
static xlat_action_t xlat_func_join(UNUSED TALLOC_CTX *ctx, fr_dcursor_t *out,
- UNUSED request_t *request, UNUSED void const *xlat_inst,
- UNUSED void *xlat_thread_inst, fr_value_box_list_t *in)
+ UNUSED xlat_ctx_t const *xctx,
+ UNUSED request_t *request, fr_value_box_list_t *in)
{
fr_value_box_t *arg = NULL, *vb, *p;
* @ingroup xlat_functions
*/
static xlat_action_t xlat_func_length(TALLOC_CTX *ctx, fr_dcursor_t *out,
- UNUSED request_t *request, UNUSED void const *xlat_inst,
- UNUSED void *xlat_thread_inst, fr_value_box_list_t *in)
+ UNUSED xlat_ctx_t const *xctx,
+ UNUSED request_t *request, fr_value_box_list_t *in)
{
fr_value_box_t *vb = NULL;
*
* @ingroup xlat_functions
*/
-static xlat_action_t xlat_func_md4(TALLOC_CTX *ctx, fr_dcursor_t *out, UNUSED request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t xlat_func_md4(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ UNUSED request_t *request, fr_value_box_list_t *in)
{
uint8_t digest[MD5_DIGEST_LENGTH];
fr_value_box_t *vb;
*
* @ingroup xlat_functions
*/
-static xlat_action_t xlat_func_md5(TALLOC_CTX *ctx, fr_dcursor_t *out, UNUSED request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t xlat_func_md5(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ UNUSED request_t *request, fr_value_box_list_t *in)
{
uint8_t digest[MD5_DIGEST_LENGTH];
fr_value_box_t *vb;
* @ingroup xlat_functions
*/
static xlat_action_t xlat_func_module(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- UNUSED fr_value_box_list_t *in)
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, UNUSED fr_value_box_list_t *in)
{
fr_value_box_t *vb = NULL;
*
* @ingroup xlat_functions
*/
-static xlat_action_t xlat_func_pack(UNUSED TALLOC_CTX *ctx, fr_dcursor_t *out, UNUSED request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t xlat_func_pack(UNUSED TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ UNUSED request_t *request, fr_value_box_list_t *in)
{
fr_value_box_t *vb;
* @ingroup xlat_functions
*/
static xlat_action_t xlat_func_pairs(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
tmpl_t *vpt = NULL;
fr_dcursor_t cursor;
*
* @ingroup xlat_functions
*/
-static xlat_action_t xlat_func_rand(TALLOC_CTX *ctx, fr_dcursor_t *out, UNUSED request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t xlat_func_rand(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ UNUSED request_t *request, fr_value_box_list_t *in)
{
int64_t result;
fr_value_box_t *vb;
* @ingroup xlat_functions
*/
static xlat_action_t xlat_func_randstr(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
/*
* Lookup tables for randstr char classes
* @ingroup xlat_functions
*/
static xlat_action_t xlat_func_regex(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
fr_value_box_t *in_head = fr_dlist_head(in);
/*
*
* @ingroup xlat_functions
*/
-static xlat_action_t xlat_func_sha1(TALLOC_CTX *ctx, fr_dcursor_t *out, UNUSED request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t xlat_func_sha1(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ UNUSED request_t *request, fr_value_box_list_t *in)
{
uint8_t digest[SHA1_DIGEST_LENGTH];
fr_sha1_ctx sha1_ctx;
* @ingroup xlat_functions
*/
#ifdef HAVE_OPENSSL_EVP_H
-static xlat_action_t xlat_evp_md(TALLOC_CTX *ctx, fr_dcursor_t *out, UNUSED request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in, EVP_MD const *md)
+static xlat_action_t xlat_evp_md(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ UNUSED request_t *request, fr_value_box_list_t *in, EVP_MD const *md)
{
uint8_t digest[EVP_MAX_MD_SIZE];
unsigned int digestlen;
# define EVP_MD_XLAT(_md, _md_func) \
static xlat_action_t xlat_func_##_md(TALLOC_CTX *ctx, fr_dcursor_t *out,\
- request_t *request, void const *xlat_inst, void *xlat_thread_inst,\
- fr_value_box_list_t *in)\
+ xlat_ctx_t const *xctx, \
+ request_t *request,\
+ fr_value_box_list_t *in)\
{\
- return xlat_evp_md(ctx, out, request, xlat_inst, xlat_thread_inst, in, EVP_##_md_func());\
+ return xlat_evp_md(ctx, out, xctx, request, in, EVP_##_md_func());\
}
EVP_MD_XLAT(sha2_224, sha224)
*
* @ingroup xlat_functions
*/
-static xlat_action_t xlat_func_string(UNUSED TALLOC_CTX *ctx, fr_dcursor_t *out, UNUSED request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t xlat_func_string(UNUSED TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ UNUSED request_t *request, fr_value_box_list_t *in)
{
fr_value_box_t *in_head = fr_dlist_pop_head(in);
* @ingroup xlat_functions
*/
static xlat_action_t xlat_func_strlen(TALLOC_CTX *ctx, fr_dcursor_t *out,
- UNUSED request_t *request, UNUSED void const *xlat_inst,
- UNUSED void *xlat_thread_inst, fr_value_box_list_t *in)
+ UNUSED xlat_ctx_t const *xctx,
+ UNUSED request_t *request, fr_value_box_list_t *in)
{
fr_value_box_t *vb;
fr_value_box_t *in_head = fr_dlist_head(in);
* @ingroup xlat_functions
*/
static xlat_action_t xlat_func_sub_regex(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
+ UNUSED xlat_ctx_t const *xctx, request_t *request,
fr_value_box_list_t *in)
{
char const *p, *q, *end;
* @ingroup xlat_functions
*/
static xlat_action_t xlat_func_sub(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request,
#ifdef HAVE_REGEX_PCRE2
- void const *xlat_inst, void *xlat_thread_inst,
+ xlat_ctx_t const *xctx,
#else
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
+ UNUSED xlat_ctx_t const *xctx,
#endif
- fr_value_box_list_t *in)
+ request_t *request, fr_value_box_list_t *in)
{
char const *p, *q, *end;
char *vb_str;
if (*pattern == '/') {
#ifdef HAVE_REGEX_PCRE2
- return xlat_func_sub_regex(ctx, out, request, xlat_inst, xlat_thread_inst, in);
+ return xlat_func_sub_regex(ctx, out, xctx, request, in);
#else
REDEBUG("regex based substitutions require libpcre2. "
"Check ${features.regex-pcre2} to determine support");
* @ingroup xlat_functions
*/
static xlat_action_t xlat_func_tolower(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
return xlat_change_case(ctx, out, request, in, false);
}
* @ingroup xlat_functions
*/
static xlat_action_t xlat_func_toupper(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
return xlat_change_case(ctx, out, request, in, true);
}
*
* @ingroup xlat_functions
*/
-static xlat_action_t xlat_func_urlquote(TALLOC_CTX *ctx, fr_dcursor_t *out, UNUSED request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t xlat_func_urlquote(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ UNUSED request_t *request, fr_value_box_list_t *in)
{
char const *p, *end;
char *buff_p;
* @ingroup xlat_functions
*/
static xlat_action_t xlat_func_urlunquote(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
char const *p, *end;
char *buff_p;
* @ingroup xlat_functions
*/
static xlat_action_t protocol_decode_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+ xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
- int decoded;
- fr_value_box_t *vb;
- void *decode_ctx = NULL;
- fr_test_point_pair_decode_t const *tp_decode;
-
- memcpy(&tp_decode, xlat_inst, sizeof(tp_decode)); /* const issues */
+ int decoded;
+ fr_value_box_t *vb;
+ void *decode_ctx = NULL;
+ fr_test_point_pair_decode_t const *tp_decode = *(void * const *)xctx->inst;
if (tp_decode->test_ctx) {
if (tp_decode->test_ctx(&decode_ctx, ctx) < 0) {
return XLAT_ACTION_DONE;
}
-static int protocol_xlat_instantiate(void *xlat_inst, UNUSED xlat_exp_t const *exp, void *uctx)
+static int protocol_xlat_instantiate(xlat_inst_ctx_t const *mctx)
{
- *(void **) xlat_inst = uctx;
+ *(void **) mctx->inst = mctx->uctx;
return 0;
}
* @ingroup xlat_functions
*/
static xlat_action_t protocol_encode_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+ xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
tmpl_t *vpt;
fr_pair_t *vp;
void *encode_ctx = NULL;
fr_test_point_pair_encode_t const *tp_encode;
- memcpy(&tp_encode, xlat_inst, sizeof(tp_encode)); /* const issues */
+ memcpy(&tp_encode, xctx->inst, sizeof(tp_encode)); /* const issues */
if (tmpl_afrom_attr_str(ctx, NULL, &vpt, in_head->vb_strvalue,
&(tmpl_rules_t){
--- /dev/null
+#pragma once
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+ */
+
+/**
+ * $Id$
+ *
+ * @file lib/unlang/xlat_ctx.h
+ * @brief xlat ephemeral argument passing structures
+ *
+ * @copyright 2021 Arran Cudbard-Bell <a.cudbardb@freeradius.org>
+ */
+RCSIDH(xlat_ctx_h, "$Id$")
+
+#include <freeradius-devel/server/module_ctx.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* So we don't need to include xlat.h */
+typedef struct xlat_exp xlat_exp_t;
+
+/** An xlat calling ctx
+ *
+ * This provides optional arguments to xlat functions.
+ */
+typedef struct {
+ void const *inst; //!< xlat instance data.
+ void *thread; //!< xlat thread instance data.
+ module_ctx_t const *mctx; //!< Synthesised module calling ctx.
+ void *rctx; //!< Resume context.
+} xlat_ctx_t;
+
+/** An xlat instantiation ctx
+ *
+ * This provides optional arguments to xlat instantiation functions.
+ */
+typedef struct {
+ void *inst; //!< xlat instance data to populate.
+ xlat_exp_t *ex; //!< Tokenized expression to use in expansion.
+ module_inst_ctx_t const *mctx; //!< Synthesised module calling ctx.
+ void *uctx; //!< Passed to the registration function.
+} xlat_inst_ctx_t;
+
+/** An xlat thread instantiation ctx
+ *
+ * This provides optional arguments to xlat thread instantiation functions.
+ */
+typedef struct {
+ void const *inst; //!< xlat instance data.
+ void *thread; //!< xlat thread instance data to populate.
+ xlat_exp_t const *ex; //!< Tokenized expression to use in expansion.
+ module_ctx_t const *mctx; //!< Synthesised module calling ctx.
+ fr_event_list_t *el; //!< To register any I/O handlers or timers against.
+ void *uctx; //!< Passed to the registration function.
+} xlat_thread_inst_ctx_t;
+
+/** Wrapper to create a xlat_ctx_t as a compound literal
+ *
+ * This is used so that the compiler will flag any uses of (xlat_ctx_t)
+ * which don't set the required fields. Additional arguments should be added
+ * to this macro whenever the xlat_ctx_t fields are altered.
+ *
+ * @param[in] _inst Instance data of the module being called.
+ * @param[in] _thread Instance data of the thread being called.
+ * @param[in] _mctx Module ctx.
+ * @param[in] _rctx resume ctx data.
+ */
+#define XLAT_CTX(_inst, _thread, _mctx, _rctx) &(xlat_ctx_t){ .inst = _inst, .thread = _thread, .mctx = _mctx, .rctx = _rctx }
+
+/** Wrapper to create a xlat_inst_ctx_t as a compound literal
+ *
+ * This is used so that the compiler will flag any uses of (xlat_inst_ctx_t)
+ * which don't set the required fields. Additional arguments should be added
+ * to this macro whenever the xlat_inst_ctx_t fields are altered.
+ *
+ * @param[in] _inst Instance data of the module being called.
+ * @param[in] _ex xlat expression to be evaluated by the instantiation function.
+ * @param[in] _mctx The module_inst_ctx_t from the parent module (if any).
+ * @param[in] _uctx passed when the instantiation function was registered.
+ */
+#define XLAT_INST_CTX(_inst, _ex, _mctx, _uctx) &(xlat_inst_ctx_t){ .inst = _inst, .ex = _ex, .mctx = _mctx, .uctx = _uctx }
+
+/** Wrapper to create a xlat_thread_inst_ctx_t as a compound literal
+ *
+ * This is used so that the compiler will flag any uses of (xlat_thread_inst_ctx_t)
+ * which don't set the required fields. Additional arguments should be added
+ * to this macro whenever the xlat_thread_inst_ctx_t fields are altered.
+ *
+ * @param[in] _inst Instance data of the module being called.
+ * @param[in] _thread Instance data of the thread being called.
+ * @param[in] _ex xlat expression to be evaluated by the instantiation function.
+ * @param[in] _mctx The module_inst_ctx_t from the parent module (if any).
+ * @param[in] _el To register any I/O handlers or timers against.
+ * @param[in] _uctx passed when the instantiation function was registered.
+ */
+#define XLAT_THREAD_INST_CTX(_inst, _thread, _ex, _mctx, _el, _uctx) &(xlat_thread_inst_ctx_t){ .inst = _inst, .ex = _ex, .mctx = _mctx, .el = _el, .uctx = _uctx }
+
+#ifdef __cplusplus
+}
+#endif
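
For orientation, a minimal sketch of what a converted xlat callback looks like under this calling convention. The function and its instance type (my_xlat, my_xlat_inst_t) are hypothetical and not part of this commit; the signature and the xctx->inst access follow the conversions shown in the hunks above, and fr_dcursor_append is assumed to be the usual way of returning a value box.

typedef struct {
	bool	invert;				//!< Hypothetical instance data for the sketch.
} my_xlat_inst_t;

static xlat_action_t my_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
			     xlat_ctx_t const *xctx,
			     UNUSED request_t *request, fr_value_box_list_t *in)
{
	my_xlat_inst_t const	*inst = talloc_get_type_abort_const(xctx->inst, my_xlat_inst_t);
	fr_value_box_t		*arg = fr_dlist_head(in);
	fr_value_box_t		*vb;

	/*
	 *	Instance data arrives via xctx->inst, and thread data via
	 *	xctx->thread, instead of two separate void pointer arguments.
	 */
	MEM(vb = fr_value_box_alloc(ctx, FR_TYPE_BOOL, NULL, false));
	vb->vb_bool = inst->invert ? !arg->vb_bool : arg->vb_bool;
	fr_dcursor_append(out, vb);

	return XLAT_ACTION_DONE;
}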
* @file xlat_eval.c
 * @brief String expansion ("translation"). Evaluation of pre-parsed xlat expansions.
*
+ * @copyright 2018-2021 Arran Cudbard-Bell (a.cudbardb@freeradius.org)
* @copyright 2000,2006 The FreeRADIUS server project
* @copyright 2000 Alan DeKok (aland@freeradius.org)
*/
void xlat_signal(xlat_func_signal_t signal, xlat_exp_t const *exp,
request_t *request, void *rctx, fr_state_signal_t action)
{
- signal(request, exp->call.inst, xlat_thread_instance_find(exp)->data, rctx, action);
+ xlat_thread_inst_t *t = xlat_thread_instance_find(exp);
+
+ signal(XLAT_CTX(exp->call.inst, t->data, t->mctx, rctx), request, action);
}
/** Call an xlat's resumption method
* when it yielded.
*/
xlat_action_t xlat_frame_eval_resume(TALLOC_CTX *ctx, fr_dcursor_t *out,
- xlat_func_resume_t resume, xlat_exp_t const *exp,
+ xlat_func_t resume, xlat_exp_t const *exp,
request_t *request, fr_value_box_list_t *result, void *rctx)
{
- xlat_thread_inst_t *thread_inst = xlat_thread_instance_find(exp);
+ xlat_thread_inst_t *t = xlat_thread_instance_find(exp);
xlat_action_t xa;
/*
* and don't remove them from the list.
*/
VALUE_BOX_TALLOC_LIST_VERIFY(result);
- xa = resume(ctx, out, request, exp->call.inst, thread_inst->data, result, rctx);
+ xa = resume(ctx, out, XLAT_CTX(exp->call.inst->data, t->data, t->mctx, rctx), request, result);
VALUE_BOX_TALLOC_LIST_VERIFY(result);
RDEBUG2("EXPAND %%%c%s:...%c",
case XLAT_FUNC_NORMAL:
{
xlat_action_t xa;
- xlat_thread_inst_t *thread_inst;
+ xlat_thread_inst_t *t;
fr_value_box_list_t result_copy;
- thread_inst = xlat_thread_instance_find(node);
+ t = xlat_thread_instance_find(node);
+ fr_assert(t);
XLAT_DEBUG("** [%i] %s(func-async) - %%%c%s:%pM%c",
unlang_interpret_stack_depth(request), __FUNCTION__,
}
VALUE_BOX_TALLOC_LIST_VERIFY(result);
- xa = node->call.func->func.async(ctx, out, request,
- node->call.inst ? node->call.inst->data : NULL,
- thread_inst ? thread_inst->data : NULL, result);
+ xa = node->call.func->func.async(ctx, out,
+ XLAT_CTX(node->call.inst->data, t->data, t->mctx, NULL),
+ request, result);
VALUE_BOX_TALLOC_LIST_VERIFY(result);
if (RDEBUG_ENABLED2) xlat_debug_log_expansion(request, *in, &result_copy);
return xa;
case XLAT_ACTION_PUSH_CHILD:
- RDEBUG2(" -- CHILD");
+ RDEBUG3(" -- CHILD");
+ return xa;
+
+ case XLAT_ACTION_PUSH_UNLANG:
+ RDEBUG3(" -- UNLANG");
return xa;
case XLAT_ACTION_YIELD:
- RDEBUG2(" -- YIELD");
+ RDEBUG3(" -- YIELD");
return xa;
case XLAT_ACTION_DONE: /* Process the result */
xlat_debug_log_expansion(request, node, NULL);
if (node->call.func->type == XLAT_FUNC_NORMAL) {
- node->call.func->func.async(ctx, out, request, node->call.func->uctx, NULL, NULL);
+ node->call.func->func.async(ctx, out,
+ XLAT_CTX(node->call.func->uctx, NULL, NULL, NULL),
+ request, NULL);
} else {
MEM(value = fr_value_box_alloc_null(ctx));
slen = node->call.func->func.sync(value, &str, node->call.func->buf_len, node->call.func->mod_inst,
fr_value_box_list_init (&result);
fr_dcursor_init(&out, &result);
- action = node->call.func->func.async(pool, &out, request, node->call.func->uctx, NULL, NULL);
+ action = node->call.func->func.async(ctx, &out,
+ XLAT_CTX(node->call.func->uctx, NULL, NULL, NULL),
+ request, NULL);
if (action == XLAT_ACTION_FAIL) {
talloc_free(pool);
return NULL;
* the async xlat up until the point
* that it needs to yield.
*/
- if (unlang_xlat_push(pool, &result, request, node, true) < 0) {
+ if (unlang_xlat_push(pool, NULL, &result, request, node, true) < 0) {
talloc_free(pool);
return NULL;
}
return slen;
}
-static ssize_t _xlat_eval(TALLOC_CTX *ctx, char **out, size_t outlen, request_t *request, char const *fmt,
- xlat_escape_legacy_t escape, void const *escape_ctx) CC_HINT(nonnull (2, 4, 5));
-
/** Replace %whatever in a string.
*
* See 'doc/unlang/xlat.adoc' for more information.
* @param[in] escape_ctx pointer to pass to escape function.
* @return length of string written @bug should really have -1 for failure.
*/
-static ssize_t _xlat_eval(TALLOC_CTX *ctx, char **out, size_t outlen, request_t *request, char const *fmt,
- xlat_escape_legacy_t escape, void const *escape_ctx)
+static CC_HINT(nonnull (2, 4, 5))
+ssize_t _xlat_eval(TALLOC_CTX *ctx, char **out, size_t outlen, request_t *request, char const *fmt,
+ xlat_escape_legacy_t escape, void const *escape_ctx)
{
ssize_t len;
xlat_exp_t *node;
/*
* Give better errors than the old code.
*/
- len = xlat_tokenize_ephemeral(ctx, &node, NULL,
+ len = xlat_tokenize_ephemeral(ctx, &node, unlang_interpret_event_list(request), NULL,
&FR_SBUFF_IN(fmt, strlen(fmt)),
NULL, &(tmpl_rules_t){ .dict_def = request->dict });
if (len == 0) {
* @file xlat_inst.c
* @brief Create instance data for xlat function calls.
*
+ * @copyright 2018-2021 Arran Cudbard-Bell (a.cudbardb@freeradius.org)
* @copyright 2018 The FreeRADIUS server project
- * @copyright 2018 Arran Cudbard-Bell (a.cudbardb@freeradius.org)
*/
RCSID("$Id$")
+#include <freeradius-devel/io/schedule.h>
#include <freeradius-devel/server/base.h>
-#include <freeradius-devel/util/debug.h>
#include <freeradius-devel/unlang/xlat_priv.h>
-#include <freeradius-devel/io/schedule.h>
-
+#include <freeradius-devel/util/debug.h>
+#include <freeradius-devel/util/heap.h>
/** Holds instance data created by xlat_instantiate
*/
-static fr_rb_tree_t *xlat_inst_tree;
+static fr_heap_t *xlat_inst_tree;
/** Holds thread specific instance data created by xlat_instantiate
*/
-static _Thread_local fr_rb_tree_t *xlat_thread_inst_tree;
+static _Thread_local fr_heap_t *xlat_thread_inst_tree;
/** Compare two xlat instances based on node pointer
*
{
xlat_inst_t const *a = one, *b = two;
- return CMP(a->node, b->node);
+ return CMP(a->node->call.id, b->node->call.id);
}
/** Compare two thread instances based on node pointer
{
xlat_thread_inst_t const *a = one, *b = two;
- return CMP(a->node, b->node);
+ return CMP(a->node->call.id, b->node->call.id);
}
/** Destructor for xlat_thread_inst_t
* as we need to call thread_detach *before* any of the children
* of the talloc ctx are freed.
*/
-static int _xlat_thread_inst_detach(xlat_thread_inst_t *thread_inst)
+static int _xlat_thread_inst_detach(xlat_thread_inst_t *xt)
{
- fr_assert(thread_inst->node->type == XLAT_FUNC);
+ xlat_call_t const *call = &xt->node->call;
- if (thread_inst->node->call.func->thread_detach) {
- (void) thread_inst->node->call.func->thread_detach(thread_inst->data, thread_inst->node->call.func->thread_uctx);
- }
+ fr_assert(xt->node->type == XLAT_FUNC);
- return 0;
-}
-
-/** Destructor for xlat_thread_inst_tree elements
- *
- */
-static void _xlat_thread_inst_free(void *to_free)
-{
- xlat_thread_inst_t *thread_inst = talloc_get_type_abort(to_free, xlat_thread_inst_t);
+ DEBUG4("Cleaning up xlat thread instance (%p/%p)", xt, xt->data);
- DEBUG4("Worker cleaning up xlat thread instance (%p/%p)", thread_inst, thread_inst->data);
+ if (call->func->thread_detach) {
+ (void) call->func->thread_detach(XLAT_THREAD_INST_CTX(call->inst->data,
+ xt->data, xt->node, xt->mctx,
+ xt->el,
+ call->func->thread_uctx));
+ }
- talloc_free(thread_inst);
+ return 0;
}
/** Create thread instances where needed
*
* @param[in] ctx to allocate thread instance data in.
- * @param[in] inst to allocate thread-instance data for.
+ * @param[in] el event list to register I/O handlers against.
+ * @param[in] xi to allocate thread-instance data for.
* @return
* - 0 on success. The node/thread specific data will be inserted
* into xlat_thread_inst_tree.
* - -1 on failure.
*/
-static xlat_thread_inst_t *xlat_thread_inst_alloc(TALLOC_CTX *ctx, xlat_inst_t *inst)
+static xlat_thread_inst_t *xlat_thread_inst_alloc(TALLOC_CTX *ctx, fr_event_list_t *el, xlat_inst_t *xi)
{
- xlat_thread_inst_t *thread_inst = NULL;
+ size_t extra_headers = 0;
+ size_t extra_mem = 0;
+ xlat_call_t const *call = &((xlat_inst_t *)talloc_get_type_abort(xi, xlat_inst_t))->node->call;
+ xlat_thread_inst_t *xt = NULL;
- (void)talloc_get_type_abort(inst, xlat_inst_t);
+ /*
+ * Allocate extra room for the thread instance data
+ */
+ if (call->func->thread_inst_size) {
+ extra_headers++;
+ extra_mem += call->func->thread_inst_size;
+ }
- if (inst->node->call.func->thread_inst_size) {
- MEM(thread_inst = talloc_zero_pooled_object(ctx, xlat_thread_inst_t,
- 1, inst->node->call.func->thread_inst_size));
+ /*
+ * Allocate extra room for the mctx
+ */
+ if (call->func->mctx) {
+ extra_headers++;
+ extra_mem += sizeof(*call->func->mctx);
+ }
+
+ if (extra_headers || extra_mem) {
+ MEM(xt = talloc_zero_pooled_object(ctx, xlat_thread_inst_t, extra_headers, extra_mem));
} else {
- MEM(thread_inst = talloc_zero(ctx, xlat_thread_inst_t));
+ MEM(xt = talloc_zero(ctx, xlat_thread_inst_t));
}
- thread_inst->node = inst->node;
+ xt->node = xi->node;
+ xt->el = el;
- fr_assert(inst->node->type == XLAT_FUNC);
- fr_assert(!inst->node->call.thread_inst); /* May be missing inst, but this is OK */
+ fr_assert(xi->node->type == XLAT_FUNC);
- talloc_set_destructor(thread_inst, _xlat_thread_inst_detach);
- if (inst->node->call.func->thread_inst_size) {
- MEM(thread_inst->data = talloc_zero_array(thread_inst, uint8_t, inst->node->call.func->thread_inst_size));
+ talloc_set_destructor(xt, _xlat_thread_inst_detach);
- /*
- * This is expensive, only do it if we might
- * might be using it.
- */
-#ifndef TALLOC_GET_TYPE_ABORT_NOOP
- talloc_set_name_const(thread_inst->data, inst->node->call.func->thread_inst_type);
-#endif
+ if (call->func->thread_inst_size) {
+ MEM(xt->data = talloc_zero_array(xt, uint8_t, call->func->thread_inst_size));
+
+ if (call->func->thread_inst_type) {
+ talloc_set_name_const(xt->data, call->func->thread_inst_type);
+ } else {
+ talloc_set_name(xt->data, "xlat_%s_thread_t", call->func->name);
+ }
}
- DEBUG4("Worker alloced xlat thread instance (%p/%p)", thread_inst, thread_inst->data);
+ /*
+ * Create a module call ctx.
+ *
+ * We do this now because we're operating in the
+ * context of a thread and can get the thread
+ * specific data for the module.
+ */
+ if (call->func->mctx) {
+ module_ctx_t *mctx;
+
+ mctx = module_ctx_from_inst(xt, call->func->mctx);
+ mctx->thread = module_thread_by_data(mctx->inst->data)->data;
- return thread_inst;
+ xt->mctx = mctx;
+ }
+
+ DEBUG4("Alloced xlat thread instance (%p/%p)", xt, xt->data);
+
+ return xt;
}
/** Destructor for xlat_inst_t
* as we need to call thread_detach *before* any of the children
* of the talloc ctx are freed.
*/
-static int _xlat_inst_detach(xlat_inst_t *inst)
+static int _xlat_inst_detach(xlat_inst_t *xi)
{
- (void)talloc_get_type_abort_const(inst->node, xlat_exp_t);
- fr_assert(inst->node->type == XLAT_FUNC);
+ xlat_call_t const *call = &((xlat_exp_t *)talloc_get_type_abort_const(xi->node, xlat_exp_t))->call;
+
+ fr_assert(xlat_inst_tree); /* xlat_inst_init must have been called */
+ fr_assert(xi->node->type == XLAT_FUNC);
/*
- * Remove permanent data from the instance tree.
+ * Remove permanent data from the instance tree
+ * and auto-free the tree when the last xlat is
+ * freed.
*/
- if (!inst->node->call.ephemeral) {
- fr_rb_delete(xlat_inst_tree, inst);
- if (fr_rb_num_elements(xlat_inst_tree) == 0) TALLOC_FREE(xlat_inst_tree);
+ if (!call->ephemeral) {
+ if (fr_heap_entry_inserted(xi->idx)) fr_heap_extract(xlat_inst_tree, xi);
+ if (fr_heap_num_elements(xlat_inst_tree) == 0) TALLOC_FREE(xlat_inst_tree);
}
- if (inst->node->call.func->detach) (void) inst->node->call.func->detach(inst->data, inst->node->call.func->uctx);
+ DEBUG4("Cleaning up xlat instance (%p/%p)", xi, xi->data);
+ if (call->func->detach) (void) call->func->detach(XLAT_INST_CTX(xi->data,
+ xi->node,
+ call->func->mctx,
+ call->func->uctx));
return 0;
}
-/** Destructor for xlat_inst_tree elements
- *
- */
-static void _xlat_inst_free(void *to_free)
-{
- xlat_inst_t *inst = talloc_get_type_abort(to_free, xlat_inst_t);
- talloc_free(inst);
-}
-
/** Allocate instance data for an xlat expansion
*
* @param[in] node to allocate instance data for.
*/
static xlat_inst_t *xlat_inst_alloc(xlat_exp_t *node)
{
- xlat_inst_t *inst = NULL;
+ xlat_call_t const *call = &node->call;
+ xlat_inst_t *xi = NULL;
(void)talloc_get_type_abort(node, xlat_exp_t);
fr_assert(xlat_inst_tree); /* xlat_inst_init must have been called */
fr_assert(node->type == XLAT_FUNC);
- fr_assert(!node->call.inst);
+ fr_assert(!call->inst);
- if (node->call.func->inst_size) {
- MEM(inst = talloc_zero_pooled_object(node, xlat_inst_t, 1, node->call.func->inst_size));
+ if (call->func->inst_size) {
+ MEM(xi = talloc_zero_pooled_object(node, xlat_inst_t, 1, call->func->inst_size));
} else {
- MEM(inst = talloc_zero(node, xlat_inst_t));
+ MEM(xi = talloc_zero(node, xlat_inst_t));
}
-
- inst->node = node;
+ xi->node = node;
/*
* Instance data is freed when the
* node is freed.
*/
- talloc_set_destructor(inst, _xlat_inst_detach);
- if (node->call.func->inst_size) {
- MEM(inst->data = talloc_zero_array(inst, uint8_t, node->call.func->inst_size));
-
- /*
- * This is expensive, only do it if we might
- * might be using it.
- */
-#ifndef TALLOC_GET_TYPE_ABORT_NOOP
- talloc_set_name_const(inst->data, node->call.func->inst_type);
-#endif
+ talloc_set_destructor(xi, _xlat_inst_detach);
+ if (call->func->inst_size) {
+ MEM(xi->data = talloc_zero_array(xi, uint8_t, call->func->inst_size));
+ if (call->func->inst_type) {
+ talloc_set_name_const(xi->data, call->func->inst_type);
+ } else {
+ talloc_set_name(xi->data, "xlat_%s_t", call->func->name);
+ }
}
- return inst;
+ return xi;
}
/** Callback for creating "ephemeral" instance data for a #xlat_exp_t
* - 0 if instantiation functions were successful.
* - -1 if either instantiation function failed.
*/
-static int _xlat_instantiate_ephemeral_walker(xlat_exp_t *node, UNUSED void *uctx)
+static int _xlat_instantiate_ephemeral_walker(xlat_exp_t *node, void *uctx)
{
- fr_assert(!node->call.inst && !node->call.thread_inst);
+ fr_event_list_t *el = talloc_get_type_abort(uctx, fr_event_list_t);
+ xlat_call_t *call = &node->call;
+ xlat_inst_t *xi;
+ xlat_thread_inst_t *xt;
+
+ fr_assert(!call->inst && !call->thread_inst);
- node->call.inst = xlat_inst_alloc(node);
- if (!node->call.inst) return -1;
+ xi = call->inst = xlat_inst_alloc(node);
+ if (!xi) return -1;
/*
* Instantiate immediately unlike permanent XLATs
* Where it's a separate phase.
*/
- if (node->call.func->instantiate &&
- (node->call.func->instantiate(node->call.inst->data, node, node->call.func->uctx) < 0)) {
+ if (call->func->instantiate &&
+ (call->func->instantiate(XLAT_INST_CTX(xi->data,
+ xi->node,
+ call->func->mctx,
+ call->func->uctx)) < 0)) {
error:
- TALLOC_FREE(node->call.inst);
+ TALLOC_FREE(call->inst);
return -1;
}
/*
* Create a thread instance too.
*/
- node->call.thread_inst = xlat_thread_inst_alloc(node, node->call.inst);
- if (!node->call.thread_inst) goto error;
+ xt = node->call.thread_inst = xlat_thread_inst_alloc(node, el, call->inst);
+ if (!xt) goto error;
- if (node->call.func->thread_instantiate &&
- node->call.func->thread_instantiate(node->call.inst, node->call.thread_inst->data,
- node, node->call.func->thread_uctx) < 0) goto error;
+ if (call->func->thread_instantiate &&
+ (call->func->thread_instantiate(XLAT_THREAD_INST_CTX(xi->data,
+ xt->data,
+ xi->node,
+ xt->mctx,
+ el,
+ call->func->thread_uctx)) < 0)) goto error;
/*
* Mark this up as an ephemeral node, so the destructors
* don't search for it in the xlat_inst_tree.
*/
- node->call.ephemeral = true;
+ call->ephemeral = true;
return 0;
}
*
 * @param[in] root of xlat tree to create instance data for.
+ * @param[in] el event list to register any I/O handlers and timers against.
 */
-int xlat_instantiate_ephemeral(xlat_exp_t *root)
+int xlat_instantiate_ephemeral(xlat_exp_t *root, fr_event_list_t *el)
{
- return xlat_eval_walk(root, _xlat_instantiate_ephemeral_walker, XLAT_FUNC, NULL);
-}
-
-/** Walker callback for xlat_inst_tree
- *
- */
-static int _xlat_thread_instantiate(void *data, void *uctx)
-{
- xlat_thread_inst_t *thread_inst;
- xlat_inst_t *inst = talloc_get_type_abort(data, xlat_inst_t);
-
- thread_inst = xlat_thread_inst_alloc(uctx, data);
- if (!thread_inst) return -1;
-
- DEBUG3("Instantiating xlat \"%s\" node %p, instance %p, new thread instance %p",
- inst->node->call.func->name, inst->node, inst, thread_inst);
-
- if (inst->node->call.func->thread_instantiate) {
- int ret;
-
- ret = inst->node->call.func->thread_instantiate(inst->data, thread_inst->data,
- inst->node, inst->node->call.func->thread_uctx);
- if (ret < 0) {
- talloc_free(thread_inst);
- return -1;
- }
- }
-
- fr_rb_insert(xlat_thread_inst_tree, thread_inst);
-
- return 0;
+ return xlat_eval_walk(root, _xlat_instantiate_ephemeral_walker, XLAT_FUNC, el);
}
/** Retrieve xlat/thread specific instance data
*/
xlat_thread_inst_t *xlat_thread_instance_find(xlat_exp_t const *node)
{
+ xlat_call_t const *call = &node->call;
+ xlat_thread_inst_t *xt;
+
fr_assert(xlat_thread_inst_tree);
fr_assert(node->type == XLAT_FUNC);
+ fr_assert(fr_heap_num_elements(xlat_thread_inst_tree) == fr_heap_num_elements(xlat_inst_tree));
- if (node->call.ephemeral) return node->call.thread_inst;
+ if (call->ephemeral) return call->thread_inst;
- return fr_rb_find(xlat_thread_inst_tree, &(xlat_thread_inst_t){ .node = node });
+ /*
+ * This works because the comparator for
+ * the thread heap returns the same result
+ * as the one for the global instance data
+ * heap, and both heaps contain the same
+ * number of elements.
+ */
+ xt = fr_heap_peek_at(xlat_thread_inst_tree, call->inst->idx);
+ fr_assert(xt && (xt->idx == call->inst->idx));
+
+ return xt;
}
/** Create thread specific instance tree and create thread instances
*
* @param[in] ctx to bind instance tree lifetime to. Must not be
* shared between multiple threads.
+ * @param[in] el Event list to pass to all thread instantiation functions.
* @return
* - 0 on success.
* - -1 on failure.
*/
-int xlat_thread_instantiate(TALLOC_CTX *ctx)
+int xlat_thread_instantiate(TALLOC_CTX *ctx, fr_event_list_t *el)
{
- fr_rb_iter_preorder_t iter;
- void *data;
+ fr_assert(xlat_inst_tree);
+
+ if (unlikely(!xlat_thread_inst_tree)) {
+ MEM(xlat_thread_inst_tree = fr_heap_talloc_alloc(ctx,
+ _xlat_thread_inst_cmp,
+ xlat_thread_inst_t,
+ idx,
+ fr_heap_num_elements(xlat_inst_tree)));
+ }
- if (!xlat_inst_tree) return 0;
+ fr_heap_foreach(xlat_inst_tree, xlat_inst_t, xi) {
+ int ret;
+ xlat_call_t const *call = &xi->node->call;
+ xlat_thread_inst_t *xt = xlat_thread_inst_alloc(xlat_thread_inst_tree, el, xi);
+ if (unlikely(!xt)) return -1;
- if (!xlat_thread_inst_tree) {
- MEM(xlat_thread_inst_tree = fr_rb_inline_talloc_alloc(ctx, xlat_thread_inst_t, inst_node,
- _xlat_thread_inst_cmp, _xlat_thread_inst_free));
- }
+ DEBUG3("Instantiating xlat \"%s\" node %p, instance %p, new thread instance %p",
+ call->func->name, xt->node, xi->data, xt);
- /*
- * Walk the inst tree, creating thread specific instances.
- */
- for (data = fr_rb_iter_init_preorder(&iter, xlat_inst_tree);
- data;
- data = fr_rb_iter_next_preorder(&iter)) {
- if (_xlat_thread_instantiate(data, xlat_thread_inst_tree) < 0) {
- TALLOC_FREE(xlat_thread_inst_tree);
+ ret = fr_heap_insert(xlat_thread_inst_tree, xt);
+ if (!fr_cond_assert(ret == 0)) {
+ error:
+ TALLOC_FREE(xlat_thread_inst_tree); /* Reset the tree on error */
return -1;
}
- }
+
+ if (!call->func->thread_instantiate) continue;
+
+ ret = call->func->thread_instantiate(XLAT_THREAD_INST_CTX(xi->data,
+ xt->data,
+ xi->node,
+ xt->mctx,
+ el,
+ call->func->thread_uctx));
+ if (unlikely(ret < 0)) goto error;
+ }}
return 0;
}
TALLOC_FREE(xlat_thread_inst_tree);
}
-/** Initialise the xlat inst code
+/** Initialise the xlat instance data code
*
*/
static int xlat_instantiate_init(void)
{
- if (xlat_inst_tree) return 0;
+ if (unlikely(xlat_inst_tree)) return 0;
- xlat_inst_tree = fr_rb_inline_talloc_alloc(NULL, xlat_inst_t, inst_node, _xlat_inst_cmp, _xlat_inst_free);
+ xlat_inst_tree = fr_heap_talloc_alloc(NULL, _xlat_inst_cmp, xlat_inst_t, idx, 0);
if (!xlat_inst_tree) return -1;
return 0;
/** Call instantiation functions for "permanent" xlats
*
- * Should be called after module instantiation is complete.
+ * Should be called after all the permanent xlats have been tokenised/bootstrapped.
*/
int xlat_instantiate(void)
{
- fr_rb_iter_preorder_t iter;
- void *data;
+ if (unlikely(!xlat_inst_tree)) xlat_instantiate_init();
- if (!xlat_inst_tree) xlat_instantiate_init();
+ /*
+ * Loop over all the bootstrapped
+ * xlats, instantiating them.
+ */
+ fr_heap_foreach(xlat_inst_tree, xlat_inst_t, xi) {
+ xlat_call_t const *call = &xi->node->call;
- for (data = fr_rb_iter_init_preorder(&iter, xlat_inst_tree);
- data;
- data = fr_rb_iter_next_preorder(&iter)) {
- xlat_inst_t *inst = talloc_get_type_abort(data, xlat_inst_t);
+ /*
+ * We can't instantiate functions which
+ * still have children that need resolving
+ * as this may break redundant xlats
+ * if we end up needing to duplicate the
+ * argument nodes.
+ */
+ fr_assert(!xi->node->flags.needs_resolving);
- if (inst->node->call.func->instantiate &&
- (inst->node->call.func->instantiate(inst->data, inst->node, inst->node->call.func->uctx) < 0)) {
- return -1;
- }
- }
+ if (!call->func->instantiate) continue;
+
+ if (call->func->instantiate(XLAT_INST_CTX(xi->data,
+ xi->node,
+ call->func->mctx,
+ call->func->uctx)) < 0) return -1;
+ }}
return 0;
}
*/
int xlat_bootstrap_func(xlat_exp_t *node)
{
+ static uint64_t call_id;
+ xlat_call_t *call = &node->call;
bool ret;
fr_assert(node->type == XLAT_FUNC);
- fr_assert(!node->call.inst && !node->call.thread_inst);
+ fr_assert(!call->id && !call->inst && !call->thread_inst); /* Node cannot already have instance data */
+ if (!fr_cond_assert(!call->ephemeral)) return -1; /* Can't bootstrap ephemeral calls */
+
+ call->inst = xlat_inst_alloc(node);
+ if (unlikely(!call->inst)) return -1;
- node->call.inst = xlat_inst_alloc(node);
- if (!node->call.inst) return -1;
+ DEBUG3("Instantiating xlat \"%s\" node %p, new instance %p", call->func->name, node, call->inst);
- DEBUG3("Instantiating xlat \"%s\" node %p, new instance %p", node->call.func->name, node, node->call.inst);
+ /*
+ * Assign a unique ID to each xlat function call.
+ *
+ * This is so they're ordered in the heap by
+ * the order in which they were "bootstrapped".
+ *
+ * This allows additional functions to be added
+ * in the instantiation functions of other xlats
+ * which is useful for the redundant xlats.
+ */
+ node->call.id = call_id++;
- ret = fr_rb_insert(xlat_inst_tree, node->call.inst);
- if (!fr_cond_assert(ret)) {
- TALLOC_FREE(node->call.inst);
+ ret = fr_heap_insert(xlat_inst_tree, call->inst);
+ if (!fr_cond_assert(ret == 0)) {
+ TALLOC_FREE(call->inst);
return -1;
}
*/
fr_assert(!xlat_thread_inst_tree);
- if (!xlat_inst_tree) xlat_instantiate_init();
+ /*
+ * Initialise the instance tree if this is the first xlat
+ * being instantiated.
+ */
+ if (unlikely(!xlat_inst_tree)) xlat_instantiate_init();
+ /*
+ * Walk an expression registering all the function calls
+ * so that we can instantiate them later.
+ */
return xlat_eval_walk(root, _xlat_bootstrap_walker, XLAT_FUNC, NULL);
}
*/
void xlat_instances_free(void)
{
- TALLOC_FREE(xlat_inst_tree);
+ xlat_inst_t *xi;
+
+ /*
+ * When we get to zero instances the heap
+ * is freed, so we need to check there's
+ * still a heap to pass to fr_heap_pop.
+ */
+ while (xlat_inst_tree && (xi = fr_heap_pop(xlat_inst_tree))) talloc_free(xi);
}
*
*/
typedef struct {
+ uint64_t id; //!< Identifier unique to each permanent xlat node.
+ ///< This is used by the instantiation code to order
+ ///< nodes by the time they were created.
+
xlat_t const *func; //!< The xlat expansion to expand format with.
bool ephemeral; //!< Instance data is ephemeral (not inserted)
///< into the instance tree.
request_t *request, void *rctx, fr_state_signal_t action);
xlat_action_t xlat_frame_eval_resume(TALLOC_CTX *ctx, fr_dcursor_t *out,
- xlat_func_resume_t resume, xlat_exp_t const *exp,
+ xlat_func_t resume, xlat_exp_t const *exp,
request_t *request, fr_value_box_list_t *result, void *rctx);
xlat_action_t xlat_frame_eval_repeat(TALLOC_CTX *ctx, fr_dcursor_t *out,
*
* @param[in] ctx to allocate dynamic buffers in.
* @param[out] head the head of the xlat list / tree structure.
- * @param[in,out] flags that control evaluation and parsing.
+ * @param[in] el for registering any I/O handlers.
+ * @param[in] flags that control evaluation and parsing.
* @param[in] in the format string to expand.
* @param[in] p_rules from the encompassing grammar.
* @param[in] t_rules controlling how attribute references are parsed.
* - 0 and *head != NULL - Zero length expansion
* - <0 the negative offset of the parse failure.
*/
-ssize_t xlat_tokenize_ephemeral(TALLOC_CTX *ctx, xlat_exp_t **head, xlat_flags_t *flags,
- fr_sbuff_t *in,
+ssize_t xlat_tokenize_ephemeral(TALLOC_CTX *ctx, xlat_exp_t **head,
+ fr_event_list_t *el,
+ xlat_flags_t *flags, fr_sbuff_t *in,
fr_sbuff_parse_rules_t const *p_rules, tmpl_rules_t const *t_rules)
{
fr_sbuff_t our_in = FR_SBUFF(in);
/*
* Create ephemeral instance data for the xlat
*/
- if (xlat_instantiate_ephemeral(*head) < 0) {
+ if (xlat_instantiate_ephemeral(*head, el) < 0) {
fr_strerror_const("Failed performing ephemeral instantiation for xlat");
TALLOC_FREE(*head);
return 0;
fr_pair_list_init(&copy); \
if (fr_pair_list_copy(dst, &copy, src) < 0) return -1; \
} while (0)
-
+
switch (op) {
/*
return h->p[1];
}
+void *fr_heap_peek_at(fr_heap_t *hp, fr_heap_index_t idx)
+{
+ heap_t *h = *hp;
+
+ if (unlikely(idx > h->num_elements)) return NULL;
+
+ return h->p[idx];
+}
+
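
The accessor above is what enables the O(1) thread-instance lookup: heap elements record their own slot in the fr_heap_index_t member named when the heap is created, so a stored index can be handed straight back to the heap. A minimal usage sketch, with a hypothetical element type and heap (my_elem_t, my_cmp, my_heap):

typedef struct {
	fr_heap_index_t	idx;			//!< Written back by the heap on insert.
	int		value;
} my_elem_t;

/* Heap created elsewhere with fr_heap_talloc_alloc(ctx, my_cmp, my_elem_t, idx, 0) */
my_elem_t *found = fr_heap_peek_at(my_heap, elem->idx);	/* No comparisons, no traversal */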
void *fr_heap_pop(fr_heap_t *hp)
{
heap_t *h = *hp;
int fr_heap_extract(fr_heap_t *hp, void *data) CC_HINT(nonnull);
void *fr_heap_pop(fr_heap_t *hp) CC_HINT(nonnull);
void *fr_heap_peek(fr_heap_t *hp) CC_HINT(nonnull);
+void *fr_heap_peek_at(fr_heap_t *hp, fr_heap_index_t idx) CC_HINT(nonnull);
void *fr_heap_peek_tail(fr_heap_t *hp) CC_HINT(nonnull);
uint32_t fr_heap_num_elements(fr_heap_t *hp) CC_HINT(nonnull);
memcpy(replacement, to_replace, sizeof(*replacement));
memset(to_replace, 0, sizeof(*to_replace));
+ /* FIXME - Need to fix children and parent */
+
return true;
}
void fr_rb_iter_delete_inorder(fr_rb_iter_inorder_t *iter) CC_HINT(nonnull);
+#define fr_rb_inorder_foreach(_tree, _type, _iter) \
+{ \
+ fr_rb_iter_inorder_t _state; \
+ for (_type *_iter = fr_rb_iter_init_inorder(&_state, _tree); _iter; _iter = fr_rb_iter_next_inorder(&_state))
+
/** Iterator structure for pre-order traversal of an rbtree
*/
typedef struct {
void *fr_rb_iter_next_preorder(fr_rb_iter_preorder_t *iter) CC_HINT(nonnull);
+#define fr_rb_preorder_foreach(_tree, _type, _iter) \
+{ \
+ fr_rb_iter_preorder_t _state; \
+ for (_type *_iter = fr_rb_iter_init_preorder(&_state, _tree); _iter; _iter = fr_rb_iter_next_preorder(&_state))
+
/** Iterator structure for post-order traversal of an rbtree
*/
typedef struct {
void *fr_rb_iter_next_postorder(fr_rb_iter_postorder_t *iter) CC_HINT(nonnull);
+#define fr_rb_postorder_foreach(_tree, _type, _iter) \
+{ \
+ fr_rb_iter_postorder_t _state; \
+ for (_type *_iter = fr_rb_iter_init_postorder(&_state, _tree); _iter; _iter = fr_rb_iter_next_postorder(&_state))
+
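As with the fr_heap_foreach loops used earlier in this commit, each of these macros opens its own scope for the iterator state, so the caller's loop body has to be closed with a double brace. A brief sketch with hypothetical names (my_tree, my_node_t):

unsigned int count = 0;

fr_rb_inorder_foreach(my_tree, my_node_t, node) {
	count++;				/* Visits every element in key order */
}}
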
int fr_rb_flatten_inorder(TALLOC_CTX *ctx, void **out[], fr_rb_tree_t *tree);
int fr_rb_flatten_preorder(TALLOC_CTX *ctx, void **out[], fr_rb_tree_t *tree);
CONF_PARSER_TERMINATOR
};
-static int always_xlat_instantiate(void *xlat_inst, UNUSED xlat_exp_t const *exp, void *uctx)
-{
- *((rlm_always_t **)xlat_inst) = talloc_get_type_abort(uctx, rlm_always_t);
-
- return 0;
-}
-
static xlat_arg_parser_t const always_xlat_args[] = {
{ .single = true, .type = FR_TYPE_STRING },
XLAT_ARG_PARSER_TERMINATOR
* Example: "%{db_status:fail}"
*/
static xlat_action_t always_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, void const *xlat_inst,
- UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+ xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
- rlm_always_t const *inst = talloc_get_type_abort_const(*((void const * const *)xlat_inst),rlm_always_t);
+ rlm_always_t *inst = talloc_get_type_abort(xctx->mctx->inst->data, rlm_always_t);
module_instance_t *mi = inst->mi;
char const *status;
char const *p;
xlat = xlat_register_module(inst, mctx, mctx->inst->name, always_xlat, NULL);
xlat_func_args(xlat, always_xlat_args);
- xlat_async_instantiate_set(xlat, always_xlat_instantiate, rlm_always_t *, NULL, inst);
return 0;
}
RETURN_MODULE_RCODE(rcode);
}
-static int mod_xlat_thread_instantiate(UNUSED void *xlat_inst, void *xlat_thread_inst,
- UNUSED xlat_exp_t const *exp, void *uctx)
-{
- rlm_cache_t *inst = talloc_get_type_abort(uctx, rlm_cache_t);
- cache_xlat_thread_inst_t *xt = xlat_thread_inst;
-
- xt->inst = inst;
- return 0;
-}
-
static xlat_arg_parser_t const cache_xlat_args[] = {
{ .required = true, .single = true, .type = FR_TYPE_STRING },
XLAT_ARG_PARSER_TERMINATOR
*
* @ingroup xlat_functions
*/
-static xlat_action_t cache_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, void *xlat_thread_inst,
- fr_value_box_list_t *in) CC_HINT(nonnull);
-static xlat_action_t cache_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static CC_HINT(nonnull)
+xlat_action_t cache_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
rlm_cache_entry_t *c = NULL;
- cache_xlat_thread_inst_t *xti = talloc_get_type_abort(xlat_thread_inst, cache_xlat_thread_inst_t);
+ rlm_cache_t *inst = talloc_get_type_abort(xctx->mctx->inst->data, rlm_cache_t);
rlm_cache_handle_t *handle = NULL;
ssize_t slen;
rlm_rcode_t rcode = RLM_MODULE_NOOP;
key_len = tmpl_expand((char const **)&key, (char *)buffer, sizeof(buffer),
- request, xti->inst->config.key, NULL, NULL);
+ request, inst->config.key, NULL, NULL);
if (key_len < 0) return XLAT_ACTION_FAIL;
slen = tmpl_afrom_attr_substr(ctx, NULL, &target,
return XLAT_ACTION_FAIL;
}
- if (cache_acquire(&handle, xti->inst, request) < 0) {
+ if (cache_acquire(&handle, inst, request) < 0) {
talloc_free(target);
return XLAT_ACTION_FAIL;
}
- cache_find(&rcode, &c, xti->inst, request, &handle, key, key_len);
+ cache_find(&rcode, &c, inst, request, &handle, key, key_len);
switch (rcode) {
case RLM_MODULE_OK: /* found */
break;
*/
if (!map) return XLAT_ACTION_FAIL;
- cache_free(xti->inst, &c);
- cache_release(xti->inst, request, &handle);
+ cache_free(inst, &c);
+ cache_release(inst, request, &handle);
return XLAT_ACTION_DONE;
}
*/
xlat = xlat_register_module(inst, mctx, mctx->inst->name, cache_xlat, XLAT_FLAG_NEEDS_ASYNC);
xlat_func_args(xlat, cache_xlat_args);
- xlat_async_thread_instantiate_set(xlat, mod_xlat_thread_instantiate, cache_xlat_thread_inst_t, NULL, inst);
return 0;
}
fr_map_list_t maps; //!< Head of the maps list.
} rlm_cache_entry_t;
-typedef struct {
- rlm_cache_t *inst; //!< Instance of rlm_cache
-} cache_xlat_thread_inst_t;
-
/** Allocate a new cache entry
*
*/
* @ingroup xlat_functions
*/
static xlat_action_t xlat_func_chap_password(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, UNUSED void const *xlat_inst,
- UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
uint8_t chap_password[1 + RADIUS_CHAP_CHALLENGE_LENGTH];
fr_value_box_t *vb;
* @ingroup xlat_functions
*/
static xlat_action_t cipher_rsa_encrypt_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, UNUSED void const *xlat_inst, void *xlat_thread_inst,
- fr_value_box_list_t *in)
+ xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
- rlm_cipher_rsa_thread_inst_t *xt = talloc_get_type_abort(*((void **)xlat_thread_inst),
- rlm_cipher_rsa_thread_inst_t);
+ rlm_cipher_rsa_thread_inst_t *t = talloc_get_type_abort(xctx->mctx->thread, rlm_cipher_rsa_thread_inst_t);
char const *plaintext;
size_t plaintext_len;
* Figure out the buffer we need
*/
RHEXDUMP3((uint8_t const *)plaintext, plaintext_len, "Plaintext (%zu bytes)", plaintext_len);
- if (EVP_PKEY_encrypt(xt->evp_encrypt_ctx, NULL, &ciphertext_len,
+ if (EVP_PKEY_encrypt(t->evp_encrypt_ctx, NULL, &ciphertext_len,
(unsigned char const *)plaintext, plaintext_len) <= 0) {
fr_tls_log_error(request, "Failed getting length of encrypted plaintext");
return XLAT_ACTION_FAIL;
MEM(vb = fr_value_box_alloc_null(ctx));
MEM(fr_value_box_mem_alloc(vb, &ciphertext, vb, NULL, ciphertext_len, false) == 0);
- if (EVP_PKEY_encrypt(xt->evp_encrypt_ctx, ciphertext, &ciphertext_len,
+ if (EVP_PKEY_encrypt(t->evp_encrypt_ctx, ciphertext, &ciphertext_len,
(unsigned char const *)plaintext, plaintext_len) <= 0) {
fr_tls_log_error(request, "Failed encrypting plaintext");
talloc_free(vb);
* @ingroup xlat_functions
*/
static xlat_action_t cipher_rsa_sign_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, void const *xlat_inst, void *xlat_thread_inst,
- fr_value_box_list_t *in)
+ xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
- rlm_cipher_t const *inst = talloc_get_type_abort_const(*((void const * const *)xlat_inst),
- rlm_cipher_t);
- rlm_cipher_rsa_thread_inst_t *xt = talloc_get_type_abort(*((void **)xlat_thread_inst),
- rlm_cipher_rsa_thread_inst_t);
+ rlm_cipher_t const *inst = talloc_get_type_abort_const(xctx->mctx->inst->data, rlm_cipher_t);
+ rlm_cipher_rsa_thread_inst_t *t = talloc_get_type_abort(xctx->mctx->thread, rlm_cipher_rsa_thread_inst_t);
char const *msg;
size_t msg_len;
/*
* First produce a digest of the message
*/
- if (unlikely(EVP_DigestInit_ex(xt->evp_md_ctx, inst->rsa->sig_digest, NULL) <= 0)) {
+ if (unlikely(EVP_DigestInit_ex(t->evp_md_ctx, inst->rsa->sig_digest, NULL) <= 0)) {
fr_tls_log_error(request, "Failed initialising message digest");
return XLAT_ACTION_FAIL;
}
- if (EVP_DigestUpdate(xt->evp_md_ctx, msg, msg_len) <= 0) {
+ if (EVP_DigestUpdate(t->evp_md_ctx, msg, msg_len) <= 0) {
fr_tls_log_error(request, "Failed ingesting message");
return XLAT_ACTION_FAIL;
}
- if (EVP_DigestFinal_ex(xt->evp_md_ctx, xt->digest_buff, &digest_len) <= 0) {
+ if (EVP_DigestFinal_ex(t->evp_md_ctx, t->digest_buff, &digest_len) <= 0) {
fr_tls_log_error(request, "Failed finalising message digest");
return XLAT_ACTION_FAIL;
}
- fr_assert((size_t)digest_len == talloc_array_length(xt->digest_buff));
+ fr_assert((size_t)digest_len == talloc_array_length(t->digest_buff));
/*
* Then sign the digest
*/
- if (EVP_PKEY_sign(xt->evp_sign_ctx, NULL, &sig_len, xt->digest_buff, (size_t)digest_len) <= 0) {
+ if (EVP_PKEY_sign(t->evp_sign_ctx, NULL, &sig_len, t->digest_buff, (size_t)digest_len) <= 0) {
fr_tls_log_error(request, "Failed getting length of digest");
return XLAT_ACTION_FAIL;
}
MEM(vb = fr_value_box_alloc_null(ctx));
MEM(fr_value_box_mem_alloc(vb, &sig, vb, NULL, sig_len, false) == 0);
- if (EVP_PKEY_sign(xt->evp_sign_ctx, sig, &sig_len, xt->digest_buff, (size_t)digest_len) <= 0) {
+ if (EVP_PKEY_sign(t->evp_sign_ctx, sig, &sig_len, t->digest_buff, (size_t)digest_len) <= 0) {
fr_tls_log_error(request, "Failed signing message digest");
talloc_free(vb);
return XLAT_ACTION_FAIL;
* @ingroup xlat_functions
*/
static xlat_action_t cipher_rsa_decrypt_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, UNUSED void const *xlat_inst, void *xlat_thread_inst,
- fr_value_box_list_t *in)
+ xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
- rlm_cipher_rsa_thread_inst_t *xt = talloc_get_type_abort(*((void **)xlat_thread_inst),
- rlm_cipher_rsa_thread_inst_t);
+ rlm_cipher_rsa_thread_inst_t *t = talloc_get_type_abort(xctx->mctx->thread, rlm_cipher_rsa_thread_inst_t);
uint8_t const *ciphertext;
size_t ciphertext_len;
* Decrypt the ciphertext
*/
RHEXDUMP3(ciphertext, ciphertext_len, "Ciphertext (%zu bytes)", ciphertext_len);
- if (EVP_PKEY_decrypt(xt->evp_decrypt_ctx, NULL, &plaintext_len, ciphertext, ciphertext_len) <= 0) {
+ if (EVP_PKEY_decrypt(t->evp_decrypt_ctx, NULL, &plaintext_len, ciphertext, ciphertext_len) <= 0) {
fr_tls_log_error(request, "Failed getting length of cleartext");
return XLAT_ACTION_FAIL;
}
MEM(vb = fr_value_box_alloc_null(ctx));
MEM(fr_value_box_bstr_alloc(vb, &plaintext, vb, NULL, plaintext_len, true) == 0);
- if (EVP_PKEY_decrypt(xt->evp_decrypt_ctx, (unsigned char *)plaintext, &plaintext_len,
+ if (EVP_PKEY_decrypt(t->evp_decrypt_ctx, (unsigned char *)plaintext, &plaintext_len,
ciphertext, ciphertext_len) <= 0) {
fr_tls_log_error(request, "Failed decrypting ciphertext");
talloc_free(vb);
* @ingroup xlat_functions
*/
static xlat_action_t cipher_rsa_verify_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, void const *xlat_inst, void *xlat_thread_inst,
- fr_value_box_list_t *in)
+ xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
- rlm_cipher_t const *inst = talloc_get_type_abort_const(*((void const * const *)xlat_inst),
- rlm_cipher_t);
- rlm_cipher_rsa_thread_inst_t *xt = talloc_get_type_abort(*((void **)xlat_thread_inst),
- rlm_cipher_rsa_thread_inst_t);
+ rlm_cipher_t const *inst = talloc_get_type_abort_const(xctx->mctx->inst->data, rlm_cipher_t);
+ rlm_cipher_rsa_thread_inst_t *t = talloc_get_type_abort(xctx->mctx->thread, rlm_cipher_rsa_thread_inst_t);
uint8_t const *sig;
size_t sig_len;
/*
* First produce a digest of the message
*/
- if (unlikely(EVP_DigestInit_ex(xt->evp_md_ctx, inst->rsa->sig_digest, NULL) <= 0)) {
+ if (unlikely(EVP_DigestInit_ex(t->evp_md_ctx, inst->rsa->sig_digest, NULL) <= 0)) {
fr_tls_log_error(request, "Failed initialising message digest");
return XLAT_ACTION_FAIL;
}
- if (EVP_DigestUpdate(xt->evp_md_ctx, msg, msg_len) <= 0) {
+ if (EVP_DigestUpdate(t->evp_md_ctx, msg, msg_len) <= 0) {
fr_tls_log_error(request, "Failed ingesting message");
return XLAT_ACTION_FAIL;
}
- if (EVP_DigestFinal_ex(xt->evp_md_ctx, xt->digest_buff, &digest_len) <= 0) {
+ if (EVP_DigestFinal_ex(t->evp_md_ctx, t->digest_buff, &digest_len) <= 0) {
fr_tls_log_error(request, "Failed finalising message digest");
return XLAT_ACTION_FAIL;
}
- fr_assert((size_t)digest_len == talloc_array_length(xt->digest_buff));
+ fr_assert((size_t)digest_len == talloc_array_length(t->digest_buff));
/*
* Now check the signature matches what we expected
*/
- switch (EVP_PKEY_verify(xt->evp_verify_ctx, sig, sig_len, xt->digest_buff, (size_t)digest_len)) {
+ switch (EVP_PKEY_verify(t->evp_verify_ctx, sig, sig_len, t->digest_buff, (size_t)digest_len)) {
case 1: /* success (signature valid) */
MEM(vb = fr_value_box_alloc(ctx, FR_TYPE_BOOL, NULL, false));
vb->vb_bool = true;
* @ingroup xlat_functions
*/
static xlat_action_t cipher_fingerprint_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+ xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
- rlm_cipher_t const *inst = talloc_get_type_abort_const(*((void const * const *)xlat_inst), rlm_cipher_t);
- char const *md_name;
- EVP_MD const *md;
- size_t md_len;
- fr_value_box_t *vb;
- uint8_t *digest;
+ rlm_cipher_t const *inst = talloc_get_type_abort_const(xctx->mctx->inst->data, rlm_cipher_t);
+ char const *md_name;
+ EVP_MD const *md;
+ size_t md_len;
+ fr_value_box_t *vb;
+ uint8_t *digest;
if (!fr_dlist_next(in, fr_dlist_head(in))) {
REDEBUG("Missing digest argument");
* @ingroup xlat_functions
*/
static xlat_action_t cipher_serial_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, void const *xlat_inst, UNUSED void *xlat_thread_inst,
- UNUSED fr_value_box_list_t *in)
+ xlat_ctx_t const *xctx,
+ request_t *request, UNUSED fr_value_box_list_t *in)
{
- rlm_cipher_t const *inst = talloc_get_type_abort_const(*((void const * const *)xlat_inst), rlm_cipher_t);
+ rlm_cipher_t const *inst = talloc_get_type_abort_const(xctx->mctx->inst->data, rlm_cipher_t);
ASN1_INTEGER const *serial;
fr_value_box_t *vb;
}
static xlat_action_t cipher_certificate_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, void const *xlat_inst, void *xlat_thread_inst,
- fr_value_box_list_t *in)
+ xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
- rlm_cipher_t const *inst = talloc_get_type_abort_const(*((void const * const *)xlat_inst), rlm_cipher_t);
+ rlm_cipher_t const *inst = talloc_get_type_abort_const(xctx->mctx->inst->data, rlm_cipher_t);
char const *attribute = ((fr_value_box_t *)fr_dlist_head(in))->vb_strvalue;
fr_value_box_t *vb;
return XLAT_ACTION_FAIL;
case CIPHER_CERT_ATTR_FINGERPRINT:
- return cipher_fingerprint_xlat(ctx, out, request, xlat_inst, xlat_thread_inst, in);
+ return cipher_fingerprint_xlat(ctx, out, xctx, request, in);
case CIPHER_CERT_ATTR_SERIAL:
- return cipher_serial_xlat(ctx, out, request, xlat_inst, xlat_thread_inst, in);
+ return cipher_serial_xlat(ctx, out, xctx, request, in);
case CIPHER_CERT_ATTR_NOT_BEFORE:
MEM(vb = fr_value_box_alloc(ctx, FR_TYPE_DATE, NULL, true));
return 0;
}
-/** Boilerplate to copy the pointer to the main module thread instance into xlat thread instance data
- *
- */
-static int cipher_xlat_thread_instantiate(UNUSED void *xlat_inst, void *xlat_thread_inst,
- UNUSED xlat_exp_t const *exp, void *uctx)
-{
- rlm_cipher_t *inst = talloc_get_type_abort(uctx, rlm_cipher_t);
-
- *((rlm_cipher_rsa_thread_inst_t **)xlat_thread_inst) =
- talloc_get_type_abort(module_thread_by_data(inst)->data, rlm_cipher_rsa_thread_inst_t);
-
- return 0;
-}
-
-/** Boilerplate to copy the pointer to the main module config into the xlat instance data
- *
- */
-static int cipher_xlat_instantiate(void *xlat_inst, UNUSED xlat_exp_t const *exp, void *uctx)
-{
- *((rlm_cipher_t **)xlat_inst) = talloc_get_type_abort(uctx, rlm_cipher_t);
-
- return 0;
-}
-
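The two deleted helpers existed only to copy the module's instance and thread pointers into per-xlat storage; with the new API both are reachable directly from the xlat_ctx_t, so the callbacks and the matching xlat_async_*_set() registration calls below simply disappear. A minimal sketch of the replacement access pattern, assuming the module thread data is rlm_cipher_rsa_thread_inst_t as in the removed boilerplate (the function itself is illustrative, not part of this commit):

static xlat_action_t cipher_example_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
					 xlat_ctx_t const *xctx,
					 UNUSED request_t *request, UNUSED fr_value_box_list_t *in)
{
	/* Module instance data, bound when the xlat was registered with xlat_register_module() */
	rlm_cipher_t const		*inst = talloc_get_type_abort_const(xctx->mctx->inst->data, rlm_cipher_t);

	/* Module thread instance, no per-xlat thread instantiate callback needed */
	rlm_cipher_rsa_thread_inst_t	*t = talloc_get_type_abort(xctx->mctx->thread, rlm_cipher_rsa_thread_inst_t);
	fr_value_box_t			*vb;

	/* Placeholder result, a real xlat would use inst/t to do its work */
	MEM(vb = fr_value_box_alloc(ctx, FR_TYPE_BOOL, NULL, false));
	vb->vb_bool = (inst != NULL) && (t != NULL);
	fr_dcursor_insert(out, vb);

	return XLAT_ACTION_DONE;
}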
static int cipher_rsa_padding_params_set(EVP_PKEY_CTX *evp_pkey_ctx, cipher_rsa_t const *rsa_inst)
{
if (unlikely(EVP_PKEY_CTX_set_rsa_padding(evp_pkey_ctx, rsa_inst->padding)) <= 0) {
xlat_name = talloc_asprintf(inst, "%s_decrypt", mctx->inst->name);
xlat = xlat_register_module(inst, mctx, xlat_name, cipher_rsa_decrypt_xlat, NULL);
xlat_func_mono(xlat, &cipher_rsa_decrypt_xlat_arg);
- xlat_async_instantiate_set(xlat, cipher_xlat_instantiate,
- rlm_cipher_t *,
- NULL,
- inst);
- xlat_async_thread_instantiate_set(xlat,
- cipher_xlat_thread_instantiate,
- rlm_cipher_rsa_thread_inst_t *,
- NULL,
- inst);
talloc_free(xlat_name);
/*
xlat_name = talloc_asprintf(inst, "%s_verify", mctx->inst->name);
xlat = xlat_register_module(inst, mctx, xlat_name, cipher_rsa_verify_xlat, NULL);
xlat_func_args(xlat, cipher_rsa_verify_xlat_arg);
- xlat_async_instantiate_set(xlat, cipher_xlat_instantiate,
- rlm_cipher_t *,
- NULL,
- inst);
- xlat_async_thread_instantiate_set(xlat,
- cipher_xlat_thread_instantiate,
- rlm_cipher_rsa_thread_inst_t *,
- NULL,
- inst);
talloc_free(xlat_name);
}
xlat_name = talloc_asprintf(inst, "%s_encrypt", mctx->inst->name);
xlat = xlat_register_module(inst, mctx, xlat_name, cipher_rsa_encrypt_xlat, NULL);
xlat_func_mono(xlat, &cipher_rsa_encrypt_xlat_arg);
- xlat_async_instantiate_set(xlat, cipher_xlat_instantiate,
- rlm_cipher_t *,
- NULL,
- inst);
- xlat_async_thread_instantiate_set(xlat, cipher_xlat_thread_instantiate,
- rlm_cipher_rsa_thread_inst_t *,
- NULL,
- inst);
talloc_free(xlat_name);
/*
xlat_name = talloc_asprintf(inst, "%s_sign", mctx->inst->name);
xlat = xlat_register_module(inst, mctx, xlat_name, cipher_rsa_sign_xlat, NULL);
xlat_func_mono(xlat, &cipher_rsa_sign_xlat_arg);
- xlat_async_instantiate_set(xlat, cipher_xlat_instantiate,
- rlm_cipher_t *,
- NULL,
- inst);
- xlat_async_thread_instantiate_set(xlat, cipher_xlat_thread_instantiate,
- rlm_cipher_rsa_thread_inst_t *,
- NULL,
- inst);
talloc_free(xlat_name);
xlat_name = talloc_asprintf(inst, "%s_certificate", mctx->inst->name);
xlat = xlat_register_module(inst, mctx, xlat_name, cipher_certificate_xlat, NULL);
xlat_func_args(xlat, cipher_certificate_xlat_args);
- xlat_async_instantiate_set(xlat, cipher_xlat_instantiate,
- rlm_cipher_t *,
- NULL,
- inst);
- xlat_async_thread_instantiate_set(xlat,
- cipher_xlat_thread_instantiate,
- rlm_cipher_rsa_thread_inst_t *,
- NULL,
- inst);
+
talloc_free(xlat_name);
}
break;
*
* @ingroup xlat_functions
*/
-static xlat_action_t xlat_client(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t xlat_client(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
char const *value = NULL;
fr_ipaddr_t ip;
RETURN_MODULE_OK;
}
-/*
- * Do any per-module initialization that is separate to each
- * configured instance of the module. e.g. set up connections
- * to external databases, read configuration files, set up
- * dictionary entries, etc.
- *
- * If configuration information is given in the config section
- * that must be referenced in later calls, store a handle to it
- * in *instance otherwise put a null pointer there.
- */
-static int mod_bootstrap(module_inst_ctx_t const *mctx)
+static int mod_load(void)
{
- CONF_SECTION *conf = mctx->inst->conf;
xlat_t *xlat;
- if (cf_section_name2(conf)) return 0;
-
- xlat = xlat_register(mctx->inst->data, "client", xlat_client, NULL);
+ xlat = xlat_register(NULL, "client", xlat_client, NULL);
+ if (!xlat) return -1;
xlat_func_args(xlat, xlat_client_args);
- map_proc_register(mctx->inst->data, "client", map_proc_client, NULL, 0);
+
+ map_proc_register(NULL, "client", map_proc_client, NULL, 0);
return 0;
}
+static void mod_unload(void)
+{
+ xlat_unregister("client");
+}
/*
* The module name should be the only globally exported symbol.
.magic = RLM_MODULE_INIT,
.name = "dynamic_clients",
.type = RLM_TYPE_THREAD_SAFE, /* type */
- .bootstrap = mod_bootstrap,
+ .onload = mod_load,
+ .unload = mod_unload,
.methods = {
[MOD_AUTHORIZE] = mod_authorize
},
DIAG_OFF(format-nonliteral)
static xlat_action_t date_convert_string(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- const char *str, rlm_date_t const *inst)
+ const char *str, rlm_date_t const *inst)
{
struct tm tminfo;
time_t date = 0;
}
static xlat_action_t date_encode_strftime(TALLOC_CTX *ctx, fr_dcursor_t *out, rlm_date_t const *inst,
- request_t *request, time_t date)
+ request_t *request, time_t date)
{
struct tm tminfo;
char buff[64];
}
DIAG_ON(format-nonliteral)
-static int mod_xlat_instantiate(void *xlat_inst, UNUSED xlat_exp_t const *exp, void *uctx)
-{
- *((void **)xlat_inst) = talloc_get_type_abort(uctx, rlm_date_t);
- return 0;
-}
-
static xlat_arg_parser_t const xlat_date_convert_args[] = {
{ .required = true, .single = true, .type = FR_TYPE_VOID },
XLAT_ARG_PARSER_TERMINATOR
*
* @ingroup xlat_functions
*/
-static xlat_action_t xlat_date_convert(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t xlat_date_convert(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
- rlm_date_t const *inst;
- void *instance;
+ rlm_date_t const *inst = talloc_get_type_abort(xctx->mctx->inst->data, rlm_date_t);
struct tm tminfo;
fr_value_box_t *arg = fr_dlist_head(in);
- memcpy(&instance, xlat_inst, sizeof(instance));
-
- inst = talloc_get_type_abort(instance, rlm_date_t);
-
memset(&tminfo, 0, sizeof(tminfo));
/*
xlat_t *xlat;
xlat = xlat_register_module(inst, mctx, mctx->inst->name, xlat_date_convert, NULL);
- xlat_async_instantiate_set(xlat, mod_xlat_instantiate, rlm_date_t *, NULL, inst);
xlat_func_args(xlat, xlat_date_convert_args);
return 0;
unlang_interpret_mark_runnable(request);
}
-static void _xlat_delay_done(request_t *request,
- UNUSED void *xlat_inst, UNUSED void *xlat_thread_inst, void *rctx, fr_time_t fired)
+static void _xlat_delay_done(xlat_ctx_t const *xctx, request_t *request, fr_time_t fired)
{
- fr_time_t *yielded = talloc_get_type_abort(rctx, fr_time_t);
+ fr_time_t *yielded = talloc_get_type_abort(xctx->rctx, fr_time_t);
RDEBUG2("Delay done");
}
static xlat_action_t xlat_delay_resume(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- UNUSED fr_value_box_list_t *in, void *rctx)
+ xlat_ctx_t const *xctx,
+ request_t *request, UNUSED fr_value_box_list_t *in)
{
- fr_time_t *yielded_at = talloc_get_type_abort(rctx, fr_time_t);
+ fr_time_t *yielded_at = talloc_get_type_abort(xctx->rctx, fr_time_t);
fr_time_delta_t delayed;
fr_value_box_t *vb;
return XLAT_ACTION_DONE;
}
-static void xlat_delay_cancel(request_t *request, UNUSED void *instance, UNUSED void *thread,
- UNUSED void *rctx, fr_state_signal_t action)
+static void xlat_delay_cancel(UNUSED xlat_ctx_t const *xctx, request_t *request, fr_state_signal_t action)
{
if (action != FR_SIGNAL_CANCEL) return;
* @ingroup xlat_functions
*/
static xlat_action_t xlat_delay(UNUSED TALLOC_CTX *ctx, UNUSED fr_dcursor_t *out,
- request_t *request, void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+ xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
- rlm_delay_t const *inst;
- void *instance;
+ rlm_delay_t const *inst = talloc_get_type_abort(xctx->mctx->inst->data, rlm_delay_t);
fr_time_t resume_at, *yielded_at;
fr_value_box_t *delay = fr_dlist_head(in);
- memcpy(&instance, xlat_inst, sizeof(instance)); /* Stupid const issues */
-
- inst = talloc_get_type_abort(instance, rlm_delay_t);
-
/*
* Record the time that we yielded the request
*/
yield:
RDEBUG3("Current time %pVs, resume time %pVs", fr_box_time(*yielded_at), fr_box_time(resume_at));
- if (unlang_xlat_event_timeout_add(request, _xlat_delay_done, yielded_at, resume_at) < 0) {
+ if (unlang_xlat_timeout_add(request, _xlat_delay_done, yielded_at, resume_at) < 0) {
RPEDEBUG("Adding event failed");
return XLAT_ACTION_FAIL;
}
return unlang_xlat_yield(request, xlat_delay_resume, xlat_delay_cancel, yielded_at);
}
-static int mod_xlat_instantiate(void *xlat_inst, UNUSED xlat_exp_t const *exp, void *uctx)
-{
- *((void **)xlat_inst) = talloc_get_type_abort(uctx, rlm_delay_t);
- return 0;
-}
-
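Yielding xlats follow the same rule on the resume and signal side: the rctx handed to unlang_xlat_yield() now arrives in xctx->rctx instead of as a separate argument, and cancel callbacks take the same xlat_ctx_t. A hedged sketch of the pair, reusing the fr_time_t rctx from the delay example above (names are illustrative):

static xlat_action_t example_resume(TALLOC_CTX *ctx, fr_dcursor_t *out,
				    xlat_ctx_t const *xctx,
				    UNUSED request_t *request, UNUSED fr_value_box_list_t *in)
{
	/* The rctx passed to unlang_xlat_yield() */
	fr_time_t	*yielded_at = talloc_get_type_abort(xctx->rctx, fr_time_t);
	fr_value_box_t	*vb;

	/* Placeholder result, a real resume function would compute something from yielded_at */
	MEM(vb = fr_value_box_alloc(ctx, FR_TYPE_BOOL, NULL, false));
	vb->vb_bool = (yielded_at != NULL);
	fr_dcursor_insert(out, vb);

	return XLAT_ACTION_DONE;
}

static void example_cancel(xlat_ctx_t const *xctx, request_t *request, fr_state_signal_t action)
{
	fr_time_t *yielded_at = talloc_get_type_abort(xctx->rctx, fr_time_t);

	if (action != FR_SIGNAL_CANCEL) return;

	RDEBUG2("Cancelled expansion which yielded at %pVs", fr_box_time(*yielded_at));
}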
static int mod_bootstrap(module_inst_ctx_t const *mctx)
{
rlm_delay_t *inst = talloc_get_type_abort(mctx->inst->data, rlm_delay_t);
xlat = xlat_register_module(inst, mctx, mctx->inst->name, xlat_delay, XLAT_FLAG_NEEDS_ASYNC);
xlat_func_args(xlat, xlat_delay_args);
- xlat_async_instantiate_set(xlat, mod_xlat_instantiate, rlm_delay_t *, NULL, inst);
return 0;
}
*
* @ingroup xlat_functions
*/
-static xlat_action_t xlat_dict_attr_by_num(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t xlat_dict_attr_by_num(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
fr_dict_attr_t const *da;
fr_value_box_t *attr = fr_dlist_head(in);
*
* @ingroup xlat_functions
*/
-static xlat_action_t xlat_dict_attr_by_oid(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t xlat_dict_attr_by_oid(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
unsigned int attr = 0;
fr_dict_attr_t const *parent = fr_dict_root(request->dict);
*
* @ingroup xlat_functions
*/
-static xlat_action_t xlat_vendor(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t xlat_vendor(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
fr_pair_t *vp;
fr_dict_vendor_t const *vendor;
*
* @ingroup xlat_functions
*/
-static xlat_action_t xlat_vendor_num(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t xlat_vendor_num(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
fr_pair_t *vp;
fr_value_box_t *attr = fr_dlist_head(in);
*
* @ingroup xlat_functions
*/
-static xlat_action_t xlat_attr(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t xlat_attr(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
fr_pair_t *vp;
fr_value_box_t *attr = fr_dlist_head(in);
*
* @ingroup xlat_functions
*/
-static xlat_action_t xlat_attr_num(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t xlat_attr_num(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
fr_pair_t *vp;
fr_value_box_t *attr = fr_dlist_head(in);
static char const hextab[] = "0123456789abcdef";
-static int mod_xlat_instantiate(void *xlat_inst, UNUSED xlat_exp_t const *exp, void *uctx)
-{
- *((void **)xlat_inst) = talloc_get_type_abort(uctx, rlm_escape_t);
- return 0;
-}
-
static xlat_arg_parser_t const escape_xlat_arg = { .required = true, .concat = true, .type = FR_TYPE_STRING };
/** Equivalent to the old safe_characters functionality in rlm_sql but with utf8 support
*
* @ingroup xlat_functions
*/
-static xlat_action_t escape_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t escape_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
- rlm_escape_t const *inst;
- void *instance;
+ rlm_escape_t const *inst = talloc_get_type_abort(xctx->mctx->inst->data, rlm_escape_t);
fr_value_box_t *arg = fr_dlist_head(in);
char const *p = arg->vb_strvalue;
size_t len;
fr_sbuff_uctx_talloc_t sbuff_ctx;
int i;
- memcpy(&instance, xlat_inst, sizeof(instance));
- inst = talloc_get_type_abort(instance, rlm_escape_t);
len = talloc_array_length(inst->allowed_chars) - 1;
MEM(vb = fr_value_box_alloc_null(ctx));
*
* @ingroup xlat_functions
*/
-static xlat_action_t unescape_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t unescape_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
fr_value_box_t *arg = fr_dlist_head(in);
char const *p, *end;
*/
static int mod_bootstrap(module_inst_ctx_t const *mctx)
{
- rlm_escape_t *inst = talloc_get_type_abort(mctx->inst->data, rlm_escape_t);
char *unescape;
xlat_t *xlat;
MEM(unescape = talloc_asprintf(NULL, "un%s", mctx->inst->name));
xlat = xlat_register_module(NULL, mctx, mctx->inst->name, escape_xlat, XLAT_FLAG_PURE);
xlat_func_mono(xlat, &escape_xlat_arg);
- xlat_async_instantiate_set(xlat, mod_xlat_instantiate, rlm_escape_t *, NULL, inst);
xlat = xlat_register_module(NULL, mctx, unescape, unescape_xlat, XLAT_FLAG_PURE);
xlat_func_mono(xlat, &unescape_xlat_arg);
};
-static xlat_action_t exec_xlat_resume(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- UNUSED fr_value_box_list_t *in, void *rctx)
+static xlat_action_t exec_xlat_resume(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ xlat_ctx_t const *xctx,
+ request_t *request, UNUSED fr_value_box_list_t *in)
{
- fr_exec_state_t *exec = talloc_get_type_abort(rctx, fr_exec_state_t);
+ fr_exec_state_t *exec = talloc_get_type_abort(xctx->rctx, fr_exec_state_t);
fr_value_box_t *vb;
/*
*
* @ingroup xlat_functions
*/
-static xlat_action_t exec_xlat(TALLOC_CTX *ctx, UNUSED fr_dcursor_t *out, request_t *request,
- void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t exec_xlat(TALLOC_CTX *ctx, UNUSED fr_dcursor_t *out,
+ xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
- rlm_exec_t const *inst = talloc_get_type_abort_const(*UNCONST(void **, xlat_inst), rlm_exec_t);
+ rlm_exec_t const *inst = talloc_get_type_abort_const(xctx->mctx->inst->data, rlm_exec_t);
fr_pair_list_t *env_pairs = NULL;
fr_exec_state_t *exec;
return unlang_xlat_yield(request, exec_xlat_resume, NULL, exec);
}
-static int mod_xlat_instantiate(void *xlat_inst, UNUSED xlat_exp_t const *exp, void *uctx)
-{
- *((void **)xlat_inst) = talloc_get_type_abort(uctx, rlm_exec_t);
- return 0;
-}
-
/*
* Do any per-module initialization that is separate to each
* configured instance of the module. e.g. set up connections
xlat = xlat_register_module(NULL, mctx, mctx->inst->name, exec_xlat, XLAT_FLAG_NEEDS_ASYNC);
xlat_func_args(xlat, exec_xlat_args);
- xlat_async_instantiate_set(xlat, mod_xlat_instantiate, rlm_exec_t *, NULL, inst);
if (inst->input) {
p = inst->input;
fr_value_box_list_t *box = talloc_zero(ctx, fr_value_box_list_t);
fr_value_box_list_init(box);
- return unlang_module_yield_to_xlat(request, box, request, tmpl_xlat(inst->tmpl),
+ return unlang_module_yield_to_xlat(request, NULL, box, request, tmpl_xlat(inst->tmpl),
mod_exec_nowait_resume, NULL, box);
}
*
* @ingroup xlat_functions
*/
-static xlat_action_t expr_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t expr_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
int64_t result;
fr_value_box_t *arg = fr_dlist_head(in);
char const *p = arg->vb_strvalue;
fr_value_box_t *vb;
- if (!get_expression(request, &p, &result, TOKEN_NONE)) {
- return XLAT_ACTION_FAIL;
- }
+ if (!get_expression(request, &p, &result, TOKEN_NONE)) return XLAT_ACTION_FAIL;
if (*p) {
REDEBUG("Invalid text after expression: %s", p);
request_t *request; //!< so it can be resumed when we get the echo reply
} rlm_icmp_echo_t;
-/** Wrapper around the module thread stuct for individual xlats
- *
- */
-typedef struct {
- rlm_icmp_t *inst; //!< Instance of rlm_icmp.
- rlm_icmp_thread_t *t; //!< rlm_icmp thread instance.
-} xlat_icmp_thread_inst_t;
-
typedef struct CC_HINT(__packed__) {
uint8_t type;
uint8_t code;
};
static xlat_action_t xlat_icmp_resume(TALLOC_CTX *ctx, fr_dcursor_t *out,
- UNUSED request_t *request,
- UNUSED void const *xlat_inst, void *xlat_thread_inst,
- UNUSED fr_value_box_list_t *in, void *rctx)
+ xlat_ctx_t const *xctx,
+ UNUSED request_t *request, UNUSED fr_value_box_list_t *in)
{
- rlm_icmp_echo_t *echo = talloc_get_type_abort(rctx, rlm_icmp_echo_t);
- xlat_icmp_thread_inst_t *thread = talloc_get_type_abort(xlat_thread_inst, xlat_icmp_thread_inst_t);
+ rlm_icmp_echo_t *echo = talloc_get_type_abort(xctx->rctx, rlm_icmp_echo_t);
+ rlm_icmp_thread_t *t = talloc_get_type_abort(xctx->mctx->thread, rlm_icmp_thread_t);
fr_value_box_t *vb;
MEM(vb = fr_value_box_alloc(ctx, FR_TYPE_BOOL, NULL, false));
vb->vb_bool = echo->replied;
- (void) fr_rb_delete(thread->t->tree, echo);
+ (void) fr_rb_delete(t->tree, echo);
talloc_free(echo);
fr_dcursor_insert(out, vb);
return XLAT_ACTION_DONE;
}
-static void xlat_icmp_cancel(request_t *request, UNUSED void *xlat_inst, void *xlat_thread_inst,
- void *rctx, fr_state_signal_t action)
+static void xlat_icmp_cancel(xlat_ctx_t const *xctx, request_t *request, fr_state_signal_t action)
{
- rlm_icmp_echo_t *echo = talloc_get_type_abort(rctx, rlm_icmp_echo_t);
- xlat_icmp_thread_inst_t *thread = talloc_get_type_abort(xlat_thread_inst, xlat_icmp_thread_inst_t);
+ rlm_icmp_echo_t *echo = talloc_get_type_abort(xctx->rctx, rlm_icmp_echo_t);
+ rlm_icmp_thread_t *t = talloc_get_type_abort(xctx->mctx->thread, rlm_icmp_thread_t);
if (action != FR_SIGNAL_CANCEL) return;
RDEBUG2("Cancelling ICMP request for %pV (counter=%d)", echo->ip, echo->counter);
- (void) fr_rb_delete(thread->t->tree, echo);
+ (void) fr_rb_delete(t->tree, echo);
talloc_free(echo);
}
-static void _xlat_icmp_timeout(request_t *request,
- UNUSED void *xlat_inst, UNUSED void *xlat_thread_inst, void *rctx, UNUSED fr_time_t fired)
+static void _xlat_icmp_timeout(xlat_ctx_t const *xctx, request_t *request, UNUSED fr_time_t fired)
{
- rlm_icmp_echo_t *echo = talloc_get_type_abort(rctx, rlm_icmp_echo_t);
+ rlm_icmp_echo_t *echo = talloc_get_type_abort(xctx->rctx, rlm_icmp_echo_t);
if (echo->replied) return; /* it MUST already have been marked resumable. */
* @ingroup xlat_functions
*/
static xlat_action_t xlat_icmp(TALLOC_CTX *ctx, UNUSED fr_dcursor_t *out,
- request_t *request, void const *xlat_inst, void *xlat_thread_inst,
- fr_value_box_list_t *in)
+ xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
- void *instance;
- rlm_icmp_t const *inst;
- xlat_icmp_thread_inst_t *thread = talloc_get_type_abort(xlat_thread_inst, xlat_icmp_thread_inst_t);
+ rlm_icmp_t *inst = talloc_get_type_abort(xctx->mctx->inst->data, rlm_icmp_t);
+ rlm_icmp_thread_t *t = talloc_get_type_abort(xctx->mctx->thread, rlm_icmp_thread_t);
rlm_icmp_echo_t *echo;
icmp_header_t icmp;
uint16_t checksum;
struct sockaddr_storage dst;
fr_value_box_t *in_head = fr_dlist_head(in);
- memcpy(&instance, xlat_inst, sizeof(instance)); /* Stupid const issues */
-
- inst = talloc_get_type_abort(instance, rlm_icmp_t);
-
/*
	 * If there's no input, we can't ping anything.
*/
if (!in_head) return XLAT_ACTION_FAIL;
- if (fr_value_box_cast_in_place(ctx, in_head, thread->t->ipaddr_type, NULL) < 0) {
+ if (fr_value_box_cast_in_place(ctx, in_head, t->ipaddr_type, NULL) < 0) {
RPEDEBUG("Failed casting result to IP address");
return XLAT_ACTION_FAIL;
}
MEM(echo = talloc_zero(ctx, rlm_icmp_echo_t));
echo->ip = in_head;
echo->request = request;
- echo->counter = thread->t->counter++;
+ echo->counter = t->counter++;
/*
* Add the IP to the local tracking heap, so that the IO
* This insert will never fail, because of the unique
* counter above.
*/
- if (!fr_rb_insert(thread->t->tree, echo)) {
+ if (!fr_rb_insert(t->tree, echo)) {
RPEDEBUG("Failed inserting IP into tracking table");
talloc_free(echo);
return XLAT_ACTION_FAIL;
}
- if (unlang_xlat_event_timeout_add(request, _xlat_icmp_timeout, echo,
- fr_time_add(fr_time(), inst->timeout)) < 0) {
+ if (unlang_xlat_timeout_add(request, _xlat_icmp_timeout, echo,
+ fr_time_add(fr_time(), inst->timeout)) < 0) {
RPEDEBUG("Failed adding timeout");
- (void) fr_rb_delete(thread->t->tree, echo);
+ (void) fr_rb_delete(t->tree, echo);
talloc_free(echo);
return XLAT_ACTION_FAIL;
}
RDEBUG("Sending ICMP request to %pV (counter=%d)", echo->ip, echo->counter);
icmp = (icmp_header_t) {
- .type = thread->t->request_type,
- .ident = thread->t->ident,
- .data = thread->t->data,
+ .type = t->request_type,
+ .ident = t->ident,
+ .data = t->data,
.counter = echo->counter
};
/*
* Start off with the IPv6 pseudo-header checksum
*/
- if (thread->t->ipaddr_type == FR_TYPE_IPV6_ADDR) {
+ if (t->ipaddr_type == FR_TYPE_IPV6_ADDR) {
checksum = fr_ip6_pesudo_header_checksum(&inst->src_ipaddr.addr.v6, &echo->ip->vb_ip.addr.v6,
sizeof(ip_header6_t) + sizeof(icmp), IPPROTO_ICMPV6);
}
*/
icmp.checksum = htons(icmp_checksum((uint8_t *) &icmp, sizeof(icmp), checksum));
- rcode = sendto(thread->t->fd, &icmp, sizeof(icmp), 0, (struct sockaddr *) &dst, salen);
+ rcode = sendto(t->fd, &icmp, sizeof(icmp), 0, (struct sockaddr *) &dst, salen);
if (rcode < 0) {
REDEBUG("Failed sending ICMP request to %pV: %s", echo->ip, fr_syserror(errno));
- (void) fr_rb_delete(thread->t->tree, echo);
+ (void) fr_rb_delete(t->tree, echo);
talloc_free(echo);
return XLAT_ACTION_FAIL;
}
if ((size_t) rcode < sizeof(icmp)) {
REDEBUG("Failed sending entire ICMP packet");
- (void) fr_rb_delete(thread->t->tree, echo);
+ (void) fr_rb_delete(t->tree, echo);
talloc_free(echo);
return XLAT_ACTION_FAIL;
}
t->fd = -1;
}
-/** Resolves and caches the module's thread instance for use by a specific xlat instance
- *
- * @param[in] xlat_inst UNUSED.
- * @param[in] xlat_thread_inst pre-allocated structure to hold pointer to module's
- * thread instance.
- * @param[in] exp UNUSED.
- * @param[in] uctx Module's global instance. Used to lookup thread
- * specific instance.
- * @return 0.
- */
-static int mod_xlat_thread_instantiate(UNUSED void *xlat_inst, void *xlat_thread_inst,
- UNUSED xlat_exp_t const *exp, void *uctx)
-{
- rlm_icmp_t *inst = talloc_get_type_abort(uctx, rlm_icmp_t);
- xlat_icmp_thread_inst_t *xt = xlat_thread_inst;
-
- xt->inst = inst;
- xt->t = talloc_get_type_abort(module_thread_by_data(inst)->data, rlm_icmp_thread_t);
-
- return 0;
-}
-
-static int mod_xlat_instantiate(void *xlat_inst, UNUSED xlat_exp_t const *exp, void *uctx)
-{
- *((void **)xlat_inst) = talloc_get_type_abort(uctx, rlm_icmp_t);
- return 0;
-}
-
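With the wrapper struct and both instantiate callbacks above gone, the remaining glue is the timer callback: unlang_xlat_event_timeout_add() becomes unlang_xlat_timeout_add(), and its callback receives the xlat_ctx_t with the caller-supplied context in xctx->rctx instead of separate inst/thread/rctx arguments. A hedged sketch of the new callback shape, modelled on the ICMP echo tracking structure used above (illustrative only):

static void example_timeout(xlat_ctx_t const *xctx, request_t *request, UNUSED fr_time_t fired)
{
	/* The same pointer that was passed as the third argument to unlang_xlat_timeout_add() */
	rlm_icmp_echo_t *echo = talloc_get_type_abort(xctx->rctx, rlm_icmp_echo_t);

	if (echo->replied) return;	/* Reply arrived, request already marked runnable */

	RDEBUG2("Timed out waiting for ICMP reply from %pV", echo->ip);
	unlang_interpret_mark_runnable(request);
}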
/** Instantiate thread data for the submodule.
*
*/
xlat = xlat_register_module(inst, mctx, mctx->inst->name, xlat_icmp, XLAT_FLAG_NEEDS_ASYNC);
xlat_func_args(xlat, xlat_icmp_args);
- xlat_async_instantiate_set(xlat, mod_xlat_instantiate, rlm_icmp_t *, NULL, inst);
- xlat_async_thread_instantiate_set(xlat, mod_xlat_thread_instantiate, xlat_icmp_thread_inst_t, NULL, inst);
FR_TIME_DELTA_BOUND_CHECK("timeout", inst->timeout, >=, fr_time_delta_from_msec(100)); /* 1/10s minimum timeout */
FR_TIME_DELTA_BOUND_CHECK("timeout", inst->timeout, <=, fr_time_delta_from_sec(10));
CONF_PARSER_TERMINATOR
};
-static int mod_xlat_instantiate(void *xlat_inst, UNUSED xlat_exp_t const *exp, void *uctx)
-{
- *((void **)xlat_inst) = talloc_get_type_abort(uctx, rlm_idn_t);
- return 0;
-}
-
static xlat_arg_parser_t const xlat_idna_arg = { .required = true, .concat = true, .type = FR_TYPE_STRING };
/** Convert domain name to ASCII punycode
*
* @ingroup xlat_functions
*/
-static xlat_action_t xlat_idna(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t xlat_idna(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
- rlm_idn_t const *inst;
- void *instance;
+ rlm_idn_t const *inst = talloc_get_type_abort(xctx->mctx->inst->data, rlm_idn_t);
char *idna = NULL;
int res;
size_t len;
fr_value_box_t *arg = fr_dlist_head(in);
fr_value_box_t *vb;
- memcpy(&instance, xlat_inst, sizeof(instance));
-
- inst = talloc_get_type_abort(instance, rlm_idn_t);
-
if (inst->use_std3_ascii_rules) {
flags |= IDNA_USE_STD3_ASCII_RULES;
}
xlat = xlat_register_module(inst, mctx, mctx->inst->name, xlat_idna, XLAT_FLAG_PURE);
xlat_func_mono(xlat, &xlat_idna_arg);
- xlat_async_instantiate_set(xlat, mod_xlat_instantiate, rlm_idn_t *, NULL, inst);
return 0;
}
CONF_PARSER_TERMINATOR
};
-
-/** Boilerplate to copy the pointer to the main module config into the xlat instance data
- *
- */
-static int json_xlat_instantiate(void *xlat_inst, UNUSED xlat_exp_t const *exp, void *uctx)
-{
- *((rlm_json_t **)xlat_inst) = talloc_get_type_abort(uctx, rlm_json_t);
-
- return 0;
-}
-
-
/** Forms a linked list of jpath head node pointers (a list of jpaths)
*/
typedef struct rlm_json_jpath_cache rlm_json_jpath_cache_t;
*
* @ingroup xlat_functions
*
- * @param ctx talloc context
- * @param out Where to write the output
- * @param request The current request.
- * @param xlat_inst unused
- * @param xlat_thread_inst unused
- * @param in list of value boxes as input
- * @return XLAT_ACTION_DONE or XLAT_ACTION_FAIL
*/
-static xlat_action_t json_quote_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t json_quote_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
fr_value_box_t *vb;
fr_value_box_t *in_head = fr_dlist_head(in);
*
* @ingroup xlat_functions
*
- * @param ctx to allocate expansion buffer in.
- * @param out Where to write the output (in the format @verbatim<bytes parsed>[:error]@endverbatim).
- * @param request The current request.
- * @param xlat_inst unused.
- * @param xlat_thread_inst unused.
- * @param in jpath expression to parse.
- * @return
- * - XLAT_ACTION_DONE for valid paths
- * - XLAT_ACTION_FAIL for invalid paths
+ * Writes the output (in the format @verbatim<bytes parsed>[:error]@endverbatim).
*/
-static xlat_action_t jpath_validate_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t jpath_validate_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
fr_value_box_t *path = fr_dlist_head(in);
fr_jpath_node_t *head;
* Usage is `%{json_encode:attr tmpl list}`
*
* @ingroup xlat_functions
- *
- * @param ctx talloc context
- * @param out where to write the output
- * @param request the current request
- * @param xlat_inst xlat instance data
- * @param xlat_thread_inst unused
- * @param in list of value boxes as input
- * @return XLAT_ACTION_DONE or XLAT_ACTION_FAIL
*/
-static xlat_action_t json_encode_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t json_encode_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
- rlm_json_t const *inst = talloc_get_type_abort_const(*((void const * const *)xlat_inst),
- rlm_json_t);
+ rlm_json_t const *inst = talloc_get_type_abort_const(xctx->mctx->inst->data, rlm_json_t);
fr_json_format_t const *format = inst->format;
ssize_t slen;
name = talloc_asprintf(inst, "%s_encode", mctx->inst->name);
xlat = xlat_register_module(inst, mctx, name, json_encode_xlat, NULL);
xlat_func_mono(xlat, &json_encode_xlat_arg);
- xlat_async_instantiate_set(xlat, json_xlat_instantiate,
- rlm_json_t *, NULL, inst);
talloc_free(name);
/*
#include <freeradius-devel/server/map_proc.h>
-
-/*
- * Xlat pointer to thread handling the query
- */
-typedef struct {
- fr_ldap_thread_t *t;
-} ldap_xlat_thread_inst_t;
-
static CONF_PARSER sasl_mech_dynamic[] = {
{ FR_CONF_OFFSET("mech", FR_TYPE_TMPL | FR_TYPE_NOT_EMPTY, fr_ldap_sasl_t_dynamic_t, mech) },
{ FR_CONF_OFFSET("proxy", FR_TYPE_TMPL, fr_ldap_sasl_t_dynamic_t, proxy) },
*
* @ingroup xlat_functions
*/
-static xlat_action_t ldap_escape_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t ldap_escape_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
fr_value_box_t *vb, *in_vb = fr_dlist_head(in);
fr_sbuff_t sbuff;
*
* @ingroup xlat_functions
*/
-static xlat_action_t ldap_unescape_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t ldap_unescape_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
fr_value_box_t *vb, *in_vb = fr_dlist_head(in);
fr_sbuff_t sbuff;
fr_sbuff_trim_talloc(&sbuff, len);
fr_value_box_strdup_shallow(vb, NULL, fr_sbuff_buff(&sbuff), in_vb->tainted);
fr_dcursor_append(out, vb);
+
return XLAT_ACTION_DONE;
}
/** Callback when resuming after async ldap query is completed
*
*/
-static xlat_action_t ldap_xlat_resume(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- UNUSED fr_value_box_list_t *in, void *rctx)
+static xlat_action_t ldap_xlat_resume(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ xlat_ctx_t const *xctx,
+ request_t *request, UNUSED fr_value_box_list_t *in)
{
- fr_ldap_query_t *query = talloc_get_type_abort(rctx, fr_ldap_query_t);
+ fr_ldap_query_t *query = talloc_get_type_abort(xctx->rctx, fr_ldap_query_t);
fr_ldap_connection_t *ldap_conn = query->ldap_conn;
fr_value_box_t *vb = NULL;
LDAPMessage *msg;
/** Callback for signalling async ldap query
*
*/
-static void ldap_xlat_signal(request_t *request, UNUSED void *instance, UNUSED void *thread, void *rctx, fr_state_signal_t action)
+static void ldap_xlat_signal(xlat_ctx_t const *xctx, request_t *request, fr_state_signal_t action)
{
- fr_ldap_query_t *query = talloc_get_type_abort(rctx, fr_ldap_query_t);
+ fr_ldap_query_t *query = talloc_get_type_abort(xctx->rctx, fr_ldap_query_t);
if (action != FR_SIGNAL_CANCEL) return;
*
* @ingroup xlat_functions
*/
-static xlat_action_t ldap_xlat(UNUSED TALLOC_CTX *ctx, UNUSED fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t ldap_xlat(UNUSED TALLOC_CTX *ctx, UNUSED fr_dcursor_t *out,
+ xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
+ fr_ldap_thread_t *t = talloc_get_type_abort(xctx->mctx->thread, fr_ldap_thread_t);
fr_value_box_t *in_vb = NULL;
- ldap_xlat_thread_inst_t *xt = talloc_get_type_abort(xlat_thread_inst, ldap_xlat_thread_inst_t);
char *host_url;
- fr_ldap_config_t const *handle_config = xt->t->config;
+ fr_ldap_config_t const *handle_config = t->config;
fr_ldap_thread_trunk_t *ttrunk;
fr_ldap_query_t *query = NULL;
ldap_url->lud_host, ldap_url->lud_port);
}
- ttrunk = fr_thread_ldap_trunk_get(xt->t, host_url, handle_config->admin_identity,
+ ttrunk = fr_thread_ldap_trunk_get(t, host_url, handle_config->admin_identity,
handle_config->admin_password, request, handle_config);
if (!ttrunk) {
REDEBUG("Unable to get LDAP query for xlat");
return 0;
}
-static int mod_xlat_thread_instantiate(UNUSED void *xlat_inst, void *xlat_thread_inst,
- UNUSED xlat_exp_t const *exp, void *uctx)
-{
- rlm_ldap_t *inst = talloc_get_type_abort(uctx, rlm_ldap_t);
- ldap_xlat_thread_inst_t *xt = xlat_thread_inst;
-
- xt->t = talloc_get_type_abort(module_thread_by_data(inst)->data, fr_ldap_thread_t);
-
- return 0;
-}
-
/** Initialise thread specific data structure
*
*/
xlat = xlat_register_module(NULL, mctx, mctx->inst->name, ldap_xlat, NULL);
xlat_func_mono(xlat, &ldap_xlat_arg);
- xlat_async_thread_instantiate_set(xlat, mod_xlat_thread_instantiate, ldap_xlat_thread_inst_t, NULL, inst);
xlat = xlat_register_module(NULL, mctx, "ldap_escape", ldap_escape_xlat, XLAT_FLAG_PURE);
if (xlat) xlat_func_mono(xlat, &ldap_escape_xlat_arg);
return acct_ctrl;
}
-static int mod_xlat_instantiate(void *xlat_inst, UNUSED xlat_exp_t const *exp, void *uctx)
-{
- *((void **)xlat_inst) = talloc_get_type_abort(uctx, rlm_mschap_t);
- return 0;
-}
-
static xlat_arg_parser_t const mschap_xlat_args[] = {
{ .required = true, .single = true, .type = FR_TYPE_STRING },
{ .concat = true, .type = FR_TYPE_STRING },
*
* @ingroup xlat_functions
*/
-static xlat_action_t mschap_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t mschap_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
size_t data_len;
uint8_t const *data = NULL;
uint8_t buffer[32];
fr_pair_t *user_name;
fr_pair_t *chap_challenge, *response;
- rlm_mschap_t const *inst;
- void *instance;
+ rlm_mschap_t const *inst = talloc_get_type_abort(xctx->mctx->inst->data, rlm_mschap_t);
fr_value_box_t *arg = fr_dlist_head(in);
fr_value_box_t *vb;
bool tainted = false;
- memcpy(&instance, xlat_inst, sizeof(instance));
- inst = talloc_get_type_abort(instance, rlm_mschap_t);
-
response = NULL;
/*
xlat = xlat_register_module(inst, mctx, mctx->inst->name, mschap_xlat, NULL);
xlat_func_args(xlat, mschap_xlat_args);
- xlat_async_instantiate_set(xlat, mod_xlat_instantiate, rlm_mschap_t *, NULL, inst);
return 0;
}
PerlInterpreter *perl; //!< Thread specific perl interpreter.
} rlm_perl_thread_t;
-typedef struct {
- rlm_perl_t *inst; //!< Module global instance
-} rlm_perl_xlat_t;
-
-typedef struct {
- rlm_perl_thread_t *t; //!< Module thread instance
-} rlm_perl_xlat_thread_t;
-
static void *perl_dlhandle; //!< To allow us to load perl's symbols into the global symbol table.
/*
return 0;
}
-static int mod_xlat_instantiate(void *xlat_inst, UNUSED xlat_exp_t const *exp, void *uctx)
-{
- rlm_perl_t *inst = talloc_get_type_abort(uctx, rlm_perl_t);
- rlm_perl_xlat_t *xi = talloc_get_type_abort(xlat_inst, rlm_perl_xlat_t);
-
- xi->inst = inst;
-
- return 0;
-}
-
-static int mod_xlat_thread_instantiate(UNUSED void *xlat_inst, void *xlat_thread_inst,
- UNUSED xlat_exp_t const *exp, void *uctx)
-{
- rlm_perl_t *inst = talloc_get_type_abort(uctx, rlm_perl_t);
- rlm_perl_xlat_thread_t *xt = xlat_thread_inst;
-
- xt->t = talloc_get_type_abort(module_thread_by_data(inst)->data, rlm_perl_thread_t);
-
- return 0;
-}
-
static xlat_arg_parser_t const perl_xlat_args[] = {
{ .required = true, .single = true, .type = FR_TYPE_STRING },
{ .variadic = true, .type = FR_TYPE_VOID },
*
* @ingroup xlat_functions
*/
-static xlat_action_t perl_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t perl_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
- rlm_perl_xlat_thread_t const *xt = talloc_get_type_abort_const(xlat_thread_inst, rlm_perl_xlat_thread_t);
+ rlm_perl_thread_t const *t = talloc_get_type_abort_const(xctx->mctx->thread, rlm_perl_thread_t);
int count, i;
xlat_action_t ret = XLAT_ACTION_FAIL;
STRLEN n_a;
fr_value_box_list_init(&sub_list);
{
- dTHXa(xt->t->perl);
- PERL_SET_CONTEXT(xt->t->perl);
+ dTHXa(t->perl);
+ PERL_SET_CONTEXT(t->perl);
}
{
static int mod_bootstrap(module_inst_ctx_t const *mctx)
{
- rlm_perl_t *inst = talloc_get_type_abort(mctx->inst->data, rlm_perl_t);
xlat_t *xlat;
xlat = xlat_register_module(NULL, mctx, mctx->inst->name, perl_xlat, NULL);
xlat_func_args(xlat, perl_xlat_args);
- xlat_async_instantiate_set(xlat, mod_xlat_instantiate, rlm_perl_xlat_t, NULL, inst);
- xlat_async_thread_instantiate_set(xlat, mod_xlat_thread_instantiate, rlm_perl_xlat_thread_t, NULL, inst);
-
return 0;
}
return 0;
}
-static int redis_xlat_instantiate(void *xlat_inst, UNUSED xlat_exp_t const *exp, void *uctx)
-{
- *((rlm_redis_t **)xlat_inst) = talloc_get_type_abort(uctx, rlm_redis_t);
-
- return 0;
-}
-
static xlat_arg_parser_t const redis_remap_xlat_args[] = {
{ .required = true, .concat = true, .type = FR_TYPE_STRING },
XLAT_ARG_PARSER_TERMINATOR
* @ingroup xlat_functions
*/
static xlat_action_t redis_remap_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, void const *xlat_inst,
- UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+ xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
- rlm_redis_t const *inst = talloc_get_type_abort_const(*((void const * const *)xlat_inst),
- rlm_redis_t);
+ rlm_redis_t const *inst = talloc_get_type_abort_const(xctx->mctx->inst->data, rlm_redis_t);
fr_socket_t node_addr;
fr_pool_t *pool;
* @ingroup xlat_functions
*/
static xlat_action_t redis_node_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, void const *xlat_inst,
- UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+ xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
- rlm_redis_t const *inst = talloc_get_type_abort_const(*((void const * const *)xlat_inst),
- rlm_redis_t);
+ rlm_redis_t const *inst = talloc_get_type_abort_const(xctx->mctx->inst->data, rlm_redis_t);
fr_redis_cluster_key_slot_t const *key_slot;
fr_redis_cluster_node_t const *node;
* @ingroup xlat_functions
*/
static xlat_action_t redis_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, void const *xlat_inst,
- UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+ xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
-
- rlm_redis_t const *inst = talloc_get_type_abort_const(*((void const * const *)xlat_inst), rlm_redis_t);
+ rlm_redis_t const *inst = talloc_get_type_abort_const(xctx->mctx->inst->data, rlm_redis_t);
xlat_action_t action = XLAT_ACTION_DONE;
fr_redis_conn_t *conn;
xlat = xlat_register_module(inst, mctx, mctx->inst->name, redis_xlat, NULL);
xlat_func_args(xlat, redis_args);
- xlat_async_instantiate_set(xlat, redis_xlat_instantiate, rlm_redis_t *, NULL, inst);
/*
* %(redis_node:<key>[ idx])
name = talloc_asprintf(NULL, "%s_node", mctx->inst->name);
xlat = xlat_register_module(inst, mctx, name, redis_node_xlat, NULL);
xlat_func_args(xlat, redis_node_xlat_args);
- xlat_async_instantiate_set(xlat, redis_xlat_instantiate, rlm_redis_t *, NULL, inst);
talloc_free(name);
name = talloc_asprintf(NULL, "%s_remap", mctx->inst->name);
xlat = xlat_register_module(inst, mctx, name, redis_remap_xlat, NULL);
xlat_func_args(xlat, redis_remap_xlat_args);
- xlat_async_instantiate_set(xlat, redis_xlat_instantiate, rlm_redis_t *, NULL, inst);
talloc_free(name);
return 0;
* If we're signalled that the request has been cancelled (FR_SIGNAL_CANCEL).
* Cleanup any pending state and release the connection handle back into the pool.
*/
-void rest_io_module_action(module_ctx_t const *mctx, request_t *request, fr_state_signal_t action)
+void rest_io_module_signal(module_ctx_t const *mctx, request_t *request, fr_state_signal_t action)
{
fr_curl_io_request_t *randle = talloc_get_type_abort(mctx->rctx, fr_curl_io_request_t);
rlm_rest_thread_t *t = talloc_get_type_abort(mctx->thread, rlm_rest_thread_t);
* If we're signalled that the request has been cancelled (FR_SIGNAL_CANCEL).
* Cleanup any pending state and release the connection handle back into the pool.
*/
-void rest_io_xlat_signal(request_t *request, UNUSED void *instance, void *thread, void *rctx, fr_state_signal_t action)
+void rest_io_xlat_signal(xlat_ctx_t const *xctx, request_t *request, fr_state_signal_t action)
{
- rest_xlat_thread_inst_t *xti = talloc_get_type_abort(thread, rest_xlat_thread_inst_t);
- rlm_rest_t *mod_inst = xti->inst;
- rlm_rest_thread_t *t = xti->t;
+ rlm_rest_t *mod_inst = talloc_get_type_abort(xctx->mctx->inst->data, rlm_rest_t);
+ rlm_rest_thread_t *t = talloc_get_type_abort(xctx->mctx->thread, rlm_rest_thread_t);
- rlm_rest_xlat_rctx_t *our_rctx = talloc_get_type_abort(rctx, rlm_rest_xlat_rctx_t);
+ rlm_rest_xlat_rctx_t *our_rctx = talloc_get_type_abort(xctx->rctx, rlm_rest_xlat_rctx_t);
fr_curl_io_request_t *randle = talloc_get_type_abort(our_rctx->handle, fr_curl_io_request_t);
- rest_io_module_action(MODULE_CTX(dl_module_instance_by_data(mod_inst), t, randle), request, action);
+ rest_io_module_signal(MODULE_CTX(dl_module_instance_by_data(mod_inst), t, randle), request, action);
}
 //!< and corralling structure for REST requests.
} rlm_rest_thread_t;
-/** Wrapper around the module thread stuct for individual xlats
- *
- */
-typedef struct {
- rlm_rest_t *inst; //!< Instance of rlm_rest.
- rlm_rest_thread_t *t; //!< rlm_rest thread instance.
-} rest_xlat_thread_inst_t;
-
/*
* States for stream based attribute encoders
*/
/*
* Async IO helpers
*/
-void rest_io_module_action(module_ctx_t const *mctx, request_t *request, fr_state_signal_t action);
-void rest_io_xlat_signal(request_t *request, void *xlat_inst, void *xlat_thread_inst, void *rctx, fr_state_signal_t action);
+void rest_io_module_signal(module_ctx_t const *mctx, request_t *request, fr_state_signal_t action);
+void rest_io_xlat_signal(xlat_ctx_t const *xctx, request_t *request, fr_state_signal_t action);
}
static xlat_action_t rest_xlat_resume(TALLOC_CTX *ctx, fr_dcursor_t *out,
- request_t *request, UNUSED void const *xlat_inst, void *xlat_thread_inst,
- UNUSED fr_value_box_list_t *in, void *rctx)
+ xlat_ctx_t const *xctx,
+ request_t *request, UNUSED fr_value_box_list_t *in)
{
- rest_xlat_thread_inst_t *xti = talloc_get_type_abort(xlat_thread_inst, rest_xlat_thread_inst_t);
- rlm_rest_t const *mod_inst = xti->inst;
- rlm_rest_thread_t *t = xti->t;
-
- rlm_rest_xlat_rctx_t *our_rctx = talloc_get_type_abort(rctx, rlm_rest_xlat_rctx_t);
+ rlm_rest_t const *inst = talloc_get_type_abort(xctx->mctx->inst->data, rlm_rest_t);
+ rlm_rest_thread_t *t = talloc_get_type_abort(xctx->mctx->thread, rlm_rest_thread_t);
+ rlm_rest_xlat_rctx_t *rctx = talloc_get_type_abort(xctx->rctx, rlm_rest_xlat_rctx_t);
int hcode;
ssize_t len;
char const *body;
xlat_action_t xa = XLAT_ACTION_DONE;
- fr_curl_io_request_t *handle = talloc_get_type_abort(our_rctx->handle, fr_curl_io_request_t);
- rlm_rest_section_t *section = &our_rctx->section;
+ fr_curl_io_request_t *handle = talloc_get_type_abort(rctx->handle, fr_curl_io_request_t);
+ rlm_rest_section_t *section = &rctx->section;
if (section->tls.extract_cert_attrs) fr_curl_response_certinfo(request, handle);
}
finish:
- rest_request_cleanup(mod_inst, handle);
+ rest_request_cleanup(inst, handle);
fr_pool_connection_release(t->pool, request, handle);
- talloc_free(our_rctx);
+ talloc_free(rctx);
return xa;
}
* @ingroup xlat_functions
*/
static xlat_action_t rest_xlat(UNUSED TALLOC_CTX *ctx, UNUSED fr_dcursor_t *out,
- request_t *request, UNUSED void const *xlat_inst, void *xlat_thread_inst,
+ xlat_ctx_t const *xctx, request_t *request,
fr_value_box_list_t *in)
{
- rest_xlat_thread_inst_t *xti = talloc_get_type_abort(xlat_thread_inst, rest_xlat_thread_inst_t);
- rlm_rest_t const *mod_inst = xti->inst;
- rlm_rest_thread_t *t = xti->t;
+ rlm_rest_t const *inst = talloc_get_type_abort_const(xctx->mctx->inst->data, rlm_rest_t);
+ rlm_rest_thread_t *t = talloc_get_type_abort(xctx->mctx->thread, rlm_rest_thread_t);
fr_curl_io_request_t *randle = NULL;
int ret;
/*
* Section gets modified, so we need our own copy.
*/
- memcpy(&rctx->section, &mod_inst->xlat, sizeof(*section));
+ memcpy(&rctx->section, &inst->xlat, sizeof(*section));
fr_assert(in_vb->type == FR_TYPE_GROUP);
RPEDEBUG("Failed escaping URI");
error:
- rest_request_cleanup(mod_inst, randle);
+ rest_request_cleanup(inst, randle);
fr_pool_connection_release(t->pool, request, randle);
talloc_free(section);
*
* @todo We could extract the User-Name and password from the URL string.
*/
- ret = rest_request_config(MODULE_CTX(dl_module_instance_by_data(mod_inst), t, NULL),
+ ret = rest_request_config(MODULE_CTX(dl_module_instance_by_data(inst), t, NULL),
section, request, randle, section->method,
section->body, uri_vb->vb_strvalue, NULL, NULL);
if (ret < 0) goto error;
RETURN_MODULE_FAIL;
}
- return unlang_module_yield(request, mod_authorize_result, rest_io_module_action, handle);
+ return unlang_module_yield(request, mod_authorize_result, rest_io_module_signal, handle);
}
static unlang_action_t mod_authenticate_result(rlm_rcode_t *p_result,
return 0;
}
-/** Resolves and caches the module's thread instance for use by a specific xlat instance
- *
- * @param[in] xlat_inst UNUSED.
- * @param[in] xlat_thread_inst pre-allocated structure to hold pointer to module's
- * thread instance.
- * @param[in] exp UNUSED.
- * @param[in] uctx Module's global instance. Used to lookup thread
- * specific instance.
- * @return 0.
- */
-static int mod_xlat_thread_instantiate(UNUSED void *xlat_inst, void *xlat_thread_inst,
- UNUSED xlat_exp_t const *exp, void *uctx)
-{
- rlm_rest_t *inst = talloc_get_type_abort(uctx, rlm_rest_t);
- rest_xlat_thread_inst_t *xt = xlat_thread_inst;
-
- xt->inst = inst;
- xt->t = talloc_get_type_abort(module_thread_by_data(inst)->data, rlm_rest_thread_t);
-
- return 0;
-}
-
/** Create a thread specific multihandle
*
* Easy handles representing requests are added to the curl multihandle
* that must be referenced in later calls, store a handle to it
* in *instance otherwise put a null pointer there.
*/
-static int mod_instantiate(module_inst_ctx_t const *mctx)
+static int instantiate(module_inst_ctx_t const *mctx)
{
rlm_rest_t *inst = talloc_get_type_abort(mctx->inst->data, rlm_rest_t);
CONF_SECTION *conf = mctx->inst->conf;
xlat = xlat_register_module(inst, mctx, mctx->inst->name, rest_xlat, XLAT_FLAG_NEEDS_ASYNC);
xlat_func_args(xlat, rest_xlat_args);
- xlat_async_thread_instantiate_set(xlat, mod_xlat_thread_instantiate, rest_xlat_thread_inst_t, NULL, inst);
return 0;
}
.onload = mod_load,
.unload = mod_unload,
.bootstrap = mod_bootstrap,
- .instantiate = mod_instantiate,
+ .instantiate = instantiate,
.thread_instantiate = mod_thread_instantiate,
.thread_detach = mod_thread_detach,
.methods = {
*
* @ingroup xlat_functions
*/
-static xlat_action_t soh_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
+static xlat_action_t soh_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx, request_t *request,
fr_value_box_list_t *in)
{
fr_value_box_t *in_head = fr_dlist_head(in);
*
* @ingroup xlat_functions
*/
-static xlat_action_t sql_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t sql_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
rlm_sql_handle_t *handle = NULL;
rlm_sql_row_t row;
- rlm_sql_t const *inst;
- void *instance;
+ rlm_sql_t const *inst = talloc_get_type_abort(xctx->mctx->inst->data, rlm_sql_t);
sql_rcode_t rcode;
xlat_action_t ret = XLAT_ACTION_DONE;
char const *p;
fr_value_box_t *vb = NULL;
bool fetched = false;
- memcpy(&instance, xlat_inst, sizeof(instance));
- inst = talloc_get_type_abort(instance, rlm_sql_t);
-
handle = fr_pool_connection_get(inst->pool, request); /* connection pool should produce error */
if (!handle) return XLAT_ACTION_FAIL;
RETURN_MODULE_RCODE(rcode);
}
-/** Make module instance available to xlats
- *
- */
-static int mod_xlat_instantiate(void *xlat_inst, UNUSED xlat_exp_t const *exp, void *uctx)
-{
- *((void **)xlat_inst) = talloc_get_type_abort(uctx, rlm_sql_t);
- return 0;
-}
-
static int mod_detach(module_detach_ctx_t const *mctx)
{
rlm_sql_t *inst = talloc_get_type_abort(mctx->inst->data, rlm_sql_t);
sql_xlat_arg->uctx = inst;
xlat_func_mono(xlat, sql_xlat_arg);
- xlat_async_instantiate_set(xlat, mod_xlat_instantiate, rlm_sql_t *, NULL, inst);
-
/*
* Register the SQL map processor function
*/
*/
RCSID("$Id$")
-#define LOG_PREFIX inst->name
+#define LOG_PREFIX mctx->inst->name
#include <freeradius-devel/server/base.h>
#include <freeradius-devel/server/module.h>
/** Run a trigger (useful for testing)
*
*/
-static xlat_action_t trigger_test_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
+static xlat_action_t trigger_test_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx, request_t *request,
fr_value_box_list_t *in)
{
fr_value_box_t *in_head = fr_dlist_head(in);
*
* This just copies the input to the output.
*/
-static xlat_action_t test_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out, UNUSED request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t test_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx, UNUSED request_t *request,
+ fr_value_box_list_t *in)
{
fr_value_box_t *vb_p = NULL;
fr_value_box_t *vb;
static int mod_thread_detach(module_thread_inst_ctx_t const *mctx)
{
- rlm_test_t *inst = talloc_get_type_abort(mctx->inst->data, rlm_test_t);
rlm_test_thread_t *t = talloc_get_type_abort(mctx->thread, rlm_test_thread_t);
INFO("Performing detach for thread %p", (void *)t->value);
unbound_log_t *u_log; //!< Unbound log structure
} rlm_unbound_thread_t;
-typedef struct {
- rlm_unbound_t *inst; //!< Instance data
- rlm_unbound_thread_t *t; //!< Thread structure
-} unbound_xlat_thread_inst_t;
-
typedef struct {
int async_id; //!< Id of async query
request_t *request; //!< Current request being processed
/*
* Xlat signal callback if an unbound request needs cancelling
*/
-static void xlat_unbound_signal(request_t *request, UNUSED void *instance, UNUSED void *thread,
- void *rctx, fr_state_signal_t action)
+static void xlat_unbound_signal(xlat_ctx_t const *xctx, request_t *request, fr_state_signal_t action)
{
- unbound_request_t *ur = talloc_get_type_abort(rctx, unbound_request_t);
+ unbound_request_t *ur = talloc_get_type_abort(xctx->rctx, unbound_request_t);
if (action != FR_SIGNAL_CANCEL) return;
* Xlat resume callback after unbound has either returned or timed out
* Move the parsed results to the xlat output cursor
*/
-static xlat_action_t xlat_unbound_resume(UNUSED TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
- UNUSED fr_value_box_list_t *in, void *rctx)
+static xlat_action_t xlat_unbound_resume(UNUSED TALLOC_CTX *ctx, fr_dcursor_t *out,
+ xlat_ctx_t const *xctx,
+ request_t *request, UNUSED fr_value_box_list_t *in)
{
fr_value_box_t *vb;
- unbound_request_t *ur = talloc_get_type_abort(rctx, unbound_request_t);
+ unbound_request_t *ur = talloc_get_type_abort(xctx->rctx, unbound_request_t);
/*
* Request timed out
*
* @ingroup xlat_functions
*/
-static xlat_action_t xlat_unbound(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, void *xlat_thread_inst,
- fr_value_box_list_t *in)
+static xlat_action_t xlat_unbound(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ xlat_ctx_t const *xctx,
+ request_t *request, fr_value_box_list_t *in)
{
+ rlm_unbound_t const *inst = talloc_get_type_abort_const(xctx->mctx->inst->data, rlm_unbound_t);
+ rlm_unbound_thread_t *t = talloc_get_type_abort(xctx->mctx->thread, rlm_unbound_thread_t);
fr_value_box_t *host_vb = fr_dlist_head(in);
fr_value_box_t *query_vb = fr_dlist_next(in, host_vb);
fr_value_box_t *count_vb = fr_dlist_next(in, query_vb);
- unbound_xlat_thread_inst_t *xt = talloc_get_type_abort(xlat_thread_inst, unbound_xlat_thread_inst_t);
unbound_request_t *ur;
if (host_vb->length == 0) {
}
ur->request = request;
- ur->t = xt->t;
+ ur->t = t;
ur->out_ctx = ctx;
#define UB_QUERY(_record, _rrvalue, _return, _hasprio) \
if (strcmp(query_vb->vb_strvalue, _record) == 0) { \
ur->return_type = _return; \
ur->has_priority = _hasprio; \
- ub_resolve_event(xt->t->ev_b->ub, host_vb->vb_strvalue, _rrvalue, 1, ur, \
+ ub_resolve_event(t->ev_b->ub, host_vb->vb_strvalue, _rrvalue, 1, ur, \
xlat_unbound_callback, &ur->async_id); \
}
* unbound returned before we yielded - run the callback
* This is when serving results from local data
*/
- if (ur->async_id == 0) return xlat_unbound_resume(NULL, out, request, NULL, NULL, NULL, ur);
+ if (ur->async_id == 0) {
+ xlat_ctx_t our_xctx = *xctx;
+
+ our_xctx.rctx = ur; /* Make the rctx available to the resume function */
+
+ return xlat_unbound_resume(ctx, out, &our_xctx, request, in);
+ }
- if (fr_event_timer_in(ur, ur->t->ev_b->el, &ur->ev, fr_time_delta_from_msec(xt->inst->timeout),
+ if (fr_event_timer_in(ur, ur->t->ev_b->el, &ur->ev, fr_time_delta_from_msec(inst->timeout),
xlat_unbound_timeout, ur) < 0) {
REDEBUG("Unable to attach unbound timeout_envent");
- ub_cancel(xt->t->ev_b->ub, ur->async_id);
+ ub_cancel(t->ev_b->ub, ur->async_id);
return XLAT_ACTION_FAIL;
}
return unlang_xlat_yield(request, xlat_unbound_resume, xlat_unbound_signal, ur);
}
-static int mod_xlat_thread_instantiate(UNUSED void *xlat_inst, void *xlat_thread_inst,
- UNUSED xlat_exp_t const *exp, void *uctx)
-{
- rlm_unbound_t *inst = talloc_get_type_abort(uctx, rlm_unbound_t);
- unbound_xlat_thread_inst_t *xt = talloc_get_type_abort(xlat_thread_inst, unbound_xlat_thread_inst_t);
-
- xt->inst = inst;
- xt->t = talloc_get_type_abort(module_thread_by_data(inst)->data, rlm_unbound_thread_t);
-
- return 0;
-}
-
static int mod_thread_instantiate(module_thread_inst_ctx_t const *mctx)
{
rlm_unbound_t *inst = talloc_get_type_abort(mctx->inst->data, rlm_unbound_t);
if(!(xlat = xlat_register_module(NULL, mctx, mctx->inst->name, xlat_unbound, XLAT_FLAG_NEEDS_ASYNC))) return -1;
xlat_func_args(xlat, xlat_unbound_args);
- xlat_async_thread_instantiate_set(xlat, mod_xlat_thread_instantiate, unbound_xlat_thread_inst_t, NULL, inst);
return 0;
}
*
* @ingroup xlat_functions
*/
-static xlat_action_t unpack_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out, request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
+static xlat_action_t unpack_xlat(TALLOC_CTX *ctx, fr_dcursor_t *out,
+ UNUSED xlat_ctx_t const *xctx, request_t *request,
fr_value_box_list_t *in)
{
size_t len, input_len, offset;
*
* @ingroup xlat_functions
*/
-static xlat_action_t modhex_to_hex_xlat(UNUSED TALLOC_CTX *ctx, fr_dcursor_t * out, request_t *request,
- UNUSED void const *xlat_inst, UNUSED void *xlat_thread_inst,
+static xlat_action_t modhex_to_hex_xlat(UNUSED TALLOC_CTX *ctx, fr_dcursor_t * out,
+ UNUSED xlat_ctx_t const *xctx, request_t *request,
fr_value_box_list_t *in)
{
ssize_t len;