#
#
-# max_request_time:: The maximum time (in seconds) to handle a request.
+# request:: Configuration for request handling.
#
-# Requests which take more time than this to process may be killed, and
-# a REJECT message is returned.
+# These items control how requests are allocated and processed.
#
-# WARNING: If you notice that requests take a long time to be handled,
-# then this MAY INDICATE a bug in the server, in one of the modules
-# used to handle a request, OR in your local configuration.
+# NOTE: Most of these configuration items are per-worker. To get the
+# true number of requests you need to multiply the value by the number
+# of workers (or the number of cores).
#
-# This problem is most often seen when using an SQL database. If it takes
-# more than a second or two to receive an answer from the SQL database,
-# then it probably means that you haven't indexed the database. See your
-# SQL server documentation for more information.
-#
-# Useful range of values: `5` to `120`
-#
-max_request_time = 30
+request {
+ #
+ # max:: The maximum number of requests which the server
+ # keeps track of. This should be at least `256` multiplied by the
+ # number of clients. e.g. With `4` clients, this number should be
+ # `1024`.
+ #
+ # If this number is too low, then when the server becomes busy,
+ # it will not respond to any new requests, until the 'cleanup_delay'
+ # time has passed, and it has removed the old requests.
+ #
+ # If this number is set too high, then the server will use a bit more
+ # memory for no real benefit.
+ #
+ # If you aren't sure what it should be set to, it's better to set it
+ # too high than too low. Setting it to `1000` per client is probably
+ # the highest it should be.
+ #
+ # Unlike v3, this setting is per worker thread, and is not global to
+ # the server.
+ #
+ # Useful range of values: `256` to `infinity`
+ #
+ max = 16384
-#
-# max_requests:: The maximum number of requests which the server
-# keeps track of. This should be at least `256` multiplied by the
-# number of clients. e.g. With `4` clients, this number should be
-# `1024`.
-#
-# If this number is too low, then when the server becomes busy,
-# it will not respond to any new requests, until the 'cleanup_delay'
-# time has passed, and it has removed the old requests.
-#
-# If this number is set too high, then the server will use a bit more
-# memory for no real benefit.
-#
-# If you aren't sure what it should be set to, it's better to set it
-# too high than too low. Setting it to `1000` per client is probably
-# the highest it should be.
-#
-# Unlike v3, this setting is per worker thread, and is not global to
-# the server.
-#
-# Useful range of values: `256` to `infinity`
-#
-max_requests = 16384
+ #
+ # timeout:: The maximum time (in seconds) to handle a request.
+ #
+ # Requests which take more time than this to process may be killed, and
+ # a REJECT message is returned.
+ #
+ # WARNING: If you notice that requests take a long time to be handled,
+ # then this MAY INDICATE a bug in the server, in one of the modules
+ # used to handle a request, OR in your local configuration.
+ #
+ # This problem is most often seen when using an SQL database. If it takes
+ # more than a second or two to receive an answer from the SQL database,
+ # then it probably means that you haven't indexed the database. See your
+ # SQL server documentation for more information.
+ #
+ # Useful range of values: `5` to `120`
+ #
+ timeout = 30
+
+ #
+ # Instead of requests being freed at the end of processing, they can be
+ # returned to a list of requests to reuse.
+ #
+	# As with `request.max`, reuse values apply on a per-worker basis, so the
+ # true number of cached requests is `request.reuse.max * <num workers>`.
+ #
+ reuse {
+ #
+ # min:: The minimum number of requests to keep in the reuse list.
+ #
+ min = 10
+
+ #
+ # max:: The maximum number of reusable requests.
+ #
+ # Any requests being processed by a worker beyond
+ # this number will cause a temporary request to be allocated.
+ # This is less efficient than the block allocation so
+ # `max` should be set to reflect the number of outstanding
+ # requests expected at peak load.
+ #
+ # FIXME: Should likely default to request.max
+ #
+# max = 100
+
+ #
+ # cleanup_interval:: How often to free un-used requests.
+ #
+ # Every `cleanup_interval` a cleanup routine runs which
+ # will free any blocks of handles which are not in use,
+ # ensuring that at least `min` handles are kept.
+ #
+ # This ensures that the server's memory usage does not remain
+ # permanently bloated after a load spike.
+ #
+ cleanup_interval = 30s
+ }
+}
#
# reverse_lookups:: Log the names of clients or just their IP addresses
# contents are the same as the `timeout` keyword.
#
# This section limits the total processing time for a request. The
-# values given here should be less than `max_request_time`.
+# values given here should be less than `request.timeout`.
#
-# When a request reaches `max_request_time`, it is forcibly stopped.
+# When a request reaches `request.timeout`, it is forcibly stopped.
# No further processing takes place.
#
# When a request reaches the time specified in this `timeout` section,
# "timeout for the timeout", then just add another `timeout` section
# inside of this one.
#
-# Note that `max_request_time` still applies. So the timeout value
+# Note that `request.timeout` still applies. So the timeout value
# given here should be less than the value given by
-# `max_request_time`.
+# `request.timeout`.
#
#
/*
* Create and initialize the new request.
*/
- request = request_alloc_internal(ctx, NULL);
+ request = request_local_alloc_internal(ctx, NULL);
if (!request->packet) request->packet = fr_packet_alloc(request, false);
if (!request->reply) request->reply = fr_packet_alloc(request, false);
{
request_t *request;
- request = request_alloc_internal(NULL, (&(request_init_args_t){ .namespace = old->proto_dict }));
+ request = request_local_alloc_internal(NULL, (&(request_init_args_t){ .namespace = old->proto_dict }));
if (!request) return NULL;
if (!request->packet) request->packet = fr_packet_alloc(request, false);
}
if (count == 1) {
- fr_timer_in(request, el->tl, &cancel_timer, config->max_request_time, false, cancel_request, request);
+ fr_timer_in(request, el->tl, &cancel_timer, config->worker.max_request_time, false, cancel_request, request);
unlang_interpret_synchronous(el, request);
} else {
}
#endif
- fr_timer_in(request, el->tl, &cancel_timer, config->max_request_time, false, cancel_request, request);
+ fr_timer_in(request, el->tl, &cancel_timer, config->worker.max_request_time, false, cancel_request, request);
unlang_interpret_synchronous(el, request);
talloc_free(request);
(chbind->response == NULL));
/* Set-up the fake request */
- fake = request_alloc_internal(request, &(request_init_args_t){ .parent = request });
+ fake = request_local_alloc_internal(request, &(request_init_args_t){ .parent = request });
MEM(fr_pair_prepend_by_da(fake->request_ctx, &vp, &fake->request_pairs, attr_freeradius_proxied_to) >= 0);
(void) fr_pair_value_from_str(vp, "127.0.0.1", sizeof("127.0.0.1") - 1, NULL, false);
#include <freeradius-devel/server/time_tracking.h>
#include <freeradius-devel/util/dlist.h>
#include <freeradius-devel/util/minmax_heap.h>
+#include <freeradius-devel/util/slab.h>
#include <freeradius-devel/util/time.h>
#include <freeradius-devel/util/timer.h>
#define CACHE_LINE_SIZE 64
static alignas(CACHE_LINE_SIZE) atomic_uint64_t request_number = 0;
+FR_SLAB_TYPES(request, request_t);
+FR_SLAB_FUNCS(request, request_t);
+
static _Thread_local fr_ring_buffer_t *fr_worker_rb;
typedef struct {
bool exiting; //!< are we exiting?
fr_worker_channel_t *channel; //!< list of channels
+
+ request_slab_list_t *slab; //!< slab allocator for request_t
};
typedef struct {
{
fr_timer_list_t *tl = fr_time_delta_eq(worker->config.max_request_time, timeout) ? worker->timeout : worker->timeout_custom;
- fr_timer_disarm(request->timeout); /* Remove the existing timer if there is one */
+ /* No need to disarm fr_timer_in does it for us */
if (unlikely(fr_timer_in(request, tl, &request->timeout, timeout,
true, _worker_request_timeout, request) < 0)) {
return fr_timer_list_num_events(worker->timeout) + fr_timer_list_num_events(worker->timeout_custom);
}
+static int _worker_request_deinit(request_t *request, UNUSED void *uctx)
+{
+ return request_slab_deinit(request);
+}
+
static void worker_request_bootstrap(fr_worker_t *worker, fr_channel_data_t *cd, fr_time_t now)
{
int ret = -1;
TALLOC_CTX *ctx;
fr_listen_t *listen = cd->listen;
- if (worker_num_requests(worker) >= (uint32_t) worker->config.max_requests) goto nak;
+ if (worker_num_requests(worker) >= (uint32_t) worker->config.max_requests) {
+ RATE_LIMIT_GLOBAL(ERROR, "Worker at max requests");
+ goto nak;
+ }
/*
* Receive a message to the worker queue, and decode it
*/
fr_assert(listen != NULL);
- ctx = request = request_alloc_external(NULL, (&(request_init_args_t){ .namespace = listen->dict }));
- if (!request) goto nak;
+ ctx = request = request_slab_reserve(worker->slab);
+ if (!request) {
+ RATE_LIMIT_GLOBAL(ERROR, "Worker failed allocating new request");
+ goto nak;
+ }
+ /*
+ * Ensures that both the deinit function runs AND
+ * the request is returned to the slab if something
+ * calls talloc_free() on it.
+ */
+ request_slab_element_set_destructor(request, _worker_request_deinit, worker);
+ /*
+	 * Have to initialise the request manually because namespace
+ * changes based on the listener that allocated it.
+ */
+ if (request_init(request, REQUEST_TYPE_EXTERNAL, (&(request_init_args_t){ .namespace = listen->dict })) < 0) {
+ request_slab_release(request);
+ goto nak;
+ }
+
+ /*
+ * Do normal worker init that's shared between internal
+ * and external requests.
+ */
worker_request_init(worker, request, now);
worker_request_name_number(request);
RWARN("Discarding duplicate of request (%"PRIu64")", old->number);
fr_channel_null_reply(request->async->channel);
- talloc_free(request);
+ request_slab_release(request);
/*
* Signal there's a dup, and ignore the
*/
if (unlikely((request->master_state == REQUEST_STOP_PROCESSING) &&
!fr_channel_active(request->async->channel))) {
- talloc_free(request);
+ request_slab_release(request);
return;
}
worker_send_reply(worker, request, request->master_state != REQUEST_STOP_PROCESSING, now);
- talloc_free(request);
+ request_slab_release(request);
}
/** Internal request (i.e. one generated by the interpreter) is now complete
* All other requests must be freed by the
* code which allocated them.
*/
- talloc_free(request);
+ request_slab_release(request);
}
CHECK_CONFIG(max_requests,1024,(1 << 30));
CHECK_CONFIG(max_channels, 64, 1024);
- CHECK_CONFIG(talloc_pool_size, 4096, 65536);
+ CHECK_CONFIG(reuse.child_pool_size, 4096, 65536);
CHECK_CONFIG(message_set_size, 1024, 8192);
CHECK_CONFIG(ring_buffer_size, (1 << 17), (1 << 20));
CHECK_CONFIG_TIME_DELTA(max_request_time, fr_time_delta_from_sec(5), fr_time_delta_from_sec(120));
fr_strerror_const("Failed initialising interpreter");
goto fail;
}
+
+ {
+ if (worker->config.reuse.child_pool_size == 0) worker->config.reuse.child_pool_size = REQUEST_POOL_SIZE;
+ if (worker->config.reuse.num_children == 0) worker->config.reuse.num_children = REQUEST_POOL_HEADERS;
+
+ if (!(worker->slab = request_slab_list_alloc(worker, el, &worker->config.reuse, NULL, NULL,
+ UNCONST(void *, worker), true, false))) {
+ fr_strerror_const("Failed creating request slab list");
+ goto fail;
+ }
+ }
+
unlang_interpret_set_thread_default(worker->intp);
return worker;
#include <freeradius-devel/util/event.h>
#include <freeradius-devel/util/heap.h>
#include <freeradius-devel/util/log.h>
+#include <freeradius-devel/util/slab.h>
#include <freeradius-devel/util/talloc.h>
#include <pthread.h>
extern fr_cmd_table_t cmd_worker_table[];
typedef struct {
- int max_requests; //!< max requests this worker will handle
+ int max_requests; //!< max requests this worker will handle
- int max_channels; //!< maximum number of channels
+ int max_channels; //!< maximum number of channels
- int message_set_size; //!< default start number of messages
- int ring_buffer_size; //!< default start size for the ring buffers
+ int message_set_size; //!< default start number of messages
+ int ring_buffer_size; //!< default start size for the ring buffers
- fr_time_delta_t max_request_time; //!< maximum time a request can be processed
+ fr_time_delta_t max_request_time; //!< maximum time a request can be processed
- size_t talloc_pool_size; //!< for each request
+ fr_slab_config_t reuse; //!< slab allocator configuration
} fr_worker_config_t;
int fr_worker_request_timeout_set(fr_worker_t *worker, request_t *request, fr_time_delta_t timeout) CC_HINT(nonnull);
* the config item will *not* get printed out in debug mode, so that no one knows
* it exists.
*/
- { FR_CONF_OFFSET_TYPE_FLAGS("talloc_pool_size", FR_TYPE_SIZE, CONF_FLAG_HIDDEN, main_config_t, worker.talloc_pool_size), .func = talloc_pool_size_parse }, /* DO NOT SET DEFAULT */
{ FR_CONF_OFFSET_FLAGS("talloc_memory_report", CONF_FLAG_HIDDEN, main_config_t, talloc_memory_report) }, /* DO NOT SET DEFAULT */
CONF_PARSER_TERMINATOR
};
};
#endif
+static const conf_parser_t request_reuse_config[] = {
+ FR_SLAB_CONFIG_CONF_PARSER
+ CONF_PARSER_TERMINATOR
+};
+
+static const conf_parser_t request_config[] = {
+ { FR_CONF_OFFSET("max", main_config_t, worker.max_requests), .dflt = "0" },
+ { FR_CONF_OFFSET("timeout", main_config_t, worker.max_request_time), .dflt = STRINGIFY(MAX_REQUEST_TIME), .func = max_request_time_parse },
+ { FR_CONF_OFFSET_TYPE_FLAGS("talloc_pool_size", FR_TYPE_SIZE, CONF_FLAG_HIDDEN, main_config_t, worker.reuse.child_pool_size), .func = talloc_pool_size_parse }, /* DO NOT SET DEFAULT */
+ { FR_CONF_OFFSET_SUBSECTION("reuse", 0, main_config_t, worker.reuse, request_reuse_config) },
+ CONF_PARSER_TERMINATOR
+};
+
static const conf_parser_t server_config[] = {
/*
* FIXME: 'prefix' is the ONLY one which should be
{ FR_CONF_OFFSET("panic_action", main_config_t, panic_action) },
{ FR_CONF_OFFSET("reverse_lookups", main_config_t, reverse_lookups), .dflt = "no", .func = reverse_lookups_parse },
{ FR_CONF_OFFSET("hostname_lookups", main_config_t, hostname_lookups), .dflt = "yes", .func = hostname_lookups_parse },
- { FR_CONF_OFFSET("max_request_time", main_config_t, worker.max_request_time), .dflt = STRINGIFY(MAX_REQUEST_TIME), .func = max_request_time_parse },
{ FR_CONF_OFFSET("pidfile", main_config_t, pid_file), .dflt = "${run_dir}/radiusd.pid"},
{ FR_CONF_OFFSET_FLAGS("debug_level", CONF_FLAG_HIDDEN, main_config_t, debug_level), .dflt = "0" },
- { FR_CONF_OFFSET("max_requests", main_config_t, worker.max_requests), .dflt = "0" },
+
+ { FR_CONF_POINTER("request", 0, CONF_FLAG_SUBSECTION, NULL), .subcs = (void const *) request_config },
{ FR_CONF_POINTER("log", 0, CONF_FLAG_SUBSECTION, NULL), .subcs = (void const *) log_config },
{ FR_CONF_POINTER("interpret", 0, CONF_FLAG_SUBSECTION, NULL), .subcs = (void const *) interpret_config, .name2 = CF_IDENT_ANY },
#endif
+ { FR_CONF_DEPRECATED("max_requests", main_config_t, worker.max_requests) },
+ { FR_CONF_DEPRECATED("max_request_time", main_config_t, worker.max_request_time) },
+
CONF_PARSER_TERMINATOR
};
memcpy(&value, out, sizeof(value));
- FR_SIZE_BOUND_CHECK("resources.talloc_pool_size", value, >=, (size_t)(2 * 1024));
- FR_SIZE_BOUND_CHECK("resources.talloc_pool_size", value, <=, (size_t)(1024 * 1024));
+ FR_SIZE_BOUND_CHECK("request.talloc_pool_size", value, >=, (size_t)(2 * 1024));
+ FR_SIZE_BOUND_CHECK("request.talloc_pool_size", value, <=, (size_t)(1024 * 1024));
memcpy(out, &value, sizeof(value));
memcpy(&value, out, sizeof(value));
- FR_TIME_DELTA_BOUND_CHECK("max_request_time", value, >=, fr_time_delta_from_sec(5));
- FR_TIME_DELTA_BOUND_CHECK("max_request_time", value, <=, fr_time_delta_from_sec(120));
+ FR_TIME_DELTA_BOUND_CHECK("request.timeout", value, >=, fr_time_delta_from_sec(5));
+ FR_TIME_DELTA_BOUND_CHECK("request.timeout", value, <=, fr_time_delta_from_sec(120));
memcpy(out, &value, sizeof(value));
#endif
INFO("Starting - reading configuration files ...");
- /*
- * About sizeof(request_t) + sizeof(fr_packet_t) * 2 + sizeof(fr_pair_t) * 400
- *
- * Which should be enough for many configurations.
- */
- config->worker.talloc_pool_size = 8 * 1024; /* default */
-
cs = cf_section_alloc(NULL, NULL, "main", NULL);
if (!cs) return -1;
{ NULL }
};
-/** The thread local free list
- *
- * Any entries remaining in the list will be freed when the thread is joined
- */
-static _Thread_local fr_dlist_head_t *request_free_list; /* macro */
-
#ifndef NDEBUG
static int _state_ctx_free(fr_pair_t *state)
{
* @param[in] type of request to initialise.
* @param[in] args Other optional arguments.
*/
-static inline CC_HINT(always_inline) int request_init(char const *file, int line,
- request_t *request, request_type_t type,
- request_init_args_t const *args)
+int _request_init(char const *file, int line,
+ request_t *request, request_type_t type,
+ request_init_args_t const *args)
{
fr_dict_t const *dict;
} else {
request_log_init_orphan(request);
}
+
+ /*
+ * This is only used by src/lib/io/worker.c
+ */
+ fr_dlist_entry_init(&request->listen_entry);
+
return 0;
}
-/** Callback for freeing a request struct
+/** Callback for slabs to deinitialise the request
+ *
+ * Does not need to be called for local requests.
*
- * @param[in] request to free or return to the free list.
+ * @param[in] request	the request to deinitialise
* @return
- * - 0 in the request was freed.
- * - -1 if the request was inserted into the free list.
+ *	- 0 if the request was deinitialised.
+ * - -1 if the request is in an unexpected state.
*/
-static int _request_free(request_t *request)
+int request_slab_deinit(request_t *request)
{
fr_assert_msg(!fr_timer_armed(request->timeout),
"alloced %s:%i: %s still in the timeout sublist",
request->alloc_line,
request->name ? request->name : "(null)", request->runnable);
- RDEBUG3("Request freed (%p)", request);
-
- /*
- * Reinsert into the free list if it's not already
- * in the free list.
- *
- * If it *IS* already in the free list, then free it.
- */
- if (unlikely(fr_dlist_entry_in_list(&request->free_entry))) {
- fr_dlist_entry_unlink(&request->free_entry); /* Don't trust the list head to be available */
- goto really_free;
- }
-
- /*
- * We keep a buffer of <active> + N requests per
- * thread, to avoid spurious allocations.
- */
- if (fr_dlist_num_elements(request_free_list) <= 256) {
- fr_dlist_head_t *free_list;
-
- if (request->session_state_ctx) {
- fr_assert(talloc_parent(request->session_state_ctx) != request); /* Should never be directly parented */
- TALLOC_FREE(request->session_state_ctx); /* Not parented from the request */
- }
- free_list = request_free_list;
-
- /*
- * Reinitialise the request
- */
- talloc_free_children(request);
-
- memset(request, 0, sizeof(*request));
- request->component = "free_list";
-#ifndef NDEBUG
- request->runnable = FR_HEAP_INDEX_INVALID;
-#endif
-
- /*
- * Reinsert into the free list
- */
- fr_dlist_insert_head(free_list, request);
- request_free_list = free_list;
-
- return -1; /* Prevent free */
- }
-
+ RDEBUG3("Request deinitialising (%p)", request);
/*
- * Ensure anything that might reference the request is
- * freed before it is.
+ * state_ctx is parented separately.
*/
- talloc_free_children(request);
+ if (request->session_state_ctx) TALLOC_FREE(request->session_state_ctx);
-really_free:
/*
- * state_ctx is parented separately.
+ * Zero out everything.
*/
- if (request->session_state_ctx) TALLOC_FREE(request->session_state_ctx);
+ memset(request, 0, sizeof(*request));
#ifndef NDEBUG
+ request->component = "free_list";
+ request->runnable = FR_HEAP_INDEX_INVALID;
request->magic = 0x01020304; /* set the request to be nonsense */
#endif
return 0;
}
-/** Free any free requests when the thread is joined
- *
- */
-static int _request_free_list_free_on_exit(void *arg)
-{
- fr_dlist_head_t *list = talloc_get_type_abort(arg, fr_dlist_head_t);
- request_t *request;
-
- /*
- * See the destructor for why this works
- */
- while ((request = fr_dlist_head(list))) if (talloc_free(request) < 0) return -1;
- return talloc_free(list);
-}
-
static inline CC_HINT(always_inline) request_t *request_alloc_pool(TALLOC_CTX *ctx)
{
request_t *request;
* and would have to be freed.
*/
MEM(request = talloc_pooled_object(ctx, request_t,
- 1 + /* Stack pool */
- UNLANG_STACK_MAX + /* Stack Frames */
- 2 + /* packets */
- 10, /* extra */
- (UNLANG_FRAME_PRE_ALLOC * UNLANG_STACK_MAX) + /* Stack memory */
- (sizeof(fr_pair_t) * 5) + /* pair lists and root*/
- (sizeof(fr_packet_t) * 2) + /* packets */
- 128 /* extra */
- ));
+ REQUEST_POOL_HEADERS,
+ REQUEST_POOL_SIZE));
fr_assert(ctx != request);
return request;
}
-/** Create a new request_t data structure
- *
- * @param[in] file where the request was allocated.
- * @param[in] line where the request was allocated.
- * @param[in] ctx to bind the request to.
- * @param[in] type what type of request to alloc.
- * @param[in] args Optional arguments.
- * @return
- * - A request on success.
- * - NULL on error.
- */
-request_t *_request_alloc(char const *file, int line, TALLOC_CTX *ctx,
- request_type_t type, request_init_args_t const *args)
-{
- request_t *request;
- fr_dlist_head_t *free_list;
-
- /*
- * Setup the free list, or return the free
- * list for this thread.
- */
- if (unlikely(!request_free_list)) {
- MEM(free_list = talloc(NULL, fr_dlist_head_t));
- fr_dlist_init(free_list, request_t, free_entry);
- fr_atexit_thread_local(request_free_list, _request_free_list_free_on_exit, free_list);
- } else {
- free_list = request_free_list;
- }
-
- request = fr_dlist_head(free_list);
- if (!request) {
- /*
- * Must be allocated with in the NULL ctx
- * as chunk is returned to the free list.
- */
- request = request_alloc_pool(NULL);
- talloc_set_destructor(request, _request_free);
- } else {
- /*
- * Remove from the free list, as we're
- * about to use it!
- */
- fr_dlist_remove(free_list, request);
- }
-
- if (request_init(file, line, request, type, args) < 0) {
- talloc_free(request);
- return NULL;
- }
-
- /*
- * Initialise entry in free list
- */
- fr_dlist_entry_init(&request->free_entry); /* Needs to be initialised properly, else bad things happen */
-
- /*
- * This is only used by src/lib/io/worker.c
- */
- fr_dlist_entry_init(&request->listen_entry);
-
- /*
- * Bind lifetime to a parent.
- *
- * If the parent is freed the destructor
- * will fire, and return the request
- * to a "top level" free list.
- */
- if (ctx) talloc_link_ctx(ctx, request);
-
- return request;
-}
-
static int _request_local_free(request_t *request)
{
/*
request_t *request;
request = request_alloc_pool(ctx);
- if (request_init(file, line, request, type, args) < 0) return NULL;
+ if (_request_init(file, line, request, type, args) < 0) return NULL;
talloc_set_destructor(request, _request_local_free);
# define REQUEST_MAGIC (0xdeadbeef)
#endif
+/*
+ * Stack pool +
+ * Stack Frames +
+ * packets +
+ * extra
+ */
+#define REQUEST_POOL_HEADERS ( \
+ 1 + \
+ UNLANG_STACK_MAX + \
+ 2 + \
+ 10 \
+ )
+
+/*
+ * Stack memory +
+ * pair lists and root +
+ * packets +
+ * extra
+ */
+#define REQUEST_POOL_SIZE ( \
+ (UNLANG_FRAME_PRE_ALLOC * UNLANG_STACK_MAX) + \
+ (sizeof(fr_pair_t) * 5) + \
+ (sizeof(fr_packet_t) * 2) + \
+ 128 \
+ )
+
typedef enum {
REQUEST_ACTIVE = 1, //!< Request is active (running or runnable)
REQUEST_STOP_PROCESSING, //!< Request has been signalled to stop
int alloc_line; //!< Line the request was allocated on.
fr_dlist_t listen_entry; //!< request's entry in the list for this listener / socket
- fr_dlist_t free_entry; //!< Request's entry in the free list.
fr_heap_index_t runnable; //!< entry in the heap of runnable packets
}; /* request_t typedef */
#define RAD_REQUEST_OPTION_CTX (1 << 1)
#define RAD_REQUEST_OPTION_DETAIL (1 << 2)
-/** Allocate a new external request
- *
- * Use for requests produced by listeners
- *
- * @param[in] _ctx Talloc ctx to bind the request to.
- * @param[in] _args Optional arguments that control how the request is initialised.
- */
-#define request_alloc_external(_ctx, _args) \
- _request_alloc( __FILE__, __LINE__, (_ctx), REQUEST_TYPE_EXTERNAL, (_args))
+#define request_init(_ctx, _type, _args) \
+ _request_init(__FILE__, __LINE__, _ctx, _type, _args)
-/** Allocate a new internal request
- *
- * Use for requests produced by modules and unlang
- *
- * @param[in] _ctx Talloc ctx to bind the request to.
- * @param[in] _args Optional arguments that control how the request is initialised.
- */
-#define request_alloc_internal(_ctx, _args) \
- _request_alloc( __FILE__, __LINE__, (_ctx), REQUEST_TYPE_INTERNAL, (_args))
+int _request_init(char const *file, int line,
+ request_t *request, request_type_t type,
+ request_init_args_t const *args);
-request_t *_request_alloc(char const *file, int line, TALLOC_CTX *ctx,
- request_type_t type, request_init_args_t const *args);
+int request_slab_deinit(request_t *request);
/** Allocate a new external request outside of the request pool
*
/*
* Allocate a request to run asynchronously in the interpreter.
*/
- request = request_alloc_internal(NULL, (&(request_init_args_t){ .detachable = true }));
+ request = request_local_alloc_internal(NULL, (&(request_init_args_t){ .detachable = true }));
/*
* Add the args to the request data, so they can be picked up by the
box.vb_time_delta = fr_time_delta_from_msec(10);
}
- if (fr_time_delta_cmp(box.vb_time_delta, main_config->max_request_time) > 0) {
+ if (fr_time_delta_cmp(box.vb_time_delta, main_config->worker.max_request_time) > 0) {
cf_log_warn(subcs, "Timeout value %pV too large - limiting it to max_request_time of %pV",
- &box, fr_box_time_delta(main_config->max_request_time));
+ &box, fr_box_time_delta(main_config->worker.max_request_time));
- box.vb_time_delta = main_config->max_request_time;
+ box.vb_time_delta = main_config->worker.max_request_time;
}
rcode = unlang_compile(vs, subcs, &mod_actions_timeout, rules, &instruction);
intp = stack->intp;
+ request->master_state = REQUEST_DONE;
switch (request->type) {
case REQUEST_TYPE_EXTERNAL:
intp->funcs.done_external(request, stack->result, intp->uctx);
intp->funcs.done_detached(request, stack->result, intp->uctx); /* Callback will usually free the request */
break;
}
-
- request->master_state = REQUEST_DONE;
}
static inline CC_HINT(always_inline)
*/
fr_assert(fr_dict_proto_dict(namespace) == namespace);
- child = request_alloc_internal(detachable ? NULL : parent,
- (&(request_init_args_t){
+ child = request_local_alloc_internal(detachable ? NULL : parent,
+ (&(request_init_args_t){
.parent = parent,
.namespace = namespace,
.detachable = detachable
- }));
+ }));
if (!child) return NULL;
/*
if (!head->flags.can_purify) return 0;
- request = request_alloc_internal(NULL, NULL);
+ request = request_local_alloc_internal(NULL, NULL);
if (!request) return -1;
if (intp) unlang_interpret_set(request, intp);
/*
* Allocate a fake request_t structure.
*/
- fake = request_alloc_internal(request, &(request_init_args_t){ .parent = request });
+ fake = request_local_alloc_internal(request, &(request_init_args_t){ .parent = request });
fr_assert(fr_pair_list_empty(&fake->request_pairs));
t = talloc_get_type_abort(tls_session->opaque, eap_fast_tunnel_t);
/*
* Pick the shorter one
*/
- inst->timeout = fr_time_delta_gt(main_config->max_request_time, fr_time_delta_from_sec(EXEC_TIMEOUT)) ?
+ inst->timeout = fr_time_delta_gt(main_config->worker.max_request_time, fr_time_delta_from_sec(EXEC_TIMEOUT)) ?
fr_time_delta_from_sec(EXEC_TIMEOUT):
- main_config->max_request_time;
+ main_config->worker.max_request_time;
}
else {
if (fr_time_delta_lt(inst->timeout, fr_time_delta_from_sec(1))) {
/*
* Blocking a request longer than max_request_time isn't going to help anyone.
*/
- if (fr_time_delta_gt(inst->timeout, main_config->max_request_time)) {
+ if (fr_time_delta_gt(inst->timeout, main_config->worker.max_request_time)) {
cf_log_err(conf, "Timeout '%pVs' is too large (maximum: %pVs)",
- fr_box_time_delta(inst->timeout), fr_box_time_delta(main_config->max_request_time));
+ fr_box_time_delta(inst->timeout), fr_box_time_delta(main_config->worker.max_request_time));
return -1;
}
}
* have mrt=mrc=mrd = 0, which means "retransmit
* forever". We allow that, with the restriction that
* the server core will automatically free the request at
- * max_request_time.
+ * request.timeout.
*/
if (inst->allowed[FR_RADIUS_CODE_ACCOUNTING_REQUEST]) {
FR_TIME_DELTA_BOUND_CHECK("Accounting-Request.initial_rtx_time", inst->retry[FR_RADIUS_CODE_ACCOUNTING_REQUEST].irt, >=, fr_time_delta_from_sec(1));
Platforms:: Any
-Note:: You may need to increase `max_request_time` in radius.conf for this test.
+Note:: You may need to increase `request.timeout` in radius.conf for this test.
This test verifies behaviour when no status_check is configured.
Platforms:: Any
-Note:: You may need to increase `max_request_time` in radius.conf for this test.
+Note:: You may need to increase `request.timeout` in radius.conf for this test.
This test verifies the behaviour when a `Status-Server` packet is configured for status checks.
Platforms:: Any
-Note:: You may need to increase `max_request_time` in radius.conf for this test.
+Note:: You may need to increase `request.timeout` in radius.conf for this test.
See description for previous test.
* Send <<PAP Access-Request>>s at a high rate for 30 minutes.
** Record memory usage every 5 minutes.
** Ensure memory usage stabilises within 15 minutes and does not continue to increase.
-