return ml;
}
-static void _module_global_list_init(void *uctx)
+static int _module_global_list_init(void *uctx)
{
dl_modules = dl_module_loader_init(uctx);
MEM(module_global_inst_list = fr_heap_alloc(NULL, _module_instance_global_cmp, module_instance_t, inst_idx, 256));
* tree is in place...
*/
global_lib_init();
+
+ return 0;
}
static int _module_global_list_free(UNUSED void *uctx)
*/
/** Perform global initialisation for modules
 *
 * Initialisation is run once per process lifetime via fr_atexit_global_once();
 * the matching _module_global_list_free() is registered to run at exit.
 *
 * @param[in] lib_dir	where to search for dynamic modules
 *			(passed through as the init function's uctx).
 */
void modules_init(char const *lib_dir)
{
	/*
	 *	Create the global module heap we use for
	 *	common indexes in the thread-specific
	 *	heaps.
	 *
	 *	The init result is deliberately discarded (_res == NULL):
	 *	this function returns void, so there is no way to
	 *	propagate failure to the caller.  Do NOT declare an
	 *	unused "ret" here — it trips -Wunused-variable.
	 */
	fr_atexit_global_once(NULL, _module_global_list_init, _module_global_list_free, UNCONST(char *, lib_dir));
}
*
* ...when functions are tested directly in the fr_atexit_global_once macro
*/
-static inline void _fr_atexit_global_once_funcs(fr_atexit_t init_func, fr_atexit_t free_func, void *uctx)
+static inline int _fr_atexit_global_once_funcs(fr_atexit_t init_func, fr_atexit_t free_func, void *uctx)
{
- if (init_func) init_func(uctx);
- if (free_func) free_func(uctx);
+ if (init_func) if (init_func(uctx) < 0) return -1;
+ if (free_func) fr_atexit_global(free_func, uctx);
+
+ return 0;
}
+static inline void fr_atexit_noop(void) {}
+
/** Setup pair of global init/free functions
 *
 * Simplifies setting up data structures the first time a given function
 * is called.
 *
 * Will not share init status outside of the function.
 *
 * @param[out] _res	Where to write the result of the init
 *			function if called.  Set to 0 when init has
 *			already run (or ran successfully), -1 if the
 *			init function failed.
 *			May be NULL (result is discarded).
 * @param[in] _init	function to call.  Will be called once
 *			during the process lifetime.
 *			May be NULL.
 * @param[in] _free	function to register for process exit.
 *			May be NULL.
 * @param[in] _uctx	data to be passed to the init/free functions.
 */
#define fr_atexit_global_once(_res, _init, _free, _uctx) \
do { \
	static atomic_bool	_init_done = false; \
	static pthread_mutex_t	_init_mutex = PTHREAD_MUTEX_INITIALIZER; \
	void			*_our_uctx = (_uctx);	/* evaluate the uctx argument exactly once */ \
	if (unlikely(!atomic_load(&_init_done))) { \
		pthread_mutex_lock(&_init_mutex); \
		if (!atomic_load(&_init_done)) { \
			if (_fr_atexit_global_once_funcs(_init, _free, _our_uctx) < 0) { \
				_Generic((_res), int : _res = -1, default: fr_atexit_noop()); \
				pthread_mutex_unlock(&_init_mutex); \
				/* \
				 *	Bail out of the do/while so we do NOT: \
				 *	- mark init as done (a later call may retry), \
				 *	- unlock the mutex a second time (UB), \
				 *	- clobber _res with 0 below. \
				 */ \
				break; \
			} \
			atomic_store(&_init_done, true); \
		} \
		pthread_mutex_unlock(&_init_mutex); \
	} \
	_Generic((_res), int : _res = 0, default: fr_atexit_noop()); \
} while (0)
/** Set a destructor for thread local storage to free the memory on thread exit
*
fr_strerror_clear();
ev->is_registered = false;
-
+
/*
* If the child exited before kevent() was
* called, we need to get its status via
/*
* The user event acts as a surrogate for
- * an EVFILT_PROC event, and will be evaluated
+ * an EVFILT_PROC event, and will be evaluated
* during the next loop through the event loop.
*
* It will be automatically deleted when the
* callback here directly, but this lead to
* multiple problems, the biggest being that
* setting requests back to resumable failed
- * because they were not yet yielded,
+ * because they were not yet yielded,
* leading to hangs.
*/
if (fr_event_user_insert(ev, el, &ev->early_exit.ev, true, _fr_event_pid_early_exit, ev) < 0) {
* - 0 on success.
* - -1 on error.
*/
-int _fr_event_user_insert(NDEBUG_LOCATION_ARGS
+int _fr_event_user_insert(NDEBUG_LOCATION_ARGS
TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_user_t **ev_p,
bool trigger, fr_event_user_cb_t callback, void *uctx)
{
return 0;
}
-static void _event_build_indexes(UNUSED void *uctx)
+static int _event_build_indexes(UNUSED void *uctx)
{
unsigned int i;
for (i = 0; i < NUM_ELEMENTS(filter_maps); i++) event_fd_func_index_build(&filter_maps[i]);
+ return 0;
}
/** Initialise a new event list
{
fr_event_list_t *el;
struct kevent kev;
+ int ret;
/*
* Build the map indexes the first time this
* function is called.
*/
- fr_atexit_global_once(_event_build_indexes, _event_free_indexes, NULL);
+ fr_atexit_global_once(ret, _event_build_indexes, _event_free_indexes, NULL);
+ if (unlikely(ret < 0)) return NULL;
el = talloc_zero(ctx, fr_event_list_t);
if (!fr_cond_assert(el)) {
return 0;
}
-static void _pcre_globals_configure(UNUSED void *uctx)
+static int _pcre_globals_configure(UNUSED void *uctx)
{
#ifdef PCRE_CONFIG_JIT
int *do_jit = 0;
#endif
pcre_malloc = _pcre_talloc; /* pcre_malloc is a global provided by libpcre */
pcre_free = _pcre_talloc_free; /* pcre_free is a global provided by libpcre */
+
+ return 0;
}
/** Free thread local data
char const *error;
int offset;
int cflags = 0;
+ int ret;
regex_t *preg;
- fr_atexit_global_once(_pcre_globals_configure, _pcre_globals_reset, NULL);
+ fr_atexit_global_once(ret, _pcre_globals_configure, _pcre_globals_reset, NULL);
+ if (unlikely(ret < 0)) return -1;
+
if (unlikely(pcre_tls_init() < 0)) return -1;