memcpy(&s->listen, data, sizeof(s->listen));
s->number = nr->num_sockets++;
- MEM(s->waiting = fr_heap_alloc(s, waiting_cmp, fr_channel_data_t, channel.heap_id));
+ MEM(s->waiting = fr_heap_alloc(s, waiting_cmp, fr_channel_data_t, channel.heap_id, 0));
talloc_set_destructor(s, _network_socket_free);
memcpy(&s->listen, data, sizeof(s->listen));
s->number = nr->num_sockets++;
- MEM(s->waiting = fr_heap_alloc(s, waiting_cmp, fr_channel_data_t, channel.heap_id));
+ MEM(s->waiting = fr_heap_alloc(s, waiting_cmp, fr_channel_data_t, channel.heap_id, 0));
talloc_set_destructor(s, _network_socket_free);
goto fail2;
}
- nr->replies = fr_heap_alloc(nr, reply_cmp, fr_channel_data_t, channel.heap_id);
+ nr->replies = fr_heap_alloc(nr, reply_cmp, fr_channel_data_t, channel.heap_id, 0);
if (!nr->replies) {
fr_strerror_const_push("Failed creating heap for replies");
goto fail2;
goto fail;
}
- worker->runnable = fr_heap_talloc_alloc(worker, worker_runnable_cmp, request_t, runnable_id);
+ worker->runnable = fr_heap_talloc_alloc(worker, worker_runnable_cmp, request_t, runnable_id, 0);
if (!worker->runnable) {
fr_strerror_const("Failed creating runnable heap");
goto fail;
}
- worker->time_order = fr_heap_talloc_alloc(worker, worker_time_order_cmp, request_t, time_order_id);
+ worker->time_order = fr_heap_talloc_alloc(worker, worker_time_order_cmp, request_t, time_order_id, 0);
if (!worker->time_order) {
fr_strerror_const("Failed creating time_order heap");
goto fail;
.from_dir = true
};
- MEM(frame->heap = fr_heap_alloc(frame->directory, filename_cmp, cf_file_heap_t, heap_id));
+ MEM(frame->heap = fr_heap_alloc(frame->directory, filename_cmp, cf_file_heap_t, heap_id, 0));
/*
* Read the whole directory before loading any
* https://code.facebook.com/posts/1499322996995183/solving-the-mystery-of-link-imbalance-a-metastable-failure-state-at-scale/
*/
if (!pool->spread) {
- pool->heap = fr_heap_talloc_alloc(pool, last_reserved_cmp, fr_pool_connection_t, heap_id);
+ pool->heap = fr_heap_talloc_alloc(pool, last_reserved_cmp, fr_pool_connection_t, heap_id, 0);
/*
	 *	For some types of connections we need to use a different
* algorithm, because load balancing benefits are secondary
* That way we maximise time between connection use.
*/
} else {
- pool->heap = fr_heap_talloc_alloc(pool, last_released_cmp, fr_pool_connection_t, heap_id);
+ pool->heap = fr_heap_talloc_alloc(pool, last_released_cmp, fr_pool_connection_t, heap_id, 0);
}
if (!pool->heap) {
ERROR("%s: Failed creating connection heap", __FUNCTION__);
*/
DO_CONNECTION_ALLOC(tconn);
- MEM(tconn->pending = fr_heap_talloc_alloc(tconn, _trunk_request_prioritise,
- fr_trunk_request_t, heap_id));
+ MEM(tconn->pending = fr_heap_talloc_alloc(tconn, _trunk_request_prioritise, fr_trunk_request_t, heap_id, 0));
fr_dlist_talloc_init(&tconn->sent, fr_trunk_request_t, entry);
fr_dlist_talloc_init(&tconn->cancel, fr_trunk_request_t, entry);
fr_dlist_talloc_init(&tconn->cancel_sent, fr_trunk_request_t, entry);
* Request backlog queue
*/
MEM(trunk->backlog = fr_heap_talloc_alloc(trunk, _trunk_request_prioritise,
- fr_trunk_request_t, heap_id));
+ fr_trunk_request_t, heap_id, 0));
/*
* Connection queues and trees
*/
MEM(trunk->active = fr_heap_talloc_alloc(trunk, trunk->funcs.connection_prioritise,
- fr_trunk_connection_t, heap_id));
+ fr_trunk_connection_t, heap_id, 0));
fr_dlist_talloc_init(&trunk->init, fr_trunk_connection_t, entry);
fr_dlist_talloc_init(&trunk->connecting, fr_trunk_connection_t, entry);
fr_dlist_talloc_init(&trunk->full, fr_trunk_connection_t, entry);
unlang_interpret_synchronous_t *intps;
MEM(intps = talloc_zero(ctx, unlang_interpret_synchronous_t));
- MEM(intps->runnable = fr_heap_talloc_alloc(intps, fr_pointer_cmp, request_t, runnable_id));
+ MEM(intps->runnable = fr_heap_talloc_alloc(intps, fr_pointer_cmp, request_t, runnable_id, 0));
if (el) {
intps->el = el;
} else {
el->kq = -1; /* So destructor can be used before kqueue() provides us with fd */
talloc_set_destructor(el, _event_list_free);
- el->times = fr_heap_talloc_alloc(el, fr_event_timer_cmp, fr_event_timer_t, heap_id);
+ el->times = fr_heap_talloc_alloc(el, fr_event_timer_cmp, fr_event_timer_t, heap_id, 0);
if (!el->times) {
fr_strerror_const("Failed allocating event heap");
error:
}
#ifdef LOCAL_PID
- el->pids = fr_heap_talloc_alloc(el, fr_event_pid_cmp, fr_event_pid_t, heap_id);
+ el->pids = fr_heap_talloc_alloc(el, fr_event_pid_cmp, fr_event_pid_t, heap_id, 0);
if (!el->pids) {
fr_strerror_const("Failed allocating PID heap");
goto error;
void **p; //!< Array of nodes.
};
+#define INITIAL_CAPACITY 2048
+
/*
* First node in a heap is element 1. Children of i are 2i and
* 2i+1. These macros wrap the logic, so the code is more
static void fr_heap_bubble(fr_heap_t *hp, fr_heap_index_t child);
-fr_heap_t *_fr_heap_alloc(TALLOC_CTX *ctx, fr_heap_cmp_t cmp, char const *type, size_t offset)
+/** Return how many bytes need to be allocated to hold a heap of a given size
+ *
+ * This is useful for passing to talloc[_zero]_pooled_object to avoid additional mallocs.
+ *
+ * @param[in] count The initial element count.
+ * @return The number of bytes to pre-allocate.
+ */
+size_t fr_heap_pre_alloc_size(unsigned int count)
+{
+ return sizeof(fr_heap_t) + sizeof(void *) * count;
+}
+
+fr_heap_t *_fr_heap_alloc(TALLOC_CTX *ctx, fr_heap_cmp_t cmp, char const *type, size_t offset, unsigned int init)
{
fr_heap_t *hp;
- hp = talloc_zero(ctx, fr_heap_t);
- if (!hp) return NULL;
+ /*
+ * If we've been provided with an initial
+ * element count, assume expanding past
+ * that size is going to be the uncommon
+ * case.
+ */
+ if (init) {
+ hp = talloc_zero_pooled_object(ctx, fr_heap_t, 1, sizeof(void *) * init);
+ } else {
+ init = INITIAL_CAPACITY;
+ hp = talloc_zero(ctx, fr_heap_t);
+ }
+ if (unlikely(!hp)) return NULL;
- hp->size = 2048;
+ hp->size = init;
hp->p = talloc_array(hp, void *, hp->size);
if (unlikely(!hp->p)) {
talloc_free(hp);
* into the heap.
*/
hp->p[0] = (void *)UINTPTR_MAX;
-
hp->type = type;
hp->cmp = cmp;
hp->offset = offset;
return 0;
}
-static inline void fr_heap_bubble(fr_heap_t *hp, fr_heap_index_t child)
+static inline CC_HINT(always_inline) void fr_heap_bubble(fr_heap_t *hp, fr_heap_index_t child)
{
if (!fr_cond_assert(child > 0)) return;
typedef unsigned int fr_heap_index_t;
typedef unsigned int fr_heap_iter_t;
-/*
+/** Comparator to order heap elements
+ *
* Return negative numbers to put 'a' at the top of the heap.
* Return positive numbers to put 'b' at the top of the heap.
*/
typedef int8_t (*fr_heap_cmp_t)(void const *a, void const *b);
+/** The main heap structure
+ *
+ */
typedef struct fr_heap_s fr_heap_t;
+size_t fr_heap_pre_alloc_size(unsigned int count);
+
/** Creates a heap that can be used with non-talloced elements
*
* @param[in] _ctx Talloc ctx to allocate heap in.
* @param[in] _cmp Comparator used to compare elements.
* @param[in] _type Of elements.
* @param[in] _field to store heap indexes in.
+ * @param[in] _init the initial number of elements to allocate.
+ * Pass 0 to use the default.
*/
-#define fr_heap_alloc(_ctx, _cmp, _type, _field) \
- _fr_heap_alloc(_ctx, _cmp, NULL, (size_t)offsetof(_type, _field))
+#define fr_heap_alloc(_ctx, _cmp, _type, _field, _init) \
+ _fr_heap_alloc(_ctx, _cmp, NULL, (size_t)offsetof(_type, _field), _init)
/** Creates a heap that verifies elements are of a specific talloc type
*
* @param[in] _cmp Comparator used to compare elements.
* @param[in] _talloc_type of elements.
* @param[in] _field to store heap indexes in.
+ * @param[in] _init the initial number of elements to allocate.
+ * Pass 0 to use the default.
* @return
* - A new heap.
* - NULL on error.
*/
-#define fr_heap_talloc_alloc(_ctx, _cmp, _talloc_type, _field) \
- _fr_heap_alloc(_ctx, _cmp, #_talloc_type, (size_t)offsetof(_talloc_type, _field))
+#define fr_heap_talloc_alloc(_ctx, _cmp, _talloc_type, _field, _init) \
+ _fr_heap_alloc(_ctx, _cmp, #_talloc_type, (size_t)offsetof(_talloc_type, _field), _init)
-fr_heap_t *_fr_heap_alloc(TALLOC_CTX *ctx, fr_heap_cmp_t cmp, char const *talloc_type, size_t offset) CC_HINT(nonnull(2));
+fr_heap_t *_fr_heap_alloc(TALLOC_CTX *ctx, fr_heap_cmp_t cmp, char const *talloc_type, size_t offset, unsigned int init) CC_HINT(nonnull(2));
/** Check if an entry is inserted into a heap
*
done_init = true;
}
- hp = fr_heap_alloc(NULL, heap_cmp, heap_thing, heap);
+ hp = fr_heap_alloc(NULL, heap_cmp, heap_thing, heap, 0);
TEST_CHECK(hp != NULL);
array = calloc(HEAP_TEST_SIZE, sizeof(heap_thing));
done_init = true;
}
- hp = fr_heap_alloc(NULL, heap_cmp, heap_thing, heap);
+ hp = fr_heap_alloc(NULL, heap_cmp, heap_thing, heap, 0);
TEST_CHECK(hp != NULL);
array = calloc(HEAP_TEST_SIZE, sizeof(heap_thing));
done_init = true;
}
- hp = fr_heap_alloc(NULL, heap_cmp, heap_thing, heap);
+ hp = fr_heap_alloc(NULL, heap_cmp, heap_thing, heap, 0);
TEST_CHECK(hp != NULL);
array = calloc(HEAP_CYCLE_SIZE, sizeof(heap_thing));
lst = fr_lst_alloc(NULL, lst_cmp, lst_thing, index);
TEST_CHECK(lst != NULL);
- hp = fr_heap_alloc(NULL, lst_cmp, lst_thing, index);
+ hp = fr_heap_alloc(NULL, lst_cmp, lst_thing, index, 0);
lst_array = calloc(2 * INITIAL_CAPACITY, sizeof(lst_thing));
hp_array = calloc(2 * INITIAL_CAPACITY, sizeof(lst_thing));
/*
* The heap of entries to expire.
*/
- driver->heap = fr_heap_talloc_alloc(driver, cache_heap_cmp, rlm_cache_rb_entry_t, heap_id);
+ driver->heap = fr_heap_talloc_alloc(driver, cache_heap_cmp, rlm_cache_rb_entry_t, heap_id, 0);
if (!driver->heap) {
ERROR("Failed to create heap for the cache");
return -1;