#include <freeradius-devel/unlang/call.h>
#include <freeradius-devel/unlang/interpret.h>
#include <freeradius-devel/util/dlist.h>
+#include <freeradius-devel/util/minmax_heap.h>
#include <stdalign.h>
int num_channels; //!< actual number of channels
fr_heap_t *runnable; //!< current runnable requests which we've spent time processing
- fr_heap_t *time_order; //!< time ordered heap of requests
+ fr_minmax_heap_t *time_order; //!< time ordered heap of requests
fr_rb_tree_t *dedup; //!< de-dup tree
fr_io_stats_t stats; //!< input / output stats
* Look at the oldest requests, and see if they need to
* be deleted.
*/
- while ((request = fr_heap_peek_tail(worker->time_order)) != NULL) {
+ while ((request = fr_minmax_heap_max_peek(worker->time_order)) != NULL) {
fr_time_t cleanup;
REQUEST_VERIFY(request);
/*
* No more requests, delete the timer.
*/
- request = fr_heap_peek_tail(worker->time_order);
+ request = fr_minmax_heap_max_peek(worker->time_order);
if (!request) return;
cleanup = fr_time_add(request->async->recv_time, worker->config.max_request_time);
* strict time priority. Once they are in the list, they
* are only removed when the request is done / free'd.
*/
- fr_assert(!fr_heap_entry_inserted(request->time_order_id));
- (void) fr_heap_insert(worker->time_order, request);
+ fr_assert(!fr_minmax_heap_entry_inserted(request->time_order_id));
+ (void) fr_minmax_heap_insert(worker->time_order, request);
/*
* Bootstrap the async state machine with the initial
fr_assert(worker->num_active > 0);
worker->num_active--;
- if (fr_heap_entry_inserted(request->time_order_id)) (void) fr_heap_extract(worker->time_order, request);
+ if (fr_minmax_heap_entry_inserted(request->time_order_id)) (void) fr_minmax_heap_extract(worker->time_order, request);
}
/** Send a response packet to the network side
worker->stats.out++;
- fr_assert(!fr_heap_entry_inserted(request->time_order_id));
+ fr_assert(!fr_minmax_heap_entry_inserted(request->time_order_id));
fr_assert(!fr_heap_entry_inserted(request->runnable_id));
#ifndef NDEBUG
TALLOC_CTX *ctx;
fr_listen_t const *listen;
- if (fr_heap_num_elements(worker->time_order) >= (uint32_t) worker->config.max_requests) goto nak;
+ if (fr_minmax_heap_num_elements(worker->time_order) >= (uint32_t) worker->config.max_requests) goto nak;
ctx = request = request_alloc_external(NULL, NULL);
if (!request) goto nak;
* events.
*/
count = 0;
- while ((request = fr_heap_peek(worker->time_order)) != NULL) {
+ while ((request = fr_minmax_heap_min_peek(worker->time_order)) != NULL) {
if (count < 10) {
DEBUG("Worker is exiting - telling request %s to stop", request->name);
count++;
worker_request_time_tracking_end(worker, request, fr_time());
fr_assert(!fr_heap_entry_inserted(request->runnable_id));
- fr_assert(!fr_heap_entry_inserted(request->time_order_id));
+ fr_assert(!fr_minmax_heap_entry_inserted(request->time_order_id));
}
/** Detached request (i.e. one generated by the interpreter with no parent) is now complete
* order heap, but we need to do that for
* detached requests.
*/
- (void)fr_heap_extract(worker->time_order, request);
+ (void)fr_minmax_heap_extract(worker->time_order, request);
/*
* Detached requests have to be freed by us
goto fail;
}
- worker->time_order = fr_heap_talloc_alloc(worker, worker_time_order_cmp, request_t, time_order_id, 0);
+ worker->time_order = fr_minmax_heap_talloc_alloc(worker, worker_time_order_cmp, request_t, time_order_id, 0);
if (!worker->time_order) {
fr_strerror_const("Failed creating time_order heap");
goto fail;
#include <freeradius-devel/util/misc.h>
#include <freeradius-devel/util/syserror.h>
#include <freeradius-devel/util/table.h>
+#include <freeradius-devel/util/minmax_heap.h>
#ifdef HAVE_STDATOMIC_H
# include <stdatomic.h>
fr_dlist_head_t connecting; //!< Connections which are not yet in the open state.
- fr_heap_t *active; //!< Connections which can service requests.
+ fr_minmax_heap_t *active; //!< Connections which can service requests.
fr_dlist_head_t full; //!< Connections which have too many outstanding
///< requests.
#define CONN_REORDER(_tconn) \
do { \
int _ret; \
- if ((fr_heap_num_elements((_tconn)->pub.trunk->active) == 1)) break; \
+ if ((fr_minmax_heap_num_elements((_tconn)->pub.trunk->active) == 1)) break; \
if (!fr_cond_assert((_tconn)->pub.state == FR_TRUNK_CONN_ACTIVE)) break; \
- _ret = fr_heap_extract((_tconn)->pub.trunk->active, (_tconn)); \
+ _ret = fr_minmax_heap_extract((_tconn)->pub.trunk->active, (_tconn)); \
if (!fr_cond_assert_msg(_ret == 0, "Failed extracting conn from active heap: %s", fr_strerror())) break; \
- fr_heap_insert((_tconn)->pub.trunk->active, (_tconn)); \
+ fr_minmax_heap_insert((_tconn)->pub.trunk->active, (_tconn)); \
} while (0)
/** Call a list of watch functions associated with a state
* If we have an active connection then
* return that.
*/
- tconn = fr_heap_peek(trunk->active);
+ tconn = fr_minmax_heap_min_peek(trunk->active);
if (tconn) {
*tconn_out = tconn;
return FR_TRUNK_ENQUEUE_OK;
if (tconn->pub.state == FR_TRUNK_CONN_ACTIVE) {
int ret;
- ret = fr_heap_extract(trunk->active, tconn);
+ ret = fr_minmax_heap_extract(trunk->active, tconn);
if (!fr_cond_assert_msg(ret == 0,
"Failed extracting conn from active heap: %s", fr_strerror())) goto done;
if (tconn->pub.state == FR_TRUNK_CONN_ACTIVE) {
int ret;
- ret = fr_heap_insert(trunk->active, tconn);
+ ret = fr_minmax_heap_insert(trunk->active, tconn);
if (!fr_cond_assert_msg(ret == 0,
"Failed re-inserting conn into active heap: %s", fr_strerror())) goto done;
}
if (conn_state & FR_TRUNK_CONN_INIT) count += fr_dlist_num_elements(&trunk->init);
if (conn_state & FR_TRUNK_CONN_CONNECTING) count += fr_dlist_num_elements(&trunk->connecting);
- if (conn_state & FR_TRUNK_CONN_ACTIVE) count += fr_heap_num_elements(trunk->active);
+ if (conn_state & FR_TRUNK_CONN_ACTIVE) count += fr_minmax_heap_num_elements(trunk->active);
if (conn_state & FR_TRUNK_CONN_FULL) count += fr_dlist_num_elements(&trunk->full);
if (conn_state & FR_TRUNK_CONN_INACTIVE) count += fr_dlist_num_elements(&trunk->inactive);
if (conn_state & FR_TRUNK_CONN_INACTIVE_DRAINING) count += fr_dlist_num_elements(&trunk->inactive_draining);
{
int ret;
- ret = fr_heap_extract(trunk->active, tconn);
+ ret = fr_minmax_heap_extract(trunk->active, tconn);
if (!fr_cond_assert_msg(ret == 0, "Failed extracting conn from active heap: %s", fr_strerror())) return;
}
return;
CONN_BAD_STATE_TRANSITION(FR_TRUNK_CONN_ACTIVE);
}
- ret = fr_heap_insert(trunk->active, tconn); /* re-insert into the active heap*/
+ ret = fr_minmax_heap_insert(trunk->active, tconn); /* re-insert into the active heap*/
if (!fr_cond_assert_msg(ret == 0, "Failed inserting connection into active heap: %s", fr_strerror())) {
trunk_connection_enter_inactive_draining(tconn);
return;
{
fr_trunk_connection_t *head;
- head = fr_heap_peek(trunk->active);
+ head = fr_minmax_heap_min_peek(trunk->active);
/*
* Only rebalance if the top and bottom of
* the heap are not equal.
*/
- if (trunk->funcs.connection_prioritise(fr_heap_peek_tail(trunk->active), head) == 0) return;
+ if (trunk->funcs.connection_prioritise(fr_minmax_heap_max_peek(trunk->active), head) == 0) return;
DEBUG3("Rebalancing requests");
* connection at the top is shifted from that
* position.
*/
- while ((fr_heap_peek(trunk->active) == head) &&
- trunk_connection_requests_requeue(fr_heap_peek_tail(trunk->active),
+ while ((fr_minmax_heap_min_peek(trunk->active) == head) &&
+ trunk_connection_requests_requeue(fr_minmax_heap_max_peek(trunk->active),
FR_TRUNK_REQUEST_STATE_PENDING, 1, false));
}
* connections, start draining "active"
* connections.
*/
- } else if ((tconn = fr_heap_peek_tail(trunk->active))) {
+ } else if ((tconn = fr_minmax_heap_max_peek(trunk->active))) {
/*
* If the connection has no requests associated
* with it then immediately free.
{
uint64_t count = 0;
fr_trunk_connection_t *tconn = NULL;
- fr_heap_iter_t iter;
+ fr_minmax_heap_iter_t iter;
#define COUNT_BY_STATE(_state, _list) \
do { \
} while (0);
if (conn_state & FR_TRUNK_CONN_ACTIVE) {
- for (tconn = fr_heap_iter_init(trunk->active, &iter);
+ for (tconn = fr_minmax_heap_iter_init(trunk->active, &iter);
tconn;
- tconn = fr_heap_iter_next(trunk->active, &iter)) {
+ tconn = fr_minmax_heap_iter_next(trunk->active, &iter)) {
count += fr_trunk_request_count_by_connection(tconn, req_state);
}
}
continue;
case FR_TRUNK_ENQUEUE_NO_CAPACITY:
- fr_assert(fr_heap_num_elements(trunk->active) == 0);
+ fr_assert(fr_minmax_heap_num_elements(trunk->active) == 0);
return;
}
}
if (states & FR_TRUNK_CONN_ACTIVE) {
fr_trunk_connection_t *tconn;
- while ((tconn = fr_heap_peek(trunk->active))) fr_connection_signal_reconnect(tconn->pub.conn, reason);
+ while ((tconn = fr_minmax_heap_min_peek(trunk->active))) fr_connection_signal_reconnect(tconn->pub.conn, reason);
}
RECONNECT_BY_STATE(FR_TRUNK_CONN_INIT, init);
* Each time a connection is freed it removes itself from the list
* its in, which means the head should keep advancing automatically.
*/
- while ((tconn = fr_heap_peek(trunk->active))) fr_connection_signal_halt(tconn->pub.conn);
+ while ((tconn = fr_minmax_heap_min_peek(trunk->active))) fr_connection_signal_halt(tconn->pub.conn);
while ((tconn = fr_dlist_head(&trunk->init))) fr_connection_signal_halt(tconn->pub.conn);
while ((tconn = fr_dlist_head(&trunk->connecting))) fr_connection_signal_halt(tconn->pub.conn);
while ((tconn = fr_dlist_head(&trunk->full))) fr_connection_signal_halt(tconn->pub.conn);
/*
* Connection queues and trees
*/
- MEM(trunk->active = fr_heap_talloc_alloc(trunk, trunk->funcs.connection_prioritise,
+ MEM(trunk->active = fr_minmax_heap_talloc_alloc(trunk, trunk->funcs.connection_prioritise,
fr_trunk_connection_t, heap_id, 0));
fr_dlist_talloc_init(&trunk->init, fr_trunk_connection_t, entry);
fr_dlist_talloc_init(&trunk->connecting, fr_trunk_connection_t, entry);
hmac_tests.mk \
libfreeradius-util.mk \
lst_tests.mk \
+ minmax_heap_tests.mk \
pair_legacy_tests.mk \
pair_list_perf_test.mk \
pair_tests.mk \
machine.c \
md4.c \
md5.c \
+ minmax_heap.c \
misc.c \
missing.c \
net.c \
--- /dev/null
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+ */
+
+/** Functions for a minmax heap
+ *
+ * @file src/lib/util/minmax_heap.c
+ *
+ * @copyright 2021 Network RADIUS SARL (legal@networkradius.com)
+ */
+RCSID("$Id$")
+
+#include <freeradius-devel/util/minmax_heap.h>
+#include <freeradius-devel/util/strerror.h>
+#include <freeradius-devel/util/debug.h>
+#include <freeradius-devel/util/misc.h>
+
+/*
+ * The internal representation of minmax heaps is that of plain
+ * binary heaps. They differ in where entries are placed, and how
+ * the operations are done. Also, minmax heaps allow peeking or
+ * popping the maximum value as well as the minimum.
+ *
+ * The heap itself is an array of pointers to objects, each of which
+ * contains a key and an fr_minmax_heap_index_t value indicating the
+ * location in the array holding the pointer to it. To allow 0 to
+ * represent objects not in a heap, the pointers start at element
+ * one of the array rather than element zero. The offset of that
+ * fr_minmax_heap_index_t value is held inside the heap structure.
+ *
+ * Minmax heaps are trees, like binary heaps, but the levels (all
+ * values at the same depth) alternate between "min" (starting at
+ * depth 0, i.e. the root) and "max" levels. The operations preserve
+ * these properties:
+ * - A node on a min level will compare as less than or equal to any
+ * of its descendants.
+ * - A node on a max level will compare as greater than or equal to
+ * any of its descendants.
+ */
+
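+/*
+ *	A worked example of the layout (illustrative only): with seven elements
+ *	in the heap, the array slots map to levels as
+ *
+ *		index:	1	| 2 3	| 4 5 6 7
+ *		depth:	0 (min)	| 1 (max)	| 2 (min)
+ *
+ *	so p[1] holds the overall minimum, and the overall maximum is whichever
+ *	of p[2] and p[3] compares larger.
+ */
+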
+struct fr_minmax_heap_s {
+ unsigned int size; //!< Number of nodes allocated.
+ size_t offset; //!< Offset of heap index in element structure.
+
+ unsigned int num_elements; //!< Number of nodes used.
+
+ char const *type; //!< Talloc type of elements.
+ fr_minmax_heap_cmp_t cmp; //!< Comparator function.
+
+ void *p[]; //!< Array of nodes.
+};
+
+typedef struct fr_minmax_heap_s minmax_heap_t;
+
+#define INITIAL_CAPACITY 2048
+
+/*
+ * First node in a heap is element 1. Children of i are 2i and
+ * 2i+1. These macros wrap the logic, so the code is more
+ * descriptive.
+ */
+#define HEAP_PARENT(_x) ((_x) >> 1)
+#define HEAP_GRANDPARENT(_x) HEAP_PARENT(HEAP_PARENT(_x))
+#define HEAP_LEFT(_x) (2 * (_x))
+#define HEAP_RIGHT(_x) (2 * (_x) + 1 )
+#define HEAP_SWAP(_a, _b) { void *_tmp = _a; _a = _b; _b = _tmp; }
+
+static inline uint8_t depth(fr_minmax_heap_index_t i)
+{
+ return fr_high_bit_pos(i) - 1;
+}
+
+static inline bool is_min_level_index(fr_minmax_heap_index_t i)
+{
+ return (depth(i) & 1) == 0;
+}
+
+static inline bool is_descendant(fr_minmax_heap_index_t candidate, fr_minmax_heap_index_t ancestor)
+{
+ fr_minmax_heap_index_t level_min;
+ uint8_t candidate_depth = depth(candidate);
+ uint8_t ancestor_depth = depth(ancestor);
+
+ /*
+	 *	This will never happen given its use by fr_minmax_heap_extract(),
+ * but it's here for safety and to make static analysis happy.
+ */
+ if (unlikely(candidate_depth < ancestor_depth)) return false;
+
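+	/*
+	 *	The descendants of ancestor at candidate's depth occupy the contiguous
+	 *	index range [ancestor << d, (ancestor << d) + 2^d - 1], where
+	 *	d = candidate_depth - ancestor_depth; e.g. the depth 3 descendants of
+	 *	index 3 are indices 12 through 15.
+	 */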
+	level_min = ancestor << (candidate_depth - ancestor_depth);
+	return (candidate - level_min) < (((fr_minmax_heap_index_t) 1) << (candidate_depth - ancestor_depth));
+}
+
+#define is_max_level_index(_i) (!(is_min_level_index(_i)))
+
+fr_minmax_heap_t *_fr_minmax_heap_alloc(TALLOC_CTX *ctx, fr_minmax_heap_cmp_t cmp, char const *type, size_t offset, unsigned int init)
+{
+ fr_minmax_heap_t *hp;
+ minmax_heap_t *h;
+
+ if (!init) init = INITIAL_CAPACITY;
+
+ hp = talloc(ctx, fr_minmax_heap_t);
+ if (unlikely(!hp)) return NULL;
+
+ /*
+ * For small heaps (< 40 elements) the
+ * increase in memory locality gives us
+ * a 100% performance increase
+ * (talloc headers are big);
+	 *	(talloc headers are big).
+ h = (minmax_heap_t *)talloc_array(hp, uint8_t, sizeof(minmax_heap_t) + (sizeof(void *) * (init + 1)));
+	if (unlikely(!h)) {
+		talloc_free(hp);
+		return NULL;
+	}
+ talloc_set_type(h, minmax_heap_t);
+
+ *h = (minmax_heap_t){
+ .size = init,
+ .type = type,
+ .cmp = cmp,
+ .offset = offset
+ };
+
+ /*
+ * As we're using unsigned index values
+ * index 0 is a special value meaning
+ * that the data isn't currently inserted
+ * into the heap.
+ */
+ h->p[0] = (void *)UINTPTR_MAX;
+
+ *hp = h;
+
+ return hp;
+}
+
+static CC_HINT(nonnull) int minmax_heap_expand(fr_minmax_heap_t *hp)
+{
+ minmax_heap_t *h = *hp;
+ unsigned int n_size;
+
+ /*
+ * One will almost certainly run out of RAM first,
+ * but the size must be representable. This form
+ * of the check avoids overflow.
+ */
+ if (unlikely(h->size > UINT_MAX - h->size)) {
+ if (h->size == UINT_MAX) {
+ fr_strerror_const("Heap is full");
+ return -1;
+ }
+ n_size = UINT_MAX;
+ } else {
+ n_size = 2 * h->size;
+ }
+
+ h = (minmax_heap_t *)talloc_realloc(hp, h, uint8_t, sizeof(minmax_heap_t) + (sizeof(void *) * (n_size + 1)));
+ if (unlikely(!h)) {
+ fr_strerror_printf("Failed expanding heap to %u elements (%u bytes)",
+ n_size, (n_size * (unsigned int)sizeof(void *)));
+ return -1;
+ }
+
+ talloc_set_type(h, minmax_heap_t);
+ h->size = n_size;
+ *hp = h;
+ return 0;
+}
+
+
+static inline CC_HINT(always_inline, nonnull) fr_minmax_heap_index_t index_get(minmax_heap_t *h, void *data)
+{
+ return *((fr_minmax_heap_index_t const *)(((uint8_t const *)data) + h->offset));
+}
+
+static inline CC_HINT(always_inline, nonnull) void index_set(minmax_heap_t *h, void *data, fr_minmax_heap_index_t idx)
+{
+ *((fr_minmax_heap_index_t *)(((uint8_t *)data) + h->offset)) = idx;
+}
+
+static inline CC_HINT(always_inline, nonnull) bool has_children(minmax_heap_t *h, fr_minmax_heap_index_t idx)
+{
+ return HEAP_LEFT(idx) <= h->num_elements;
+}
+
+static inline bool has_grandchildren(minmax_heap_t *h, fr_minmax_heap_index_t i)
+{
+ return HEAP_LEFT(HEAP_LEFT(i)) <= h->num_elements;
+}
+
+#define OFFSET_SET(_heap, _idx) index_set(_heap, _heap->p[_idx], _idx);
+#define OFFSET_RESET(_heap, _idx) index_set(_heap, _heap->p[_idx], 0);
+
+/*
+ * The minmax heap has the same basic idea as binary heaps:
+ * 1. To insert a value, put it at the bottom and push it up to where it should be.
+ * 2. To remove a value, take it out; if it's not at the bottom, move what is at the
+ * bottom up to fill the hole, and push it down to where it should be.
+ * The difference is how you push, and the invariants to preserve.
+ *
+ * Since we store the index in the item (or zero if it's not in the heap), when we
+ * move an item around, we have to set its index. The general principle is that we
+ * set it when we put the item in the place it will ultimately be when the push_down()
+ * or push_up() is finished.
+ */
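+
+/*
+ *	Concretely: after fr_minmax_heap_insert() returns, index_get() on the element
+ *	yields its current (1-based) slot in p[]; fr_minmax_heap_extract() resets it to
+ *	zero, which is what fr_minmax_heap_entry_inserted() tests for.
+ */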
+
+/** Find the index of the minimum child or grandchild of the entry at a given index.
+ * precondition: has_children(h, idx), i.e. there is stuff in the heap below
+ * idx.
+ *
+ * These functions are called by push_down_{min, max}() with idx the index of
+ * an element moved into that position but which may or may not be where it
+ * should ultimately go. The minmax heap property still holds for its (positional,
+ * at least) descendants, though. That lets us cut down on the number of
+ * comparisons over brute force iteration over every child and grandchild.
+ *
+ * In the case where the desired item must be a child, there are at most two,
+ * so we just do it inline; no loop needed.
+ */
+static CC_HINT(nonnull) fr_minmax_heap_index_t min_child_or_grandchild(minmax_heap_t *h, fr_minmax_heap_index_t idx)
+{
+ fr_minmax_heap_index_t lwb, upb, min;
+
+ if (is_max_level_index(idx) || !has_grandchildren(h, idx)) {
+ /* minimum must be a chld */
+		/* minimum must be a child */
+ upb = HEAP_RIGHT(idx);
+ if (upb <= h->num_elements && h->cmp(h->p[upb], h->p[min]) < 0) min = upb;
+ return min;
+ }
+
+ /* minimum must be a grandchild, unless the right child is childless */
+ if (!has_children(h, HEAP_RIGHT(idx))) {
+ min = HEAP_RIGHT(idx);
+ lwb = HEAP_LEFT(HEAP_LEFT(idx));
+ } else {
+ min = HEAP_LEFT(HEAP_LEFT(idx));
+ lwb = min + 1;
+ }
+ upb = HEAP_RIGHT(HEAP_RIGHT(idx));
+
+ /* Some grandchildren may not exist. */
+ if (upb > h->num_elements) upb = h->num_elements;
+
+ for (fr_minmax_heap_index_t i = lwb; i <= upb; i++) {
+ if (h->cmp(h->p[i], h->p[min]) < 0) min = i;
+ }
+ return min;
+}
+
+static CC_HINT(nonnull) fr_minmax_heap_index_t max_child_or_grandchild(minmax_heap_t *h, fr_minmax_heap_index_t idx)
+{
+ fr_minmax_heap_index_t lwb, upb, max;
+
+ if (is_min_level_index(idx) || !has_grandchildren(h, idx)) {
+		/* maximum must be a child */
+ max = HEAP_LEFT(idx);
+ upb = HEAP_RIGHT(idx);
+ if (upb <= h->num_elements && h->cmp(h->p[upb], h->p[max]) > 0) max = upb;
+ return max;
+ }
+
+	/* maximum must be a grandchild, unless the right child is childless */
+ if (!has_children(h, HEAP_RIGHT(idx))) {
+ max = HEAP_RIGHT(idx);
+ lwb = HEAP_LEFT(HEAP_LEFT(idx));
+ } else {
+ max = HEAP_LEFT(HEAP_LEFT(idx));
+ lwb = max + 1;
+ }
+ upb = HEAP_RIGHT(HEAP_RIGHT(idx));
+
+ /* Some grandchildren may not exist. */
+ if (upb > h->num_elements) upb = h->num_elements;
+
+ for (fr_minmax_heap_index_t i = lwb; i <= upb; i++) {
+ if (h->cmp(h->p[i], h->p[max]) > 0) max = i;
+ }
+ return max;
+}
+
+/**
+ * precondition: idx is the index of an existing entry on a min level
+ */
+static inline CC_HINT(always_inline, nonnull) void push_down_min(minmax_heap_t *h, fr_minmax_heap_index_t idx)
+{
+ while (has_children(h, idx)) {
+ fr_minmax_heap_index_t m = min_child_or_grandchild(h, idx);
+
+ /*
+ * If p[m] doesn't precede p[idx], we're done.
+ */
+ if (h->cmp(h->p[m], h->p[idx]) >= 0) break;
+
+ HEAP_SWAP(h->p[idx], h->p[m]);
+ OFFSET_SET(h, idx);
+
+ /*
+ * The entry now at m may belong where the parent is.
+ */
+ if (HEAP_GRANDPARENT(m) == idx && h->cmp(h->p[m], h->p[HEAP_PARENT(m)]) > 0) {
+ HEAP_SWAP(h->p[HEAP_PARENT(m)], h->p[m]);
+ OFFSET_SET(h, HEAP_PARENT(m));
+ }
+ idx = m;
+ }
+ OFFSET_SET(h, idx);
+}
+
+/**
+ * precondition: idx is the index of an existing entry on a max level
+ * (Just like push_down_min() save for reversal of ordering, so comments there apply,
+ * mutatis mutandis.)
+ */
+static CC_HINT(nonnull) void push_down_max(minmax_heap_t *h, fr_minmax_heap_index_t idx)
+{
+ while (has_children(h, idx)) {
+ fr_minmax_heap_index_t m = max_child_or_grandchild(h, idx);
+
+ if (h->cmp(h->p[m], h->p[idx]) <= 0) break;
+
+ HEAP_SWAP(h->p[idx], h->p[m]);
+ OFFSET_SET(h, idx);
+
+ if (HEAP_GRANDPARENT(m) == idx && h->cmp(h->p[m], h->p[HEAP_PARENT(m)]) < 0) {
+ HEAP_SWAP(h->p[HEAP_PARENT(m)], h->p[m]);
+ OFFSET_SET(h, HEAP_PARENT(m));
+ }
+ idx = m;
+ }
+ OFFSET_SET(h, idx);
+}
+
+static void push_down(minmax_heap_t *h, fr_minmax_heap_index_t idx)
+{
+ if (is_min_level_index(idx)) {
+ push_down_min(h, idx);
+ } else {
+ push_down_max(h, idx);
+ }
+}
+
+static void push_up_min(minmax_heap_t *h, fr_minmax_heap_index_t idx)
+{
+ fr_minmax_heap_index_t grandparent;
+
+ while ((grandparent = HEAP_GRANDPARENT(idx)) > 0 && h->cmp(h->p[idx], h->p[grandparent]) < 0) {
+ HEAP_SWAP(h->p[idx], h->p[grandparent]);
+ OFFSET_SET(h, idx);
+ idx = grandparent;
+ }
+ OFFSET_SET(h, idx);
+}
+
+static void push_up_max(minmax_heap_t *h, fr_minmax_heap_index_t idx)
+{
+ fr_minmax_heap_index_t grandparent;
+
+ while ((grandparent = HEAP_GRANDPARENT(idx)) > 0 && h->cmp(h->p[idx], h->p[grandparent]) > 0) {
+ HEAP_SWAP(h->p[idx], h->p[grandparent]);
+ OFFSET_SET(h, idx);
+ idx = grandparent;
+ }
+ OFFSET_SET(h, idx);
+}
+
+static void push_up(minmax_heap_t *h, fr_minmax_heap_index_t idx)
+{
+ fr_minmax_heap_index_t parent;
+ int8_t order;
+
+ /*
+ * First entry? No need to move; set its index and be done with it.
+ */
+ if (idx == 1) {
+ OFFSET_SET(h, idx);
+ return;
+ }
+
+ /*
+ * Otherwise, move to the next level up if need be.
+ * Once it's positioned appropriately on an even or odd layer,
+ * it can percolate up two at a time.
+ */
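+	/*
+	 *	For example (indices only): an element newly placed in slot 11 is on a
+	 *	max level and its parent, slot 5, is on a min level. If it compares less
+	 *	than its parent the two are swapped and it percolates up the min levels
+	 *	by grandparent (slot 5, then slot 1); otherwise it percolates up the max
+	 *	levels (slot 11, then slot 2).
+	 */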
+ parent = HEAP_PARENT(idx);
+ order = h->cmp(h->p[idx], h->p[parent]);
+
+ if (is_min_level_index(idx)) {
+ if (order > 0) {
+ HEAP_SWAP(h->p[idx], h->p[parent]);
+ OFFSET_SET(h, idx);
+ push_up_max(h, parent);
+ } else {
+ push_up_min(h, idx);
+ }
+ } else {
+ if (order < 0) {
+ HEAP_SWAP(h->p[idx], h->p[parent]);
+ OFFSET_SET(h, idx);
+ push_up_min(h, parent);
+ } else {
+ push_up_max(h, idx);
+ }
+ }
+}
+
+int fr_minmax_heap_insert(fr_minmax_heap_t *hp, void *data)
+{
+ minmax_heap_t *h = *hp;
+ fr_minmax_heap_index_t child = index_get(h, data);
+
+ if (unlikely(fr_minmax_heap_entry_inserted(child))) {
+ fr_strerror_const("Node is already in a heap");
+ return -1;
+ }
+
+ child = h->num_elements + 1;
+ if (unlikely(child > h->size)) {
+ if (unlikely(minmax_heap_expand(hp) < 0)) return -1;
+ h = *hp;
+ }
+
+ /*
+ * Add it to the end, and move it up as needed.
+ */
+ h->p[child] = data;
+ h->num_elements++;
+ push_up(h, child);
+ return 0;
+}
+
+void *fr_minmax_heap_min_peek(fr_minmax_heap_t *hp)
+{
+ minmax_heap_t *h = *hp;
+
+ if (unlikely(h->num_elements == 0)) return NULL;
+ return h->p[1];
+}
+
+void *fr_minmax_heap_min_pop(fr_minmax_heap_t *hp)
+{
+ void *data = fr_minmax_heap_min_peek(hp);
+
+ if (unlikely(!data)) return NULL;
+ if (unlikely(fr_minmax_heap_extract(hp, data) < 0)) return NULL;
+ return data;
+}
+
+void *fr_minmax_heap_max_peek(fr_minmax_heap_t *hp)
+{
+ minmax_heap_t *h = *hp;
+
+ if (unlikely(h->num_elements == 0)) return NULL;
+
+ if (h->num_elements < 3) return h->p[h->num_elements];
+
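+	/*
+	 *	With three or more elements the maximum lives on the first max level,
+	 *	i.e. in p[2] or p[3]; the index arithmetic below picks the larger of the two.
+	 */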
+ return h->p[2 + (h->cmp(h->p[2], h->p[3]) < 0)];
+}
+
+void *fr_minmax_heap_max_pop(fr_minmax_heap_t *hp)
+{
+ void *data = fr_minmax_heap_max_peek(hp);
+
+ if (unlikely(!data)) return NULL;
+ if (unlikely(fr_minmax_heap_extract(hp, data) < 0)) return NULL;
+ return data;
+}
+
+int fr_minmax_heap_extract(fr_minmax_heap_t *hp, void *data)
+{
+ minmax_heap_t *h = *hp;
+ fr_minmax_heap_index_t idx = index_get(h, data);
+
+ if (unlikely(h->num_elements < idx)) {
+ fr_strerror_printf("data (index %u) exceeds heap size %u", idx, h->num_elements);
+ return -1;
+ }
+ if (unlikely(!fr_minmax_heap_entry_inserted(index_get(h, data)) || h->p[idx] != data)) {
+ fr_strerror_printf("data (index %u) not in heap", idx);
+ return -1;
+ }
+
+ OFFSET_RESET(h, idx);
+
+ /*
+ * Removing the last element can't break the minmax heap property, so
+ * decrement the number of elements and be done with it.
+ */
+ if (h->num_elements == idx) {
+ h->num_elements--;
+ return 0;
+ }
+
+ /*
+ * Move the last element into the now-available position,
+ * and then move it as needed.
+ */
+ h->p[idx] = h->p[h->num_elements];
+ h->num_elements--;
+ /*
+ * If the new position is the root, that's as far up as it gets.
+ * If the old position is a descendant of the new position,
+ * the entry itself remains a descendant of the new position's
+ * parent, and hence by minmax heap property is in the proper
+ * relation to the parent and doesn't need to move up.
+ */
+	if (idx > 1 && !is_descendant(h->num_elements + 1, idx)) push_up(h, idx);
+ push_down(h, idx);
+ return 0;
+}
+
+/** Return the number of elements in the minmax heap
+ *
+ * @param[in] hp to return the number of elements from.
+ */
+unsigned int fr_minmax_heap_num_elements(fr_minmax_heap_t *hp)
+{
+ minmax_heap_t *h = *hp;
+
+ return h->num_elements;
+}
+
+/** Iterate over entries in a minmax heap
+ *
+ * @note If the heap is modified the iterator should be considered invalidated.
+ *
+ * @param[in] hp to iterate over.
+ * @param[in] iter Pointer to an iterator struct, used to maintain
+ * state between calls.
+ * @return
+ * - User data.
+ * - NULL if at the end of the list.
+ */
+void *fr_minmax_heap_iter_init(fr_minmax_heap_t *hp, fr_minmax_heap_iter_t *iter)
+{
+ minmax_heap_t *h = *hp;
+
+ *iter = 1;
+
+ if (h->num_elements == 0) return NULL;
+
+ return h->p[1];
+}
+
+/** Get the next entry in a minmax heap
+ *
+ * @note If the heap is modified the iterator should be considered invalidated.
+ *
+ * @param[in] hp to iterate over.
+ * @param[in] iter Pointer to an iterator struct, used to maintain
+ * state between calls.
+ * @return
+ * - User data.
+ * - NULL if at the end of the list.
+ */
+void *fr_minmax_heap_iter_next(fr_minmax_heap_t *hp, fr_minmax_heap_iter_t *iter)
+{
+ minmax_heap_t *h = *hp;
+
+ if ((*iter + 1) > h->num_elements) return NULL;
+ *iter += 1;
+
+ return h->p[*iter];
+}
+
+#ifndef TALLOC_GET_TYPE_ABORT_NOOP
+void fr_minmax_heap_verify(char const *file, int line, fr_minmax_heap_t const *hp)
+{
+ minmax_heap_t *h;
+
+ /*
+ * The usual start...
+ */
+ fr_fatal_assert_msg(hp, "CONSISTENCY CHECK FAILED %s[%i]: fr_minmax_heap_t pointer was NULL", file, line);
+ (void) talloc_get_type_abort(hp, fr_minmax_heap_t);
+
+ /*
+	 *	Allocating the heap structure together with the array that holds the heap, as described
+	 *	in data structure texts, is a respectable savings, but it means adding a level of
+	 *	indirection so the fr_minmax_heap_t * isn't realloc()ed out from under the user; hence
+	 *	the following (and the use of h rather than hp to access anything in the heap structure).
+ */
+ h = *hp;
+ fr_fatal_assert_msg(h, "CONSISTENCY CHECK FAILED %s[%i]: minmax_heap_t pointer was NULL", file, line);
+ (void) talloc_get_type_abort(h, minmax_heap_t);
+
+ fr_fatal_assert_msg(h->num_elements <= h->size,
+ "CONSISTENCY CHECK FAILED %s[%i]: num_elements exceeds size", file, line);
+
+ fr_fatal_assert_msg(h->p[0] == (void *)UINTPTR_MAX,
+ "CONSISTENCY CHECK FAILED %s[%i]: zeroeth element special value overwritten", file, line);
+
+ for (fr_minmax_heap_index_t i = 1; i <= h->num_elements; i++) {
+ void *data = h->p[i];
+
+ fr_fatal_assert_msg(data, "CONSISTENCY CHECK FAILED %s[%i]: node %u was NULL", file, line, i);
+ if (h->type) (void)_talloc_get_type_abort(data, h->type, __location__);
+ fr_fatal_assert_msg(index_get(h, data) == i,
+ "CONSISTENCY CHECK FAILED %s[%i]: node %u index != %u", file, line, i, i);
+ }
+
+ /*
+ * Verify minmax heap property, which is:
+ * A node in a min level precedes all its descendants;
+	 *	a node in a max level follows all its descendants.
+ * (if equal keys are allowed, that should be "doesn't follow" and
+ * "doesn't precede" respectively)
+ *
+ * We claim looking at one's children and grandchildren (if any)
+ * suffices. Why? Induction on floor(depth / 2):
+ *
+ * Base case:
+ * If the depth of the tree is <= 2, that *is* all the
+ * descendants, so we're done.
+ * Induction step:
+ * Suppose you're on a min level and the check passes.
+ * If the test works on the next min level down, transitivity
+ * of <= means the level you're on satisfies the property
+ * two levels further down.
+ * For max level, >= is transitive, too, so you're good.
+ */
+
+ for (fr_minmax_heap_index_t i = 1; HEAP_LEFT(i) <= h->num_elements; i++) {
+ bool on_min_level = is_min_level_index(i);
+ fr_minmax_heap_index_t others[] = {
+ HEAP_LEFT(i),
+ HEAP_RIGHT(i),
+ HEAP_LEFT(HEAP_LEFT(i)),
+ HEAP_RIGHT(HEAP_LEFT(i)),
+ HEAP_LEFT(HEAP_RIGHT(i)),
+ HEAP_RIGHT(HEAP_RIGHT(i))
+ };
+
+ for (size_t j = 0; j < NUM_ELEMENTS(others) && others[j] <= h->num_elements; j++) {
+ int8_t cmp_result = h->cmp(h->p[i], h->p[others[j]]);
+
+ fr_fatal_assert_msg(on_min_level ? (cmp_result <= 0) : (cmp_result >= 0),
+ "CONSISTENCY CHECK FAILED %s[%i]: node %u violates %s level condition",
+ file, line, i, on_min_level ? "min" : "max");
+ }
+ }
+}
+#endif
--- /dev/null
+#pragma once
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+ */
+
+/** Structures and prototypes for binary min-max heaps
+ *
+ * @file src/lib/util/minmax_heap.h
+ *
+ * @copyright 2021 Network RADIUS SARL (legal@networkradius.com)
+ */
+RCSIDH(minmax_heap_h, "$Id$")
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <freeradius-devel/build.h>
+#include <freeradius-devel/missing.h>
+#include <freeradius-devel/util/talloc.h>
+
+#include <stdint.h>
+#include <sys/types.h>
+
+typedef unsigned int fr_minmax_heap_index_t;
+typedef unsigned int fr_minmax_heap_iter_t;
+
+/** How many talloc headers need to be pre-allocated for a minmax heap
+ */
+#define FR_MINMAX_HEAP_TALLOC_HEADERS 2
+
+/** Comparator to order elements
+ *
+ * Return a negative number if 'a' precedes 'b'.
+ * Return zero if the ordering of 'a' and 'b' doesn't matter.
+ * Return a positive number if 'b' precedes 'a'.
+ */
+typedef int8_t (*fr_minmax_heap_cmp_t)(void const *a, void const *b);
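+
+/*
+ *	For example (illustrative only), ordering elements of a hypothetical thing_t
+ *	by a numeric "priority" member:
+ *
+ *		static int8_t thing_cmp(void const *one, void const *two)
+ *		{
+ *			thing_t const *a = one, *b = two;
+ *
+ *			return CMP_PREFER_SMALLER(a->priority, b->priority);
+ *		}
+ */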
+
+/** The main minmax heap structure
+ * Note that fr_minmax_heap_t is a pointer to fr_minmax_heap_s. This added level of indirection
+ * lets one allocate/reallocate the heap structure and the array of pointers to items in the
+ * minmax heap as a unit without affecting the caller.
+ */
+typedef struct fr_minmax_heap_s * fr_minmax_heap_t;
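+
+/*
+ *	Elements managed by the heap must reserve a field of type fr_minmax_heap_index_t
+ *	for the heap's use, e.g. (illustrative only):
+ *
+ *		typedef struct {
+ *			uint32_t		priority;	//!< Used by the comparator.
+ *			fr_minmax_heap_index_t	idx;		//!< Maintained by the heap.
+ *		} thing_t;
+ *
+ *	The field name ("idx" here) is what gets passed as _field to the allocation macros below.
+ */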
+
+size_t fr_minmax_heap_pre_alloc_size(unsigned int count);
+
+/** Creates a minmax heap that can be used with non-talloced elements
+ *
+ * @param[in] _ctx Talloc ctx to allocate heap in.
+ * @param[in] _cmp Comparator used to compare elements.
+ * @param[in] _type Of elements.
+ * @param[in] _field to store heap indexes in.
+ * @param[in] _init the initial number of elements to allocate.
+ * Pass 0 to use the default.
+ */
+#define fr_minmax_heap_alloc(_ctx, _cmp, _type, _field, _init) \
+ _fr_minmax_heap_alloc(_ctx, _cmp, NULL, (size_t)offsetof(_type, _field), _init)
+
+/** Creates a minmax heap that verifies elements are of a specific talloc type
+ *
+ * @param[in] _ctx Talloc ctx to allocate heap in.
+ * @param[in] _cmp Comparator used to compare elements.
+ * @param[in] _talloc_type of elements.
+ * @param[in] _field to store heap indexes in.
+ * @param[in] _init the initial number of elements to allocate.
+ * Pass 0 to use the default.
+ * @return
+ * - A new minmax heap.
+ * - NULL on error.
+ */
+#define fr_minmax_heap_talloc_alloc(_ctx, _cmp, _talloc_type, _field, _init) \
+ _fr_minmax_heap_alloc(_ctx, _cmp, #_talloc_type, (size_t)offsetof(_talloc_type, _field), _init)
+
+fr_minmax_heap_t *_fr_minmax_heap_alloc(TALLOC_CTX *ctx, fr_minmax_heap_cmp_t cmp, char const *talloc_type, size_t offset, unsigned int init) CC_HINT(nonnull(2));
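+
+/*
+ *	Allocation sketch (illustrative only), continuing the thing_t example above:
+ *
+ *		fr_minmax_heap_t *hp;
+ *
+ *		hp = fr_minmax_heap_alloc(NULL, thing_cmp, thing_t, idx, 0);
+ *		if (!hp) return -1;
+ */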
+
+/** Check if an entry is inserted into a heap
+ *
+ */
+static inline bool fr_minmax_heap_entry_inserted(fr_minmax_heap_index_t heap_idx)
+{
+ return (heap_idx > 0);
+}
+
+int fr_minmax_heap_insert(fr_minmax_heap_t *hp, void *data) CC_HINT(nonnull);
+int fr_minmax_heap_extract(fr_minmax_heap_t *hp, void *data) CC_HINT(nonnull);
+void *fr_minmax_heap_min_pop(fr_minmax_heap_t *hp) CC_HINT(nonnull);
+void *fr_minmax_heap_min_peek(fr_minmax_heap_t *hp) CC_HINT(nonnull);
+void *fr_minmax_heap_max_pop(fr_minmax_heap_t *hp) CC_HINT(nonnull);
+void *fr_minmax_heap_max_peek(fr_minmax_heap_t *hp) CC_HINT(nonnull);
+
+unsigned int fr_minmax_heap_num_elements(fr_minmax_heap_t *hp) CC_HINT(nonnull);
+
+void *fr_minmax_heap_iter_init(fr_minmax_heap_t *hp, fr_minmax_heap_iter_t *iter) CC_HINT(nonnull);
+void *fr_minmax_heap_iter_next(fr_minmax_heap_t *hp, fr_minmax_heap_iter_t *iter) CC_HINT(nonnull);
+
+/** Iterate over the contents of a minmax_heap
+ *
+ * @note The initializer section of a for loop can't declare variables with distinct
+ * base types, so we require a containing block, and can't follow the standard
+ * do {...} while(0) dodge. The code to be run for each item in the heap should
+ *	therefore start with one open brace and end with two close braces, and shouldn't
+ *	be followed by a semicolon.
+ * This may fake out code formatting programs, including editors.
+ *
+ * @param[in] _hp to iterate over.
+ * @param[in] _type of item the heap contains.
+ * @param[in] _data Name of variable holding a pointer to the heap element.
+ * Will be declared in the scope of the loop.
+ */
+#define fr_minmax_heap_foreach(_hp, _type, _data) \
+{ \
+ fr_minmax_heap_iter_t _iter; \
+ for (_type *_data = fr_minmax_heap_iter_init(_hp, &_iter); _data; _data = fr_minmax_heap_iter_next(_hp, &_iter))
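+
+/*
+ *	Usage sketch (illustrative only; thing_t as above):
+ *
+ *		fr_minmax_heap_foreach(hp, thing_t, item) {
+ *			do_something(item);
+ *		}}
+ *
+ *	Note the doubled closing brace and the absence of a trailing semicolon,
+ *	as described above.
+ */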
+
+#ifndef TALLOC_GET_TYPE_ABORT_NOOP
+CC_HINT(nonnull(1)) void fr_minmax_heap_verify(char const *file, int line, fr_minmax_heap_t const *hp);
+# define FR_MINMAX_HEAP_VERIFY(_hp) fr_minmax_heap_verify(__FILE__, __LINE__, _hp)
+#elif !defined(NDEBUG)
+# define FR_MINMAX_HEAP_VERIFY(_hp) fr_assert(_hp)
+#else
+# define FR_MINMAX_HEAP_VERIFY(_hp)
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+
--- /dev/null
+#include <freeradius-devel/util/acutest.h>
+#include <freeradius-devel/util/heap.h>
+#include <freeradius-devel/util/rand.h>
+#include <freeradius-devel/util/time.h>
+
+#include "minmax_heap.c"
+
+typedef struct {
+ unsigned int data;
+ fr_minmax_heap_index_t idx; /* for the heap */
+ bool visited;
+} minmax_heap_thing;
+
+static bool minmax_heap_contains(fr_minmax_heap_t *hp, void *data)
+{
+ minmax_heap_t *h = *hp;
+
+ for (unsigned int i = 1; i <= h->num_elements; i++) if (h->p[i] == data) return true;
+
+ return false;
+}
+
+static int8_t minmax_heap_cmp(void const *one, void const *two)
+{
+ minmax_heap_thing const *a = one, *b = two;
+
+ return CMP_PREFER_SMALLER(a->data, b->data);
+}
+
+#if 0
+#define is_power_of_2(_n) !((_n) & ((_n) - 1))
+/*
+ * A simple minmax heap dump function, specific to minmax_heap_thing and
+ * intended for use only with small heaps. It only shows the data members
+ * in the order they appear in the array, ignoring the unused zeroeth
+ * entry and printing a vertical bar before the start of each successive level.
+ */
+static void minmax_heap_dump(fr_minmax_heap_t *hp)
+{
+ minmax_heap_t *h = *hp;
+ unsigned int num_elements = h->num_elements;
+
+ fprintf(stderr, "%3u: ", num_elements);
+
+ for (fr_minmax_heap_index_t i = 1; i <= num_elements; i++) {
+ if (is_power_of_2(i)) fprintf(stderr, "|");
+ fprintf(stderr, "%6u", ((minmax_heap_thing *)(h->p[i]))->data);
+ }
+ fprintf(stderr, "\n");
+}
+#endif
+
+static void populate_values(minmax_heap_thing values[], unsigned int len)
+{
+ unsigned int i;
+ fr_fast_rand_t rand_ctx;
+
+ for (i = 0; i < len; i++) {
+ values[i].data = i;
+ values[i].idx = 0;
+ values[i].visited = false;
+ }
+
+ /* shuffle values before insertion, so the heap has to work to give them back in order */
+ rand_ctx.a = fr_rand();
+ rand_ctx.b = fr_rand();
+
+ for (i = 0; i < len; i++) {
+ unsigned int j = fr_fast_rand(&rand_ctx) % len;
+		unsigned int temp = values[i].data;
+
+ values[i].data = values[j].data;
+ values[j].data = temp;
+ }
+}
+
+#define NVALUES 20
+static void minmax_heap_test_basic(void)
+{
+ fr_minmax_heap_t *hp;
+ minmax_heap_thing values[NVALUES];
+
+ hp = fr_minmax_heap_alloc(NULL, minmax_heap_cmp, minmax_heap_thing, idx, NVALUES);
+ TEST_CHECK(hp != NULL);
+
+ populate_values(values, NVALUES);
+
+ /*
+ * minmax heaps can get the minimum value...
+ */
+ for (unsigned int i = 0; i < NVALUES; i++) {
+ TEST_CHECK(fr_minmax_heap_insert(hp, &values[i]) >= 0);
+ TEST_CHECK(fr_minmax_heap_entry_inserted(values[i].idx));
+ }
+
+ for (unsigned int i = 0; i < NVALUES; i++) {
+ minmax_heap_thing *value = fr_minmax_heap_min_pop(hp);
+
+ TEST_CHECK(value != NULL);
+ TEST_CHECK(!fr_minmax_heap_entry_inserted(value->idx));
+ TEST_CHECK(value->data == i);
+ TEST_MSG("iteration %u, popped %u", i, value->data);
+ }
+
+ /*
+ * ...or the maximum value.
+ */
+ for (unsigned int i = 0; i < NVALUES; i++) {
+ TEST_CHECK(fr_minmax_heap_insert(hp, &values[i]) >= 0);
+ TEST_CHECK(fr_minmax_heap_entry_inserted(values[i].idx));
+ }
+
+ for (unsigned int i = NVALUES; --i > 0; ) {
+ minmax_heap_thing *value = fr_minmax_heap_max_pop(hp);
+
+ TEST_CHECK(value != NULL);
+ TEST_CHECK(!fr_minmax_heap_entry_inserted(value->idx));
+ TEST_CHECK(value->data == i);
+ TEST_MSG("iteration %u, popped %u", NVALUES - 1 - i, value->data);
+ }
+
+ talloc_free(hp);
+}
+
+#define MINMAX_HEAP_TEST_SIZE (4096)
+
+static void minmax_heap_test(int skip)
+{
+ fr_minmax_heap_t *hp;
+ int i;
+ minmax_heap_thing *array;
+ int left;
+ int ret;
+
+ static bool done_init = false;
+
+ if (!done_init) {
+ unsigned int seed = /* 1634677281 */ (unsigned int) time(NULL);
+
+ // fprintf(stderr, "seed %u\n", seed);
+ srand(seed);
+ done_init = true;
+ }
+
+ hp = fr_minmax_heap_alloc(NULL, minmax_heap_cmp, minmax_heap_thing, idx, 0);
+ TEST_CHECK(hp != NULL);
+
+ array = talloc_zero_array(hp, minmax_heap_thing, MINMAX_HEAP_TEST_SIZE);
+
+ /*
+ * Initialise random values
+ */
+ for (i = 0; i < MINMAX_HEAP_TEST_SIZE; i++) array[i].data = rand() % 65537;
+
+ TEST_CASE("insertions");
+ for (i = 0; i < MINMAX_HEAP_TEST_SIZE; i++) {
+ FR_MINMAX_HEAP_VERIFY(hp);
+ TEST_CHECK((ret = fr_minmax_heap_insert(hp, &array[i])) >= 0);
+ TEST_MSG("insert failed, returned %i - %s", ret, fr_strerror());
+
+ TEST_CHECK(minmax_heap_contains(hp, &array[i]));
+ TEST_MSG("element %i inserted but not in heap", i);
+ }
+
+ TEST_CASE("deletions");
+ {
+ unsigned int entry;
+
+ for (i = 0; i < MINMAX_HEAP_TEST_SIZE / skip; i++) {
+ entry = i * skip;
+
+ FR_MINMAX_HEAP_VERIFY(hp);
+ TEST_CHECK(array[entry].idx != 0);
+ TEST_MSG("element %i removed out of order", entry);
+
+ TEST_CHECK((ret = fr_minmax_heap_extract(hp, &array[entry])) >= 0);
+ TEST_MSG("element %i removal failed, returned %i - %s", entry, ret, fr_strerror());
+
+ TEST_CHECK(!minmax_heap_contains(hp, &array[entry]));
+ TEST_MSG("element %i removed but still in heap", entry);
+
+ TEST_CHECK(array[entry].idx == 0);
+ TEST_MSG("element %i removed out of order", entry);
+ }
+ }
+
+ left = fr_minmax_heap_num_elements(hp);
+ for (i = 0; i < left; i++) {
+ minmax_heap_thing *t;
+
+ FR_MINMAX_HEAP_VERIFY(hp);
+ TEST_CHECK((t = fr_minmax_heap_min_peek(hp)) != NULL);
+ TEST_MSG("expected %i elements remaining in the heap", left - i);
+
+ TEST_CHECK(fr_minmax_heap_extract(hp, t) >= 0);
+ TEST_MSG("failed extracting %i", i);
+ }
+
+ TEST_CHECK((ret = fr_minmax_heap_num_elements(hp)) == 0);
+ TEST_MSG("%i elements remaining", ret);
+
+ talloc_free(hp);
+}
+
+/*
+ * minmax heaps can do anything heaps can do, so let's make sure we have
+ * a (proper!) superset of the heap tests.
+ */
+
+static void minmax_heap_test_skip_0(void)
+{
+ minmax_heap_test(1);
+}
+
+static void minmax_heap_test_skip_2(void)
+{
+ minmax_heap_test(2);
+}
+
+static void minmax_heap_test_skip_10(void)
+{
+ minmax_heap_test(10);
+}
+
+#define BURN_IN_OPS (10000000)
+
+static void minmax_heap_burn_in(void)
+{
+ fr_minmax_heap_t *hp = NULL;
+ minmax_heap_thing *array = NULL;
+ static bool done_init = false;
+ int insert_count = 0;
+ int element_count = 0;
+
+ if (!done_init) {
+ srand((unsigned int) time(0));
+ done_init = true;
+ }
+
+ array = calloc(BURN_IN_OPS, sizeof(minmax_heap_thing));
+ for (unsigned int i = 0; i < BURN_IN_OPS; i++) array[i].data = rand() % 65537;
+
+ hp = fr_minmax_heap_alloc(NULL, minmax_heap_cmp, minmax_heap_thing, idx, 0);
+
+ for (unsigned int i = 0; i < BURN_IN_OPS; i++) {
+ minmax_heap_thing *ret_thing = NULL;
+ int ret_insert = -1;
+
+ if (fr_minmax_heap_num_elements(hp) == 0) {
+ insert:
+ TEST_CHECK((ret_insert = fr_minmax_heap_insert(hp, &array[insert_count])) >= 0);
+ insert_count++;
+ element_count++;
+ } else {
+			switch (rand() % 5) {
+ case 0: /* insert */
+ goto insert;
+
+ case 1: /* min pop */
+ ret_thing = fr_minmax_heap_min_pop(hp);
+ TEST_CHECK(ret_thing != NULL);
+ element_count--;
+ break;
+ case 2: /* min peek */
+ ret_thing = fr_minmax_heap_min_peek(hp);
+ TEST_CHECK(ret_thing != NULL);
+ break;
+ case 3: /* max pop */
+ ret_thing = fr_minmax_heap_max_pop(hp);
+ TEST_CHECK(ret_thing != NULL);
+ element_count--;
+ break;
+ case 4: /* max peek */
+ ret_thing = fr_minmax_heap_max_peek(hp);
+ TEST_CHECK(ret_thing != NULL);
+ break;
+ }
+ }
+ }
+
+ talloc_free(hp);
+ free(array);
+}
+
+#define MINMAX_HEAP_CYCLE_SIZE (1600000)
+
+static void minmax_heap_test_order(void)
+{
+ fr_minmax_heap_t *hp;
+ int i;
+ minmax_heap_thing *array;
+ minmax_heap_thing *thing, *prev = NULL;
+ unsigned int data;
+ unsigned int count;
+ int ret;
+
+ static bool done_init = false;
+
+ if (!done_init) {
+ srand((unsigned int)time(NULL));
+ done_init = true;
+ }
+
+ hp = fr_minmax_heap_alloc(NULL, minmax_heap_cmp, minmax_heap_thing, idx, 0);
+ TEST_CHECK(hp != NULL);
+
+ array = talloc_zero_array(hp, minmax_heap_thing, MINMAX_HEAP_TEST_SIZE);
+
+ /*
+ * Initialise random values
+ */
+ for (i = 0; i < MINMAX_HEAP_TEST_SIZE; i++) array[i].data = rand() % 65537;
+
+ TEST_CASE("insertions for min");
+ for (i = 0; i < MINMAX_HEAP_TEST_SIZE; i++) {
+ TEST_CHECK((ret = fr_minmax_heap_insert(hp, &array[i])) >= 0);
+ TEST_MSG("insert failed, returned %i - %s", ret, fr_strerror());
+
+ TEST_CHECK(minmax_heap_contains(hp, &array[i]));
+ TEST_MSG("element %i inserted but not in heap", i);
+ }
+
+ TEST_CASE("min ordering");
+
+ count = 0;
+ data = 0;
+ prev = NULL;
+ while ((thing = fr_minmax_heap_min_pop(hp))) {
+ TEST_CHECK(thing->data >= data);
+ TEST_MSG("Expected data >= %i, got %i", data, thing->data);
+ if (thing->data >= data) data = thing->data;
+ TEST_CHECK(thing != prev);
+ prev = thing;
+ count++;
+ }
+
+ TEST_CHECK(count == MINMAX_HEAP_TEST_SIZE);
+
+ TEST_CASE("insertions for max");
+ for (i = 0; i < MINMAX_HEAP_TEST_SIZE; i++) {
+ TEST_CHECK((ret = fr_minmax_heap_insert(hp, &array[i])) >= 0);
+ TEST_MSG("insert failed, returned %i - %s", ret, fr_strerror());
+
+ TEST_CHECK(minmax_heap_contains(hp, &array[i]));
+ TEST_MSG("element %i inserted but not in heap", i);
+ }
+
+ TEST_CASE("max ordering");
+
+ count = 0;
+ data = UINT_MAX;
+ prev = NULL;
+ while ((thing = fr_minmax_heap_max_pop(hp))) {
+ TEST_CHECK(thing->data <= data);
+		TEST_MSG("Expected data <= %u, got %u", data, thing->data);
+ if (thing->data <= data) data = thing->data;
+ TEST_CHECK(thing != prev);
+ prev = thing;
+ count++;
+ }
+
+ TEST_CHECK(count == MINMAX_HEAP_TEST_SIZE);
+
+ talloc_free(hp);
+}
+
+static CC_HINT(noinline) minmax_heap_thing *array_pop(minmax_heap_thing **array, unsigned int count)
+{
+ minmax_heap_thing *low = NULL;
+ unsigned int idx = 0;
+
+ for (unsigned int j = 0; j < count; j++) {
+ if (!array[j]) continue;
+
+ if (!low || (minmax_heap_cmp(array[j], low) < 0)) {
+ idx = j;
+ low = array[j];
+ }
+ }
+ if (low) array[idx] = NULL;
+
+ return low;
+}
+
+/** Benchmarks for minmax heaps vs heaps when used as queues
+ *
+ */
+static void queue_cmp(unsigned int count)
+{
+ fr_minmax_heap_t *minmax;
+ fr_heap_t *heap;
+
+ minmax_heap_thing *values;
+
+ unsigned int i;
+
+ values = talloc_array(NULL, minmax_heap_thing, count);
+
+ /*
+ * Check times for minmax heap alloc, insert, pop
+ */
+ {
+ fr_time_t start_alloc, end_alloc, start_insert, end_insert, start_pop, end_pop, end_pop_first = fr_time_wrap(0);
+
+ populate_values(values, count);
+
+ start_alloc = fr_time();
+ minmax = fr_minmax_heap_alloc(NULL, minmax_heap_cmp, minmax_heap_thing, idx, 0);
+ end_alloc = fr_time();
+ TEST_CHECK(minmax != NULL);
+
+ start_insert = fr_time();
+ for (i = 0; i < count; i++) fr_minmax_heap_insert(minmax, &values[i]);
+ end_insert = fr_time();
+
+ start_pop = fr_time();
+ for (i = 0; i < count; i++) {
+ TEST_CHECK(fr_minmax_heap_min_pop(minmax) != NULL);
+ if (i == 0) end_pop_first = fr_time();
+
+ TEST_MSG("expected %u elements remaining in the minmax heap", count - i);
+ TEST_MSG("failed extracting %u", i);
+ }
+ end_pop = fr_time();
+
+ TEST_MSG_ALWAYS("\nminmax heap size: %u\n", count);
+ TEST_MSG_ALWAYS("alloc: %"PRIu64" μs\n", fr_time_delta_to_usec(fr_time_sub(end_alloc, start_alloc)));
+ TEST_MSG_ALWAYS("insert: %"PRIu64" μs\n", fr_time_delta_to_usec(fr_time_sub(end_insert, start_insert)));
+ TEST_MSG_ALWAYS("pop-first: %"PRIu64" μs\n", fr_time_delta_to_usec(fr_time_sub(end_pop_first, start_pop)));
+ TEST_MSG_ALWAYS("pop: %"PRIu64" μs\n", fr_time_delta_to_usec(fr_time_sub(end_pop, start_pop)));
+ talloc_free(minmax);
+ }
+
+ /*
+ * Check times for heap alloc, insert, pop
+ */
+ {
+ fr_time_t start_alloc, end_alloc, start_insert, end_insert, start_pop, end_pop, end_pop_first;
+
+ populate_values(values, count);
+
+ start_alloc = fr_time();
+ heap = fr_heap_alloc(NULL, minmax_heap_cmp, minmax_heap_thing, idx, count);
+ end_alloc = fr_time();
+ TEST_CHECK(heap != NULL);
+
+ start_insert = fr_time();
+ for (i = 0; i < count; i++) fr_heap_insert(heap, &values[i]);
+ end_insert = fr_time();
+
+ start_pop = fr_time();
+ for (i = 0; i < count; i++) {
+ TEST_CHECK(fr_heap_pop(heap) != NULL);
+ if (i == 0) end_pop_first = fr_time();
+
+ TEST_MSG("expected %u elements remaining in the heap", count - i);
+ TEST_MSG("failed extracting %u", i);
+ }
+ end_pop = fr_time();
+
+ TEST_MSG_ALWAYS("\nheap size: %u\n", count);
+ TEST_MSG_ALWAYS("alloc: %"PRIu64" μs\n", fr_time_delta_to_usec(fr_time_sub(end_alloc, start_alloc)));
+ TEST_MSG_ALWAYS("insert: %"PRIu64" μs\n", fr_time_delta_to_usec(fr_time_sub(end_insert, start_insert)));
+ TEST_MSG_ALWAYS("pop-first: %"PRIu64" μs\n", fr_time_delta_to_usec(fr_time_sub(end_pop_first, start_pop)));
+ TEST_MSG_ALWAYS("pop: %"PRIu64" μs\n", fr_time_delta_to_usec(fr_time_sub(end_pop, start_pop)));
+
+ talloc_free(heap);
+ }
+
+ /*
+ * Array
+ */
+ {
+ minmax_heap_thing **array;
+ populate_values(values, count);
+ fr_time_t start_alloc, end_alloc, start_insert, end_insert, start_pop, end_pop, end_pop_first;
+
+ start_alloc = fr_time();
+ array = talloc_array(NULL, minmax_heap_thing *, count);
+ end_alloc = fr_time();
+
+ start_insert = fr_time();
+ for (i = 0; i < count; i++) array[i] = &values[i];
+ end_insert = fr_time();
+
+ start_pop = fr_time();
+ for (i = 0; i < count; i++) {
+ TEST_CHECK(array_pop(array, count) != NULL);
+ if (i == 0) end_pop_first = fr_time();
+ }
+ end_pop = fr_time();
+
+ TEST_MSG_ALWAYS("\narray size: %u\n", count);
+ TEST_MSG_ALWAYS("alloc: %"PRIu64" μs\n", fr_time_delta_to_usec(fr_time_sub(end_alloc, start_alloc)));
+ TEST_MSG_ALWAYS("insert: %"PRIu64" μs\n", fr_time_delta_to_usec(fr_time_sub(end_insert, start_insert)));
+ TEST_MSG_ALWAYS("pop-first: %"PRIu64" μs\n", fr_time_delta_to_usec(fr_time_sub(end_pop_first, start_pop)));
+ TEST_MSG_ALWAYS("pop: %"PRIu64" μs\n", fr_time_delta_to_usec(fr_time_sub(end_pop, start_pop)));
+
+ talloc_free(array);
+ }
+
+ talloc_free(values);
+}
+
+static void queue_cmp_10(void)
+{
+ queue_cmp(10);
+}
+
+static void queue_cmp_50(void)
+{
+ queue_cmp(50);
+}
+
+static void queue_cmp_100(void)
+{
+ queue_cmp(100);
+}
+
+static void queue_cmp_1000(void)
+{
+ queue_cmp(1000);
+}
+
+static void minmax_heap_cycle(void)
+{
+ fr_minmax_heap_t *hp;
+ int i;
+ minmax_heap_thing *array;
+ int to_remove;
+ int inserted, removed;
+ int ret;
+ fr_time_t start_insert, start_remove, start_swap, end;
+
+ static bool done_init = false;
+
+ if (!done_init) {
+ srand((unsigned int)time(NULL));
+ done_init = true;
+ }
+
+ hp = fr_minmax_heap_alloc(NULL, minmax_heap_cmp, minmax_heap_thing, idx, 0);
+ TEST_CHECK(hp != NULL);
+
+ array = calloc(MINMAX_HEAP_CYCLE_SIZE, sizeof(minmax_heap_thing));
+
+ /*
+ * Initialise random values
+ */
+ for (i = 0; i < MINMAX_HEAP_CYCLE_SIZE; i++) array[i].data = rand() % 65537;
+
+ start_insert = fr_time();
+ TEST_CASE("insertions");
+ for (i = 0; i < MINMAX_HEAP_CYCLE_SIZE; i++) {
+ TEST_CHECK((ret = fr_minmax_heap_insert(hp, &array[i])) >= 0);
+ TEST_MSG("insert failed, returned %i - %s", ret, fr_strerror());
+ }
+ TEST_CHECK(fr_minmax_heap_num_elements(hp) == MINMAX_HEAP_CYCLE_SIZE);
+
+ TEST_CASE("pop");
+
+ /*
+ * Remove a random number of elements from the heap
+ */
+ to_remove = fr_minmax_heap_num_elements(hp) / 2;
+ start_remove = fr_time();
+ for (i = 0; i < to_remove; i++) {
+ minmax_heap_thing *t;
+
+ TEST_CHECK((t = fr_minmax_heap_min_peek(hp)) != NULL);
+ TEST_MSG("expected %i elements remaining in the heap", to_remove - i);
+
+ TEST_CHECK(fr_minmax_heap_extract(hp, t) >= 0);
+ TEST_MSG("failed extracting %i - %s", i, fr_strerror());
+ }
+
+ /*
+ * Now swap the inserted and removed set creating churn
+ */
+ start_swap = fr_time();
+ inserted = 0;
+ removed = 0;
+
+ for (i = 0; i < MINMAX_HEAP_CYCLE_SIZE; i++) {
+ if (!fr_minmax_heap_entry_inserted(array[i].idx)) {
+ TEST_CHECK((ret = fr_minmax_heap_insert(hp, &array[i])) >= 0);
+ TEST_MSG("insert failed, returned %i - %s", ret, fr_strerror());
+ inserted++;
+ } else {
+ TEST_CHECK((ret = fr_minmax_heap_extract(hp, &array[i])) >= 0);
+ TEST_MSG("element %i removal failed, returned %i - %s", i, ret, fr_strerror());
+ removed++;
+ }
+ }
+
+ TEST_CHECK(removed == (MINMAX_HEAP_CYCLE_SIZE - to_remove));
+ TEST_MSG("expected %i", MINMAX_HEAP_CYCLE_SIZE - to_remove);
+ TEST_MSG("got %i", removed);
+
+ TEST_CHECK(inserted == to_remove);
+ TEST_MSG("expected %i", to_remove);
+ TEST_MSG("got %i", inserted);
+
+ end = fr_time();
+
+ TEST_MSG_ALWAYS("\ncycle size: %d\n", MINMAX_HEAP_CYCLE_SIZE);
+ TEST_MSG_ALWAYS("insert: %"PRIu64" μs\n", fr_time_delta_to_usec(fr_time_sub(start_remove, start_insert)));
+ TEST_MSG_ALWAYS("extract: %"PRIu64" μs\n", fr_time_delta_to_usec(fr_time_sub(start_swap, start_remove)));
+ TEST_MSG_ALWAYS("swap: %"PRIu64" μs\n", fr_time_delta_to_usec(fr_time_sub(end, start_swap)));
+
+ talloc_free(hp);
+ free(array);
+}
+
+static void minmax_heap_iter(void)
+{
+ fr_minmax_heap_t *hp;
+ fr_minmax_heap_iter_t iter;
+ minmax_heap_thing values[NVALUES], *data;
+ unsigned int total;
+
+ hp = fr_minmax_heap_alloc(NULL, minmax_heap_cmp, minmax_heap_thing, idx, 0);
+ TEST_CHECK(hp != NULL);
+
+ populate_values(values, NUM_ELEMENTS(values));
+
+ for (unsigned int i = 0; i < NUM_ELEMENTS(values); i++) fr_minmax_heap_insert(hp, &values[i]);
+
+ data = fr_minmax_heap_iter_init(hp, &iter);
+
+ for (unsigned int i = 0; i < NUM_ELEMENTS(values); i++, data = fr_minmax_heap_iter_next(hp, &iter)) {
+ TEST_CHECK(data != NULL);
+ TEST_CHECK(!data->visited);
+ TEST_CHECK(data->idx > 0);
+ data->visited = true;
+ }
+
+ TEST_CHECK(data == NULL);
+
+ total = 0;
+ fr_minmax_heap_foreach(hp, minmax_heap_thing, item) {
+ total += item->data;
+ }}
+	TEST_CHECK(total == 190);
+
+ talloc_free(hp);
+}
+
+TEST_LIST = {
+ /*
+ * Basic tests
+ */
+ { "minmax_heap_test_basic", minmax_heap_test_basic },
+ { "minmax_heap_test_skip_0", minmax_heap_test_skip_0 },
+ { "minmax_heap_test_skip_2", minmax_heap_test_skip_2 },
+ { "minmax_heap_test_skip_10", minmax_heap_test_skip_10 },
+ { "minmax_heap_test_order", minmax_heap_test_order },
+ { "minmax_heap_burn_in", minmax_heap_burn_in },
+ { "minmax_heap_cycle", minmax_heap_cycle },
+ { "minmax_heap_iter", minmax_heap_iter },
+ { "queue_cmp_10", queue_cmp_10 },
+ { "queue_cmp_50", queue_cmp_50 },
+ { "queue_cmp_100", queue_cmp_100 },
+ { "queue_cmp_1000", queue_cmp_1000 },
+ { NULL }
+};
+
--- /dev/null
+TARGET := minmax_heap_tests
+
+SOURCES := minmax_heap_tests.c
+
+TGT_LDLIBS := $(LIBS) $(GPERFTOOLS_LIBS)
+TGT_LDFLAGS := $(LDFLAGS) $(GPERFTOOLS_LDFLAGS)
+
+TGT_PREREQS += libfreeradius-util.a