dlist_tests.mk \
heap_tests.mk \
libfreeradius-util.mk \
+ lst_tests.mk \
pair_legacy_tests.mk \
pair_list_perf_test.mk \
pair_tests.mk \
#include <freeradius-devel/util/acutest.h>
+#include <freeradius-devel/util/time.h>
#include "heap.c"
int i;
heap_thing *array;
int to_remove;
+ int inserted, removed;
int ret;
+ fr_time_t start_insert, start_remove, start_swap, end;
static bool done_init = false;
*/
for (i = 0; i < HEAP_CYCLE_SIZE; i++) array[i].data = rand() % 65537;
+ start_insert = fr_time();
TEST_CASE("insertions");
for (i = 0; i < HEAP_CYCLE_SIZE; i++) {
TEST_CHECK((ret = fr_heap_insert(hp, &array[i])) >= 0);
* Remove a random number of elements from the heap
*/
to_remove = fr_heap_num_elements(hp) / 2;
+ start_remove = fr_time();
for (i = 0; i < to_remove; i++) {
heap_thing *t;
/*
* Now swap the inserted and removed set creating churn
*/
- {
- int inserted = 0, removed = 0;
-
- for (i = 0; i < HEAP_CYCLE_SIZE; i++) {
- if (array[i].heap == -1) {
- TEST_CHECK((ret = fr_heap_insert(hp, &array[i])) >= 0);
- TEST_MSG("insert failed, returned %i - %s", ret, fr_strerror());
- inserted++;
- } else {
- TEST_CHECK((ret = fr_heap_extract(hp, &array[i])) >= 0);
- TEST_MSG("element %i removal failed, returned %i", i, ret);
- removed++;
- }
+ start_swap = fr_time();
+ inserted = 0;
+ removed = 0;
+
+ for (i = 0; i < HEAP_CYCLE_SIZE; i++) {
+ if (array[i].heap == -1) {
+ TEST_CHECK((ret = fr_heap_insert(hp, &array[i])) >= 0);
+ TEST_MSG("insert failed, returned %i - %s", ret, fr_strerror());
+ inserted++;
+ } else {
+ TEST_CHECK((ret = fr_heap_extract(hp, &array[i])) >= 0);
+ TEST_MSG("element %i removal failed, returned %i", i, ret);
+ removed++;
}
+ }
- TEST_CHECK(removed == (HEAP_CYCLE_SIZE - to_remove));
- TEST_MSG("expected %i", HEAP_CYCLE_SIZE - to_remove);
- TEST_MSG("got %i", removed);
+ TEST_CHECK(removed == (HEAP_CYCLE_SIZE - to_remove));
+ TEST_MSG("expected %i", HEAP_CYCLE_SIZE - to_remove);
+ TEST_MSG("got %i", removed);
- TEST_CHECK(inserted == to_remove);
- TEST_MSG("expected %i", to_remove);
- TEST_MSG("got %i", inserted);
- }
+ TEST_CHECK(inserted == to_remove);
+ TEST_MSG("expected %i", to_remove);
+ TEST_MSG("got %i", inserted);
+
+ end = fr_time();
+
+ TEST_MSG_ALWAYS("\ncycle size: %d\n", HEAP_CYCLE_SIZE);
+ TEST_MSG_ALWAYS("insert: %2.2f ns\n", ((double)(start_remove - start_insert)) / NSEC);
+ TEST_MSG_ALWAYS("extract: %2.2f ns\n", ((double)(start_swap - start_remove)) / NSEC);
+ TEST_MSG_ALWAYS("swap: %2.2f ns\n", ((double)(end - start_swap)) / NSEC);
talloc_free(hp);
free(array);
inet.c \
isaac.c \
log.c \
+ lst.c \
md4.c \
md5.c \
misc.c \
--- /dev/null
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+ */
+
+/** Functions for a Leftmost Skeleton Tree
+ *
+ * @file src/lib/util/lst.c
+ *
+ * @copyright 2021 Network RADIUS SARL (legal@networkradius.com)
+ */
+RCSID("$Id$")
+
+#include <freeradius-devel/util/lst.h>
+#include <freeradius-devel/util/rand.h>
+#include <freeradius-devel/util/strerror.h>
+
+/*
+ * Leftmost Skeleton Trees are defined in "Stronger Quickheaps" (Gonzalo Navarro,
+ * Rodrigo Paredes, Patricio V. Poblete, and Peter Sanders) International Journal
+ * of Foundations of Computer Science, November 2011. As the title suggests, it
+ * is inspired by quickheaps, and indeed the underlying representation looks
+ * like a quickheap.
+ *
+ * Heap/priority queue operations are defined in the paper in terms of LST
+ * operations.
+ */
+
+typedef int stack_index_t;
+
+typedef struct {
+ stack_index_t depth;
+ stack_index_t size;
+ fr_lst_index_t *data; /* array of indices of the pivots (sometimes called roots) */
+} pivot_stack_t;
+
+struct fr_lst_s {
+ fr_lst_index_t capacity; //!< Number of elements that will fit
+ fr_lst_index_t idx; //!< Starting index, initially zero
+ fr_lst_index_t num_elements; //!< Number of elements in the LST
+ size_t offset; //!< Offset of the LST index field in the element structure.
+ void **p; //!< Array of elements.
+ pivot_stack_t *s; //!< Stack of pivots, always with depth >= 1.
+ fr_fast_rand_t rand_ctx; //!< Seed for random choices.
+ char const *type; //!< Type of elements.
+ fr_lst_cmp_t cmp; //!< Comparator function.
+};
+
+#define index_addr(_lst, _data) ((uint8_t *)(_data) + (_lst)->offset)
+#define item_index(_lst, _data) (*(fr_lst_index_t *)index_addr((_lst), (_data)))
+
+/*
+ * Index arithmetic is done modulo the capacity, which is always a power of
+ * two (INITIAL_CAPACITY is one, and lst_expand() doubles it), so the
+ * reduction is a simple mask.
+ */
+#define is_equivalent(_lst, _index1, _index2) (index_reduce((_lst), (_index1) - (_index2)) == 0)
+#define item(_lst, _index) ((_lst)->p[index_reduce((_lst), (_index))])
+#define index_reduce(_lst, _index) ((_index) & ((_lst)->capacity - 1))
+#define pivot_item(_lst, _index) item((_lst), stack_item((_lst)->s, (_index)))
+
+/*
+ * The LST as defined in the paper has a fixed size set at creation.
+ * Here, as with quickheaps, we want to allow for expansion...
+ * though given that, as the paper shows, the expected stack depth
+ * is proportional to the log of the number of items in the LST,
+ * expanding the pivot stack should be a rare event.
+ */
+#define INITIAL_CAPACITY 2048
+#define INITIAL_STACK_CAPACITY 32
+
+/*
+ * The paper defines the randomized priority queue (RPQ) operations in terms
+ * of operations on the sum type the authors use to define LSTs. This code,
+ * however, deals with the internal representation, including the root/pivot
+ * stack, which must change as the LST changes. Also, an insertion or deletion
+ * may shift the position of any number of buckets or change the number of
+ * buckets.
+ *
+ * So... for those operations, we will pass in the pointer to the LST, but
+ * internally, we'll represent it and its subtrees with an (LST pointer, stack index)
+ * pair. The index is that of the least pivot greater than or equal to all items in
+ * the subtree (considering the "fictitious" pivot greater than anything), so
+ * (lst, 0) represents the entire tree.
+ *
+ * The fictitious pivot at the bottom of the stack isn't actually in the array,
+ * so don't try to refer to what's there.
+ *
+ * The stack index is exposed to the size and length functions, since they need
+ * to know which subtree they're working on.
+ */
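+
+/*
+ * An illustrative sketch (ours, not the paper's): with a pivot stack holding,
+ * from bottom to top, s0 (the fictitious pivot), s1, and s2, the array is
+ *
+ * idx s2 s1 s0
+ * |<- bucket 2 ->| p2 |<- bucket 1 ->| p1 |<- bucket 0 ->|
+ *
+ * where p1 and p2 are the pivot items stored at positions s1 and s2, and
+ * nothing is stored at s0, one past the last element. (lst, 0) is the whole
+ * tree, (lst, 1) is everything preceding p1, and (lst, 2) is bucket 2 alone.
+ */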
+
+#define is_bucket(_lst, _stack_index) (lst_length((_lst), (_stack_index)) == 1)
+
+/*
+ * First, the canonical stack implementation, customized for LST usage:
+ * 1. pop doesn't return a stack value, and even lets you discard multiple
+ * stack items at a time
+ * 2. one can fetch and modify arbitrary stack items; when array elements must be
+ * moved to keep them contiguous, the pivot stack entries must change to match.
+ */
+static pivot_stack_t *stack_alloc(TALLOC_CTX *ctx)
+{
+ pivot_stack_t *s;
+
+ s = talloc_zero(ctx, pivot_stack_t);
+ if (!s) return NULL;
+
+ s->data = talloc_array(s, fr_lst_index_t, INITIAL_STACK_CAPACITY);
+ if (!s->data) {
+ talloc_free(s);
+ return NULL;
+ }
+ s->depth = 0;
+ s->size = INITIAL_STACK_CAPACITY;
+ return s;
+}
+
+static bool stack_expand(pivot_stack_t *s)
+{
+ fr_lst_index_t *n;
+ size_t n_size = 2 * s->size;
+
+ n = talloc_realloc(s, s->data, fr_lst_index_t, n_size);
+ if (unlikely(!n)) {
+ fr_strerror_printf("Failed expanding lst stack to %zu elements (%zu bytes)",
+ n_size, n_size * sizeof(fr_lst_index_t));
+ return false;
+ }
+
+ s->size = n_size;
+ s->data = n;
+ return true;
+}
+
+static inline CC_HINT(always_inline, nonnull) int stack_push(pivot_stack_t *s, fr_lst_index_t pivot)
+{
+ if (unlikely(s->depth == s->size && !stack_expand(s))) return -1;
+
+ s->data[s->depth++] = pivot;
+ return 0;
+}
+
+static inline CC_HINT(always_inline, nonnull) void stack_pop(pivot_stack_t *s, size_t n)
+{
+ s->depth -= n;
+}
+
+static inline CC_HINT(always_inline, nonnull) size_t stack_depth(pivot_stack_t *s)
+{
+ return s->depth;
+}
+
+static inline CC_HINT(always_inline, nonnull) fr_lst_index_t stack_item(pivot_stack_t *s, stack_index_t index)
+{
+ return s->data[index];
+}
+
+static inline CC_HINT(always_inline, nonnull) void stack_set(pivot_stack_t *s, stack_index_t index, fr_lst_index_t new_value)
+{
+ s->data[index] = new_value;
+}
+
+fr_lst_t *_fr_lst_alloc(TALLOC_CTX *ctx, fr_lst_cmp_t cmp, char const *type, size_t offset)
+{
+ fr_lst_t *lst;
+
+ lst = talloc_zero(ctx, fr_lst_t);
+ if (!lst) return NULL;
+
+ lst->capacity = INITIAL_CAPACITY;
+ lst->p = talloc_array(lst, void *, lst->capacity);
+ if (!lst->p) {
+ cleanup:
+ talloc_free(lst);
+ return NULL;
+ }
+
+ lst->s = stack_alloc(lst);
+ if (!lst->s) goto cleanup;
+
+ /* Initially the LST is empty and we start at the beginning of the array */
+ stack_push(lst->s, 0);
+ lst->idx = 0;
+
+ /* Prepare for random choices */
+ lst->rand_ctx.a = fr_rand();
+ lst->rand_ctx.b = fr_rand();
+
+ lst->type = type;
+ lst->cmp = cmp;
+ lst->offset = offset;
+
+ return lst;
+}
+
+/*
+ * The length function for LSTs (how many buckets a (sub)tree contains)
+ */
+static inline CC_HINT(always_inline, nonnull) stack_index_t lst_length(fr_lst_t *lst, stack_index_t stack_index)
+{
+ return stack_depth(lst->s) - stack_index;
+}
+
+/*
+ * The size function for LSTs (number of items a (sub)tree contains)
+ */
+static CC_HINT(nonnull) fr_lst_index_t lst_size(fr_lst_t *lst, stack_index_t stack_index)
+{
+ fr_lst_index_t reduced_right, reduced_idx;
+
+ if (stack_index == 0) return lst->num_elements;
+
+ reduced_right = index_reduce(lst, stack_item(lst->s, stack_index));
+ reduced_idx = index_reduce(lst, lst->idx);
+
+ if (reduced_idx <= reduced_right) return reduced_right - reduced_idx; /* No wraparound--easy. */
+
+ return (lst->capacity - reduced_idx) + reduced_right;
+}
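+
+/*
+ * Worked numbers for the wraparound case (illustrative only): with capacity 8,
+ * reduced idx 6, and a pivot that reduces to 2, the subtree holds the items at
+ * reduced positions 6, 7, 0, and 1, so its size is (8 - 6) + 2 = 4.
+ */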
+
+/*
+ * Flatten an LST, i.e. turn it into the base-case one-bucket [sub]tree.
+ * NOTE: doing so leaves the passed stack_index valid--everything that was
+ * in the left subtree just becomes part of the bucket.
+ */
+static inline CC_HINT(always_inline, nonnull) void lst_flatten(fr_lst_t *lst, stack_index_t stack_index)
+{
+ stack_pop(lst->s, stack_depth(lst->s) - stack_index);
+}
+
+/*
+ * Move data to a specific location in an LST's array.
+ * The caller must have made sure the location is available and exists
+ * in said array.
+ */
+static inline CC_HINT(always_inline, nonnull) void lst_move(fr_lst_t *lst, fr_lst_index_t location, void *data)
+{
+ item(lst, location) = data;
+ item_index(lst, data) = index_reduce(lst, location);
+}
+
+/*
+ * Add data to the bucket of a specified (sub)tree.
+ */
+static void bucket_add(fr_lst_t *lst, stack_index_t stack_index, void *data)
+{
+ fr_lst_index_t new_space;
+
+ /*
+ * For each bucket to the right, starting from the top,
+ * make a space available at the top and move the bottom item
+ * into it. Since ordering within a bucket doesn't matter, we
+ * can do that, minimizing moving and index adjustment.
+ *
+ * The fictitious pivot doesn't correspond to an actual value,
+ * so we save pivot moving for the end of the loop.
+ */
+ for (stack_index_t rindex = 0; rindex < stack_index; rindex++) {
+ fr_lst_index_t prev_pivot_index = stack_item(lst->s, rindex + 1);
+ bool empty_bucket;
+
+ new_space = stack_item(lst->s, rindex);
+ empty_bucket = (new_space - prev_pivot_index) == 1;
+ stack_set(lst->s, rindex, new_space + 1);
+
+ if (!empty_bucket) lst_move(lst, new_space, item(lst, prev_pivot_index + 1));
+
+ /* move the pivot up, leaving space for the next bucket */
+ lst_move(lst, prev_pivot_index + 1, item(lst, prev_pivot_index));
+ }
+
+ /*
+ * If the bucket isn't the leftmost, the above loop has made space
+ * available where the pivot used to be.
+ * If it is the leftmost, the loop wasn't executed, but the fictitious
+ * pivot isn't there, which is just as good.
+ */
+ new_space = stack_item(lst->s, stack_index);
+ stack_set(lst->s, stack_index, new_space + 1);
+ lst_move(lst, new_space, data);
+
+ lst->num_elements++;
+}
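+
+/*
+ * A worked sketch of bucket_add() (ours, not from the paper). Take a seven-
+ * element LST with idx = 0, pivot stack (bottom to top) 7, 4, 2, and array
+ *
+ * a b p2 c p1 d e .
+ * 0 1 2 3 4 5 6 7
+ *
+ * Adding x to bucket 1 (stack_index 1) runs one loop iteration: bucket 0's
+ * bottom item d moves into the free slot 7, pivot p1 moves up to slot 5, and
+ * the stack becomes 8, 5, 2. x then lands in the vacated slot 4:
+ *
+ * a b p2 c x p1 e d
+ *
+ * Order within a bucket doesn't matter, so only O(depth) items move.
+ */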
+
+/*
+ * Reduce pivot stack indices based on their difference from lst->idx,
+ * and then reduce lst->idx.
+ */
+static void lst_indices_reduce(fr_lst_t *lst)
+{
+ fr_lst_index_t reduced_idx = index_reduce(lst, lst->idx);
+ stack_index_t depth = stack_depth(lst->s);
+
+ for (stack_index_t i = 0; i < depth; i++) {
+ stack_set(lst->s, i, reduced_idx + stack_item(lst->s, i) - lst->idx);
+ }
+ lst->idx = reduced_idx;
+}
+
+/*
+ * Make more space available in an LST.
+ * The LST paper only mentions this option in passing, pointing out that it's O(n); the only
+ * constructor in the paper lets you hand it an array of items to initially insert
+ * in the LST, so elements will have to be removed to make room for more (though it's
+ * easy to see how one could specify extra space).
+ *
+ * Were it not for the circular array optimization, it would be talloc_realloc() and done;
+ * it works or it doesn't. (That's still O(n), since it may require copying the data.)
+ *
+ * With the circular array optimization, if lst->idx refers to something other than the
+ * beginning of the array, you have to move the elements preceding it to the beginning of the
+ * newly-available space so it's still contiguous, and keep pivot stack entries consistent
+ * with the positions of the elements.
+ */
+static bool lst_expand(fr_lst_t *lst)
+{
+ void **n;
+ size_t n_capacity = 2 * lst->capacity;
+ fr_lst_index_t old_capacity = lst->capacity;
+
+ n = talloc_realloc(lst, lst->p, void *, n_capacity);
+ if (unlikely(!n)) {
+ fr_strerror_printf("Failed expanding lst to %zu elements (%zu bytes)",
+ n_capacity, n_capacity * sizeof(void *));
+ return false;
+ }
+
+ lst->p = n;
+ lst->capacity = n_capacity;
+
+ lst_indices_reduce(lst);
+
+ for (fr_lst_index_t i = 0; i < lst->idx; i++) {
+ void *to_be_moved = item(lst, i);
+ fr_lst_index_t new_index = item_index(lst, to_be_moved) + old_capacity;
+ lst_move(lst, new_index, to_be_moved);
+ }
+
+ return true;
+}
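+
+/*
+ * A sketch with illustrative numbers: expansion only happens when the LST is
+ * full, so when capacity 8 doubles to 16 with reduced idx 5, the five wrapped
+ * elements at reduced positions 0..4 move to 8..12, leaving all eight
+ * elements contiguous at positions 5..12.
+ */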
+
+static inline CC_HINT(always_inline, nonnull) fr_lst_index_t bucket_lwb(fr_lst_t *lst, stack_index_t stack_index)
+{
+ if (is_bucket(lst, stack_index)) return lst->idx;
+ return stack_item(lst->s, stack_index + 1) + 1;
+}
+
+/*
+ * Note: buckets can be empty.
+ */
+static inline CC_HINT(always_inline, nonnull) fr_lst_index_t bucket_upb(fr_lst_t *lst, stack_index_t stack_index)
+{
+ return stack_item(lst->s, stack_index) - 1;
+}
+
+/*
+ * Partition an LST
+ * It's only called for trees that are a single nonempty bucket;
+ * if it's a subtree, it is thus necessarily the leftmost.
+ */
+static void partition(fr_lst_t *lst, stack_index_t stack_index)
+{
+ fr_lst_index_t low = bucket_lwb(lst, stack_index);
+ fr_lst_index_t high = bucket_upb(lst, stack_index);
+ fr_lst_index_t l, h;
+ fr_lst_index_t pivot_index;
+ void *pivot;
+ void *temp;
+
+ /*
+ * Hoare partition doesn't do the trivial case, so catch it here.
+ */
+ if (is_equivalent(lst, low, high)) {
+ stack_push(lst->s, low);
+ return;
+ }
+
+ pivot_index = low + (fr_fast_rand(&lst->rand_ctx) % (high + 1 - low));
+ pivot = item(lst, pivot_index);
+
+ if (pivot_index != low) {
+ lst_move(lst, pivot_index, item(lst, low));
+ lst_move(lst, low, pivot);
+ }
+
+ /*
+ * Hoare partition; on average, it does a third as many swaps as
+ * Lomuto.
+ */
+ l = low - 1;
+ h = high + 1;
+ for (;;) {
+ while (lst->cmp(item(lst, --h), pivot) > 0) ;
+ while (lst->cmp(item(lst, ++l), pivot) < 0) ;
+ if (l >= h) break;
+ temp = item(lst, l);
+ lst_move(lst, l, item(lst, h));
+ lst_move(lst, h, temp);
+ }
+
+ /*
+ * Hoare partition doesn't guarantee the pivot sits at location h
+ * the way Lomuto does and LST needs, so first get its location...
+ */
+ pivot_index = item_index(lst, pivot);
+ if (pivot_index >= index_reduce(lst, low)) {
+ pivot_index = low + pivot_index - index_reduce(lst, low);
+ } else {
+ pivot_index = high - (index_reduce(lst, high) - pivot_index);
+ }
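+ /*
+ * (Illustrative numbers: capacity 8, low = 6, high = 10. A pivot at
+ * reduced index 1 is below index_reduce(low) = 6, so it maps back to
+ * high - (index_reduce(high) - 1) = 10 - (2 - 1) = 9.)
+ */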
+
+ /*
+ * ...and then move it if need be.
+ */
+ if (pivot_index < h) {
+ lst_move(lst, pivot_index, item(lst, h));
+ lst_move(lst, h, pivot);
+ }
+ if (pivot_index > h) {
+ h++;
+ lst_move(lst, pivot_index, item(lst, h));
+ lst_move(lst, h, pivot);
+ }
+
+ stack_push(lst->s, h);
+}
+
+/*
+ * Delete an item from a bucket in an LST
+ */
+static void bucket_delete(fr_lst_t *lst, stack_index_t stack_index, void *data)
+{
+ fr_lst_index_t location = item_index(lst, data);
+ fr_lst_index_t top;
+
+ if (is_equivalent(lst, location, lst->idx)) {
+ lst->idx++;
+ if (is_equivalent(lst, lst->idx, 0)) lst_indices_reduce(lst);
+ } else {
+ for (;;) {
+ top = bucket_upb(lst, stack_index);
+ if (!is_equivalent(lst, location, top)) lst_move(lst, location, item(lst, top));
+ stack_set(lst->s, stack_index, top);
+ if (stack_index == 0) break;
+ lst_move(lst, top, item(lst, top + 1));
+ stack_index--;
+ location = top + 1;
+ }
+ }
+
+ lst->num_elements--;
+ item_index(lst, data) = -1;
+}
+
+/*
+ * We precede each function that does the real work with a Pythonish
+ * (but colon-free) version of the pseudocode from the paper.
+ *
+ * clang, in version 13, will have a way to force tail call optimization
+ * with a "musttail" attribute. gcc has -f-foptimize-sibling-calls, but
+ * it works only with -O[23s]. For now, -O2 will assure TCO. In its absence,
+ * the recursion depth is bounded by the number of pivot stack entries, aka
+ * the "length" of the LST, which has an expected value proportional to
+ * log(number of nodes).
+ *
+ * NOTE: inlining a recursive function is not advisable, so no
+ * always_inline here.
+ */
+
+/*
+ * ExtractMin(LST T ) // assumes s(T ) > 0
+ * If T = bucket(B) Then
+ * Partition(T ) // O(|B|)
+ * Let T = tree(r, L, B )
+ * If s(L) = 0 Then
+ * Flatten T into bucket(B ) // O(1)
+ * Remove r from bucket B // O(1)
+ * Return r
+ * Else
+ * Return ExtractMin(L)
+ */
+static inline CC_HINT(nonnull) void *_fr_lst_pop(fr_lst_t *lst, stack_index_t stack_index)
+{
+ if (is_bucket(lst, stack_index)) partition(lst, stack_index);
+ ++stack_index;
+ if (lst_size(lst, stack_index) == 0) {
+ void *min = pivot_item(lst, stack_index);
+
+ lst_flatten(lst, stack_index);
+ bucket_delete(lst, stack_index, min);
+ return min;
+ }
+ return _fr_lst_pop(lst, stack_index);
+}
+
+/*
+ * FindMin(LST T ) // assumes s(T ) > 0
+ * If T = bucket(B) Then
+ * Partition(T ) // O(|B|)
+ * Let T = tree(r, L, B )
+ * If s(L) = 0 Then
+ * Return r
+ * Else
+ * Return FindMin(L)
+ */
+static inline CC_HINT(nonnull) void *_fr_lst_peek(fr_lst_t *lst, stack_index_t stack_index)
+{
+ if (is_bucket(lst, stack_index)) partition(lst, stack_index);
+ ++stack_index;
+ if (lst_size(lst, stack_index) == 0) return pivot_item(lst, stack_index);
+ return _fr_lst_peek(lst, stack_index);
+}
+
+/*
+ * Delete(LST T, x ∈ Z)
+ * If T = bucket(B) Then
+ * Remove x from bucket B // O(depth)
+ * Else
+ * Let T = tree(r, L, B′)
+ * If x < r Then
+ * Delete(L, x)
+ * Else If x > r Then
+ * Remove x from bucket B ′ // O(depth)
+ * Else
+ * Flatten T into bucket(B′′) // O(1)
+ * Remove x from bucket B′′ // O(depth)
+ */
+static inline CC_HINT(nonnull) void _fr_lst_extract(fr_lst_t *lst, stack_index_t stack_index, void *data)
+{
+ int8_t cmp;
+
+ if (is_bucket(lst, stack_index)) {
+ bucket_delete(lst, stack_index, data);
+ return;
+ }
+ stack_index++;
+ cmp = lst->cmp(data, pivot_item(lst, stack_index));
+ if (cmp < 0) {
+ _fr_lst_extract(lst, stack_index, data);
+ } else if (cmp > 0) {
+ bucket_delete(lst, stack_index - 1, data);
+ } else {
+ lst_flatten(lst, stack_index);
+ bucket_delete(lst, stack_index, data);
+ }
+}
+
+/*
+ * Insert(LST T, x ∈ Z)
+ * If T = bucket(B) Then
+ * Add x to bucket B // O(depth)
+ * Else
+ * Let T = tree(r, L, B)
+ * If random(s(T) + 1) != 1 Then
+ * If x < r Then
+ * Insert(L, x)
+ * Else
+ * Add x to bucket B // O(depth)
+ * Else
+ * Flatten T into bucket(B′) // O(1)
+ * Add x to bucket B′ // O(depth)
+ */
+static inline CC_HINT(nonnull) void _fr_lst_insert(fr_lst_t *lst, stack_index_t stack_index, void *data)
+{
+ if (is_bucket(lst, stack_index)) {
+ bucket_add(lst, stack_index, data);
+ return;
+ }
+ stack_index++;
+ if (fr_fast_rand(&lst->rand_ctx) % (lst_size(lst, stack_index) + 1) != 0) {
+ if (lst->cmp(data, pivot_item(lst, stack_index)) < 0) {
+ _fr_lst_insert(lst, stack_index, data);
+ } else {
+ bucket_add(lst, stack_index - 1, data);
+ }
+ } else {
+ lst_flatten(lst, stack_index);
+ bucket_add(lst, stack_index, data);
+ }
+}
+
+/*
+ * We represent a (sub)tree with an (lst, stack index) pair, so
+ * fr_lst_pop(), fr_lst_peek(), and fr_lst_extract() are minimal
+ * wrappers that
+ *
+ * (1) hide our representation from the user and preserve the interface
+ * (2) check preconditions
+ */
+
+void *fr_lst_pop(fr_lst_t *lst)
+{
+ if (unlikely(lst->num_elements == 0)) return NULL;
+ return _fr_lst_pop(lst, 0);
+}
+
+void *fr_lst_peek(fr_lst_t *lst)
+{
+ if (unlikely(lst->num_elements == 0)) return NULL;
+ return _fr_lst_peek(lst, 0);
+}
+
+int fr_lst_extract(fr_lst_t *lst, void *data)
+{
+ if (unlikely(lst->num_elements == 0)) {
+ fr_strerror_const("Tried to extract element from empty LST");
+ return -1;
+ }
+
+ if (unlikely(item_index(lst, data) < 0)) {
+ fr_strerror_const("Tried to extract element not in LST");
+ return -1;
+ }
+
+ _fr_lst_extract(lst, 0, data);
+ return 1;
+}
+
+int fr_lst_insert(fr_lst_t *lst, void *data)
+{
+ fr_lst_index_t data_index;
+
+ /*
+ * Expand if need be. Not in the paper, but we want the capability.
+ */
+ if (unlikely(lst->num_elements == lst->capacity && !lst_expand(lst))) return -1;
+
+ /*
+ * Don't insert something that looks like it's already in an LST.
+ */
+ data_index = item_index(lst, data);
+ if (unlikely(data_index > 0 ||
+ (data_index == 0 && lst->num_elements > 0 && lst->idx == 0 && item(lst, 0) == data))) {
+ fr_strerror_const("Node is already in the LST");
+ return -1;
+ }
+
+ _fr_lst_insert(lst, 0, data);
+ return 1;
+}
+
+fr_lst_index_t fr_lst_num_elements(fr_lst_t *lst)
+{
+ return lst->num_elements;
+}
+
+void *fr_lst_iter_init(fr_lst_t *lst, fr_lst_iter_t *iter)
+{
+ if (unlikely(!lst) || (lst->num_elements == 0)) return NULL;
+
+ *iter = lst->idx;
+ return item(lst, *iter);
+}
+
+void *fr_lst_iter_next(fr_lst_t *lst, fr_lst_iter_t *iter)
+{
+ if (unlikely(!lst)) return NULL;
+
+ if ((*iter + 1) >= stack_item(lst->s, 0)) return NULL;
+ *iter += 1;
+
+ return item(lst, *iter);
+}
--- /dev/null
+#pragma once
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+ */
+
+/** Structures and prototypes for leftmost skeleton trees (LSTs)
+ *
+ * @file src/lib/util/lst.h
+ *
+ * @copyright 2021 Network RADIUS SARL (legal@networkradius.com)
+ */
+RCSIDH(lst_h, "$Id$")
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <freeradius-devel/build.h>
+#include <freeradius-devel/util/talloc.h>
+
+#include <stdint.h>
+#include <freeradius-devel/util/heap.h>
+
+typedef struct fr_lst_s fr_lst_t;
+
+/*
+ * The type of LST indexes.
+ * The type passed to fr_lst_alloc() and fr_lst_talloc_alloc() in _type must be the
+ * type of a structure with a member of type fr_lst_index_t. That member's name must be
+ * passed as the _field argument.
+ */
+typedef int fr_lst_index_t;
+
+typedef fr_lst_index_t fr_lst_iter_t;
+
+/*
+ * Return a negative number to make a "precede" b.
+ * Return a positive number to make a "follow" b.
+ */
+typedef int8_t (*fr_lst_cmp_t)(void const *a, void const *b);
+
+/** Creates an LST that can be used with non-talloced elements
+ *
+ * @param[in] _ctx Talloc ctx to allocate LST in.
+ * @param[in] _cmp Comparator used to compare elements.
+ * @param[in] _type Of elements.
+ * @param[in] _field to store LST indexes in.
+ */
+#define fr_lst_alloc(_ctx, _cmp, _type, _field) \
+ _fr_lst_alloc(_ctx, _cmp, NULL, (size_t)offsetof(_type, _field))
+
+/** Creates an LST that verifies elements are of a specific talloc type
+ *
+ * @param[in] _ctx Talloc ctx to allocate LST in.
+ * @param[in] _cmp Comparator used to compare elements.
+ * @param[in] _talloc_type of elements.
+ * @param[in] _field to store heap indexes in.
+ * @return
+ * - A pointer to the new LST.
+ * - NULL on error.
+ */
+#define fr_lst_talloc_alloc(_ctx, _cmp, _talloc_type, _field) \
+ _fr_lst_alloc(_ctx, _cmp, #_talloc_type, (size_t)offsetof(_talloc_type, _field))
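+
+/*
+ * A minimal usage sketch (ours, purely illustrative; the struct, comparator,
+ * and variable names below are not part of the API):
+ *
+ * typedef struct {
+ * uint32_t priority;
+ * fr_lst_index_t idx; // maintained by the LST
+ * } job_t;
+ *
+ * static int8_t job_cmp(void const *a, void const *b)
+ * {
+ * job_t const *ja = a, *jb = b;
+ *
+ * return (ja->priority > jb->priority) - (ja->priority < jb->priority);
+ * }
+ *
+ * fr_lst_t *lst = fr_lst_alloc(NULL, job_cmp, job_t, idx);
+ * fr_lst_insert(lst, &job); // non-negative return on success
+ * job_t *next = fr_lst_pop(lst); // lowest priority value first
+ */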
+
+fr_lst_t *_fr_lst_alloc(TALLOC_CTX *ctx, fr_lst_cmp_t cmp, char const *type, size_t offset) CC_HINT(nonnull(2));
+
+void *fr_lst_peek(fr_lst_t *lst) CC_HINT(nonnull);
+
+void *fr_lst_pop(fr_lst_t *lst) CC_HINT(nonnull);
+
+int fr_lst_insert(fr_lst_t *lst, void *data) CC_HINT(nonnull);
+
+/** Remove an element from an LST
+ *
+ * @param[in] lst the LST to remove an element from
+ * @param[in] data the element to remove
+ * @return
+ * - a non-negative value if removal succeeds
+ * - -1 if removal fails
+ */
+int fr_lst_extract(fr_lst_t *lst, void *data) CC_HINT(nonnull);
+
+fr_lst_index_t fr_lst_num_elements(fr_lst_t *lst) CC_HINT(nonnull);
+
+/** Iterate over entries in LST
+ *
+ * @note If the LST is modified, the iterator should be considered invalidated.
+ *
+ * @param[in] lst to iterate over.
+ * @param[in] iter Pointer to an iterator struct, used to maintain
+ * state between calls.
+ * @return
+ * - User data.
+ * - NULL if at the end of the list.
+ */
+void *fr_lst_iter_init(fr_lst_t *lst, fr_lst_iter_t *iter);
+
+/** Get the next entry in an LST
+ *
+ * @note If the LST is modified, the iterator should be considered invalidated.
+ *
+ * @param[in] lst to iterate over.
+ * @param[in] iter Pointer to an iterator struct, used to maintain
+ * state between calls.
+ * @return
+ * - User data.
+ * - NULL if at the end of the list.
+ */
+void *fr_lst_iter_next(fr_lst_t *lst, fr_lst_iter_t *iter);
+
+#ifdef __cplusplus
+}
+#endif
--- /dev/null
+#include <freeradius-devel/util/acutest.h>
+#include <freeradius-devel/util/acutest_helpers.h>
+#include <freeradius-devel/util/rand.h>
+#include <freeradius-devel/util/time.h>
+
+/*
+ * This counterintuitive #include gives these separately-compiled tests
+ * access to fr_lst_t internals that lst.h doesn't reveal
+ * to those who #include it.
+ */
+#include "lst.c"
+
+typedef struct {
+ int data;
+ fr_lst_index_t index;
+ bool visited; /* Only used by iterator test */
+} heap_thing;
+
+#if 0
+static void lst_validate(fr_lst_t *lst, bool show_items);
+#endif
+
+static bool fr_lst_contains(fr_lst_t *lst, void *data)
+{
+ int size = fr_lst_num_elements(lst);
+
+ for (int i = 0; i < size; i++) if (item(lst, i + lst->idx) == data) return true;
+
+ return false;
+}
+
+static int8_t heap_cmp(void const *one, void const *two)
+{
+ heap_thing const *item1 = one, *item2 = two;
+
+ return (item1->data > item2->data) - (item2->data > item1->data);
+}
+
+#define NVALUES 20
+static void lst_test_basic(void)
+{
+ fr_lst_t *lst;
+ heap_thing values[NVALUES];
+ fr_fast_rand_t rand_ctx;
+
+ lst = fr_lst_alloc(NULL, heap_cmp, heap_thing, index);
+ TEST_CHECK(lst != NULL);
+
+ for (int i = 0; i < NVALUES; i++) {
+ values[i].data = i;
+ values[i].index = 0;
+ }
+
+ /* shuffle values before insertion, so the LST has to work to give them back in order */
+ rand_ctx.a = fr_rand();
+ rand_ctx.b = fr_rand();
+
+ for (int i = 0; i < NVALUES - 1; i++) {
+ int j = fr_fast_rand(&rand_ctx) % (NVALUES - i);
+ int temp = values[i].data;
+
+ values[i].data = values[j].data;
+ values[j].data = temp;
+ }
+
+ for (int i = 0; i < NVALUES; i++) fr_lst_insert(lst, &values[i]);
+
+ for (int i = 0; i < NVALUES; i++) {
+ heap_thing *value = fr_lst_pop(lst);
+
+ TEST_CHECK(value != NULL);
+ TEST_CHECK(value->data == i);
+ }
+ talloc_free(lst);
+}
+
+#define LST_TEST_SIZE (4096)
+
+static void lst_test(int skip)
+{
+ fr_lst_t *lst;
+ int i;
+ heap_thing *array;
+ int left;
+ int ret;
+
+ static bool done_init = false;
+
+ if (!done_init) {
+ srand((unsigned int)time(NULL));
+ done_init = true;
+ }
+
+ lst = fr_lst_alloc(NULL, heap_cmp, heap_thing, index);
+ TEST_CHECK(lst != NULL);
+
+ array = malloc(sizeof(heap_thing) * LST_TEST_SIZE);
+
+ /*
+ * Initialise random values
+ */
+ for (i = 0; i < LST_TEST_SIZE; i++) array[i].data = rand() % 65537;
+
+ TEST_CASE("insertions");
+ for (i = 0; i < LST_TEST_SIZE; i++) {
+ TEST_CHECK((ret = fr_lst_insert(lst, &array[i])) >= 0);
+ TEST_MSG("insert failed, returned %i - %s", ret, fr_strerror());
+
+ TEST_CHECK(fr_lst_contains(lst, &array[i]));
+ TEST_MSG("element %i inserted but not in LST", i);
+ }
+
+ TEST_CASE("deletions");
+ for (int entry = 0; entry < LST_TEST_SIZE; entry += skip) {
+ TEST_CHECK(array[entry].index != -1);
+ TEST_MSG("element %i removed out of order", entry);
+
+ TEST_CHECK((ret = fr_lst_extract(lst, &array[entry])) >= 0);
+ TEST_MSG("element %i removal failed, returned %i", entry, ret);
+
+ TEST_CHECK(!fr_lst_contains(lst, &array[entry]));
+ TEST_MSG("element %i removed but still in LST", entry);
+
+ TEST_CHECK(array[entry].index == -1);
+ TEST_MSG("element %i removed out of order", entry);
+ }
+
+ left = fr_lst_num_elements(lst);
+ for (i = 0; i < left; i++) {
+ TEST_CHECK(fr_lst_pop(lst) != NULL);
+ TEST_MSG("expected %i elements remaining in the heap", left - i);
+ TEST_MSG("failed extracting %i", i);
+ }
+
+ TEST_CHECK((ret = fr_lst_num_elements(lst)) == 0);
+ TEST_MSG("%i elements remaining", ret);
+
+ talloc_free(lst);
+ free(array);
+}
+
+static void lst_test_skip_1(void)
+{
+ lst_test(1);
+}
+
+static void lst_test_skip_2(void)
+{
+ lst_test(2);
+}
+
+static void lst_test_skip_10(void)
+{
+ lst_test(10);
+}
+
+static void lst_stress_realloc(void)
+{
+ fr_lst_t *lst;
+ fr_heap_t *hp;
+ heap_thing *lst_array, *hp_array;
+ static bool done_init = false;
+ int ret;
+ heap_thing *from_lst, *from_hp;
+
+ if (!done_init) {
+ srand((unsigned int) time(NULL));
+ done_init = true;
+ }
+
+ lst = fr_lst_alloc(NULL, heap_cmp, heap_thing, index);
+ TEST_CHECK(lst != NULL);
+ hp = fr_heap_alloc(NULL, heap_cmp, heap_thing, index);
+
+ lst_array = calloc(2 * INITIAL_CAPACITY, sizeof(heap_thing));
+ hp_array = calloc(2 * INITIAL_CAPACITY, sizeof(heap_thing));
+
+ /*
+ * Initialise random values
+ */
+ for (int i = 0; i < 2 * INITIAL_CAPACITY; i++) lst_array[i].data = hp_array[i].data = rand() % 65537;
+
+ /* Add the first INITIAL_CAPACITY values to lst and to hp */
+ TEST_CASE("partial fill");
+ for (int i = 0; i < INITIAL_CAPACITY; i++) {
+ TEST_CHECK((ret = fr_lst_insert(lst, &lst_array[i])) >= 0);
+ TEST_MSG("lst insert failed, iteration %d; returned %i - %s", i, ret, fr_strerror());
+ TEST_CHECK((ret = fr_heap_insert(hp, &hp_array[i])) >= 0);
+ TEST_MSG("heap insert failed, iteration %d; returned %i - %s", i, ret, fr_strerror());
+ }
+
+ /* Pop INITIAL_CAPACITY / 2 values from each (they should all be equal) */
+ TEST_CASE("partial pop");
+ for (int i = 0; i < INITIAL_CAPACITY / 2; i++) {
+ TEST_CHECK((from_lst = fr_lst_pop(lst)) != NULL);
+ TEST_CHECK((from_hp = fr_heap_pop(hp)) != NULL);
+ TEST_CHECK(heap_cmp(from_lst, from_hp) == 0);
+ }
+
+ /*
+ * Add the second INITIAL_CAPACITY values to lst and to hp.
+ * This should force lst to move entries to maintain adjacency,
+ * which is what we're testing here.
+ */
+ TEST_CASE("force move with expansion");
+ for (int i = INITIAL_CAPACITY; i < 2 * INITIAL_CAPACITY; i++) {
+ TEST_CHECK((ret = fr_lst_insert(lst, &lst_array[i])) >= 0);
+ TEST_MSG("lst insert failed, iteration %d; returned %i - %s", i, ret, fr_strerror());
+ TEST_CHECK((ret = fr_heap_insert(hp, &hp_array[i])) >= 0);
+ TEST_MSG("heap insert failed, iteration %d; returned %i - %s", i, ret, fr_strerror());
+ }
+
+ /* pop the remaining 3 * INITIAL_CAPACITY / 2 values from each (they should all be equal) */
+ TEST_CASE("complete pop");
+ for (int i = 0; i < 3 * INITIAL_CAPACITY / 2; i++) {
+ TEST_CHECK((from_lst = fr_lst_pop(lst)) != NULL);
+ TEST_CHECK((from_hp = fr_heap_pop(hp)) != NULL);
+ TEST_CHECK(heap_cmp(from_lst, from_hp) == 0);
+ }
+
+ TEST_CHECK(fr_lst_num_elements(lst) == 0);
+ TEST_CHECK(fr_heap_num_elements(hp) == 0);
+
+ talloc_free(lst);
+ talloc_free(hp);
+ free(lst_array);
+ free(hp_array);
+}
+
+#define BURN_IN_OPS (10000000)
+
+static void lst_burn_in(void)
+{
+ fr_lst_t *lst = NULL;
+ heap_thing *array = NULL;
+ static bool done_init = false;
+ int insert_count = 0;
+ int element_count = 0;
+
+ if (!done_init) {
+ srand((unsigned int) time(0));
+ done_init = true;
+ }
+
+ array = calloc(BURN_IN_OPS, sizeof(heap_thing));
+ for (int i = 0; i < BURN_IN_OPS; i++) array[i].data = rand() % 65537;
+
+ lst = fr_lst_alloc(NULL, heap_cmp, heap_thing, index);
+
+ for (int i = 0; i < BURN_IN_OPS; i++) {
+ heap_thing *ret_thing = NULL;
+ int ret_insert = -1;
+
+ if (fr_lst_num_elements(lst) == 0) {
+ insert:
+ TEST_CHECK((ret_insert = fr_lst_insert(lst, &array[insert_count])) >= 0);
+ insert_count++;
+ element_count++;
+ } else {
+ switch (rand() % 3) {
+ case 0: /* insert */
+ goto insert;
+
+ case 1: /* pop */
+ ret_thing = fr_lst_pop(lst);
+ TEST_CHECK(ret_thing != NULL);
+ element_count--;
+ break;
+ case 2: /* peek */
+ ret_thing = fr_lst_peek(lst);
+ TEST_CHECK(ret_thing != NULL);
+ break;
+ }
+ }
+ }
+
+ TEST_CHECK(fr_lst_num_elements(lst) == element_count);
+
+ talloc_free(lst);
+ free(array);
+}
+
+#define LST_CYCLE_SIZE (1600000)
+
+static void lst_cycle(void)
+{
+ fr_lst_t *lst;
+ int i;
+ heap_thing *array;
+ int to_remove;
+ int inserted, removed;
+ int ret;
+ fr_time_t start_insert, start_remove, start_swap, end;
+
+ static bool done_init = false;
+
+ if (!done_init) {
+ srand((unsigned int)time(NULL));
+ done_init = true;
+ }
+
+ lst = fr_lst_alloc(NULL, heap_cmp, heap_thing, index);
+ TEST_CHECK(lst != NULL);
+
+ array = calloc(LST_CYCLE_SIZE, sizeof(heap_thing));
+
+ /*
+ * Initialise random values
+ */
+ for (i = 0; i < LST_CYCLE_SIZE; i++) array[i].data = rand() % 65537;
+
+ start_insert = fr_time();
+ TEST_CASE("insertions");
+ for (i = 0; i < LST_CYCLE_SIZE; i++) {
+ TEST_CHECK((ret = fr_lst_insert(lst, &array[i])) >= 0);
+ TEST_MSG("insert failed, returned %i - %s", ret, fr_strerror());
+ }
+ TEST_CHECK(fr_lst_num_elements(lst) == LST_CYCLE_SIZE);
+
+ TEST_CASE("pop");
+
+ /*
+ * Remove half of the elements from the LST
+ */
+ to_remove = fr_lst_num_elements(lst) / 2;
+ start_remove = fr_time();
+ for (i = 0; i < to_remove; i++) {
+ TEST_CHECK(fr_lst_pop(lst) != NULL);
+ TEST_MSG("failed extracting %i", i);
+ TEST_MSG("expected %i elements remaining in the LST", to_remove - i);
+ }
+
+ /*
+ * Now swap the inserted and removed set creating churn
+ */
+ start_swap = fr_time();
+
+ inserted = 0;
+ removed = 0;
+
+ for (i = 0; i < LST_CYCLE_SIZE; i++) {
+ if (array[i].index == -1) {
+ TEST_CHECK((ret = fr_lst_insert(lst, &array[i])) >= 0);
+ TEST_MSG("insert failed, returned %i - %s", ret, fr_strerror());
+ inserted++;
+ } else {
+ TEST_CHECK((ret = fr_lst_extract(lst, &array[i])) >= 0);
+ TEST_MSG("element %i removal failed, returned %i", i, ret);
+ removed++;
+ }
+ }
+
+ TEST_CHECK(removed == (LST_CYCLE_SIZE - to_remove));
+ TEST_MSG("expected %i", LST_CYCLE_SIZE - to_remove);
+ TEST_MSG("got %i", removed);
+
+ TEST_CHECK(inserted == to_remove);
+ TEST_MSG("expected %i", to_remove);
+ TEST_MSG("got %i", inserted);
+
+ end = fr_time();
+
+ TEST_MSG_ALWAYS("\ncycle size: %d\n", LST_CYCLE_SIZE);
+ TEST_MSG_ALWAYS("insert: %2.2f ns\n", ((double)(start_remove - start_insert)) / NSEC);
+ TEST_MSG_ALWAYS("extract: %2.2f ns\n", ((double)(start_swap - start_remove)) / NSEC);
+ TEST_MSG_ALWAYS("swap: %2.2f ns\n", ((double)(end - start_swap)) / NSEC);
+
+ talloc_free(lst);
+ free(array);
+}
+
+static void lst_iter(void)
+{
+ fr_lst_t *lst;
+ fr_lst_iter_t iter;
+ heap_thing values[NVALUES], *data;
+ fr_fast_rand_t rand_ctx;
+
+ lst = fr_lst_alloc(NULL, heap_cmp, heap_thing, index);
+ TEST_CHECK(lst != NULL);
+
+ for (int i = 0; i < NVALUES; i++) {
+ values[i].data = i;
+ values[i].index = 0;
+ values[i].visited = false;
+ }
+
+ /* shuffle values before insertion, so the LST has to work to give them back in order */
+ rand_ctx.a = fr_rand();
+ rand_ctx.b = fr_rand();
+
+ for (int i = 0; i < NVALUES - 1; i++) {
+ int j = fr_fast_rand(&rand_ctx) % (NVALUES - i);
+ int temp = values[i].data;
+
+ values[i].data = values[j].data;
+ values[j].data = temp;
+ }
+
+ for (int i = 0; i < NVALUES; i++) fr_lst_insert(lst, &values[i]);
+
+ data = fr_lst_iter_init(lst, &iter);
+
+ for (int i = 0; i < NVALUES; i++, data = fr_lst_iter_next(lst, &iter)) {
+ TEST_CHECK(data != NULL);
+ TEST_CHECK(!data->visited);
+ TEST_CHECK(data->index >= 0);
+ data->visited = true;
+ }
+
+ TEST_CHECK(data == NULL);
+ talloc_free(lst);
+}
+
+#if 0
+static void lst_validate(fr_lst_t *lst, bool show_items)
+{
+ fr_lst_index_t fake_pivot_index, reduced_fake_pivot_index, reduced_end;
+ int depth = stack_depth(lst->s);
+ int bucket_size_sum;
+ bool pivots_in_order = true;
+ bool pivot_indices_in_order = true;
+
+ /*
+ * There has to be at least the fictitious pivot.
+ */
+ if (depth < 1) {
+ TEST_MSG_ALWAYS("LST pivot stack empty");
+ return;
+ }
+
+ /*
+ * Modulo circularity, idx + the number of elements should be the index
+ * of the fictitious pivot.
+ */
+ fake_pivot_index = stack_item(lst->s, 0);
+ reduced_fake_pivot_index = index_reduce(lst, fake_pivot_index);
+ reduced_end = index_reduce(lst, lst->idx + lst->num_elements);
+ if (reduced_fake_pivot_index != reduced_end) {
+ TEST_MSG_ALWAYS("fictitious pivot inconsistent with idx and number of elements");
+ }
+
+ /*
+ * Bucket sizes must make sense.
+ */
+ if (lst->num_elements) {
+ bucket_size_sum = 0;
+
+ for (int stack_index = 0; stack_index < depth; stack_index++) {
+ fr_lst_index_t bucket_size = bucket_upb(lst, stack_index) - bucket_lwb(lst, stack_index) + 1;
+ if (bucket_size > lst->num_elements) {
+ TEST_MSG_ALWAYS("bucket %d size %d is invalid\n", stack_index, bucket_size);
+ }
+ bucket_size_sum += bucket_size;
+ }
+
+ if (bucket_size_sum + depth - 1 != lst->num_elements) {
+ TEST_MSG_ALWAYS("total bucket size inconsistent with number of elements");
+ }
+ }
+
+ /*
+ * No elements should be NULL.
+ */
+ for (fr_lst_index_t i = 0; i < lst->num_elements; i++) {
+ if (!item(lst, lst->idx + i)) TEST_MSG_ALWAYS("null element at %d\n", lst->idx + i);
+ }
+
+ /*
+ * There's nothing more to check for a one-bucket tree.
+ */
+ if (is_bucket(lst, 0)) return;
+
+ /*
+ * Otherwise, first, pivots from left to right (aside from the fictitious
+ * one) should be in ascending order.
+ */
+ for (int stack_index = 1; stack_index + 1 < depth; stack_index++) {
+ heap_thing *current_pivot = pivot_item(lst, stack_index);
+ heap_thing *next_pivot = pivot_item(lst, stack_index + 1);
+
+ if (current_pivot && next_pivot && lst->cmp(current_pivot, next_pivot) < 0) pivots_in_order = false;
+ }
+ if (!pivots_in_order) TEST_MSG_ALWAYS("pivots not in ascending order");
+
+ /*
+ * Next, all non-fictitious pivots must correspond to non-null elements of the array.
+ */
+ for (int stack_index = 1; stack_index < depth; stack_index++) {
+ if (!pivot_item(lst, stack_index)) TEST_MSG_ALWAYS("pivot #%d refers to NULL", stack_index);
+ }
+
+ /*
+ * Next, the stacked pivot indices should decrease as you ascend from
+ * the bottom of the pivot stack. Here we *do* include the fictitious
+ * pivot; we're just comparing indices.
+ */
+ for (int stack_index = 0; stack_index + 1 < depth; stack_index++) {
+ fr_lst_index_t current_pivot_index = stack_item(lst->s, stack_index);
+ fr_lst_index_t previous_pivot_index = stack_item(lst->s, stack_index + 1);
+
+ if (previous_pivot_index >= current_pivot_index) pivot_indices_in_order = false;
+ }
+
+ if (!pivot_indices_in_order) TEST_MSG_ALWAYS("pivot indices not in order");
+
+ /*
+ * Finally...
+ * values in buckets shouldn't "follow" the pivot to the immediate right (if it exists)
+ * and shouldn't "precede" the pivot to the immediate left (if it exists)
+ *
+ * todo: this will find pivot ordering issues as well; get rid of that ultimately,
+ * since pivot-pivot ordering errors are caught above.
+ */
+ for (int stack_index = 0; stack_index < depth; stack_index++) {
+ fr_lst_index_t lwb, upb, pivot_index;
+ void *pivot_item, *element;
+
+ if (stack_index > 0) {
+ lwb = (stack_index + 1 == depth) ? lst->idx : stack_item(lst->s, stack_index + 1);
+ pivot_index = upb = stack_item(lst->s, stack_index);
+ pivot_item = item(lst, pivot_index);
+ for (fr_lst_index_t index = lwb; index < upb; index++) {
+ element = item(lst, index);
+ if (element && pivot_item && lst->cmp(element, pivot_item) > 0) {
+ TEST_MSG_ALWAYS("element at %d > pivot at %d", index, pivot_index);
+ }
+ }
+ }
+ if (stack_index + 1 < depth) {
+ upb = stack_item(lst->s, stack_index);
+ lwb = pivot_index = stack_item(lst->s, stack_index + 1);
+ pivot_item = item(lst, pivot_index);
+ for (fr_lst_index_t index = lwb; index < upb; index++) {
+ element = item(lst, index);
+ if (element && pivot_item && lst->cmp(pivot_item, element) > 0) {
+ TEST_MSG_ALWAYS( "element at %d < pivot at %d", index, pivot_index);
+ }
+ }
+ }
+ }
+}
+#endif
+
+TEST_LIST = {
+ /*
+ * Basic tests
+ */
+ { "lst_test_basic", lst_test_basic },
+ { "lst_test_skip_1", lst_test_skip_1 },
+ { "lst_test_skip_2", lst_test_skip_2 },
+ { "lst_test_skip_10", lst_test_skip_10 },
+ { "lst_stress_realloc", lst_stress_realloc },
+ { "lst_burn_in", lst_burn_in },
+ { "lst_cycle", lst_cycle },
+ { "lst_iter", lst_iter },
+ { NULL }
+};
--- /dev/null
+TARGET := lst_tests
+
+SOURCES := lst_tests.c
+
+TGT_LDLIBS := $(LIBS) $(GPERFTOOLS_LIBS)
+TGT_LDFLAGS := $(LDFLAGS) $(GPERFTOOLS_LDFLAGS)
+
+TGT_PREREQS += libfreeradius-util.a