fr_lst_cmp_t cmp; //!< Comparator function.
};
-static inline fr_lst_index_t stack_item(pivot_stack_t *s, stack_index_t idx) CC_HINT(always_inline, nonnull);
-static inline stack_index_t lst_length(fr_lst_t *lst, stack_index_t stack_index) CC_HINT(always_inline, nonnull);
+static inline fr_lst_index_t stack_item(pivot_stack_t const *s, stack_index_t idx) CC_HINT(always_inline, nonnull);
+static inline stack_index_t lst_length(fr_lst_t const *lst, stack_index_t stack_index) CC_HINT(always_inline, nonnull);
-static inline CC_HINT(always_inline, nonnull) void *index_addr(fr_lst_t *lst, void *data)
+static inline CC_HINT(always_inline, nonnull) void *index_addr(fr_lst_t const *lst, void *data)
{
return ((uint8_t *)data) + (lst)->offset;
}
*
* fr_item_insert() needs to see the value actually stored, hence raw_item_index().
*/
-static inline CC_HINT(always_inline, nonnull) fr_lst_index_t raw_item_index(fr_lst_t *lst, void *data)
+static inline CC_HINT(always_inline, nonnull) fr_lst_index_t raw_item_index(fr_lst_t const *lst, void *data)
{
return *(fr_lst_index_t *)index_addr(lst, data);
}
-static inline CC_HINT(always_inline, nonnull) fr_lst_index_t item_index(fr_lst_t *lst, void *data)
+static inline CC_HINT(always_inline, nonnull) fr_lst_index_t item_index(fr_lst_t const *lst, void *data)
{
return raw_item_index(lst, data) - 1;
}
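The two helpers above expose a one-based bias: the store of idx + 1 immediately below means an element's index field is never zero once it is in an LST, while item_index() undoes the bias for internal use. A plausible reading, assumed here rather than stated in the hunk, is that a zero-initialised field can then be told apart from a valid index 0, which is consistent with the values[entry].idx != 0 check in the test changes further down. A minimal standalone sketch of that round trip (illustrative only, not part of the patch):

	#include <assert.h>

	typedef unsigned int fr_lst_index_t;	/* assumed width; the real typedef lives in the header */

	int main(void)
	{
		fr_lst_index_t idx = 0;
		fr_lst_index_t raw = idx + 1;	/* the biased value stored in the element */

		assert(raw != 0);		/* a zeroed field therefore reads as "not inserted" (assumption) */
		assert(raw - 1 == idx);		/* the unbiased index item_index() recovers */
		return 0;
	}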
(*(fr_lst_index_t *)index_addr(lst, data)) = idx + 1;
}
-static inline CC_HINT(always_inline, nonnull) fr_lst_index_t index_reduce(fr_lst_t *lst, fr_lst_index_t idx)
+static inline CC_HINT(always_inline, nonnull) fr_lst_index_t index_reduce(fr_lst_t const *lst, fr_lst_index_t idx)
{
return idx & ((lst)->capacity - 1);
}
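index_reduce() wraps a free-running index back into the circular element array with a bitmask; that is equivalent to a modulo only while the capacity is a power of two, an invariant this hunk assumes is maintained wherever the array is sized. A standalone sketch of the equivalence (illustrative names, not part of the patch):

	#include <assert.h>
	#include <stdint.h>

	/* Mirrors the mask in index_reduce(); valid only for power-of-two capacities. */
	static uint32_t reduce(uint32_t idx, uint32_t capacity)
	{
		return idx & (capacity - 1);
	}

	int main(void)
	{
		uint32_t capacity = 2048;	/* assumed: must be 2^k for the mask to equal idx % capacity */

		for (uint32_t idx = 0; idx < 4 * capacity; idx++) assert(reduce(idx, capacity) == idx % capacity);
		return 0;
	}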
static inline CC_HINT(always_inline, nonnull)
-bool is_equivalent(fr_lst_t *lst, fr_lst_index_t idx1, fr_lst_index_t idx2)
+bool is_equivalent(fr_lst_t const *lst, fr_lst_index_t idx1, fr_lst_index_t idx2)
{
return (index_reduce(lst, idx1 - idx2) == 0);
}
lst->p[index_reduce(lst, idx)] = data;
}
-static inline CC_HINT(always_inline, nonnull) void *item(fr_lst_t *lst, fr_lst_index_t idx)
+static inline CC_HINT(always_inline, nonnull) void *item(fr_lst_t const *lst, fr_lst_index_t idx)
{
return (lst->p[index_reduce(lst, idx)]);
}
-static inline CC_HINT(always_inline, nonnull) void *pivot_item(fr_lst_t *lst, stack_index_t idx)
+static inline CC_HINT(always_inline, nonnull) void *pivot_item(fr_lst_t const *lst, stack_index_t idx)
{
return item(lst, stack_item(&lst->s, idx));
}
* The index is visible for the size and length functions, since they need
* to know the subtree they're working on.
*/
-static inline CC_HINT(always_inline, nonnull) bool is_bucket(fr_lst_t *lst, stack_index_t idx)
+static inline CC_HINT(always_inline, nonnull) bool is_bucket(fr_lst_t const *lst, stack_index_t idx)
{
return lst_length(lst, idx) == 1;
}
s->depth -= n;
}
-static inline CC_HINT(always_inline, nonnull) stack_index_t stack_depth(pivot_stack_t *s)
+static inline CC_HINT(always_inline, nonnull) stack_index_t stack_depth(pivot_stack_t const *s)
{
return s->depth;
}
-static inline fr_lst_index_t stack_item(pivot_stack_t *s, stack_index_t idx)
+static inline fr_lst_index_t stack_item(pivot_stack_t const *s, stack_index_t idx)
{
return s->data[idx];
}
/** The length function for LSTs (how many buckets it contains)
*
*/
-static inline stack_index_t lst_length(fr_lst_t *lst, stack_index_t stack_index)
+static inline stack_index_t lst_length(fr_lst_t const *lst, stack_index_t stack_index)
{
return stack_depth(&lst->s) - stack_index;
}
return true;
}
-static inline CC_HINT(always_inline, nonnull) fr_lst_index_t bucket_lwb(fr_lst_t *lst, stack_index_t stack_index)
+static inline CC_HINT(always_inline, nonnull) fr_lst_index_t bucket_lwb(fr_lst_t const *lst, stack_index_t stack_index)
{
if (is_bucket(lst, stack_index)) return lst->idx;
/*
 *	Note: buckets can be empty.
*/
-static inline CC_HINT(always_inline, nonnull) fr_lst_index_t bucket_upb(fr_lst_t *lst, stack_index_t stack_index)
+static inline CC_HINT(always_inline, nonnull) fr_lst_index_t bucket_upb(fr_lst_t const *lst, stack_index_t stack_index)
{
return stack_item(&lst->s, stack_index) - 1;
}
return item(lst, *iter);
}
+
+#ifndef TALLOC_GET_TYPE_ABORT_NOOP
+void fr_lst_verify(char const *file, int line, fr_lst_t const *lst)
+{
+ fr_lst_index_t fake_pivot_index, reduced_fake_pivot_index, reduced_end;
+ stack_index_t depth = stack_depth(&(lst->s));
+ int bucket_size_sum;
+ bool pivots_in_order = true;
+ bool pivot_indices_in_order = true;
+
+ fr_fatal_assert_msg(lst, "CONSISTENCY CHECK FAILED %s[%i]: LST pointer NULL", file, line);
+ talloc_get_type_abort(lst, fr_lst_t);
+
+ /*
+ * There must be at least the fictitious pivot.
+ */
+ fr_fatal_assert_msg(depth >= 1, "CONSISTENCY CHECK FAILED %s[%i]: LST pivot stack empty", file, line);
+
+ /*
+ * Modulo circularity, idx + the number of elements should be the index
+ * of the fictitious pivot.
+ */
+ fake_pivot_index = stack_item(&(lst->s), 0);
+ reduced_fake_pivot_index = index_reduce(lst, fake_pivot_index);
+ reduced_end = index_reduce(lst, lst->idx + lst->num_elements);
+ fr_fatal_assert_msg(reduced_fake_pivot_index == reduced_end,
+ "CONSISTENCY CHECK FAILED %s[%i]: fictitious pivot doesn't point past last element",
+ file, line);
+
+ /*
+ * Bucket sizes must make sense.
+ */
+ if (lst->num_elements) {
+ bucket_size_sum = 0;
+
+ for (stack_index_t stack_index = 0; stack_index < depth; stack_index++) {
+ fr_lst_index_t bucket_size = bucket_upb(lst, stack_index) - bucket_lwb(lst, stack_index) + 1;
+ fr_fatal_assert_msg(bucket_size <= lst->num_elements,
+ "CONSISTENCY CHECK FAILED %s[%i]: bucket %u size %u is invalid",
+ file, line, stack_index, bucket_size);
+ bucket_size_sum += bucket_size;
+ }
+
+ fr_fatal_assert_msg(bucket_size_sum + depth - 1 == lst->num_elements,
+ "CONSISTENCY CHECK FAILED %s[%i]: buckets inconsistent with number of elements",
+ file, line);
+ }
+
+ /*
+ * No elements should be NULL;
+ * they should have the correct index stored,
+	 *	and if a type is specified, they should point at something of that type.
+ */
+ for (fr_lst_index_t i = 0; i < lst->num_elements; i++) {
+ void *element = item(lst, lst->idx + i);
+
+ fr_fatal_assert_msg(element, "CONSISTENCY CHECK FAILED %s[%i]: null element pointer at %u",
+ file, line, lst->idx + i);
+ fr_fatal_assert_msg(is_equivalent(lst, lst->idx + i, item_index(lst, element)),
+ "CONSISTENCY CHECK FAILED %s[%i]: element %u index mismatch", file, line, i);
+ if (lst->type) (void) _talloc_get_type_abort(element, lst->type, __location__);
+ }
+
+ /*
+ * There's nothing more to check for a one-bucket tree.
+ */
+ if (is_bucket(lst, 0)) return;
+
+ /*
+ * Otherwise, first, pivots from left to right (aside from the fictitious
+ * one) should be in ascending order.
+ */
+ for (stack_index_t stack_index = 1; stack_index + 1 < depth; stack_index++) {
+ void *current_pivot = pivot_item(lst, stack_index);
+ void *next_pivot = pivot_item(lst, stack_index + 1);
+
+ if (current_pivot && next_pivot && lst->cmp(current_pivot, next_pivot) < 0) pivots_in_order = false;
+ }
+ fr_fatal_assert_msg(pivots_in_order, "CONSISTENCY CHECK FAILED %s[%i]: pivots not in ascending order",
+ file, line);
+
+ /*
+ * Next, the stacked pivot indices should decrease as you ascend from
+ * the bottom of the pivot stack. Here we *do* include the fictitious
+ * pivot; we're just comparing indices.
+ */
+ for (stack_index_t stack_index = 0; stack_index + 1 < depth; stack_index++) {
+ fr_lst_index_t current_pivot_index = stack_item(&(lst->s), stack_index);
+ fr_lst_index_t previous_pivot_index = stack_item(&(lst->s), stack_index + 1);
+
+ if (previous_pivot_index >= current_pivot_index) pivot_indices_in_order = false;
+ }
+	fr_fatal_assert_msg(pivot_indices_in_order, "CONSISTENCY CHECK FAILED %s[%i]: pivot indices not in order",
+ file, line);
+
+ /*
+ * Finally...
+ * values in buckets shouldn't "follow" the pivot to the immediate right (if it exists)
+ * and shouldn't "precede" the pivot to the immediate left (if it exists)
+ */
+ for (stack_index_t stack_index = 0; stack_index < depth; stack_index++) {
+ fr_lst_index_t lwb, upb, pivot_index;
+ void *pivot_item, *element;
+
+ if (stack_index > 0) {
+ lwb = (stack_index + 1 == depth) ? lst->idx : stack_item(&(lst->s), stack_index + 1);
+ pivot_index = upb = stack_item(&(lst->s), stack_index);
+ pivot_item = item(lst, pivot_index);
+ for (fr_lst_index_t index = lwb; index < upb; index++) {
+ element = item(lst, index);
+ fr_fatal_assert_msg(!element || !pivot_item || lst->cmp(element, pivot_item) <= 0,
+ "CONSISTENCY CHECK FAILED %s[%i]: element at %u > pivot at %u",
+ file, line, index, pivot_index);
+ }
+ }
+ if (stack_index + 1 < depth) {
+ upb = stack_item(&(lst->s), stack_index);
+ lwb = pivot_index = stack_item(&(lst->s), stack_index + 1);
+ pivot_item = item(lst, pivot_index);
+ for (fr_lst_index_t index = lwb; index < upb; index++) {
+ element = item(lst, index);
+ fr_fatal_assert_msg(!element || !pivot_item || lst->cmp(pivot_item, element) <= 0,
+ "CONSISTENCY CHECK FAILED %s[%i]: element at %u < pivot at %u",
+ file, line, index, pivot_index);
+ }
+ }
+ }
+}
+#endif
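The test hunks below call FR_LST_VERIFY(), which is not shown in this diff; presumably the header wraps fr_lst_verify() so the check compiles away when talloc type checking is disabled, along these lines (a sketch, assumed rather than taken from the patch):

	#ifndef TALLOC_GET_TYPE_ABORT_NOOP
	#  define FR_LST_VERIFY(_lst) fr_lst_verify(__FILE__, __LINE__, _lst)
	#else
	#  define FR_LST_VERIFY(_lst)
	#endif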
TEST_CASE("insertions");
for (i = 0; i < LST_TEST_SIZE; i++) {
+ FR_LST_VERIFY(lst);
TEST_CHECK((ret = fr_lst_insert(lst, &values[i])) >= 0);
TEST_MSG("insert failed, returned %i - %s", ret, fr_strerror());
TEST_CASE("deletions");
for (int entry = 0; entry < LST_TEST_SIZE; entry += skip) {
+ FR_LST_VERIFY(lst);
TEST_CHECK(values[entry].idx != 0);
TEST_MSG("element %i removed out of order", entry);
left = fr_lst_num_elements(lst);
for (i = 0; i < left; i++) {
+ FR_LST_VERIFY(lst);
TEST_CHECK(fr_lst_pop(lst) != NULL);
TEST_MSG("expected %i elements remaining in the lst", left - i);
TEST_MSG("failed extracting %i", i);
queue_cmp(1000);
}
-#if 0
-static void lst_validate(fr_lst_t *lst)
-{
- fr_lst_index_t fake_pivot_index, reduced_fake_pivot_index, reduced_end;
- stack_index_t depth = stack_depth(&(lst->s));
- int bucket_size_sum;
- bool pivots_in_order = true;
- bool pivot_indices_in_order = true;
-
- /*
- * There has to be at least the fictitious pivot.
- */
- if (depth < 1) {
- TEST_MSG_ALWAYS("LST pivot stack empty");
- return;
- }
-
- /*
- * Modulo circularity, idx + the number of elements should be the index
- * of the fictitious pivot.
- */
- fake_pivot_index = stack_item(&(lst->s), 0);
- reduced_fake_pivot_index = index_reduce(lst, fake_pivot_index);
- reduced_end = index_reduce(lst, lst->idx + lst->num_elements);
- if (reduced_fake_pivot_index != reduced_end) {
- TEST_MSG_ALWAYS("fictitious pivot inconsistent with idx and number of elements");
- }
-
- /*
- * Bucket sizes must make sense.
- */
- if (lst->num_elements) {
- bucket_size_sum = 0;
-
- for (stack_index_t stack_index = 0; stack_index < depth; stack_index++) {
- fr_lst_index_t bucket_size = bucket_upb(lst, stack_index) - bucket_lwb(lst, stack_index) + 1;
- if (bucket_size > lst->num_elements) {
- TEST_MSG_ALWAYS("bucket %d size %d is invalid\n", stack_index, bucket_size);
- }
- bucket_size_sum += bucket_size;
- }
-
- if (bucket_size_sum + depth - 1 != lst->num_elements) {
- TEST_MSG_ALWAYS("total bucket size inconsistent with number of elements");
- }
- }
-
- /*d
- * No elements should be NULL.
- */
- for (fr_lst_index_t i = 0; i < lst->num_elements; i++) {
- if (!item(lst, lst->idx + i)) TEST_MSG_ALWAYS("null element at %d\n", lst->idx + i);
- }
-
- /*
- * There's nothing more to check for a one-bucket tree.
- */
- if (is_bucket(lst, 0)) return;
-
- /*
- * Otherwise, first, pivots from left to right (aside from the fictitious
- * one) should be in ascending order.
- */
- for (stack_index_t stack_index = 1; stack_index + 1 < depth; stack_index++) {
- lst_thing *current_pivot = pivot_item(lst, stack_index);
- lst_thing *next_pivot = pivot_item(lst, stack_index + 1);
-
- if (current_pivot && next_pivot && lst->cmp(current_pivot, next_pivot) < 0) pivots_in_order = false;
- }
- if (!pivots_in_order) TEST_MSG_ALWAYS("pivots not in ascending order");
-
- /*
- * Next, all non-fictitious pivots must correspond to non-null elements of the array.
- */
- for (stack_index_t stack_index = 1; stack_index < depth; stack_index++) {
- if (!pivot_item(lst, stack_index)) TEST_MSG_ALWAYS("pivot #%d refers to NULL", stack_index);
- }
-
- /*
- * Next, the stacked pivot indices should decrease as you ascend from
- * the bottom of the pivot stack. Here we *do* include the fictitious
- * pivot; we're just comparing indices.
- */
- for (stack_index_t stack_index = 0; stack_index + 1 < depth; stack_index++) {
- fr_lst_index_t current_pivot_index = stack_item(&(lst->s), stack_index);
- fr_lst_index_t previous_pivot_index = stack_item(&(lst->s), stack_index + 1);
-
-
- if (previous_pivot_index >= current_pivot_index) pivot_indices_in_order = false;
- }
-
- if (!pivot_indices_in_order) TEST_MSG_ALWAYS("pivot indices not in order");
-
- /*
- * Finally...
- * values in buckets shouldn't "follow" the pivot to the immediate right (if it exists)
- * and shouldn't "precede" the pivot to the immediate left (if it exists)
- *
- * todo: this will find pivot ordering issues as well; get rid of that ultimately,
- * since pivot-pivot ordering errors are caught above.
- */
- for (stack_index_t stack_index = 0; stack_index < depth; stack_index++) {
- fr_lst_index_t lwb, upb, pivot_index;
- void *pivot_item, *element;
-
- if (stack_index > 0) {
- lwb = (stack_index + 1 == depth) ? lst->idx : stack_item(&(lst->s), stack_index + 1);
- pivot_index = upb = stack_item(&(lst->s), stack_index);
- pivot_item = item(lst, pivot_index);
- for (fr_lst_index_t index = lwb; index < upb; index++) {
- element = item(lst, index);
- if (element && pivot_item && lst->cmp(element, pivot_item) > 0) {
- TEST_MSG_ALWAYS("element at %d > pivot at %d", index, pivot_index);
- }
- }
- }
- if (stack_index + 1 < depth) {
- upb = stack_item(&(lst->s), stack_index);
- lwb = pivot_index = stack_item(&(lst->s), stack_index + 1);
- pivot_item = item(lst, pivot_index);
- for (fr_lst_index_t index = lwb; index < upb; index++) {
- element = item(lst, index);
- if (element && pivot_item && lst->cmp(pivot_item, element) > 0) {
- TEST_MSG_ALWAYS( "element at %d < pivot at %d", index, pivot_index);
- }
- }
- }
- }
-}
-#endif
-
TEST_LIST = {
/*
* Basic tests