/* Empty init; filled via ./lua/config.lua */
kr_zonecut_init(&engine->resolver.root_hints, (const uint8_t *)"", engine->pool);
/* Open NS rtt + reputation cache */
- lru_create(&engine->resolver.cache_rtt, LRU_RTT_SIZE, engine->pool, NULL);
- lru_create(&engine->resolver.cache_rep, LRU_REP_SIZE, engine->pool, NULL);
- lru_create(&engine->resolver.cache_cookie, LRU_COOKIES_SIZE, engine->pool, NULL);
+ lru_create(&engine->resolver.cache_rtt, LRU_RTT_SIZE, NULL, NULL);
+ lru_create(&engine->resolver.cache_rep, LRU_REP_SIZE, NULL, NULL);
+ lru_create(&engine->resolver.cache_cookie, LRU_COOKIES_SIZE, NULL, NULL);
/* Load basic modules */
engine_register(engine, "iterate", NULL, NULL);
kr_zonecut_deinit(&engine->resolver.root_hints);
kr_cache_close(&engine->resolver.cache);
- /* The lru keys are currently malloc-ated and need to be freed. */
+ /* The LRUs are currently malloc-ated and need to be freed. */
lru_free(engine->resolver.cache_rtt);
lru_free(engine->resolver.cache_rep);
lru_free(engine->resolver.cache_cookie);
#include "lib/generic/lru.h"
#include "contrib/murmurhash3/murmurhash3.h"
+#include "contrib/ucw/mempool.h"
typedef struct lru_group lru_group_t;
group_count = 1 << log_groups;
assert(max_slots <= group_count * LRU_ASSOC && group_count * LRU_ASSOC < 2 * max_slots);
+ /* Get a sufficiently aligning mm_array if NULL is passed. */
+ if (!mm_array) {
+ static knot_mm_t mm_array_default = { 0 };
+ if (!mm_array_default.ctx)
+ mm_ctx_init_aligned(&mm_array_default, __alignof(struct lru));
+ mm_array = &mm_array_default;
+ }
+ assert(mm_array->alloc != mm_malloc && mm_array->alloc != (knot_mm_alloc_t)mp_alloc);
+
size_t size = offsetof(struct lru, groups[group_count]);
struct lru *lru = mm_alloc(mm_array, size);
if (unlikely(lru == NULL))
* @param ptable pointer to a pointer to the LRU
* @param max_slots number of slots
* @param mm_ctx_array memory context to use for the huge array, NULL for default
+ * If you pass your own context, it must produce CACHE_ALIGNED allocations;
+ * otherwise UBSan will report misaligned accesses to the array.
* @param mm_ctx memory context to use for individual key-value pairs, NULL for default
*
* @note The pointers to memory contexts need to remain valid
(void)ctx;
return malloc(n);
}
/** posix_memalign() wrapper.
 *
 * @param ctx the requested alignment, smuggled in as a pointer-sized integer
 *            (set up by mm_ctx_init_aligned()); must be a power of two
 *            and a multiple of sizeof(void *)
 * @param n   number of bytes to allocate
 * @return aligned allocation, or NULL on out-of-memory
 */
void *mm_malloc_aligned(void *ctx, size_t n)
{
	size_t alignment = (size_t)ctx;
	void *res;
	int err = posix_memalign(&res, alignment, n);
	if (err == 0) {
		return res;
	} else {
		/* posix_memalign() returns the error number directly and does
		 * NOT set errno.  ENOMEM is the only runtime failure; EINVAL
		 * would mean a bad alignment, i.e. a programming error. */
		assert(err == ENOMEM);
		return NULL;
	}
}
/*
* Macros.
/** Trivial malloc() wrapper. */
void *mm_malloc(void *ctx, size_t n);
+/** posix_memalign() wrapper. */
+void *mm_malloc_aligned(void *ctx, size_t n);
/** Initialize mm with standard malloc+free. */
static inline void mm_ctx_init(knot_mm_t *mm)
mm->alloc = mm_malloc;
mm->free = free;
}
+
+/** Initialize mm with malloc+free with higher alignment (a power of two). */
+static inline void mm_ctx_init_aligned(knot_mm_t *mm, size_t alignment)
+{
+ assert(__builtin_popcount(alignment) == 1);
+ mm->ctx = (uint8_t *)NULL + alignment; /*< roundabout to satisfy linters */
+ /* posix_memalign() doesn't allow alignment < sizeof(void*),
+ * and there's no point in using it for small values anyway,
+ * as plain malloc() guarantees at least max_align_t.
+ * Nitpick: we might use that type when assuming C11. */
+ mm->alloc = alignment > sizeof(void*) ? mm_malloc_aligned : mm_malloc;
+ mm->free = free;
+}
+
/* @endcond */
/** A strcmp() variant directly usable for qsort() on an array of strings. */