#include "lib/cache.h"
#include "lib/defines.h"
+/* Key size */
+#define KEY_SIZE (sizeof(uint8_t) + KNOT_DNAME_MAXLEN + sizeof(uint16_t))
+
/** Used cache storage engine (default LMDB) */
const namedb_api_t *(*kr_cache_storage)(void) = namedb_lmdb_api;
#define db_api kr_cache_storage()
+/** Generic storage options */
+union storage_opts {
+ struct namedb_lmdb_opts lmdb;
+};
+
namedb_t *kr_cache_open(const char *handle, mm_ctx_t *mm, size_t maxsize)
{
- if (handle == NULL || maxsize == 0) {
+ if (!handle || maxsize == 0) {
return NULL;
}
- struct namedb_lmdb_opts opts = NAMEDB_LMDB_OPTS_INITIALIZER;
- opts.mapsize = maxsize;
- opts.path = handle;
+ union storage_opts opts;
+ memset(&opts, 0, sizeof(opts));
+ if (db_api == namedb_lmdb_api()) {
+ opts.lmdb.mapsize = maxsize;
+ opts.lmdb.path = handle;
+ }
namedb_t *db = NULL;
int ret = db_api->init(&db, mm, &opts);
- if (ret != KNOT_EOK) {
+ if (ret != 0) {
return NULL;
}
void kr_cache_close(namedb_t *cache)
{
- if (cache != NULL) {
+ if (cache) {
db_api->deinit(cache);
}
}
int kr_cache_txn_begin(namedb_t *cache, namedb_txn_t *txn, unsigned flags)
{
- if (cache == NULL || txn == NULL) {
- return KNOT_EINVAL;
+ if (!cache || !txn) {
+ return kr_error(EINVAL);
}
return db_api->txn_begin(cache, txn, flags);
int kr_cache_txn_commit(namedb_txn_t *txn)
{
- if (txn == NULL) {
- return KNOT_EINVAL;
+ if (!txn) {
+ return kr_error(EINVAL);
}
int ret = db_api->txn_commit(txn);
- if (ret != KNOT_EOK) {
+ if (ret != 0) {
kr_cache_txn_abort(txn);
}
return ret;
void kr_cache_txn_abort(namedb_txn_t *txn)
{
- if (txn != NULL) {
+ if (txn) {
db_api->txn_abort(txn);
}
}
-static size_t cache_key(uint8_t *buf, const knot_dname_t *name, uint16_t type)
+/** @internal Composed key as { u8 tag, u8[1-255] name, u16 type } */
+static size_t cache_key(uint8_t *buf, uint8_t tag, const knot_dname_t *name, uint16_t type)
{
- size_t len = knot_dname_to_wire(buf, name, KNOT_DNAME_MAXLEN);
- memcpy(buf + len, &type, sizeof(uint16_t));
- return len + sizeof(uint16_t);
+ knot_dname_lf(buf, name, NULL);
+ size_t len = buf[0] + 1;
+ memcpy(buf + len, &type, sizeof(type));
+ buf[0] = tag;
+ return len + sizeof(type);
}
-static struct kr_cache_rrset *cache_rr(namedb_txn_t *txn, const knot_dname_t *name, uint16_t type)
+static struct kr_cache_entry *cache_entry(namedb_txn_t *txn, uint8_t tag, const knot_dname_t *name, uint16_t type)
{
- uint8_t keybuf[KNOT_DNAME_MAXLEN + sizeof(uint16_t)];
- size_t key_len = cache_key(keybuf, name, type);
+ uint8_t keybuf[KEY_SIZE];
+ size_t key_len = cache_key(keybuf, tag, name, type);
/* Look up and return value */
namedb_val_t key = { keybuf, key_len };
return NULL;
}
- return (struct kr_cache_rrset *)val.data;
+ return (struct kr_cache_entry *)val.data;
}
-int kr_cache_peek(namedb_txn_t *txn, knot_rrset_t *rr, uint32_t *timestamp)
+struct kr_cache_entry *kr_cache_peek(namedb_txn_t *txn, uint8_t tag, const knot_dname_t *name,
+ uint16_t type, uint32_t *timestamp)
{
- if (txn == NULL || rr == NULL) {
- return KNOT_EINVAL;
+ if (!txn || !tag || !name) {
+ return NULL;
}
- /* Check if the RRSet is in the cache. */
- struct kr_cache_rrset *found_rr = cache_rr(txn, rr->owner, rr->type);
- if (found_rr != NULL) {
+ struct kr_cache_entry *entry = cache_entry(txn, tag, name, type);
+ if (!entry) {
+ return NULL;
+ }
+
+ /* No time constraint */
+ if (!timestamp) {
+ return entry;
+ } else if (*timestamp <= entry->timestamp) {
+ /* John Connor record cached from the future. */
+ *timestamp = 0;
+ return entry;
+ } else {
+ /* Check if the record is still valid. */
+ uint32_t drift = *timestamp - entry->timestamp;
+ if (drift < entry->ttl) {
+ *timestamp = drift;
+ return entry;
+ }
+ }
- /* Assign data and return success. */
- rr->rrs.rr_count = found_rr->count;
- rr->rrs.data = found_rr->data;
+ return NULL;
+}
- /* No time constraint */
- if (timestamp == NULL) {
- return KNOT_EOK;
- }
+static void entry_write(struct kr_cache_entry *dst, struct kr_cache_entry *header, namedb_val_t data)
+{
+ assert(dst);
+ memcpy(dst, header, sizeof(*header));
+ memcpy(dst->data, data.data, data.len);
+}
- /* John Connor record cached from the future. */
- if (*timestamp < found_rr->timestamp) {
- *timestamp = 0;
- return KNOT_EOK;
- }
+int kr_cache_insert(namedb_txn_t *txn, uint8_t tag, const knot_dname_t *name, uint16_t type,
+ struct kr_cache_entry *header, namedb_val_t data)
+{
+ if (!txn || !name || !tag || !header) {
+ return kr_error(EINVAL);
+ }
- /* Check if at least one RR is still valid. */
- uint32_t drift = *timestamp - found_rr->timestamp;
- for (unsigned i = 0; i < rr->rrs.rr_count; ++i) {
- const knot_rdata_t *rd = knot_rdataset_at(&rr->rrs, i);
- if (knot_rdata_ttl(rd) > drift) {
- *timestamp = drift;
- return KNOT_EOK;
- }
+ /* Insert key */
+ uint8_t keybuf[KEY_SIZE];
+ size_t key_len = cache_key(keybuf, tag, name, type);
+ namedb_val_t key = { keybuf, key_len };
+ namedb_val_t entry = { NULL, sizeof(*header) + data.len };
+
+ /* LMDB can do late write and avoid copy */
+ if (db_api == namedb_lmdb_api()) {
+ int ret = db_api->insert(txn, &key, &entry, 0);
+ if (ret != 0) {
+ return ret;
}
+ entry_write(entry.data, header, data);
+ } else {
+ /* Other backends must prepare contiguous data first */
+ entry.data = malloc(entry.len);
+ if (!entry.data) {
+ return kr_error(ENOMEM);
+ }
+ entry_write(entry.data, header, data);
+ int ret = db_api->insert(txn, &key, &entry, 0);
+ free(entry.data);
+ if (ret != 0) {
+ return ret;
+ }
+ }
+
+ return kr_ok();
+}
- return KNOT_ENOENT;
+int kr_cache_remove(namedb_txn_t *txn, uint8_t tag, const knot_dname_t *name, uint16_t type)
+{
+ if (!txn || !tag || !name ) {
+ return kr_error(EINVAL);
+ }
+
+ uint8_t keybuf[KEY_SIZE];
+ size_t key_len = cache_key(keybuf, tag, name, type);
+ namedb_val_t key = { keybuf, key_len };
+ return db_api->del(txn, &key);
+}
+
+int kr_cache_clear(namedb_txn_t *txn)
+{
+ if (!txn) {
+ return kr_error(EINVAL);
+ }
+
+ return db_api->clear(txn);
+}
+
+int kr_cache_peek_rr(namedb_txn_t *txn, knot_rrset_t *rr, uint32_t *timestamp)
+{
+ if (!txn || !rr || !timestamp) {
+ return kr_error(EINVAL);
+ }
+
+ /* Check if the RRSet is in the cache. */
+ struct kr_cache_entry *entry = kr_cache_peek(txn, KR_CACHE_RR, rr->owner, rr->type, timestamp);
+ if (entry) {
+ rr->rrs.rr_count = entry->count;
+ rr->rrs.data = entry->data;
+ return kr_ok();
}
/* Not found. */
- return KNOT_ENOENT;
+ return kr_error(ENOENT);
}
knot_rrset_t kr_cache_materialize(const knot_rrset_t *src, uint32_t drift, mm_ctx_t *mm)
knot_rrset_t copy;
knot_rrset_init(&copy, NULL, src->type, src->rclass);
copy.owner = knot_dname_copy(src->owner, mm);
- if (copy.owner == NULL) {
+ if (!copy.owner) {
return copy;
}
for (uint16_t i = 0; i < src->rrs.rr_count; ++i) {
knot_rdata_t *rd = knot_rdataset_at(&src->rrs, i);
if (knot_rdata_ttl(rd) > drift) {
- if (knot_rdataset_add(&copy.rrs, rd, mm) != KNOT_EOK) {
+ if (knot_rdataset_add(&copy.rrs, rd, mm) != 0) {
knot_rrset_clear(&copy, mm);
return copy;
}
return copy;
}
-int kr_cache_insert(namedb_txn_t *txn, const knot_rrset_t *rr, uint32_t timestamp)
+int kr_cache_insert_rr(namedb_txn_t *txn, const knot_rrset_t *rr, uint32_t timestamp)
{
- if (txn == NULL || rr == NULL) {
- return KNOT_EINVAL;
+ if (!txn || !rr) {
+ return kr_error(EINVAL);
}
- /* Ignore empty records. */
+ /* Ignore empty records */
if (knot_rrset_empty(rr)) {
- return KNOT_EOK;
- }
-
- uint8_t keybuf[KNOT_DNAME_MAXLEN + sizeof(uint16_t)];
- size_t key_len = cache_key(keybuf, rr->owner, rr->type);
- namedb_val_t key = { keybuf, key_len };
- namedb_val_t val = { NULL, sizeof(struct kr_cache_rrset) + knot_rdataset_size(&rr->rrs) };
-
- int ret = db_api->insert(txn, &key, &val, 0);
- if (ret != KNOT_EOK) {
- return ret;
+ return kr_ok();
}
- /* Write cached record. */
- struct kr_cache_rrset *cache_rr = val.data;
- cache_rr->timestamp = timestamp;
- cache_rr->count = rr->rrs.rr_count;
- memcpy(cache_rr->data, rr->rrs.data, knot_rdataset_size(&rr->rrs));
-
- return KNOT_EOK;
-}
-
-int kr_cache_remove(namedb_txn_t *txn, const knot_rrset_t *rr)
-{
- if (txn == NULL || rr == NULL) {
- return KNOT_EINVAL;
- }
-
- uint8_t keybuf[KNOT_DNAME_MAXLEN + sizeof(uint16_t)];
- size_t key_len = cache_key(keybuf, rr->owner, rr->type);
- namedb_val_t key = { keybuf, key_len };
-
- return db_api->del(txn, &key);
-}
-
-int kr_cache_clear(namedb_txn_t *txn)
-{
- if (txn == NULL) {
- return KNOT_EINVAL;
+ /* Prepare header to write */
+ struct kr_cache_entry header = {
+ .timestamp = timestamp,
+ .ttl = 0,
+ .count = rr->rrs.rr_count
+ };
+ for (uint16_t i = 0; i < rr->rrs.rr_count; ++i) {
+ knot_rdata_t *rd = knot_rdataset_at(&rr->rrs, i);
+ if (knot_rdata_ttl(rd) > header.ttl) {
+ header.ttl = knot_rdata_ttl(rd);
+ }
}
- return db_api->clear(txn);
+ namedb_val_t data = { rr->rrs.data, knot_rdataset_size(&rr->rrs) };
+ return kr_cache_insert(txn, KR_CACHE_RR, rr->owner, rr->type, &header, data);
}
#include <libknot/rrset.h>
#include <libknot/internal/namedb/namedb.h>
+/** Cache entry tag */
+enum kr_cache_tag {
+ KR_CACHE_RR = 0x01,
+ KR_CACHE_PKT = 0x02,
+ KR_CACHE_USER = 0xF0
+};
+
/**
- * Serialized form of the RRSet with inception timestamp.
+ * Serialized form of the RRSet with inception timestamp and maximum TTL.
*/
-struct kr_cache_rrset
+struct kr_cache_entry
{
uint32_t timestamp;
+ uint32_t ttl;
uint16_t count;
uint8_t data[];
};
/**
* Open/create persistent cache in given path.
- * @param handle Path to existing directory where the DB should be created.
+ * @param handle Configuration string (e.g. path to existing directory where the DB should be created)
* @param mm Memory context.
* @param maxsize Maximum database size (bytes)
* @return database instance or NULL
* @param cache database instance
* @param txn transaction instance to be initialized (output)
* @param flags transaction flags (see namedb.h in libknot)
- * @return KNOT_E*
+ * @return 0 or an errcode
*/
int kr_cache_txn_begin(namedb_t *cache, namedb_txn_t *txn, unsigned flags);
-
/**
* Commit existing transaction.
* @param txn transaction instance
- * @return KNOT_E*
+ * @return 0 or an errcode
*/
int kr_cache_txn_commit(namedb_txn_t *txn);
void kr_cache_txn_abort(namedb_txn_t *txn);
/**
- * Peek the cache for given RRSet (name, type, class)
+ * Peek the cache for asset (name, type, tag)
+ * @note The 'drift' is the time passed between the cache time of the RRSet and now (in seconds).
+ * @param txn transaction instance
+ * @param tag asset tag
+ * @param name asset name
+ * @param type asset type
+ * @param timestamp current time (will be replaced with drift if successful)
+ * @return cache entry or NULL
+ */
+struct kr_cache_entry *kr_cache_peek(namedb_txn_t *txn, uint8_t tag, const knot_dname_t *name,
+ uint16_t type, uint32_t *timestamp);
+
+/**
+ * Insert asset into cache, replacing any existing data.
+ * @param txn transaction instance
+ * @param tag asset tag
+ * @param name asset name
+ * @param type asset type
+ * @param header filled entry header (count, ttl and timestamp)
+ * @return 0 or an errcode
+ */
+int kr_cache_insert(namedb_txn_t *txn, uint8_t tag, const knot_dname_t *name, uint16_t type,
+ struct kr_cache_entry *header, namedb_val_t data);
+
+/**
+ * Remove asset from cache.
+ * @param txn transaction instance
+ * @param tag asset tag
+ * @param name asset name
+ * @param type record type
+ * @return 0 or an errcode
+ */
+int kr_cache_remove(namedb_txn_t *txn, uint8_t tag, const knot_dname_t *name, uint16_t type);
+
+/**
+ * Clear all items from the cache.
+ * @param txn transaction instance
+ * @return 0 or an errcode
+ */
+int kr_cache_clear(namedb_txn_t *txn);
+
+/**
+ * Peek the cache for given RRSet (name, type)
* @note The 'drift' is the time passed between the cache time of the RRSet and now (in seconds).
* @param txn transaction instance
* @param rr query RRSet (its rdataset may be changed depending on the result)
* @param timestamp current time (will be replaced with drift if successful)
- * @return KNOT_E*
+ * @return 0 or an errcode
*/
-int kr_cache_peek(namedb_txn_t *txn, knot_rrset_t *rr, uint32_t *timestamp);
+int kr_cache_peek_rr(namedb_txn_t *txn, knot_rrset_t *rr, uint32_t *timestamp);
/**
* Clone read-only RRSet and adjust TTLs.
* @param txn transaction instance
* @param rr inserted RRSet
* @param timestamp current time
- * @return KNOT_E*
- */
-int kr_cache_insert(namedb_txn_t *txn, const knot_rrset_t *rr, uint32_t timestamp);
-
-/**
- * Remove RRSet from cache.
- * @param txn transaction instance
- * @param rr removed RRSet
- * @return KNOT_E*
- */
-int kr_cache_remove(namedb_txn_t *txn, const knot_rrset_t *rr);
-
-/**
- * Clear all items from the cache.
- * @param txn transaction instance
- * @return KNOT_E*
+ * @return 0 or an errcode
*/
-int kr_cache_clear(namedb_txn_t *txn);
+int kr_cache_insert_rr(namedb_txn_t *txn, const knot_rrset_t *rr, uint32_t timestamp);
rr_callback_t cb, struct kr_request *req)
{
/* Query cache for requested record */
- if (kr_cache_peek(txn, cache_rr, &timestamp) != KNOT_EOK) {
+ if (kr_cache_peek_rr(txn, cache_rr, &timestamp) != KNOT_EOK) {
return KNOT_STATE_NOOP;
}
/* Check if already cached. */
knot_rrset_t query_rr;
knot_rrset_init(&query_rr, rr->owner, rr->type, rr->rclass);
- if (kr_cache_peek(txn, &query_rr, &timestamp) == KNOT_EOK) {
+ if (kr_cache_peek_rr(txn, &query_rr, &timestamp) == KNOT_EOK) {
return KNOT_EOK;
}
rr->type = KNOT_RRTYPE_CNAME;
while((merge_in_section(rr, section, 0, pool)) == KNOT_EOK) {
/* Cache the merged RRSet */
- ret = kr_cache_insert(txn, rr, timestamp);
+ ret = kr_cache_insert_rr(txn, rr, timestamp);
if (ret != KNOT_EOK) {
return ret;
}
rr->type = orig_rrtype;
ret = merge_in_section(rr, section, 0, pool);
if (ret == KNOT_EOK) {
- kr_cache_insert(txn, rr, timestamp);
+ kr_cache_insert_rr(txn, rr, timestamp);
knot_rdataset_clear(&rr->rrs, pool);
}
{
knot_rrset_t cached_rr;
knot_rrset_init(&cached_rr, (knot_dname_t *)ns, rrtype, KNOT_CLASS_IN);
- if (kr_cache_peek(txn, &cached_rr, &timestamp) != 0) {
+ if (kr_cache_peek_rr(txn, &cached_rr, &timestamp) != 0) {
return;
}
uint32_t drift = timestamp;
knot_rrset_t cached_rr;
knot_rrset_init(&cached_rr, (knot_dname_t *)name, KNOT_RRTYPE_NS, KNOT_CLASS_IN);
- int ret = kr_cache_peek(txn, &cached_rr, &drift);
+ int ret = kr_cache_peek_rr(txn, &cached_rr, &drift);
if (ret != 0) {
return ret;
}
* Properties.
*/
-/** Return boolean true if a record in the RR set is expired. */
-static int is_expired(struct kr_cache_rrset *rr, uint32_t drift)
+/** Return boolean true if a record is expired. */
+static bool is_expired(struct kr_cache_entry *entry, uint32_t drift)
{
- /* Initialize set. */
- knot_rdataset_t rrs;
- rrs.rr_count = rr->count;
- rrs.data = rr->data;
-
- for (unsigned i = 0; i < rrs.rr_count; ++i) {
- const knot_rdata_t *rd = knot_rdataset_at(&rrs, i);
- if (knot_rdata_ttl(rd) <= drift) {
- return 1;
- }
- }
-
- return 0;
+ return entry->ttl <= drift;
}
/**
break;
}
/* Prune expired records. */
- struct kr_cache_rrset *rr = val.data;
- if (is_expired(rr, now - rr->timestamp)) {
+ struct kr_cache_entry *entry = val.data;
+ if (is_expired(entry, now - entry->timestamp)) {
storage->del(&txn, &key);
pruned += 1;
}
/* Test invalid parameters. */
static void test_invalid(void **state)
{
- assert_null(kr_cache_open(NULL, NULL, 0));
- assert_null(kr_cache_open(global_env, NULL, 0));
- assert_int_not_equal(kr_cache_txn_begin(NULL, &global_txn, 0), KNOT_EOK);
- assert_int_not_equal(kr_cache_txn_begin(&global_env, NULL, 0), KNOT_EOK);
- assert_int_not_equal(kr_cache_txn_commit(NULL), KNOT_EOK);
- assert_int_not_equal(kr_cache_peek(NULL, NULL, NULL), KNOT_EOK);
- assert_int_not_equal(kr_cache_peek(&global_txn, NULL, NULL), KNOT_EOK);
- assert_int_not_equal(kr_cache_insert(&global_txn, NULL, 0), KNOT_EOK);
- assert_int_not_equal(kr_cache_insert(NULL, NULL, 0), KNOT_EOK);
- assert_int_not_equal(kr_cache_remove(&global_txn, NULL), KNOT_EOK);
- assert_int_not_equal(kr_cache_remove(NULL, NULL), KNOT_EOK);
- assert_int_not_equal(kr_cache_clear(NULL), KNOT_EOK);
+ assert_null((void *)kr_cache_open(NULL, NULL, 0));
+ assert_null((void *)kr_cache_open(global_env, NULL, 0));
+ assert_int_not_equal(kr_cache_txn_begin(NULL, &global_txn, 0), 0);
+ assert_int_not_equal(kr_cache_txn_begin(&global_env, NULL, 0), 0);
+ assert_int_not_equal(kr_cache_txn_commit(NULL), 0);
+ assert_int_not_equal(kr_cache_peek_rr(NULL, NULL, NULL), 0);
+ assert_int_not_equal(kr_cache_peek_rr(&global_txn, NULL, NULL), 0);
+ assert_int_not_equal(kr_cache_insert_rr(&global_txn, NULL, 0), 0);
+ assert_int_not_equal(kr_cache_insert_rr(NULL, NULL, 0), 0);
+ assert_int_not_equal(kr_cache_remove(&global_txn, 0, NULL, 0), 0);
+ assert_int_not_equal(kr_cache_remove(&global_txn, KR_CACHE_RR, NULL, 0), 0);
+ assert_int_not_equal(kr_cache_remove(NULL, 0, NULL, 0), 0);
+ assert_int_not_equal(kr_cache_clear(NULL), 0);
}
/* Test cache open */
static namedb_txn_t *test_txn_rdonly(void **state)
{
assert_non_null(*state);
- assert_int_equal(kr_cache_txn_begin(*state, &global_txn, NAMEDB_RDONLY), KNOT_EOK);
+ assert_int_equal(kr_cache_txn_begin(*state, &global_txn, NAMEDB_RDONLY), 0);
return &global_txn;
}
test_random_rr(&global_rr, CACHE_TTL);
namedb_txn_t *txn = test_txn_write(state);
- int ret = kr_cache_insert(txn, &global_rr, CACHE_TIME);
+ int ret = kr_cache_insert_rr(txn, &global_rr, CACHE_TIME);
if (ret == KNOT_EOK) {
ret = kr_cache_txn_commit(txn);
} else {
for (uint32_t timestamp = CACHE_TIME; timestamp < CACHE_TIME + CACHE_TTL; ++timestamp) {
uint32_t drift = timestamp;
- int query_ret = kr_cache_peek(txn, &cache_rr, &drift);
+ int query_ret = kr_cache_peek_rr(txn, &cache_rr, &drift);
bool rr_equal = knot_rrset_equal(&global_rr, &cache_rr, KNOT_RRSET_COMPARE_WHOLE);
assert_int_equal(query_ret, KNOT_EOK);
assert_true(rr_equal);
knot_rrset_init(&cache_rr, global_rr.owner, global_rr.type, global_rr.rclass);
namedb_txn_t *txn = test_txn_rdonly(state);
- int ret = kr_cache_peek(txn, &cache_rr, &timestamp);
+ int ret = kr_cache_peek_rr(txn, &cache_rr, &timestamp);
assert_int_equal(ret, KNOT_ENOENT);
kr_cache_txn_abort(txn);
}
knot_rrset_init(&cache_rr, global_rr.owner, global_rr.type, global_rr.rclass);
namedb_txn_t *txn = test_txn_write(state);
- int ret = kr_cache_remove(txn, &cache_rr);
+ int ret = kr_cache_remove(txn, KR_CACHE_RR, cache_rr.owner, cache_rr.type);
assert_int_equal(ret, KNOT_EOK);
- ret = kr_cache_peek(txn, &cache_rr, &timestamp);
+ ret = kr_cache_peek_rr(txn, &cache_rr, &timestamp);
assert_int_equal(ret, KNOT_ENOENT);
kr_cache_txn_commit(txn);
}
for (unsigned i = 0; i < CACHE_SIZE; ++i) {
knot_rrset_t rr;
test_random_rr(&rr, CACHE_TTL);
- ret = kr_cache_insert(txn, &rr, CACHE_TTL - 1);
+ ret = kr_cache_insert_rr(txn, &rr, CACHE_TTL - 1);
if (ret != KNOT_EOK) {
break;
}