return 1;
}
+#if 0
/** @internal Prefix walk. */
static int cache_prefixed(struct kr_cache *cache, const char *args, knot_db_val_t *results, int maxresults)
{
}
return ret;
}
+#endif
/** Prune expired/invalid records. */
static int cache_prune(lua_State *L)
/* Clear a sub-tree in cache. */
if (args && strlen(args) > 0) {
- int ret = cache_remove_prefix(cache, args);
+ int ret = kr_error(ENOSYS); // FIXME cache_remove_prefix(cache, args);
if (ret < 0) {
format_error(L, kr_strerror(ret));
lua_error(L);
const char *args = lua_tostring(L, 1);
/* Retrieve set of keys */
static knot_db_val_t result_set[100];
- int ret = cache_prefixed(cache, args, result_set, 100);
+ int ret = kr_error(ENOSYS); // FIXME cache_prefixed(cache, args, result_set, 100);
if (ret < 0) {
format_error(L, kr_strerror(ret));
lua_error(L);
/* Load basic modules */
engine_register(engine, "iterate", NULL, NULL);
engine_register(engine, "validate", NULL, NULL);
- engine_register(engine, "rrcache", NULL, NULL);
- engine_register(engine, "pktcache", NULL, NULL);
+ engine_register(engine, "cache_lmdb", NULL, NULL);
return array_push(engine->backends, kr_cdb_lmdb());
}
#define VERBOSE_MSG(qry, fmt...) QRVERBOSE((qry), "cach", fmt)
/* Cache version */
-#define KEY_VERSION "V\x04"
+static const uint16_t CACHE_VERSION = 1;
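+/* The version is now stored as a bare 2-byte value under a dedicated key
+ * ("\x00\x00V", see assert_right_version() below), replacing the former
+ * "V\x04" version key. */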
/* Key size */
#define KEY_HSIZE (sizeof(uint8_t) + sizeof(uint16_t))
#define KEY_SIZE (KEY_HSIZE + KNOT_DNAME_MAXLEN)
#define cache_op(cache, op, ...) (cache)->api->op((cache)->db, ## __VA_ARGS__)
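+/* For illustration: cache_op(cache, read, &key, &val, 1) expands to
+ * cache->api->read(cache->db, &key, &val, 1), i.e. every operation is
+ * dispatched through the pluggable backend API registered in
+ * engine->backends (cf. kr_cdb_lmdb()). */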
/** @internal Removes all records from cache. */
-static inline int cache_purge(struct kr_cache *cache)
+static inline int cache_clear(struct kr_cache *cache)
{
cache->stats.delete += 1;
return cache_op(cache, clear);
static int assert_right_version(struct kr_cache *cache)
{
/* Check cache ABI version */
- knot_db_val_t key = { KEY_VERSION, 2 };
- knot_db_val_t val = { KEY_VERSION, 2 };
+ uint8_t key_str[] = "\x00\x00V"; /* CACHE_KEY */
+ knot_db_val_t key = { .data = key_str, .len = sizeof(key_str) };
+ knot_db_val_t val = { };
int ret = cache_op(cache, read, &key, &val, 1);
- if (ret == 0) {
+ if (ret == 0 && val.len == sizeof(CACHE_VERSION)
+ && memcmp(val.data, &CACHE_VERSION, sizeof(CACHE_VERSION)) == 0) {
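+ /* Note: the stored value is compared byte-for-byte against the in-memory
+ * uint16_t, i.e. in host byte order; any mismatch falls through to the
+ * purge-and-rewrite branch below. */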
ret = kr_error(EEXIST);
} else {
/* Version doesn't match. Recreate cache and write version key. */
ret = cache_op(cache, count);
if (ret != 0) { /* Non-empty cache, purge it. */
kr_log_info("[cache] incompatible cache database detected, purging\n");
- ret = cache_purge(cache);
+ ret = cache_clear(cache);
}
/* Either purged or empty. */
if (ret == 0) {
/* Key/Val is invalidated by cache purge, recreate it */
- key.data = KEY_VERSION;
- key.len = 2;
- val = key;
+ val.data = /*const-cast*/(void *)&CACHE_VERSION;
+ val.len = sizeof(CACHE_VERSION);
ret = cache_op(cache, write, &key, &val, 1);
}
}
if (!cache_isvalid(cache)) {
return kr_error(EINVAL);
}
- int ret = cache_purge(cache);
+ int ret = cache_clear(cache);
if (ret == 0) {
ret = assert_right_version(cache);
}
}
#include "lib/dnssec/ta.h"
+#include "lib/layer/iterate.h"
#include "lib/resolve.h"
#include "lib/rplan.h"
-/** TODO */
+/** Construct the cache key for an exact (name, type) lookup.
+ * TODO: document the CACHE_KEY layout. */
static knot_db_val_t key_exact_type(struct key *k, uint16_t ktype)
{
k->buf[k->name_len + 1] = 0; /* make sure different names can never match */
/** function for .produce phase */
-int read_lmdb(kr_layer_t *ctx, knot_pkt_t *pkt)
+int cache_lmdb_peek(kr_layer_t *ctx, knot_pkt_t *pkt)
{
struct kr_request *req = ctx->req;
struct kr_query *qry = req->current_query;
struct key k_storage, *k = &k_storage;
int ret = knot_dname_lf(k->buf, qry->sname, NULL);
if (ret) {
- return kr_error(ret);
+ return KR_STATE_FAIL;
}
k->name_len = k->buf[0];
const struct entry_h *eh = closest_NS(ctx, k);
if (!eh) { /* fall back to root hints? */
ret = kr_zonecut_set_sbelt(req->ctx, &qry->zone_cut);
- if (ret) return kr_error(ret);
+ if (ret) return KR_STATE_FAIL;
assert(!qry->zone_cut.parent);
+
+ //VERBOSE_MSG(qry, "=> using root hints\n");
+ //qry->flags.AWAIT_CUT = false;
return kr_ok();
}
switch (k->type) {
* and that's the only place to start - we may either find
* a negative proof or we may query upstream from that point. */
kr_zonecut_set(&qry->zone_cut, k->dname);
+ ret = kr_make_query(qry, pkt); // FIXME: probably not yet - qname minimization
+ if (ret) return KR_STATE_FAIL;
+
/* Note: up to here we can run on any cache backend,
* without touching the code. */
struct kr_query *qry = req->current_query;
struct kr_cache *cache = &req->ctx->cache;
+ // FIXME: DS is parent-side record
bool exact_match = true;
// LATER(optim): if stype is NS, we check the same value again
do {
uint32_t ttl_min, ttl_max; /**< Minimum and maximum TTL of inserted entries */
};
+
+
+#include "lib/module.h"
+int cache_lmdb_peek(kr_layer_t *ctx, knot_pkt_t *pkt);
+
+
/**
* Open/create cache with provided storage options.
* @param cache cache structure to be initialized
return cache->db != NULL;
}
+#if 0
/**
* Peek the cache for asset (name, type, tag)
* @note The 'drift' is the time passed between the inception time and now (in seconds).
KR_EXPORT
int kr_cache_remove(struct kr_cache *cache, uint8_t tag, const knot_dname_t *name, uint16_t type);
+#endif
/**
* Clear all items from the cache.
* @param cache cache structure
*/
KR_EXPORT
int kr_cache_clear(struct kr_cache *cache);
+#if 0
/**
* Prefix scan on cached items.
*/
KR_EXPORT
int kr_cache_insert_rrsig(struct kr_cache *cache, const knot_rrset_t *rr, uint8_t rank, uint8_t flags, uint32_t timestamp);
+
+#endif
--- /dev/null
+
+#include "lib/module.h"
+#include "lib/cache.h"
+
+
+/** Module implementation. */
+const kr_layer_api_t *cache_lmdb_layer(struct kr_module *module)
+{
+ static const kr_layer_api_t _layer = {
+ .produce = &cache_lmdb_peek,
+ //.consume = &cache_stash
+ };
+
+ return &_layer;
+}
+
+KR_MODULE_EXPORT(cache_lmdb)
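+
+/* The layer is then listed among the embedded modules and loaded at startup
+ * via engine_register(engine, "cache_lmdb", NULL, NULL), replacing the former
+ * rrcache/pktcache pair. */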
lib/generic/map.c \
lib/layer/iterate.c \
lib/layer/validate.c \
- lib/layer/rrcache.c \
- lib/layer/pktcache.c \
+ lib/layer/cache_lmdb.c \
lib/dnssec/nsec.c \
lib/dnssec/nsec3.c \
lib/dnssec/signature.c \
/* List of embedded modules */
const kr_layer_api_t *iterate_layer(struct kr_module *module);
const kr_layer_api_t *validate_layer(struct kr_module *module);
-const kr_layer_api_t *rrcache_layer(struct kr_module *module);
-const kr_layer_api_t *pktcache_layer(struct kr_module *module);
+const kr_layer_api_t *cache_lmdb_layer(struct kr_module *module);
static const struct kr_module embedded_modules[] = {
{ "iterate", NULL, NULL, NULL, iterate_layer, NULL, NULL, NULL },
{ "validate", NULL, NULL, NULL, validate_layer, NULL, NULL, NULL },
- { "rrcache", NULL, NULL, NULL, rrcache_layer, NULL, NULL, NULL },
- { "pktcache", NULL, NULL, NULL, pktcache_layer, NULL, NULL, NULL },
+ { "cache_lmdb", NULL, NULL, NULL, cache_lmdb_layer, NULL, NULL, NULL },
};
/** Library extension. */
*/
static void check_empty_nonterms(struct kr_query *qry, knot_pkt_t *pkt, struct kr_cache *cache, uint32_t timestamp)
{
+ return; // FIXME cleanup, etc.
+#if 0
if (qry->flags.NO_MINIMIZE) {
return;
}
target = knot_wire_next_label(target, NULL);
}
kr_cache_sync(cache);
+#endif
}
static int ns_fetch_cut(struct kr_query *qry, const knot_dname_t *requested_name,
/** @internal Check current zone cut status and credibility, spawn subrequests if needed. */
static int zone_cut_check(struct kr_request *request, struct kr_query *qry, knot_pkt_t *packet)
+/* TODO: using the cache at this point in this way just isn't nice; remove in time. */
{
/* Stub mode, just forward and do not solve cut. */
if (qry->flags.STUB) {