remove the old cache modules from git
author    Vladimír Čunát <vladimir.cunat@nic.cz>
          Fri, 20 Oct 2017 17:00:49 +0000 (19:00 +0200)
committer Vladimír Čunát <vladimir.cunat@nic.cz>
          Fri, 20 Oct 2017 17:00:49 +0000 (19:00 +0200)
lib/layer/pktcache.c [deleted file]
lib/layer/rrcache.c [deleted file]

diff --git a/lib/layer/pktcache.c b/lib/layer/pktcache.c
deleted file mode 100644 (file)
index 702d299..0000000
+++ /dev/null
@@ -1,323 +0,0 @@
-/*  Copyright (C) 2015-2017 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
-
-    This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 3 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-/** @file pktcache.c
- *
- * This builtin module caches whole packets from/for negative answers
- * or answers where wildcard expansion has occurred (.DNSSEC_WEXPAND).
- *
- * Note: it also persists some DNSSEC_* flags.
- * The ranks are stored in *(uint8_t *)rrset->additional (all are the same for one packet).
- */
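-/* For illustration (an assumption about consumers, not code from this file):
- * the rank stashed by this module would be read back roughly as
- *
- *     const knot_rrset_t *rr = ...;          // some RRset from such a packet
- *     uint8_t rank = rr->additional ? *(uint8_t *)rr->additional : 0;
- */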
-
-#include <libknot/descriptor.h>
-#include <libknot/rrset.h>
-#include <libknot/rrtype/soa.h>
-
-#include <contrib/ucw/lib.h>
-#include "lib/layer/iterate.h"
-#include "lib/cache.h"
-#include "lib/dnssec/ta.h"
-#include "lib/module.h"
-#include "lib/resolve.h"
-
-#define VERBOSE_MSG(qry, fmt...) QRVERBOSE((qry), " pc ",  fmt)
-#define DEFAULT_MAXTTL (15 * 60)
-#define DEFAULT_NOTTL (5) /* Short-time "no data" retention to avoid bursts */
-
-static uint32_t limit_ttl(uint32_t ttl)
-{
-       /* @todo Configurable limit */
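-       /* e.g. limit_ttl(86400) == 900 (clamped to DEFAULT_MAXTTL = 15 min),
-        * while limit_ttl(60) == 60 (passes through unchanged). */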
-       return (ttl > DEFAULT_MAXTTL) ? DEFAULT_MAXTTL : ttl;
-}
-
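-/** Subtract the elapsed drift from each rdata TTL.  TTLs smaller than the
- *  drift are left unchanged here; entries past their TTL are rejected
- *  earlier, at kr_cache_peek() time. */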
-static void adjust_ttl(knot_rrset_t *rr, uint32_t drift)
-{
-       knot_rdata_t *rd = rr->rrs.data;
-       for (uint16_t i = 0; i < rr->rrs.rr_count; ++i) {
-               uint32_t ttl = knot_rdata_ttl(rd);
-               if (ttl >= drift) {
-                       knot_rdata_set_ttl(rd, ttl - drift);
-               }
-               rd = kr_rdataset_next(rd);
-       }
-}
-
-/** @internal Try to find a shortcut directly to searched packet. */
-static int loot_pktcache(struct kr_context *ctx, knot_pkt_t *pkt,
-                        struct kr_request *req, uint8_t *flags)
-{
-       struct kr_query *qry = req->current_query;
-       uint32_t timestamp = qry->timestamp.tv_sec;
-       const knot_dname_t *qname = qry->sname;
-       uint16_t rrtype = qry->stype;
-
-       struct kr_cache_entry *entry = NULL;
-       int ret = kr_cache_peek(&ctx->cache, KR_CACHE_PKT, qname,
-                               rrtype, &entry, &timestamp);
-       if (ret != 0) { /* Not in the cache */
-               if (ret == kr_error(ESTALE)) {
-                       VERBOSE_MSG(qry, "=> only stale entry found\n");
-               }
-               return ret;
-       }
-
-       uint8_t lowest_rank = KR_RANK_INITIAL | KR_RANK_AUTH;
-       /* There's probably little sense for NONAUTH in pktcache. */
-
-       if (!knot_wire_get_cd(req->answer->wire) && !(qry->flags.STUB)) {
-               /* Records not present under any TA don't have their security verified at all. */
-               bool ta_covers = kr_ta_covers_qry(ctx, qry->sname, qry->stype);
-               /* ^ TODO: performance? */
-               if (ta_covers) {
-                       kr_rank_set(&lowest_rank, KR_RANK_INSECURE);
-               }
-       }
-       const bool rank_enough = entry->rank >= lowest_rank;
-       VERBOSE_MSG(qry, "=> rank: 0%0.2o, lowest 0%0.2o -> satisfied=%d\n",
-                       entry->rank, lowest_rank, (int)rank_enough);
-       if (!rank_enough) {
-               return kr_error(ENOENT);
-       }
-
-       /* Copy answer, keep the original message id */
-       if (entry->count <= pkt->max_size) {
-               /* Keep original header and copy cached */
-               uint16_t msgid = knot_wire_get_id(pkt->wire);
-               /* Copy and reparse */
-               knot_pkt_clear(pkt);
-               memcpy(pkt->wire, entry->data, entry->count);
-               pkt->size = entry->count;
-               knot_pkt_parse(pkt, 0);
-               /* Restore header bits */
-               knot_wire_set_id(pkt->wire, msgid);
-       }
-
-       /* Rank-related fixups.  Add rank into the additional field. */
-       if (kr_rank_test(entry->rank, KR_RANK_INSECURE)) {
-               qry->flags.DNSSEC_INSECURE = true;
-               qry->flags.DNSSEC_WANT = false;
-       }
-       for (size_t i = 0; i < pkt->rrset_count; ++i) {
-               assert(!pkt->rr[i].additional);
-               uint8_t *rr_rank = mm_alloc(&pkt->mm, sizeof(*rr_rank));
-               if (!rr_rank) {
-                       return kr_error(ENOMEM);
-               }
-               *rr_rank = entry->rank;
-               pkt->rr[i].additional = rr_rank;
-       }
-
-       /* Adjust TTL in records. */
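-       /* Note: kr_cache_peek() above rewrote `timestamp` in place, so it now
-        * holds the drift, i.e. how long the entry has been in the cache. */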
-       for (knot_section_t i = KNOT_ANSWER; i <= KNOT_ADDITIONAL; ++i) {
-               const knot_pktsection_t *sec = knot_pkt_section(pkt, i);
-               for (unsigned k = 0; k < sec->count; ++k) {
-                       const knot_rrset_t *rr = knot_pkt_rr(sec, k);
-                       adjust_ttl((knot_rrset_t *)rr, timestamp);
-               }
-       }
-
-       /* Copy cache entry flags */
-       if (flags) {
-               *flags = entry->flags;
-       }
-
-       return ret;
-}
-
-static int pktcache_peek(kr_layer_t *ctx, knot_pkt_t *pkt)
-{
-       struct kr_request *req = ctx->req;
-       struct kr_query *qry = req->current_query;
-       if (ctx->state & (KR_STATE_FAIL|KR_STATE_DONE) ||
-           (qry->flags.NO_CACHE)) {
-               return ctx->state; /* Already resolved/failed */
-       }
-       /* Both caches only peek for qry->sname and that would be useless
-        * to repeat on every iteration, so disable it from now on.
-        * Note: it's important to skip this if rrcache sets KR_STATE_DONE,
-        * as CNAME chains need more iterations to get fetched. */
-       qry->flags.NO_CACHE = true;
-
-       if (knot_pkt_qclass(pkt) != KNOT_CLASS_IN) {
-               return ctx->state; /* Only IN class */
-       }
-
-       /* Fetch either answer to original or minimized query */
-       uint8_t flags = 0;
-       int ret = loot_pktcache(req->ctx, pkt, req, &flags);
-       kr_cache_sync(&req->ctx->cache);
-       if (ret == 0) {
-               qry->flags.CACHED = true;
-               qry->flags.NO_MINIMIZE = true;
-               if (flags & KR_CACHE_FLAG_WCARD_PROOF) {
-                       qry->flags.DNSSEC_WEXPAND = true;
-               }
-               if (flags & KR_CACHE_FLAG_OPTOUT) {
-                       qry->flags.DNSSEC_OPTOUT = true;
-               }
-               pkt->parsed = pkt->size;
-               knot_wire_set_qr(pkt->wire);
-               knot_wire_set_aa(pkt->wire);
-               return KR_STATE_DONE;
-       }
-       return ctx->state;
-}
-
-static uint32_t packet_ttl(knot_pkt_t *pkt, bool is_negative)
-{
-       bool has_ttl = false;
-       uint32_t ttl = UINT32_MAX;
-       /* Find minimum entry TTL in the packet or SOA minimum TTL. */
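-       /* (For negative answers the SOA MINIMUM field caps the caching time,
-        * in the spirit of RFC 2308; positive answers use the smallest rdata
-        * TTL found in any section.  Both paths go through limit_ttl().) */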
-       for (knot_section_t i = KNOT_ANSWER; i <= KNOT_ADDITIONAL; ++i) {
-               const knot_pktsection_t *sec = knot_pkt_section(pkt, i);
-               for (unsigned k = 0; k < sec->count; ++k) {
-                       const knot_rrset_t *rr = knot_pkt_rr(sec, k);
-                       if (is_negative) {
-                               /* Use SOA minimum TTL for negative answers. */
-                               if (rr->type == KNOT_RRTYPE_SOA) {
-                                       return limit_ttl(MIN(knot_rrset_ttl(rr), knot_soa_minimum(&rr->rrs)));
-                               } else {
-                                       continue; /* Use SOA only for negative answers. */
-                               }
-                       }
-                       if (knot_rrtype_is_metatype(rr->type)) {
-                               continue; /* Skip metatypes. */
-                       }
-                       /* Find minimum TTL in the record set */
-                       knot_rdata_t *rd = rr->rrs.data;
-                       for (uint16_t j = 0; j < rr->rrs.rr_count; ++j) {
-                               if (knot_rdata_ttl(rd) < ttl) {
-                                       ttl = limit_ttl(knot_rdata_ttl(rd));
-                                       has_ttl = true;
-                               }
-                               rd = kr_rdataset_next(rd);
-                       }
-               }
-       }
-       /* Get default if no valid TTL present */
-       if (!has_ttl) {
-               ttl = DEFAULT_NOTTL;
-       }
-       return limit_ttl(ttl);
-}
-
-static int pktcache_stash(kr_layer_t *ctx, knot_pkt_t *pkt)
-{
-       struct kr_request *req = ctx->req;
-       struct kr_query *qry = req->current_query;
-       /* Cache only answers that made the query resolved (i.e. authoritative),
-        * didn't fail during processing, and are negative (further eligibility
-        * conditions are checked below). */
-       if (qry->flags.CACHED || ctx->state & KR_STATE_FAIL) {
-               return ctx->state; /* Don't cache anything if failed. */
-       }
-       /* Cache only authoritative answers from IN class. */
-       if (!knot_wire_get_aa(pkt->wire) || knot_pkt_qclass(pkt) != KNOT_CLASS_IN) {
-               return ctx->state;
-       }
-       /* Cache only NODATA/NXDOMAIN or metatype/RRSIG or wildcard expanded answers. */
-       const uint16_t qtype = knot_pkt_qtype(pkt);
-       const bool is_eligible = (knot_rrtype_is_metatype(qtype) || qtype == KNOT_RRTYPE_RRSIG);
-       bool is_negative = kr_response_classify(pkt) & (PKT_NODATA|PKT_NXDOMAIN);
-       bool wcard_expansion = (qry->flags.DNSSEC_WEXPAND);
-       if (is_negative && qry->flags.FORWARD && qry->flags.CNAME) {
-               /* Don't cache a CNAME'd NXDOMAIN answer in forwarding mode,
-                * since it can contain records that have not been checked
-                * by the validator. */
-               return ctx->state;
-       }
-       if (!(is_eligible || is_negative || wcard_expansion)) {
-               return ctx->state;
-       }
-       uint32_t ttl = packet_ttl(pkt, is_negative);
-       if (ttl == 0) {
-               return ctx->state; /* No usable TTL, can't cache this. */
-       }
-       const knot_dname_t *qname = knot_pkt_qname(pkt);
-       if (!qname) {
-               return ctx->state;
-       }
-
-       knot_db_val_t data = { pkt->wire, pkt->size };
-       struct kr_cache_entry header = {
-               .timestamp = qry->timestamp.tv_sec,
-               .ttl = ttl,
-               .rank = KR_RANK_AUTH,
-               .flags = KR_CACHE_FLAG_NONE,
-               .count = data.len
-       };
-
-       /* If cd bit is set or we got answer via non-validated forwarding,
-        * make the rank bad; otherwise it depends on flags. */
-       if (knot_wire_get_cd(req->answer->wire) || qry->flags.STUB) {
-               kr_rank_set(&header.rank, KR_RANK_OMIT);
-       } else {
-               if (qry->flags.DNSSEC_BOGUS) {
-                       kr_rank_set(&header.rank, KR_RANK_BOGUS);
-               } else if (qry->flags.DNSSEC_INSECURE) {
-                       kr_rank_set(&header.rank, KR_RANK_INSECURE);
-               } else if (qry->flags.DNSSEC_WANT) {
-                       kr_rank_set(&header.rank, KR_RANK_SECURE);
-               }
-       }
-       VERBOSE_MSG(qry, "=> candidate rank: 0%0.2o\n", header.rank);
-
-       /* Set cache flags */
-       if (qry->flags.DNSSEC_WEXPAND) {
-               header.flags |= KR_CACHE_FLAG_WCARD_PROOF;
-       }
-       if (qry->flags.DNSSEC_OPTOUT) {
-               header.flags |= KR_CACHE_FLAG_OPTOUT;
-       }
-
-       /* Check if we can replace (allow current or better rank, SECURE is always accepted). */
-       struct kr_cache *cache = &ctx->req->ctx->cache;
-       if (header.rank < KR_RANK_SECURE) {
-               int cached_rank = kr_cache_peek_rank
-                       (cache, KR_CACHE_PKT, qname, qtype, header.timestamp);
-               if (cached_rank >= 0) {
-                       VERBOSE_MSG(qry, "=> cached rank:    0%0.2o\n", cached_rank);
-                       if (cached_rank > header.rank) {
-                               return ctx->state;
-                       }
-               }
-       }
-
-       /* Stash answer in the cache */
-       int ret1 = kr_cache_insert(cache, KR_CACHE_PKT, qname, qtype, &header, data);
-       int ret2 = kr_cache_sync(cache);
-       if (!ret1 && !ret2) {
-               VERBOSE_MSG(qry, "=> answer cached for TTL=%u\n", ttl);
-       } else {
-               VERBOSE_MSG(qry, "=> stashing failed; codes: %d and %d\n", ret1, ret2);
-       }
-       return ctx->state;
-}
-
-/** Module implementation. */
-const kr_layer_api_t *pktcache_layer(struct kr_module *module)
-{
-       static const kr_layer_api_t _layer = {
-               .produce = &pktcache_peek,
-               .consume = &pktcache_stash
-       };
-
-       return &_layer;
-}
-
-KR_MODULE_EXPORT(pktcache)
-
-#undef VERBOSE_MSG
diff --git a/lib/layer/rrcache.c b/lib/layer/rrcache.c
deleted file mode 100644 (file)
index fc7adc4..0000000
+++ /dev/null
@@ -1,487 +0,0 @@
-/*  Copyright (C) 2014-2017 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
-
-    This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 3 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-/** @file rrcache.c
- *
- * This builtin module caches resource records from/for positive answers.
- *
- * Produce phase: if an RRset answering the query exists, the packet is
- * filled from it, including the corresponding RRSIGs (subject to some conditions).
- * Such a packet is recognizable: pkt->size == PKT_SIZE_NOWIRE, and flags.CACHED
- * is set in the query.  The ranks are stored in *(uint8_t *)rrset->additional.
- *
- * TODO
- */
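-/* For illustration only (a sketch, not code that existed in this file):
- * a later layer could detect such a cache-synthesized packet roughly as
- *
- *     if (pkt->size == PKT_SIZE_NOWIRE && qry->flags.CACHED) {
- *             ;  // answer was filled from rrcache and has no wire form
- *     }
- */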
-
-#include <assert.h>
-
-#include <libknot/descriptor.h>
-#include <libknot/errcode.h>
-#include <libknot/rrset.h>
-#include <libknot/rrtype/rrsig.h>
-#include <libknot/rrtype/rdname.h>
-#include <ucw/config.h>
-#include <ucw/lib.h>
-
-#include "lib/layer/iterate.h"
-#include "lib/cache.h"
-#include "lib/dnssec/ta.h"
-#include "lib/module.h"
-#include "lib/utils.h"
-#include "lib/resolve.h"
-
-#define VERBOSE_MSG(qry, fmt...) QRVERBOSE((qry), " rc ",  fmt)
-#define DEFAULT_MINTTL (5) /* TTL floor for stashed records, to avoid re-query bursts */
-
-/** Record is expiring if it has less than 1% TTL (or less than 5s) */
-static inline bool is_expiring(const knot_rrset_t *rr, uint32_t drift)
-{
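-       /* Integer form of: remaining TTL (ttl - drift) < ttl/100 + 5,
-        * i.e. under ~1 % of the original TTL plus a 5 s safety margin,
-        * computed without floating point. */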
-       return 100 * (drift + 5) > 99 * knot_rrset_ttl(rr);
-}
-
-static int loot_rr(struct kr_cache *cache, knot_pkt_t *pkt, const knot_dname_t *name,
-                  uint16_t rrclass, uint16_t rrtype, struct kr_query *qry,
-                  uint8_t *rank, uint8_t *flags, bool fetch_rrsig, uint8_t lowest_rank)
-{
-       const bool precond = rank && flags;
-       if (!precond) {
-               assert(false);
-               return kr_error(EINVAL);
-       }
-       /* Check if record exists in cache */
-       int ret = 0;
-       uint32_t drift = qry->timestamp.tv_sec;
-       knot_rrset_t cache_rr;
-       knot_rrset_init(&cache_rr, (knot_dname_t *)name, rrtype, rrclass);
-       if (fetch_rrsig) {
-               ret = kr_cache_peek_rrsig(cache, &cache_rr, rank, flags, &drift);
-       } else {
-               ret = kr_cache_peek_rr(cache, &cache_rr, rank, flags, &drift);
-       }
-       if (ret != 0) {
-               return ret;
-       }
-
-       WITH_VERBOSE {
-               VERBOSE_MSG(qry, "=> rank: 0%0.2o, lowest 0%0.2o, ", *rank, lowest_rank);
-               if (fetch_rrsig) {
-                       kr_log_verbose("RRSIG for ");
-               }
-               kr_rrtype_print(rrtype, "", " ");
-               kr_dname_print(name, "", "\n");
-       }
-
-       if (*rank < lowest_rank) {
-               return kr_error(ENOENT);
-       }
-
-       if (is_expiring(&cache_rr, drift)) {
-               qry->flags.EXPIRING = true;
-       }
-
-       if ((*flags) & KR_CACHE_FLAG_WCARD_PROOF) {
-               /* Record was found, but wildcard answer proof is needed.
-                * Do not update packet, try to fetch whole packet from pktcache instead. */
-               qry->flags.DNSSEC_WEXPAND = true;
-               return kr_error(ENOENT);
-       }
-
-       /* Update packet question */
-       if (!knot_dname_is_equal(knot_pkt_qname(pkt), name)) {
-               kr_pkt_recycle(pkt);
-               knot_pkt_put_question(pkt, qry->sname, qry->sclass, qry->stype);
-       }
-
-       /* Update packet answer */
-       knot_rrset_t rr_copy;
-       ret = kr_cache_materialize(&rr_copy, &cache_rr, drift, qry->reorder, &pkt->mm);
-       if (ret) {
-               return ret;
-       }
-
-       uint8_t *rr_rank = mm_alloc(&pkt->mm, sizeof(*rr_rank));
-       if (!rr_rank) {
-               goto enomem;
-       }
-       *rr_rank = *rank;
-       rr_copy.additional = rr_rank;
-       /* Ensure the pkt->rr array is long enough. */
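-       /* pkt->rr and pkt->rr_info must stay the same length, so both arrays
-        * are grown together below, with one extra slot of headroom. */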
-       if (pkt->rrset_count + 1 > pkt->rrset_allocd) {
-               size_t rrset_allocd = pkt->rrset_count + 2;
-               pkt->rr = mm_realloc(&pkt->mm, pkt->rr,
-                                       sizeof(pkt->rr[0]) * rrset_allocd,
-                                       sizeof(pkt->rr[0]) * pkt->rrset_allocd);
-               if (!pkt->rr) {
-                       goto enomem;
-               }
-               pkt->rr_info = mm_realloc(&pkt->mm, pkt->rr_info,
-                                       sizeof(pkt->rr_info[0]) * rrset_allocd,
-                                       sizeof(pkt->rr_info[0]) * pkt->rrset_allocd);
-               if (!pkt->rr_info) {
-                       goto enomem;
-               }
-               pkt->rrset_allocd = rrset_allocd;
-       }
-       /* Append the RR array. */
-       assert(pkt->sections[pkt->current].count == pkt->rrset_count);
-       pkt->rr[pkt->rrset_count] = rr_copy;
-       pkt->sections[pkt->current].count = ++pkt->rrset_count;
-       return ret;
-enomem:
-       knot_rrset_clear(&rr_copy, &pkt->mm);
-       mm_free(&pkt->mm, rr_rank);
-       return kr_error(ENOMEM);
-}
-
-/** @internal Try to find a shortcut directly to searched record. */
-static int loot_rrcache(struct kr_request *req, knot_pkt_t *pkt,
-                       struct kr_query *qry, uint16_t rrtype)
-{
-       const bool allow_unverified = knot_wire_get_cd(req->answer->wire)
-                                       || qry->flags.STUB;
-       /* Lookup direct match first; only consider authoritative records.
-        * TODO: move rank handling into the iterator (DNSSEC_* flags)? */
-       uint8_t rank  = 0;
-       uint8_t flags = 0;
-       uint8_t lowest_rank = KR_RANK_INITIAL | KR_RANK_AUTH;
-       if (qry->flags.NONAUTH) {
-               lowest_rank = KR_RANK_INITIAL;
-               /* Note: there's little sense in validation status for non-auth records.
-                * When NONAUTH is used to fetch NS addresses, knowing that you ask
-                * the correct IP doesn't matter much for security; what matters is
-                * whether you can validate the answers from that NS.
-                */
-       } else if (!allow_unverified) {
-                               /* ^^ in stub mode we don't trust RRs anyway */
-               /* Records not present under any TA don't have their security
-                * verified at all, so we also accept low ranks in that case. */
-               const bool ta_covers = kr_ta_covers_qry(req->ctx, qry->sname, rrtype);
-               /* ^ TODO: performance? */
-               if (ta_covers) {
-                       kr_rank_set(&lowest_rank, KR_RANK_INSECURE);
-               }
-       }
-
-       struct kr_cache *cache = &req->ctx->cache;
-       int ret = loot_rr(cache, pkt, qry->sname, qry->sclass, rrtype, qry,
-                         &rank, &flags, 0, lowest_rank);
-       if (ret != 0 && rrtype != KNOT_RRTYPE_CNAME
-           && !(qry->flags.STUB)) {
-               /* Chase CNAME if no direct hit.
-                * We avoid this in STUB mode because the current iterator
-                * (process_stub()) is unable to iterate in STUB mode to follow
-                * the CNAME chain. */
-               rrtype = KNOT_RRTYPE_CNAME;
-               ret = loot_rr(cache, pkt, qry->sname, qry->sclass, rrtype, qry,
-                             &rank, &flags, 0, lowest_rank);
-       }
-       if (ret) {
-               return ret;
-       }
-
-       if (kr_rank_test(rank, KR_RANK_INSECURE)) {
-               qry->flags.DNSSEC_INSECURE = true;
-               qry->flags.DNSSEC_WANT = false;
-       }
-
-       /* Record may have RRSIGs, try to find them. */
-       if (allow_unverified
-           || ((qry->flags.DNSSEC_WANT) && kr_rank_test(rank, KR_RANK_SECURE))) {
-               kr_rank_set(&lowest_rank, KR_RANK_INITIAL); /* no security for RRSIGs */
-               ret = loot_rr(cache, pkt, qry->sname, qry->sclass, rrtype, qry,
-                             &rank, &flags, true, lowest_rank);
-               if (allow_unverified) {
-                       /* TODO: in STUB mode, if we cached from a query without
-                        * the DO bit, we will return without RRSIGs even though
-                        * an upstream answer with DO set would contain them. */
-                       ret = 0;
-               }
-               if (ret) {
-                       VERBOSE_MSG(qry, "=> RRSIG(s) expected but not found, skipping\n");
-                       /* In some cases, e.g. due to bugs, this may fail.
-                        * A possible example: a cache backend (such as redis)
-                        * chose to evict the RRSIG but not the RRset.
-                        * We return a cache failure, but the packet has already
-                        * been updated with the RRs, so try to clear it.
-                        * The following call might theoretically fail again
-                        * while parsing the question, but just log that
-                        * condition in non-debug mode (it might be non-fatal). */
-                       if (kr_pkt_clear_payload(pkt)) {
-                               kr_log_error("[ rc ] => ERROR: work-around failed\n");
-                               assert(false);
-                       }
-               }
-       }
-       return ret;
-}
-
-static int rrcache_peek(kr_layer_t *ctx, knot_pkt_t *pkt)
-{
-       struct kr_request *req = ctx->req;
-       struct kr_query *qry = req->current_query;
-       if (ctx->state & (KR_STATE_FAIL|KR_STATE_DONE) || (qry->flags.NO_CACHE)) {
-               return ctx->state; /* Already resolved/failed or already tried, etc. */
-       }
-       /* Reconstruct the answer from the cache; it may be either a CNAME
-        * chain or a direct answer.  Only one step of the chain is resolved
-        * at a time.
-        */
-       int ret = -1;
-       if (qry->stype != KNOT_RRTYPE_ANY) {
-               ret = loot_rrcache(req, pkt, qry, qry->stype);
-       } else {
-               /* ANY queries are used by qmail and certain versions of Firefox.
-                * Probe the cache for a few interesting record types. */
-               static uint16_t any_types[] = { KNOT_RRTYPE_A, KNOT_RRTYPE_AAAA, KNOT_RRTYPE_MX };
-               for (size_t i = 0; i < sizeof(any_types)/sizeof(any_types[0]); ++i) {
-                       if (loot_rrcache(req, pkt, qry, any_types[i]) == 0) {
-                               ret = 0; /* At least one record matched */
-                       }
-               }
-       }
-       kr_cache_sync(&req->ctx->cache);
-       if (ret == 0) {
-               VERBOSE_MSG(qry, "=> satisfied from cache\n");
-               qry->flags.CACHED = true;
-               qry->flags.NO_MINIMIZE = true;
-               pkt->parsed = pkt->size = PKT_SIZE_NOWIRE;
-               knot_wire_set_qr(pkt->wire);
-               knot_wire_set_aa(pkt->wire);
-               return KR_STATE_DONE;
-       }
-       return ctx->state;
-}
-
-/** @internal Baton for stash_commit */
-struct rrcache_baton
-{
-       struct kr_request *req;
-       struct kr_query *qry;
-       struct kr_cache *cache;
-       unsigned timestamp;
-};
-
-static int commit_rrsig(struct rrcache_baton *baton, uint8_t rank, uint8_t flags, knot_rrset_t *rr)
-{
-       /* If not doing secure resolution, ignore (unvalidated) RRSIGs. */
-       if (!(baton->qry->flags.DNSSEC_WANT)) {
-               return kr_ok();
-       }
-       /* Commit covering RRSIG to a separate cache namespace. */
-       return kr_cache_insert_rrsig(baton->cache, rr, rank, flags, baton->timestamp);
-}
-
-static int commit_rr(const char *key, void *val, void *data)
-{
-       knot_rrset_t *rr = val;
-       struct rrcache_baton *baton = data;
-
-       /* Save RRSIG in a special cache. */
-       uint8_t rank = KEY_FLAG_RANK(key);
-       if (KEY_COVERING_RRSIG(key)) {
-               return commit_rrsig(baton, rank, KR_CACHE_FLAG_NONE, rr);
-       }
-       /* Accept only better rank if not secure. */
-       if (!kr_rank_test(rank, KR_RANK_SECURE)) {
-               int cached_rank = kr_cache_peek_rank(baton->cache, KR_CACHE_RR, rr->owner, rr->type, baton->timestamp);
-               /* If an equal rank were accepted, spoofing a single answer would be
-                * enough to e.g. override an NS record in the AUTHORITY section.
-                * This way an attacker has to beat the first answer (i.e. wait
-                * until its TTL expires). */
-               if (cached_rank >= 0) {
-                       VERBOSE_MSG(baton->qry, "=> orig. rank: 0%0.2o\n", cached_rank);
-                       bool accept = rank > cached_rank;
-                       /* Additionally accept equal rank if the cached RR is expiring.
-                        * This is primarily for prefetching from predict module. */
-                       if (rank == cached_rank) {
-                               uint32_t drift = baton->timestamp;
-                               knot_rrset_t cache_rr;
-                               knot_rrset_init(&cache_rr, rr->owner, rr->type, rr->rclass);
-                               int ret = kr_cache_peek_rr(baton->cache, &cache_rr, NULL, NULL, &drift);
-                               if (ret != kr_ok() || is_expiring(&cache_rr, drift)) {
-                                       accept = true;
-                               }
-                       }
-                       if (!accept) {
-                               return kr_ok();
-                       }
-               }
-       }
-
-       WITH_VERBOSE {
-               VERBOSE_MSG(baton->qry, "=> stashing rank: 0%0.2o, ", rank);
-               kr_rrtype_print(rr->type, "", " ");
-               kr_dname_print(rr->owner, "", "\n");
-       }
-
-       uint8_t flags = KR_CACHE_FLAG_NONE;
-       if (kr_rank_test(rank, KR_RANK_AUTH)) {
-               if (baton->qry->flags.DNSSEC_WEXPAND) {
-                       flags |= KR_CACHE_FLAG_WCARD_PROOF;
-               }
-               if ((rr->type == KNOT_RRTYPE_NS) &&
-                   (baton->qry->flags.DNSSEC_NODS)) {
-                       flags |= KR_CACHE_FLAG_NODS;
-               }
-       }
-
-       return kr_cache_insert_rr(baton->cache, rr, rank, flags, baton->timestamp);
-}
-
-static int stash_commit(map_t *stash, struct kr_query *qry, struct kr_cache *cache, struct kr_request *req)
-{
-       struct rrcache_baton baton = {
-               .req = req,
-               .qry = qry,
-               .cache = cache,
-               .timestamp = qry->timestamp.tv_sec,
-       };
-       return map_walk(stash, &commit_rr, &baton);
-}
-
-static void stash_glue(map_t *stash, knot_pkt_t *pkt, const knot_dname_t *ns_name, knot_mm_t *pool)
-{
-       const knot_pktsection_t *additional = knot_pkt_section(pkt, KNOT_ADDITIONAL);
-       for (unsigned i = 0; i < additional->count; ++i) {
-               const knot_rrset_t *rr = knot_pkt_rr(additional, i);
-               if ((rr->type != KNOT_RRTYPE_A && rr->type != KNOT_RRTYPE_AAAA) ||
-                   !knot_dname_is_equal(rr->owner, ns_name)) {
-                       continue;
-               }
-               kr_rrmap_add(stash, rr, KR_RANK_OMIT, pool);
-       }
-}
-
-static int stash_selected(struct kr_request *req, knot_pkt_t *pkt, map_t *stash,
-                bool is_authority, knot_mm_t *pool)
-{
-       ranked_rr_array_t *arr = is_authority
-               ? &req->auth_selected : &req->answ_selected;
-       const struct kr_query *qry = req->current_query;
-       if (!arr->len) {
-               return kr_ok();
-       }
-
-       uint32_t min_ttl = MAX(DEFAULT_MINTTL, req->ctx->cache.ttl_min);
-       /* uncached entries are located at the end */
-       for (ssize_t i = arr->len - 1; i >= 0; --i) {
-               ranked_rr_array_entry_t *entry = arr->at[i];
-               if (entry->qry_uid != qry->uid) {
-                       continue; /* TODO: probably safe to break but maybe not worth it */
-               }
-               if (entry->cached) {
-                       continue;
-               }
-               knot_rrset_t *rr = entry->rr;
-               
-               /* Ensure minimum TTL */
-               knot_rdata_t *rd = rr->rrs.data;
-               for (uint16_t j = 0; j < rr->rrs.rr_count; ++j) {
-                       if (knot_rdata_ttl(rd) < min_ttl) {
-                               knot_rdata_set_ttl(rd, min_ttl);
-                       }
-                       rd = kr_rdataset_next(rd);
-               }
-
-               /* Skip NSEC3 RRs and their signatures.  We don't use them this way.
-                * They would be stored under the hashed name, etc. */
-               if (kr_rrset_type_maysig(rr) == KNOT_RRTYPE_NSEC3) {
-                       continue;
-               }
-               /* Look up glue records for NS */
-               if (is_authority && rr->type == KNOT_RRTYPE_NS) {
-                       for (size_t j = 0; j < rr->rrs.rr_count; ++j) {
-                               const knot_dname_t *ns_name = knot_ns_name(&rr->rrs, j);
-                               if (knot_dname_in(qry->zone_cut.name, ns_name)) {
-                                       stash_glue(stash, pkt, ns_name, pool);
-                               }
-                       }
-               }
-               kr_rrmap_add(stash, rr, entry->rank, pool);
-               entry->cached = true;
-       }
-       return kr_ok();
-}
-
-static int rrcache_stash(kr_layer_t *ctx, knot_pkt_t *pkt)
-{
-       struct kr_request *req = ctx->req;
-       struct kr_query *qry = req->current_query;
-       if (!qry || ctx->state & KR_STATE_FAIL) {
-               return ctx->state;
-       }
-       /* Do not cache truncated answers. */
-       if (knot_wire_get_tc(pkt->wire)) {
-               return ctx->state;
-       }
-
-       /* Cache only positive answers, not meta types or RRSIG. */
-       const uint16_t qtype = knot_pkt_qtype(pkt);
-       const bool is_eligible = !(knot_rrtype_is_metatype(qtype) || qtype == KNOT_RRTYPE_RRSIG);
-       if (qry->flags.CACHED || knot_wire_get_rcode(pkt->wire) != KNOT_RCODE_NOERROR || !is_eligible) {
-               return ctx->state;
-       }
-       /* Stash data selected by the iterator from the last received packet. */
-       map_t stash = map_make();
-       stash.malloc = (map_alloc_f) mm_alloc;
-       stash.free = (map_free_f) mm_free;
-       stash.baton = &req->pool;
-       int ret = 0;
-       bool is_auth = knot_wire_get_aa(pkt->wire);
-       if (is_auth) {
-               ret = stash_selected(req, pkt, &stash, false, &req->pool);
-       }
-       if (ret == 0) {
-               ret = stash_selected(req, pkt, &stash, true, &req->pool);
-               /* this also stashes DS records in referrals */
-       }
-       /* Cache stashed records */
-       if (ret == 0 && stash.root != NULL) {
-               /* Open write transaction */
-               struct kr_cache *cache = &req->ctx->cache;
-               ret = stash_commit(&stash, qry, cache, req);
-               if (ret == 0) {
-                       ret = kr_cache_sync(cache);
-               } else {
-                       kr_cache_sync(cache);
-               }
-               /* Clear if full */
-               if (ret == kr_error(ENOSPC)) {
-                       kr_log_info("[cache] clearing because overfull\n");
-                       ret = kr_cache_clear(cache);
-                       if (ret != 0 && ret != kr_error(EEXIST)) {
-                               kr_log_error("[cache] failed to clear cache: %s\n", kr_strerror(ret));
-                       }
-               } else if (ret) {
-                       VERBOSE_MSG(qry, "=> stashing failed: %d\n", ret);
-               }
-       }
-       return ctx->state;
-}
-
-/** Module implementation. */
-const kr_layer_api_t *rrcache_layer(struct kr_module *module)
-{
-       static const kr_layer_api_t _layer = {
-               .produce = &rrcache_peek,
-               .consume = &rrcache_stash
-       };
-
-       return &_layer;
-}
-
-KR_MODULE_EXPORT(rrcache)
-
-#undef VERBOSE_MSG