git.ipfire.org Git - thirdparty/knot-resolver.git/commitdiff
treewide refactor: avoid kr_cache_top_context_switch()
author		Vladimír Čunát <vladimir.cunat@nic.cz>
		Wed, 13 Aug 2025 10:57:53 +0000 (12:57 +0200)
committer	Vladimír Čunát <vladimir.cunat@nic.cz>
		Wed, 13 Aug 2025 12:10:58 +0000 (14:10 +0200)
It seemed to bring more complexity than benefit.
In many parts this means reverting to the code from a few commits back.
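
The shape of the change, condensed from the hunks below: callers used to bracket cache work with kr_cache_top_context_switch() at every entry/exit point; now they pass the kr_request down to kr_cache_top_access(), which finds both the shared top structure and the per-request context on its own. A minimal before/after sketch (names as in the diff; key/val stand in for a cache entry key and value):

    /* before: switch the global context in, touch the cache, switch it out */
    kr_cache_top_context_switch(&the_resolver->cache.top,
                                &request->cache_top_context, "produce");
    kr_cache_top_access(&cache->top, key.data, key.len, val.len, "peek_nosync:exact");
    kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce");

    /* after: the request itself carries the context */
    kr_cache_top_access(request, key.data, key.len, val.len, "peek_nosync:exact");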

16 files changed:
daemon/lua/kres-gen-33.lua
daemon/worker.c
lib/cache/api.c
lib/cache/api.h
lib/cache/entry_pkt.c
lib/cache/impl.h
lib/cache/nsec1.c
lib/cache/nsec3.c
lib/cache/peek.c
lib/cache/top.c
lib/cache/top.h
lib/resolve-produce.c
lib/resolve.c
lib/selection.c
lib/selection.h
lib/zonecut.c

index 6305b425cece485f5b4c88c9f56dc7a6cc708f9a..b36e660833d1340d17bd21324d843640a89de9a7 100644 (file)
@@ -319,7 +319,6 @@ struct mmapped {
 struct kr_cache_top {
        struct mmapped mmapped;
        struct top_data *data;
-       struct kr_cache_top_context *ctx;
 };
 struct kr_cache {
        kr_cdb_pt db;
index 51de2d3ad90455489444db0677e4984d68c27f70..c912fd158906171bdea8174d9b6c55d6fc60514d 100644 (file)
@@ -2054,7 +2054,6 @@ static enum protolayer_event_cb_result pl_dns_stream_disconnected(
                                qry->flags.TCP = false;
                        }
                        qr_task_step(task, NULL, NULL);
-
                        defer_sample_restart();
                } else {
                        kr_assert(task->ctx->source.session == session);
index a0cd9f8a1850e9a1125838b64ec4ef148a3f74b4..6e6fb89f6a41617b611789c15b4a9c346bd93118 100644 (file)
@@ -399,7 +399,7 @@ static int stash_rrarray_entry(ranked_rr_array_t *arr, int arr_i,
 /** Stash a single nsec_p.  \return 0 (errors are ignored). */
 static int stash_nsec_p(const knot_dname_t *dname, const char *nsec_p_v,
                        struct kr_cache *cache, uint32_t timestamp, knot_mm_t *pool,
-                       const struct kr_query *qry/*logging*/);
+                       const struct kr_query *qry/*logging + cache_top*/);
 
 /** The whole .consume phase for the cache module. */
 int cache_stash(kr_layer_t *ctx, knot_pkt_t *pkt)
@@ -653,7 +653,8 @@ static ssize_t stash_rrset(struct kr_cache *cache, const struct kr_query *qry,
        rdataset_dematerialize(rds_sigs, eh->data + rr_ssize);
        if (kr_fails_assert(entry_h_consistent_E(val_new_entry, rr->type)))
                return kr_error(EINVAL);
-       kr_cache_top_access(&cache->top, key.data, key.len, val_new_entry.len, "stash_rrset");
+       if (qry) // it's possible to insert outside a request
+               kr_cache_top_access(qry->request, key.data, key.len, val_new_entry.len, "stash_rrset");
 
        #if 0 /* Occasionally useful when debugging some kinds of changes. */
        {
@@ -830,7 +831,8 @@ static int stash_nsec_p(const knot_dname_t *dname, const char *nsec_p_v,
                VERBOSE_MSG(qry, "=> EL write failed (ret: %d)\n", ret);
                return kr_ok();
        }
-       kr_cache_top_access(&cache->top, key.data, key.len, val.len, "stash_nsec_p");
+       if (qry)
+               kr_cache_top_access(qry->request, key.data, key.len, val.len, "stash_nsec_p");
        if (log_refresh_by) {
                VERBOSE_MSG(qry, "=> nsec_p stashed for %s (refresh by %d, hash: %x)\n",
                                log_dname, log_refresh_by, log_hash);
@@ -879,7 +881,8 @@ int kr_cache_insert_rr(struct kr_cache *cache,
        return (int) written;
 }
 
-static int peek_exact_real(struct kr_cache *cache, const knot_dname_t *name, uint16_t type,
+static int peek_exact_real(struct kr_cache *cache, struct kr_request *req,
+                       const knot_dname_t *name, uint16_t type,
                        struct kr_cache_p *peek)
 {
        if (!check_rrtype(type, NULL) || !check_dname_for_lf(name, NULL)) {
@@ -912,13 +915,14 @@ static int peek_exact_real(struct kr_cache *cache, const knot_dname_t *name, uin
                .raw_data = val.data,
                .raw_bound = knot_db_val_bound(val),
        };
-       kr_cache_top_access(&cache->top, key.data, key.len, whole_val_len, "peek_exact_real"); // hits only
+       kr_cache_top_access(req, key.data, key.len, whole_val_len, "peek_exact_real"); // hits only
        return kr_ok();
 }
-int kr_cache_peek_exact(struct kr_cache *cache, const knot_dname_t *name, uint16_t type,
+int kr_cache_peek_exact(struct kr_cache *cache, struct kr_request *req,
+                       const knot_dname_t *name, uint16_t type,
                        struct kr_cache_p *peek)
 {      /* Just wrap with extra verbose logging. */
-       const int ret = peek_exact_real(cache, name, type, peek);
+       const int ret = peek_exact_real(cache, req, name, type, peek);
        if (false && kr_log_is_debug(CACHE, NULL)) { /* too noisy for usual --verbose */
                auto_free char *type_str = kr_rrtype_text(type),
                        *name_str = kr_dname_text(name);
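
With the extra parameter, peeks now thread the request through so that cache hits can be charged to the cache-top KRU. A minimal sketch of the new calling convention, mirroring the zonecut.c call sites further down (ctx and qry as usual in those callers):

    struct kr_cache_p peek;
    if (kr_cache_peek_exact(&ctx->cache, qry->request, name, KNOT_RRTYPE_NS, &peek) == 0) {
            /* hit: peek_exact_real() has already charged the access */
    }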
index 7ae13807b86502fcc85a0f238db67631a5309ce4..b3085f16183433850be2a8ebdb9d961e421ca3e7 100644 (file)
@@ -122,7 +122,8 @@ struct kr_cache_p {
        };
 };
 KR_EXPORT
-int kr_cache_peek_exact(struct kr_cache *cache, const knot_dname_t *name, uint16_t type,
+int kr_cache_peek_exact(struct kr_cache *cache, struct kr_request *req,
+                       const knot_dname_t *name, uint16_t type,
                        struct kr_cache_p *peek);
 /* Parameters (qry, name, type) are used for timestamp and stale-serving decisions. */
 KR_EXPORT
index b853cad0f019451324707fb2faa2ec5007b7c0a6..d76b8b95f9505fefc863ba4d318aa8d99262d3d7 100644 (file)
@@ -33,7 +33,7 @@ uint32_t packet_ttl(const knot_pkt_t *pkt)
 
 
 void stash_pkt(const knot_pkt_t *pkt, const struct kr_query *qry,
-               const struct kr_request *req, const bool needs_pkt)
+               struct kr_request *req, const bool needs_pkt)
 {
        /* In some cases, stash also the packet. */
        const bool is_negative = kr_response_classify(pkt)
@@ -114,7 +114,7 @@ void stash_pkt(const knot_pkt_t *pkt, const struct kr_query *qry,
        eh->has_optout = qf->DNSSEC_OPTOUT;
        memcpy(eh->data, &pkt_size, sizeof(pkt_size));
        memcpy(eh->data + sizeof(pkt_size), pkt->wire, pkt_size);
-       kr_cache_top_access(&cache->top, key.data, key.len, val_new_entry.len, "stash_pkt");
+       kr_cache_top_access(req, key.data, key.len, val_new_entry.len, "stash_pkt");
 
        WITH_VERBOSE(qry) {
                auto_free char *type_str = kr_rrtype_text(pkt_type),
index 87ff6cd4b8d58cfb8ab6fafe654bbfe9a306fb91..0ca3a6562226bd02b642a04f466a280d16a95fcf 100644 (file)
@@ -267,7 +267,7 @@ void entry_list_memcpy(struct entry_apex *ea, entry_list_t list);
  *             see stash_rrset() for details
  * It assumes check_dname_for_lf(). */
 void stash_pkt(const knot_pkt_t *pkt, const struct kr_query *qry,
-               const struct kr_request *req, bool needs_pkt);
+               struct kr_request *req, bool needs_pkt);
 
 /** Try answering from packet cache, given an entry_h.
  *
index 4865a47f189c7b5ee09767d11de95c10310bf689..008d514084474527e2c579531214c107c4f2216a 100644 (file)
@@ -250,7 +250,7 @@ static const char * find_leq_NSEC1(struct kr_cache *cache, const struct kr_query
 
 success:
 
-       kr_cache_top_access(&cache->top, key_nsec.data, key_nsec.len, val.len, "leq_nsec1");  // hits only
+       kr_cache_top_access(qry->request, key_nsec.data, key_nsec.len, val.len, "leq_nsec1");  // hits only
        return NULL;
 }
 
index a9a4395962a90859f3373d5e8ad61740ffad508d..24fbd5f91edb518f83565c3706d99d5552fd8de5 100644 (file)
@@ -219,7 +219,7 @@ static const char * find_leq_NSEC3(struct kr_cache *cache, const struct kr_query
 
 success:
 
-       kr_cache_top_access(&cache->top, key_found.data, key_found.len, val.len, "leq_nsec3");  // hits only
+       kr_cache_top_access(qry->request, key_found.data, key_found.len, val.len, "leq_nsec3");  // hits only
        return NULL;
 }
 
index 5bf4b41cf00870aab42a61a10afbe8576b35ce40..9c79894243c9481ecc24b05e59b12c3b6b44e99f 100644 (file)
@@ -128,7 +128,7 @@ int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt)
                        ret = found_exact_hit(qry, pkt, val, lowest_rank);
                }
                if (!ret) {
-                       kr_cache_top_access(&cache->top, key.data, key.len, val.len, "peek_nosync:exact");  // hits only
+                       kr_cache_top_access(req, key.data, key.len, val.len, "peek_nosync:exact");  // hits only
                        return KR_STATE_DONE;
                } else if (kr_fails_assert(ret == kr_error(ENOENT))) {
                        VERBOSE_MSG(qry, "=> exact hit error: %d %s\n", ret, kr_strerror(ret));
@@ -276,7 +276,7 @@ int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt)
                ret = entry2answer(&ans, AR_SOA, eh, knot_db_val_bound(val),
                                   k->zname, KNOT_RRTYPE_SOA, new_ttl);
                if (ret) return ctx->state;
-               kr_cache_top_access(&cache->top, key.data, key.len, val.len, "peek_nosync:SOA");  // hits only
+               kr_cache_top_access(req, key.data, key.len, val.len, "peek_nosync:SOA");  // hits only
        }
 
        /* Find our target RCODE. */
@@ -597,7 +597,7 @@ static int try_wild(struct key *k, struct answer *ans, const knot_dname_t *clenc
                        ret, (int)new_ttl);
        if (ret) return kr_error(ret);
        ans->rcode = PKT_NOERROR;
-       kr_cache_top_access(&cache->top, key.data, key.len, whole_val_len, "try_wild"); // hits only
+       kr_cache_top_access(qry->request, key.data, key.len, whole_val_len, "try_wild"); // hits only
        return kr_ok();
 }
 
@@ -728,7 +728,8 @@ static int closest_NS(struct kr_cache *cache, struct key *k, entry_list_t el,
 
 success:
        k->zlf_len = zlf_len;
-       kr_cache_top_access(&cache->top, key.data, key.len, val.len, "closest_NS"); // hits only
+       if (qry)
+               kr_cache_top_access(qry->request, key.data, key.len, val.len, "closest_NS"); // hits only
        return kr_ok();
 }
 
index 372104ff4732c4f718f7e378fe69b0e4d0e0301f..a438706e216216058edd820c30afa2fe5968c901 100644 (file)
@@ -9,6 +9,7 @@
 #include "lib/cache/top.h"
 #include "lib/cache/impl.h"
 #include "lib/mmapped.h"
+#include "lib/resolve.h"
 #include "lib/kru.h"
 
 #define FILE_FORMAT_VERSION 1  // fail if different
@@ -146,7 +147,6 @@ int kr_cache_top_init(struct kr_cache_top *top, char *mmap_file, size_t cache_si
        if (state < 0) goto fail;
        kr_assert(state == 0);
 
-       top->ctx = NULL;
        kr_log_info(CACHE, "Cache top initialized %s (%s).\n",
                        using_existing ? "using existing data" : "as empty",
                        (kru_using_avx2() ? "AVX2" : "generic"));
@@ -236,10 +236,12 @@ char *kr_cache_top_strkey(void *key, size_t len)
        return str;
 }
 
-void kr_cache_top_access(struct kr_cache_top *top, void *key, size_t key_len, size_t data_size, char *debug_label)
+void kr_cache_top_access(struct kr_request *req, void *key, size_t key_len, size_t data_size, char *debug_label)
 {
+       struct kr_cache_top *top = &req->ctx->cache.top;
+       struct kr_cache_top_context *ctx = &req->cache_top_context;
        kru_hash_t hash = KRU.hash_bytes((struct kru *)&top->data->kru, (uint8_t *)key, key_len);
-       const bool unique = top->ctx ? first_access(top->ctx, hash) : true;
+       const bool unique = ctx ? first_access(ctx, hash) : true;
        if (!unique) return;
 
        const size_t size = kr_cache_top_entry_size(key_len, data_size);
@@ -247,14 +249,6 @@ void kr_cache_top_access(struct kr_cache_top *top, void *key, size_t key_len, si
        KRU.load_hash((struct kru *)&top->data->kru, ticks_now(), hash, price);
 }
 
-struct kr_cache_top_context *kr_cache_top_context_switch(struct kr_cache_top *top,
-               struct kr_cache_top_context *new_ctx, char *debug_label)
-{
-       struct kr_cache_top_context *old_ctx = top->ctx;
-       top->ctx = new_ctx;
-       return old_ctx;
-}
-
 uint16_t kr_cache_top_load(struct kr_cache_top *top, void *key, size_t len)
 {
        kru_hash_t hash = KRU.hash_bytes((struct kru *)&top->data->kru, (uint8_t *)key, len);
index 91e2d6dc03b6dd130c2556f70d942e1a5503f549..cb1353fcfdfa1c742eeb388b1a5602b19645df46 100644 (file)
 #include <stdalign.h>
 #include "lib/mmapped.h"
 
+struct kr_request;
+
 /// Data related to open cache.
 struct kr_cache_top {
        struct mmapped mmapped;
        struct top_data *data;
-       struct kr_cache_top_context *ctx;
 };
 
 /// Part of the previous, shared between all processes.
@@ -71,23 +72,13 @@ void kr_cache_top_deinit(struct kr_cache_top *top);
 /// Charge cache access to the accessed key
 /// unless it was already accessed in the current request context.
 KR_EXPORT
-void kr_cache_top_access(struct kr_cache_top *top, void *key, size_t key_len, size_t data_size, char *debug_label);
+void kr_cache_top_access(struct kr_request *req, void *key, size_t key_len, size_t data_size, char *debug_label);
        // debug_label is currently not used, TODO remove?
 
 /// Get current KRU load value assigned to the given cache entry key.
 KR_EXPORT
 uint16_t kr_cache_top_load(struct kr_cache_top *top, void *key, size_t len);
 
-/// Switch request context; the ctx has to be kept valid until next call.
-/// The context of a new kr_request has to be initialized with zeroes.
-/// Use NULL as ctx to stop using current context;
-/// all cache accesses in such a state are considered unique,
-/// but no such access is expected to happen.
-/// Returns the previous context.
-KR_EXPORT
-struct kr_cache_top_context *kr_cache_top_context_switch(struct kr_cache_top *top, struct kr_cache_top_context *ctx, char *debug_label);
-       // debug_label is currently not used, TODO remove?
-
 /// Return readable string representation of a cache key in a statically allocated memory.
 /// By default printable characters are kept unchanged and NULL-bytes are printed as '|'.
 /// Where numeric values are expected (CACHE_KEY_DEF) or non-printable characters occur,
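
Taken with the top.c hunk above: kr_cache_top_access() now reaches the shared KRU via req->ctx->cache.top and the per-request deduplication state via req->cache_top_context, so a key touched several times within one request is charged only once (first_access() marks its hash as seen). Reading a load value still needs no request state. A hypothetical pairing of the two calls, assuming a knot_db_val_t-style key and value:

    kr_cache_top_access(req, key.data, key.len, val.len, "example"); /* charged at most once per request */
    uint16_t load = kr_cache_top_load(&req->ctx->cache.top, key.data, key.len);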
index 7e052b5f284d6e63603891300045ce2bb75fb6b1..a3a2401e41dd5487ee6cf79af55734c877dddd59 100644 (file)
@@ -598,11 +598,9 @@ int kr_resolve_produce(struct kr_request *request, struct kr_transport **transpo
 {
        kr_require(request && transport && packet);
        struct kr_rplan *rplan = &request->rplan;
-       kr_cache_top_context_switch(&the_resolver->cache.top, &request->cache_top_context, "produce");
 
        /* No query left for resolution */
        if (kr_rplan_empty(rplan)) {
-               kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce");
                return KR_STATE_FAIL;
        }
 
@@ -620,12 +618,8 @@ int kr_resolve_produce(struct kr_request *request, struct kr_transport **transpo
                }
 
                switch(state) {
-               case KR_STATE_FAIL:
-                       kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce");
-                       return KR_STATE_FAIL;
-               case KR_STATE_DONE:
-                       kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce");
-                       return KR_STATE_PRODUCE;
+               case KR_STATE_FAIL: return KR_STATE_FAIL;
+               case KR_STATE_DONE: return KR_STATE_PRODUCE;
                default: break;
                }
                VERBOSE_MSG(qry, "=> resuming yielded answer\n");
@@ -643,12 +637,8 @@ int kr_resolve_produce(struct kr_request *request, struct kr_transport **transpo
                 * this is normally not required, and incurs another cache lookups for cached answer. */
                if (qry->flags.ALWAYS_CUT) { // LATER: maybe the flag doesn't work well anymore
                        switch(zone_cut_check(request, qry, packet)) {
-                       case KR_STATE_FAIL:
-                               kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce");
-                               return KR_STATE_FAIL;
-                       case KR_STATE_DONE:
-                               kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce");
-                               return KR_STATE_PRODUCE;
+                       case KR_STATE_FAIL: return KR_STATE_FAIL;
+                       case KR_STATE_DONE: return KR_STATE_PRODUCE;
                        default: break;
                        }
                }
@@ -664,9 +654,7 @@ int kr_resolve_produce(struct kr_request *request, struct kr_transport **transpo
                }
        }
        switch(request->state) {
-       case KR_STATE_FAIL:
-               kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce");
-               return request->state;
+       case KR_STATE_FAIL: return request->state;
        case KR_STATE_CONSUME: break;
        case KR_STATE_DONE:
        default: /* Current query is done */
@@ -674,7 +662,6 @@ int kr_resolve_produce(struct kr_request *request, struct kr_transport **transpo
                        kr_rplan_pop(rplan, qry);
                }
                ITERATE_LAYERS(request, qry, reset);
-               kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce");
                return kr_rplan_empty(rplan) ? KR_STATE_DONE : KR_STATE_PRODUCE;
        }
        /* At this point we need to send a query upstream to proceed towards success. */
@@ -683,19 +670,14 @@ int kr_resolve_produce(struct kr_request *request, struct kr_transport **transpo
        if (qry->stype == KNOT_RRTYPE_ANY ||
            !knot_wire_get_rd(request->qsource.packet->wire)) {
                VERBOSE_MSG(qry, "=> qtype is ANY or RD=0, bail out\n");
-               kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce");
                return KR_STATE_FAIL;
        }
 
        /* Update zone cut, spawn new subrequests. */
        int state = zone_cut_check(request, qry, packet);
        switch(state) {
-       case KR_STATE_FAIL:
-               kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce");
-               return KR_STATE_FAIL;
-       case KR_STATE_DONE:
-               kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce");
-               return KR_STATE_PRODUCE;
+       case KR_STATE_FAIL: return KR_STATE_FAIL;
+       case KR_STATE_DONE: return KR_STATE_PRODUCE;
        default: break;
        }
 
@@ -727,14 +709,12 @@ int kr_resolve_produce(struct kr_request *request, struct kr_transport **transpo
                        }
                        kr_request_set_extended_error(request, KNOT_EDNS_EDE_NREACH_AUTH, msg);
 
-                       kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce");
                        return KR_STATE_FAIL;
                } else {
                        /* FIXME: This is probably quite inefficient:
                        * we go through the whole qr_task_step loop just because of the serve_stale
                        * module which might not even be loaded. */
                        qry->flags.NO_NS_FOUND = true;
-                       kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce");
                        return KR_STATE_PRODUCE;
                }
        }
@@ -743,7 +723,6 @@ int kr_resolve_produce(struct kr_request *request, struct kr_transport **transpo
                uint16_t type = (*transport)->protocol == KR_TRANSPORT_RESOLVE_A ? KNOT_RRTYPE_A : KNOT_RRTYPE_AAAA;
                ns_resolve_addr(qry, qry->request, *transport, type);
                ITERATE_LAYERS(request, qry, reset);
-               kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce");
                return KR_STATE_PRODUCE;
        }
 
@@ -757,7 +736,6 @@ int kr_resolve_produce(struct kr_request *request, struct kr_transport **transpo
         * kr_resolve_checkout().
         */
        qry->timestamp_mono = kr_now();
-       kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce");
        return request->state;
 }
 
index 148e7e0cd32a6278bfb84ac503c5d483c2f818ab..bc00471bc746a2e1167e519b06fb88d2a1f0a88a 100644 (file)
@@ -673,13 +673,10 @@ fail:
 int kr_resolve_consume(struct kr_request *request, struct kr_transport **transport, knot_pkt_t *packet)
 {
        struct kr_rplan *rplan = &request->rplan;
-       kr_cache_top_context_switch(&the_resolver->cache.top, &request->cache_top_context, "consume");
 
        /* Empty resolution plan, push packet as the new query */
        if (packet && kr_rplan_empty(rplan)) {
-               int ret = resolve_query(request, packet);
-               kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume");
-               return ret;
+               return resolve_query(request, packet);
        }
 
        /* Different processing for network error */
@@ -687,14 +684,11 @@ int kr_resolve_consume(struct kr_request *request, struct kr_transport **transpo
        /* Check overall resolution time */
        if (kr_now() - qry->creation_time_mono >= KR_RESOLVE_TIME_LIMIT) {
                kr_query_inform_timeout(request, qry);
-               kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume");
                return KR_STATE_FAIL;
        }
        bool tried_tcp = (qry->flags.TCP);
-       if (!packet || packet->size == 0) {
-               kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume");
+       if (!packet || packet->size == 0)
                return KR_STATE_PRODUCE;
-       }
 
        /* Packet cleared, derandomize QNAME. */
        knot_dname_t *qname_raw = kr_pkt_qname_raw(packet);
@@ -717,10 +711,8 @@ int kr_resolve_consume(struct kr_request *request, struct kr_transport **transpo
        if (transport && !qry->flags.CACHED) {
                if (!(request->state & KR_STATE_FAIL)) {
                        /* Do not complete NS address resolution on soft-fail. */
-                       if (kr_fails_assert(packet->wire)) {
-                               kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume");
+                       if (kr_fails_assert(packet->wire))
                                return KR_STATE_FAIL;
-                       }
                        const int rcode = knot_wire_get_rcode(packet->wire);
                        if (rcode != KNOT_RCODE_SERVFAIL && rcode != KNOT_RCODE_REFUSED) {
                                qry->flags.AWAIT_IPV6 = false;
@@ -744,7 +736,6 @@ int kr_resolve_consume(struct kr_request *request, struct kr_transport **transpo
                                }
                                if (!qry->flags.NO_NS_FOUND) {
                                        qry->flags.NO_NS_FOUND = true;
-                                       kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume");
                                        return KR_STATE_PRODUCE;
                                }
 
@@ -758,7 +749,6 @@ int kr_resolve_consume(struct kr_request *request, struct kr_transport **transpo
                                                        "OLX2: delegation ", cut_buf);
                                }
                                kr_request_set_extended_error(request, KNOT_EDNS_EDE_NREACH_AUTH, msg);
-                               kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume");
                                return KR_STATE_FAIL;
                        }
                } else {
@@ -768,12 +758,10 @@ int kr_resolve_consume(struct kr_request *request, struct kr_transport **transpo
 
        /* Pop query if resolved. */
        if (request->state == KR_STATE_YIELD) { // NOLINT(bugprone-branch-clone)
-               kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume");
                return KR_STATE_PRODUCE; /* Requery */
        } else if (qry->flags.RESOLVED) {
                kr_rplan_pop(rplan, qry);
        } else if (!tried_tcp && (qry->flags.TCP)) {
-               kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume");
                return KR_STATE_PRODUCE; /* Requery over TCP */
        } else { /* Clear query flags for next attempt */
                qry->flags.CACHED = false;
@@ -789,16 +777,13 @@ int kr_resolve_consume(struct kr_request *request, struct kr_transport **transpo
                if (qry->flags.FORWARD || qry->flags.STUB
                                /* Probably CPU exhaustion attempt, so do not retry. */
                                || qry->vld_limit_crypto_remains <= 0) {
-                       kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume");
                        return KR_STATE_FAIL;
                }
                /* Other servers might not have broken DNSSEC. */
                qry->flags.DNSSEC_BOGUS = false;
-               kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume");
                return KR_STATE_PRODUCE;
        }
 
-       kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume");
        return kr_rplan_empty(&request->rplan) ? KR_STATE_DONE : KR_STATE_PRODUCE;
 }
 
@@ -938,7 +923,6 @@ int kr_resolve_checkout(struct kr_request *request, const struct sockaddr *src,
 
 int kr_resolve_finish(struct kr_request *request, int state)
 {
-       kr_cache_top_context_switch(&the_resolver->cache.top, &request->cache_top_context, "finish");
        request->state = state;
        /* Finalize answer and construct whole wire-format (unless dropping). */
        knot_pkt_t *answer = kr_request_ensure_answer(request);
@@ -977,7 +961,6 @@ int kr_resolve_finish(struct kr_request *request, int state)
        request->trace_finish = NULL;
        request->trace_log = NULL;
 
-       kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "finish");
        return KR_STATE_DONE;
 }
 
index 558e58891963561f74bb0e5447718a5cac4a4aff..0d1e8bb7742997a2336bf28c7f07769cd5c25949 100644 (file)
@@ -141,7 +141,7 @@ static const struct rtt_state default_rtt_state = { .srtt = 0,
                                                    .dead_since = 0 };
 
 struct rtt_state get_rtt_state(const uint8_t *ip, size_t len,
-                              struct kr_cache *cache)
+                              struct kr_cache *cache, struct kr_request *req)
 {
        struct rtt_state state;
        knot_db_val_t value;
@@ -157,7 +157,7 @@ struct rtt_state get_rtt_state(const uint8_t *ip, size_t len,
                state = default_rtt_state;
        } else { // memcpy is safe for unaligned case (on non-x86)
                memcpy(&state, value.data, sizeof(state));
-               kr_cache_top_access(&cache->top, key.data, key.len, value.len, "get_rtt");
+               kr_cache_top_access(req, key.data, key.len, value.len, "get_rtt");
        }
 
        free(key.data);
@@ -165,7 +165,7 @@ struct rtt_state get_rtt_state(const uint8_t *ip, size_t len,
 }
 
 int put_rtt_state(const uint8_t *ip, size_t len, struct rtt_state state,
-                 struct kr_cache *cache)
+                 struct kr_cache *cache, struct kr_request *req)
 {
        knot_db_t *db = cache->db;
        struct kr_cdb_stats *stats = &cache->stats;
@@ -176,7 +176,7 @@ int put_rtt_state(const uint8_t *ip, size_t len, struct rtt_state state,
 
        int ret = cache->api->write(db, stats, &key, &value, 1);
        kr_cache_commit(cache);
-       kr_cache_top_access(&cache->top, key.data, key.len, value.len, "put_rtt");
+       kr_cache_top_access(req, key.data, key.len, value.len, "put_rtt");
 
        free(key.data);
        return ret;
@@ -324,7 +324,7 @@ void update_address_state(struct address_state *state, union kr_sockaddr *addres
                               qry->flags.NO_IPV6);
        state->rtt_state =
                get_rtt_state(ip_to_bytes(address, address_len),
-                             address_len, &qry->request->ctx->cache);
+                             address_len, &qry->request->ctx->cache, qry->request);
        invalidate_dead_upstream(
                state, qry->request->ctx->cache_rtt_tout_retry_interval);
 #ifdef SELECTION_CHOICE_LOGGING
@@ -562,9 +562,8 @@ void update_rtt(struct kr_query *qry, struct address_state *addr_state,
                return;
        }
 
-       struct kr_cache *cache = &qry->request->ctx->cache;
-       struct kr_cache_top_context *old_cache_top_ctx =
-               kr_cache_top_context_switch(&the_resolver->cache.top, &qry->request->cache_top_context, "update_rtt");
+       struct kr_request *req = qry->request;
+       struct kr_cache *cache = &req->ctx->cache;
 
        uint8_t *address = ip_to_bytes(&transport->address, transport->address_len);
        /* This construct is a bit racy since the global state may change
@@ -572,9 +571,9 @@ void update_rtt(struct kr_query *qry, struct address_state *addr_state,
         * care that much since it is rare and we only risk slightly suboptimal
         * transport choice. */
        struct rtt_state cur_rtt_state =
-               get_rtt_state(address, transport->address_len, cache);
+               get_rtt_state(address, transport->address_len, cache, req);
        struct rtt_state new_rtt_state = calc_rtt_state(cur_rtt_state, rtt);
-       put_rtt_state(address, transport->address_len, new_rtt_state, cache);
+       put_rtt_state(address, transport->address_len, new_rtt_state, cache, req);
 
        if (transport->address_len == sizeof(struct in6_addr))
                no6_success(qry);
@@ -591,8 +590,6 @@ void update_rtt(struct kr_query *qry, struct address_state *addr_state,
                        qry->id, ns_name, ns_str ? ns_str : "", zonecut_str,
                        rtt, new_rtt_state.srtt, new_rtt_state.variance);
        }
-
-       kr_cache_top_context_switch(&the_resolver->cache.top, old_cache_top_ctx, "update_rtt");
 }
 
 /// Update rtt_state (including caching) after a server timed out.
@@ -603,9 +600,6 @@ static void server_timeout(const struct kr_query *qry, const struct kr_transport
        if (transport->timeout_capped)
                return;
 
-       struct kr_cache_top_context *old_cache_top_ctx =
-               kr_cache_top_context_switch(&the_resolver->cache.top, &qry->request->cache_top_context, "server_timeout");
-
        const uint8_t *address = ip_to_bytes(&transport->address, transport->address_len);
        if (transport->address_len == sizeof(struct in6_addr))
                no6_timed_out(qry, address);
@@ -614,7 +608,7 @@ static void server_timeout(const struct kr_query *qry, const struct kr_transport
        // While we were waiting for timeout, the stats might have changed considerably,
        // so let's overwrite what we had by fresh cache contents.
        // This is useful when the address is busy (we query it concurrently).
-       *state = get_rtt_state(address, transport->address_len, cache);
+       *state = get_rtt_state(address, transport->address_len, cache, qry->request);
 
        ++state->consecutive_timeouts;
        // Avoid overflow; we don't utilize very high values anyway (arbitrary limit).
@@ -628,12 +622,10 @@ static void server_timeout(const struct kr_query *qry, const struct kr_transport
 
        // If transport was chosen by a different query, that one will cache it.
        if (!transport->deduplicated) {
-               put_rtt_state(address, transport->address_len, *state, cache);
+               put_rtt_state(address, transport->address_len, *state, cache, qry->request);
        } else {
                kr_cache_commit(cache); // Avoid any risk of long transaction.
        }
-
-       kr_cache_top_context_switch(&the_resolver->cache.top, old_cache_top_ctx, "server_timeout");
 }
 // Not everything can be checked in nice ways like static_assert()
 static __attribute__((constructor)) void test_RTT_consts(void)
index f9488ff88ca42d6e635764f3503afe50868bad48..c207d028530824bd9fc0a94c895046a294bcf549 100644 (file)
@@ -246,10 +246,10 @@ void error(struct kr_query *qry, struct address_state *addr_state,
  * (e.g. calling kr_cache_commit).
  */
 struct rtt_state get_rtt_state(const uint8_t *ip, size_t len,
-                              struct kr_cache *cache);
+                              struct kr_cache *cache, struct kr_request *req);
 
 int put_rtt_state(const uint8_t *ip, size_t len, struct rtt_state state,
-                 struct kr_cache *cache);
+                 struct kr_cache *cache, struct kr_request *req);
 
 /**
  * @internal Helper function for conversion between different IP representations.
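
The RTT helpers follow the same pattern: the request is threaded through solely so the cache reads and writes inside them can charge the cache-top KRU. Condensed from the update_rtt() hunk above (variables as in that function):

    struct kr_request *req = qry->request;
    struct kr_cache *cache = &req->ctx->cache;
    struct rtt_state cur = get_rtt_state(address, transport->address_len, cache, req);
    struct rtt_state next = calc_rtt_state(cur, rtt);
    put_rtt_state(address, transport->address_len, next, cache, req);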
index 8004fd222d808c5963f21835bc879d0bb2b8c5b0..9faac3ab56b9e0853b0c27b2a882db8f943102df 100644 (file)
@@ -304,7 +304,7 @@ static addrset_info_t fetch_addr(pack_t *addrs, const knot_dname_t *ns, uint16_t
 
        struct kr_context *ctx = qry->request->ctx;
        struct kr_cache_p peek;
-       if (kr_cache_peek_exact(&ctx->cache, ns, rrtype, &peek) != 0) {
+       if (kr_cache_peek_exact(&ctx->cache, qry->request, ns, rrtype, &peek) != 0) {
                return AI_UNKNOWN;
        }
        int32_t new_ttl = kr_cache_ttl(&peek, qry, ns, rrtype);
@@ -369,7 +369,7 @@ static int fetch_ns(struct kr_context *ctx, struct kr_zonecut *cut,
                    uint8_t * restrict rank)
 {
        struct kr_cache_p peek;
-       int ret = kr_cache_peek_exact(&ctx->cache, name, KNOT_RRTYPE_NS, &peek);
+       int ret = kr_cache_peek_exact(&ctx->cache, qry->request, name, KNOT_RRTYPE_NS, &peek);
        if (ret != 0) {
                return ret;
        }
@@ -499,7 +499,7 @@ static int fetch_secure_rrset(knot_rrset_t **rr, struct kr_cache *cache,
                return kr_error(EINVAL);
        /* peek, check rank and TTL */
        struct kr_cache_p peek;
-       int ret = kr_cache_peek_exact(cache, owner, type, &peek);
+       int ret = kr_cache_peek_exact(cache, qry->request, owner, type, &peek);
        if (ret != 0)
                return ret;
        if (!kr_rank_test(peek.rank, KR_RANK_SECURE))