From f6407ce126a77bb00d42b6194968a87c3c14245a Mon Sep 17 00:00:00 2001
From: =?utf8?q?Vladim=C3=ADr=20=C4=8Cun=C3=A1t?=
Date: Wed, 13 Aug 2025 12:57:53 +0200
Subject: [PATCH] treewide refactor: avoid kr_cache_top_context_switch()

It seemed to bring more complexity than benefit. In many places this
meant reverting the code to its state from a few commits ago.
---
 daemon/lua/kres-gen-33.lua |  1 -
 daemon/worker.c            |  1 -
 lib/cache/api.c            | 18 +++++++++++-------
 lib/cache/api.h            |  3 ++-
 lib/cache/entry_pkt.c      |  4 ++--
 lib/cache/impl.h           |  2 +-
 lib/cache/nsec1.c          |  2 +-
 lib/cache/nsec3.c          |  2 +-
 lib/cache/peek.c           |  9 +++++----
 lib/cache/top.c            | 16 +++++-----------
 lib/cache/top.h            | 15 +++------------
 lib/resolve-produce.c      | 36 +++++++-----------------------------
 lib/resolve.c              | 23 +++--------------------
 lib/selection.c            | 30 +++++++++++-------------------
 lib/selection.h            |  4 ++--
 lib/zonecut.c              |  6 +++---
 16 files changed, 57 insertions(+), 115 deletions(-)

diff --git a/daemon/lua/kres-gen-33.lua b/daemon/lua/kres-gen-33.lua
index 6305b425c..b36e66083 100644
--- a/daemon/lua/kres-gen-33.lua
+++ b/daemon/lua/kres-gen-33.lua
@@ -319,7 +319,6 @@ struct mmapped {
 struct kr_cache_top {
 	struct mmapped mmapped;
 	struct top_data *data;
-	struct kr_cache_top_context *ctx;
 };
 struct kr_cache {
 	kr_cdb_pt db;
diff --git a/daemon/worker.c b/daemon/worker.c
index 51de2d3ad..c912fd158 100644
--- a/daemon/worker.c
+++ b/daemon/worker.c
@@ -2054,7 +2054,6 @@ static enum protolayer_event_cb_result pl_dns_stream_disconnected(
 			qry->flags.TCP = false;
 		}
 		qr_task_step(task, NULL, NULL);
-		defer_sample_restart();
 	} else {
 		kr_assert(task->ctx->source.session == session);
diff --git a/lib/cache/api.c b/lib/cache/api.c
index a0cd9f8a1..6e6fb89f6 100644
--- a/lib/cache/api.c
+++ b/lib/cache/api.c
@@ -399,7 +399,7 @@ static int stash_rrarray_entry(ranked_rr_array_t *arr, int arr_i,
 /** Stash a single nsec_p. \return 0 (errors are ignored). */
 static int stash_nsec_p(const knot_dname_t *dname, const char *nsec_p_v,
 		struct kr_cache *cache, uint32_t timestamp, knot_mm_t *pool,
-		const struct kr_query *qry/*logging*/);
+		const struct kr_query *qry/*logging + cache_top*/);
 
 /** The whole .consume phase for the cache module. */
 int cache_stash(kr_layer_t *ctx, knot_pkt_t *pkt)
@@ -653,7 +653,8 @@ static ssize_t stash_rrset(struct kr_cache *cache, const struct kr_query *qry,
 	rdataset_dematerialize(rds_sigs, eh->data + rr_ssize);
 	if (kr_fails_assert(entry_h_consistent_E(val_new_entry, rr->type)))
 		return kr_error(EINVAL);
-	kr_cache_top_access(&cache->top, key.data, key.len, val_new_entry.len, "stash_rrset");
+	if (qry) // it's possible to insert outside a request
+		kr_cache_top_access(qry->request, key.data, key.len, val_new_entry.len, "stash_rrset");
 
 	#if 0 /* Occasionally useful when debugging some kinds of changes. 
*/ { @@ -830,7 +831,8 @@ static int stash_nsec_p(const knot_dname_t *dname, const char *nsec_p_v, VERBOSE_MSG(qry, "=> EL write failed (ret: %d)\n", ret); return kr_ok(); } - kr_cache_top_access(&cache->top, key.data, key.len, val.len, "stash_nsec_p"); + if (qry) + kr_cache_top_access(qry->request, key.data, key.len, val.len, "stash_nsec_p"); if (log_refresh_by) { VERBOSE_MSG(qry, "=> nsec_p stashed for %s (refresh by %d, hash: %x)\n", log_dname, log_refresh_by, log_hash); @@ -879,7 +881,8 @@ int kr_cache_insert_rr(struct kr_cache *cache, return (int) written; } -static int peek_exact_real(struct kr_cache *cache, const knot_dname_t *name, uint16_t type, +static int peek_exact_real(struct kr_cache *cache, struct kr_request *req, + const knot_dname_t *name, uint16_t type, struct kr_cache_p *peek) { if (!check_rrtype(type, NULL) || !check_dname_for_lf(name, NULL)) { @@ -912,13 +915,14 @@ static int peek_exact_real(struct kr_cache *cache, const knot_dname_t *name, uin .raw_data = val.data, .raw_bound = knot_db_val_bound(val), }; - kr_cache_top_access(&cache->top, key.data, key.len, whole_val_len, "peek_exact_real"); // hits only + kr_cache_top_access(req, key.data, key.len, whole_val_len, "peek_exact_real"); // hits only return kr_ok(); } -int kr_cache_peek_exact(struct kr_cache *cache, const knot_dname_t *name, uint16_t type, +int kr_cache_peek_exact(struct kr_cache *cache, struct kr_request *req, + const knot_dname_t *name, uint16_t type, struct kr_cache_p *peek) { /* Just wrap with extra verbose logging. */ - const int ret = peek_exact_real(cache, name, type, peek); + const int ret = peek_exact_real(cache, req, name, type, peek); if (false && kr_log_is_debug(CACHE, NULL)) { /* too noisy for usual --verbose */ auto_free char *type_str = kr_rrtype_text(type), *name_str = kr_dname_text(name); diff --git a/lib/cache/api.h b/lib/cache/api.h index 7ae13807b..b3085f161 100644 --- a/lib/cache/api.h +++ b/lib/cache/api.h @@ -122,7 +122,8 @@ struct kr_cache_p { }; }; KR_EXPORT -int kr_cache_peek_exact(struct kr_cache *cache, const knot_dname_t *name, uint16_t type, +int kr_cache_peek_exact(struct kr_cache *cache, struct kr_request *req, + const knot_dname_t *name, uint16_t type, struct kr_cache_p *peek); /* Parameters (qry, name, type) are used for timestamp and stale-serving decisions. */ KR_EXPORT diff --git a/lib/cache/entry_pkt.c b/lib/cache/entry_pkt.c index b853cad0f..d76b8b95f 100644 --- a/lib/cache/entry_pkt.c +++ b/lib/cache/entry_pkt.c @@ -33,7 +33,7 @@ uint32_t packet_ttl(const knot_pkt_t *pkt) void stash_pkt(const knot_pkt_t *pkt, const struct kr_query *qry, - const struct kr_request *req, const bool needs_pkt) + struct kr_request *req, const bool needs_pkt) { /* In some cases, stash also the packet. */ const bool is_negative = kr_response_classify(pkt) @@ -114,7 +114,7 @@ void stash_pkt(const knot_pkt_t *pkt, const struct kr_query *qry, eh->has_optout = qf->DNSSEC_OPTOUT; memcpy(eh->data, &pkt_size, sizeof(pkt_size)); memcpy(eh->data + sizeof(pkt_size), pkt->wire, pkt_size); - kr_cache_top_access(&cache->top, key.data, key.len, val_new_entry.len, "stash_pkt"); + kr_cache_top_access(req, key.data, key.len, val_new_entry.len, "stash_pkt"); WITH_VERBOSE(qry) { auto_free char *type_str = kr_rrtype_text(pkt_type), diff --git a/lib/cache/impl.h b/lib/cache/impl.h index 87ff6cd4b..0ca3a6562 100644 --- a/lib/cache/impl.h +++ b/lib/cache/impl.h @@ -267,7 +267,7 @@ void entry_list_memcpy(struct entry_apex *ea, entry_list_t list); * see stash_rrset() for details * It assumes check_dname_for_lf(). 
*/ void stash_pkt(const knot_pkt_t *pkt, const struct kr_query *qry, - const struct kr_request *req, bool needs_pkt); + struct kr_request *req, bool needs_pkt); /** Try answering from packet cache, given an entry_h. * diff --git a/lib/cache/nsec1.c b/lib/cache/nsec1.c index 4865a47f1..008d51408 100644 --- a/lib/cache/nsec1.c +++ b/lib/cache/nsec1.c @@ -250,7 +250,7 @@ static const char * find_leq_NSEC1(struct kr_cache *cache, const struct kr_query success: - kr_cache_top_access(&cache->top, key_nsec.data, key_nsec.len, val.len, "leq_nsec1"); // hits only + kr_cache_top_access(qry->request, key_nsec.data, key_nsec.len, val.len, "leq_nsec1"); // hits only return NULL; } diff --git a/lib/cache/nsec3.c b/lib/cache/nsec3.c index a9a439596..24fbd5f91 100644 --- a/lib/cache/nsec3.c +++ b/lib/cache/nsec3.c @@ -219,7 +219,7 @@ static const char * find_leq_NSEC3(struct kr_cache *cache, const struct kr_query success: - kr_cache_top_access(&cache->top, key_found.data, key_found.len, val.len, "leq_nsec3"); // hits only + kr_cache_top_access(qry->request, key_found.data, key_found.len, val.len, "leq_nsec3"); // hits only return NULL; } diff --git a/lib/cache/peek.c b/lib/cache/peek.c index 5bf4b41cf..9c7989424 100644 --- a/lib/cache/peek.c +++ b/lib/cache/peek.c @@ -128,7 +128,7 @@ int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt) ret = found_exact_hit(qry, pkt, val, lowest_rank); } if (!ret) { - kr_cache_top_access(&cache->top, key.data, key.len, val.len, "peek_nosync:exact"); // hits only + kr_cache_top_access(req, key.data, key.len, val.len, "peek_nosync:exact"); // hits only return KR_STATE_DONE; } else if (kr_fails_assert(ret == kr_error(ENOENT))) { VERBOSE_MSG(qry, "=> exact hit error: %d %s\n", ret, kr_strerror(ret)); @@ -276,7 +276,7 @@ int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt) ret = entry2answer(&ans, AR_SOA, eh, knot_db_val_bound(val), k->zname, KNOT_RRTYPE_SOA, new_ttl); if (ret) return ctx->state; - kr_cache_top_access(&cache->top, key.data, key.len, val.len, "peek_nosync:SOA"); // hits only + kr_cache_top_access(req, key.data, key.len, val.len, "peek_nosync:SOA"); // hits only } /* Find our target RCODE. */ @@ -597,7 +597,7 @@ static int try_wild(struct key *k, struct answer *ans, const knot_dname_t *clenc ret, (int)new_ttl); if (ret) return kr_error(ret); ans->rcode = PKT_NOERROR; - kr_cache_top_access(&cache->top, key.data, key.len, whole_val_len, "try_wild"); // hits only + kr_cache_top_access(qry->request, key.data, key.len, whole_val_len, "try_wild"); // hits only return kr_ok(); } @@ -728,7 +728,8 @@ static int closest_NS(struct kr_cache *cache, struct key *k, entry_list_t el, success: k->zlf_len = zlf_len; - kr_cache_top_access(&cache->top, key.data, key.len, val.len, "closest_NS"); // hits only + if (qry) + kr_cache_top_access(qry->request, key.data, key.len, val.len, "closest_NS"); // hits only return kr_ok(); } diff --git a/lib/cache/top.c b/lib/cache/top.c index 372104ff4..a438706e2 100644 --- a/lib/cache/top.c +++ b/lib/cache/top.c @@ -9,6 +9,7 @@ #include "lib/cache/top.h" #include "lib/cache/impl.h" #include "lib/mmapped.h" +#include "lib/resolve.h" #include "lib/kru.h" #define FILE_FORMAT_VERSION 1 // fail if different @@ -146,7 +147,6 @@ int kr_cache_top_init(struct kr_cache_top *top, char *mmap_file, size_t cache_si if (state < 0) goto fail; kr_assert(state == 0); - top->ctx = NULL; kr_log_info(CACHE, "Cache top initialized %s (%s).\n", using_existing ? "using existing data" : "as empty", (kru_using_avx2() ? 
"AVX2" : "generic")); @@ -236,10 +236,12 @@ char *kr_cache_top_strkey(void *key, size_t len) return str; } -void kr_cache_top_access(struct kr_cache_top *top, void *key, size_t key_len, size_t data_size, char *debug_label) +void kr_cache_top_access(struct kr_request *req, void *key, size_t key_len, size_t data_size, char *debug_label) { + struct kr_cache_top *top = &req->ctx->cache.top; + struct kr_cache_top_context *ctx = &req->cache_top_context; kru_hash_t hash = KRU.hash_bytes((struct kru *)&top->data->kru, (uint8_t *)key, key_len); - const bool unique = top->ctx ? first_access(top->ctx, hash) : true; + const bool unique = ctx ? first_access(ctx, hash) : true; if (!unique) return; const size_t size = kr_cache_top_entry_size(key_len, data_size); @@ -247,14 +249,6 @@ void kr_cache_top_access(struct kr_cache_top *top, void *key, size_t key_len, si KRU.load_hash((struct kru *)&top->data->kru, ticks_now(), hash, price); } -struct kr_cache_top_context *kr_cache_top_context_switch(struct kr_cache_top *top, - struct kr_cache_top_context *new_ctx, char *debug_label) -{ - struct kr_cache_top_context *old_ctx = top->ctx; - top->ctx = new_ctx; - return old_ctx; -} - uint16_t kr_cache_top_load(struct kr_cache_top *top, void *key, size_t len) { kru_hash_t hash = KRU.hash_bytes((struct kru *)&top->data->kru, (uint8_t *)key, len); diff --git a/lib/cache/top.h b/lib/cache/top.h index 91e2d6dc0..cb1353fcf 100644 --- a/lib/cache/top.h +++ b/lib/cache/top.h @@ -21,11 +21,12 @@ #include #include "lib/mmapped.h" +struct kr_request; + /// Data related to open cache. struct kr_cache_top { struct mmapped mmapped; struct top_data *data; - struct kr_cache_top_context *ctx; }; /// Part of the previous, shared between all processes. @@ -71,23 +72,13 @@ void kr_cache_top_deinit(struct kr_cache_top *top); /// Charge cache access to the accessed key /// unless it was already accessed in the current request context. KR_EXPORT -void kr_cache_top_access(struct kr_cache_top *top, void *key, size_t key_len, size_t data_size, char *debug_label); +void kr_cache_top_access(struct kr_request *req, void *key, size_t key_len, size_t data_size, char *debug_label); // debug_label is currently not used, TODO remove? /// Get current KRU load value assigned to the given cache entry key. KR_EXPORT uint16_t kr_cache_top_load(struct kr_cache_top *top, void *key, size_t len); -/// Switch request context; the ctx has to be kept valid until next call. -/// The context of a new kr_request has to be initialized with zeroes. -/// Use NULL as ctx to stop using current context; -/// all cache accesses in such a state are considered unique, -/// but no such access is expected to happen. -/// Returns the previous context. -KR_EXPORT -struct kr_cache_top_context *kr_cache_top_context_switch(struct kr_cache_top *top, struct kr_cache_top_context *ctx, char *debug_label); - // debug_label is currently not used, TODO remove? - /// Return readable string representation of a cache key in a statically allocated memory. /// By default printable characters are kept unchanged and NULL-bytes are printed as '|'. 
/// Where numeric values are expected (CACHE_KEY_DEF) or non-printable characters occur, diff --git a/lib/resolve-produce.c b/lib/resolve-produce.c index 7e052b5f2..a3a2401e4 100644 --- a/lib/resolve-produce.c +++ b/lib/resolve-produce.c @@ -598,11 +598,9 @@ int kr_resolve_produce(struct kr_request *request, struct kr_transport **transpo { kr_require(request && transport && packet); struct kr_rplan *rplan = &request->rplan; - kr_cache_top_context_switch(&the_resolver->cache.top, &request->cache_top_context, "produce"); /* No query left for resolution */ if (kr_rplan_empty(rplan)) { - kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce"); return KR_STATE_FAIL; } @@ -620,12 +618,8 @@ int kr_resolve_produce(struct kr_request *request, struct kr_transport **transpo } switch(state) { - case KR_STATE_FAIL: - kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce"); - return KR_STATE_FAIL; - case KR_STATE_DONE: - kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce"); - return KR_STATE_PRODUCE; + case KR_STATE_FAIL: return KR_STATE_FAIL; + case KR_STATE_DONE: return KR_STATE_PRODUCE; default: break; } VERBOSE_MSG(qry, "=> resuming yielded answer\n"); @@ -643,12 +637,8 @@ int kr_resolve_produce(struct kr_request *request, struct kr_transport **transpo * this is normally not required, and incurs another cache lookups for cached answer. */ if (qry->flags.ALWAYS_CUT) { // LATER: maybe the flag doesn't work well anymore switch(zone_cut_check(request, qry, packet)) { - case KR_STATE_FAIL: - kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce"); - return KR_STATE_FAIL; - case KR_STATE_DONE: - kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce"); - return KR_STATE_PRODUCE; + case KR_STATE_FAIL: return KR_STATE_FAIL; + case KR_STATE_DONE: return KR_STATE_PRODUCE; default: break; } } @@ -664,9 +654,7 @@ int kr_resolve_produce(struct kr_request *request, struct kr_transport **transpo } } switch(request->state) { - case KR_STATE_FAIL: - kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce"); - return request->state; + case KR_STATE_FAIL: return request->state; case KR_STATE_CONSUME: break; case KR_STATE_DONE: default: /* Current query is done */ @@ -674,7 +662,6 @@ int kr_resolve_produce(struct kr_request *request, struct kr_transport **transpo kr_rplan_pop(rplan, qry); } ITERATE_LAYERS(request, qry, reset); - kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce"); return kr_rplan_empty(rplan) ? KR_STATE_DONE : KR_STATE_PRODUCE; } /* At this point we need to send a query upstream to proceed towards success. */ @@ -683,19 +670,14 @@ int kr_resolve_produce(struct kr_request *request, struct kr_transport **transpo if (qry->stype == KNOT_RRTYPE_ANY || !knot_wire_get_rd(request->qsource.packet->wire)) { VERBOSE_MSG(qry, "=> qtype is ANY or RD=0, bail out\n"); - kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce"); return KR_STATE_FAIL; } /* Update zone cut, spawn new subrequests. 
*/ int state = zone_cut_check(request, qry, packet); switch(state) { - case KR_STATE_FAIL: - kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce"); - return KR_STATE_FAIL; - case KR_STATE_DONE: - kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce"); - return KR_STATE_PRODUCE; + case KR_STATE_FAIL: return KR_STATE_FAIL; + case KR_STATE_DONE: return KR_STATE_PRODUCE; default: break; } @@ -727,14 +709,12 @@ int kr_resolve_produce(struct kr_request *request, struct kr_transport **transpo } kr_request_set_extended_error(request, KNOT_EDNS_EDE_NREACH_AUTH, msg); - kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce"); return KR_STATE_FAIL; } else { /* FIXME: This is probably quite inefficient: * we go through the whole qr_task_step loop just because of the serve_stale * module which might not even be loaded. */ qry->flags.NO_NS_FOUND = true; - kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce"); return KR_STATE_PRODUCE; } } @@ -743,7 +723,6 @@ int kr_resolve_produce(struct kr_request *request, struct kr_transport **transpo uint16_t type = (*transport)->protocol == KR_TRANSPORT_RESOLVE_A ? KNOT_RRTYPE_A : KNOT_RRTYPE_AAAA; ns_resolve_addr(qry, qry->request, *transport, type); ITERATE_LAYERS(request, qry, reset); - kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce"); return KR_STATE_PRODUCE; } @@ -757,7 +736,6 @@ int kr_resolve_produce(struct kr_request *request, struct kr_transport **transpo * kr_resolve_checkout(). */ qry->timestamp_mono = kr_now(); - kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce"); return request->state; } diff --git a/lib/resolve.c b/lib/resolve.c index 148e7e0cd..bc00471bc 100644 --- a/lib/resolve.c +++ b/lib/resolve.c @@ -673,13 +673,10 @@ fail: int kr_resolve_consume(struct kr_request *request, struct kr_transport **transport, knot_pkt_t *packet) { struct kr_rplan *rplan = &request->rplan; - kr_cache_top_context_switch(&the_resolver->cache.top, &request->cache_top_context, "consume"); /* Empty resolution plan, push packet as the new query */ if (packet && kr_rplan_empty(rplan)) { - int ret = resolve_query(request, packet); - kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume"); - return ret; + return resolve_query(request, packet); } /* Different processing for network error */ @@ -687,14 +684,11 @@ int kr_resolve_consume(struct kr_request *request, struct kr_transport **transpo /* Check overall resolution time */ if (kr_now() - qry->creation_time_mono >= KR_RESOLVE_TIME_LIMIT) { kr_query_inform_timeout(request, qry); - kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume"); return KR_STATE_FAIL; } bool tried_tcp = (qry->flags.TCP); - if (!packet || packet->size == 0) { - kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume"); + if (!packet || packet->size == 0) return KR_STATE_PRODUCE; - } /* Packet cleared, derandomize QNAME. */ knot_dname_t *qname_raw = kr_pkt_qname_raw(packet); @@ -717,10 +711,8 @@ int kr_resolve_consume(struct kr_request *request, struct kr_transport **transpo if (transport && !qry->flags.CACHED) { if (!(request->state & KR_STATE_FAIL)) { /* Do not complete NS address resolution on soft-fail. 
*/ - if (kr_fails_assert(packet->wire)) { - kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume"); + if (kr_fails_assert(packet->wire)) return KR_STATE_FAIL; - } const int rcode = knot_wire_get_rcode(packet->wire); if (rcode != KNOT_RCODE_SERVFAIL && rcode != KNOT_RCODE_REFUSED) { qry->flags.AWAIT_IPV6 = false; @@ -744,7 +736,6 @@ int kr_resolve_consume(struct kr_request *request, struct kr_transport **transpo } if (!qry->flags.NO_NS_FOUND) { qry->flags.NO_NS_FOUND = true; - kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume"); return KR_STATE_PRODUCE; } @@ -758,7 +749,6 @@ int kr_resolve_consume(struct kr_request *request, struct kr_transport **transpo "OLX2: delegation ", cut_buf); } kr_request_set_extended_error(request, KNOT_EDNS_EDE_NREACH_AUTH, msg); - kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume"); return KR_STATE_FAIL; } } else { @@ -768,12 +758,10 @@ int kr_resolve_consume(struct kr_request *request, struct kr_transport **transpo /* Pop query if resolved. */ if (request->state == KR_STATE_YIELD) { // NOLINT(bugprone-branch-clone) - kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume"); return KR_STATE_PRODUCE; /* Requery */ } else if (qry->flags.RESOLVED) { kr_rplan_pop(rplan, qry); } else if (!tried_tcp && (qry->flags.TCP)) { - kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume"); return KR_STATE_PRODUCE; /* Requery over TCP */ } else { /* Clear query flags for next attempt */ qry->flags.CACHED = false; @@ -789,16 +777,13 @@ int kr_resolve_consume(struct kr_request *request, struct kr_transport **transpo if (qry->flags.FORWARD || qry->flags.STUB /* Probably CPU exhaustion attempt, so do not retry. */ || qry->vld_limit_crypto_remains <= 0) { - kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume"); return KR_STATE_FAIL; } /* Other servers might not have broken DNSSEC. */ qry->flags.DNSSEC_BOGUS = false; - kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume"); return KR_STATE_PRODUCE; } - kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume"); return kr_rplan_empty(&request->rplan) ? KR_STATE_DONE : KR_STATE_PRODUCE; } @@ -938,7 +923,6 @@ int kr_resolve_checkout(struct kr_request *request, const struct sockaddr *src, int kr_resolve_finish(struct kr_request *request, int state) { - kr_cache_top_context_switch(&the_resolver->cache.top, &request->cache_top_context, "finish"); request->state = state; /* Finalize answer and construct whole wire-format (unless dropping). 
*/ knot_pkt_t *answer = kr_request_ensure_answer(request); @@ -977,7 +961,6 @@ int kr_resolve_finish(struct kr_request *request, int state) request->trace_finish = NULL; request->trace_log = NULL; - kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "finish"); return KR_STATE_DONE; } diff --git a/lib/selection.c b/lib/selection.c index 558e58891..0d1e8bb77 100644 --- a/lib/selection.c +++ b/lib/selection.c @@ -141,7 +141,7 @@ static const struct rtt_state default_rtt_state = { .srtt = 0, .dead_since = 0 }; struct rtt_state get_rtt_state(const uint8_t *ip, size_t len, - struct kr_cache *cache) + struct kr_cache *cache, struct kr_request *req) { struct rtt_state state; knot_db_val_t value; @@ -157,7 +157,7 @@ struct rtt_state get_rtt_state(const uint8_t *ip, size_t len, state = default_rtt_state; } else { // memcpy is safe for unaligned case (on non-x86) memcpy(&state, value.data, sizeof(state)); - kr_cache_top_access(&cache->top, key.data, key.len, value.len, "get_rtt"); + kr_cache_top_access(req, key.data, key.len, value.len, "get_rtt"); } free(key.data); @@ -165,7 +165,7 @@ struct rtt_state get_rtt_state(const uint8_t *ip, size_t len, } int put_rtt_state(const uint8_t *ip, size_t len, struct rtt_state state, - struct kr_cache *cache) + struct kr_cache *cache, struct kr_request *req) { knot_db_t *db = cache->db; struct kr_cdb_stats *stats = &cache->stats; @@ -176,7 +176,7 @@ int put_rtt_state(const uint8_t *ip, size_t len, struct rtt_state state, int ret = cache->api->write(db, stats, &key, &value, 1); kr_cache_commit(cache); - kr_cache_top_access(&cache->top, key.data, key.len, value.len, "put_rtt"); + kr_cache_top_access(req, key.data, key.len, value.len, "put_rtt"); free(key.data); return ret; @@ -324,7 +324,7 @@ void update_address_state(struct address_state *state, union kr_sockaddr *addres qry->flags.NO_IPV6); state->rtt_state = get_rtt_state(ip_to_bytes(address, address_len), - address_len, &qry->request->ctx->cache); + address_len, &qry->request->ctx->cache, qry->request); invalidate_dead_upstream( state, qry->request->ctx->cache_rtt_tout_retry_interval); #ifdef SELECTION_CHOICE_LOGGING @@ -562,9 +562,8 @@ void update_rtt(struct kr_query *qry, struct address_state *addr_state, return; } - struct kr_cache *cache = &qry->request->ctx->cache; - struct kr_cache_top_context *old_cache_top_ctx = - kr_cache_top_context_switch(&the_resolver->cache.top, &qry->request->cache_top_context, "update_rtt"); + struct kr_request *req = qry->request; + struct kr_cache *cache = &req->ctx->cache; uint8_t *address = ip_to_bytes(&transport->address, transport->address_len); /* This construct is a bit racy since the global state may change @@ -572,9 +571,9 @@ void update_rtt(struct kr_query *qry, struct address_state *addr_state, * care that much since it is rare and we only risk slightly suboptimal * transport choice. */ struct rtt_state cur_rtt_state = - get_rtt_state(address, transport->address_len, cache); + get_rtt_state(address, transport->address_len, cache, req); struct rtt_state new_rtt_state = calc_rtt_state(cur_rtt_state, rtt); - put_rtt_state(address, transport->address_len, new_rtt_state, cache); + put_rtt_state(address, transport->address_len, new_rtt_state, cache, req); if (transport->address_len == sizeof(struct in6_addr)) no6_success(qry); @@ -591,8 +590,6 @@ void update_rtt(struct kr_query *qry, struct address_state *addr_state, qry->id, ns_name, ns_str ? 
ns_str : "", zonecut_str, rtt, new_rtt_state.srtt, new_rtt_state.variance); } - - kr_cache_top_context_switch(&the_resolver->cache.top, old_cache_top_ctx, "update_rtt"); } /// Update rtt_state (including caching) after a server timed out. @@ -603,9 +600,6 @@ static void server_timeout(const struct kr_query *qry, const struct kr_transport if (transport->timeout_capped) return; - struct kr_cache_top_context *old_cache_top_ctx = - kr_cache_top_context_switch(&the_resolver->cache.top, &qry->request->cache_top_context, "server_timeout"); - const uint8_t *address = ip_to_bytes(&transport->address, transport->address_len); if (transport->address_len == sizeof(struct in6_addr)) no6_timed_out(qry, address); @@ -614,7 +608,7 @@ static void server_timeout(const struct kr_query *qry, const struct kr_transport // While we were waiting for timeout, the stats might have changed considerably, // so let's overwrite what we had by fresh cache contents. // This is useful when the address is busy (we query it concurrently). - *state = get_rtt_state(address, transport->address_len, cache); + *state = get_rtt_state(address, transport->address_len, cache, qry->request); ++state->consecutive_timeouts; // Avoid overflow; we don't utilize very high values anyway (arbitrary limit). @@ -628,12 +622,10 @@ static void server_timeout(const struct kr_query *qry, const struct kr_transport // If transport was chosen by a different query, that one will cache it. if (!transport->deduplicated) { - put_rtt_state(address, transport->address_len, *state, cache); + put_rtt_state(address, transport->address_len, *state, cache, qry->request); } else { kr_cache_commit(cache); // Avoid any risk of long transaction. } - - kr_cache_top_context_switch(&the_resolver->cache.top, old_cache_top_ctx, "server_timeout"); } // Not everything can be checked in nice ways like static_assert() static __attribute__((constructor)) void test_RTT_consts(void) diff --git a/lib/selection.h b/lib/selection.h index f9488ff88..c207d0285 100644 --- a/lib/selection.h +++ b/lib/selection.h @@ -246,10 +246,10 @@ void error(struct kr_query *qry, struct address_state *addr_state, * (e.g. calling kr_cache_commit). */ struct rtt_state get_rtt_state(const uint8_t *ip, size_t len, - struct kr_cache *cache); + struct kr_cache *cache, struct kr_request *req); int put_rtt_state(const uint8_t *ip, size_t len, struct rtt_state state, - struct kr_cache *cache); + struct kr_cache *cache, struct kr_request *req); /** * @internal Helper function for conversion between different IP representations. 
diff --git a/lib/zonecut.c b/lib/zonecut.c index 8004fd222..9faac3ab5 100644 --- a/lib/zonecut.c +++ b/lib/zonecut.c @@ -304,7 +304,7 @@ static addrset_info_t fetch_addr(pack_t *addrs, const knot_dname_t *ns, uint16_t struct kr_context *ctx = qry->request->ctx; struct kr_cache_p peek; - if (kr_cache_peek_exact(&ctx->cache, ns, rrtype, &peek) != 0) { + if (kr_cache_peek_exact(&ctx->cache, qry->request, ns, rrtype, &peek) != 0) { return AI_UNKNOWN; } int32_t new_ttl = kr_cache_ttl(&peek, qry, ns, rrtype); @@ -369,7 +369,7 @@ static int fetch_ns(struct kr_context *ctx, struct kr_zonecut *cut, uint8_t * restrict rank) { struct kr_cache_p peek; - int ret = kr_cache_peek_exact(&ctx->cache, name, KNOT_RRTYPE_NS, &peek); + int ret = kr_cache_peek_exact(&ctx->cache, qry->request, name, KNOT_RRTYPE_NS, &peek); if (ret != 0) { return ret; } @@ -499,7 +499,7 @@ static int fetch_secure_rrset(knot_rrset_t **rr, struct kr_cache *cache, return kr_error(EINVAL); /* peek, check rank and TTL */ struct kr_cache_p peek; - int ret = kr_cache_peek_exact(cache, owner, type, &peek); + int ret = kr_cache_peek_exact(cache, qry->request, owner, type, &peek); if (ret != 0) return ret; if (!kr_rank_test(peek.rank, KR_RANK_SECURE)) -- 2.47.2
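
For readers skimming the diff: after this change, kr_cache_top_access() takes
the struct kr_request * directly and derives both the shared KRU state
(&req->ctx->cache.top) and the per-request deduplication context
(&req->cache_top_context) from it, so nothing has to be switched in and out of
global state around each resolution step. Below is a minimal sketch of the
resulting call pattern; demo_access() and its parameters are hypothetical,
invented here for illustration, while the kr_* names are the ones touched by
the patch.

    #include "lib/cache/top.h"  /* kr_cache_top_access(), new signature */
    #include "lib/resolve.h"    /* struct kr_request */

    /* Hypothetical call site (not part of the patch), mirroring the
     * pattern of stash_rrset() and peek_exact_real() above. */
    static void demo_access(struct kr_request *req,
                            knot_db_val_t key, knot_db_val_t val)
    {
        /* Before this patch, callers bracketed the access with global state:
         *
         *   kr_cache_top_context_switch(&the_resolver->cache.top,
         *                               &req->cache_top_context, "demo");
         *   kr_cache_top_access(&the_resolver->cache.top,
         *                       key.data, key.len, val.len, "demo");
         *   kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "demo");
         *
         * After it, the request is threaded through explicitly: */
        if (req)  /* kr_cache_top_access() dereferences req->ctx, so guard
                   * call sites that may run outside a request */
                kr_cache_top_access(req, key.data, key.len, val.len, "demo");
    }

That unconditional req->ctx dereference is also why the patch adds if (qry)
guards at the call sites where qry may be NULL (stash_rrset(), stash_nsec_p(),
closest_NS()), while per-request hit paths such as peek_nosync() pass req
unconditionally.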