It seemed to bring more complexity than benefit.
In many parts this meant reverting the code to its state from a few commits earlier.
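A minimal sketch of the resulting call pattern (names taken from the hunks below; not a complete excerpt): the per-request context no longer has to be switched in and out around each resolver entry point, because kr_cache_top_access() now receives the kr_request and looks the context up itself.

	/* Old pattern (sketch): accesses were charged to a shared, explicitly switched context. */
	kr_cache_top_context_switch(&the_resolver->cache.top, &request->cache_top_context, "produce");
	kr_cache_top_access(&cache->top, key.data, key.len, val.len, "stash_rrset");
	kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce");

	/* New pattern (sketch): the context travels with the request itself. */
	if (qry) /* stashing may also happen outside a request */
		kr_cache_top_access(qry->request, key.data, key.len, val.len, "stash_rrset");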
struct kr_cache_top {
struct mmapped mmapped;
struct top_data *data;
- struct kr_cache_top_context *ctx;
};
struct kr_cache {
kr_cdb_pt db;
qry->flags.TCP = false;
}
qr_task_step(task, NULL, NULL);
-
defer_sample_restart();
} else {
kr_assert(task->ctx->source.session == session);
/** Stash a single nsec_p. \return 0 (errors are ignored). */
static int stash_nsec_p(const knot_dname_t *dname, const char *nsec_p_v,
struct kr_cache *cache, uint32_t timestamp, knot_mm_t *pool,
- const struct kr_query *qry/*logging*/);
+ const struct kr_query *qry/*logging + cache_top*/);
/** The whole .consume phase for the cache module. */
int cache_stash(kr_layer_t *ctx, knot_pkt_t *pkt)
rdataset_dematerialize(rds_sigs, eh->data + rr_ssize);
if (kr_fails_assert(entry_h_consistent_E(val_new_entry, rr->type)))
return kr_error(EINVAL);
- kr_cache_top_access(&cache->top, key.data, key.len, val_new_entry.len, "stash_rrset");
+ if (qry) // it's possible to insert outside a request
+ kr_cache_top_access(qry->request, key.data, key.len, val_new_entry.len, "stash_rrset");
#if 0 /* Occasionally useful when debugging some kinds of changes. */
{
VERBOSE_MSG(qry, "=> EL write failed (ret: %d)\n", ret);
return kr_ok();
}
- kr_cache_top_access(&cache->top, key.data, key.len, val.len, "stash_nsec_p");
+ if (qry)
+ kr_cache_top_access(qry->request, key.data, key.len, val.len, "stash_nsec_p");
if (log_refresh_by) {
VERBOSE_MSG(qry, "=> nsec_p stashed for %s (refresh by %d, hash: %x)\n",
log_dname, log_refresh_by, log_hash);
return (int) written;
}
-static int peek_exact_real(struct kr_cache *cache, const knot_dname_t *name, uint16_t type,
+static int peek_exact_real(struct kr_cache *cache, struct kr_request *req,
+ const knot_dname_t *name, uint16_t type,
struct kr_cache_p *peek)
{
if (!check_rrtype(type, NULL) || !check_dname_for_lf(name, NULL)) {
.raw_data = val.data,
.raw_bound = knot_db_val_bound(val),
};
- kr_cache_top_access(&cache->top, key.data, key.len, whole_val_len, "peek_exact_real"); // hits only
+ kr_cache_top_access(req, key.data, key.len, whole_val_len, "peek_exact_real"); // hits only
return kr_ok();
}
-int kr_cache_peek_exact(struct kr_cache *cache, const knot_dname_t *name, uint16_t type,
+int kr_cache_peek_exact(struct kr_cache *cache, struct kr_request *req,
+ const knot_dname_t *name, uint16_t type,
struct kr_cache_p *peek)
{ /* Just wrap with extra verbose logging. */
- const int ret = peek_exact_real(cache, name, type, peek);
+ const int ret = peek_exact_real(cache, req, name, type, peek);
if (false && kr_log_is_debug(CACHE, NULL)) { /* too noisy for usual --verbose */
auto_free char *type_str = kr_rrtype_text(type),
*name_str = kr_dname_text(name);
};
};
KR_EXPORT
-int kr_cache_peek_exact(struct kr_cache *cache, const knot_dname_t *name, uint16_t type,
+int kr_cache_peek_exact(struct kr_cache *cache, struct kr_request *req,
+ const knot_dname_t *name, uint16_t type,
struct kr_cache_p *peek);
/* Parameters (qry, name, type) are used for timestamp and stale-serving decisions. */
KR_EXPORT
void stash_pkt(const knot_pkt_t *pkt, const struct kr_query *qry,
- const struct kr_request *req, const bool needs_pkt)
+ struct kr_request *req, const bool needs_pkt)
{
/* In some cases, stash also the packet. */
const bool is_negative = kr_response_classify(pkt)
eh->has_optout = qf->DNSSEC_OPTOUT;
memcpy(eh->data, &pkt_size, sizeof(pkt_size));
memcpy(eh->data + sizeof(pkt_size), pkt->wire, pkt_size);
- kr_cache_top_access(&cache->top, key.data, key.len, val_new_entry.len, "stash_pkt");
+ kr_cache_top_access(req, key.data, key.len, val_new_entry.len, "stash_pkt");
WITH_VERBOSE(qry) {
auto_free char *type_str = kr_rrtype_text(pkt_type),
* see stash_rrset() for details
* It assumes check_dname_for_lf(). */
void stash_pkt(const knot_pkt_t *pkt, const struct kr_query *qry,
- const struct kr_request *req, bool needs_pkt);
+ struct kr_request *req, bool needs_pkt);
/** Try answering from packet cache, given an entry_h.
*
success:
- kr_cache_top_access(&cache->top, key_nsec.data, key_nsec.len, val.len, "leq_nsec1"); // hits only
+ kr_cache_top_access(qry->request, key_nsec.data, key_nsec.len, val.len, "leq_nsec1"); // hits only
return NULL;
}
success:
- kr_cache_top_access(&cache->top, key_found.data, key_found.len, val.len, "leq_nsec3"); // hits only
+ kr_cache_top_access(qry->request, key_found.data, key_found.len, val.len, "leq_nsec3"); // hits only
return NULL;
}
ret = found_exact_hit(qry, pkt, val, lowest_rank);
}
if (!ret) {
- kr_cache_top_access(&cache->top, key.data, key.len, val.len, "peek_nosync:exact"); // hits only
+ kr_cache_top_access(req, key.data, key.len, val.len, "peek_nosync:exact"); // hits only
return KR_STATE_DONE;
} else if (kr_fails_assert(ret == kr_error(ENOENT))) {
VERBOSE_MSG(qry, "=> exact hit error: %d %s\n", ret, kr_strerror(ret));
ret = entry2answer(&ans, AR_SOA, eh, knot_db_val_bound(val),
k->zname, KNOT_RRTYPE_SOA, new_ttl);
if (ret) return ctx->state;
- kr_cache_top_access(&cache->top, key.data, key.len, val.len, "peek_nosync:SOA"); // hits only
+ kr_cache_top_access(req, key.data, key.len, val.len, "peek_nosync:SOA"); // hits only
}
/* Find our target RCODE. */
ret, (int)new_ttl);
if (ret) return kr_error(ret);
ans->rcode = PKT_NOERROR;
- kr_cache_top_access(&cache->top, key.data, key.len, whole_val_len, "try_wild"); // hits only
+ kr_cache_top_access(qry->request, key.data, key.len, whole_val_len, "try_wild"); // hits only
return kr_ok();
}
success:
k->zlf_len = zlf_len;
- kr_cache_top_access(&cache->top, key.data, key.len, val.len, "closest_NS"); // hits only
+ if (qry)
+ kr_cache_top_access(qry->request, key.data, key.len, val.len, "closest_NS"); // hits only
return kr_ok();
}
#include "lib/cache/top.h"
#include "lib/cache/impl.h"
#include "lib/mmapped.h"
+#include "lib/resolve.h"
#include "lib/kru.h"
#define FILE_FORMAT_VERSION 1 // fail if different
if (state < 0) goto fail;
kr_assert(state == 0);
- top->ctx = NULL;
kr_log_info(CACHE, "Cache top initialized %s (%s).\n",
using_existing ? "using existing data" : "as empty",
(kru_using_avx2() ? "AVX2" : "generic"));
return str;
}
-void kr_cache_top_access(struct kr_cache_top *top, void *key, size_t key_len, size_t data_size, char *debug_label)
+void kr_cache_top_access(struct kr_request *req, void *key, size_t key_len, size_t data_size, char *debug_label)
{
+ struct kr_cache_top *top = &req->ctx->cache.top;
+ struct kr_cache_top_context *ctx = &req->cache_top_context;
kru_hash_t hash = KRU.hash_bytes((struct kru *)&top->data->kru, (uint8_t *)key, key_len);
- const bool unique = top->ctx ? first_access(top->ctx, hash) : true;
+ const bool unique = ctx ? first_access(ctx, hash) : true;
if (!unique) return;
const size_t size = kr_cache_top_entry_size(key_len, data_size);
KRU.load_hash((struct kru *)&top->data->kru, ticks_now(), hash, price);
}
-struct kr_cache_top_context *kr_cache_top_context_switch(struct kr_cache_top *top,
- struct kr_cache_top_context *new_ctx, char *debug_label)
-{
- struct kr_cache_top_context *old_ctx = top->ctx;
- top->ctx = new_ctx;
- return old_ctx;
-}
-
uint16_t kr_cache_top_load(struct kr_cache_top *top, void *key, size_t len)
{
kru_hash_t hash = KRU.hash_bytes((struct kru *)&top->data->kru, (uint8_t *)key, len);
#include <stdalign.h>
#include "lib/mmapped.h"
+struct kr_request;
+
/// Data related to open cache.
struct kr_cache_top {
struct mmapped mmapped;
struct top_data *data;
- struct kr_cache_top_context *ctx;
};
/// Part of the previous, shared between all processes.
/// Charge cache access to the accessed key
/// unless it was already accessed in the current request context.
KR_EXPORT
-void kr_cache_top_access(struct kr_cache_top *top, void *key, size_t key_len, size_t data_size, char *debug_label);
+void kr_cache_top_access(struct kr_request *req, void *key, size_t key_len, size_t data_size, char *debug_label);
// debug_label is currently not used, TODO remove?
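// Illustrative call (mirrors the stashing hunks in this change; qry may be NULL
// when an entry is inserted outside a request):
//   if (qry)
//           kr_cache_top_access(qry->request, key.data, key.len, val.len, "stash_rrset");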
/// Get current KRU load value assigned to the given cache entry key.
KR_EXPORT
uint16_t kr_cache_top_load(struct kr_cache_top *top, void *key, size_t len);
-/// Switch request context; the ctx has to be kept valid until next call.
-/// The context of a new kr_request has to be initialized with zeroes.
-/// Use NULL as ctx to stop using current context;
-/// all cache accesses in such a state are considered unique,
-/// but no such access is expected to happen.
-/// Returns the previous context.
-KR_EXPORT
-struct kr_cache_top_context *kr_cache_top_context_switch(struct kr_cache_top *top, struct kr_cache_top_context *ctx, char *debug_label);
- // debug_label is currently not used, TODO remove?
-
/// Return a readable string representation of a cache key in statically allocated memory.
/// By default printable characters are kept unchanged and NULL-bytes are printed as '|'.
/// Where numeric values are expected (CACHE_KEY_DEF) or non-printable characters occur,
{
kr_require(request && transport && packet);
struct kr_rplan *rplan = &request->rplan;
- kr_cache_top_context_switch(&the_resolver->cache.top, &request->cache_top_context, "produce");
/* No query left for resolution */
if (kr_rplan_empty(rplan)) {
- kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce");
return KR_STATE_FAIL;
}
}
switch(state) {
- case KR_STATE_FAIL:
- kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce");
- return KR_STATE_FAIL;
- case KR_STATE_DONE:
- kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce");
- return KR_STATE_PRODUCE;
+ case KR_STATE_FAIL: return KR_STATE_FAIL;
+ case KR_STATE_DONE: return KR_STATE_PRODUCE;
default: break;
}
VERBOSE_MSG(qry, "=> resuming yielded answer\n");
* this is normally not required, and incurs another cache lookup for a cached answer. */
if (qry->flags.ALWAYS_CUT) { // LATER: maybe the flag doesn't work well anymore
switch(zone_cut_check(request, qry, packet)) {
- case KR_STATE_FAIL:
- kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce");
- return KR_STATE_FAIL;
- case KR_STATE_DONE:
- kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce");
- return KR_STATE_PRODUCE;
+ case KR_STATE_FAIL: return KR_STATE_FAIL;
+ case KR_STATE_DONE: return KR_STATE_PRODUCE;
default: break;
}
}
}
}
switch(request->state) {
- case KR_STATE_FAIL:
- kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce");
- return request->state;
+ case KR_STATE_FAIL: return request->state;
case KR_STATE_CONSUME: break;
case KR_STATE_DONE:
default: /* Current query is done */
kr_rplan_pop(rplan, qry);
}
ITERATE_LAYERS(request, qry, reset);
- kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce");
return kr_rplan_empty(rplan) ? KR_STATE_DONE : KR_STATE_PRODUCE;
}
/* At this point we need to send a query upstream to proceed towards success. */
if (qry->stype == KNOT_RRTYPE_ANY ||
!knot_wire_get_rd(request->qsource.packet->wire)) {
VERBOSE_MSG(qry, "=> qtype is ANY or RD=0, bail out\n");
- kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce");
return KR_STATE_FAIL;
}
/* Update zone cut, spawn new subrequests. */
int state = zone_cut_check(request, qry, packet);
switch(state) {
- case KR_STATE_FAIL:
- kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce");
- return KR_STATE_FAIL;
- case KR_STATE_DONE:
- kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce");
- return KR_STATE_PRODUCE;
+ case KR_STATE_FAIL: return KR_STATE_FAIL;
+ case KR_STATE_DONE: return KR_STATE_PRODUCE;
default: break;
}
}
kr_request_set_extended_error(request, KNOT_EDNS_EDE_NREACH_AUTH, msg);
- kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce");
return KR_STATE_FAIL;
} else {
/* FIXME: This is probably quite inefficient:
* we go through the whole qr_task_step loop just because of the serve_stale
* module which might not even be loaded. */
qry->flags.NO_NS_FOUND = true;
- kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce");
return KR_STATE_PRODUCE;
}
}
uint16_t type = (*transport)->protocol == KR_TRANSPORT_RESOLVE_A ? KNOT_RRTYPE_A : KNOT_RRTYPE_AAAA;
ns_resolve_addr(qry, qry->request, *transport, type);
ITERATE_LAYERS(request, qry, reset);
- kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce");
return KR_STATE_PRODUCE;
}
* kr_resolve_checkout().
*/
qry->timestamp_mono = kr_now();
- kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "produce");
return request->state;
}
int kr_resolve_consume(struct kr_request *request, struct kr_transport **transport, knot_pkt_t *packet)
{
struct kr_rplan *rplan = &request->rplan;
- kr_cache_top_context_switch(&the_resolver->cache.top, &request->cache_top_context, "consume");
/* Empty resolution plan, push packet as the new query */
if (packet && kr_rplan_empty(rplan)) {
- int ret = resolve_query(request, packet);
- kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume");
- return ret;
+ return resolve_query(request, packet);
}
/* Different processing for network error */
/* Check overall resolution time */
if (kr_now() - qry->creation_time_mono >= KR_RESOLVE_TIME_LIMIT) {
kr_query_inform_timeout(request, qry);
- kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume");
return KR_STATE_FAIL;
}
bool tried_tcp = (qry->flags.TCP);
- if (!packet || packet->size == 0) {
- kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume");
+ if (!packet || packet->size == 0)
return KR_STATE_PRODUCE;
- }
/* Packet cleared, derandomize QNAME. */
knot_dname_t *qname_raw = kr_pkt_qname_raw(packet);
if (transport && !qry->flags.CACHED) {
if (!(request->state & KR_STATE_FAIL)) {
/* Do not complete NS address resolution on soft-fail. */
- if (kr_fails_assert(packet->wire)) {
- kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume");
+ if (kr_fails_assert(packet->wire))
return KR_STATE_FAIL;
- }
const int rcode = knot_wire_get_rcode(packet->wire);
if (rcode != KNOT_RCODE_SERVFAIL && rcode != KNOT_RCODE_REFUSED) {
qry->flags.AWAIT_IPV6 = false;
}
if (!qry->flags.NO_NS_FOUND) {
qry->flags.NO_NS_FOUND = true;
- kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume");
return KR_STATE_PRODUCE;
}
"OLX2: delegation ", cut_buf);
}
kr_request_set_extended_error(request, KNOT_EDNS_EDE_NREACH_AUTH, msg);
- kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume");
return KR_STATE_FAIL;
}
} else {
/* Pop query if resolved. */
if (request->state == KR_STATE_YIELD) { // NOLINT(bugprone-branch-clone)
- kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume");
return KR_STATE_PRODUCE; /* Requery */
} else if (qry->flags.RESOLVED) {
kr_rplan_pop(rplan, qry);
} else if (!tried_tcp && (qry->flags.TCP)) {
- kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume");
return KR_STATE_PRODUCE; /* Requery over TCP */
} else { /* Clear query flags for next attempt */
qry->flags.CACHED = false;
if (qry->flags.FORWARD || qry->flags.STUB
/* Probably CPU exhaustion attempt, so do not retry. */
|| qry->vld_limit_crypto_remains <= 0) {
- kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume");
return KR_STATE_FAIL;
}
/* Other servers might not have broken DNSSEC. */
qry->flags.DNSSEC_BOGUS = false;
- kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume");
return KR_STATE_PRODUCE;
}
- kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "consume");
return kr_rplan_empty(&request->rplan) ? KR_STATE_DONE : KR_STATE_PRODUCE;
}
int kr_resolve_finish(struct kr_request *request, int state)
{
- kr_cache_top_context_switch(&the_resolver->cache.top, &request->cache_top_context, "finish");
request->state = state;
/* Finalize answer and construct whole wire-format (unless dropping). */
knot_pkt_t *answer = kr_request_ensure_answer(request);
request->trace_finish = NULL;
request->trace_log = NULL;
- kr_cache_top_context_switch(&the_resolver->cache.top, NULL, "finish");
return KR_STATE_DONE;
}
.dead_since = 0 };
struct rtt_state get_rtt_state(const uint8_t *ip, size_t len,
- struct kr_cache *cache)
+ struct kr_cache *cache, struct kr_request *req)
{
struct rtt_state state;
knot_db_val_t value;
state = default_rtt_state;
} else { // memcpy is safe for unaligned case (on non-x86)
memcpy(&state, value.data, sizeof(state));
- kr_cache_top_access(&cache->top, key.data, key.len, value.len, "get_rtt");
+ kr_cache_top_access(req, key.data, key.len, value.len, "get_rtt");
}
free(key.data);
}
int put_rtt_state(const uint8_t *ip, size_t len, struct rtt_state state,
- struct kr_cache *cache)
+ struct kr_cache *cache, struct kr_request *req)
{
knot_db_t *db = cache->db;
struct kr_cdb_stats *stats = &cache->stats;
int ret = cache->api->write(db, stats, &key, &value, 1);
kr_cache_commit(cache);
- kr_cache_top_access(&cache->top, key.data, key.len, value.len, "put_rtt");
+ kr_cache_top_access(req, key.data, key.len, value.len, "put_rtt");
free(key.data);
return ret;
qry->flags.NO_IPV6);
state->rtt_state =
get_rtt_state(ip_to_bytes(address, address_len),
- address_len, &qry->request->ctx->cache);
+ address_len, &qry->request->ctx->cache, qry->request);
invalidate_dead_upstream(
state, qry->request->ctx->cache_rtt_tout_retry_interval);
#ifdef SELECTION_CHOICE_LOGGING
return;
}
- struct kr_cache *cache = &qry->request->ctx->cache;
- struct kr_cache_top_context *old_cache_top_ctx =
- kr_cache_top_context_switch(&the_resolver->cache.top, &qry->request->cache_top_context, "update_rtt");
+ struct kr_request *req = qry->request;
+ struct kr_cache *cache = &req->ctx->cache;
uint8_t *address = ip_to_bytes(&transport->address, transport->address_len);
/* This construct is a bit racy since the global state may change
* care that much since it is rare and we only risk slightly suboptimal
* transport choice. */
struct rtt_state cur_rtt_state =
- get_rtt_state(address, transport->address_len, cache);
+ get_rtt_state(address, transport->address_len, cache, req);
struct rtt_state new_rtt_state = calc_rtt_state(cur_rtt_state, rtt);
- put_rtt_state(address, transport->address_len, new_rtt_state, cache);
+ put_rtt_state(address, transport->address_len, new_rtt_state, cache, req);
if (transport->address_len == sizeof(struct in6_addr))
no6_success(qry);
qry->id, ns_name, ns_str ? ns_str : "", zonecut_str,
rtt, new_rtt_state.srtt, new_rtt_state.variance);
}
-
- kr_cache_top_context_switch(&the_resolver->cache.top, old_cache_top_ctx, "update_rtt");
}
/// Update rtt_state (including caching) after a server timed out.
if (transport->timeout_capped)
return;
- struct kr_cache_top_context *old_cache_top_ctx =
- kr_cache_top_context_switch(&the_resolver->cache.top, &qry->request->cache_top_context, "server_timeout");
-
const uint8_t *address = ip_to_bytes(&transport->address, transport->address_len);
if (transport->address_len == sizeof(struct in6_addr))
no6_timed_out(qry, address);
// While we were waiting for timeout, the stats might have changed considerably,
// so let's overwrite what we had by fresh cache contents.
// This is useful when the address is busy (we query it concurrently).
- *state = get_rtt_state(address, transport->address_len, cache);
+ *state = get_rtt_state(address, transport->address_len, cache, qry->request);
++state->consecutive_timeouts;
// Avoid overflow; we don't utilize very high values anyway (arbitrary limit).
// If transport was chosen by a different query, that one will cache it.
if (!transport->deduplicated) {
- put_rtt_state(address, transport->address_len, *state, cache);
+ put_rtt_state(address, transport->address_len, *state, cache, qry->request);
} else {
kr_cache_commit(cache); // Avoid any risk of long transaction.
}
-
- kr_cache_top_context_switch(&the_resolver->cache.top, old_cache_top_ctx, "server_timeout");
}
// Not everything can be checked in nice ways like static_assert()
static __attribute__((constructor)) void test_RTT_consts(void)
* (e.g. calling kr_cache_commit).
*/
struct rtt_state get_rtt_state(const uint8_t *ip, size_t len,
- struct kr_cache *cache);
+ struct kr_cache *cache, struct kr_request *req);
int put_rtt_state(const uint8_t *ip, size_t len, struct rtt_state state,
- struct kr_cache *cache);
+ struct kr_cache *cache, struct kr_request *req);
/**
* @internal Helper function for conversion between different IP representations.
struct kr_context *ctx = qry->request->ctx;
struct kr_cache_p peek;
- if (kr_cache_peek_exact(&ctx->cache, ns, rrtype, &peek) != 0) {
+ if (kr_cache_peek_exact(&ctx->cache, qry->request, ns, rrtype, &peek) != 0) {
return AI_UNKNOWN;
}
int32_t new_ttl = kr_cache_ttl(&peek, qry, ns, rrtype);
uint8_t * restrict rank)
{
struct kr_cache_p peek;
- int ret = kr_cache_peek_exact(&ctx->cache, name, KNOT_RRTYPE_NS, &peek);
+ int ret = kr_cache_peek_exact(&ctx->cache, qry->request, name, KNOT_RRTYPE_NS, &peek);
if (ret != 0) {
return ret;
}
return kr_error(EINVAL);
/* peek, check rank and TTL */
struct kr_cache_p peek;
- int ret = kr_cache_peek_exact(cache, owner, type, &peek);
+ int ret = kr_cache_peek_exact(cache, qry->request, owner, type, &peek);
if (ret != 0)
return ret;
if (!kr_rank_test(peek.rank, KR_RANK_SECURE))