From 8af54dfb349170630dec1e8c537c5582ff2867b0 Mon Sep 17 00:00:00 2001
From: =?utf8?q?Marek=20Vavrus=CC=8Ca?=
Date: Mon, 9 Apr 2018 23:11:16 -0700
Subject: [PATCH] implement basic infrastructure for scoped cache

This commit adds support for a scoped cache, i.e. keys can be tagged
with a scope, so that the same key can exist in multiple scopes and
return a value based on the scope set.

This is practically required for scoping by subnet in ECS, but it
doesn't implement ECS completely. This is just a framework to make
something like ECS possible in a module.

The scope search is currently non-exhaustive: it either returns a value
bound to the given scope or looks into the global scope, nothing in
between.
---
 daemon/lua/kres-gen.lua | 17 ++++++++++
 daemon/lua/kres-gen.sh  | 12 +++++++
 lib/cache/api.c         | 73 +++++++++++++++++++++++++++++++++--------
 lib/cache/api.h         |  5 ++-
 lib/cache/entry_pkt.c   |  2 +-
 lib/cache/impl.h        | 32 ++++++++++++++++--
 lib/cache/peek.c        | 58 ++++++++++++++++++++++++++++----
 lib/resolve.c           | 29 +++++++++-------
 lib/resolve.h           |  2 ++
 tests/test_cache.c      |  8 ++---
 10 files changed, 197 insertions(+), 41 deletions(-)

diff --git a/daemon/lua/kres-gen.lua b/daemon/lua/kres-gen.lua
index dc0a7ad6f..b2596f605 100644
--- a/daemon/lua/kres-gen.lua
+++ b/daemon/lua/kres-gen.lua
@@ -80,6 +80,12 @@ struct knot_pkt {
 	knot_mm_t mm;
 	knot_compr_t compr;
 };
+typedef struct {
+	uint16_t family;
+	uint8_t source_len;
+	uint8_t scope_len;
+	uint8_t address[16];
+} knot_edns_client_subnet_t;
 typedef struct {
 	void *root;
 	struct knot_mm *pool;
@@ -194,6 +200,8 @@ struct kr_request {
 	trace_log_f trace_log;
 	trace_callback_f trace_finish;
 	int vars_ref;
+	int cache_scope_len_bits;
+	const uint8_t *cache_scope;
 	knot_mm_t pool;
 };
 enum kr_rank {KR_RANK_INITIAL, KR_RANK_OMIT, KR_RANK_TRY, KR_RANK_INDET = 4, KR_RANK_BOGUS, KR_RANK_MISMATCH, KR_RANK_MISSING, KR_RANK_INSECURE, KR_RANK_AUTH = 16, KR_RANK_SECURE = 32};
@@ -275,6 +283,15 @@ int knot_pkt_put_rotate(knot_pkt_t *, uint16_t, const knot_rrset_t *, uint16_t,
 knot_pkt_t *knot_pkt_new(void *, uint16_t, knot_mm_t *);
 void knot_pkt_free(knot_pkt_t *);
 int knot_pkt_parse(knot_pkt_t *, unsigned int);
+int knot_pkt_reserve(knot_pkt_t *pkt, uint16_t size);
+uint8_t knot_edns_get_version(const knot_rrset_t *);
+uint16_t knot_edns_get_payload(const knot_rrset_t *);
+bool knot_edns_has_option(const knot_rrset_t *, uint16_t);
+uint8_t *knot_edns_get_option(const knot_rrset_t *, uint16_t);
+int knot_edns_add_option(knot_rrset_t *, uint16_t, uint16_t, const uint8_t *, knot_mm_t *);
+uint16_t knot_edns_client_subnet_size(const knot_edns_client_subnet_t *);
+int knot_edns_client_subnet_write(uint8_t *, uint16_t, const knot_edns_client_subnet_t *);
+int knot_edns_client_subnet_parse(knot_edns_client_subnet_t *, const uint8_t *, uint16_t);
 struct kr_rplan *kr_resolve_plan(struct kr_request *);
 knot_mm_t *kr_resolve_pool(struct kr_request *);
 struct kr_query *kr_rplan_push(struct kr_rplan *, struct kr_query *, const knot_dname_t *, uint16_t, uint16_t);
diff --git a/daemon/lua/kres-gen.sh b/daemon/lua/kres-gen.sh
index 78a0401c9..a03729c76 100755
--- a/daemon/lua/kres-gen.sh
+++ b/daemon/lua/kres-gen.sh
@@ -62,6 +62,7 @@ genResType "knot_rrset_t" | sed 's/\<owner\>/_owner/; s/\<ttl\>/_ttl/'
 	struct knot_compr
 	knot_compr_t
 	struct knot_pkt
+	knot_edns_client_subnet_t
 # generics
 	map_t
 # libkres
@@ -123,6 +124,17 @@ printf "\tchar _stub[];\n};\n"
 	knot_pkt_new
 	knot_pkt_free
 	knot_pkt_parse
+	knot_pkt_reserve
+# OPT
+	knot_edns_get_version
+	knot_edns_get_payload
+	knot_edns_has_option
+	knot_edns_get_option
+	knot_edns_add_option
+	knot_edns_client_subnet_size
+	knot_edns_client_subnet_write
+	knot_edns_client_subnet_parse
+	knot_edns_client_subnet_set_addr
 EOF

 ## libkres API
diff --git a/lib/cache/api.c b/lib/cache/api.c
index 03f23397f..febba52cc 100644
--- a/lib/cache/api.c
+++ b/lib/cache/api.c
@@ -155,7 +155,8 @@ int kr_cache_sync(struct kr_cache *cache)
 	return kr_ok();
 }

-int kr_cache_insert_rr(struct kr_cache *cache, const knot_rrset_t *rr, const knot_rrset_t *rrsig, uint8_t rank, uint32_t timestamp)
+int kr_cache_insert_rr(struct kr_cache *cache, const knot_rrset_t *rr, const knot_rrset_t *rrsig,
+		       uint8_t rank, uint32_t timestamp, const uint8_t *scope, int scope_len_bits)
 {
 	int err = stash_rrset_precond(rr, NULL);
 	if (err <= 0) {
@@ -267,10 +268,31 @@ static bool check_rrtype(uint16_t type, const struct kr_query *qry/*logging*/)
 	return ret;
 }

+int cache_key_write_scope(struct key *k, size_t off, const uint8_t *scope, int scope_len_bits)
+{
+	const int scope_len_bytes = (scope_len_bits + 7) / 8;
+	if (!k || !scope || off + scope_len_bytes + 1 > KR_CACHE_KEY_MAXLEN) {
+		return kr_error(EINVAL);
+	}
+
+	/* Write the scope at the current offset */
+	memmove(k->buf + off, scope, scope_len_bytes);
+
+	/* Write a terminal byte to distinguish 'no scope' from 'global scope' */
+	k->buf[off + scope_len_bytes] = '\0';
+
+	return scope_len_bytes + 1;
+}
+
 /** Like key_exact_type() but omits a couple checks not holding for pkt cache. */
-knot_db_val_t key_exact_type_maypkt(struct key *k, uint16_t type)
+knot_db_val_t key_exact_type_maypkt(struct key *k, uint16_t type, const uint8_t *scope, int scope_len_bits)
 {
 	assert(check_rrtype(type, NULL));
+	if (!is_scopable_type(type)) {
+		scope = NULL;
+		scope_len_bits = 0;
+	}
 	switch (type) {
 	case KNOT_RRTYPE_RRSIG: /* no RRSIG query caching, at least for now */
 		assert(false);
@@ -286,12 +308,20 @@ knot_db_val_t key_exact_type_maypkt(struct key *k, uint16_t type)
 	int name_len = k->buf[0];
 	k->buf[name_len + 1] = 0; /* make sure different names can never match */
 	k->buf[name_len + 2] = 'E'; /* tag for exact name+type matches */
-	memcpy(k->buf + name_len + 3, &type, 2);
+
+	size_t off = name_len + 3;
+	memcpy(k->buf + off, &type, sizeof(type));
 	k->type = type;
-	/* CACHE_KEY_DEF: key == dname_lf + '\0' + 'E' + RRTYPE */
-	return (knot_db_val_t){ k->buf + 1, name_len + 4 };
-}
+	off += sizeof(type);
+
+	int ret = cache_key_write_scope(k, off, scope, scope_len_bits);
+	if (ret > 0) {
+		off += ret;
+	}
+	/* CACHE_KEY_DEF: key == dname_lf + '\0' + 'E' + RRTYPE + scope */
+	return (knot_db_val_t){ k->buf + 1, off - 1 };
+}

 /** The inside for cache_peek(); implementation separated to ./peek.c */
 int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt);
@@ -380,7 +410,6 @@ int cache_stash(kr_layer_t *ctx, knot_pkt_t *pkt)
 			VERBOSE_MSG(qry, "=> stashing RRs errored out\n");
 			goto finally;
 		}
-		cache->stats.insert += 1;
 		/* LATER(optim.): maybe filter out some type-rank combinations
 		 * that won't be useful as separate RRsets. */
 	}
@@ -444,6 +473,7 @@ static ssize_t stash_rrset(struct kr_cache *cache, const struct kr_query *qry,
 	/* Construct the key under which RRs will be stored,
 	 * and add corresponding nsec_pmap item (if necessary). */
+	int used_scope_len = -1;
 	struct key k_storage, *k = &k_storage;
 	knot_db_val_t key;
 	switch (rr->type) {
@@ -497,7 +527,22 @@ static ssize_t stash_rrset(struct kr_cache *cache, const struct kr_query *qry,
 			assert(!ret);
 			return kr_error(ret);
 		}
-		key = key_exact_type(k, rr->type);
+		/* Scope the record if it is authoritative and of a scopable type */
+		const uint8_t *scope = NULL;
+		int scope_len = 0;
+		if (qry) {
+			struct kr_request *req = qry->request;
+			/* Exclude infrastructure service requests (e.g. A/AAAA for an NS set)
+			 * and exclude non-authoritative data (records from other sections)
+			 */
+			if (!qry->parent && kr_rank_test(rank, KR_RANK_AUTH) && is_scopable_type(rr->type)) {
+				scope = req->cache_scope;
+				scope_len = req->cache_scope_len_bits;
+				used_scope_len = scope_len;
+			}
+		}
+
+		key = key_exact_type(k, rr->type, scope, scope_len);
 	}

 	/* Compute materialized sizes of the new data. */
@@ -556,9 +601,9 @@ static ssize_t stash_rrset(struct kr_cache *cache, const struct kr_query *qry,
 	    || rr->type == KNOT_RRTYPE_NS) {
 		auto_free char *type_str = kr_rrtype_text(rr->type),
 			*encl_str = kr_dname_text(encloser);
-		VERBOSE_MSG(qry, "=> stashed %s%s %s, rank 0%.2o, "
+		VERBOSE_MSG(qry, "=> stashed %s%s %s, rank 0%.2o, scoped: %d, "
 			"%d B total, incl. %d RRSIGs\n",
-			(wild_labels ? "*." : ""), encl_str, type_str, rank,
+			(wild_labels ? "*." : ""), encl_str, type_str, rank, used_scope_len,
 			(int)val_new_entry.len, (rr_sigs ? rr_sigs->rrs.count : 0)
 			);
 	}
@@ -642,7 +687,7 @@ static int stash_nsec_p(const knot_dname_t *dname, const char *nsec_p_v,
 	struct key k_storage, *k = &k_storage;
 	int ret = kr_dname_lf(k->buf, dname, false);
 	if (ret) return kr_error(ret);
-	knot_db_val_t key = key_exact_type(k, KNOT_RRTYPE_NS);
+	knot_db_val_t key = key_exact_type(k, KNOT_RRTYPE_NS, NULL, 0);
 	knot_db_val_t val_orig = { NULL, 0 };
 	ret = cache_op(cache, read, &key, &val_orig, 1);
 	if (ret && ret != -ABS(ENOENT)) {
@@ -730,7 +775,7 @@ static int peek_exact_real(struct kr_cache *cache, const knot_dname_t *name, uint16_t type,
 	int ret = kr_dname_lf(k->buf, name, false);
 	if (ret) return kr_error(ret);

-	knot_db_val_t key = key_exact_type(k, type);
+	knot_db_val_t key = key_exact_type(k, type, NULL, 0);
 	knot_db_val_t val = { NULL, 0 };
 	ret = cache_op(cache, read, &key, &val, 1);
 	if (!ret) ret = entry_h_seek(&val, type);
@@ -777,7 +822,7 @@ int kr_cache_remove(struct kr_cache *cache, const knot_dname_t *name, uint16_t type)
 	int ret = kr_dname_lf(k->buf, name, false);
 	if (ret) return kr_error(ret);

-	knot_db_val_t key = key_exact_type(k, type);
+	knot_db_val_t key = key_exact_type(k, type, NULL, 0);
 	return cache_op(cache, remove, &key, 1);
 }

@@ -797,7 +842,7 @@ int kr_cache_match(struct kr_cache *cache, const knot_dname_t *name,
 	if (ret) return kr_error(ret);

 	// use a mock type
-	knot_db_val_t key = key_exact_type(k, KNOT_RRTYPE_A);
+	knot_db_val_t key = key_exact_type(k, KNOT_RRTYPE_A, NULL, 0);
 	/* CACHE_KEY_DEF */
 	key.len -= sizeof(uint16_t); /* the type */
 	if (!exact_name) {
diff --git a/lib/cache/api.h b/lib/cache/api.h
index 84d0f07db..f830dafe3 100644
--- a/lib/cache/api.h
+++ b/lib/cache/api.h
@@ -99,10 +99,13 @@ static inline void kr_cache_make_checkpoint(struct kr_cache *cache)
  * @param rrsig RRSIG for inserted RRSet (optional)
  * @param rank rank of the data
  * @param timestamp current time
+ * @param scope scope of the record (optional)
+ * @param scope_len_bits length of the scope in bits
  * @return 0 or an errcode
  */
 KR_EXPORT
-int kr_cache_insert_rr(struct kr_cache *cache, const knot_rrset_t *rr, const knot_rrset_t *rrsig, uint8_t rank, uint32_t timestamp);
+int kr_cache_insert_rr(struct kr_cache *cache, const knot_rrset_t *rr, const knot_rrset_t *rrsig,
+		       uint8_t rank, uint32_t timestamp, const uint8_t *scope, int scope_len_bits);

 /**
  * Clear all items from the cache.
diff --git a/lib/cache/entry_pkt.c b/lib/cache/entry_pkt.c
index 9b2e804a4..625621149 100644
--- a/lib/cache/entry_pkt.c
+++ b/lib/cache/entry_pkt.c
@@ -114,7 +114,7 @@ void stash_pkt(const knot_pkt_t *pkt, const struct kr_query *qry,
 		assert(owner == NULL);
 		return;
 	}
-	key = key_exact_type_maypkt(k, pkt_type);
+	key = key_exact_type_maypkt(k, pkt_type, NULL, 0);

 	/* For now we stash the full packet byte-exactly as it came from upstream. */
 	const uint16_t pkt_size = pkt->size;
diff --git a/lib/cache/impl.h b/lib/cache/impl.h
index c38ac6c5b..206b8d912 100644
--- a/lib/cache/impl.h
+++ b/lib/cache/impl.h
@@ -138,13 +138,39 @@ static inline size_t key_nsec3_hash_off(const struct key *k)
 static const int NSEC3_HASH_LEN = 20,
 	NSEC3_HASH_TXT_LEN = 32;

+/**
+ * This does not exactly implement https://datatracker.ietf.org/doc/rfc7871/ section 7.3.1.
+ * That section says that only DNSSEC records and records from non-answer sections must be
+ * scoped to the given network. However, ECS is used almost exclusively for traffic
+ * engineering, and many types are not meant for that. The NS record can also show up in
+ * the answer section in a parent-child setup, but it should not be scoped.
+ */
+static inline bool is_scopable_type(uint16_t type)
+{
+	return type == KNOT_RRTYPE_A || type == KNOT_RRTYPE_AAAA || type == KNOT_RRTYPE_CNAME;
+}
+
+/**
+ * Write the cache key scope after the formatted lookup key.
+ * The cache key looks roughly like this:
+ *   off -- len (bytes)
+ *   0   .. 1  domain name len (d)
+ *   1   .. 1  tag (E or 1)
+ *   2   .. d  domain name (d = 0 .. 255)
+ *       .. 1  terminator \x00
+ *
+ * The E tag has additional information:
+ *       .. t  type in text (e.g. AAAA, t = 1 .. 9 (as of now))
+ *       .. s  cache scope (e.g. [192 168 1], s = 0 .. 16)
+ */
+int cache_key_write_scope(struct key *k, size_t off, const uint8_t *scope, int scope_len_bits);
+
 /** Finish constructing string key for exact search.
  * It's assumed that kr_dname_lf(k->buf, owner, *) had been run. */
-knot_db_val_t key_exact_type_maypkt(struct key *k, uint16_t type);
+knot_db_val_t key_exact_type_maypkt(struct key *k, uint16_t type, const uint8_t *scope, int scope_len_bits);

 /** Like key_exact_type_maypkt but with extra checks if used for RRs only. */
-static inline knot_db_val_t key_exact_type(struct key *k, uint16_t type)
+static inline knot_db_val_t key_exact_type(struct key *k, uint16_t type, const uint8_t *scope, int scope_len_bits)
 {
 	switch (type) {
 	/* Sanity check: forbidden types represented in other way(s). */
@@ -153,7 +179,7 @@ static inline knot_db_val_t key_exact_type(struct key *k, uint16_t type)
 		assert(false);
 		return (knot_db_val_t){ NULL, 0 };
 	}
-	return key_exact_type_maypkt(k, type);
+	return key_exact_type_maypkt(k, type, scope, scope_len_bits);
 }
diff --git a/lib/cache/peek.c b/lib/cache/peek.c
index 176bc06c6..8cf93b3db 100644
--- a/lib/cache/peek.c
+++ b/lib/cache/peek.c
@@ -112,6 +112,18 @@ static uint8_t get_lowest_rank(const struct kr_request *req, const struct kr_query *qry)
 	return KR_RANK_INITIAL | KR_RANK_AUTH;
 }

+/**
+ * Return the cache scope as a hex string.
+ */
+static char *cache_scope_hex(const uint8_t *scope, int scope_len_bits)
+{
+	const int len = (scope_len_bits + 7) / 8;
+	char *hex_str = calloc(1, len * 2 + 1);
+	for (int i = 0; i < len; ++i) {
+		snprintf(hex_str + (i * 2), 3, "%02x", scope[i]);
+	}
+	return hex_str;
+}

 /** Almost whole .produce phase for the cache module.
  * \note we don't transition to KR_STATE_FAIL even in case of "unexpected errors".
@@ -133,9 +145,17 @@ int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt)

 	/**** 1. find the name or the closest (available) zone, not considering wildcards
 	 **** 1a. exact name+type match (can be negative answer in insecure zones) */
-	knot_db_val_t key = key_exact_type_maypkt(k, qry->stype);
+	knot_db_val_t key = key_exact_type_maypkt(k, qry->stype, req->cache_scope, req->cache_scope_len_bits);
 	knot_db_val_t val = { NULL, 0 };
 	ret = cache_op(cache, read, &key, &val, 1);
+	/* If the name is expected to be scoped, but there's no scoped result in the cache,
+	 * check the global scope, as the name may not be scoped by the server. */
+	if (req->cache_scope != NULL && ret && ret == -abs(ENOENT)) {
+		/* Retry using the global scope */
+		VERBOSE_MSG(qry, "=> searching global scope /0\n");
+		key = key_exact_type_maypkt(k, qry->stype, req->cache_scope, 0);
+		ret = cache_op(cache, read, &key, &val, 1);
+	}
 	if (!ret) {
 		/* found an entry: test conditions, materialize into pkt, etc. */
 		ret = found_exact_hit(ctx, pkt, val, lowest_rank);
@@ -145,6 +165,12 @@ int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt)
 		assert(false);
 		return ctx->state;
 	} else if (!ret) {
+		WITH_VERBOSE(qry) {
+			if (req->cache_scope && is_scopable_type(qry->stype)) {
+				auto_free char *hex_str = cache_scope_hex(req->cache_scope, req->cache_scope_len_bits);
+				VERBOSE_MSG(qry, "=> found exact match in scope %s/%d\n", hex_str, req->cache_scope_len_bits);
+			}
+		}
 		cache->stats.hit += 1;
 		return KR_STATE_DONE;
 	}
@@ -254,7 +280,7 @@ int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt)
 	/* Assuming k->buf still starts with zone's prefix,
 	 * look up the SOA in cache. */
 	k->buf[0] = k->zlf_len;
-	key = key_exact_type(k, KNOT_RRTYPE_SOA);
+	key = key_exact_type(k, KNOT_RRTYPE_SOA, NULL, 0);
 	knot_db_val_t val = { NULL, 0 };
 	ret = cache_op(cache, read, &key, &val, 1);
 	const struct entry_h *eh;
@@ -455,9 +481,14 @@ static int answer_simple_hit(kr_layer_t *ctx, knot_pkt_t *pkt, uint16_t type,
 	if (qf->DNSSEC_INSECURE) {
 		qf->DNSSEC_WANT = false;
 	}
-	VERBOSE_MSG(qry, "=> satisfied by exact %s: rank 0%.2o, new TTL %d\n",
-			(type == KNOT_RRTYPE_CNAME ? "CNAME" : "RRset"),
-			eh->rank, new_ttl);
+	WITH_VERBOSE(qry) {
+		auto_free char *scope_hex = NULL;
+		if (req->cache_scope && is_scopable_type(type)) {
+			scope_hex = cache_scope_hex(req->cache_scope, req->cache_scope_len_bits);
+		}
+		VERBOSE_MSG(qry, "=> satisfied by exact RR or CNAME: rank 0%.2o, new TTL %d, scope %s/%d\n",
+				eh->rank, new_ttl, scope_hex ? scope_hex : "", scope_hex ? req->cache_scope_len_bits : 0);
+	}
 	return kr_ok();
 }
 #undef CHECK_RET
@@ -509,7 +540,7 @@ static int try_wild(struct key *k, struct answer *ans, const knot_dname_t *clencl_name,
 		const uint16_t type, const uint8_t lowest_rank,
 		const struct kr_query *qry, struct kr_cache *cache)
 {
-	knot_db_val_t key = key_exact_type(k, type);
+	knot_db_val_t key = key_exact_type(k, type, NULL, 0);
 	/* Find the record. */
 	knot_db_val_t val = { NULL, 0 };
 	int ret = cache_op(cache, read, &key, &val, 1);
@@ -597,9 +628,13 @@ static int closest_NS(struct kr_cache *cache, struct key *k, entry_list_t el,
 		       struct kr_query *qry, const bool only_NS, const bool is_DS)
 {
 	/* get the current timestamp */
+	const uint8_t *cache_scope = NULL;
+	int cache_scope_len_bits = 0;
 	uint32_t timestamp;
 	if (qry) {
 		timestamp = qry->timestamp.tv_sec;
+		cache_scope = qry->request->cache_scope;
+		cache_scope_len_bits = qry->request->cache_scope_len_bits;
 	} else {
 		struct timeval tv;
 		if (gettimeofday(&tv, NULL)) return kr_error(errno);
@@ -614,9 +649,18 @@ static int closest_NS(struct kr_cache *cache, struct key *k, entry_list_t el,
 	/* Inspect the NS/xNAME entries, shortening by a label on each iteration. */
 	do {
 		k->buf[0] = zlf_len;
-		knot_db_val_t key = key_exact_type(k, KNOT_RRTYPE_NS);
+		/* Look for a CNAME on the exact match to allow scoping, NS otherwise.
+		 * The CNAME is going to get rewritten to the NS key, but it will be scoped if possible.
+		 */
+		const uint16_t find_type = exact_match ? KNOT_RRTYPE_CNAME : KNOT_RRTYPE_NS;
+		knot_db_val_t key = key_exact_type(k, find_type, cache_scope, cache_scope_len_bits);
 		knot_db_val_t val;
 		int ret = cache_op(cache, read, &key, &val, 1);
+		/* Try the global scope if scoped, but no immediate match was found */
+		if (exact_match && cache_scope != NULL && ret == -abs(ENOENT)) {
+			key = key_exact_type_maypkt(k, KNOT_RRTYPE_NS, cache_scope, 0);
+			ret = cache_op(cache, read, &key, &val, 1);
+		}
 		if (ret == -abs(ENOENT)) goto next_label;
 		if (ret) {
 			assert(!ret);
diff --git a/lib/resolve.c b/lib/resolve.c
index fd0312a5e..1280b858e 100644
--- a/lib/resolve.c
+++ b/lib/resolve.c
@@ -709,7 +709,6 @@ static int query_finalize(struct kr_request *request, struct kr_query *qry, knot_pkt_t *pkt)
 			knot_edns_set_do(pkt->opt_rr);
 			knot_wire_set_cd(pkt->wire);
 		}
-		ret = edns_put(pkt);
 	}
 	return ret;
 }
@@ -1537,16 +1536,6 @@ int kr_resolve_checkout(struct kr_request *request, struct sockaddr *src,
 	}
 	struct kr_query *qry = array_tail(rplan->pending);

-	/* Run the checkout layers and cancel on failure.
-	 * The checkout layer doesn't persist the state, so canceled subrequests
-	 * don't affect the resolution or rest of the processing. */
-	int state = request->state;
-	ITERATE_LAYERS(request, qry, checkout, packet, dst, type);
-	if (request->state == KR_STATE_FAIL) {
-		request->state = state; /* Restore */
-		return kr_error(ECANCELED);
-	}
-
 #if defined(ENABLE_COOKIES)
 	/* Update DNS cookies in request. */
 	if (type == SOCK_DGRAM) { /* @todo: Add cookies also over TCP? */
@@ -1566,6 +1555,24 @@ int kr_resolve_checkout(struct kr_request *request, struct sockaddr *src,
 		return kr_error(EINVAL);
 	}

+	/* Run the checkout layers and cancel on failure.
+	 * The checkout layer doesn't persist the state, so canceled subrequests
+	 * don't affect the resolution or rest of the processing. */
+	int state = request->state;
+	ITERATE_LAYERS(request, qry, checkout, packet, dst, type);
+	if (request->state == KR_STATE_FAIL) {
+		request->state = state; /* Restore */
+		return kr_error(ECANCELED);
+	}
+
+	/* Write down OPT unless in safemode */
+	if (!(qry->flags.SAFEMODE)) {
+		ret = edns_put(packet);
+		if (ret != 0) {
+			return kr_error(EINVAL);
+		}
+	}
+
 	WITH_VERBOSE(qry) {
 		char ns_str[INET6_ADDRSTRLEN];
diff --git a/lib/resolve.h b/lib/resolve.h
index 1b8647ef5..95ee203a0 100644
--- a/lib/resolve.h
+++ b/lib/resolve.h
@@ -226,6 +226,8 @@ struct kr_request {
 	trace_log_f trace_log;          /**< Logging tracepoint */
 	trace_callback_f trace_finish;  /**< Request finish tracepoint */
 	int vars_ref;                   /**< Reference to per-request variable table. LUA_NOREF if not set. */
+	int cache_scope_len_bits;       /**< Cache scope length (bits) */
+	const uint8_t *cache_scope;     /**< Cache scope for the request */
 	knot_mm_t pool;
 };
diff --git a/tests/test_cache.c b/tests/test_cache.c
index 56e0305a8..cb1283cc4 100644
--- a/tests/test_cache.c
+++ b/tests/test_cache.c
@@ -207,8 +207,8 @@ static void test_invalid(void **state)
 	assert_int_not_equal(kr_cache_peek(cache, KR_CACHE_USER, NULL, KNOT_RRTYPE_TSIG, &entry, &timestamp), 0);
 	assert_int_not_equal(kr_cache_peek_rr(NULL, NULL, NULL, NULL, NULL), 0);
 	assert_int_not_equal(kr_cache_peek_rr(cache, NULL, NULL, NULL, NULL), 0);
-	assert_int_not_equal(kr_cache_insert_rr(cache, NULL, 0, 0, 0), 0);
-	assert_int_not_equal(kr_cache_insert_rr(NULL, NULL, 0, 0, 0), 0);
+	assert_int_not_equal(kr_cache_insert_rr(cache, NULL, 0, 0, 0, 0, 0), 0);
+	assert_int_not_equal(kr_cache_insert_rr(NULL, NULL, 0, 0, 0, 0, 0), 0);
 	assert_int_not_equal(kr_cache_insert(NULL, KR_CACHE_USER, dname,
 		KNOT_RRTYPE_TSIG, &global_fake_ce, global_namedb_data), 0);
 	assert_int_not_equal(kr_cache_insert(cache, KR_CACHE_USER, NULL,
@@ -227,7 +227,7 @@ static void test_insert_rr(void **state)
 {
 	test_random_rr(&global_rr, CACHE_TTL);
 	struct kr_cache *cache = (*state);
-	int ret = kr_cache_insert_rr(cache, &global_rr, 0, 0, CACHE_TIME);
+	int ret = kr_cache_insert_rr(cache, &global_rr, 0, 0, CACHE_TIME, 0, 0);
 	assert_int_equal(ret, 0);
 	kr_cache_sync(cache);
 }
@@ -326,7 +326,7 @@ static void test_fill(void **state)
 	for (unsigned i = 0; i < CACHE_SIZE; ++i) {
 		knot_rrset_t rr;
 		test_random_rr(&rr, CACHE_TTL);
-		ret = kr_cache_insert_rr(cache, &rr, 0, 0, CACHE_TTL - 1, 0, 0);
+		ret = kr_cache_insert_rr(cache, &rr, 0, 0, CACHE_TTL - 1, 0, 0);
 		if (ret != 0) {
 			break;
 		}
-- 
2.47.2
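
A minimal usage sketch of the framework above: a module can derive the
request's cache scope from the client's address in its .begin layer, so
that the cache code stashes and looks up scopable types (A/AAAA/CNAME)
under that subnet. The function name subnet_begin and the fixed /24
prefix length are illustrative assumptions, not part of this commit:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <libknot/mm.h>
#include "lib/layer.h"
#include "lib/resolve.h"

/* Hypothetical .begin layer: scope this request's cache entries
 * to the client's /24 (IPv4) network. */
static int subnet_begin(kr_layer_t *ctx)
{
	struct kr_request *req = ctx->req;
	const struct sockaddr *src = req->qsource.addr;
	if (src && src->sa_family == AF_INET) {
		const struct sockaddr_in *sin = (const struct sockaddr_in *)src;
		/* The scope must outlive this callback, so copy the
		 * address prefix into the request's memory pool. */
		uint8_t *scope = mm_alloc(&req->pool, 3);
		if (scope) {
			memcpy(scope, &sin->sin_addr, 3); /* first 24 bits */
			req->cache_scope = scope;
			req->cache_scope_len_bits = 24;
		}
	}
	return ctx->state;
}

With a scope set, stash_rrset() tags authoritative records of scopable
types with the client's subnet, and peek_nosync() searches that scope
first before falling back to the global scope, matching the
non-exhaustive search described in the commit message.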