git.ipfire.org Git - thirdparty/knot-resolver.git/commitdiff
cache: changed get_new_ttl private API to allow custom timestamp
author: Marek Vavruša <marek@vavrusa.com>
Wed, 14 Mar 2018 19:10:46 +0000 (12:10 -0700)
committer: Vladimír Čunát <vladimir.cunat@nic.cz>
Mon, 23 Apr 2018 12:34:39 +0000 (14:34 +0200)
Previously, the API depended on the qry object, which only makes sense during
resolution of requests, not when manipulating the cache outside of it.

lib/cache/api.c
lib/cache/entry_list.c
lib/cache/entry_pkt.c
lib/cache/impl.h
lib/cache/nsec1.c

index b0ee0cab0ac1225892114a112826f85a32065c4d..4d14b9da55a9098b3a79e7410f8724fc64923147 100644 (file)
@@ -226,17 +226,16 @@ struct entry_h * entry_h_consistent(knot_db_val_t data, uint16_t type)
        return ok ? /*const-cast*/(struct entry_h *)eh : NULL;
 }
 
-
 int32_t get_new_ttl(const struct entry_h *entry, const struct kr_query *qry,
-                   const knot_dname_t *owner, uint16_t type)
+                    const knot_dname_t *owner, uint16_t type, uint32_t now)
 {
-       int32_t diff = qry->timestamp.tv_sec - entry->time;
+       int32_t diff = now - entry->time;
        if (diff < 0) {
                /* We may have obtained the record *after* the request started. */
                diff = 0;
        }
        int32_t res = entry->ttl - diff;
-       if (res < 0 && owner && qry->stale_cb) {
+       if (res < 0 && owner && qry && qry->stale_cb) {
                /* Stale-serving decision.  FIXME: modularize or make configurable, etc. */
                int res_stale = qry->stale_cb(res, owner, type, qry);
                if (res_stale >= 0)
@@ -244,18 +243,14 @@ int32_t get_new_ttl(const struct entry_h *entry, const struct kr_query *qry,
        }
        return res;
 }
+
 int32_t kr_cache_ttl(const struct kr_cache_p *peek, const struct kr_query *qry,
                     const knot_dname_t *name, uint16_t type)
 {
        const struct entry_h *eh = peek->raw_data;
-       return get_new_ttl(eh, qry, name, type);
+       return get_new_ttl(eh, qry, name, type, qry->timestamp.tv_sec);
 }
 
-
-
-
-
-
 /** Check that no label contains a zero character, incl. a log trace.
  *
  * We refuse to work with those, as LF and our cache keys might become ambiguous.
@@ -417,8 +412,8 @@ static int cache_peek_real(kr_layer_t *ctx, knot_pkt_t *pkt)
                }
                break;
        case KNOT_RRTYPE_CNAME: {
-               const uint32_t new_ttl = get_new_ttl(val_cut.data, qry,
-                                                    qry->sname, KNOT_RRTYPE_CNAME);
+               int32_t new_ttl = get_new_ttl(val_cut.data, 
+                       qry, qry->sname, KNOT_RRTYPE_CNAME, qry->timestamp.tv_sec);
                ret = answer_simple_hit(ctx, pkt, KNOT_RRTYPE_CNAME, val_cut.data,
                                        val_cut.data + val_cut.len, new_ttl);
                /* TODO: ^^ cumbersome code; we also recompute the TTL */
@@ -577,7 +572,7 @@ do_soa:
                        return ctx->state;
                }
                /* Check if the record is OK. */
-               int32_t new_ttl = get_new_ttl(eh, qry, k->zname, KNOT_RRTYPE_SOA);
+               int32_t new_ttl = get_new_ttl(eh, qry, k->zname, KNOT_RRTYPE_SOA, qry->timestamp.tv_sec);
                if (new_ttl < 0 || eh->rank < lowest_rank || eh->is_packet) {
                        VERBOSE_MSG(qry, "=> SOA unfit %s: rank 0%.2o, new TTL %d\n",
                                        (eh->is_packet ? "packet" : "RR"),
@@ -718,7 +713,8 @@ static int stash_rrset_precond(const knot_rrset_t *rr, const struct kr_query *qr
        return 1/*proceed*/;
 }
 
-static ssize_t stash_rrset(struct kr_cache *cache, const struct kr_query *qry, const knot_rrset_t *rr, const knot_rrset_t *rr_sigs, uint32_t timestamp, uint8_t rank)
+static ssize_t stash_rrset(struct kr_cache *cache, const struct kr_query *qry, const knot_rrset_t *rr,
+                              const knot_rrset_t *rr_sigs, uint32_t timestamp, uint8_t rank)
 {
        assert(stash_rrset_precond(rr, qry) > 0);
        if (!cache) {
@@ -774,7 +770,7 @@ static ssize_t stash_rrset(struct kr_cache *cache, const struct kr_query *qry, c
 
        /* Prepare raw memory for the new entry. */
        ret = entry_h_splice(&val_new_entry, rank, key, k->type, rr->type,
-                               rr->owner, qry, cache);
+                               rr->owner, qry, cache, timestamp);
        if (ret) return kr_ok(); /* some aren't really errors */
        assert(val_new_entry.data);
 
@@ -923,7 +919,7 @@ static int found_exact_hit(kr_layer_t *ctx, knot_pkt_t *pkt, knot_db_val_t val,
                // LATER(optim): perhaps optimize the zone cut search
        }
 
-       int32_t new_ttl = get_new_ttl(eh, qry, qry->sname, qry->stype);
+       int32_t new_ttl = get_new_ttl(eh, qry, qry->sname, qry->stype, qry->timestamp.tv_sec);
        if (new_ttl < 0 || eh->rank < lowest_rank) {
                /* Positive record with stale TTL or bad rank.
                 * LATER(optim.): It's unlikely that we find a negative one,
@@ -979,7 +975,7 @@ static int try_wild(struct key *k, struct answer *ans, const knot_dname_t *clenc
                return kr_error(ret);
                // LATER: recovery in case of error, perhaps via removing the entry?
        }
-       int32_t new_ttl = get_new_ttl(eh, qry, qry->sname, type);
+       int32_t new_ttl = get_new_ttl(eh, qry, qry->sname, type, qry->timestamp.tv_sec);
                /* ^^ here we use the *expanded* wildcard name */
        if (new_ttl < 0 || eh->rank < lowest_rank || eh->is_packet) {
                /* Wildcard record with stale TTL, bad rank or packet.  */
@@ -1123,7 +1119,7 @@ static knot_db_val_t closest_NS(kr_layer_t *ctx, struct key *k)
                                assert(false);
                                goto next_label;
                        }
-                       int32_t new_ttl = get_new_ttl(eh, qry, k->zname, type);
+                       int32_t new_ttl = get_new_ttl(eh, qry, k->zname, type, qry->timestamp.tv_sec);
                        if (new_ttl < 0
                            /* Not interested in negative or bogus. */
                            || eh->is_packet
index 860ee1b3b47ed87ad939432561730f9996e699ea..088dac4c2aa5bf683dc5d1722f0df14624072488 100644 (file)
@@ -121,7 +121,7 @@ int entry_h_splice(
        knot_db_val_t *val_new_entry, uint8_t rank,
        const knot_db_val_t key, const uint16_t ktype, const uint16_t type,
        const knot_dname_t *owner/*log only*/,
-       const struct kr_query *qry, struct kr_cache *cache)
+       const struct kr_query *qry, struct kr_cache *cache, uint32_t timestamp)
 {
        static const knot_db_val_t VAL_EMPTY = { NULL, 0 };
        const bool ok = val_new_entry && val_new_entry->len > 0;
@@ -164,7 +164,7 @@ int entry_h_splice(
                 * (whenever TTL nears expiration).
                 * Stale-serving is NOT considered, but TTL 1 would be considered
                 * as expiring anyway, ... */
-               int32_t old_ttl = get_new_ttl(eh_orig, qry, NULL, 0);
+               int32_t old_ttl = get_new_ttl(eh_orig, qry, NULL, 0, timestamp);
                if (old_ttl > 0 && !is_expiring(eh_orig->ttl, old_ttl)
                    && rank <= eh_orig->rank) {
                        WITH_VERBOSE(qry) {
index b6bc24ca587a55998ba870606a36bcb64a81195e..972bae62fcf962cd5e0013fe99e2655105d000d4 100644 (file)
@@ -146,7 +146,7 @@ void stash_pkt(const knot_pkt_t *pkt, const struct kr_query *qry,
        /* Prepare raw memory for the new entry and fill it. */
        struct kr_cache *cache = &req->ctx->cache;
        ret = entry_h_splice(&val_new_entry, rank, key, k->type, pkt_type,
-                               owner, qry, cache);
+                               owner, qry, cache, qry->timestamp.tv_sec);
        if (ret) return; /* some aren't really errors */
        assert(val_new_entry.data);
        struct entry_h *eh = val_new_entry.data;
index 60ee9fe5dcd33610be8018b5c839c951f7cccaa2..9b0003430b4c5c38a5ac5d9dfa9c6034d5b56256 100644 (file)
@@ -120,7 +120,7 @@ int entry_h_splice(
        knot_db_val_t *val_new_entry, uint8_t rank,
        const knot_db_val_t key, const uint16_t ktype, const uint16_t type,
        const knot_dname_t *owner/*log only*/,
-       const struct kr_query *qry, struct kr_cache *cache);
+       const struct kr_query *qry, struct kr_cache *cache, uint32_t timestamp);
 
 
 /* Packet caching; implementation in ./entry_pkt.c */
@@ -152,7 +152,7 @@ static inline bool is_expiring(uint32_t orig_ttl, uint32_t new_ttl)
  * @param type for stale-serving.
  */
 int32_t get_new_ttl(const struct entry_h *entry, const struct kr_query *qry,
-                   const knot_dname_t *owner, uint16_t type);
+                    const knot_dname_t *owner, uint16_t type, uint32_t now);
 
 /* RRset (de)materialization; implementation in ./entry_rr.c */
 
index fb64f51e62f3b3ba54379c666909d93499c03051..eb19ac1c38b7c6d57508f8e443525b9521d045e8 100644 (file)
@@ -187,7 +187,7 @@ static const char * find_leq_NSEC1(struct kr_cache *cache, const struct kr_query
        }
        /* FIXME(stale): passing just zone name instead of owner, as we don't
         * have it reconstructed at this point. */
-       int32_t new_ttl_ = get_new_ttl(eh, qry, k->zname, KNOT_RRTYPE_NSEC);
+       int32_t new_ttl_ = get_new_ttl(eh, qry, k->zname, KNOT_RRTYPE_NSEC, qry->timestamp.tv_sec);
        if (new_ttl_ < 0 || !kr_rank_test(eh->rank, KR_RANK_SECURE)) {
                return "range search found stale or insecure entry";
                /* TODO: remove the stale record *and* retry,