From aaf4e858bf032e4632f55df7a408d06971f2d06e Mon Sep 17 00:00:00 2001
From: =?utf8?q?Vladim=C3=ADr=20=C4=8Cun=C3=A1t?=
Date: Fri, 20 Jul 2018 19:43:30 +0200
Subject: [PATCH] cache: don't require cached NS for aggressive answers

---
 lib/cache/peek.c | 110 ++++++++++++++++++++++++++++++-----------------
 1 file changed, 71 insertions(+), 39 deletions(-)

diff --git a/lib/cache/peek.c b/lib/cache/peek.c
index 9c588a7d6..919a23ea8 100644
--- a/lib/cache/peek.c
+++ b/lib/cache/peek.c
@@ -36,10 +36,14 @@ static int peek_encloser(
 	uint8_t lowest_rank, const struct kr_query *qry, struct kr_cache *cache);
 
-static int nsec_p_init(struct nsec_p *nsec_p, const uint8_t *nsec_p_raw, bool with_knot)
+static int nsec_p_init(struct nsec_p *nsec_p, knot_db_val_t nsec_p_entry, bool with_knot)
 {
-	nsec_p->raw = nsec_p_raw;
-	if (!nsec_p_raw) return kr_ok();
+	const size_t stamp_len = sizeof(uint32_t);
+	if (nsec_p_entry.len <= stamp_len) { /* plain NSEC if equal */
+		nsec_p->raw = NULL;
+		return kr_ok();
+	}
+	nsec_p->raw = (uint8_t *)nsec_p_entry.data + stamp_len;
 	nsec_p->hash = nsec_p_mkHash(nsec_p->raw);
 	if (!with_knot) return kr_ok();
 	/* Convert NSEC3 params to another format. */
@@ -58,6 +62,29 @@ static void nsec_p_cleanup(struct nsec_p *nsec_p)
 	memset(nsec_p, 0, sizeof(*nsec_p));
 }
 
+/** Compute new TTL for nsec_p entry, using SOA serial arith.
+ * \param new_ttl (optionally) write the new TTL (even if negative)
+ * \return error code, e.g. kr_error(ESTALE) */
+static int nsec_p_ttl(knot_db_val_t entry, const struct kr_query *qry, int32_t *new_ttl)
+{
+	if (!entry.data || !qry) {
+		assert(!EINVAL);
+		return kr_error(EINVAL);
+	}
+	uint32_t stamp;
+	if (!entry.len) {
+		return kr_error(ENOENT);
+	}
+	if (entry.len < sizeof(stamp)) {
+		assert(!EILSEQ);
+		return kr_error(EILSEQ);
+	}
+	memcpy(&stamp, entry.data, sizeof(stamp));
+	int32_t newttl = stamp - qry->timestamp.tv_sec;
+	if (new_ttl) *new_ttl = newttl;
+	return newttl < 0 ? kr_error(ESTALE) : kr_ok();
+}
+
 static uint8_t get_lowest_rank(const struct kr_request *req, const struct kr_query *qry)
 {
 	/* TODO: move rank handling into the iterator (DNSSEC_* flags)? */
@@ -188,21 +215,16 @@ int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt)
 	/* Try the NSEC* parameters in order, until success.
 	 * Let's not mix different parameters for NSEC* RRs in a single proof. */
 	for (int i = 0; ;) {
-		if (!el[i].len) goto cont;
-		/* OK iff the stamp is in future */
-		uint32_t stamp;
-		memcpy(&stamp, el[i].data, sizeof(stamp));
-		const int32_t remains = stamp - qry->timestamp.tv_sec; /* using SOA serial arith. */
-		if (remains >= 0 || VERBOSE_STATUS) {
-			const uint8_t *nsec_p_raw = el[i].len > sizeof(stamp)
-				? (uint8_t *)el[i].data + sizeof(stamp) : NULL;
-			nsec_p_init(&ans.nsec_p, nsec_p_raw, remains >= 0);
+		int32_t log_new_ttl;
+		ret = nsec_p_ttl(el[i], qry, &log_new_ttl);
+		if (!ret || VERBOSE_STATUS) {
+			nsec_p_init(&ans.nsec_p, el[i], !ret);
 		}
-		if (remains < 0) {
+		if (ret) {
 			VERBOSE_MSG(qry, "=> skipping zone: %s, %s, hash %x;"
-					"outdated TTL %d\n",
+					"new TTL %d, ret %d\n",
 				log_zname, (ans.nsec_p.raw ? "NSEC3" : "NSEC"),
-				(unsigned)ans.nsec_p.hash, (int)remains);
+				(unsigned)ans.nsec_p.hash, (int)log_new_ttl, ret);
 			/* no need for nsec_p_cleanup() in this case */
 			goto cont;
 		}
@@ -569,10 +591,10 @@ static int closest_NS(kr_layer_t *ctx, struct key *k, entry_list_t el)
 		need_zero = false;
 		/* More types are possible; try in order.
 		 * For non-fatal failures just "continue;" to try the next type. */
-		for (int i = ENTRY_APEX_NSECS_CNT; i < EL_LENGTH; ++i) {
+		for (int i = 0; i < EL_LENGTH; ++i) { /* TODO: factor most of inside into a function? */
 			if (!el[i].len
 				/* On a zone cut we want DS from the parent zone. */
-				|| (i == EL_NS && exact_match && qry->stype == KNOT_RRTYPE_DS)
+				|| (i <= EL_NS && exact_match && qry->stype == KNOT_RRTYPE_DS)
 				/* CNAME is interesting only if we
 				 * directly hit the name that was asked.
 				 * Note that we want it even in the DS case. */
@@ -583,26 +605,36 @@ static int closest_NS(kr_layer_t *ctx, struct key *k, entry_list_t el)
 			) {
 				continue;
 			}
-			/* ^^ LATER(optim.): not having NS but having
-			 * non-timeouted nsec_p is also OK for a zone cut. */
-			/* Find the entry for the type, check positivity, TTL */
-			const uint16_t type = EL2RRTYPE(i);
-			const struct entry_h *eh = entry_h_consistent(el[i], type);
-			if (!eh) {
-				VERBOSE_MSG(qry, "=> EH seek ret: %d\n", ret);
-				assert(false);
-				goto next_label;
-			}
-			int32_t new_ttl = get_new_ttl(eh, qry, k->zname, type,
+
+			uint16_t type;
+			int32_t new_ttl;
+			if (i < ENTRY_APEX_NSECS_CNT) {
+				type = KNOT_RRTYPE_NS;
+				ret = nsec_p_ttl(el[i], qry, &new_ttl);
+				if (ret) {
+					VERBOSE_MSG(qry,
+						"=> skipping unfit nsec_p: new TTL %d,"
+						" ret %d\n", (int)new_ttl, ret);
+					continue;
+				}
+			} else {
+				type = EL2RRTYPE(i);
+				/* Find the entry for the type, check positivity, TTL */
+				const struct entry_h *eh = entry_h_consistent(el[i], type);
+				if (!eh) {
+					VERBOSE_MSG(qry, "=> EH not consistent\n");
+					assert(false);
+					goto next_label;
+				}
+				new_ttl = get_new_ttl(eh, qry, k->zname, type,
 						qry->timestamp.tv_sec);
-			if (new_ttl < 0
-				/* Not interested in negative or bogus. */
-				|| eh->is_packet
-				/* For NS any kr_rank is accepted,
-				 * as insecure or even nonauth is OK */
-				|| (type != KNOT_RRTYPE_NS && eh->rank < rank_min)) {
-
-				WITH_VERBOSE(qry) {
+				const bool ok =
+					/* For NS any kr_rank is accepted,
+					 * as insecure or even nonauth is OK */
+					(type == KNOT_RRTYPE_NS || eh->rank >= rank_min)
+					/* Not interested in negative or bogus. */
+					&& !eh->is_packet && new_ttl >= 0;
+				WITH_VERBOSE(qry) { if (!ok) {
 					auto_free char *type_str = kr_rrtype_text(type);
 					const char *packet_str =
@@ -610,9 +642,9 @@ static int closest_NS(kr_layer_t *ctx, struct key *k, entry_list_t el)
 					VERBOSE_MSG(qry, "=> skipping unfit %s %s: "
 						"rank 0%.2o, new TTL %d\n",
 						type_str, packet_str,
-						eh->rank, new_ttl);
-				}
-				continue;
+						eh->rank, (int)new_ttl);
+				} }
+				if (!ok) continue;
 			}
 			/* We found our match. */
 			k->type = type;
-- 
2.47.2
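
Note on the new nsec_p_ttl() helper above: the first uint32_t of an nsec_p cache entry is an absolute expiry stamp, and the remaining TTL is obtained by serial-style subtraction against the query's timestamp, so a negative result maps to kr_error(ESTALE). The following standalone C sketch is not part of the patch and does not use knot-resolver's API; remaining_ttl() and its parameters are hypothetical names used only to illustrate the arithmetic.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Remaining TTL of an item whose 32-bit expiry stamp may wrap around:
 * unsigned subtraction reinterpreted as signed, in the spirit of the
 * "SOA serial arith." comment on nsec_p_ttl(). Negative means stale. */
static int32_t remaining_ttl(uint32_t expiry_stamp, time_t now)
{
	return (int32_t)(expiry_stamp - (uint32_t)now);
}

int main(void)
{
	time_t now = time(NULL);
	printf("fresh: %d\n", (int)remaining_ttl((uint32_t)now + 300, now)); /* about +300 */
	printf("stale: %d\n", (int)remaining_ttl((uint32_t)now - 60, now));  /* about -60 */
	return 0;
}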