#define KEY_SIZE (KEY_HSIZE + KNOT_DNAME_MAXLEN)
-/** @internal Forward declarations of the implementation details */
-static ssize_t stash_rrset(struct kr_cache *cache, const struct kr_query *qry, const knot_rrset_t *rr,
- const knot_rrset_t *rr_sigs, uint32_t timestamp, uint8_t rank, trie_t *nsec_pmap);
+/** @internal Forward declarations of the implementation details
+ * \param[out] has_optout Set *has_optout = true when encountering an opt-out
+ *		NSEC3 (the parameter is optional and may be NULL). */
+static ssize_t stash_rrset(struct kr_cache *cache, const struct kr_query *qry,
+ const knot_rrset_t *rr, const knot_rrset_t *rr_sigs, uint32_t timestamp,
+ uint8_t rank, trie_t *nsec_pmap, bool *has_optout);
/** Preliminary checks before stash_rrset(). Don't call if returns <= 0. */
static int stash_rrset_precond(const knot_rrset_t *rr, const struct kr_query *qry/*logs*/);
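/* A hedged usage sketch, not part of this patch: how a caller can combine
 * stash_rrset_precond() with the extended stash_rrset().  The helper name,
 * the NULL rr_sigs and the KR_RANK_INSECURE rank are illustrative
 * assumptions; has_optout may also be passed as NULL when unwanted. */
static ssize_t stash_one_rrset(struct kr_cache *cache, const struct kr_query *qry,
		const knot_rrset_t *rr, trie_t *nsec_pmap)
{
	if (stash_rrset_precond(rr, qry) <= 0) {
		return kr_ok(); /* not stashable, per the contract above */
	}
	bool optout = false;
	ssize_t written = stash_rrset(cache, qry, rr, NULL/*rr_sigs*/,
			qry->timestamp.tv_sec, KR_RANK_INSECURE,
			nsec_pmap, &optout);
	/* optout == true means an opt-out NSEC3 was encountered (and skipped). */
	return written;
}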
if (err <= 0) {
return kr_ok();
}
- ssize_t written = stash_rrset(cache, NULL, rr, rrsig, timestamp, rank, NULL);
+ ssize_t written = stash_rrset(cache, NULL, rr, rrsig, timestamp, rank, NULL, NULL);
if (written >= 0) {
return kr_ok();
}
/** The body of the stashing cycle, taken out to reduce indentation. \return error code. */
static int stash_rrarray_entry(ranked_rr_array_t *arr, int arr_i,
const struct kr_query *qry, struct kr_cache *cache,
- int *unauth_cnt, trie_t *nsec_pmap);
+ int *unauth_cnt, trie_t *nsec_pmap, bool *has_optout);
/** Stash a single nsec_p. \return 0 (errors are ignored). */
static int stash_nsec_p(const knot_dname_t *dname, const char *nsec_p_v,
struct kr_request *req);
		assert(!ENOMEM);	/* debug builds abort here: allocation failed */
goto finally;
}
+ bool has_optout = false;
+	/* ^^ The DNSSEC_OPTOUT flag does not get set in cases like `com. A`,
+	 * but we currently don't stash a separate NSEC3 proving that. */
for (int psec = KNOT_ANSWER; psec <= KNOT_ADDITIONAL; ++psec) {
ranked_rr_array_t *arr = selected[psec];
/* uncached entries are located at the end */
continue;
/* TODO: probably safe to break but maybe not worth it */
}
- int ret = stash_rrarray_entry(arr, i, qry, cache, &unauth_cnt, nsec_pmap);
+ int ret = stash_rrarray_entry(arr, i, qry, cache, &unauth_cnt,
+ nsec_pmap, &has_optout);
if (ret) {
VERBOSE_MSG(qry, "=> stashing RRs errored out\n");
goto finally;
/* LATER(optim.): typically we also have corresponding NS record in the list,
* so we might save a cache operation. */
- stash_pkt(pkt, qry, req);
+ stash_pkt(pkt, qry, req, has_optout);
finally:
if (unauth_cnt) {
return 1/*proceed*/;
}
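/* The out-parameter threading above in miniature (illustrative sketch;
 * every name is hypothetical): each callee may latch the flag, and it is
 * consumed exactly once afterwards, mirroring how has_optout flows from
 * stash_rrset() through stash_rrarray_entry() into stash_pkt(). */
static void check_one(int value, bool *flag)
{
	if (flag && value < 0)
		*flag = true;	/* latch: set once, never reset */
}
static bool any_negative(const int *values, int count)
{
	bool flag = false;
	for (int i = 0; i < count; ++i)
		check_one(values[i], &flag);
	return flag;	/* single consumer, like stash_pkt(..., has_optout) */
}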
-static ssize_t stash_rrset(struct kr_cache *cache, const struct kr_query *qry, const knot_rrset_t *rr,
- const knot_rrset_t *rr_sigs, uint32_t timestamp, uint8_t rank, trie_t *nsec_pmap)
+static ssize_t stash_rrset(struct kr_cache *cache, const struct kr_query *qry,
+ const knot_rrset_t *rr, const knot_rrset_t *rr_sigs, uint32_t timestamp,
+ uint8_t rank, trie_t *nsec_pmap, bool *has_optout)
{
//FIXME review entry_h
	assert(stash_rrset_precond(rr, qry) > 0);
	/* nsec_pmap may be NULL, e.g. on the kr_cache_insert_rr() path above. */
knot_db_val_t key;
switch (rr->type) {
case KNOT_RRTYPE_NSEC3:
- if (rr->rrs.rr_count != 1
- || (KNOT_NSEC3_FLAG_OPT_OUT & knot_nsec3_flags(&rr->rrs, 0))) {
- /* Skip "suspicious" or opt-out NSEC3 sets. */
+ /* Skip "suspicious" or opt-out NSEC3 sets. */
+ if (rr->rrs.rr_count != 1) return kr_ok();
+ if (KNOT_NSEC3_FLAG_OPT_OUT & knot_nsec3_flags(&rr->rrs, 0)) {
+ if (has_optout) *has_optout = true;
return kr_ok();
}
/* fall through */
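/* A hedged sketch isolating the test above (hypothetical helper, not part
 * of this patch).  RFC 5155 §3.1.2.1 defines the Opt-Out bit of the NSEC3
 * Flags field, which libknot exposes via knot_nsec3_flags(). */
static bool nsec3_is_optout(const knot_rrset_t *rr)
{
	return rr->type == KNOT_RRTYPE_NSEC3
		&& rr->rrs.rr_count == 1
		&& (knot_nsec3_flags(&rr->rrs, 0) & KNOT_NSEC3_FLAG_OPT_OUT);
}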
static int stash_rrarray_entry(ranked_rr_array_t *arr, int arr_i,
const struct kr_query *qry, struct kr_cache *cache,
- int *unauth_cnt, trie_t *nsec_pmap)
+ int *unauth_cnt, trie_t *nsec_pmap, bool *has_optout)
{
ranked_rr_array_entry_t *entry = arr->at[arr_i];
if (entry->cached) {
}
ssize_t written = stash_rrset(cache, qry, rr, rr_sigs, qry->timestamp.tv_sec,
- entry->rank, nsec_pmap);
+ entry->rank, nsec_pmap, has_optout);
if (written < 0) {
return (int) written;
}
}
-
void stash_pkt(const knot_pkt_t *pkt, const struct kr_query *qry,
- const struct kr_request *req)
+ const struct kr_request *req, const bool has_optout)
{
/* In some cases, stash also the packet. */
	const bool is_negative = kr_response_classify(pkt)
				& (PKT_NODATA|PKT_NXDOMAIN);
const bool want_pkt = qry->flags.DNSSEC_BOGUS
|| (is_negative && (qry->flags.DNSSEC_INSECURE || !qry->flags.DNSSEC_WANT));
- if (!(want_pkt || qry->flags.DNSSEC_OPTOUT) || !knot_wire_get_aa(pkt->wire)
+ if (!(want_pkt || has_optout) || !knot_wire_get_aa(pkt->wire)
|| pkt->parsed != pkt->size /* malformed packet; still can't detect KNOT_EFEWDATA */
) {
return;
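/* The gating above restated as a predicate (illustrative sketch only):
 * stash the whole packet iff it is authoritative and fully parsed, and
 * either "wanted" (bogus, or negative while insecure/unvalidated) or
 * accompanied by an opt-out NSEC3. */
static bool pkt_stash_wanted(const knot_pkt_t *pkt, const struct kr_query *qry,
		bool is_negative, bool has_optout)
{
	const bool want_pkt = qry->flags.DNSSEC_BOGUS
		|| (is_negative && (qry->flags.DNSSEC_INSECURE
					|| !qry->flags.DNSSEC_WANT));
	return (want_pkt || has_optout)
		&& knot_wire_get_aa(pkt->wire)
		&& pkt->parsed == pkt->size;
}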
kr_rank_set(&rank, KR_RANK_INSECURE);
} else if (!qry->flags.DNSSEC_WANT) {
/* no TAs at all, leave _RANK_AUTH */
- } else if (qry->flags.DNSSEC_OPTOUT) {
+ } else if (has_optout) {
/* FIXME XXX review OPTOUT in this function again! */
/* All bad cases should be filtered above,
* at least the same way as pktcache in kresd 1.5.x. */
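/* The rank ladder above as a pure function (hedged sketch: the guard of the
 * KR_RANK_INSECURE branch and the body of the opt-out branch are not visible
 * in this hunk, so both are assumptions). */
static uint8_t pkt_rank_sketch(const struct kr_query *qry, bool has_optout)
{
	uint8_t rank = KR_RANK_AUTH;		/* the default "_RANK_AUTH" */
	if (qry->flags.DNSSEC_INSECURE) {	/* assumed guard */
		kr_rank_set(&rank, KR_RANK_INSECURE);
	} else if (!qry->flags.DNSSEC_WANT) {
		/* no TAs at all, leave KR_RANK_AUTH */
	} else if (has_optout) {
		kr_rank_set(&rank, KR_RANK_SECURE);	/* assumed outcome */
	}
	return rank;
}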