return ret;
}
-int kr_cache_peek_rr(struct kr_cache_txn *txn, knot_rrset_t *rr, uint16_t *rank, uint32_t *timestamp)
+int kr_cache_peek_rr(struct kr_cache_txn *txn, knot_rrset_t *rr, uint8_t *rank, uint8_t *flags, uint32_t *timestamp)
{
if (!txn_is_valid(txn) || !rr || !timestamp) {
return kr_error(EINVAL);
if (rank) {
*rank = entry->rank;
}
+ if (flags) {
+ *flags = entry->flags;
+ }
rr->rrs.rr_count = entry->count;
rr->rrs.data = entry->data;
return kr_ok();
return kr_ok();
}
-int kr_cache_insert_rr(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint16_t rank, uint32_t timestamp)
+int kr_cache_insert_rr(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint8_t rank, uint8_t flags, uint32_t timestamp)
{
if (!txn_is_valid(txn) || !rr) {
return kr_error(EINVAL);
.timestamp = timestamp,
.ttl = 0,
.rank = rank,
+ .flags = flags,
.count = rr->rrs.rr_count
};
knot_rdata_t *rd = rr->rrs.data;
return kr_cache_insert(txn, KR_CACHE_RR, rr->owner, rr->type, &header, data);
}
-int kr_cache_peek_rrsig(struct kr_cache_txn *txn, knot_rrset_t *rr, uint16_t *rank, uint32_t *timestamp)
+int kr_cache_peek_rrsig(struct kr_cache_txn *txn, knot_rrset_t *rr, uint8_t *rank, uint8_t *flags, uint32_t *timestamp)
{
if (!txn_is_valid(txn) || !rr || !timestamp) {
return kr_error(EINVAL);
if (rank) {
*rank = entry->rank;
}
+ if (flags) {
+ *flags = entry->flags;
+ }
rr->type = KNOT_RRTYPE_RRSIG;
rr->rrs.rr_count = entry->count;
rr->rrs.data = entry->data;
return kr_ok();
}
-int kr_cache_insert_rrsig(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint16_t rank, uint32_t timestamp)
+int kr_cache_insert_rrsig(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint8_t rank, uint8_t flags, uint32_t timestamp)
{
if (!txn_is_valid(txn) || !rr) {
return kr_error(EINVAL);
.timestamp = timestamp,
.ttl = 0,
.rank = rank,
+ .flags = flags,
.count = rr->rrs.rr_count
};
for (uint16_t i = 0; i < rr->rrs.rr_count; ++i) {
/* @note Rank must not exceed 6 bits */
};
+/** Cache entry flags */
+enum kr_cache_flag {
+ KR_CACHE_FLAG_NONE = 0,
+	KR_CACHE_FLAG_WCARD_PROOF = 1 /* Entry contains either a packet with a
+	                               * wildcard answer, or a record for which a
+	                               * wildcard expansion proof is needed */
+};
+
+
/**
* Serialized form of the RRSet with inception timestamp and maximum TTL.
*/
uint32_t timestamp;
uint32_t ttl;
uint16_t count;
- uint16_t rank;
+ uint8_t rank;
+ uint8_t flags;
uint8_t data[];
};
* @param txn transaction instance
* @param rr query RRSet (its rdataset may be changed depending on the result)
* @param rank entry rank will be stored in this variable
+ * @param flags entry flags will be stored in this variable (may be NULL)
* @param timestamp current time (will be replaced with drift if successful)
* @return 0 or an errcode
*/
KR_EXPORT
-int kr_cache_peek_rr(struct kr_cache_txn *txn, knot_rrset_t *rr, uint16_t *rank, uint32_t *timestamp);
+int kr_cache_peek_rr(struct kr_cache_txn *txn, knot_rrset_t *rr, uint8_t *rank, uint8_t *flags, uint32_t *timestamp);
/**
* Clone read-only RRSet and adjust TTLs.
* @param txn transaction instance
* @param rr inserted RRSet
* @param rank rank of the data
+ * @param flags additional flags for the data
* @param timestamp current time
* @return 0 or an errcode
*/
KR_EXPORT
-int kr_cache_insert_rr(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint16_t rank, uint32_t timestamp);
+int kr_cache_insert_rr(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint8_t rank, uint8_t flags, uint32_t timestamp);
/**
* Peek the cache for the given RRset signature (name, type)
* @param txn transaction instance
* @param rr query RRSET (its rdataset and type may be changed depending on the result)
* @param rank entry rank will be stored in this variable
+ * @param flags entry flags will be stored in this variable (may be NULL)
* @param timestamp current time (will be replaced with drift if successful)
* @return 0 or an errcode
*/
KR_EXPORT
-int kr_cache_peek_rrsig(struct kr_cache_txn *txn, knot_rrset_t *rr, uint16_t *rank, uint32_t *timestamp);
+int kr_cache_peek_rrsig(struct kr_cache_txn *txn, knot_rrset_t *rr, uint8_t *rank, uint8_t *flags, uint32_t *timestamp);
/**
* Insert the selected RRSIG RRSet of the selected type covered into cache, replacing any existing data.
* @param txn transaction instance
* @param rr inserted RRSIG RRSet
* @param rank rank of the data
+ * @param flags additional flags for the data
* @param timestamp current time
* @return 0 or an errcode
*/
KR_EXPORT
-int kr_cache_insert_rrsig(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint16_t rank, uint32_t timestamp);
+int kr_cache_insert_rrsig(struct kr_cache_txn *txn, const knot_rrset_t *rr, uint8_t rank, uint8_t flags, uint32_t timestamp);
return vctx->result;
}
-int kr_section_check_wcard(kr_rrset_validation_ctx_t *vctx)
-{
- const knot_pkt_t *pkt = vctx->pkt;
- knot_section_t section_id = vctx->section_id;
- const knot_dname_t *zone_name = vctx->zone_name;
- const knot_pktsection_t *sec = knot_pkt_section(pkt, section_id);
- for (unsigned i = 0; i < sec->count; ++i) {
- const knot_rrset_t *rr = knot_pkt_rr(sec, i);
- if (rr->type == KNOT_RRTYPE_RRSIG) {
- continue;
- }
- if ((rr->type == KNOT_RRTYPE_NS) && (vctx->section_id == KNOT_AUTHORITY)) {
- continue;
- }
- if (!knot_dname_in(zone_name, rr->owner)) {
- continue;
- }
- int covered_labels = knot_dname_labels(rr->owner, NULL);
- if (knot_dname_is_wildcard(rr->owner)) {
- /* The asterisk does not count, RFC4034 3.1.3, paragraph 3. */
- --covered_labels;
- }
- for (unsigned j = 0; j < sec->count; ++j) {
- const knot_rrset_t *rrsig = knot_pkt_rr(sec, j);
- if (rrsig->type != KNOT_RRTYPE_RRSIG) {
- continue;
- }
- if ((rr->rclass != rrsig->rclass) || !knot_dname_is_equal(rr->owner, rrsig->owner)) {
- continue;
- }
- for (uint16_t k = 0; k < rrsig->rrs.rr_count; ++k) {
- if (knot_rrsig_type_covered(&rrsig->rrs, k) != rr->type) {
- continue;
- }
- int rrsig_labels = knot_rrsig_labels(&rrsig->rrs, k);
- if (rrsig_labels > covered_labels) {
- return kr_error(EINVAL);
- }
- if (rrsig_labels < covered_labels) {
- vctx->flags |= KR_DNSSEC_VFLG_WEXPAND;
- }
- }
- }
- }
- return kr_ok();
-}
-
int kr_dnskeys_trusted(kr_rrset_validation_ctx_t *vctx, const knot_rrset_t *ta)
{
const knot_pkt_t *pkt = vctx->pkt;
int kr_dnssec_key_match(const uint8_t *key_a_rdata, size_t key_a_rdlen,
const uint8_t *key_b_rdata, size_t key_b_rdlen);
-/** Return 0 if wildcard expansion occurs in specified section.
- * @param vctx Pointer to validation context.
- * @note vctx->keys, vctx->timestamp, vctx->has_nsec3 has no meanings.
- * @return 0 if wildcard expansion occurs or an error code.
- */
-KR_EXPORT KR_PURE
-int kr_section_check_wcard(kr_rrset_validation_ctx_t *vctx);
-
/**
* Construct a DNSSEC key.
* @param key Pointer to be set to newly created DNSSEC key.
}
static int loot_cache_pkt(struct kr_cache_txn *txn, knot_pkt_t *pkt, const knot_dname_t *qname,
- uint16_t rrtype, bool want_secure, uint32_t timestamp)
+ uint16_t rrtype, bool want_secure, uint32_t timestamp, uint8_t *flags)
{
struct kr_cache_entry *entry = NULL;
int ret = kr_cache_peek(txn, KR_CACHE_PKT, qname, rrtype, &entry, ×tamp);
}
}
+ /* Copy cache entry flags */
+ if (flags) {
+ *flags = entry->flags;
+ }
+
return ret;
}
/** @internal Try to find a shortcut directly to searched packet. */
-static int loot_pktcache(struct kr_cache_txn *txn, knot_pkt_t *pkt, struct kr_query *qry)
+static int loot_pktcache(struct kr_cache_txn *txn, knot_pkt_t *pkt, struct kr_query *qry, uint8_t *flags)
{
uint32_t timestamp = qry->timestamp.tv_sec;
const knot_dname_t *qname = qry->sname;
uint16_t rrtype = qry->stype;
const bool want_secure = (qry->flags & QUERY_DNSSEC_WANT);
- return loot_cache_pkt(txn, pkt, qname, rrtype, want_secure, timestamp);
+ return loot_cache_pkt(txn, pkt, qname, rrtype, want_secure, timestamp, flags);
}
static int pktcache_peek(knot_layer_t *ctx, knot_pkt_t *pkt)
}
/* Fetch either answer to original or minimized query */
- int ret = loot_pktcache(&txn, pkt, qry);
+ uint8_t flags = 0;
+ int ret = loot_pktcache(&txn, pkt, qry, &flags);
kr_cache_txn_abort(&txn);
if (ret == 0) {
DEBUG_MSG(qry, "=> satisfied from cache\n");
qry->flags |= QUERY_CACHED|QUERY_NO_MINIMIZE;
+ if (flags & KR_CACHE_FLAG_WCARD_PROOF) {
+ qry->flags |= QUERY_DNSSEC_WEXPAND;
+ }
pkt->parsed = pkt->size;
knot_wire_set_qr(pkt->wire);
knot_wire_set_aa(pkt->wire);
.timestamp = qry->timestamp.tv_sec,
.ttl = ttl,
.rank = KR_RANK_BAD,
+ .flags = KR_CACHE_FLAG_NONE,
.count = data.len
};
header.rank = KR_RANK_INSECURE;
}
+ /* Set cache flags */
+ if (qry->flags & QUERY_DNSSEC_WANT) {
+ header.flags |= KR_CACHE_FLAG_WCARD_PROOF;
+ }
+
/* Check if we can replace (allow current or better rank, SECURE is always accepted). */
if (header.rank < KR_RANK_SECURE) {
int cached_rank = kr_cache_peek_rank(&txn, KR_CACHE_PKT, qname, qtype, header.timestamp);
}
static int loot_rr(struct kr_cache_txn *txn, knot_pkt_t *pkt, const knot_dname_t *name,
- uint16_t rrclass, uint16_t rrtype, struct kr_query *qry, uint16_t *rank, bool fetch_rrsig)
+ uint16_t rrclass, uint16_t rrtype, struct kr_query *qry,
+ uint8_t *rank, uint8_t *flags, bool fetch_rrsig)
{
/* Check if record exists in cache */
int ret = 0;
knot_rrset_t cache_rr;
knot_rrset_init(&cache_rr, (knot_dname_t *)name, rrtype, rrclass);
if (fetch_rrsig) {
- ret = kr_cache_peek_rrsig(txn, &cache_rr, rank, &drift);
+ ret = kr_cache_peek_rrsig(txn, &cache_rr, rank, flags, &drift);
} else {
- ret = kr_cache_peek_rr(txn, &cache_rr, rank, &drift);
+ ret = kr_cache_peek_rr(txn, &cache_rr, rank, flags, &drift);
}
if (ret != 0) {
return ret;
return ret;
}
/* Lookup direct match first */
- uint16_t rank = 0;
- ret = loot_rr(&txn, pkt, qry->sname, qry->sclass, rrtype, qry, &rank, 0);
+ uint8_t rank = 0;
+ ret = loot_rr(&txn, pkt, qry->sname, qry->sclass, rrtype, qry, &rank, NULL, 0);
if (ret != 0 && rrtype != KNOT_RRTYPE_CNAME) { /* Chase CNAME if no direct hit */
rrtype = KNOT_RRTYPE_CNAME;
- ret = loot_rr(&txn, pkt, qry->sname, qry->sclass, rrtype, qry, &rank, 0);
+ ret = loot_rr(&txn, pkt, qry->sname, qry->sclass, rrtype, qry, &rank, NULL, 0);
}
/* Record is flagged as INSECURE => doesn't have RRSIG. */
if (ret == 0 && (rank & KR_RANK_INSECURE)) {
qry->flags &= ~QUERY_DNSSEC_WANT;
/* Record may have RRSIG, try to find it. */
} else if (ret == 0 && dobit) {
- ret = loot_rr(&txn, pkt, qry->sname, qry->sclass, rrtype, qry, &rank, true);
+ ret = loot_rr(&txn, pkt, qry->sname, qry->sclass, rrtype, qry, &rank, NULL, true);
}
kr_cache_txn_abort(&txn);
return ret;
uint32_t min_ttl;
};
-static int commit_rrsig(struct rrcache_baton *baton, uint16_t rank, knot_rrset_t *rr)
+static int commit_rrsig(struct rrcache_baton *baton, uint8_t rank, uint8_t flags, knot_rrset_t *rr)
{
/* If not doing secure resolution, ignore (unvalidated) RRSIGs. */
if (!(baton->qry->flags & QUERY_DNSSEC_WANT)) {
return kr_ok();
}
/* Commit covering RRSIG to a separate cache namespace. */
- return kr_cache_insert_rrsig(baton->txn, rr, rank, baton->timestamp);
+ return kr_cache_insert_rrsig(baton->txn, rr, rank, flags, baton->timestamp);
}
static int commit_rr(const char *key, void *val, void *data)
}
/* Save RRSIG in a special cache. */
- uint16_t rank = KEY_FLAG_RANK(key);
+ uint8_t rank = KEY_FLAG_RANK(key);
/* Non-authoritative NSs should never be trusted,
* it may be present in an otherwise secure answer but it
* is only a hint for local state. */
rank |= KR_RANK_INSECURE;
}
if (KEY_COVERING_RRSIG(key)) {
- return commit_rrsig(baton, rank, rr);
+ return commit_rrsig(baton, rank, KR_CACHE_FLAG_NONE, rr);
}
/* Accept only better rank (if not overriding) */
if (!(rank & KR_RANK_SECURE) && !(baton->qry->flags & QUERY_NO_CACHE)) {
knot_rrset_t query_rr;
knot_rrset_init(&query_rr, rr->owner, rr->type, rr->rclass);
- return kr_cache_insert_rr(baton->txn, rr, rank, baton->timestamp);
+ return kr_cache_insert_rr(baton->txn, rr, rank, KR_CACHE_FLAG_NONE, baton->timestamp);
}
static int stash_commit(map_t *stash, struct kr_query *qry, struct kr_cache_txn *txn, struct kr_request *req)
return ret;
}
-static int check_wcard_expanded(struct kr_query *qry, knot_pkt_t *pkt, knot_section_t section_id)
-{
- kr_rrset_validation_ctx_t vctx = {
- .pkt = pkt,
- .section_id = section_id,
- .keys = NULL,
- .zone_name = qry->zone_cut.name,
- .timestamp = 0,
- .has_nsec3 = false,
- .flags = 0,
- .result = 0
- };
- int ret = kr_section_check_wcard(&vctx);
- if (ret != 0) {
- return ret;
- }
- if (vctx.flags & KR_DNSSEC_VFLG_WEXPAND) {
- qry->flags |= QUERY_DNSSEC_WEXPAND;
- }
- return kr_ok();
-}
-
-
static int validate_keyset(struct kr_query *qry, knot_pkt_t *answer, bool has_nsec3)
{
/* Merge DNSKEY records from answer that are below/at current cut. */
* Do not revalidate data from cache, as it's already trusted. */
if (!(qry->flags & QUERY_CACHED)) {
ret = validate_records(qry, pkt, req->rplan.pool, has_nsec3);
- } else {
- /* Records already were validated.
- * Check if wildcard answer. */
- ret = check_wcard_expanded(qry, pkt, KNOT_ANSWER);
- }
- if (ret != 0) {
- DEBUG_MSG(qry, "<= couldn't validate RRSIGs\n");
- qry->flags |= QUERY_DNSSEC_BOGUS;
- return KNOT_STATE_FAIL;
+ if (ret != 0) {
+ DEBUG_MSG(qry, "<= couldn't validate RRSIGs\n");
+ qry->flags |= QUERY_DNSSEC_BOGUS;
+ return KNOT_STATE_FAIL;
+ }
}
+ /* Check if wildcard expansion detected for final query.
+ * If yes, copy authority. */
if ((qry->parent == NULL) && (qry->flags & QUERY_DNSSEC_WEXPAND)) {
- /* Wildcard expansion detected for final query.
- * Copy authority. */
const knot_pktsection_t *auth = knot_pkt_section(pkt, KNOT_AUTHORITY);
for (unsigned i = 0; i < auth->count; ++i) {
const knot_rrset_t *rr = knot_pkt_rr(auth, i);
/** Fetch address for zone cut. */
static void fetch_addr(struct kr_zonecut *cut, const knot_dname_t *ns, uint16_t rrtype, struct kr_cache_txn *txn, uint32_t timestamp)
{
- uint16_t rank = 0;
+ uint8_t rank = 0;
knot_rrset_t cached_rr;
knot_rrset_init(&cached_rr, (knot_dname_t *)ns, rrtype, KNOT_CLASS_IN);
- if (kr_cache_peek_rr(txn, &cached_rr, &rank, ×tamp) != 0) {
+ if (kr_cache_peek_rr(txn, &cached_rr, &rank, NULL, ×tamp) != 0) {
return;
}
}
/** Fetch best NS for zone cut. */
-static int fetch_ns(struct kr_context *ctx, struct kr_zonecut *cut, const knot_dname_t *name, struct kr_cache_txn *txn, uint32_t timestamp, uint16_t * restrict rank)
+static int fetch_ns(struct kr_context *ctx, struct kr_zonecut *cut, const knot_dname_t *name, struct kr_cache_txn *txn, uint32_t timestamp, uint8_t * restrict rank)
{
uint32_t drift = timestamp;
knot_rrset_t cached_rr;
knot_rrset_init(&cached_rr, (knot_dname_t *)name, KNOT_RRTYPE_NS, KNOT_CLASS_IN);
- int ret = kr_cache_peek_rr(txn, &cached_rr, rank, &drift);
+ int ret = kr_cache_peek_rr(txn, &cached_rr, rank, NULL, &drift);
if (ret != 0) {
return ret;
}
return kr_error(ENOENT);
}
- uint16_t rank = 0;
+ uint8_t rank = 0;
uint32_t drift = timestamp;
knot_rrset_t cached_rr;
knot_rrset_init(&cached_rr, (knot_dname_t *)owner, type, KNOT_CLASS_IN);
- int ret = kr_cache_peek_rr(txn, &cached_rr, &rank, &drift);
+ int ret = kr_cache_peek_rr(txn, &cached_rr, &rank, NULL, &drift);
if (ret != 0) {
return ret;
}
/* Start at QNAME parent. */
while (txn) {
/* Fetch NS first and see if it's insecure. */
- uint16_t rank = 0;
+ uint8_t rank = 0;
const bool is_root = (label[0] == '\0');
if (fetch_ns(ctx, cut, label, txn, timestamp, &rank) == 0) {
/* Flag as insecure if cached as this */