/** Return false on types to be ignored. Meant both for sname and direct cache requests. */
static bool check_rrtype(uint16_t type, const struct kr_query *qry/*logging*/)
{
- const bool ret = !knot_rrtype_is_metatype(type)
- && type != KNOT_RRTYPE_RRSIG;
+ const bool ret = !knot_rrtype_is_metatype(type);
if (!ret) { WITH_VERBOSE(qry) {
auto_free char *type_str = kr_rrtype_text(type);
VERBOSE_MSG(qry, "=> skipping RR type %s\n", type_str);
} }
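/* A minimal standalone sketch (not part of the patch), assuming libknot's
 * knot_rrtype_is_metatype() from libknot/descriptor.h: with the RRSIG
 * exception dropped above, only metatypes such as OPT or ANY stay ignored. */
#include <stdio.h>
#include <stdint.h>
#include <libknot/descriptor.h>

int main(void)
{
	const uint16_t types[] = { KNOT_RRTYPE_A, KNOT_RRTYPE_RRSIG,
				   KNOT_RRTYPE_OPT, KNOT_RRTYPE_ANY };
	for (size_t i = 0; i < sizeof(types) / sizeof(types[0]); ++i) {
		/* expect 0 (cacheable) for A and RRSIG, non-zero for OPT and ANY */
		printf("type %u is metatype: %d\n", (unsigned)types[i],
		       (int)knot_rrtype_is_metatype(types[i]));
	}
	return 0;
}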
@@ ... @@
switch (type) {
- case KNOT_RRTYPE_RRSIG: /* no RRSIG query caching, at least for now */
- assert(false);
- return (knot_db_val_t){ NULL, 0 };
/* xNAME lumped into NS. */
case KNOT_RRTYPE_CNAME:
case KNOT_RRTYPE_DNAME:
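/* A hedged sketch of the normalization the cases above perform (the switch
 * continues in the real source): with the RRSIG refusal removed, this spot
 * only lumps xNAMEs into NS when building the cache key.  key_type() is a
 * hypothetical condensation, not kresd API. */
#include <stdint.h>
#include <libknot/descriptor.h>

static uint16_t key_type(uint16_t type)
{
	switch (type) {
	case KNOT_RRTYPE_CNAME: /* xNAME lumped into NS */
	case KNOT_RRTYPE_DNAME:
		return KNOT_RRTYPE_NS;
	default:
		return type; /* incl. RRSIG, no longer refused here */
	}
}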
@@ ... @@
const struct kr_qflags * const qf = &qry->flags;
const bool want_negative = qf->DNSSEC_INSECURE || !qf->DNSSEC_WANT || has_optout;
const bool want_pkt = qf->DNSSEC_BOGUS /*< useful for +cd answers */
- || (is_negative && want_negative);
+ || (is_negative && want_negative) || qry->stype == KNOT_RRTYPE_RRSIG;
if (!want_pkt || !knot_wire_get_aa(pkt->wire)
|| pkt->parsed != pkt->size /*< malformed packet; still can't detect KNOT_EFEWDATA */
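/* The whole-packet-caching decision above, distilled into a pure predicate.
 * struct pkt_flags is a hypothetical stand-in for the kr_qflags fields and
 * locals used above; the logic mirrors the patched condition. */
#include <stdbool.h>
#include <stdint.h>
#include <libknot/descriptor.h>

struct pkt_flags {
	bool dnssec_bogus, dnssec_insecure, dnssec_want;
	bool has_optout, is_negative;
	uint16_t stype; /* the query's type */
};

static bool want_pkt_cache(const struct pkt_flags *f)
{
	const bool want_negative =
		f->dnssec_insecure || !f->dnssec_want || f->has_optout;
	return f->dnssec_bogus                    /* useful for +cd answers */
	    || (f->is_negative && want_negative)  /* provable negative answers */
	    || f->stype == KNOT_RRTYPE_RRSIG;     /* new: RRSIG answers cached whole */
}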
@@ ... @@
/* All bad cases should be filtered above,
* at least the same way as pktcache in kresd 1.5.x. */
kr_rank_set(&rank, KR_RANK_SECURE);
- } else assert(false);
+ } else if (qry->stype == KNOT_RRTYPE_RRSIG) {
+ /* RRSIGs can be at most cached as insecure */
+ kr_rank_set(&rank, KR_RANK_INSECURE);
+ }
}
const uint16_t pkt_type = knot_pkt_qtype(pkt);
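/* A sketch of the rank outcome after the change above: answers to explicit
 * RRSIG queries are not validated as a set, so they may reach "insecure" at
 * best, never "secure".  The enum and pkt_rank() are simplified stand-ins
 * for kresd's KR_RANK_* bits and the surrounding if/else chain. */
#include <stdbool.h>
#include <stdint.h>
#include <libknot/descriptor.h>

enum rank { RANK_AUTH, RANK_INSECURE, RANK_SECURE };

static enum rank pkt_rank(bool proven_secure, uint16_t stype)
{
	if (proven_secure)
		return RANK_SECURE; /* all bad cases filtered earlier */
	if (stype == KNOT_RRTYPE_RRSIG)
		return RANK_INSECURE; /* the branch this patch adds */
	return RANK_AUTH; /* base rank; the old code asserted unreachability */
}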