if (can_satisfy(qry)) {
/* This flag makes the resolver move the query
* to the "resolved" list. */
- qry->flags |= QUERY_RESOLVED;
+ qry->flags.RESOLVED = true;
return KR_STATE_DONE;
}
knot_dname_to_str(name_str, rr->owner, sizeof(name_str));
inet_ntop(af, addr, addr_str, sizeof(addr_str));
}
- if (!(query->flags & QUERY_ALLOW_LOCAL) &&
+ if (!(query->flags.ALLOW_LOCAL) &&
!is_valid_addr(addr, addr_len)) {
QVERBOSE_MSG(query, "<= ignoring invalid glue for "
"'%s': '%s'\n", name_str, addr_str);
continue;
}
if ((rr->type == KNOT_RRTYPE_A) &&
- (req->ctx->options & QUERY_NO_IPV4)) {
+ (req->ctx->options.NO_IPV4)) {
continue;
}
if ((rr->type == KNOT_RRTYPE_AAAA) &&
- (req->ctx->options & QUERY_NO_IPV6)) {
+ (req->ctx->options.NO_IPV6)) {
continue;
}
(void) update_nsaddr(rr, req->current_query);
}
kr_zonecut_add(cut, ns_name, NULL);
/* Choose when to use glue records. */
- if (qry->flags & QUERY_PERMISSIVE) {
+ if (qry->flags.PERMISSIVE) {
fetch_glue(pkt, ns_name, req);
- } else if (qry->flags & QUERY_STRICT) {
+ } else if (qry->flags.STRICT) {
/* Strict mode uses only mandatory glue. */
if (knot_dname_in(cut->name, ns_name))
fetch_glue(pkt, ns_name, req);
/* For RRSIGs, ensure the KR_RANK_AUTH flag corresponds to the signed RR. */
uint16_t type = kr_rrset_type_maysig(rr);
- if (qry->flags & QUERY_CACHED) {
+ if (qry->flags.CACHED) {
return rr->additional ? *(uint8_t *)rr->additional : KR_RANK_OMIT;
/* ^^ Current use case for "cached" RRs without rank: hints module. */
}
static int process_authority(knot_pkt_t *pkt, struct kr_request *req)
{
struct kr_query *qry = req->current_query;
- assert(!(qry->flags & QUERY_STUB));
+ assert(!(qry->flags.STUB));
int result = KR_STATE_CONSUME;
- if (qry->flags & QUERY_FORWARD) {
+ if (qry->flags.FORWARD) {
return result;
}
}
- if ((qry->flags & QUERY_DNSSEC_WANT) && (result == KR_STATE_CONSUME)) {
+ if ((qry->flags.DNSSEC_WANT) && (result == KR_STATE_CONSUME)) {
if (knot_wire_get_aa(pkt->wire) == 0 &&
knot_wire_get_ancount(pkt->wire) == 0 &&
ns_record_exists) {
static int unroll_cname(knot_pkt_t *pkt, struct kr_request *req, bool referral, const knot_dname_t **cname_ret)
{
struct kr_query *query = req->current_query;
- assert(!(query->flags & QUERY_STUB));
+ assert(!(query->flags.STUB));
/* Process answer type */
const knot_pktsection_t *an = knot_pkt_section(pkt, KNOT_ANSWER);
const knot_dname_t *cname = NULL;
unsigned cname_chain_len = 0;
bool is_final = (query->parent == NULL);
uint32_t iter_count = 0;
- bool strict_mode = (query->flags & QUERY_STRICT);
+ bool strict_mode = (query->flags.STRICT);
do {
/* CNAME was found at previous iteration, but records may not follow the correct order.
* Try to find records for pending_cname owner from section start. */
return KR_STATE_FAIL;
}
if (rrsig_labels < cname_labels) {
- query->flags |= QUERY_DNSSEC_WEXPAND;
+ query->flags.DNSSEC_WEXPAND = true;
}
}
return KR_STATE_FAIL;
}
struct kr_query *query = req->current_query;
- if (!(query->flags & QUERY_CACHED)) {
+ if (!(query->flags.CACHED)) {
/* If not cached (i.e. got from upstream)
* make sure that this is not an authoritative answer
* (even with AA=1) for other layers.
if (!knot_dname_is_equal(knot_pkt_qname(pkt), query->sname) &&
(pkt_class & (PKT_NOERROR|PKT_NXDOMAIN|PKT_REFUSED|PKT_NODATA))) {
VERBOSE_MSG("<= found cut, retrying with non-minimized name\n");
- query->flags |= QUERY_NO_MINIMIZE;
+ query->flags.NO_MINIMIZE = true;
return KR_STATE_CONSUME;
}
/* This answer didn't improve resolution chain, therefore must be authoritative (relaxed to negative). */
if (!is_authoritative(pkt, query)) {
- if (!(query->flags & QUERY_FORWARD) &&
+ if (!(query->flags.FORWARD) &&
pkt_class & (PKT_NXDOMAIN|PKT_NODATA)) {
VERBOSE_MSG("<= lame response: non-auth sent negative response\n");
return KR_STATE_FAIL;
/* Make sure that this is an authoritative answer (even with AA=0) for other layers */
knot_wire_set_aa(pkt->wire);
/* Either way it resolves current query. */
- query->flags |= QUERY_RESOLVED;
+ query->flags.RESOLVED = true;
/* Follow canonical name as next SNAME. */
if (!knot_dname_is_equal(cname, query->sname)) {
/* Check if target record has been already copied */
- query->flags |= QUERY_CNAME;
+ query->flags.CNAME = true;
if (is_final) {
state = process_final(pkt, req, cname);
if (state != kr_ok()) {
return state;
}
- } else if ((query->flags & QUERY_FORWARD) &&
+ } else if ((query->flags.FORWARD) &&
((query->stype == KNOT_RRTYPE_DS) ||
(query->stype == KNOT_RRTYPE_NS))) {
/* CNAME'ed answer for DS or NS subquery.
if (!next) {
return KR_STATE_FAIL;
}
- next->flags |= QUERY_AWAIT_CUT;
- if (query->flags & QUERY_FORWARD) {
- next->forward_flags |= QUERY_CNAME;
+ next->flags.AWAIT_CUT = true;
+ if (query->flags.FORWARD) {
+ next->forward_flags.CNAME = true;
if (query->parent == NULL) {
state = kr_nsrep_copy_set(&next->ns, &query->ns);
if (state != kr_ok()) {
/* Want DNSSEC if and only if it's posible to secure
* this name (i.e. iff it is covered by a TA) */
if (kr_ta_covers_qry(req->ctx, cname, query->stype)) {
- next->flags |= QUERY_DNSSEC_WANT;
+ next->flags.DNSSEC_WANT = true;
} else {
- next->flags &= ~QUERY_DNSSEC_WANT;
+ next->flags.DNSSEC_WANT = false;
}
- if (!(query->flags & QUERY_FORWARD) ||
- (query->flags & QUERY_DNSSEC_WEXPAND)) {
+ if (!(query->flags.FORWARD) ||
+ (query->flags.DNSSEC_WEXPAND)) {
state = pick_authority(pkt, req, false);
if (state != kr_ok()) {
return KR_STATE_FAIL;
static int process_stub(knot_pkt_t *pkt, struct kr_request *req)
{
struct kr_query *query = req->current_query;
- assert(query->flags & QUERY_STUB);
+ assert(query->flags.STUB);
/* Pick all answer RRs. */
const knot_pktsection_t *an = knot_pkt_section(pkt, KNOT_ANSWER);
for (unsigned i = 0; i < an->count; ++i) {
}
knot_wire_set_aa(pkt->wire);
- query->flags |= QUERY_RESOLVED;
+ query->flags.RESOLVED = true;
/* Pick authority RRs. */
int pkt_class = kr_response_classify(pkt);
const bool to_wire = ((pkt_class & (PKT_NXDOMAIN|PKT_NODATA)) != 0);
#ifndef STRICT_MODE
/* Work around broken auths/load balancers */
- if (query->flags & QUERY_SAFEMODE) {
+ if (query->flags.SAFEMODE) {
return resolve_error(pkt, req);
- } else if (query->flags & QUERY_NO_MINIMIZE) {
- query->flags |= QUERY_SAFEMODE;
+ } else if (query->flags.NO_MINIMIZE) {
+ query->flags.SAFEMODE = true;
return KR_STATE_DONE;
} else {
- query->flags |= QUERY_NO_MINIMIZE;
+ query->flags.NO_MINIMIZE = true;
return KR_STATE_DONE;
}
#else
}
WITH_VERBOSE {
- if (query->flags & QUERY_TRACE) {
+ if (query->flags.TRACE) {
VERBOSE_MSG("<= answer received:\n");
kr_pkt_print(pkt);
}
VERBOSE_MSG("<= ignoring mismatching response\n");
/* Force TCP, to work around authoritatives messing up question
* without yielding to spoofed responses. */
- query->flags |= QUERY_TCP;
+ query->flags.TCP = true;
return resolve_badmsg(pkt, req, query);
} else if (knot_wire_get_tc(pkt->wire)) {
VERBOSE_MSG("<= truncated response, failover to TCP\n");
if (query) {
/* Fail if already on TCP. */
- if (query->flags & QUERY_TCP) {
+ if (query->flags.TCP) {
VERBOSE_MSG("<= TC=1 with TCP, bailing out\n");
return resolve_error(pkt, req);
}
- query->flags |= QUERY_TCP;
+ query->flags.TCP = true;
}
return KR_STATE_CONSUME;
}
query->fails = 0; /* Reset per-query counter. */
return resolve_error(pkt, req);
} else {
- query->flags |= QUERY_NO_MINIMIZE; /* Drop minimisation as a safe-guard. */
+ query->flags.NO_MINIMIZE = true; /* Drop minimisation as a safe-guard. */
return KR_STATE_CONSUME;
}
}
}
/* Forwarding/stub mode is special. */
- if (query->flags & QUERY_STUB) {
+ if (query->flags.STUB) {
return process_stub(pkt, req);
}
uint8_t lowest_rank = KR_RANK_INITIAL | KR_RANK_AUTH;
/* There's probably little sense for NONAUTH in pktcache. */
- if (!knot_wire_get_cd(req->answer->wire) && !(qry->flags & QUERY_STUB)) {
+ if (!knot_wire_get_cd(req->answer->wire) && !(qry->flags.STUB)) {
/* Records not present under any TA don't have their security verified at all. */
bool ta_covers = kr_ta_covers_qry(ctx, qry->sname, qry->stype);
/* ^ TODO: performance? */
/* Rank-related fixups. Add rank into the additional field. */
if (kr_rank_test(entry->rank, KR_RANK_INSECURE)) {
- qry->flags |= QUERY_DNSSEC_INSECURE;
- qry->flags &= ~QUERY_DNSSEC_WANT;
+ qry->flags.DNSSEC_INSECURE = true;
+ qry->flags.DNSSEC_WANT = false;
}
for (size_t i = 0; i < pkt->rrset_count; ++i) {
assert(!pkt->rr[i].additional);
struct kr_request *req = ctx->req;
struct kr_query *qry = req->current_query;
if (ctx->state & (KR_STATE_FAIL|KR_STATE_DONE) ||
- (qry->flags & QUERY_NO_CACHE)) {
+ (qry->flags.NO_CACHE)) {
return ctx->state; /* Already resolved/failed */
}
/* Both caches only peek for qry->sname and that would be useless
* to repeat on every iteration, so disable it from now on.
* Note: it's important to skip this if rrcache sets KR_STATE_DONE,
* as CNAME chains need more iterations to get fetched. */
- qry->flags |= QUERY_NO_CACHE;
+ qry->flags.NO_CACHE = true;
if (knot_pkt_qclass(pkt) != KNOT_CLASS_IN) {
return ctx->state; /* Only IN class */
if (ret == 0) {
- qry->flags |= QUERY_CACHED|QUERY_NO_MINIMIZE;
+ qry->flags.CACHED = true;
+ qry->flags.NO_MINIMIZE = true;
if (flags & KR_CACHE_FLAG_WCARD_PROOF) {
- qry->flags |= QUERY_DNSSEC_WEXPAND;
+ qry->flags.DNSSEC_WEXPAND = true;
}
if (flags & KR_CACHE_FLAG_OPTOUT) {
- qry->flags |= QUERY_DNSSEC_OPTOUT;
+ qry->flags.DNSSEC_OPTOUT = true;
}
pkt->parsed = pkt->size;
knot_wire_set_qr(pkt->wire);
struct kr_query *qry = req->current_query;
/* Cache only answers that make query resolved (i.e. authoritative)
* that didn't fail during processing and are negative. */
- if (qry->flags & QUERY_CACHED || ctx->state & KR_STATE_FAIL) {
+ if (qry->flags.CACHED || ctx->state & KR_STATE_FAIL) {
return ctx->state; /* Don't cache anything if failed. */
}
/* Cache only authoritative answers from IN class. */
const uint16_t qtype = knot_pkt_qtype(pkt);
const bool is_eligible = (knot_rrtype_is_metatype(qtype) || qtype == KNOT_RRTYPE_RRSIG);
bool is_negative = kr_response_classify(pkt) & (PKT_NODATA|PKT_NXDOMAIN);
- bool wcard_expansion = (qry->flags & QUERY_DNSSEC_WEXPAND);
+ bool wcard_expansion = (qry->flags.DNSSEC_WEXPAND);
- if (is_negative && ((qry->flags & (QUERY_FORWARD | QUERY_CNAME)) ==
- (QUERY_FORWARD | QUERY_CNAME))) {
+ /* Both FORWARD and CNAME must be set (mask test "== (A|B)" -> a && b). */
+ if (is_negative && qry->flags.FORWARD && qry->flags.CNAME) {
/* Don't cache CNAME'ed NXDOMAIN answer in forwarding mode
/* If cd bit is set or we got answer via non-validated forwarding,
* make the rank bad; otherwise it depends on flags. */
- if (knot_wire_get_cd(req->answer->wire) || qry->flags & QUERY_STUB) {
+ if (knot_wire_get_cd(req->answer->wire) || qry->flags.STUB) {
kr_rank_set(&header.rank, KR_RANK_OMIT);
} else {
- if (qry->flags & QUERY_DNSSEC_BOGUS) {
+ if (qry->flags.DNSSEC_BOGUS) {
kr_rank_set(&header.rank, KR_RANK_BOGUS);
- } else if (qry->flags & QUERY_DNSSEC_INSECURE) {
+ } else if (qry->flags.DNSSEC_INSECURE) {
kr_rank_set(&header.rank, KR_RANK_INSECURE);
- } else if (qry->flags & QUERY_DNSSEC_WANT) {
+ } else if (qry->flags.DNSSEC_WANT) {
kr_rank_set(&header.rank, KR_RANK_SECURE);
}
}
VERBOSE_MSG(qry, "=> candidate rank: 0%0.2o\n", header.rank);
/* Set cache flags */
- if (qry->flags & QUERY_DNSSEC_WEXPAND) {
+ if (qry->flags.DNSSEC_WEXPAND) {
header.flags |= KR_CACHE_FLAG_WCARD_PROOF;
}
- if (qry->flags & QUERY_DNSSEC_OPTOUT) {
+ if (qry->flags.DNSSEC_OPTOUT) {
header.flags |= KR_CACHE_FLAG_OPTOUT;
}
}
if (is_expiring(&cache_rr, drift)) {
- qry->flags |= QUERY_EXPIRING;
+ qry->flags.EXPIRING = true;
}
if ((*flags) & KR_CACHE_FLAG_WCARD_PROOF) {
/* Record was found, but wildcard answer proof is needed.
* Do not update packet, try to fetch whole packet from pktcache instead. */
- qry->flags |= QUERY_DNSSEC_WEXPAND;
+ qry->flags.DNSSEC_WEXPAND = true;
return kr_error(ENOENT);
}
uint8_t rank = 0;
uint8_t flags = 0;
uint8_t lowest_rank = KR_RANK_INITIAL | KR_RANK_AUTH;
- if (qry->flags & QUERY_NONAUTH) {
+ if (qry->flags.NONAUTH) {
lowest_rank = KR_RANK_INITIAL;
/* Note: there's little sense in validation status for non-auth records.
* In case of using NONAUTH to get NS IPs, knowing that you ask correct
int ret = loot_rr(cache, pkt, qry->sname, qry->sclass, rrtype, qry,
&rank, &flags, 0, lowest_rank);
if (ret != 0 && rrtype != KNOT_RRTYPE_CNAME
- && !(qry->flags & QUERY_STUB)) {
+ && !(qry->flags.STUB)) {
/* Chase CNAME if no direct hit.
* We avoid this in STUB mode because the current iterator
* (process_stub()) is unable to iterate in STUB mode to follow
}
if (kr_rank_test(rank, KR_RANK_INSECURE)) {
- qry->flags |= QUERY_DNSSEC_INSECURE;
- qry->flags &= ~QUERY_DNSSEC_WANT;
+ qry->flags.DNSSEC_INSECURE = true;
+ qry->flags.DNSSEC_WANT = false;
}
/* Record may have RRSIGs, try to find them. */
if (allow_unverified
- || ((qry->flags & QUERY_DNSSEC_WANT) && kr_rank_test(rank, KR_RANK_SECURE))) {
+ || ((qry->flags.DNSSEC_WANT) && kr_rank_test(rank, KR_RANK_SECURE))) {
kr_rank_set(&lowest_rank, KR_RANK_INITIAL); /* no security for RRSIGs */
ret = loot_rr(cache, pkt, qry->sname, qry->sclass, rrtype, qry,
&rank, &flags, true, lowest_rank);
{
struct kr_request *req = ctx->req;
struct kr_query *qry = req->current_query;
- if (ctx->state & (KR_STATE_FAIL|KR_STATE_DONE) || (qry->flags & QUERY_NO_CACHE)) {
+ if (ctx->state & (KR_STATE_FAIL|KR_STATE_DONE) || (qry->flags.NO_CACHE)) {
return ctx->state; /* Already resolved/failed or already tried, etc. */
}
/* Reconstruct the answer from the cache,
static int commit_rrsig(struct rrcache_baton *baton, uint8_t rank, uint8_t flags, knot_rrset_t *rr)
{
/* If not doing secure resolution, ignore (unvalidated) RRSIGs. */
- if (!(baton->qry->flags & QUERY_DNSSEC_WANT)) {
+ if (!(baton->qry->flags.DNSSEC_WANT)) {
return kr_ok();
}
/* Commit covering RRSIG to a separate cache namespace. */
uint8_t flags = KR_CACHE_FLAG_NONE;
if (kr_rank_test(rank, KR_RANK_AUTH)) {
- if (baton->qry->flags & QUERY_DNSSEC_WEXPAND) {
+ if (baton->qry->flags.DNSSEC_WEXPAND) {
flags |= KR_CACHE_FLAG_WCARD_PROOF;
}
if ((rr->type == KNOT_RRTYPE_NS) &&
- (baton->qry->flags & QUERY_DNSSEC_NODS)) {
+ (baton->qry->flags.DNSSEC_NODS)) {
flags |= KR_CACHE_FLAG_NODS;
}
}
/* Cache only positive answers, not meta types or RRSIG. */
const uint16_t qtype = knot_pkt_qtype(pkt);
const bool is_eligible = !(knot_rrtype_is_metatype(qtype) || qtype == KNOT_RRTYPE_RRSIG);
- if (qry->flags & QUERY_CACHED || knot_wire_get_rcode(pkt->wire) != KNOT_RCODE_NOERROR || !is_eligible) {
+ if (qry->flags.CACHED || knot_wire_get_rcode(pkt->wire) != KNOT_RCODE_NOERROR || !is_eligible) {
return ctx->state;
}
/* Stash data selected by iterator from the last receieved packet. */
* or optout - flag the query.
*/
if (an_flags & KR_DNSSEC_VFLG_WEXPAND) {
- qry->flags |= QUERY_DNSSEC_WEXPAND;
+ qry->flags.DNSSEC_WEXPAND = true;
}
if (an_flags & KR_DNSSEC_VFLG_OPTOUT) {
- qry->flags |= QUERY_DNSSEC_OPTOUT;
+ qry->flags.DNSSEC_OPTOUT = true;
}
return ret;
}
/* Check if there's a key for current TA. */
- if (updated_key && !(qry->flags & QUERY_CACHED)) {
+ if (updated_key && !(qry->flags.CACHED)) {
kr_rrset_validation_ctx_t vctx = {
.pkt = answer,
}
if (vctx.flags & KR_DNSSEC_VFLG_WEXPAND) {
- qry->flags |= QUERY_DNSSEC_WEXPAND;
+ qry->flags.DNSSEC_WEXPAND = true;
}
if (vctx.flags & KR_DNSSEC_VFLG_OPTOUT) {
- qry->flags |= QUERY_DNSSEC_OPTOUT;
+ qry->flags.DNSSEC_OPTOUT = true;
}
}
struct kr_query *parent = qry->parent;
- const uint32_t cut_flags = (QUERY_AWAIT_IPV4 | QUERY_AWAIT_IPV6);
- while (parent && ((parent->flags & cut_flags) == 0)) {
+ /* "(flags & (A|B)) == 0" means neither bit set -> !a && !b;
+  * the uint32_t mask variable is no longer representable with struct flags. */
+ while (parent && !parent->flags.AWAIT_IPV4 && !parent->flags.AWAIT_IPV6) {
- parent->flags &= ~QUERY_DNSSEC_WANT;
- parent->flags |= QUERY_DNSSEC_INSECURE;
+ parent->flags.DNSSEC_WANT = false;
+ parent->flags.DNSSEC_INSECURE = true;
if (parent->stype != KNOT_RRTYPE_DS &&
parent->stype != KNOT_RRTYPE_RRSIG) {
break;
- if (qry->flags & (QUERY_DNSSEC_INSECURE)) { /* DS non-existence proven. */
+ if (qry->flags.DNSSEC_INSECURE) { /* DS non-existence proven. */
mark_insecure_parents(qry);
- } else if ((qry->flags & (QUERY_DNSSEC_NODS | QUERY_FORWARD)) == QUERY_DNSSEC_NODS) {
+ /* "(flags & (A|B)) == A" means A set and B clear -> a && !b. */
+ } else if (qry->flags.DNSSEC_NODS && !qry->flags.FORWARD) {
- if (qry->flags & QUERY_DNSSEC_OPTOUT) {
+ if (qry->flags.DNSSEC_OPTOUT) {
mark_insecure_parents(qry);
} else {
int ret = kr_dnssec_matches_name_and_type(&req->auth_selected, qry->uid,
if (referral) {
section = KNOT_AUTHORITY;
} else if (knot_pkt_qtype(answer) == KNOT_RRTYPE_DS &&
- !(qry->flags & QUERY_CNAME) &&
+ !(qry->flags.CNAME) &&
(knot_wire_get_rcode(answer->wire) != KNOT_RCODE_NXDOMAIN)) {
section = KNOT_ANSWER;
} else { /* N/A */
}
} else if (ret != 0) {
VERBOSE_MSG(qry, "<= bogus proof of DS non-existence\n");
- qry->flags |= QUERY_DNSSEC_BOGUS;
+ qry->flags.DNSSEC_BOGUS = true;
} else {
VERBOSE_MSG(qry, "<= DS doesn't exist, going insecure\n");
- qry->flags |= QUERY_DNSSEC_NODS;
+ qry->flags.DNSSEC_NODS = true;
}
return ret;
- } else if (qry->flags & QUERY_FORWARD && qry->parent) {
+ } else if (qry->flags.FORWARD && qry->parent) {
kr_zonecut_copy(&next->zone_cut, cut);
kr_zonecut_copy_trust(&next->zone_cut, cut);
} else {
- next->flags |= QUERY_AWAIT_CUT;
+ next->flags.AWAIT_CUT = true;
}
- if (qry->flags & QUERY_FORWARD) {
- next->flags &= ~QUERY_AWAIT_CUT;
+ if (qry->flags.FORWARD) {
+ next->flags.AWAIT_CUT = false;
}
- next->flags |= QUERY_DNSSEC_WANT;
+ next->flags.DNSSEC_WANT = true;
return KR_STATE_YIELD;
}
if (!kr_rank_test(invalid_entry->rank, KR_RANK_SECURE) &&
(++(invalid_entry->revalidation_cnt) > MAX_REVALIDATION_CNT)) {
VERBOSE_MSG(qry, "<= continuous revalidation, fails\n");
- qry->flags |= QUERY_DNSSEC_BOGUS;
+ qry->flags.DNSSEC_BOGUS = true;
return KR_STATE_FAIL;
}
const knot_dname_t *signer_name = knot_rrsig_signer_name(&rr->rrs, 0);
if (knot_dname_is_sub(signer_name, qry->zone_cut.name)) {
qry->zone_cut.name = knot_dname_copy(signer_name, &req->pool);
- qry->flags |= QUERY_AWAIT_CUT;
+ qry->flags.AWAIT_CUT = true;
} else if (!knot_dname_is_equal(signer_name, qry->zone_cut.name)) {
if (qry->zone_cut.parent) {
memcpy(&qry->zone_cut, qry->zone_cut.parent, sizeof(qry->zone_cut));
} else {
- qry->flags |= QUERY_AWAIT_CUT;
+ qry->flags.AWAIT_CUT = true;
}
qry->zone_cut.name = knot_dname_copy(signer_name, &req->pool);
}
} else if (kr_rank_test(invalid_entry->rank, KR_RANK_MISSING)) {
ret = rrsig_not_found(ctx, rr);
} else if (!kr_rank_test(invalid_entry->rank, KR_RANK_SECURE)) {
- qry->flags |= QUERY_DNSSEC_BOGUS;
+ qry->flags.DNSSEC_BOGUS = true;
ret = KR_STATE_FAIL;
}
q->stype == KNOT_RRTYPE_DS &&
knot_dname_is_equal(q->sname, qry->sname)) {
nods = true;
- if (!(q->flags & QUERY_DNSSEC_OPTOUT)) {
+ if (!(q->flags.DNSSEC_OPTOUT)) {
int ret = kr_dnssec_matches_name_and_type(&req->auth_selected, q->uid,
qry->sname, KNOT_RRTYPE_NS);
ns_exist = (ret == kr_ok());
}
if (nods && ns_exist && qtype == KNOT_RRTYPE_NS) {
- qry->flags &= ~QUERY_DNSSEC_WANT;
- qry->flags |= QUERY_DNSSEC_INSECURE;
- if (qry->forward_flags & QUERY_CNAME) {
+ qry->flags.DNSSEC_WANT = false;
+ qry->flags.DNSSEC_INSECURE = true;
+ if (qry->forward_flags.CNAME) {
assert(qry->cname_parent);
- qry->cname_parent->flags &= ~QUERY_DNSSEC_WANT;
- qry->cname_parent->flags |= QUERY_DNSSEC_INSECURE;
+ qry->cname_parent->flags.DNSSEC_WANT = false;
+ qry->cname_parent->flags.DNSSEC_INSECURE = true;
} else if (pkt_rcode == KNOT_RCODE_NOERROR && qry->parent != NULL) {
const knot_pktsection_t *sec = knot_pkt_section(pkt, KNOT_ANSWER);
const knot_rrset_t *rr = knot_pkt_rr(sec, 0);
if (rr->type == KNOT_RRTYPE_NS) {
qry->parent->zone_cut.name = knot_dname_copy(rr->owner, &req->pool);
- qry->parent->flags &= ~QUERY_DNSSEC_WANT;
- qry->parent->flags |= QUERY_DNSSEC_INSECURE;
+ qry->parent->flags.DNSSEC_WANT = false;
+ qry->parent->flags.DNSSEC_INSECURE = true;
}
}
while (qry->parent) {
qry = qry->parent;
- qry->flags &= ~QUERY_DNSSEC_WANT;
- qry->flags |= QUERY_DNSSEC_INSECURE;
- if (qry->forward_flags & QUERY_CNAME) {
+ qry->flags.DNSSEC_WANT = false;
+ qry->flags.DNSSEC_INSECURE = true;
+ if (qry->forward_flags.CNAME) {
assert(qry->cname_parent);
- qry->cname_parent->flags &= ~QUERY_DNSSEC_WANT;
- qry->cname_parent->flags |= QUERY_DNSSEC_INSECURE;
+ qry->cname_parent->flags.DNSSEC_WANT = false;
+ qry->cname_parent->flags.DNSSEC_INSECURE = true;
}
}
return KR_STATE_DONE;
}
kr_zonecut_set(&next->zone_cut, qry->zone_cut.name);
kr_zonecut_copy_trust(&next->zone_cut, &qry->zone_cut);
- next->flags |= QUERY_DNSSEC_WANT;
+ next->flags.DNSSEC_WANT = true;
}
return KR_STATE_YIELD;
if (ta_name && (!signer || !knot_dname_is_equal(ta_name, signer))) {
/* check all newly added RRSIGs */
if (!signer) {
- if (qry->flags & QUERY_FORWARD) {
+ if (qry->flags.FORWARD) {
return unsigned_forward(ctx, pkt);
}
/* Not a DNSSEC-signed response. */
qry->zone_cut.name = knot_dname_copy(qname, &req->pool);
}
} else if (knot_dname_is_sub(signer, qry->zone_cut.name)) {
- if (!(qry->flags & QUERY_FORWARD)) {
+ if (!(qry->flags.FORWARD)) {
/* Key signer is below current cut, advance and refetch keys. */
qry->zone_cut.name = knot_dname_copy(signer, &req->pool);
} else {
/* Check if DS does not exist. */
struct kr_query *q = kr_rplan_find_resolved(&req->rplan, NULL,
signer, qry->sclass, KNOT_RRTYPE_DS);
- if (q && q->flags & QUERY_DNSSEC_NODS) {
- qry->flags &= ~QUERY_DNSSEC_WANT;
- qry->flags |= QUERY_DNSSEC_INSECURE;
+ if (q && q->flags.DNSSEC_NODS) {
+ qry->flags.DNSSEC_WANT = false;
+ qry->flags.DNSSEC_INSECURE = true;
if (qry->parent) {
- qry->parent->flags &= ~QUERY_DNSSEC_WANT;
- qry->parent->flags |= QUERY_DNSSEC_INSECURE;
+ qry->parent->flags.DNSSEC_WANT = false;
+ qry->parent->flags.DNSSEC_INSECURE = true;
}
} else if (qry->stype != KNOT_RRTYPE_DS) {
struct kr_rplan *rplan = &req->rplan;
}
kr_zonecut_set(&next->zone_cut, qry->zone_cut.name);
kr_zonecut_copy_trust(&next->zone_cut, &qry->zone_cut);
- next->flags |= QUERY_DNSSEC_WANT;
+ next->flags.DNSSEC_WANT = true;
}
}
} else if (!knot_dname_is_equal(signer, qry->zone_cut.name)) {
if (qry->zone_cut.parent) {
memcpy(&qry->zone_cut, qry->zone_cut.parent, sizeof(qry->zone_cut));
} else {
- qry->flags |= QUERY_AWAIT_CUT;
+ qry->flags.AWAIT_CUT = true;
}
qry->zone_cut.name = knot_dname_copy(signer, &req->pool);
}
/* zone cut matches, but DS/DNSKEY doesn't => refetch. */
VERBOSE_MSG(qry, ">< cut changed, needs revalidation\n");
- if ((qry->flags & QUERY_FORWARD) && qry->stype != KNOT_RRTYPE_DS) {
+ if ((qry->flags.FORWARD) && qry->stype != KNOT_RRTYPE_DS) {
struct kr_rplan *rplan = &req->rplan;
struct kr_query *next = kr_rplan_push(rplan, qry, signer,
qry->sclass, KNOT_RRTYPE_DS);
}
kr_zonecut_set(&next->zone_cut, qry->zone_cut.name);
kr_zonecut_copy_trust(&next->zone_cut, &qry->zone_cut);
- next->flags |= QUERY_DNSSEC_WANT;
+ next->flags.DNSSEC_WANT = true;
return KR_STATE_YIELD;
}
- if (!(qry->flags & QUERY_FORWARD)) {
+ if (!(qry->flags.FORWARD)) {
return KR_STATE_YIELD;
}
}
for (int k = 0; k < rrsigs->rrs.rr_count; ++k) {
if (knot_rrsig_labels(&rrsigs->rrs, k) != owner_labels) {
- qry->flags |= QUERY_DNSSEC_WEXPAND;
+ qry->flags.DNSSEC_WEXPAND = true;
}
}
}
/* Pass-through if user doesn't want secure answer or stub. */
/* @todo: Validating stub resolver mode. */
- if (qry->flags & QUERY_STUB) {
+ if (qry->flags.STUB) {
rank_records(ctx, KR_RANK_OMIT);
return ctx->state;
}
uint8_t pkt_rcode = knot_wire_get_rcode(pkt->wire);
- if ((qry->flags & QUERY_FORWARD) &&
+ if ((qry->flags.FORWARD) &&
pkt_rcode != KNOT_RCODE_NOERROR &&
pkt_rcode != KNOT_RCODE_NXDOMAIN) {
do {
- qry->flags |= QUERY_DNSSEC_BOGUS;
+ qry->flags.DNSSEC_BOGUS = true;
if (qry->cname_parent) {
- qry->cname_parent->flags |= QUERY_DNSSEC_BOGUS;
+ qry->cname_parent->flags.DNSSEC_BOGUS = true;
}
qry = qry->parent;
} while (qry);
return ctx->state;
}
- if (!(qry->flags & QUERY_DNSSEC_WANT)) {
+ if (!(qry->flags.DNSSEC_WANT)) {
- const uint32_t test_flags = (QUERY_CACHED | QUERY_DNSSEC_INSECURE);
- const bool is_insec = ((qry->flags & test_flags) == test_flags);
+ /* "(flags & (A|B)) == (A|B)" means both set -> a && b;
+  * the uint32_t mask variable is no longer representable with struct flags. */
+ const bool is_insec = qry->flags.CACHED && qry->flags.DNSSEC_INSECURE;
- if ((qry->flags & QUERY_DNSSEC_INSECURE)) {
+ if ((qry->flags.DNSSEC_INSECURE)) {
rank_records(ctx, KR_RANK_INSECURE);
}
if (is_insec && qry->parent != NULL) {
/* Check if wildcard expansion happens.
* If yes, copy authority. */
if ((qry->parent == NULL) &&
- (qry->flags & QUERY_DNSSEC_WEXPAND)) {
+ (qry->flags.DNSSEC_WEXPAND)) {
kr_ranked_rrarray_set_wire(&req->auth_selected, true, qry->uid, true);
}
rank_records(ctx, KR_RANK_OMIT);
}
/* Answer for RRSIG may not set DO=1, but all records MUST still validate. */
bool use_signatures = (knot_pkt_qtype(pkt) != KNOT_RRTYPE_RRSIG);
- if (!(qry->flags & QUERY_CACHED) && !knot_pkt_has_dnssec(pkt) && !use_signatures) {
+ if (!(qry->flags.CACHED) && !knot_pkt_has_dnssec(pkt) && !use_signatures) {
VERBOSE_MSG(qry, "<= got insecure response\n");
- qry->flags |= QUERY_DNSSEC_BOGUS;
+ qry->flags.DNSSEC_BOGUS = true;
return KR_STATE_FAIL;
}
const knot_pktsection_t *an = knot_pkt_section(pkt, KNOT_ANSWER);
const bool referral = (an->count == 0 && !knot_wire_get_aa(pkt->wire));
- if (!(qry->flags & QUERY_CACHED) && knot_wire_get_aa(pkt->wire)) {
+ if (!(qry->flags.CACHED) && knot_wire_get_aa(pkt->wire)) {
/* Check if answer if not empty,
* but iterator has not selected any records. */
if (!check_empty_answer(ctx, pkt)) {
VERBOSE_MSG(qry, "<= no useful RR in authoritative answer\n");
- qry->flags |= QUERY_DNSSEC_BOGUS;
+ qry->flags.DNSSEC_BOGUS = true;
return KR_STATE_FAIL;
}
/* Track difference between current TA and signer name.
} else if (ret == kr_error(DNSSEC_INVALID_DS_ALGORITHM)) {
VERBOSE_MSG(qry, ">< all DS entries use unsupported algorithm pairs, going insecure\n");
/* ^ the message is a bit imprecise to avoid being too verbose */
- qry->flags &= ~QUERY_DNSSEC_WANT;
- qry->flags |= QUERY_DNSSEC_INSECURE;
+ qry->flags.DNSSEC_WANT = false;
+ qry->flags.DNSSEC_INSECURE = true;
rank_records(ctx, KR_RANK_INSECURE);
mark_insecure_parents(qry);
return KR_STATE_DONE;
} else if (ret != 0) {
VERBOSE_MSG(qry, "<= bad keys, broken trust chain\n");
- qry->flags |= QUERY_DNSSEC_BOGUS;
+ qry->flags.DNSSEC_BOGUS = true;
return KR_STATE_FAIL;
}
}
/* Validate non-existence proof if not positive answer. */
- if (!(qry->flags & QUERY_CACHED) && pkt_rcode == KNOT_RCODE_NXDOMAIN &&
+ if (!(qry->flags.CACHED) && pkt_rcode == KNOT_RCODE_NXDOMAIN &&
- ((qry->flags & (QUERY_FORWARD | QUERY_CNAME)) != (QUERY_FORWARD | QUERY_CNAME))) {
+ !(qry->flags.FORWARD && qry->flags.CNAME)) {
/* @todo If knot_pkt_qname(pkt) is used instead of qry->sname then the tests crash. */
if (!has_nsec3) {
* but NSEC3 that covers next closer name
* (or wildcard at next closer name) has opt-out flag.
* RFC5155 9.2; AD flag can not be set */
- qry->flags |= QUERY_DNSSEC_OPTOUT;
+ qry->flags.DNSSEC_OPTOUT = true;
VERBOSE_MSG(qry, "<= can't prove NXDOMAIN due to optout, going insecure\n");
} else if (ret != 0) {
VERBOSE_MSG(qry, "<= bad NXDOMAIN proof\n");
- qry->flags |= QUERY_DNSSEC_BOGUS;
+ qry->flags.DNSSEC_BOGUS = true;
return KR_STATE_FAIL;
}
}
/* @todo WTH, this needs API that just tries to find a proof and the caller
* doesn't have to worry about NSEC/NSEC3
* @todo rework this */
- if (!(qry->flags & QUERY_CACHED) && (pkt_rcode == KNOT_RCODE_NOERROR) &&
+ if (!(qry->flags.CACHED) && (pkt_rcode == KNOT_RCODE_NOERROR) &&
- ((qry->flags & (QUERY_FORWARD | QUERY_CNAME)) != (QUERY_FORWARD | QUERY_CNAME))) {
+ !(qry->flags.FORWARD && qry->flags.CNAME)) {
bool no_data = (an->count == 0 && knot_wire_get_aa(pkt->wire));
if (no_data) {
if (ret != 0) {
if (has_nsec3 && (ret == kr_error(DNSSEC_OUT_OF_RANGE))) {
VERBOSE_MSG(qry, "<= can't prove NODATA due to optout, going insecure\n");
- qry->flags |= QUERY_DNSSEC_OPTOUT;
+ qry->flags.DNSSEC_OPTOUT = true;
/* Could not return from here,
* we must continue, validate NSEC\NSEC3 and
* call update_parent_keys() to mark
* parent queries as insecured */
} else {
VERBOSE_MSG(qry, "<= bad NODATA proof\n");
- qry->flags |= QUERY_DNSSEC_BOGUS;
+ qry->flags.DNSSEC_BOGUS = true;
return KR_STATE_FAIL;
}
}
/* Validate all records, fail as bogus if it doesn't match.
* Do not revalidate data from cache, as it's already trusted. */
- if (!(qry->flags & QUERY_CACHED)) {
+ if (!(qry->flags.CACHED)) {
ret = validate_records(req, pkt, req->rplan.pool, has_nsec3);
if (ret != 0) {
/* something exceptional - no DNS key, empty pointers etc
* normally it shoudn't happen */
VERBOSE_MSG(qry, "<= couldn't validate RRSIGs\n");
- qry->flags |= QUERY_DNSSEC_BOGUS;
+ qry->flags.DNSSEC_BOGUS = true;
return KR_STATE_FAIL;
}
/* check validation state and spawn subrequests */
/* Check if wildcard expansion detected for final query.
* If yes, copy authority. */
- if ((qry->parent == NULL) && (qry->flags & QUERY_DNSSEC_WEXPAND)) {
+ if ((qry->parent == NULL) && (qry->flags.DNSSEC_WEXPAND)) {
kr_ranked_rrarray_set_wire(&req->auth_selected, true, qry->uid, true);
}
if (ret == DNSSEC_NOT_FOUND && qry->stype != KNOT_RRTYPE_DS) {
if (ctx->state == KR_STATE_YIELD) {
VERBOSE_MSG(qry, "<= can't validate referral\n");
- qry->flags |= QUERY_DNSSEC_BOGUS;
+ qry->flags.DNSSEC_BOGUS = true;
return KR_STATE_FAIL;
} else {
/* Check the trust chain and query DS\DNSKEY if needed. */
} else if (pkt_rcode == KNOT_RCODE_NOERROR &&
referral &&
- (((qry->flags & (QUERY_DNSSEC_WANT | QUERY_DNSSEC_INSECURE)) == QUERY_DNSSEC_INSECURE) ||
+ ((!qry->flags.DNSSEC_WANT && qry->flags.DNSSEC_INSECURE) ||
- (qry->flags & QUERY_DNSSEC_NODS))) {
+ (qry->flags.DNSSEC_NODS))) {
/* referral with proven DS non-existance */
qtype = KNOT_RRTYPE_DS;
}
- if (qry->flags & QUERY_FORWARD && qry->parent) {
+ if (qry->flags.FORWARD && qry->parent) {
if (pkt_rcode == KNOT_RCODE_NXDOMAIN) {
- qry->parent->forward_flags |= QUERY_NO_MINIMIZE;
+ qry->parent->forward_flags.NO_MINIMIZE = true;
}
}
VERBOSE_MSG(qry, "<= answer valid, OK\n");
bool is_valid = false;
/* Check if the address isn't disabled. */
if (len == sizeof(struct in6_addr)) {
- is_valid = !(opts & QUERY_NO_IPV6);
+ is_valid = !(opts.NO_IPV6);
favour = FAVOUR_IPV6;
} else {
- is_valid = !(opts & QUERY_NO_IPV4);
+ is_valid = !(opts.NO_IPV4);
}
/* Get RTT for this address (if known) */
if (is_valid) {
if (reputation & KR_NS_NOIP4) {
score = KR_NS_UNKNOWN;
/* Try to start with clean slate */
- if (!(ctx->options & QUERY_NO_IPV6)) {
+ if (!(ctx->options.NO_IPV6)) {
reputation &= ~KR_NS_NOIP6;
}
- if (!(ctx->options & QUERY_NO_IPV4)) {
+ if (!(ctx->options.NO_IPV4)) {
reputation &= ~KR_NS_NOIP4;
}
}
* The fastest NS is preferred by workers until it is depleted (timeouts or degrades),
* at the same time long distance scouts probe other sources (low probability).
* Servers on TIMEOUT (depleted) can be probed by the dice roll only */
- if (score <= ns->score && (qry->flags & QUERY_NO_THROTTLE || score < KR_NS_TIMEOUT)) {
+ if (score <= ns->score && (qry->flags.NO_THROTTLE || score < KR_NS_TIMEOUT)) {
update_nsrep_set(ns, (const knot_dname_t *)k, addr_choice, score);
ns->reputation = reputation;
} else {
if ((kr_rand_uint(100) < 10) && (kr_rand_uint(KR_NS_MAX_SCORE) >= score)) {
/* If this is a low-reliability probe, go with TCP to get ICMP reachability check. */
if (score >= KR_NS_LONG) {
- qry->flags |= QUERY_TCP;
+ qry->flags.TCP = true;
}
update_nsrep_set(ns, (const knot_dname_t *)k, addr_choice, score);
ns->reputation = reputation;
*/
static void check_empty_nonterms(struct kr_query *qry, knot_pkt_t *pkt, struct kr_cache *cache, uint32_t timestamp)
{
- if (qry->flags & QUERY_NO_MINIMIZE) {
+ if (qry->flags.NO_MINIMIZE) {
return;
}
int ret = kr_cache_peek(cache, KR_CACHE_PKT, target, KNOT_RRTYPE_NS, &entry, ×tamp);
if (ret == 0) { /* Either NXDOMAIN or NODATA, start here. */
/* @todo We could stop resolution here for NXDOMAIN, but we can't because of broken CDNs */
- qry->flags |= QUERY_NO_MINIMIZE;
+ qry->flags.NO_MINIMIZE = true;
kr_make_query(qry, pkt);
return;
}
if (is_insecured) {
/* If parent is unsecured we don't want DNSSEC
* even if cut name is covered by TA. */
- qry->flags &= ~QUERY_DNSSEC_WANT;
- qry->flags |= QUERY_DNSSEC_INSECURE;
+ qry->flags.DNSSEC_WANT = false;
+ qry->flags.DNSSEC_INSECURE = true;
} else if (kr_ta_covers_qry(req->ctx, qry->zone_cut.name, KNOT_RRTYPE_NS)) {
- qry->flags |= QUERY_DNSSEC_WANT;
+ qry->flags.DNSSEC_WANT = true;
} else {
- qry->flags &= ~QUERY_DNSSEC_WANT;
+ qry->flags.DNSSEC_WANT = false;
}
struct kr_zonecut cut_found = {0};
return KR_STATE_FAIL;
}
VERBOSE_MSG(qry, "=> using root hints\n");
- qry->flags &= ~QUERY_AWAIT_CUT;
+ qry->flags.AWAIT_CUT = false;
kr_zonecut_deinit(&cut_found);
return KR_STATE_DONE;
} else if (ret != kr_ok()) {
/* Find out security status.
* Go insecure if the zone cut is provably insecure */
- if ((qry->flags & QUERY_DNSSEC_WANT) && !secured) {
+ if ((qry->flags.DNSSEC_WANT) && !secured) {
VERBOSE_MSG(qry, "=> NS is provably without DS, going insecure\n");
- qry->flags &= ~QUERY_DNSSEC_WANT;
- qry->flags |= QUERY_DNSSEC_INSECURE;
+ qry->flags.DNSSEC_WANT = false;
+ qry->flags.DNSSEC_INSECURE = true;
}
/* Zonecut name can change, check it again
* to prevent unnecessary DS & DNSKEY queries */
- if (!(qry->flags & QUERY_DNSSEC_INSECURE) &&
+ if (!(qry->flags.DNSSEC_INSECURE) &&
kr_ta_covers_qry(req->ctx, cut_found.name, KNOT_RRTYPE_NS)) {
- qry->flags |= QUERY_DNSSEC_WANT;
+ qry->flags.DNSSEC_WANT = true;
} else {
- qry->flags &= ~QUERY_DNSSEC_WANT;
+ qry->flags.DNSSEC_WANT = false;
}
/* Check if any DNSKEY found for cached cut */
- if ((qry->flags & QUERY_DNSSEC_WANT) &&
+ if ((qry->flags.DNSSEC_WANT) &&
(cut_found.key == NULL)) {
/* No DNSKEY was found for cached cut.
* If no glue were fetched for this cut,
return KR_STATE_FAIL;
}
VERBOSE_MSG(qry, "=> using root hints\n");
- qry->flags &= ~QUERY_AWAIT_CUT;
+ qry->flags.AWAIT_CUT = false;
return KR_STATE_DONE;
}
/* Copy fetched name */
* Prefer IPv6 and continue with IPv4 if not available.
*/
uint16_t next_type = 0;
- if (!(qry->flags & QUERY_AWAIT_IPV6) &&
- !(ctx->options & QUERY_NO_IPV6)) {
+ if (!(qry->flags.AWAIT_IPV6) &&
+ !(ctx->options.NO_IPV6)) {
next_type = KNOT_RRTYPE_AAAA;
- qry->flags |= QUERY_AWAIT_IPV6;
- } else if (!(qry->flags & QUERY_AWAIT_IPV4) &&
- !(ctx->options & QUERY_NO_IPV4)) {
+ qry->flags.AWAIT_IPV6 = true;
+ } else if (!(qry->flags.AWAIT_IPV4) &&
+ !(ctx->options.NO_IPV4)) {
next_type = KNOT_RRTYPE_A;
- qry->flags |= QUERY_AWAIT_IPV4;
+ qry->flags.AWAIT_IPV4 = true;
/* Hmm, no useable IPv6 then. */
qry->ns.reputation |= KR_NS_NOIP6;
kr_nsrep_update_rep(&qry->ns, qry->ns.reputation, ctx->cache_rep);
if (!next_type && qry->zone_cut.name[0] == '\0') {
VERBOSE_MSG(qry, "=> fallback to root hints\n");
kr_zonecut_set_sbelt(ctx, &qry->zone_cut);
- qry->flags |= QUERY_NO_THROTTLE; /* Pick even bad SBELT servers */
+ qry->flags.NO_THROTTLE = true; /* Pick even bad SBELT servers */
return kr_error(EAGAIN);
}
/* No IPv4 nor IPv6, flag server as unuseable. */
struct kr_query *next = qry;
if (knot_dname_is_equal(qry->ns.name, qry->sname) &&
qry->stype == next_type) {
- if (!(qry->flags & QUERY_NO_MINIMIZE)) {
- qry->flags |= QUERY_NO_MINIMIZE;
- qry->flags &= ~QUERY_AWAIT_IPV6;
- qry->flags &= ~QUERY_AWAIT_IPV4;
+ if (!(qry->flags.NO_MINIMIZE)) {
+ qry->flags.NO_MINIMIZE = true;
+ qry->flags.AWAIT_IPV6 = false;
+ qry->flags.AWAIT_IPV4 = false;
VERBOSE_MSG(qry, "=> circular dependepcy, retrying with non-minimized name\n");
} else {
qry->ns.reputation |= KR_NS_NOIP4 | KR_NS_NOIP6;
if (!next) {
return kr_error(ENOMEM);
}
- next->flags |= QUERY_NONAUTH;
+ next->flags.NONAUTH = true;
}
/* At the root level with no NS addresses, add SBELT subrequest. */
int ret = 0;
if (ret == 0) { /* Copy TA and key since it's the same cut to avoid lookup. */
kr_zonecut_copy_trust(&next->zone_cut, &qry->zone_cut);
kr_zonecut_set_sbelt(ctx, &qry->zone_cut); /* Add SBELT to parent in case query fails. */
- qry->flags |= QUERY_NO_THROTTLE; /* Pick even bad SBELT servers */
+ qry->flags.NO_THROTTLE = true; /* Pick even bad SBELT servers */
}
} else {
- next->flags |= QUERY_AWAIT_CUT;
+ next->flags.AWAIT_CUT = true;
}
return ret;
}
/* Always set SERVFAIL for bogus answers. */
if (state == KR_STATE_FAIL && rplan->pending.len > 0) {
struct kr_query *last = array_tail(rplan->pending);
- if ((last->flags & QUERY_DNSSEC_WANT) && (last->flags & QUERY_DNSSEC_BOGUS)) {
+ if ((last->flags.DNSSEC_WANT) && (last->flags.DNSSEC_BOGUS)) {
return answer_fail(request);
}
}
* Be conservative. Primary approach: check ranks of all RRs in wire.
* Only "negative answers" need special handling. */
bool secure = (last != NULL); /* suspicious otherwise */
- if (last && (last->flags & QUERY_STUB)) {
+ if (last && (last->flags.STUB)) {
secure = false; /* don't trust forwarding for now */
}
- if (last && (last->flags & QUERY_DNSSEC_OPTOUT)) {
+ if (last && (last->flags.DNSSEC_OPTOUT)) {
secure = false; /* the last answer is insecure due to opt-out */
}
* as those would also be PKT_NOERROR. */
|| (answ_all_cnames && knot_pkt_qtype(answer) != KNOT_RRTYPE_CNAME))
{
- secure = secure && (last->flags & QUERY_DNSSEC_WANT)
- && !(last->flags & (QUERY_DNSSEC_BOGUS | QUERY_DNSSEC_INSECURE));
+ secure = secure && (last->flags.DNSSEC_WANT)
+ && !(last->flags.DNSSEC_BOGUS) && !(last->flags.DNSSEC_INSECURE);
}
}
if (last) {
struct kr_query *cname_parent = last->cname_parent;
while (cname_parent != NULL) {
- if (cname_parent->flags & QUERY_DNSSEC_OPTOUT) {
+ if (cname_parent->flags.DNSSEC_OPTOUT) {
knot_wire_clear_ad(answer->wire);
break;
}
{
int ret = 0;
knot_pkt_begin(pkt, KNOT_ADDITIONAL);
- if (!(qry->flags & QUERY_SAFEMODE)) {
+ if (!(qry->flags.SAFEMODE)) {
/* Remove any EDNS records from any previous iteration. */
ret = edns_erase_and_reserve(pkt);
if (ret == 0) {
}
if (ret == 0) {
/* Stub resolution (ask for +rd and +do) */
- if (qry->flags & QUERY_STUB) {
+ if (qry->flags.STUB) {
knot_wire_set_rd(pkt->wire);
if (knot_pkt_has_dnssec(request->answer)) {
knot_edns_set_do(pkt->opt_rr);
knot_wire_set_cd(pkt->wire);
}
/* Full resolution (ask for +cd and +do) */
- } else if (qry->flags & QUERY_FORWARD) {
+ } else if (qry->flags.FORWARD) {
knot_wire_set_rd(pkt->wire);
knot_edns_set_do(pkt->opt_rr);
knot_wire_set_cd(pkt->wire);
- } else if (qry->flags & QUERY_DNSSEC_WANT) {
+ } else if (qry->flags.DNSSEC_WANT) {
knot_edns_set_do(pkt->opt_rr);
knot_wire_set_cd(pkt->wire);
}
}
/* Deferred zone cut lookup for this query. */
- qry->flags |= QUERY_AWAIT_CUT;
+ qry->flags.AWAIT_CUT = true;
/* Want DNSSEC if it's posible to secure this name (e.g. is covered by any TA) */
if ((knot_wire_get_ad(packet->wire) || knot_pkt_has_dnssec(packet)) &&
kr_ta_covers_qry(request->ctx, qname, qtype)) {
- qry->flags |= QUERY_DNSSEC_WANT;
+ qry->flags.DNSSEC_WANT = true;
}
/* Initialize answer packet */
if (cd_is_set) {
knot_wire_set_cd(answer->wire);
- } else if (qry->flags & QUERY_DNSSEC_WANT) {
+ } else if (qry->flags.DNSSEC_WANT) {
knot_wire_set_ad(answer->wire);
}
static void update_nslist_rtt(struct kr_context *ctx, struct kr_query *qry, const struct sockaddr *src)
{
/* Do not track in safe mode. */
- if (qry->flags & QUERY_SAFEMODE) {
+ if (qry->flags.SAFEMODE) {
return;
}
kr_nsrep_update_rtt(&qry->ns, src, KR_NS_PENALTY, ctx->cache_rtt, KR_NS_ADD);
}
/* Penalise resolution failures except validation failures. */
- } else if (!(qry->flags & QUERY_DNSSEC_BOGUS)) {
+ } else if (!(qry->flags.DNSSEC_BOGUS)) {
kr_nsrep_update_rtt(&qry->ns, src, KR_NS_TIMEOUT, ctx->cache_rtt, KR_NS_RESET);
WITH_VERBOSE {
char addr_str[INET6_ADDRSTRLEN];
/* Different processing for network error */
struct kr_query *qry = array_tail(rplan->pending);
- bool tried_tcp = (qry->flags & QUERY_TCP);
+ bool tried_tcp = (qry->flags.TCP);
if (!packet || packet->size == 0) {
if (tried_tcp) {
request->state = KR_STATE_FAIL;
} else {
- qry->flags |= QUERY_TCP;
+ qry->flags.TCP = true;
}
} else {
/* Packet cleared, derandomize QNAME. */
randomized_qname_case(qname_raw, qry->secret);
}
request->state = KR_STATE_CONSUME;
- if (qry->flags & QUERY_CACHED) {
+ if (qry->flags.CACHED) {
ITERATE_LAYERS(request, qry, consume, packet);
} else {
struct timeval now;
}
/* Track RTT for iterative answers */
- if (src && !(qry->flags & QUERY_CACHED)) {
+ if (src && !(qry->flags.CACHED)) {
update_nslist_score(request, qry, src, packet);
}
/* Resolution failed, invalidate current NS. */
if (request->state == KR_STATE_FAIL) {
invalidate_ns(rplan, qry);
- qry->flags &= ~QUERY_RESOLVED;
+ qry->flags.RESOLVED = false;
}
/* Pop query if resolved. */
if (request->state == KR_STATE_YIELD) {
return KR_STATE_PRODUCE; /* Requery */
- } else if (qry->flags & QUERY_RESOLVED) {
+ } else if (qry->flags.RESOLVED) {
kr_rplan_pop(rplan, qry);
- } else if (!tried_tcp && (qry->flags & QUERY_TCP)) {
+ } else if (!tried_tcp && (qry->flags.TCP)) {
return KR_STATE_PRODUCE; /* Requery over TCP */
} else { /* Clear query flags for next attempt */
- qry->flags &= ~(QUERY_CACHED|QUERY_TCP);
+ qry->flags.CACHED = false;
+ qry->flags.TCP = false;
ITERATE_LAYERS(request, qry, reset);
/* Do not finish with bogus answer. */
- if (qry->flags & QUERY_DNSSEC_BOGUS) {
+ if (qry->flags.DNSSEC_BOGUS) {
return KR_STATE_FAIL;
}
kr_zonecut_copy_trust(&next->zone_cut, &parent->zone_cut) != 0) {
return NULL;
}
- next->flags |= QUERY_NO_MINIMIZE;
- if (parent->flags & QUERY_DNSSEC_WANT) {
- next->flags |= QUERY_DNSSEC_WANT;
+ next->flags.NO_MINIMIZE = true;
+ if (parent->flags.DNSSEC_WANT) {
+ next->flags.DNSSEC_WANT = true;
}
return next;
}
map_t *negative_anchors = &request->ctx->negative_anchors;
if (qry->parent != NULL &&
- !(qry->forward_flags & QUERY_CNAME) &&
- !(qry->flags & QUERY_DNS64_MARK) &&
+ !(qry->forward_flags.CNAME) &&
+ !(qry->flags.DNS64_MARK) &&
knot_dname_in(qry->parent->zone_cut.name, qry->zone_cut.name)) {
return KR_STATE_PRODUCE;
}
- assert(qry->flags & QUERY_FORWARD);
+ assert(qry->flags.FORWARD);
if (!trust_anchors) {
- qry->flags &= ~QUERY_AWAIT_CUT;
+ qry->flags.AWAIT_CUT = false;
return KR_STATE_PRODUCE;
}
- if (qry->flags & QUERY_DNSSEC_INSECURE) {
- qry->flags &= ~QUERY_AWAIT_CUT;
+ if (qry->flags.DNSSEC_INSECURE) {
+ qry->flags.AWAIT_CUT = false;
return KR_STATE_PRODUCE;
}
- if (qry->forward_flags & QUERY_NO_MINIMIZE) {
- qry->flags &= ~QUERY_AWAIT_CUT;
+ if (qry->forward_flags.NO_MINIMIZE) {
+ qry->flags.AWAIT_CUT = false;
return KR_STATE_PRODUCE;
}
const knot_dname_t *wanted_name = qry->sname;
const knot_dname_t *start_name = qry->sname;
- if ((qry->flags & QUERY_AWAIT_CUT) && !resume) {
- qry->flags &= ~QUERY_AWAIT_CUT;
+ if ((qry->flags.AWAIT_CUT) && !resume) {
+ qry->flags.AWAIT_CUT = false;
const knot_dname_t *longest_ta = kr_ta_get_longest_name(trust_anchors, qry->sname);
if (longest_ta) {
start_name = longest_ta;
qry->zone_cut.name = knot_dname_copy(start_name, qry->zone_cut.pool);
- qry->flags |= QUERY_DNSSEC_WANT;
+ qry->flags.DNSSEC_WANT = true;
} else {
- qry->flags &= ~QUERY_DNSSEC_WANT;
+ qry->flags.DNSSEC_WANT = false;
return KR_STATE_PRODUCE;
}
}
knot_dname_is_equal(q->sname, wanted_name)) {
if (q->stype == KNOT_RRTYPE_DS) {
ds_req = true;
- if (q->flags & QUERY_DNSSEC_NODS) {
+ if (q->flags.DNSSEC_NODS) {
nods = true;
}
- if (q->flags & QUERY_CNAME) {
+ if (q->flags.CNAME) {
nods = true;
ns_exist = false;
- } else if (!(q->flags & QUERY_DNSSEC_OPTOUT)) {
+ } else if (!(q->flags.DNSSEC_OPTOUT)) {
int ret = kr_dnssec_matches_name_and_type(&request->auth_selected, q->uid,
wanted_name, KNOT_RRTYPE_NS);
ns_exist = (ret == kr_ok());
}
} else {
- if (q->flags & QUERY_CNAME) {
+ if (q->flags.CNAME) {
nods = true;
ns_exist = false;
}
return KR_STATE_DONE;
}
- if (qry->parent == NULL && (qry->flags & QUERY_CNAME) &&
+ if (qry->parent == NULL && (qry->flags.CNAME) &&
ds_req && ns_req) {
return KR_STATE_PRODUCE;
}
/* Disable DNSSEC if it enters NTA. */
if (kr_ta_get(negative_anchors, wanted_name)){
VERBOSE_MSG(qry, ">< negative TA, going insecure\n");
- qry->flags &= ~QUERY_DNSSEC_WANT;
+ qry->flags.DNSSEC_WANT = false;
}
/* Enable DNSSEC if enters a new island of trust. */
- bool want_secured = (qry->flags & QUERY_DNSSEC_WANT) &&
+ bool want_secured = (qry->flags.DNSSEC_WANT) &&
!knot_wire_get_cd(request->answer->wire);
- if (!(qry->flags & QUERY_DNSSEC_WANT) &&
+ if (!(qry->flags.DNSSEC_WANT) &&
!knot_wire_get_cd(request->answer->wire) &&
kr_ta_get(trust_anchors, wanted_name)) {
- qry->flags |= QUERY_DNSSEC_WANT;
+ qry->flags.DNSSEC_WANT = true;
want_secured = true;
WITH_VERBOSE {
char qname_str[KNOT_DNAME_MAXLEN];
/* Disable DNSSEC if it enters NTA. */
if (kr_ta_get(negative_anchors, qry->zone_cut.name)){
VERBOSE_MSG(qry, ">< negative TA, going insecure\n");
- qry->flags &= ~QUERY_DNSSEC_WANT;
- qry->flags |= QUERY_DNSSEC_INSECURE;
+ qry->flags.DNSSEC_WANT = false;
+ qry->flags.DNSSEC_INSECURE = true;
}
- if (qry->flags & QUERY_DNSSEC_NODS) {
+ if (qry->flags.DNSSEC_NODS) {
/* This is the next query iteration with minimized qname.
* At previous iteration DS non-existance has been proven */
- qry->flags &= ~QUERY_DNSSEC_NODS;
- qry->flags &= ~QUERY_DNSSEC_WANT;
- qry->flags |= QUERY_DNSSEC_INSECURE;
+ qry->flags.DNSSEC_NODS = false;
+ qry->flags.DNSSEC_WANT = false;
+ qry->flags.DNSSEC_INSECURE = true;
}
/* Enable DNSSEC if entering a new (or different) island of trust,
* and update the TA RRset if required. */
- bool want_secured = (qry->flags & QUERY_DNSSEC_WANT) &&
+ bool want_secured = (qry->flags.DNSSEC_WANT) &&
!knot_wire_get_cd(request->answer->wire);
knot_rrset_t *ta_rr = kr_ta_get(trust_anchors, qry->zone_cut.name);
if (!knot_wire_get_cd(request->answer->wire) && ta_rr) {
- qry->flags |= QUERY_DNSSEC_WANT;
+ qry->flags.DNSSEC_WANT = true;
want_secured = true;
if (qry->zone_cut.trust_anchor == NULL
static int zone_cut_check(struct kr_request *request, struct kr_query *qry, knot_pkt_t *packet)
{
/* Stub mode, just forward and do not solve cut. */
- if (qry->flags & QUERY_STUB) {
+ if (qry->flags.STUB) {
return KR_STATE_PRODUCE;
}
/* Forwarding to upstream resolver mode.
* Since forwarding targets already are in qry->ns -
* cut fetching is not needed. */
- if (qry->flags & QUERY_FORWARD) {
+ if (qry->flags.FORWARD) {
return forward_trust_chain_check(request, qry, false);
}
- if (!(qry->flags & QUERY_AWAIT_CUT)) {
+ if (!(qry->flags.AWAIT_CUT)) {
/* The query was resolved from cache.
* Spawn DS \ DNSKEY requests if needed and exit */
return trust_chain_check(request, qry);
return KR_STATE_FAIL;
}
VERBOSE_MSG(qry, "=> using root hints\n");
- qry->flags &= ~QUERY_AWAIT_CUT;
+ qry->flags.AWAIT_CUT = false;
return KR_STATE_DONE;
}
} while (state == KR_STATE_CONSUME);
/* Update minimized QNAME if zone cut changed */
- if (qry->zone_cut.name[0] != '\0' && !(qry->flags & QUERY_NO_MINIMIZE)) {
+ if (qry->zone_cut.name[0] != '\0' && !(qry->flags.NO_MINIMIZE)) {
if (kr_make_query(qry, packet) != 0) {
return KR_STATE_FAIL;
}
}
- qry->flags &= ~QUERY_AWAIT_CUT;
+ qry->flags.AWAIT_CUT = false;
/* Check trust chain */
return trust_chain_check(request, qry);
if (qry->deferred != NULL) {
/* @todo: Refactoring validator, check trust chain before resuming. */
int state = 0;
- if (((qry->flags & QUERY_FORWARD) == 0) ||
- ((qry->stype == KNOT_RRTYPE_DS) && (qry->flags & QUERY_CNAME))) {
+ if (((qry->flags.FORWARD) == 0) ||
+ ((qry->stype == KNOT_RRTYPE_DS) && (qry->flags.CNAME))) {
state = trust_chain_check(request, qry);
} else {
state = forward_trust_chain_check(request, qry, true);
} else {
/* Caller is interested in always tracking a zone cut, even if the answer is cached
* this is normally not required, and incurrs another cache lookups for cached answer. */
- if (qry->flags & QUERY_ALWAYS_CUT) {
+ if (qry->flags.ALWAYS_CUT) {
switch(zone_cut_check(request, qry, packet)) {
case KR_STATE_FAIL: return KR_STATE_FAIL;
case KR_STATE_DONE: return KR_STATE_PRODUCE;
}
/* Update zone cut, spawn new subrequests. */
- if (!(qry->flags & QUERY_STUB)) {
+ if (!(qry->flags.STUB)) {
int state = zone_cut_check(request, qry, packet);
switch(state) {
case KR_STATE_FAIL: return KR_STATE_FAIL;
/* Root DNSKEY must be fetched from the hints to avoid chicken and egg problem. */
if (qry->sname[0] == '\0' && qry->stype == KNOT_RRTYPE_DNSKEY) {
kr_zonecut_set_sbelt(request->ctx, &qry->zone_cut);
- qry->flags |= QUERY_NO_THROTTLE; /* Pick even bad SBELT servers */
+ qry->flags.NO_THROTTLE = true; /* Pick even bad SBELT servers */
}
kr_nsrep_elect(qry, request->ctx);
if (qry->ns.score > KR_NS_MAX_SCORE) {
gettimeofday(&qry->timestamp, NULL);
*dst = &qry->ns.addr[0].ip;
- *type = (qry->flags & QUERY_TCP) ? SOCK_STREAM : SOCK_DGRAM;
+ *type = (qry->flags.TCP) ? SOCK_STREAM : SOCK_DGRAM;
return request->state;
}
}
inet_ntop(addr->sa_family, kr_inaddr(&qry->ns.addr[i].ip), ns_str, sizeof(ns_str));
VERBOSE_MSG(qry, "=> querying: '%s' score: %u zone cut: '%s' m12n: '%s' type: '%s' proto: '%s'\n",
- ns_str, qry->ns.score, zonecut_str, qname_str, type_str, (qry->flags & QUERY_TCP) ? "tcp" : "udp");
+ ns_str, qry->ns.score, zonecut_str, qname_str, type_str, (qry->flags.TCP) ? "tcp" : "udp");
break;
}}
: 0;
/* When forwarding, keep the nameserver addresses. */
- if (parent && (parent->flags & qry->flags & QUERY_FORWARD)) {
+ if (parent && parent->flags.FORWARD && qry->flags.FORWARD) {
ret = kr_nsrep_copy_set(&qry->ns, &parent->ns);
if (ret) {
query_free(rplan->pool, qry);
unsigned *cached = lru_get_try(ctx->cache_rep,
(const char *)ns_name, knot_dname_size(ns_name));
unsigned reputation = (cached) ? *cached : 0;
- if (!(reputation & KR_NS_NOIP4) && !(ctx->options & QUERY_NO_IPV4)) {
+ if (!(reputation & KR_NS_NOIP4) && !(ctx->options.NO_IPV4)) {
fetch_addr(cut, &ctx->cache, ns_name, KNOT_RRTYPE_A, timestamp);
}
- if (!(reputation & KR_NS_NOIP6) && !(ctx->options & QUERY_NO_IPV6)) {
+ if (!(reputation & KR_NS_NOIP6) && !(ctx->options.NO_IPV6)) {
fetch_addr(cut, &ctx->cache, ns_name, KNOT_RRTYPE_AAAA, timestamp);
}
}
return ctx->state;
}
- if (!cookie_ctx->clnt.enabled || (qry->flags & QUERY_TCP)) {
+ if (!cookie_ctx->clnt.enabled || (qry->flags.TCP)) {
return ctx->state;
}
#endif
if (rcode == KNOT_RCODE_BADCOOKIE) {
struct kr_query *next = NULL;
- if (!(qry->flags & QUERY_BADCOOKIE_AGAIN)) {
+ if (!(qry->flags.BADCOOKIE_AGAIN)) {
/* Received first BADCOOKIE, regenerate query. */
next = kr_rplan_push(&req->rplan, qry->parent,
qry->sname, qry->sclass,
if (next) {
VERBOSE_MSG(NULL, "%s\n", "BADCOOKIE querying again");
- qry->flags |= QUERY_BADCOOKIE_AGAIN;
+ qry->flags.BADCOOKIE_AGAIN = true;
} else {
/*
* Either the planning of the second request failed or
* RFC7873 5.3 says that TCP should be used. Currently
* we always expect that the server doesn't support TCP.
*/
- qry->flags &= ~QUERY_BADCOOKIE_AGAIN;
+ qry->flags.BADCOOKIE_AGAIN = false;
return KR_STATE_FAIL;
}
if (rplan->resolved.len > 0) {
struct kr_query *last = array_tail(rplan->resolved);
/* Only add query_zone when not answered from cache */
- if (!(last->flags & QUERY_CACHED)) {
+ if (!(last->flags.CACHED)) {
const knot_dname_t *zone_cut_name = last->zone_cut.name;
if (zone_cut_name != NULL) {
m.query_zone.data = (uint8_t *)zone_cut_name;
}
VERBOSE_MSG(qry, "<= answered from hints\n");
- qry->flags &= ~QUERY_DNSSEC_WANT; /* Never authenticated */
+ qry->flags.DNSSEC_WANT = false; /* Never authenticated */
- qry->flags |= QUERY_CACHED|QUERY_NO_MINIMIZE;
+ qry->flags.CACHED = true;
+ qry->flags.NO_MINIMIZE = true;
pkt->parsed = pkt->size;
knot_wire_set_qr(pkt->wire);
for (size_t i = 0; i < rplan->resolved.len; ++i) {
/* Sample queries leading to iteration or expiring */
struct kr_query *qry = rplan->resolved.at[i];
- if ((qry->flags & QUERY_CACHED) && !(qry->flags & QUERY_EXPIRING)) {
+ if ((qry->flags.CACHED) && !(qry->flags.EXPIRING)) {
continue;
}
int key_len = collect_key(key, qry->sname, qry->stype);
- if (qry->flags & QUERY_EXPIRING) {
+ if (qry->flags.EXPIRING) {
unsigned *count = lru_get_new(data->queries.expiring, key, key_len);
if (count)
*count += 1;
{
struct kr_request *req = ctx->req;
struct kr_query *qry = req->current_query;
- if (qry->flags & QUERY_CACHED || !req->upstream.addr) {
+ if (qry->flags.CACHED || !req->upstream.addr) {
return ctx->state;
}
}
/* Observe the final query. */
struct kr_query *last = array_tail(rplan->resolved);
- if (last->flags & QUERY_CACHED) {
+ if (last->flags.CACHED) {
stat_const_add(data, metric_answer_cached, 1);
}
}