git.ipfire.org Git - thirdparty/knot-resolver.git/commitdiff
qflags: WIP refactor - regex replacements
author Vladimír Čunát <vladimir.cunat@nic.cz>
Tue, 1 Aug 2017 13:23:57 +0000 (15:23 +0200)
committer Vladimír Čunát <vladimir.cunat@nic.cz>
Tue, 1 Aug 2017 14:51:04 +0000 (16:51 +0200)
sed -e 's/flags |= QUERY_\([A-Z0-9_]*\);/flags.\1 = true;/g' \
        -e 's/flags &= ~QUERY_\([A-Z0-9_]*\);/flags.\1 = false;/g' \
        -e 's/\(flags\|options\|opts\) & QUERY_\([A-Z0-9_]*\)\()\| ||\)/\1.\2\3/g' \
        -i $(git grep -l QUERY_)
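
For orientation (not part of this commit): the replacements assume the query flags move from a single bit mask tested against QUERY_* constants to a struct with one boolean per flag. A minimal C sketch of the two representations — the struct name kr_qflags and the enum values are assumed here purely for illustration:

#include <stdbool.h>
#include <stdint.h>

/* Before: query flags as one bit mask (values illustrative). */
enum { QUERY_RESOLVED = 1 << 0, QUERY_CACHED = 1 << 1 /* ... */ };

/* After: one boolean member per former QUERY_* bit
 * (struct name assumed for this sketch). */
struct kr_qflags {
        bool RESOLVED;
        bool CACHED;
        /* ... */
};

static void sketch(void)
{
        uint32_t old_flags = 0;
        old_flags |= QUERY_RESOLVED;        /* set   -> flags.RESOLVED = true;  */
        old_flags &= ~QUERY_RESOLVED;       /* clear -> flags.RESOLVED = false; */
        if (old_flags & QUERY_CACHED) { }   /* test  -> if (flags.CACHED)       */

        struct kr_qflags new_flags = { 0 }; /* same three operations, new form  */
        new_flags.RESOLVED = true;
        new_flags.RESOLVED = false;
        if (new_flags.CACHED) { }
}

The three sed expressions map exactly onto these three patterns: set, clear, and single-flag test (when the test is followed by ')' or ' ||').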

13 files changed:
lib/README.rst
lib/layer/iterate.c
lib/layer/pktcache.c
lib/layer/rrcache.c
lib/layer/validate.c
lib/nsrep.c
lib/resolve.c
lib/rplan.c
lib/zonecut.c
modules/cookies/cookiemonster.c
modules/dnstap/dnstap.c
modules/hints/hints.c
modules/stats/stats.c

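The regex only rewrites single-flag operations, so combined-mask expressions such as qry->flags & (QUERY_FORWARD | QUERY_CNAME) are left untouched in the diff below (see the context lines in pktcache.c and validate.c) and will need manual follow-up. A self-contained sketch of what that follow-up might look like, with hypothetical names and flag values:

#include <stdbool.h>
#include <stdint.h>

enum { QUERY_FORWARD = 1 << 0, QUERY_CNAME = 1 << 1 };  /* illustrative values */
struct qflags_sketch { bool FORWARD; bool CNAME; };     /* hypothetical struct */

/* Combined-mask test as it still appears below (e.g. pktcache_stash): */
static bool is_fwd_cname_old(uint32_t flags)
{
        return (flags & (QUERY_FORWARD | QUERY_CNAME)) ==
               (QUERY_FORWARD | QUERY_CNAME);
}

/* Possible manual rewrite once the flags become booleans: */
static bool is_fwd_cname_new(const struct qflags_sketch *flags)
{
        return flags->FORWARD && flags->CNAME;
}
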
index cb1e10584803104b1f1c5e683237d1310f9fce24..5e662664c1197d43265f828af7662d6465be9d11 100644 (file)
@@ -113,7 +113,7 @@ This is only passive processing of the incoming answer. If you want to change th
                if (can_satisfy(qry)) {
                        /* This flag makes the resolver move the query
                         * to the "resolved" list. */
-                       qry->flags |= QUERY_RESOLVED;
+                       qry->flags.RESOLVED = true;
                        return KR_STATE_DONE;
                }
 
index b8ce5d01a40779d221e40a173229547695059fdd..25c168452849c99e30572cc25bfe35172b3d767b 100644 (file)
@@ -161,7 +161,7 @@ static int update_nsaddr(const knot_rrset_t *rr, struct kr_query *query)
                        knot_dname_to_str(name_str, rr->owner, sizeof(name_str));
                        inet_ntop(af, addr, addr_str, sizeof(addr_str));
                }
-               if (!(query->flags & QUERY_ALLOW_LOCAL) &&
+               if (!(query->flags.ALLOW_LOCAL) &&
                        !is_valid_addr(addr, addr_len)) {
                        QVERBOSE_MSG(query, "<= ignoring invalid glue for "
                                     "'%s': '%s'\n", name_str, addr_str);
@@ -192,11 +192,11 @@ static void fetch_glue(knot_pkt_t *pkt, const knot_dname_t *ns, struct kr_reques
                                continue;
                        }
                        if ((rr->type == KNOT_RRTYPE_A) &&
-                           (req->ctx->options & QUERY_NO_IPV4)) {
+                           (req->ctx->options.NO_IPV4)) {
                                continue;
                        }
                        if ((rr->type == KNOT_RRTYPE_AAAA) &&
-                           (req->ctx->options & QUERY_NO_IPV6)) {
+                           (req->ctx->options.NO_IPV6)) {
                                continue;
                        }
                        (void) update_nsaddr(rr, req->current_query);
@@ -275,9 +275,9 @@ static int update_cut(knot_pkt_t *pkt, const knot_rrset_t *rr,
                }
                kr_zonecut_add(cut, ns_name, NULL);
                /* Choose when to use glue records. */
-               if (qry->flags & QUERY_PERMISSIVE) {
+               if (qry->flags.PERMISSIVE) {
                        fetch_glue(pkt, ns_name, req);
-               } else if (qry->flags & QUERY_STRICT) {
+               } else if (qry->flags.STRICT) {
                        /* Strict mode uses only mandatory glue. */
                        if (knot_dname_in(cut->name, ns_name))
                                fetch_glue(pkt, ns_name, req);
@@ -299,7 +299,7 @@ static uint8_t get_initial_rank(const knot_rrset_t *rr, const struct kr_query *q
        /* For RRSIGs, ensure the KR_RANK_AUTH flag corresponds to the signed RR. */
        uint16_t type = kr_rrset_type_maysig(rr);
 
-       if (qry->flags & QUERY_CACHED) {
+       if (qry->flags.CACHED) {
                return rr->additional ? *(uint8_t *)rr->additional : KR_RANK_OMIT;
                /* ^^ Current use case for "cached" RRs without rank: hints module. */
        }
@@ -357,10 +357,10 @@ static int pick_authority(knot_pkt_t *pkt, struct kr_request *req, bool to_wire)
 static int process_authority(knot_pkt_t *pkt, struct kr_request *req)
 {
        struct kr_query *qry = req->current_query;
-       assert(!(qry->flags & QUERY_STUB));
+       assert(!(qry->flags.STUB));
 
        int result = KR_STATE_CONSUME;
-       if (qry->flags & QUERY_FORWARD) {
+       if (qry->flags.FORWARD) {
                return result;
        }
 
@@ -410,7 +410,7 @@ static int process_authority(knot_pkt_t *pkt, struct kr_request *req)
        }
 
 
-       if ((qry->flags & QUERY_DNSSEC_WANT) && (result == KR_STATE_CONSUME)) {
+       if ((qry->flags.DNSSEC_WANT) && (result == KR_STATE_CONSUME)) {
                if (knot_wire_get_aa(pkt->wire) == 0 &&
                    knot_wire_get_ancount(pkt->wire) == 0 &&
                    ns_record_exists) {
@@ -435,7 +435,7 @@ static void finalize_answer(knot_pkt_t *pkt, struct kr_query *qry, struct kr_req
 static int unroll_cname(knot_pkt_t *pkt, struct kr_request *req, bool referral, const knot_dname_t **cname_ret)
 {
        struct kr_query *query = req->current_query;
-       assert(!(query->flags & QUERY_STUB));
+       assert(!(query->flags.STUB));
        /* Process answer type */
        const knot_pktsection_t *an = knot_pkt_section(pkt, KNOT_ANSWER);
        const knot_dname_t *cname = NULL;
@@ -443,7 +443,7 @@ static int unroll_cname(knot_pkt_t *pkt, struct kr_request *req, bool referral,
        unsigned cname_chain_len = 0;
        bool is_final = (query->parent == NULL);
        uint32_t iter_count = 0;
-       bool strict_mode = (query->flags & QUERY_STRICT);
+       bool strict_mode = (query->flags.STRICT);
        do {
                /* CNAME was found at previous iteration, but records may not follow the correct order.
                 * Try to find records for pending_cname owner from section start. */
@@ -468,7 +468,7 @@ static int unroll_cname(knot_pkt_t *pkt, struct kr_request *req, bool referral,
                                        return KR_STATE_FAIL;
                                }
                                if (rrsig_labels < cname_labels) {
-                                       query->flags |= QUERY_DNSSEC_WEXPAND;
+                                       query->flags.DNSSEC_WEXPAND = true;
                                }
                        }
 
@@ -546,7 +546,7 @@ static int process_referral_answer(knot_pkt_t *pkt, struct kr_request *req)
                return KR_STATE_FAIL;
        }
        struct kr_query *query = req->current_query;
-       if (!(query->flags & QUERY_CACHED)) {
+       if (!(query->flags.CACHED)) {
                /* If not cached (i.e. got from upstream)
                 * make sure that this is not an authoritative answer
                 * (even with AA=1) for other layers.
@@ -613,13 +613,13 @@ static int process_answer(knot_pkt_t *pkt, struct kr_request *req)
        if (!knot_dname_is_equal(knot_pkt_qname(pkt), query->sname) &&
            (pkt_class & (PKT_NOERROR|PKT_NXDOMAIN|PKT_REFUSED|PKT_NODATA))) {
                VERBOSE_MSG("<= found cut, retrying with non-minimized name\n");
-               query->flags |= QUERY_NO_MINIMIZE;
+               query->flags.NO_MINIMIZE = true;
                return KR_STATE_CONSUME;
        }
 
        /* This answer didn't improve resolution chain, therefore must be authoritative (relaxed to negative). */
        if (!is_authoritative(pkt, query)) {
-               if (!(query->flags & QUERY_FORWARD) &&
+               if (!(query->flags.FORWARD) &&
                    pkt_class & (PKT_NXDOMAIN|PKT_NODATA)) {
                        VERBOSE_MSG("<= lame response: non-auth sent negative response\n");
                        return KR_STATE_FAIL;
@@ -635,17 +635,17 @@ static int process_answer(knot_pkt_t *pkt, struct kr_request *req)
        /* Make sure that this is an authoritative answer (even with AA=0) for other layers */
        knot_wire_set_aa(pkt->wire);
        /* Either way it resolves current query. */
-       query->flags |= QUERY_RESOLVED;
+       query->flags.RESOLVED = true;
        /* Follow canonical name as next SNAME. */
        if (!knot_dname_is_equal(cname, query->sname)) {
                /* Check if target record has been already copied */
-               query->flags |= QUERY_CNAME;
+               query->flags.CNAME = true;
                if (is_final) {
                        state = process_final(pkt, req, cname);
                        if (state != kr_ok()) {
                                return state;
                        }
-               } else if ((query->flags & QUERY_FORWARD) &&
+               } else if ((query->flags.FORWARD) &&
                           ((query->stype == KNOT_RRTYPE_DS) ||
                            (query->stype == KNOT_RRTYPE_NS))) {
                        /* CNAME'ed answer for DS or NS subquery.
@@ -667,9 +667,9 @@ static int process_answer(knot_pkt_t *pkt, struct kr_request *req)
                if (!next) {
                        return KR_STATE_FAIL;
                }
-               next->flags |= QUERY_AWAIT_CUT;
-               if (query->flags & QUERY_FORWARD) {
-                       next->forward_flags |= QUERY_CNAME;
+               next->flags.AWAIT_CUT = true;
+               if (query->flags.FORWARD) {
+                       next->forward_flags.CNAME = true;
                        if (query->parent == NULL) {
                                state = kr_nsrep_copy_set(&next->ns, &query->ns);
                                if (state != kr_ok()) {
@@ -681,12 +681,12 @@ static int process_answer(knot_pkt_t *pkt, struct kr_request *req)
                /* Want DNSSEC if and only if it's posible to secure
                 * this name (i.e. iff it is covered by a TA) */
                if (kr_ta_covers_qry(req->ctx, cname, query->stype)) {
-                       next->flags |= QUERY_DNSSEC_WANT;
+                       next->flags.DNSSEC_WANT = true;
                } else {
-                       next->flags &= ~QUERY_DNSSEC_WANT;
+                       next->flags.DNSSEC_WANT = false;
                }
-               if (!(query->flags & QUERY_FORWARD) ||
-                   (query->flags & QUERY_DNSSEC_WEXPAND)) {
+               if (!(query->flags.FORWARD) ||
+                   (query->flags.DNSSEC_WEXPAND)) {
                        state = pick_authority(pkt, req, false);
                        if (state != kr_ok()) {
                                return KR_STATE_FAIL;
@@ -737,7 +737,7 @@ static int process_answer(knot_pkt_t *pkt, struct kr_request *req)
 static int process_stub(knot_pkt_t *pkt, struct kr_request *req)
 {
        struct kr_query *query = req->current_query;
-       assert(query->flags & QUERY_STUB);
+       assert(query->flags.STUB);
        /* Pick all answer RRs. */
        const knot_pktsection_t *an = knot_pkt_section(pkt, KNOT_ANSWER);
        for (unsigned i = 0; i < an->count; ++i) {
@@ -753,7 +753,7 @@ static int process_stub(knot_pkt_t *pkt, struct kr_request *req)
        }
 
        knot_wire_set_aa(pkt->wire);
-       query->flags |= QUERY_RESOLVED;
+       query->flags.RESOLVED = true;
        /* Pick authority RRs. */
        int pkt_class = kr_response_classify(pkt);
        const bool to_wire = ((pkt_class & (PKT_NXDOMAIN|PKT_NODATA)) != 0);
@@ -859,13 +859,13 @@ static int resolve_badmsg(knot_pkt_t *pkt, struct kr_request *req, struct kr_que
 
 #ifndef STRICT_MODE
        /* Work around broken auths/load balancers */
-       if (query->flags & QUERY_SAFEMODE) {
+       if (query->flags.SAFEMODE) {
                return resolve_error(pkt, req);
-       } else if (query->flags & QUERY_NO_MINIMIZE) {
-               query->flags |= QUERY_SAFEMODE;
+       } else if (query->flags.NO_MINIMIZE) {
+               query->flags.SAFEMODE = true;
                return KR_STATE_DONE;
        } else {
-               query->flags |= QUERY_NO_MINIMIZE;
+               query->flags.NO_MINIMIZE = true;
                return KR_STATE_DONE;
        }
 #else
@@ -887,7 +887,7 @@ static int resolve(kr_layer_t *ctx, knot_pkt_t *pkt)
        }
 
        WITH_VERBOSE {
-       if (query->flags & QUERY_TRACE) {
+       if (query->flags.TRACE) {
                VERBOSE_MSG("<= answer received:\n");
                kr_pkt_print(pkt);
        }
@@ -913,17 +913,17 @@ static int resolve(kr_layer_t *ctx, knot_pkt_t *pkt)
                VERBOSE_MSG("<= ignoring mismatching response\n");
                /* Force TCP, to work around authoritatives messing up question
                 * without yielding to spoofed responses. */
-               query->flags |= QUERY_TCP;
+               query->flags.TCP = true;
                return resolve_badmsg(pkt, req, query);
        } else if (knot_wire_get_tc(pkt->wire)) {
                VERBOSE_MSG("<= truncated response, failover to TCP\n");
                if (query) {
                        /* Fail if already on TCP. */
-                       if (query->flags & QUERY_TCP) {
+                       if (query->flags.TCP) {
                                VERBOSE_MSG("<= TC=1 with TCP, bailing out\n");
                                return resolve_error(pkt, req);
                        }
-                       query->flags |= QUERY_TCP;
+                       query->flags.TCP = true;
                }
                return KR_STATE_CONSUME;
        }
@@ -949,7 +949,7 @@ static int resolve(kr_layer_t *ctx, knot_pkt_t *pkt)
                        query->fails = 0; /* Reset per-query counter. */
                        return resolve_error(pkt, req);
                } else {
-                       query->flags |= QUERY_NO_MINIMIZE; /* Drop minimisation as a safe-guard. */
+                       query->flags.NO_MINIMIZE = true; /* Drop minimisation as a safe-guard. */
                        return KR_STATE_CONSUME;
                }
        }
@@ -963,7 +963,7 @@ static int resolve(kr_layer_t *ctx, knot_pkt_t *pkt)
        }
 
        /* Forwarding/stub mode is special. */
-       if (query->flags & QUERY_STUB) {
+       if (query->flags.STUB) {
                return process_stub(pkt, req);
        }
 
index 777e8e1e27f2a09c670265ff12d5b24d84db2214..e902c29e5889898be376d5521e277e202168b7f4 100644 (file)
@@ -78,7 +78,7 @@ static int loot_pktcache(struct kr_context *ctx, knot_pkt_t *pkt,
        uint8_t lowest_rank = KR_RANK_INITIAL | KR_RANK_AUTH;
        /* There's probably little sense for NONAUTH in pktcache. */
 
-       if (!knot_wire_get_cd(req->answer->wire) && !(qry->flags & QUERY_STUB)) {
+       if (!knot_wire_get_cd(req->answer->wire) && !(qry->flags.STUB)) {
                /* Records not present under any TA don't have their security verified at all. */
                bool ta_covers = kr_ta_covers_qry(ctx, qry->sname, qry->stype);
                /* ^ TODO: performance? */
@@ -108,8 +108,8 @@ static int loot_pktcache(struct kr_context *ctx, knot_pkt_t *pkt,
 
        /* Rank-related fixups.  Add rank into the additional field. */
        if (kr_rank_test(entry->rank, KR_RANK_INSECURE)) {
-               qry->flags |= QUERY_DNSSEC_INSECURE;
-               qry->flags &= ~QUERY_DNSSEC_WANT;
+               qry->flags.DNSSEC_INSECURE = true;
+               qry->flags.DNSSEC_WANT = false;
        }
        for (size_t i = 0; i < pkt->rrset_count; ++i) {
                assert(!pkt->rr[i].additional);
@@ -143,14 +143,14 @@ static int pktcache_peek(kr_layer_t *ctx, knot_pkt_t *pkt)
        struct kr_request *req = ctx->req;
        struct kr_query *qry = req->current_query;
        if (ctx->state & (KR_STATE_FAIL|KR_STATE_DONE) ||
-           (qry->flags & QUERY_NO_CACHE)) {
+           (qry->flags.NO_CACHE)) {
                return ctx->state; /* Already resolved/failed */
        }
        /* Both caches only peek for qry->sname and that would be useless
         * to repeat on every iteration, so disable it from now on.
         * Note: it's important to skip this if rrcache sets KR_STATE_DONE,
         * as CNAME chains need more iterations to get fetched. */
-       qry->flags |= QUERY_NO_CACHE;
+       qry->flags.NO_CACHE = true;
 
        if (knot_pkt_qclass(pkt) != KNOT_CLASS_IN) {
                return ctx->state; /* Only IN class */
@@ -162,10 +162,10 @@ static int pktcache_peek(kr_layer_t *ctx, knot_pkt_t *pkt)
        if (ret == 0) {
                qry->flags |= QUERY_CACHED|QUERY_NO_MINIMIZE;
                if (flags & KR_CACHE_FLAG_WCARD_PROOF) {
-                       qry->flags |= QUERY_DNSSEC_WEXPAND;
+                       qry->flags.DNSSEC_WEXPAND = true;
                }
                if (flags & KR_CACHE_FLAG_OPTOUT) {
-                       qry->flags |= QUERY_DNSSEC_OPTOUT;
+                       qry->flags.DNSSEC_OPTOUT = true;
                }
                pkt->parsed = pkt->size;
                knot_wire_set_qr(pkt->wire);
@@ -219,7 +219,7 @@ static int pktcache_stash(kr_layer_t *ctx, knot_pkt_t *pkt)
        struct kr_query *qry = req->current_query;
        /* Cache only answers that make query resolved (i.e. authoritative)
         * that didn't fail during processing and are negative. */
-       if (qry->flags & QUERY_CACHED || ctx->state & KR_STATE_FAIL) {
+       if (qry->flags.CACHED || ctx->state & KR_STATE_FAIL) {
                return ctx->state; /* Don't cache anything if failed. */
        }
        /* Cache only authoritative answers from IN class. */
@@ -230,7 +230,7 @@ static int pktcache_stash(kr_layer_t *ctx, knot_pkt_t *pkt)
        const uint16_t qtype = knot_pkt_qtype(pkt);
        const bool is_eligible = (knot_rrtype_is_metatype(qtype) || qtype == KNOT_RRTYPE_RRSIG);
        bool is_negative = kr_response_classify(pkt) & (PKT_NODATA|PKT_NXDOMAIN);
-       bool wcard_expansion = (qry->flags & QUERY_DNSSEC_WEXPAND);
+       bool wcard_expansion = (qry->flags.DNSSEC_WEXPAND);
        if (is_negative && ((qry->flags & (QUERY_FORWARD | QUERY_CNAME)) ==
            (QUERY_FORWARD | QUERY_CNAME))) {
                /* Don't cache CNAME'ed NXDOMAIN answer in forwarding mode
@@ -261,24 +261,24 @@ static int pktcache_stash(kr_layer_t *ctx, knot_pkt_t *pkt)
 
        /* If cd bit is set or we got answer via non-validated forwarding,
         * make the rank bad; otherwise it depends on flags. */
-       if (knot_wire_get_cd(req->answer->wire) || qry->flags & QUERY_STUB) {
+       if (knot_wire_get_cd(req->answer->wire) || qry->flags.STUB) {
                kr_rank_set(&header.rank, KR_RANK_OMIT);
        } else {
-               if (qry->flags & QUERY_DNSSEC_BOGUS) {
+               if (qry->flags.DNSSEC_BOGUS) {
                        kr_rank_set(&header.rank, KR_RANK_BOGUS);
-               } else if (qry->flags & QUERY_DNSSEC_INSECURE) {
+               } else if (qry->flags.DNSSEC_INSECURE) {
                        kr_rank_set(&header.rank, KR_RANK_INSECURE);
-               } else if (qry->flags & QUERY_DNSSEC_WANT) {
+               } else if (qry->flags.DNSSEC_WANT) {
                        kr_rank_set(&header.rank, KR_RANK_SECURE);
                }
        }
        VERBOSE_MSG(qry, "=> candidate rank: 0%0.2o\n", header.rank);
 
        /* Set cache flags */
-       if (qry->flags & QUERY_DNSSEC_WEXPAND) {
+       if (qry->flags.DNSSEC_WEXPAND) {
                header.flags |= KR_CACHE_FLAG_WCARD_PROOF;
        }
-       if (qry->flags & QUERY_DNSSEC_OPTOUT) {
+       if (qry->flags.DNSSEC_OPTOUT) {
                header.flags |= KR_CACHE_FLAG_OPTOUT;
        }
 
index 2a25c1af25e8873850fdd7ca79f0ea7618ac6579..fbf5164c323a708523ec6bf4ed49d624ddf87a1c 100644 (file)
@@ -89,13 +89,13 @@ static int loot_rr(struct kr_cache *cache, knot_pkt_t *pkt, const knot_dname_t *
        }
 
        if (is_expiring(&cache_rr, drift)) {
-               qry->flags |= QUERY_EXPIRING;
+               qry->flags.EXPIRING = true;
        }
 
        if ((*flags) & KR_CACHE_FLAG_WCARD_PROOF) {
                /* Record was found, but wildcard answer proof is needed.
                 * Do not update packet, try to fetch whole packet from pktcache instead. */
-               qry->flags |= QUERY_DNSSEC_WEXPAND;
+               qry->flags.DNSSEC_WEXPAND = true;
                return kr_error(ENOENT);
        }
 
@@ -157,7 +157,7 @@ static int loot_rrcache(struct kr_request *req, knot_pkt_t *pkt,
        uint8_t rank  = 0;
        uint8_t flags = 0;
        uint8_t lowest_rank = KR_RANK_INITIAL | KR_RANK_AUTH;
-       if (qry->flags & QUERY_NONAUTH) {
+       if (qry->flags.NONAUTH) {
                lowest_rank = KR_RANK_INITIAL;
                /* Note: there's little sense in validation status for non-auth records.
                 * In case of using NONAUTH to get NS IPs, knowing that you ask correct
@@ -179,7 +179,7 @@ static int loot_rrcache(struct kr_request *req, knot_pkt_t *pkt,
        int ret = loot_rr(cache, pkt, qry->sname, qry->sclass, rrtype, qry,
                          &rank, &flags, 0, lowest_rank);
        if (ret != 0 && rrtype != KNOT_RRTYPE_CNAME
-           && !(qry->flags & QUERY_STUB)) {
+           && !(qry->flags.STUB)) {
                /* Chase CNAME if no direct hit.
                 * We avoid this in STUB mode because the current iterator
                 * (process_stub()) is unable to iterate in STUB mode to follow
@@ -193,13 +193,13 @@ static int loot_rrcache(struct kr_request *req, knot_pkt_t *pkt,
        }
 
        if (kr_rank_test(rank, KR_RANK_INSECURE)) {
-               qry->flags |= QUERY_DNSSEC_INSECURE;
-               qry->flags &= ~QUERY_DNSSEC_WANT;
+               qry->flags.DNSSEC_INSECURE = true;
+               qry->flags.DNSSEC_WANT = false;
        }
 
        /* Record may have RRSIGs, try to find them. */
        if (allow_unverified
-           || ((qry->flags & QUERY_DNSSEC_WANT) && kr_rank_test(rank, KR_RANK_SECURE))) {
+           || ((qry->flags.DNSSEC_WANT) && kr_rank_test(rank, KR_RANK_SECURE))) {
                kr_rank_set(&lowest_rank, KR_RANK_INITIAL); /* no security for RRSIGs */
                ret = loot_rr(cache, pkt, qry->sname, qry->sclass, rrtype, qry,
                              &rank, &flags, true, lowest_rank);
@@ -232,7 +232,7 @@ static int rrcache_peek(kr_layer_t *ctx, knot_pkt_t *pkt)
 {
        struct kr_request *req = ctx->req;
        struct kr_query *qry = req->current_query;
-       if (ctx->state & (KR_STATE_FAIL|KR_STATE_DONE) || (qry->flags & QUERY_NO_CACHE)) {
+       if (ctx->state & (KR_STATE_FAIL|KR_STATE_DONE) || (qry->flags.NO_CACHE)) {
                return ctx->state; /* Already resolved/failed or already tried, etc. */
        }
        /* Reconstruct the answer from the cache,
@@ -275,7 +275,7 @@ struct rrcache_baton
 static int commit_rrsig(struct rrcache_baton *baton, uint8_t rank, uint8_t flags, knot_rrset_t *rr)
 {
        /* If not doing secure resolution, ignore (unvalidated) RRSIGs. */
-       if (!(baton->qry->flags & QUERY_DNSSEC_WANT)) {
+       if (!(baton->qry->flags.DNSSEC_WANT)) {
                return kr_ok();
        }
        /* Commit covering RRSIG to a separate cache namespace. */
@@ -326,11 +326,11 @@ static int commit_rr(const char *key, void *val, void *data)
 
        uint8_t flags = KR_CACHE_FLAG_NONE;
        if (kr_rank_test(rank, KR_RANK_AUTH)) {
-               if (baton->qry->flags & QUERY_DNSSEC_WEXPAND) {
+               if (baton->qry->flags.DNSSEC_WEXPAND) {
                        flags |= KR_CACHE_FLAG_WCARD_PROOF;
                }
                if ((rr->type == KNOT_RRTYPE_NS) &&
-                   (baton->qry->flags & QUERY_DNSSEC_NODS)) {
+                   (baton->qry->flags.DNSSEC_NODS)) {
                        flags |= KR_CACHE_FLAG_NODS;
                }
        }
@@ -428,7 +428,7 @@ static int rrcache_stash(kr_layer_t *ctx, knot_pkt_t *pkt)
        /* Cache only positive answers, not meta types or RRSIG. */
        const uint16_t qtype = knot_pkt_qtype(pkt);
        const bool is_eligible = !(knot_rrtype_is_metatype(qtype) || qtype == KNOT_RRTYPE_RRSIG);
-       if (qry->flags & QUERY_CACHED || knot_wire_get_rcode(pkt->wire) != KNOT_RCODE_NOERROR || !is_eligible) {
+       if (qry->flags.CACHED || knot_wire_get_rcode(pkt->wire) != KNOT_RCODE_NOERROR || !is_eligible) {
                return ctx->state;
        }
        /* Stash data selected by iterator from the last receieved packet. */
index da73c225aad360812954e944be851f44043d3804..464b7e8b0c29581557229094e84593ec560fb7b4 100644 (file)
@@ -173,10 +173,10 @@ static int validate_records(struct kr_request *req, knot_pkt_t *answer, knot_mm_
         * or optout - flag the query.
          */
        if (an_flags & KR_DNSSEC_VFLG_WEXPAND) {
-               qry->flags |= QUERY_DNSSEC_WEXPAND;
+               qry->flags.DNSSEC_WEXPAND = true;
        }
        if (an_flags & KR_DNSSEC_VFLG_OPTOUT) {
-               qry->flags |= QUERY_DNSSEC_OPTOUT;
+               qry->flags.DNSSEC_OPTOUT = true;
        }
 
        return ret;
@@ -212,7 +212,7 @@ static int validate_keyset(struct kr_request *req, knot_pkt_t *answer, bool has_
        }
 
        /* Check if there's a key for current TA. */
-       if (updated_key && !(qry->flags & QUERY_CACHED)) {
+       if (updated_key && !(qry->flags.CACHED)) {
 
                kr_rrset_validation_ctx_t vctx = {
                        .pkt            = answer,
@@ -233,10 +233,10 @@ static int validate_keyset(struct kr_request *req, knot_pkt_t *answer, bool has_
                }
 
                if (vctx.flags & KR_DNSSEC_VFLG_WEXPAND) {
-                       qry->flags |= QUERY_DNSSEC_WEXPAND;
+                       qry->flags.DNSSEC_WEXPAND = true;
                }
                if (vctx.flags & KR_DNSSEC_VFLG_OPTOUT) {
-                       qry->flags |= QUERY_DNSSEC_OPTOUT;
+                       qry->flags.DNSSEC_OPTOUT = true;
                }
 
        }
@@ -279,8 +279,8 @@ static void mark_insecure_parents(const struct kr_query *qry)
        struct kr_query *parent = qry->parent;
        const uint32_t cut_flags = (QUERY_AWAIT_IPV4 | QUERY_AWAIT_IPV6);
        while (parent && ((parent->flags & cut_flags) == 0)) {
-               parent->flags &= ~QUERY_DNSSEC_WANT;
-               parent->flags |= QUERY_DNSSEC_INSECURE;
+               parent->flags.DNSSEC_WANT = false;
+               parent->flags.DNSSEC_INSECURE = true;
                if (parent->stype != KNOT_RRTYPE_DS &&
                    parent->stype != KNOT_RRTYPE_RRSIG) {
                        break;
@@ -307,7 +307,7 @@ static int update_parent_keys(struct kr_request *req, uint16_t answer_type)
                if (qry->flags & (QUERY_DNSSEC_INSECURE)) { /* DS non-existence proven. */
                        mark_insecure_parents(qry);
                } else if ((qry->flags & (QUERY_DNSSEC_NODS | QUERY_FORWARD)) == QUERY_DNSSEC_NODS) {
-                       if (qry->flags & QUERY_DNSSEC_OPTOUT) {
+                       if (qry->flags.DNSSEC_OPTOUT) {
                                mark_insecure_parents(qry);
                        } else {
                                int ret = kr_dnssec_matches_name_and_type(&req->auth_selected, qry->uid,
@@ -351,7 +351,7 @@ static int update_delegation(struct kr_request *req, struct kr_query *qry, knot_
        if (referral) {
                section = KNOT_AUTHORITY;
        } else if (knot_pkt_qtype(answer) == KNOT_RRTYPE_DS &&
-                  !(qry->flags & QUERY_CNAME) &&
+                  !(qry->flags.CNAME) &&
                   (knot_wire_get_rcode(answer->wire) != KNOT_RCODE_NXDOMAIN)) {
                section = KNOT_ANSWER;
        } else { /* N/A */
@@ -398,10 +398,10 @@ static int update_delegation(struct kr_request *req, struct kr_query *qry, knot_
                        }
                } else if (ret != 0) {
                        VERBOSE_MSG(qry, "<= bogus proof of DS non-existence\n");
-                       qry->flags |= QUERY_DNSSEC_BOGUS;
+                       qry->flags.DNSSEC_BOGUS = true;
                } else {
                        VERBOSE_MSG(qry, "<= DS doesn't exist, going insecure\n");
-                       qry->flags |= QUERY_DNSSEC_NODS;
+                       qry->flags.DNSSEC_NODS = true;
                }
                return ret;
        } else if (qry->flags & QUERY_FORWARD && qry->parent) {
@@ -477,12 +477,12 @@ static int rrsig_not_found(kr_layer_t *ctx, const knot_rrset_t *rr)
                kr_zonecut_copy(&next->zone_cut, cut);
                kr_zonecut_copy_trust(&next->zone_cut, cut);
        } else {
-               next->flags |= QUERY_AWAIT_CUT;
+               next->flags.AWAIT_CUT = true;
        }
-       if (qry->flags & QUERY_FORWARD) {
-               next->flags &= ~QUERY_AWAIT_CUT;
+       if (qry->flags.FORWARD) {
+               next->flags.AWAIT_CUT = false;
        }
-       next->flags |= QUERY_DNSSEC_WANT;
+       next->flags.DNSSEC_WANT = true;
        return KR_STATE_YIELD;
 }
 
@@ -518,7 +518,7 @@ static int check_validation_result(kr_layer_t *ctx, ranked_rr_array_t *arr)
        if (!kr_rank_test(invalid_entry->rank, KR_RANK_SECURE) &&
            (++(invalid_entry->revalidation_cnt) > MAX_REVALIDATION_CNT)) {
                VERBOSE_MSG(qry, "<= continuous revalidation, fails\n");
-               qry->flags |= QUERY_DNSSEC_BOGUS;
+               qry->flags.DNSSEC_BOGUS = true;
                return KR_STATE_FAIL;
        }
 
@@ -527,12 +527,12 @@ static int check_validation_result(kr_layer_t *ctx, ranked_rr_array_t *arr)
                const knot_dname_t *signer_name = knot_rrsig_signer_name(&rr->rrs, 0);
                if (knot_dname_is_sub(signer_name, qry->zone_cut.name)) {
                        qry->zone_cut.name = knot_dname_copy(signer_name, &req->pool);
-                       qry->flags |= QUERY_AWAIT_CUT;
+                       qry->flags.AWAIT_CUT = true;
                } else if (!knot_dname_is_equal(signer_name, qry->zone_cut.name)) {
                        if (qry->zone_cut.parent) {
                                memcpy(&qry->zone_cut, qry->zone_cut.parent, sizeof(qry->zone_cut));
                        } else {
-                               qry->flags |= QUERY_AWAIT_CUT;
+                               qry->flags.AWAIT_CUT = true;
                        }
                        qry->zone_cut.name = knot_dname_copy(signer_name, &req->pool);
                }
@@ -541,7 +541,7 @@ static int check_validation_result(kr_layer_t *ctx, ranked_rr_array_t *arr)
        } else if (kr_rank_test(invalid_entry->rank, KR_RANK_MISSING)) {
                ret = rrsig_not_found(ctx, rr);
        } else if (!kr_rank_test(invalid_entry->rank, KR_RANK_SECURE)) {
-               qry->flags |= QUERY_DNSSEC_BOGUS;
+               qry->flags.DNSSEC_BOGUS = true;
                ret = KR_STATE_FAIL;
        }
 
@@ -582,7 +582,7 @@ static int unsigned_forward(kr_layer_t *ctx, knot_pkt_t *pkt)
                    q->stype == KNOT_RRTYPE_DS &&
                    knot_dname_is_equal(q->sname, qry->sname)) {
                        nods = true;
-                       if (!(q->flags & QUERY_DNSSEC_OPTOUT)) {
+                       if (!(q->flags.DNSSEC_OPTOUT)) {
                                int ret = kr_dnssec_matches_name_and_type(&req->auth_selected, q->uid,
                                                                          qry->sname, KNOT_RRTYPE_NS);
                                ns_exist = (ret == kr_ok());
@@ -591,29 +591,29 @@ static int unsigned_forward(kr_layer_t *ctx, knot_pkt_t *pkt)
        }
 
        if (nods && ns_exist && qtype == KNOT_RRTYPE_NS) {
-               qry->flags &= ~QUERY_DNSSEC_WANT;
-               qry->flags |= QUERY_DNSSEC_INSECURE;
-               if (qry->forward_flags & QUERY_CNAME) {
+               qry->flags.DNSSEC_WANT = false;
+               qry->flags.DNSSEC_INSECURE = true;
+               if (qry->forward_flags.CNAME) {
                        assert(qry->cname_parent);
-                       qry->cname_parent->flags &= ~QUERY_DNSSEC_WANT;
-                       qry->cname_parent->flags |= QUERY_DNSSEC_INSECURE;
+                       qry->cname_parent->flags.DNSSEC_WANT = false;
+                       qry->cname_parent->flags.DNSSEC_INSECURE = true;
                } else if (pkt_rcode == KNOT_RCODE_NOERROR && qry->parent != NULL) {
                        const knot_pktsection_t *sec = knot_pkt_section(pkt, KNOT_ANSWER);
                        const knot_rrset_t *rr = knot_pkt_rr(sec, 0);
                        if (rr->type == KNOT_RRTYPE_NS) {
                                qry->parent->zone_cut.name = knot_dname_copy(rr->owner, &req->pool);
-                               qry->parent->flags &= ~QUERY_DNSSEC_WANT;
-                               qry->parent->flags |= QUERY_DNSSEC_INSECURE;
+                               qry->parent->flags.DNSSEC_WANT = false;
+                               qry->parent->flags.DNSSEC_INSECURE = true;
                        }
                }
                while (qry->parent) {
                        qry = qry->parent;
-                       qry->flags &= ~QUERY_DNSSEC_WANT;
-                       qry->flags |= QUERY_DNSSEC_INSECURE;
-                       if (qry->forward_flags & QUERY_CNAME) {
+                       qry->flags.DNSSEC_WANT = false;
+                       qry->flags.DNSSEC_INSECURE = true;
+                       if (qry->forward_flags.CNAME) {
                                assert(qry->cname_parent);
-                               qry->cname_parent->flags &= ~QUERY_DNSSEC_WANT;
-                               qry->cname_parent->flags |= QUERY_DNSSEC_INSECURE;
+                               qry->cname_parent->flags.DNSSEC_WANT = false;
+                               qry->cname_parent->flags.DNSSEC_INSECURE = true;
                        }
                }
                return KR_STATE_DONE;
@@ -631,7 +631,7 @@ static int unsigned_forward(kr_layer_t *ctx, knot_pkt_t *pkt)
                }
                kr_zonecut_set(&next->zone_cut, qry->zone_cut.name);
                kr_zonecut_copy_trust(&next->zone_cut, &qry->zone_cut);
-               next->flags |= QUERY_DNSSEC_WANT;
+               next->flags.DNSSEC_WANT = true;
        }
 
        return KR_STATE_YIELD;
@@ -646,7 +646,7 @@ static int check_signer(kr_layer_t *ctx, knot_pkt_t *pkt)
        if (ta_name && (!signer || !knot_dname_is_equal(ta_name, signer))) {
                /* check all newly added RRSIGs */
                if (!signer) {
-                       if (qry->flags & QUERY_FORWARD) {
+                       if (qry->flags.FORWARD) {
                                return unsigned_forward(ctx, pkt);
                        }
                        /* Not a DNSSEC-signed response. */
@@ -669,19 +669,19 @@ static int check_signer(kr_layer_t *ctx, knot_pkt_t *pkt)
                                qry->zone_cut.name = knot_dname_copy(qname, &req->pool);
                        }
                } else if (knot_dname_is_sub(signer, qry->zone_cut.name)) {
-                       if (!(qry->flags & QUERY_FORWARD)) {
+                       if (!(qry->flags.FORWARD)) {
                                /* Key signer is below current cut, advance and refetch keys. */
                                qry->zone_cut.name = knot_dname_copy(signer, &req->pool);
                        } else {
                                /* Check if DS does not exist. */
                                struct kr_query *q = kr_rplan_find_resolved(&req->rplan, NULL,
                                                                            signer, qry->sclass, KNOT_RRTYPE_DS);
-                               if (q && q->flags & QUERY_DNSSEC_NODS) {
-                                       qry->flags &= ~QUERY_DNSSEC_WANT;
-                                       qry->flags |= QUERY_DNSSEC_INSECURE;
+                               if (q && q->flags.DNSSEC_NODS) {
+                                       qry->flags.DNSSEC_WANT = false;
+                                       qry->flags.DNSSEC_INSECURE = true;
                                        if (qry->parent) {
-                                               qry->parent->flags &= ~QUERY_DNSSEC_WANT;
-                                               qry->parent->flags |= QUERY_DNSSEC_INSECURE;
+                                               qry->parent->flags.DNSSEC_WANT = false;
+                                               qry->parent->flags.DNSSEC_INSECURE = true;
                                        }
                                } else if (qry->stype != KNOT_RRTYPE_DS) {
                                        struct kr_rplan *rplan = &req->rplan;
@@ -692,7 +692,7 @@ static int check_signer(kr_layer_t *ctx, knot_pkt_t *pkt)
                                        }
                                        kr_zonecut_set(&next->zone_cut, qry->zone_cut.name);
                                        kr_zonecut_copy_trust(&next->zone_cut, &qry->zone_cut);
-                                       next->flags |= QUERY_DNSSEC_WANT;
+                                       next->flags.DNSSEC_WANT = true;
                                }
                        }
                } else if (!knot_dname_is_equal(signer, qry->zone_cut.name)) {
@@ -702,14 +702,14 @@ static int check_signer(kr_layer_t *ctx, knot_pkt_t *pkt)
                        if (qry->zone_cut.parent) {
                                memcpy(&qry->zone_cut, qry->zone_cut.parent, sizeof(qry->zone_cut));
                        } else {
-                               qry->flags |= QUERY_AWAIT_CUT;
+                               qry->flags.AWAIT_CUT = true;
                        }
                        qry->zone_cut.name = knot_dname_copy(signer, &req->pool);
                }
 
                /* zone cut matches, but DS/DNSKEY doesn't => refetch. */
                VERBOSE_MSG(qry, ">< cut changed, needs revalidation\n");
-               if ((qry->flags & QUERY_FORWARD) && qry->stype != KNOT_RRTYPE_DS) {
+               if ((qry->flags.FORWARD) && qry->stype != KNOT_RRTYPE_DS) {
                        struct kr_rplan *rplan = &req->rplan;
                        struct kr_query *next = kr_rplan_push(rplan, qry, signer,
                                                        qry->sclass, KNOT_RRTYPE_DS);
@@ -718,10 +718,10 @@ static int check_signer(kr_layer_t *ctx, knot_pkt_t *pkt)
                        }
                        kr_zonecut_set(&next->zone_cut, qry->zone_cut.name);
                        kr_zonecut_copy_trust(&next->zone_cut, &qry->zone_cut);
-                       next->flags |= QUERY_DNSSEC_WANT;
+                       next->flags.DNSSEC_WANT = true;
                        return KR_STATE_YIELD;
                }
-               if (!(qry->flags & QUERY_FORWARD)) {
+               if (!(qry->flags.FORWARD)) {
                        return KR_STATE_YIELD;
                }
        }
@@ -773,7 +773,7 @@ static void check_wildcard(kr_layer_t *ctx)
 
                        for (int k = 0; k < rrsigs->rrs.rr_count; ++k) {
                                if (knot_rrsig_labels(&rrsigs->rrs, k) != owner_labels) {
-                                       qry->flags |= QUERY_DNSSEC_WEXPAND;
+                                       qry->flags.DNSSEC_WEXPAND = true;
                                }
                        }
                }
@@ -793,18 +793,18 @@ static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
 
        /* Pass-through if user doesn't want secure answer or stub. */
        /* @todo: Validating stub resolver mode. */
-       if (qry->flags & QUERY_STUB) {
+       if (qry->flags.STUB) {
                rank_records(ctx, KR_RANK_OMIT);
                return ctx->state;
        }
        uint8_t pkt_rcode = knot_wire_get_rcode(pkt->wire);
-       if ((qry->flags & QUERY_FORWARD) &&
+       if ((qry->flags.FORWARD) &&
            pkt_rcode != KNOT_RCODE_NOERROR &&
            pkt_rcode != KNOT_RCODE_NXDOMAIN) {
                do {
-                       qry->flags |= QUERY_DNSSEC_BOGUS;
+                       qry->flags.DNSSEC_BOGUS = true;
                        if (qry->cname_parent) {
-                               qry->cname_parent->flags |= QUERY_DNSSEC_BOGUS;
+                               qry->cname_parent->flags.DNSSEC_BOGUS = true;
                        }
                        qry = qry->parent;
                } while (qry);
@@ -812,10 +812,10 @@ static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
                return ctx->state;
        }
 
-       if (!(qry->flags & QUERY_DNSSEC_WANT)) {
+       if (!(qry->flags.DNSSEC_WANT)) {
                const uint32_t test_flags = (QUERY_CACHED | QUERY_DNSSEC_INSECURE);
                const bool is_insec = ((qry->flags & test_flags) == test_flags);
-               if ((qry->flags & QUERY_DNSSEC_INSECURE)) {
+               if ((qry->flags.DNSSEC_INSECURE)) {
                        rank_records(ctx, KR_RANK_INSECURE);
                }
                if (is_insec && qry->parent != NULL) {
@@ -840,7 +840,7 @@ static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
                /* Check if wildcard expansion happens.
                 * If yes, copy authority. */
                if ((qry->parent == NULL) &&
-                   (qry->flags & QUERY_DNSSEC_WEXPAND)) {
+                   (qry->flags.DNSSEC_WEXPAND)) {
                        kr_ranked_rrarray_set_wire(&req->auth_selected, true, qry->uid, true);
                }
                rank_records(ctx, KR_RANK_OMIT);
@@ -848,9 +848,9 @@ static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
        }
        /* Answer for RRSIG may not set DO=1, but all records MUST still validate. */
        bool use_signatures = (knot_pkt_qtype(pkt) != KNOT_RRTYPE_RRSIG);
-       if (!(qry->flags & QUERY_CACHED) && !knot_pkt_has_dnssec(pkt) && !use_signatures) {
+       if (!(qry->flags.CACHED) && !knot_pkt_has_dnssec(pkt) && !use_signatures) {
                VERBOSE_MSG(qry, "<= got insecure response\n");
-               qry->flags |= QUERY_DNSSEC_BOGUS;
+               qry->flags.DNSSEC_BOGUS = true;
                return KR_STATE_FAIL;
        }
 
@@ -860,12 +860,12 @@ static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
        const knot_pktsection_t *an = knot_pkt_section(pkt, KNOT_ANSWER);
        const bool referral = (an->count == 0 && !knot_wire_get_aa(pkt->wire));
 
-       if (!(qry->flags & QUERY_CACHED) && knot_wire_get_aa(pkt->wire)) {
+       if (!(qry->flags.CACHED) && knot_wire_get_aa(pkt->wire)) {
                /* Check if answer if not empty,
                 * but iterator has not selected any records. */
                if (!check_empty_answer(ctx, pkt)) {
                        VERBOSE_MSG(qry, "<= no useful RR in authoritative answer\n");
-                       qry->flags |= QUERY_DNSSEC_BOGUS;
+                       qry->flags.DNSSEC_BOGUS = true;
                        return KR_STATE_FAIL;
                }
                /* Track difference between current TA and signer name.
@@ -890,20 +890,20 @@ static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
                } else if (ret == kr_error(DNSSEC_INVALID_DS_ALGORITHM)) {
                        VERBOSE_MSG(qry, ">< all DS entries use unsupported algorithm pairs, going insecure\n");
                        /* ^ the message is a bit imprecise to avoid being too verbose */
-                       qry->flags &= ~QUERY_DNSSEC_WANT;
-                       qry->flags |= QUERY_DNSSEC_INSECURE;
+                       qry->flags.DNSSEC_WANT = false;
+                       qry->flags.DNSSEC_INSECURE = true;
                        rank_records(ctx, KR_RANK_INSECURE);
                        mark_insecure_parents(qry);
                        return KR_STATE_DONE;
                } else if (ret != 0) {
                        VERBOSE_MSG(qry, "<= bad keys, broken trust chain\n");
-                       qry->flags |= QUERY_DNSSEC_BOGUS;
+                       qry->flags.DNSSEC_BOGUS = true;
                        return KR_STATE_FAIL;
                }
        }
 
        /* Validate non-existence proof if not positive answer. */
-       if (!(qry->flags & QUERY_CACHED) && pkt_rcode == KNOT_RCODE_NXDOMAIN &&
+       if (!(qry->flags.CACHED) && pkt_rcode == KNOT_RCODE_NXDOMAIN &&
            ((qry->flags & (QUERY_FORWARD | QUERY_CNAME)) != (QUERY_FORWARD | QUERY_CNAME))) {
                /* @todo If knot_pkt_qname(pkt) is used instead of qry->sname then the tests crash. */
                if (!has_nsec3) {
@@ -916,11 +916,11 @@ static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
                         * but NSEC3 that covers next closer name
                         * (or wildcard at next closer name) has opt-out flag.
                         * RFC5155 9.2; AD flag can not be set */
-                       qry->flags |= QUERY_DNSSEC_OPTOUT;
+                       qry->flags.DNSSEC_OPTOUT = true;
                        VERBOSE_MSG(qry, "<= can't prove NXDOMAIN due to optout, going insecure\n");
                } else if (ret != 0) {
                        VERBOSE_MSG(qry, "<= bad NXDOMAIN proof\n");
-                       qry->flags |= QUERY_DNSSEC_BOGUS;
+                       qry->flags.DNSSEC_BOGUS = true;
                        return KR_STATE_FAIL;
                }
        }
@@ -928,7 +928,7 @@ static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
        /* @todo WTH, this needs API that just tries to find a proof and the caller
         * doesn't have to worry about NSEC/NSEC3
         * @todo rework this */
-       if (!(qry->flags & QUERY_CACHED) && (pkt_rcode == KNOT_RCODE_NOERROR) &&
+       if (!(qry->flags.CACHED) && (pkt_rcode == KNOT_RCODE_NOERROR) &&
            ((qry->flags & (QUERY_FORWARD | QUERY_CNAME)) != (QUERY_FORWARD | QUERY_CNAME))) {
                bool no_data = (an->count == 0 && knot_wire_get_aa(pkt->wire));
                if (no_data) {
@@ -944,14 +944,14 @@ static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
                        if (ret != 0) {
                                if (has_nsec3 && (ret == kr_error(DNSSEC_OUT_OF_RANGE))) {
                                        VERBOSE_MSG(qry, "<= can't prove NODATA due to optout, going insecure\n");
-                                       qry->flags |= QUERY_DNSSEC_OPTOUT;
+                                       qry->flags.DNSSEC_OPTOUT = true;
                                        /* Could not return from here,
                                         * we must continue, validate NSEC\NSEC3 and
                                         * call update_parent_keys() to mark
                                         * parent queries as insecured */
                                } else {
                                        VERBOSE_MSG(qry, "<= bad NODATA proof\n");
-                                       qry->flags |= QUERY_DNSSEC_BOGUS;
+                                       qry->flags.DNSSEC_BOGUS = true;
                                        return KR_STATE_FAIL;
                                }
                        }
@@ -960,13 +960,13 @@ static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
 
        /* Validate all records, fail as bogus if it doesn't match.
         * Do not revalidate data from cache, as it's already trusted. */
-       if (!(qry->flags & QUERY_CACHED)) {
+       if (!(qry->flags.CACHED)) {
                ret = validate_records(req, pkt, req->rplan.pool, has_nsec3);
                if (ret != 0) {
                        /* something exceptional - no DNS key, empty pointers etc
                         * normally it shoudn't happen */
                        VERBOSE_MSG(qry, "<= couldn't validate RRSIGs\n");
-                       qry->flags |= QUERY_DNSSEC_BOGUS;
+                       qry->flags.DNSSEC_BOGUS = true;
                        return KR_STATE_FAIL;
                }
                /* check validation state and spawn subrequests */
@@ -986,7 +986,7 @@ static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
 
        /* Check if wildcard expansion detected for final query.
         * If yes, copy authority. */
-       if ((qry->parent == NULL) && (qry->flags & QUERY_DNSSEC_WEXPAND)) {
+       if ((qry->parent == NULL) && (qry->flags.DNSSEC_WEXPAND)) {
                kr_ranked_rrarray_set_wire(&req->auth_selected, true, qry->uid, true);
        }
 
@@ -995,7 +995,7 @@ static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
        if (ret == DNSSEC_NOT_FOUND && qry->stype != KNOT_RRTYPE_DS) {
                if (ctx->state == KR_STATE_YIELD) {
                        VERBOSE_MSG(qry, "<= can't validate referral\n");
-                       qry->flags |= QUERY_DNSSEC_BOGUS;
+                       qry->flags.DNSSEC_BOGUS = true;
                        return KR_STATE_FAIL;
                } else {
                        /* Check the trust chain and query DS\DNSKEY if needed. */
@@ -1007,7 +1007,7 @@ static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
        } else if (pkt_rcode == KNOT_RCODE_NOERROR &&
                   referral &&
                   (((qry->flags & (QUERY_DNSSEC_WANT | QUERY_DNSSEC_INSECURE)) == QUERY_DNSSEC_INSECURE) ||
-                  (qry->flags & QUERY_DNSSEC_NODS))) {
+                  (qry->flags.DNSSEC_NODS))) {
                /* referral with proven DS non-existance */
                qtype = KNOT_RRTYPE_DS;
        }
@@ -1020,7 +1020,7 @@ static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
 
        if (qry->flags & QUERY_FORWARD && qry->parent) {
                if (pkt_rcode == KNOT_RCODE_NXDOMAIN) {
-                       qry->parent->forward_flags |= QUERY_NO_MINIMIZE;
+                       qry->parent->forward_flags.NO_MINIMIZE = true;
                }
        }
        VERBOSE_MSG(qry, "<= answer valid, OK\n");
index 4cfecde8b106b36f8f2857f102a63f94ec3c38c4..e4452b57d8b2e4ca4167bd3482f541fed708978b 100644 (file)
@@ -94,10 +94,10 @@ static unsigned eval_addr_set(pack_t *addr_set, kr_nsrep_lru_t *rttcache, unsign
                bool is_valid = false;
                /* Check if the address isn't disabled. */
                if (len == sizeof(struct in6_addr)) {
-                       is_valid = !(opts & QUERY_NO_IPV6);
+                       is_valid = !(opts.NO_IPV6);
                        favour = FAVOUR_IPV6;
                } else {
-                       is_valid = !(opts & QUERY_NO_IPV4);
+                       is_valid = !(opts.NO_IPV4);
                }
                /* Get RTT for this address (if known) */
                if (is_valid) {
@@ -146,10 +146,10 @@ static int eval_nsrep(const char *k, void *v, void *baton)
                        if (reputation & KR_NS_NOIP4) {
                                score = KR_NS_UNKNOWN;
                                /* Try to start with clean slate */
-                               if (!(ctx->options & QUERY_NO_IPV6)) {
+                               if (!(ctx->options.NO_IPV6)) {
                                        reputation &= ~KR_NS_NOIP6;
                                }
-                               if (!(ctx->options & QUERY_NO_IPV4)) {
+                               if (!(ctx->options.NO_IPV4)) {
                                        reputation &= ~KR_NS_NOIP4;
                                }
                        }
@@ -162,7 +162,7 @@ static int eval_nsrep(const char *k, void *v, void *baton)
         * The fastest NS is preferred by workers until it is depleted (timeouts or degrades),
         * at the same time long distance scouts probe other sources (low probability).
         * Servers on TIMEOUT (depleted) can be probed by the dice roll only */
-       if (score <= ns->score && (qry->flags & QUERY_NO_THROTTLE || score < KR_NS_TIMEOUT)) {
+       if (score <= ns->score && (qry->flags.NO_THROTTLE || score < KR_NS_TIMEOUT)) {
                update_nsrep_set(ns, (const knot_dname_t *)k, addr_choice, score);
                ns->reputation = reputation;
        } else {
@@ -170,7 +170,7 @@ static int eval_nsrep(const char *k, void *v, void *baton)
                if ((kr_rand_uint(100) < 10) && (kr_rand_uint(KR_NS_MAX_SCORE) >= score)) {
                        /* If this is a low-reliability probe, go with TCP to get ICMP reachability check. */
                        if (score >= KR_NS_LONG) {
-                               qry->flags |= QUERY_TCP;
+                               qry->flags.TCP = true;
                        }
                        update_nsrep_set(ns, (const knot_dname_t *)k, addr_choice, score);
                        ns->reputation = reputation;
index c8701768d66c3b0bdfef445c72d45e9018a0312b..4033c99a6021d289706ce8a4aeb3a6300cd86ead 100644 (file)
@@ -165,7 +165,7 @@ static int invalidate_ns(struct kr_rplan *rplan, struct kr_query *qry)
  */
 static void check_empty_nonterms(struct kr_query *qry, knot_pkt_t *pkt, struct kr_cache *cache, uint32_t timestamp)
 {
-       if (qry->flags & QUERY_NO_MINIMIZE) {
+       if (qry->flags.NO_MINIMIZE) {
                return;
        }
 
@@ -186,7 +186,7 @@ static void check_empty_nonterms(struct kr_query *qry, knot_pkt_t *pkt, struct k
                int ret = kr_cache_peek(cache, KR_CACHE_PKT, target, KNOT_RRTYPE_NS, &entry, &timestamp);
                if (ret == 0) { /* Either NXDOMAIN or NODATA, start here. */
                        /* @todo We could stop resolution here for NXDOMAIN, but we can't because of broken CDNs */
-                       qry->flags |= QUERY_NO_MINIMIZE;
+                       qry->flags.NO_MINIMIZE = true;
                        kr_make_query(qry, pkt);
                        return;
                }
@@ -211,12 +211,12 @@ static int ns_fetch_cut(struct kr_query *qry, const knot_dname_t *requested_name
        if (is_insecured) {
                /* If parent is unsecured we don't want DNSSEC
                 * even if cut name is covered by TA. */
-               qry->flags &= ~QUERY_DNSSEC_WANT;
-               qry->flags |= QUERY_DNSSEC_INSECURE;
+               qry->flags.DNSSEC_WANT = false;
+               qry->flags.DNSSEC_INSECURE = true;
        } else if (kr_ta_covers_qry(req->ctx, qry->zone_cut.name, KNOT_RRTYPE_NS)) {
-               qry->flags |= QUERY_DNSSEC_WANT;
+               qry->flags.DNSSEC_WANT = true;
        } else {
-               qry->flags &= ~QUERY_DNSSEC_WANT;
+               qry->flags.DNSSEC_WANT = false;
        }
 
        struct kr_zonecut cut_found = {0};
@@ -235,7 +235,7 @@ static int ns_fetch_cut(struct kr_query *qry, const knot_dname_t *requested_name
                        return KR_STATE_FAIL;
                }
                VERBOSE_MSG(qry, "=> using root hints\n");
-               qry->flags &= ~QUERY_AWAIT_CUT;
+               qry->flags.AWAIT_CUT = false;
                kr_zonecut_deinit(&cut_found);
                return KR_STATE_DONE;
        } else if (ret != kr_ok()) {
@@ -245,21 +245,21 @@ static int ns_fetch_cut(struct kr_query *qry, const knot_dname_t *requested_name
 
        /* Find out security status.
         * Go insecure if the zone cut is provably insecure */
-       if ((qry->flags & QUERY_DNSSEC_WANT) && !secured) {
+       if ((qry->flags.DNSSEC_WANT) && !secured) {
                VERBOSE_MSG(qry, "=> NS is provably without DS, going insecure\n");
-               qry->flags &= ~QUERY_DNSSEC_WANT;
-               qry->flags |= QUERY_DNSSEC_INSECURE;
+               qry->flags.DNSSEC_WANT = false;
+               qry->flags.DNSSEC_INSECURE = true;
        }
        /* Zonecut name can change, check it again
         * to prevent unnecessary DS & DNSKEY queries */
-       if (!(qry->flags & QUERY_DNSSEC_INSECURE) &&
+       if (!(qry->flags.DNSSEC_INSECURE) &&
            kr_ta_covers_qry(req->ctx, cut_found.name, KNOT_RRTYPE_NS)) {
-               qry->flags |= QUERY_DNSSEC_WANT;
+               qry->flags.DNSSEC_WANT = true;
        } else {
-               qry->flags &= ~QUERY_DNSSEC_WANT;
+               qry->flags.DNSSEC_WANT = false;
        }
        /* Check if any DNSKEY found for cached cut */
-       if ((qry->flags & QUERY_DNSSEC_WANT) &&
+       if ((qry->flags.DNSSEC_WANT) &&
            (cut_found.key == NULL)) {
                /* No DNSKEY was found for cached cut.
                 * If no glue were fetched for this cut,
@@ -277,7 +277,7 @@ static int ns_fetch_cut(struct kr_query *qry, const knot_dname_t *requested_name
                        return KR_STATE_FAIL;
                }
                VERBOSE_MSG(qry, "=> using root hints\n");
-               qry->flags &= ~QUERY_AWAIT_CUT;
+               qry->flags.AWAIT_CUT = false;
                return KR_STATE_DONE;
        }
        /* Copy fetched name */
@@ -305,14 +305,14 @@ static int ns_resolve_addr(struct kr_query *qry, struct kr_request *param)
         * Prefer IPv6 and continue with IPv4 if not available.
         */
        uint16_t next_type = 0;
-       if (!(qry->flags & QUERY_AWAIT_IPV6) &&
-           !(ctx->options & QUERY_NO_IPV6)) {
+       if (!(qry->flags.AWAIT_IPV6) &&
+           !(ctx->options.NO_IPV6)) {
                next_type = KNOT_RRTYPE_AAAA;
-               qry->flags |= QUERY_AWAIT_IPV6;
-       } else if (!(qry->flags & QUERY_AWAIT_IPV4) &&
-                  !(ctx->options & QUERY_NO_IPV4)) {
+               qry->flags.AWAIT_IPV6 = true;
+       } else if (!(qry->flags.AWAIT_IPV4) &&
+                  !(ctx->options.NO_IPV4)) {
                next_type = KNOT_RRTYPE_A;
-               qry->flags |= QUERY_AWAIT_IPV4;
+               qry->flags.AWAIT_IPV4 = true;
                /* Hmm, no usable IPv6 then. */
                qry->ns.reputation |= KR_NS_NOIP6;
                kr_nsrep_update_rep(&qry->ns, qry->ns.reputation, ctx->cache_rep);
@@ -323,7 +323,7 @@ static int ns_resolve_addr(struct kr_query *qry, struct kr_request *param)
                if (!next_type && qry->zone_cut.name[0] == '\0') {
                        VERBOSE_MSG(qry, "=> fallback to root hints\n");
                        kr_zonecut_set_sbelt(ctx, &qry->zone_cut);
-                       qry->flags |= QUERY_NO_THROTTLE; /* Pick even bad SBELT servers */
+                       qry->flags.NO_THROTTLE = true; /* Pick even bad SBELT servers */
                        return kr_error(EAGAIN);
                }
                /* Neither IPv4 nor IPv6; flag server as unusable. */
@@ -336,10 +336,10 @@ static int ns_resolve_addr(struct kr_query *qry, struct kr_request *param)
        struct kr_query *next = qry;
        if (knot_dname_is_equal(qry->ns.name, qry->sname) &&
            qry->stype == next_type) {
-               if (!(qry->flags & QUERY_NO_MINIMIZE)) {
-                       qry->flags |= QUERY_NO_MINIMIZE;
-                       qry->flags &= ~QUERY_AWAIT_IPV6;
-                       qry->flags &= ~QUERY_AWAIT_IPV4;
+               if (!(qry->flags.NO_MINIMIZE)) {
+                       qry->flags.NO_MINIMIZE = true;
+                       qry->flags.AWAIT_IPV6 = false;
+                       qry->flags.AWAIT_IPV4 = false;
                        VERBOSE_MSG(qry, "=> circular dependency, retrying with non-minimized name\n");
                } else {
                        qry->ns.reputation |= KR_NS_NOIP4 | KR_NS_NOIP6;
@@ -354,7 +354,7 @@ static int ns_resolve_addr(struct kr_query *qry, struct kr_request *param)
                if (!next) {
                        return kr_error(ENOMEM);
                }
-               next->flags |= QUERY_NONAUTH;
+               next->flags.NONAUTH = true;
        }
        /* At the root level with no NS addresses, add SBELT subrequest. */
        int ret = 0;
@@ -363,10 +363,10 @@ static int ns_resolve_addr(struct kr_query *qry, struct kr_request *param)
                if (ret == 0) { /* Copy TA and key since it's the same cut to avoid lookup. */
                        kr_zonecut_copy_trust(&next->zone_cut, &qry->zone_cut);
                        kr_zonecut_set_sbelt(ctx, &qry->zone_cut); /* Add SBELT to parent in case query fails. */
-                       qry->flags |= QUERY_NO_THROTTLE; /* Pick even bad SBELT servers */
+                       qry->flags.NO_THROTTLE = true; /* Pick even bad SBELT servers */
                }
        } else {
-               next->flags |= QUERY_AWAIT_CUT;
+               next->flags.AWAIT_CUT = true;
        }
        return ret;
 }
@@ -578,7 +578,7 @@ static int answer_finalize(struct kr_request *request, int state)
        /* Always set SERVFAIL for bogus answers. */
        if (state == KR_STATE_FAIL && rplan->pending.len > 0) {
                struct kr_query *last = array_tail(rplan->pending);
-               if ((last->flags & QUERY_DNSSEC_WANT) && (last->flags & QUERY_DNSSEC_BOGUS)) {
+               if ((last->flags.DNSSEC_WANT) && (last->flags.DNSSEC_BOGUS)) {
                        return answer_fail(request);
                }
        }
@@ -590,10 +590,10 @@ static int answer_finalize(struct kr_request *request, int state)
         * Be conservative.  Primary approach: check ranks of all RRs in wire.
         * Only "negative answers" need special handling. */
        bool secure = (last != NULL); /* suspicious otherwise */
-       if (last && (last->flags & QUERY_STUB)) {
+       if (last && (last->flags.STUB)) {
                secure = false; /* don't trust forwarding for now */
        }
-       if (last && (last->flags & QUERY_DNSSEC_OPTOUT)) {
+       if (last && (last->flags.DNSSEC_OPTOUT)) {
                secure = false; /* the last answer is insecure due to opt-out */
        }
 
@@ -642,7 +642,7 @@ static int answer_finalize(struct kr_request *request, int state)
                     * as those would also be PKT_NOERROR. */
                    || (answ_all_cnames && knot_pkt_qtype(answer) != KNOT_RRTYPE_CNAME))
                {
-                       secure = secure && (last->flags & QUERY_DNSSEC_WANT)
+                       secure = secure && (last->flags.DNSSEC_WANT)
                                && !(last->flags & (QUERY_DNSSEC_BOGUS | QUERY_DNSSEC_INSECURE));
                }
        }
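
Note: the automated single-flag patterns do not match compound masks, so the expression last->flags & (QUERY_DNSSEC_BOGUS | QUERY_DNSSEC_INSECURE) in the context above stays in bitmask form and will need a manual follow-up. Assuming DNSSEC_BOGUS and DNSSEC_INSECURE become bool fields of the new flag struct, as the converted hunks suggest, a sketch of the whole condition is:

                        secure = secure && last->flags.DNSSEC_WANT
                                && !last->flags.DNSSEC_BOGUS
                                && !last->flags.DNSSEC_INSECURE;
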
@@ -655,7 +655,7 @@ static int answer_finalize(struct kr_request *request, int state)
        if (last) {
                struct kr_query *cname_parent = last->cname_parent;
                while (cname_parent != NULL) {
-                       if (cname_parent->flags & QUERY_DNSSEC_OPTOUT) {
+                       if (cname_parent->flags.DNSSEC_OPTOUT) {
                                knot_wire_clear_ad(answer->wire);
                                break;
                        }
@@ -670,7 +670,7 @@ static int query_finalize(struct kr_request *request, struct kr_query *qry, knot
 {
        int ret = 0;
        knot_pkt_begin(pkt, KNOT_ADDITIONAL);
-       if (!(qry->flags & QUERY_SAFEMODE)) {
+       if (!(qry->flags.SAFEMODE)) {
                /* Remove any EDNS records from any previous iteration. */
                ret = edns_erase_and_reserve(pkt);
                if (ret == 0) {
@@ -678,7 +678,7 @@ static int query_finalize(struct kr_request *request, struct kr_query *qry, knot
                }
                if (ret == 0) {
                        /* Stub resolution (ask for +rd and +do) */
-                       if (qry->flags & QUERY_STUB) {
+                       if (qry->flags.STUB) {
                                knot_wire_set_rd(pkt->wire);
                                if (knot_pkt_has_dnssec(request->answer)) {
                                        knot_edns_set_do(pkt->opt_rr);
@@ -687,11 +687,11 @@ static int query_finalize(struct kr_request *request, struct kr_query *qry, knot
                                        knot_wire_set_cd(pkt->wire);
                                }
                        /* Full resolution (ask for +cd and +do) */
-                       } else if (qry->flags & QUERY_FORWARD) {
+                       } else if (qry->flags.FORWARD) {
                                knot_wire_set_rd(pkt->wire);
                                knot_edns_set_do(pkt->opt_rr);
                                knot_wire_set_cd(pkt->wire);
-                       } else if (qry->flags & QUERY_DNSSEC_WANT) {
+                       } else if (qry->flags.DNSSEC_WANT) {
                                knot_edns_set_do(pkt->opt_rr);
                                knot_wire_set_cd(pkt->wire);
                        }
@@ -742,11 +742,11 @@ static int resolve_query(struct kr_request *request, const knot_pkt_t *packet)
        }
 
        /* Deferred zone cut lookup for this query. */
-       qry->flags |= QUERY_AWAIT_CUT;
+       qry->flags.AWAIT_CUT = true;
        /* Want DNSSEC if it's possible to secure this name (e.g. it is covered by any TA) */
        if ((knot_wire_get_ad(packet->wire) || knot_pkt_has_dnssec(packet)) &&
            kr_ta_covers_qry(request->ctx, qname, qtype)) {
-               qry->flags |= QUERY_DNSSEC_WANT;
+               qry->flags.DNSSEC_WANT = true;
        }
 
        /* Initialize answer packet */
@@ -758,7 +758,7 @@ static int resolve_query(struct kr_request *request, const knot_pkt_t *packet)
 
        if (cd_is_set) {
                knot_wire_set_cd(answer->wire);
-       } else if (qry->flags & QUERY_DNSSEC_WANT) {
+       } else if (qry->flags.DNSSEC_WANT) {
                knot_wire_set_ad(answer->wire);
        }
 
@@ -782,7 +782,7 @@ KR_PURE static bool kr_inaddr_equal(const struct sockaddr *a, const struct socka
 static void update_nslist_rtt(struct kr_context *ctx, struct kr_query *qry, const struct sockaddr *src)
 {
        /* Do not track in safe mode. */
-       if (qry->flags & QUERY_SAFEMODE) {
+       if (qry->flags.SAFEMODE) {
                return;
        }
 
@@ -846,7 +846,7 @@ static void update_nslist_score(struct kr_request *request, struct kr_query *qry
                        kr_nsrep_update_rtt(&qry->ns, src, KR_NS_PENALTY, ctx->cache_rtt, KR_NS_ADD);
                }
        /* Penalise resolution failures except validation failures. */
-       } else if (!(qry->flags & QUERY_DNSSEC_BOGUS)) {
+       } else if (!(qry->flags.DNSSEC_BOGUS)) {
                kr_nsrep_update_rtt(&qry->ns, src, KR_NS_TIMEOUT, ctx->cache_rtt, KR_NS_RESET);
                WITH_VERBOSE {
                        char addr_str[INET6_ADDRSTRLEN];
@@ -870,12 +870,12 @@ int kr_resolve_consume(struct kr_request *request, const struct sockaddr *src, k
 
        /* Different processing for network error */
        struct kr_query *qry = array_tail(rplan->pending);
-       bool tried_tcp = (qry->flags & QUERY_TCP);
+       bool tried_tcp = (qry->flags.TCP);
        if (!packet || packet->size == 0) {
                if (tried_tcp) {
                        request->state = KR_STATE_FAIL;
                } else {
-                       qry->flags |= QUERY_TCP;
+                       qry->flags.TCP = true;
                }
        } else {
                /* Packet cleared, derandomize QNAME. */
@@ -884,7 +884,7 @@ int kr_resolve_consume(struct kr_request *request, const struct sockaddr *src, k
                        randomized_qname_case(qname_raw, qry->secret);
                }
                request->state = KR_STATE_CONSUME;
-               if (qry->flags & QUERY_CACHED) {
+               if (qry->flags.CACHED) {
                        ITERATE_LAYERS(request, qry, consume, packet);
                } else {
                        struct timeval now;
@@ -900,21 +900,21 @@ int kr_resolve_consume(struct kr_request *request, const struct sockaddr *src, k
        }
 
        /* Track RTT for iterative answers */
-       if (src && !(qry->flags & QUERY_CACHED)) {
+       if (src && !(qry->flags.CACHED)) {
                update_nslist_score(request, qry, src, packet);
        }
        /* Resolution failed, invalidate current NS. */
        if (request->state == KR_STATE_FAIL) {
                invalidate_ns(rplan, qry);
-               qry->flags &= ~QUERY_RESOLVED;
+               qry->flags.RESOLVED = false;
        }
 
        /* Pop query if resolved. */
        if (request->state == KR_STATE_YIELD) {
                return KR_STATE_PRODUCE; /* Requery */
-       } else if (qry->flags & QUERY_RESOLVED) {
+       } else if (qry->flags.RESOLVED) {
                kr_rplan_pop(rplan, qry);
-       } else if (!tried_tcp && (qry->flags & QUERY_TCP)) {
+       } else if (!tried_tcp && (qry->flags.TCP)) {
                return KR_STATE_PRODUCE; /* Requery over TCP */
        } else { /* Clear query flags for next attempt */
                qry->flags &= ~(QUERY_CACHED|QUERY_TCP);
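
Note: the combined clear above, qry->flags &= ~(QUERY_CACHED|QUERY_TCP);, falls outside the single-flag patterns and stays in bitmask form. Assuming CACHED and TCP are bool fields of the new flag struct, a manual follow-up would likely split it into:

                qry->flags.CACHED = false;
                qry->flags.TCP = false;
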
@@ -923,7 +923,7 @@ int kr_resolve_consume(struct kr_request *request, const struct sockaddr *src, k
        ITERATE_LAYERS(request, qry, reset);
 
        /* Do not finish with bogus answer. */
-       if (qry->flags & QUERY_DNSSEC_BOGUS)  {
+       if (qry->flags.DNSSEC_BOGUS)  {
                return KR_STATE_FAIL;
        }
 
@@ -943,9 +943,9 @@ static struct kr_query *zone_cut_subreq(struct kr_rplan *rplan, struct kr_query
            kr_zonecut_copy_trust(&next->zone_cut, &parent->zone_cut) != 0) {
                return NULL;
        }
-       next->flags |= QUERY_NO_MINIMIZE;
-       if (parent->flags & QUERY_DNSSEC_WANT) {
-               next->flags |= QUERY_DNSSEC_WANT;
+       next->flags.NO_MINIMIZE = true;
+       if (parent->flags.DNSSEC_WANT) {
+               next->flags.DNSSEC_WANT = true;
        }
        return next;
 }
@@ -957,40 +957,40 @@ static int forward_trust_chain_check(struct kr_request *request, struct kr_query
        map_t *negative_anchors = &request->ctx->negative_anchors;
 
        if (qry->parent != NULL &&
-           !(qry->forward_flags & QUERY_CNAME) &&
-           !(qry->flags & QUERY_DNS64_MARK) &&
+           !(qry->forward_flags.CNAME) &&
+           !(qry->flags.DNS64_MARK) &&
            knot_dname_in(qry->parent->zone_cut.name, qry->zone_cut.name)) {
                return KR_STATE_PRODUCE;
        }
 
-       assert(qry->flags & QUERY_FORWARD);
+       assert(qry->flags.FORWARD);
 
        if (!trust_anchors) {
-               qry->flags &= ~QUERY_AWAIT_CUT;
+               qry->flags.AWAIT_CUT = false;
                return KR_STATE_PRODUCE;
        }
 
-       if (qry->flags & QUERY_DNSSEC_INSECURE) {
-               qry->flags &= ~QUERY_AWAIT_CUT;
+       if (qry->flags.DNSSEC_INSECURE) {
+               qry->flags.AWAIT_CUT = false;
                return KR_STATE_PRODUCE;
        }
 
-       if (qry->forward_flags & QUERY_NO_MINIMIZE) {
-               qry->flags &= ~QUERY_AWAIT_CUT;
+       if (qry->forward_flags.NO_MINIMIZE) {
+               qry->flags.AWAIT_CUT = false;
                return KR_STATE_PRODUCE;
        }
 
        const knot_dname_t *wanted_name = qry->sname;
        const knot_dname_t *start_name = qry->sname;
-       if ((qry->flags & QUERY_AWAIT_CUT) && !resume) {
-               qry->flags &= ~QUERY_AWAIT_CUT;
+       if ((qry->flags.AWAIT_CUT) && !resume) {
+               qry->flags.AWAIT_CUT = false;
                const knot_dname_t *longest_ta = kr_ta_get_longest_name(trust_anchors, qry->sname);
                if (longest_ta) {
                        start_name = longest_ta;
                        qry->zone_cut.name = knot_dname_copy(start_name, qry->zone_cut.pool);
-                       qry->flags |= QUERY_DNSSEC_WANT;
+                       qry->flags.DNSSEC_WANT = true;
                } else {
-                       qry->flags &= ~QUERY_DNSSEC_WANT;
+                       qry->flags.DNSSEC_WANT = false;
                        return KR_STATE_PRODUCE;
                }
        }
@@ -1038,19 +1038,19 @@ static int forward_trust_chain_check(struct kr_request *request, struct kr_query
                            knot_dname_is_equal(q->sname, wanted_name)) {
                                if (q->stype == KNOT_RRTYPE_DS) {
                                        ds_req = true;
-                                       if (q->flags & QUERY_DNSSEC_NODS) {
+                                       if (q->flags.DNSSEC_NODS) {
                                                nods = true;
                                        }
-                                       if (q->flags & QUERY_CNAME) {
+                                       if (q->flags.CNAME) {
                                                nods = true;
                                                ns_exist = false;
-                                       } else if (!(q->flags & QUERY_DNSSEC_OPTOUT)) {
+                                       } else if (!(q->flags.DNSSEC_OPTOUT)) {
                                                int ret = kr_dnssec_matches_name_and_type(&request->auth_selected, q->uid,
                                                                                          wanted_name, KNOT_RRTYPE_NS);
                                                ns_exist = (ret == kr_ok());
                                        }
                                } else {
-                                       if (q->flags & QUERY_CNAME) {
+                                       if (q->flags.CNAME) {
                                                nods = true;
                                                ns_exist = false;
                                        }
@@ -1068,7 +1068,7 @@ static int forward_trust_chain_check(struct kr_request *request, struct kr_query
                        return KR_STATE_DONE;
                }
 
-               if (qry->parent == NULL && (qry->flags & QUERY_CNAME) &&
+               if (qry->parent == NULL && (qry->flags.CNAME) &&
                    ds_req && ns_req) {
                        return KR_STATE_PRODUCE;
                }
@@ -1089,16 +1089,16 @@ static int forward_trust_chain_check(struct kr_request *request, struct kr_query
        /* Disable DNSSEC if it enters NTA. */
        if (kr_ta_get(negative_anchors, wanted_name)){
                VERBOSE_MSG(qry, ">< negative TA, going insecure\n");
-               qry->flags &= ~QUERY_DNSSEC_WANT;
+               qry->flags.DNSSEC_WANT = false;
        }
 
        /* Enable DNSSEC if it enters a new island of trust. */
-       bool want_secured = (qry->flags & QUERY_DNSSEC_WANT) &&
+       bool want_secured = (qry->flags.DNSSEC_WANT) &&
                            !knot_wire_get_cd(request->answer->wire);
-       if (!(qry->flags & QUERY_DNSSEC_WANT) &&
+       if (!(qry->flags.DNSSEC_WANT) &&
            !knot_wire_get_cd(request->answer->wire) &&
            kr_ta_get(trust_anchors, wanted_name)) {
-               qry->flags |= QUERY_DNSSEC_WANT;
+               qry->flags.DNSSEC_WANT = true;
                want_secured = true;
                WITH_VERBOSE {
                char qname_str[KNOT_DNAME_MAXLEN];
@@ -1155,23 +1155,23 @@ static int trust_chain_check(struct kr_request *request, struct kr_query *qry)
        /* Disable DNSSEC if it enters NTA. */
        if (kr_ta_get(negative_anchors, qry->zone_cut.name)){
                VERBOSE_MSG(qry, ">< negative TA, going insecure\n");
-               qry->flags &= ~QUERY_DNSSEC_WANT;
-               qry->flags |= QUERY_DNSSEC_INSECURE;
+               qry->flags.DNSSEC_WANT = false;
+               qry->flags.DNSSEC_INSECURE = true;
        }
-       if (qry->flags & QUERY_DNSSEC_NODS) {
+       if (qry->flags.DNSSEC_NODS) {
                /* This is the next query iteration with minimized qname.
                 * At the previous iteration DS non-existence has been proven */
-               qry->flags &= ~QUERY_DNSSEC_NODS;
-               qry->flags &= ~QUERY_DNSSEC_WANT;
-               qry->flags |= QUERY_DNSSEC_INSECURE;
+               qry->flags.DNSSEC_NODS = false;
+               qry->flags.DNSSEC_WANT = false;
+               qry->flags.DNSSEC_INSECURE = true;
        }
        /* Enable DNSSEC if entering a new (or different) island of trust,
         * and update the TA RRset if required. */
-       bool want_secured = (qry->flags & QUERY_DNSSEC_WANT) &&
+       bool want_secured = (qry->flags.DNSSEC_WANT) &&
                            !knot_wire_get_cd(request->answer->wire);
        knot_rrset_t *ta_rr = kr_ta_get(trust_anchors, qry->zone_cut.name);
        if (!knot_wire_get_cd(request->answer->wire) && ta_rr) {
-               qry->flags |= QUERY_DNSSEC_WANT;
+               qry->flags.DNSSEC_WANT = true;
                want_secured = true;
 
                if (qry->zone_cut.trust_anchor == NULL
@@ -1219,17 +1219,17 @@ static int trust_chain_check(struct kr_request *request, struct kr_query *qry)
 static int zone_cut_check(struct kr_request *request, struct kr_query *qry, knot_pkt_t *packet)
 {
        /* Stub mode, just forward and do not solve cut. */
-       if (qry->flags & QUERY_STUB) {
+       if (qry->flags.STUB) {
                return KR_STATE_PRODUCE;
        }
 
        /* Forwarding to upstream resolver mode.
         * Since forwarding targets are already in qry->ns,
         * cut fetching is not needed. */
-       if (qry->flags & QUERY_FORWARD) {
+       if (qry->flags.FORWARD) {
                return forward_trust_chain_check(request, qry, false);
        }
-       if (!(qry->flags & QUERY_AWAIT_CUT)) {
+       if (!(qry->flags.AWAIT_CUT)) {
                /* The query was resolved from cache.
                 * Spawn DS / DNSKEY requests if needed and exit */
                return trust_chain_check(request, qry);
@@ -1244,7 +1244,7 @@ static int zone_cut_check(struct kr_request *request, struct kr_query *qry, knot
                        return KR_STATE_FAIL;
                }
                VERBOSE_MSG(qry, "=> using root hints\n");
-               qry->flags &= ~QUERY_AWAIT_CUT;
+               qry->flags.AWAIT_CUT = false;
                return KR_STATE_DONE;
        }
 
@@ -1273,12 +1273,12 @@ static int zone_cut_check(struct kr_request *request, struct kr_query *qry, knot
        } while (state == KR_STATE_CONSUME);
 
        /* Update minimized QNAME if zone cut changed */
-       if (qry->zone_cut.name[0] != '\0' && !(qry->flags & QUERY_NO_MINIMIZE)) {
+       if (qry->zone_cut.name[0] != '\0' && !(qry->flags.NO_MINIMIZE)) {
                if (kr_make_query(qry, packet) != 0) {
                        return KR_STATE_FAIL;
                }
        }
-       qry->flags &= ~QUERY_AWAIT_CUT;
+       qry->flags.AWAIT_CUT = false;
 
        /* Check trust chain */
        return trust_chain_check(request, qry);
@@ -1298,8 +1298,8 @@ int kr_resolve_produce(struct kr_request *request, struct sockaddr **dst, int *t
        if (qry->deferred != NULL) {
                /* @todo: Refactoring validator, check trust chain before resuming. */
                int state = 0;
-               if (((qry->flags & QUERY_FORWARD) == 0) ||
-                   ((qry->stype == KNOT_RRTYPE_DS) && (qry->flags & QUERY_CNAME))) {
+               if (((qry->flags.FORWARD) == 0) ||
+                   ((qry->stype == KNOT_RRTYPE_DS) && (qry->flags.CNAME))) {
                        state = trust_chain_check(request, qry);
                } else {
                        state = forward_trust_chain_check(request, qry, true);
@@ -1323,7 +1323,7 @@ int kr_resolve_produce(struct kr_request *request, struct sockaddr **dst, int *t
        } else {
                /* Caller is interested in always tracking a zone cut, even if the answer is cached.
                 * This is normally not required and incurs another cache lookup for a cached answer. */
-               if (qry->flags & QUERY_ALWAYS_CUT) {
+               if (qry->flags.ALWAYS_CUT) {
                        switch(zone_cut_check(request, qry, packet)) {
                        case KR_STATE_FAIL: return KR_STATE_FAIL;
                        case KR_STATE_DONE: return KR_STATE_PRODUCE;
@@ -1360,7 +1360,7 @@ int kr_resolve_produce(struct kr_request *request, struct sockaddr **dst, int *t
        }
 
        /* Update zone cut, spawn new subrequests. */
-       if (!(qry->flags & QUERY_STUB)) {
+       if (!(qry->flags.STUB)) {
                int state = zone_cut_check(request, qry, packet);
                switch(state) {
                case KR_STATE_FAIL: return KR_STATE_FAIL;
@@ -1388,7 +1388,7 @@ ns_election:
                /* Root DNSKEY must be fetched from the hints to avoid a chicken-and-egg problem. */
                if (qry->sname[0] == '\0' && qry->stype == KNOT_RRTYPE_DNSKEY) {
                        kr_zonecut_set_sbelt(request->ctx, &qry->zone_cut);
-                       qry->flags |= QUERY_NO_THROTTLE; /* Pick even bad SBELT servers */
+                       qry->flags.NO_THROTTLE = true; /* Pick even bad SBELT servers */
                }
                kr_nsrep_elect(qry, request->ctx);
                if (qry->ns.score > KR_NS_MAX_SCORE) {
@@ -1424,7 +1424,7 @@ ns_election:
 
        gettimeofday(&qry->timestamp, NULL);
        *dst = &qry->ns.addr[0].ip;
-       *type = (qry->flags & QUERY_TCP) ? SOCK_STREAM : SOCK_DGRAM;
+       *type = (qry->flags.TCP) ? SOCK_STREAM : SOCK_DGRAM;
        return request->state;
 }
 
@@ -1511,7 +1511,7 @@ int kr_resolve_checkout(struct kr_request *request, struct sockaddr *src,
                }
                inet_ntop(addr->sa_family, kr_inaddr(&qry->ns.addr[i].ip), ns_str, sizeof(ns_str));
                VERBOSE_MSG(qry, "=> querying: '%s' score: %u zone cut: '%s' m12n: '%s' type: '%s' proto: '%s'\n",
-                       ns_str, qry->ns.score, zonecut_str, qname_str, type_str, (qry->flags & QUERY_TCP) ? "tcp" : "udp");
+                       ns_str, qry->ns.score, zonecut_str, qname_str, type_str, (qry->flags.TCP) ? "tcp" : "udp");
                break;
        }}
 
index cc3918ee7d636b4d1669c2bd1eb75857855b4863..f115f2e9d8499869b687bd46ef47fa9040b39bd7 100644 (file)
@@ -140,7 +140,7 @@ static struct kr_query *kr_rplan_push_query(struct kr_rplan *rplan,
                : 0;
 
        /* When forwarding, keep the nameserver addresses. */
-       if (parent && (parent->flags & qry->flags & QUERY_FORWARD)) {
+       if (parent && parent->flags.FORWARD && qry->flags.FORWARD) {
                ret = kr_nsrep_copy_set(&qry->ns, &parent->ns);
                if (ret) {
                        query_free(rplan->pool, qry);
index 8a108c1902c6aca1320027d0e101586067e7b1ef..f1ce2c7554e0368718fd201b094834701711e124 100644 (file)
@@ -375,10 +375,10 @@ static int fetch_ns(struct kr_context *ctx, struct kr_zonecut *cut,
                unsigned *cached = lru_get_try(ctx->cache_rep,
                                (const char *)ns_name, knot_dname_size(ns_name));
                unsigned reputation = (cached) ? *cached : 0;
-               if (!(reputation & KR_NS_NOIP4) && !(ctx->options & QUERY_NO_IPV4)) {
+               if (!(reputation & KR_NS_NOIP4) && !(ctx->options.NO_IPV4)) {
                        fetch_addr(cut, &ctx->cache, ns_name, KNOT_RRTYPE_A, timestamp);
                }
-               if (!(reputation & KR_NS_NOIP6) && !(ctx->options & QUERY_NO_IPV6)) {
+               if (!(reputation & KR_NS_NOIP6) && !(ctx->options.NO_IPV6)) {
                        fetch_addr(cut,  &ctx->cache, ns_name, KNOT_RRTYPE_AAAA, timestamp);
                }
        }
index 13f548959310cb60c9faca00cb25fce282aac508..33c2455241f387da0d5cad71034a4e60231c102f 100644 (file)
@@ -225,7 +225,7 @@ int check_response(kr_layer_t *ctx, knot_pkt_t *pkt)
                return ctx->state;
        }
 
-       if (!cookie_ctx->clnt.enabled || (qry->flags & QUERY_TCP)) {
+       if (!cookie_ctx->clnt.enabled || (qry->flags.TCP)) {
                return ctx->state;
        }
 
@@ -265,7 +265,7 @@ int check_response(kr_layer_t *ctx, knot_pkt_t *pkt)
 #endif
        if (rcode == KNOT_RCODE_BADCOOKIE) {
                struct kr_query *next = NULL;
-               if (!(qry->flags & QUERY_BADCOOKIE_AGAIN)) {
+               if (!(qry->flags.BADCOOKIE_AGAIN)) {
                        /* Received first BADCOOKIE, regenerate query. */
                        next = kr_rplan_push(&req->rplan, qry->parent,
                                             qry->sname,  qry->sclass,
@@ -274,7 +274,7 @@ int check_response(kr_layer_t *ctx, knot_pkt_t *pkt)
 
                if (next) {
                        VERBOSE_MSG(NULL, "%s\n", "BADCOOKIE querying again");
-                       qry->flags |= QUERY_BADCOOKIE_AGAIN;
+                       qry->flags.BADCOOKIE_AGAIN = true;
                } else {
                        /*
                         * Either the planning of the second request failed or
@@ -283,7 +283,7 @@ int check_response(kr_layer_t *ctx, knot_pkt_t *pkt)
                         * RFC7873 5.3 says that TCP should be used. Currently
                         * we always expect that the server doesn't support TCP.
                         */
-                       qry->flags &= ~QUERY_BADCOOKIE_AGAIN;
+                       qry->flags.BADCOOKIE_AGAIN = false;
                        return KR_STATE_FAIL;
                }
 
index 69d0cd6e431b7eec2344b0186ff87261d1d1f5f1..f0eae82803d83598bbaa6b37aa50c5f5c60adf97 100644 (file)
@@ -174,7 +174,7 @@ static int dnstap_log(kr_layer_t *ctx) {
        if (rplan->resolved.len > 0) {
                struct kr_query *last = array_tail(rplan->resolved);
                /* Only add query_zone when not answered from cache */
-               if (!(last->flags & QUERY_CACHED)) {
+               if (!(last->flags.CACHED)) {
                        const knot_dname_t *zone_cut_name = last->zone_cut.name;
                        if (zone_cut_name != NULL) {
                                m.query_zone.data = (uint8_t *)zone_cut_name;
index c1aa9252d8e8b9107381749ebebf9d4734d30e8e..6dd142ea3345dd5c7eba3ee94fe93866d2da3c85 100644 (file)
@@ -152,7 +152,7 @@ static int query(kr_layer_t *ctx, knot_pkt_t *pkt)
        }
 
        VERBOSE_MSG(qry, "<= answered from hints\n");
-       qry->flags &= ~QUERY_DNSSEC_WANT; /* Never authenticated */
+       qry->flags.DNSSEC_WANT = false; /* Never authenticated */
        qry->flags |= QUERY_CACHED|QUERY_NO_MINIMIZE;
        pkt->parsed = pkt->size;
        knot_wire_set_qr(pkt->wire);
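
Note: the combined set qry->flags |= QUERY_CACHED|QUERY_NO_MINIMIZE; a few lines above likewise does not match the single-flag patterns and is left untouched. Assuming CACHED and NO_MINIMIZE become bool fields of the new flag struct, the manual equivalent would be:

        qry->flags.CACHED = true;
        qry->flags.NO_MINIMIZE = true;
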
index edc41a3b2160a1ec160ed2c4c9ac808b2fea870f..c5727cc089d8cb30f228a11da7bde18b0c8359b2 100644 (file)
@@ -137,11 +137,11 @@ static void collect_sample(struct stat_data *data, struct kr_rplan *rplan, knot_
        for (size_t i = 0; i < rplan->resolved.len; ++i) {
                /* Sample queries leading to iteration or expiring */
                struct kr_query *qry = rplan->resolved.at[i];
-               if ((qry->flags & QUERY_CACHED) && !(qry->flags & QUERY_EXPIRING)) {
+               if ((qry->flags.CACHED) && !(qry->flags.EXPIRING)) {
                        continue;
                }
                int key_len = collect_key(key, qry->sname, qry->stype);
-               if (qry->flags & QUERY_EXPIRING) {
+               if (qry->flags.EXPIRING) {
                        unsigned *count = lru_get_new(data->queries.expiring, key, key_len);
                        if (count)
                                *count += 1;
@@ -158,7 +158,7 @@ static int collect_rtt(kr_layer_t *ctx, knot_pkt_t *pkt)
 {
        struct kr_request *req = ctx->req;
        struct kr_query *qry = req->current_query;
-       if (qry->flags & QUERY_CACHED || !req->upstream.addr) {
+       if (qry->flags.CACHED || !req->upstream.addr) {
                return ctx->state;
        }
 
@@ -221,7 +221,7 @@ static int collect(kr_layer_t *ctx)
                }
                /* Observe the final query. */
                struct kr_query *last = array_tail(rplan->resolved);
-               if (last->flags & QUERY_CACHED) {
+               if (last->flags.CACHED) {
                        stat_const_add(data, metric_answer_cached, 1);
                }
        }