git.ipfire.org Git - thirdparty/knot-resolver.git/commitdiff
lib/layer/iterate.c: replace asserts
author: Tomas Krizek <tomas.krizek@nic.cz>
Wed, 24 Mar 2021 16:44:46 +0000 (17:44 +0100)
committer: Tomas Krizek <tomas.krizek@nic.cz>
Tue, 25 May 2021 12:39:41 +0000 (14:39 +0200)
lib/layer/iterate.c

index 7621b9007ae84920b544aa8aac65d7a0e827b743..499e97469fa2105b6b4babb30745d17e3d3d8e9b 100644 (file)
@@ -16,7 +16,6 @@
  */
 
 #include <sys/time.h>
-#include <assert.h>
 #include <arpa/inet.h>
 
 #include <contrib/cleanup.h>
@@ -308,7 +307,7 @@ static int update_cut(knot_pkt_t *pkt, const knot_rrset_t *rr,
                        continue;
                }
                int ret = kr_zonecut_add(cut, ns_name, NULL, 0);
-               assert(!ret); (void)ret;
+               (void)!kr_assume(!ret);
 
                /* Choose when to use glue records. */
                const bool in_bailiwick =
@@ -390,7 +389,8 @@ static int pick_authority(knot_pkt_t *pkt, struct kr_request *req, bool to_wire)
 static int process_authority(knot_pkt_t *pkt, struct kr_request *req)
 {
        struct kr_query *qry = req->current_query;
-       assert(!(qry->flags.STUB));
+       if (!kr_assume(!qry->flags.STUB))
+               return KR_STATE_FAIL;
 
        int result = KR_STATE_CONSUME;
        if (qry->flags.FORWARD) {
@@ -493,7 +493,8 @@ static int finalize_answer(knot_pkt_t *pkt, struct kr_request *req)
 static int unroll_cname(knot_pkt_t *pkt, struct kr_request *req, bool referral, const knot_dname_t **cname_ret)
 {
        struct kr_query *query = req->current_query;
-       assert(!(query->flags.STUB));
+       if (!kr_assume(!query->flags.STUB))
+               return KR_STATE_FAIL;
        /* Process answer type */
        const knot_pktsection_t *an = knot_pkt_section(pkt, KNOT_ANSWER);
        const knot_dname_t *cname = NULL;
@@ -848,7 +849,8 @@ static int process_answer(knot_pkt_t *pkt, struct kr_request *req)
 static int process_stub(knot_pkt_t *pkt, struct kr_request *req)
 {
        struct kr_query *query = req->current_query;
-       assert(query->flags.STUB);
+       if (!kr_assume(query->flags.STUB))
+               return KR_STATE_FAIL;
        /* Pick all answer RRs. */
        const knot_pktsection_t *an = knot_pkt_section(pkt, KNOT_ANSWER);
        for (unsigned i = 0; i < an->count; ++i) {
@@ -942,7 +944,8 @@ int kr_make_query(struct kr_query *query, knot_pkt_t *pkt)
 
 static int prepare_query(kr_layer_t *ctx, knot_pkt_t *pkt)
 {
-       assert(pkt && ctx);
+       if (!kr_assume(pkt && ctx))
+               return KR_STATE_FAIL;
        struct kr_request *req = ctx->req;
        struct kr_query *query = req->current_query;
        if (!query || ctx->state & (KR_STATE_DONE|KR_STATE_FAIL)) {
@@ -994,7 +997,8 @@ static bool satisfied_by_additional(const struct kr_query *qry)
  */
 static int resolve(kr_layer_t *ctx, knot_pkt_t *pkt)
 {
-       assert(pkt && ctx);
+       if (!kr_assume(pkt && ctx))
+               return KR_STATE_FAIL;
        struct kr_request *req = ctx->req;
        struct kr_query *query = req->current_query;
        if (!query) {
@@ -1145,7 +1149,7 @@ static int resolve(kr_layer_t *ctx, knot_pkt_t *pkt)
                         * we trigger another cache *reading* attempt
                         * for the subsequent PRODUCE round.
                         */
-                       assert(query->flags.NONAUTH);
+                       (void)!kr_assume(query->flags.NONAUTH);
                        query->flags.CACHE_TRIED = false;
                        VERBOSE_MSG("<= referral response, but cache should stop us short now\n");
                } else {