*/
#include <sys/time.h>
-#include <assert.h>
#include <arpa/inet.h>
#include <contrib/cleanup.h>
continue;
}
int ret = kr_zonecut_add(cut, ns_name, NULL, 0);
- assert(!ret); (void)ret;
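+ /* Unlike assert(), kr_assume() returns whether the check held, so the
+  * caller decides how to react; the (void)! cast discards the result
+  * where, as here, we continue regardless. */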
+ (void)!kr_assume(!ret);
/* Choose when to use glue records. */
const bool in_bailiwick =
static int process_authority(knot_pkt_t *pkt, struct kr_request *req)
{
struct kr_query *qry = req->current_query;
- assert(!(qry->flags.STUB));
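+ /* Downgrade the hard assert to a graceful failure: a STUB query must
+  * never reach authority processing, and failing the request beats
+  * aborting the whole resolver. */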
+ if (!kr_assume(!qry->flags.STUB))
+ return KR_STATE_FAIL;
int result = KR_STATE_CONSUME;
if (qry->flags.FORWARD) {
static int unroll_cname(knot_pkt_t *pkt, struct kr_request *req, bool referral, const knot_dname_t **cname_ret)
{
struct kr_query *query = req->current_query;
- assert(!(query->flags.STUB));
+ if (!kr_assume(!query->flags.STUB))
+ return KR_STATE_FAIL;
/* Process answer type */
const knot_pktsection_t *an = knot_pkt_section(pkt, KNOT_ANSWER);
const knot_dname_t *cname = NULL;
static int process_stub(knot_pkt_t *pkt, struct kr_request *req)
{
struct kr_query *query = req->current_query;
- assert(query->flags.STUB);
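+ /* Mirror of process_authority(): only STUB queries belong here. */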
+ if (!kr_assume(query->flags.STUB))
+ return KR_STATE_FAIL;
/* Pick all answer RRs. */
const knot_pktsection_t *an = knot_pkt_section(pkt, KNOT_ANSWER);
for (unsigned i = 0; i < an->count; ++i) {
static int prepare_query(kr_layer_t *ctx, knot_pkt_t *pkt)
{
- assert(pkt && ctx);
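+ /* Layer entry point: a missing packet or context indicates a broken
+  * caller, so fail this request rather than bring the process down. */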
+ if (!kr_assume(pkt && ctx))
+ return KR_STATE_FAIL;
struct kr_request *req = ctx->req;
struct kr_query *query = req->current_query;
if (!query || ctx->state & (KR_STATE_DONE|KR_STATE_FAIL)) {
*/
static int resolve(kr_layer_t *ctx, knot_pkt_t *pkt)
{
- assert(pkt && ctx);
+ if (!kr_assume(pkt && ctx))
+ return KR_STATE_FAIL;
struct kr_request *req = ctx->req;
struct kr_query *query = req->current_query;
if (!query) {
* we trigger another cache *reading* attempt
* for the subsequent PRODUCE round.
*/
- assert(query->flags.NONAUTH);
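+ /* A violated assumption is survivable at this point: we only reset
+  * CACHE_TRIED below, so discard the result and carry on. */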
+ (void)!kr_assume(query->flags.NONAUTH);
query->flags.CACHE_TRIED = false;
VERBOSE_MSG("<= referral response, but cache should stop us short now\n");
} else {