if (kr_fails_assert(handle && handle->data == session))
return qr_task_on_send(task, NULL, kr_error(EINVAL));
const bool is_stream = handle->type == UV_TCP;
- if (!is_stream && handle->type != UV_UDP) abort();
+ kr_require(is_stream || handle->type == UV_UDP);
if (addr == NULL)
addr = session_get_peer(session);
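The two macros above differ in how a failed check is handled. Below is a rough sketch of the semantics assumed throughout these hunks, not the real definitions, which also log the failing expression with its source location and make the soft-assertion abort behaviour a runtime setting:

/* Simplified illustration of the assertion semantics; not the actual macros. */
#include <stdlib.h>

/* Hard requirement: never compiled out, aborts on failure.  Used where
 * continuing would corrupt state, i.e. wherever a bare abort() stood before. */
#define kr_require(expr) \
	do { if (!(expr)) abort(); } while (0)

/* Recoverable check: evaluates to true exactly when the assertion fails,
 * so the caller can bail out with an error code instead of crashing:
 *
 *     if (kr_fails_assert(handle && handle->data == session))
 *         return qr_task_on_send(task, NULL, kr_error(EINVAL));
 */
#define kr_fails_assert(expr) (!(expr))

In the hunks that follow, the former abort() calls become kr_require(), while the check at the top, which has an error path available to it, uses kr_fails_assert().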
static uint8_t get_lowest_rank(const struct kr_query *qry, const knot_dname_t *name, const uint16_t type)
{
/* Shut up linters. */
- if (unlikely(!qry || !qry->request)) abort();
+ kr_require(qry && qry->request);
/* TODO: move rank handling into the iterator (DNSSEC_* flags)? */
const bool allow_unverified =
knot_wire_get_cd(qry->request->qsource.packet->wire) || qry->flags.STUB;

inline static void kr_qflags_mod(struct kr_qflags *fl1, struct kr_qflags fl2,
unsigned char mod(const unsigned char a, const unsigned char b))
{
- if (!fl1) abort();
+ kr_require(fl1);
union {
struct kr_qflags flags;
/* C99 section 6.5.3.4: sizeof(char) == 1 */

void kr_zonecut_move(struct kr_zonecut *to, const struct kr_zonecut *from)
{
- if (!to || !from) abort();
+ kr_require(to && from);
kr_zonecut_deinit(to);
memcpy(to, from, sizeof(*to));
}
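For context, kr_zonecut_move() transfers ownership by deiniting the destination and then shallow-copying the source over it, so the source must not be deinitialized afterwards. A hypothetical usage sketch under that assumption; the helper name, the root apex choice and the pool taken from the request are illustrative, not taken from the changeset:

/* Hypothetical sketch: swap the query's zone cut for a freshly built one. */
static int replace_zone_cut(struct kr_query *qry, struct kr_request *req)
{
	struct kr_zonecut tmp;
	/* "" is the root dname (a single zero byte); the request's pool is assumed. */
	int ret = kr_zonecut_init(&tmp, (const knot_dname_t *)"", &req->pool);
	if (ret != 0)
		return ret;
	/* ... populate tmp with the newly discovered delegation ... */
	kr_zonecut_move(&qry->zone_cut, &tmp);
	/* tmp's contents now belong to qry->zone_cut; calling
	 * kr_zonecut_deinit(&tmp) here would free the same data twice. */
	return kr_ok();
}
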
- cached_rr.rrs.count * offsetof(knot_rdata_t, len);
int ret = pack_reserve_mm(*addrs, cached_rr.rrs.count, pack_extra_size,
kr_memreserve, mm_pool);
- if (ret) abort(); /* ENOMEM "probably" */
+ kr_require(ret == 0); /* ENOMEM "probably" */
int usable_cnt = 0;
addrset_info_t result = AI_EMPTY;