Checked with -Wpointer-arith; the remaining warnings come from code in contrib/ucw.
const int32_t new_ttl = get_new_ttl(v.data, qry, qry->sname,
KNOT_RRTYPE_CNAME, qry->timestamp.tv_sec);
ret = answer_simple_hit(ctx, pkt, KNOT_RRTYPE_CNAME, v.data,
- v.data + v.len, new_ttl);
+ knot_db_val_bound(v), new_ttl);
/* TODO: ^^ cumbersome code; we also recompute the TTL */
return ret == kr_ok() ? KR_STATE_DONE : ctx->state;
}
if (remains < 0) goto cont;
{
const uint8_t *nsec_p_raw = el[i].len > sizeof(stamp)
- ? el[i].data + sizeof(stamp) : NULL;
+ ? (uint8_t *)el[i].data + sizeof(stamp) : NULL;
nsec_p_init(&ans.nsec_p, nsec_p_raw);
}
/**** 2. and 3. inside */
return ctx->state;
}
/* Add the SOA into the answer. */
- void *eh_data_bound = val.data + val.len;
- ret = entry2answer(&ans, AR_SOA, eh, eh_data_bound,
+ ret = entry2answer(&ans, AR_SOA, eh, knot_db_val_bound(val),
k->zname, KNOT_RRTYPE_SOA, new_ttl);
if (ret) return ctx->state;
}
struct answer ans;
memset(&ans, 0, sizeof(ans));
ans.mm = &qry->request->pool;
- ret = entry2answer(&ans, AR_ANSWER, eh,
- val_new_entry.data + val_new_entry.len,
+ ret = entry2answer(&ans, AR_ANSWER, eh, knot_db_val_bound(val_new_entry),
rr->owner, rr->type, 0);
/*
VERBOSE_MSG(qry, "=> sanity: written %d and read %d\n",
int i_replace = ENTRY_APEX_NSECS_CNT - 1;
for (int i = 0; i < ENTRY_APEX_NSECS_CNT; ++i) {
if (el[i].len != data_stride) continue;
- if (nsec_p && memcmp(nsec_p, el[i].data + sizeof(uint32_t),
+ if (nsec_p && memcmp(nsec_p, (uint8_t *)el[i].data + sizeof(uint32_t),
data_stride - sizeof(uint32_t)) != 0) {
continue;
}
/* Prepare the new data chunk */
memcpy(el[0].data, &valid_until, sizeof(valid_until));
if (nsec_p) {
- memcpy(el[0].data + sizeof(valid_until), nsec_p,
+ memcpy((uint8_t *)el[0].data + sizeof(valid_until), nsec_p,
data_stride - sizeof(valid_until));
}
/* Write it all to the cache */
return kr_error(ENOENT);
}
- const void *eh_bound = val.data + val.len;
+ const uint8_t *eh_bound = knot_db_val_bound(val);
if (eh->is_packet) {
/* Note: we answer here immediately, even if it's (theoretically)
* possible that we could generate a higher-security negative proof.
return -ABS(ESTALE);
}
/* Add the RR into the answer. */
- const void *eh_bound = val.data + val.len;
- ret = entry2answer(ans, AR_ANSWER, eh, eh_bound, qry->sname, type, new_ttl);
+ ret = entry2answer(ans, AR_ANSWER, eh, knot_db_val_bound(val),
+ qry->sname, type, new_ttl);
VERBOSE_MSG(qry, "=> wildcard: answer expanded, ret = %d, new TTL %d\n",
ret, (int)new_ttl);
if (ret) return kr_error(ret);
.ttl = eh->ttl,
.rank = eh->rank,
.raw_data = val.data,
- .raw_bound = val.data + val.len,
+ .raw_bound = knot_db_val_bound(val),
};
return kr_ok();
}
return kr_error(EILSEQ);
}
const uint8_t *it = ea->data,
- *it_bound = val.data + val.len;
+ *it_bound = knot_db_val_bound(val);
for (int i = 0; i < ENTRY_APEX_NSECS_CNT; ++i) {
if (it > it_bound) {
return kr_error(EILSEQ);
const bool ok = val.data && ((ssize_t)val.len) > 0;
if (!ok) return kr_error(EINVAL);
const struct entry_h *eh = val.data;
- const void *d = eh->data; /* iterates over the data in entry */
- const void *data_bound = val.data + val.len;
+ const uint8_t *d = eh->data; /* iterates over the data in entry */
+ const uint8_t *data_bound = knot_db_val_bound(val);
if (d >= data_bound) return kr_error(EILSEQ);
if (!eh->is_packet) { /* Positive RRset + its RRsig set (may be empty). */
int sets = 2;
d += 2 + len;
}
if (d > data_bound) return kr_error(EILSEQ);
- return d - val.data;
+ return d - (uint8_t *)val.data;
}
struct entry_apex * entry_apex_consistent(knot_db_val_t val)
ret = cache_write_or_clear(cache, &key, &val, qry);
if (ret) return kr_error(ret);
memcpy(val.data, buf, val.len); /* we also copy the "empty" space, but well... */
- val_new_entry->data = val.data + (el[i_type].data - buf);
+ val_new_entry->data = (uint8_t *)val.data
+ + ((uint8_t *)el[i_type].data - (uint8_t *)buf);
return kr_ok();
}
int entry2answer(struct answer *ans, int id,
- const struct entry_h *eh, const void *eh_bound,
+ const struct entry_h *eh, const uint8_t *eh_bound,
const knot_dname_t *owner, uint16_t type, uint32_t new_ttl)
{
/* We assume it's zeroed. Do basic sanity check. */
* \return error code. They are all bad conditions and "guarded" by assert.
*/
int entry2answer(struct answer *ans, int id,
- const struct entry_h *eh, const void *eh_bound,
+ const struct entry_h *eh, const uint8_t *eh_bound,
const knot_dname_t *owner, uint16_t type, uint32_t new_ttl);
return tmp;
}
+/** Return the one-past-the-end bound of a DB value (void-pointer arithmetic isn't standard-compliant). */
+static inline uint8_t * knot_db_val_bound(knot_db_val_t val)
+{
+ return (uint8_t *)val.data + val.len;
+}
+
}
if (kwz_low) {
*kwz_low = (knot_db_val_t){
- .data = key_nsec.data + nwz_off,
+ .data = (uint8_t *)key_nsec.data + nwz_off,
.len = key_nsec.len - nwz_off,
}; /* CACHE_KEY_DEF */
}
assert((ssize_t)(kwz_hi.len) >= 0);
/* 2. do the actual range check. */
const knot_db_val_t kwz_sname = {
- .data = (void *)k->buf + 1 + nwz_off,
+ .data = (void *)(k->buf + 1 + nwz_off),
.len = k->buf[0] - k->zlf_len,
};
assert((ssize_t)(kwz_sname.len) >= 0);
/* Basic checks OK -> materialize data. */
{
const struct entry_h *nsec_eh = val.data;
- const void *nsec_eh_bound = val.data + val.len;
- int ret = entry2answer(ans, AR_NSEC, nsec_eh, nsec_eh_bound,
+ int ret = entry2answer(ans, AR_NSEC, nsec_eh, knot_db_val_bound(val),
owner, KNOT_RRTYPE_NSEC, new_ttl);
if (ret) return kr_error(ret);
}
}
/* Check if our sname-covering NSEC also covers/matches SS. */
knot_db_val_t kwz = {
- .data = key.data + nwz_off,
+ .data = (uint8_t *)key.data + nwz_off,
.len = key.len - nwz_off,
};
assert((ssize_t)(kwz.len) >= 0);
}
/* Materialize the record into answer (speculatively). */
const struct entry_h *nsec_eh = val.data;
- const void *nsec_eh_bound = val.data + val.len;
knot_dname_t owner[KNOT_DNAME_MAXLEN];
int ret = dname_wire_reconstruct(owner, k, wild_low_kwz);
if (ret) return kr_error(ret);
- ret = entry2answer(ans, AR_WILD, nsec_eh, nsec_eh_bound,
+ ret = entry2answer(ans, AR_WILD, nsec_eh, knot_db_val_bound(val),
owner, KNOT_RRTYPE_NSEC, new_ttl);
if (ret) return kr_error(ret);
nsec_rr = ans->rrsets[AR_WILD].set.rr;
knot_db_val_t val = key_NSEC3_common(k, nsec3_name /*only zname required*/,
nsec_p_hash);
if (!val.data) return val;
- int len = base32hex_decode(nsec3_name + 1, nsec3_name[0], val.data + val.len,
- KR_CACHE_KEY_MAXLEN - val.len);
+ int len = base32hex_decode(nsec3_name + 1, nsec3_name[0],
+ knot_db_val_bound(val), KR_CACHE_KEY_MAXLEN - val.len);
if (len != NSEC3_HASH_LEN) {
return VAL_EMPTY;
}
assert(false);
return VAL_EMPTY;
}
- memcpy(val.data + val.len, hash.data, NSEC3_HASH_LEN);
+ memcpy(knot_db_val_bound(val), hash.data, NSEC3_HASH_LEN);
free(hash.data);
#endif
*new_ttl = new_ttl_;
}
if (hash_low) {
- *hash_low = key_found.data + hash_off;
+ *hash_low = (uint8_t *)key_found.data + hash_off;
}
if (is_exact) {
/* Nothing else to do. */
return "unexpected next hash length";
}
/* B. do the actual range check. */
- const uint8_t * const hash_searched = key.data + hash_off;
+ const uint8_t * const hash_searched = (uint8_t *)key.data + hash_off;
bool covers = /* we know for sure that the low end is before the searched name */
nsec3_hash_ordered(hash_searched, hash_next)
/* and the wrap-around case */
- || nsec3_hash_ordered(hash_next, key_found.data + hash_off);
+ || nsec3_hash_ordered(hash_next, (const uint8_t *)key_found.data + hash_off);
if (!covers) {
return "range search miss (!covers)";
}
static void key_NSEC3_hash2text(const knot_db_val_t key, char *text)
{
assert(key.data && key.len > NSEC3_HASH_LEN);
- const uint8_t *hash_raw = key.data + key.len - NSEC3_HASH_LEN;
+ const uint8_t *hash_raw = knot_db_val_bound(key) - NSEC3_HASH_LEN;
/* CACHE_KEY_DEF ^^ */
int len = base32hex_encode(hash_raw, NSEC3_HASH_LEN, (uint8_t *)text,
NSEC3_HASH_TXT_LEN);
? AR_CPE : AR_NSEC;
{
const struct entry_h *nsec_eh = val.data;
- const void *nsec_eh_bound = val.data + val.len;
memset(&ans->rrsets[ans_id], 0, sizeof(ans->rrsets[ans_id]));
- int ret = entry2answer(ans, ans_id, nsec_eh, nsec_eh_bound,
+ int ret = entry2answer(ans, ans_id, nsec_eh, knot_db_val_bound(val),
owner, KNOT_RRTYPE_NSEC3, new_ttl);
if (ret) return kr_error(ret);
}
int ret = dname_wire_reconstruct(owner, k->zname, hash_low);
if (unlikely(ret)) return kr_ok();
const struct entry_h *nsec_eh = val.data;
- const void *nsec_eh_bound = val.data + val.len;
- ret = entry2answer(ans, AR_WILD, nsec_eh, nsec_eh_bound,
+ ret = entry2answer(ans, AR_WILD, nsec_eh, knot_db_val_bound(val),
owner, KNOT_RRTYPE_NSEC3, new_ttl);
if (ret) return kr_error(ret);
}