bool has_optout : 1; /**< Only for packets; persisted DNSSEC_OPTOUT. */
uint8_t _pad; /**< We need even alignment for data now. */
uint8_t data[];
-};
+/* Well, we don't really need packing or alignment changes,
+ * but due to LMDB the whole structure may not be stored at an aligned address,
+ * and we need compilers (for non-x86) to know it to avoid SIGBUS (test: UBSAN). */
+} __attribute__ ((packed,aligned(1)));
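For illustration (not part of the patch), a minimal self-contained sketch of the hazard this attribute addresses: a struct placed at an odd address by the storage layer, read safely by copying bytes into an aligned local. All names here (demo_entry, buf) are made up for the demo.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_entry {                 /* simplified stand-in for entry_h */
    uint32_t time;
    uint32_t ttl;
} __attribute__ ((packed, aligned(1)));

int main(void)
{
    uint8_t buf[1 + sizeof(struct demo_entry)];
    const struct demo_entry src = { .time = 1234, .ttl = 300 };
    memcpy(buf + 1, &src, sizeof(src));   /* struct now starts at an odd address */

    /* Safe on all targets: memcpy the field into an aligned local
     * instead of dereferencing a misaligned uint32_t pointer. */
    uint32_t ttl;
    memcpy(&ttl, buf + 1 + offsetof(struct demo_entry, ttl), sizeof(ttl));
    printf("ttl = %u\n", (unsigned)ttl);  /* prints: ttl = 300 */
    return 0;
}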
struct entry_apex;
/** Check basic consistency of entry_h for 'E' entries, not looking into ->data.
assert(sizeof(count) == KR_CACHE_RR_COUNT_SIZE);
memcpy(&count, data, sizeof(count));
const uint8_t *rdata = data + sizeof(count);
- if (rdataset_count)
- *rdataset_count = count;
- for (int i = 0; i < count; ++i)
- rdata += knot_rdata_size(((knot_rdata_t *)rdata)->len);
+ if (rdataset_count) // memcpy is safe for unaligned case (on non-x86)
+ memcpy(rdataset_count, &count, sizeof(count));
+ for (int i = 0; i < count; ++i) {
+ __typeof__(((knot_rdata_t *)NULL)->len) len; // memcpy as above
+ memcpy(&len, rdata + offsetof(knot_rdata_t, len), sizeof(len));
+ rdata += knot_rdata_size(len);
+ }
return rdata - (data + sizeof(count));
}
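As a stand-alone illustration of the walk above, a hedged sketch with a deliberately simplified record layout (a native-endian uint16_t length followed by the data bytes; the real code uses knot_rdata_t and knot_rdata_size(), which also account for padding):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static size_t walk_rdata(const uint8_t *data, uint16_t count)
{
    const uint8_t *p = data;
    for (uint16_t i = 0; i < count; ++i) {
        uint16_t len;
        memcpy(&len, p, sizeof(len));  /* p may be unaligned; memcpy is safe */
        p += sizeof(len) + len;
    }
    return p - data;                   /* total serialized size */
}

int main(void)
{
    uint8_t blob[16];
    const uint16_t len1 = 3, len2 = 1;
    size_t off = 0;
    memcpy(blob + off, &len1, sizeof(len1)); off += sizeof(len1);
    memcpy(blob + off, "abc", len1);         off += len1;
    memcpy(blob + off, &len2, sizeof(len2)); off += sizeof(len2);
    memcpy(blob + off, "x", len2);           off += len2;
    printf("%zu\n", walk_rdata(blob, 2));    /* prints: 8 */
    return 0;
}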
/* We know it starts before sname, so let's check the other end.
* 1. construct the key for the next name - kwz_hi. */
/* it's *full* name ATM */
- const knot_rdata_t *next = (const knot_rdata_t *)
- (eh->data + KR_CACHE_RR_COUNT_SIZE);
+ /* Technical complication: memcpy is safe for unaligned case (on non-x86) */
+ __typeof__(((knot_rdata_t *)NULL)->len) next_len;
+ const uint8_t *next_data;
+ { /* next points to knot_rdata_t but possibly unaligned */
+ const uint8_t *next = eh->data + KR_CACHE_RR_COUNT_SIZE;
+ memcpy(&next_len, next + offsetof(knot_rdata_t, len), sizeof(next_len));
+ next_data = next + offsetof(knot_rdata_t, data);
+ }
if (KR_CACHE_RR_COUNT_SIZE != 2 || get_uint16(eh->data) == 0) {
assert(false);
return "ERROR";
/* Lower-case chs; see also RFC 6840 5.1.
* LATER(optim.): we do lots of copying etc. */
knot_dname_t lower_buf[KNOT_DNAME_MAXLEN];
- ret = knot_dname_to_wire(lower_buf, next->data,
- MIN(next->len, KNOT_DNAME_MAXLEN));
+ ret = knot_dname_to_wire(lower_buf, next_data,
+ MIN(next_len, KNOT_DNAME_MAXLEN));
if (ret < 0) { /* _ESPACE */
return "range search found record with incorrect contents";
}
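For reference, a self-contained sketch of the lower-casing step mentioned above (RFC 6840 5.1), operating directly on a wire-format name; the patch itself defers validation and copying to libknot, and dname_tolower is a made-up helper:

#include <ctype.h>
#include <stdint.h>
#include <stdio.h>

static void dname_tolower(uint8_t *name)
{
    while (*name != 0) {             /* zero length byte = root label */
        uint8_t len = *name++;       /* length byte of the next label */
        for (uint8_t i = 0; i < len; ++i, ++name)
            *name = (uint8_t)tolower(*name);
    }
}

int main(void)
{
    uint8_t name[] = "\3WwW\7ExAmPlE\3CoM";  /* trailing NUL = root label */
    dname_tolower(name);
    /* label bytes (\3, \7) are unprintable; letters are now lower-case */
    printf("%s\n", (char *)name);
    return 0;
}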
{
if (len == sizeof(struct in_addr)) {
/* Filter ANY and 127.0.0.0/8 */
- uint32_t ip_host = ntohl(*(const uint32_t *)(addr));
+ uint32_t ip_host; /* Memcpy is safe for unaligned case (on non-x86) */
+ memcpy(&ip_host, addr, sizeof(ip_host));
+ ip_host = ntohl(ip_host);
if (ip_host == 0 || (ip_host & 0xff000000) == 0x7f000000) {
return false;
}
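The same filter as a stand-alone function, sketched here to show the unaligned-safe read in isolation; ipv4_usable is a made-up name:

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static bool ipv4_usable(const uint8_t *addr)  /* 4 bytes, any alignment */
{
    uint32_t ip_host;
    memcpy(&ip_host, addr, sizeof(ip_host));  /* no direct deref of addr */
    ip_host = ntohl(ip_host);
    /* reject INADDR_ANY and the whole 127.0.0.0/8 loopback block */
    return ip_host != 0 && (ip_host & 0xff000000) != 0x7f000000;
}

int main(void)
{
    const uint8_t loopback[4] = { 127, 0, 0, 1 };
    const uint8_t public_[4]  = { 192, 0, 2, 1 };
    printf("%d %d\n", ipv4_usable(loopback), ipv4_usable(public_)); /* 0 1 */
    return 0;
}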
} else if (value.len != sizeof(struct rtt_state)) {
assert(false); // shouldn't happen but let's be more robust
state = default_rtt_state;
- } else {
- state = *(struct rtt_state *)value.data;
+ } else { // memcpy is safe for unaligned case (on non-x86)
+ memcpy(&state, value.data, sizeof(state));
}
free(key.data);
kr_cache_close(kres_db);
}
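A hedged, self-contained sketch of the load pattern above; db_val and the rtt_state fields are simplified stand-ins for the real knot_db_val_t and struct rtt_state:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rtt_state { int32_t srtt; int32_t variance; };  /* simplified */
struct db_val { const void *data; size_t len; };       /* like knot_db_val_t */

static struct rtt_state load_rtt(struct db_val val)
{
    const struct rtt_state def = { .srtt = 0, .variance = -1 };
    if (val.data == NULL || val.len != sizeof(struct rtt_state))
        return def;                       /* missing or inconsistent record */
    struct rtt_state out;
    memcpy(&out, val.data, sizeof(out));  /* val.data may be unaligned */
    return out;
}

int main(void)
{
    struct rtt_state stored = { 42, 7 };
    struct db_val val = { &stored, sizeof(stored) };
    printf("srtt = %d\n", load_rtt(val).srtt);  /* prints: srtt = 42 */
    return 0;
}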
-const uint16_t *kr_gc_key_consistent(knot_db_val_t key)
+int kr_gc_key_consistent(knot_db_val_t key)
{
- const static uint16_t NSEC1 = KNOT_RRTYPE_NSEC;
- const static uint16_t NSEC3 = KNOT_RRTYPE_NSEC3;
const uint8_t *kd = key.data;
ssize_t i;
/* CACHE_KEY_DEF */
for (i = 2; kd[i - 1] || kd[i - 2]; ++i) {
if (i >= key.len) {
// TODO: assert(!EINVAL) -> kr_assume()
- return NULL;
+ return kr_error(EINVAL);
}
}
}
case 'E':
if (i + 1 + sizeof(uint16_t) > key.len) {
assert(!EINVAL);
- return NULL;
+ return kr_error(EINVAL);
}
- return (uint16_t *) & kd[i + 1];
+ uint16_t type;
+ memcpy(&type, kd + i + 1, sizeof(type));
+ return type;
case '1':
- return &NSEC1;
+ return KNOT_RRTYPE_NSEC;
case '3':
- return &NSEC3;
+ return KNOT_RRTYPE_NSEC3;
case 'S': // the rtt_state entries are considered inconsistent, at least for now
- return NULL;
+ return -1;
default:
assert(!EINVAL);
- return NULL;
+ return kr_error(EINVAL);
}
}
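To show why returning the type by value helps callers, a self-contained sketch of the new calling convention; gc_key_type is a stub standing in for kr_gc_key_consistent, and -22 is an assumption mirroring kr_error(EINVAL) being -EINVAL on Linux:

#include <stdio.h>

static int gc_key_type(char tag)  /* stub for kr_gc_key_consistent */
{
    switch (tag) {
    case 'E': return 1;    /* real code reads this out of the key; e.g. KNOT_RRTYPE_A */
    case '1': return 47;   /* KNOT_RRTYPE_NSEC */
    case '3': return 50;   /* KNOT_RRTYPE_NSEC3 */
    case 'S': return -1;   /* rtt_state entry: not an RR, "inconsistent" */
    default:  return -22;  /* assumption: like kr_error(EINVAL) */
    }
}

int main(void)
{
    const char tags[] = { 'E', '1', 'S', '?' };
    for (unsigned i = 0; i < sizeof(tags); ++i) {
        int type = gc_key_type(tags[i]);
        if (type >= 0)  /* no pointer into the DB buffer to keep alive */
            printf("'%c' -> rrtype %d\n", tags[i], type);
        else
            printf("'%c' -> inconsistent (%d)\n", tags[i], type);
    }
    return 0;
}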
info.entry_size = key.len + val.len;
info.valid = false;
- const uint16_t *entry_type =
- ret == KNOT_EOK ? kr_gc_key_consistent(key) : NULL;
+ const int entry_type = ret == KNOT_EOK ? kr_gc_key_consistent(key) : -1;
const struct entry_h *entry = NULL;
- if (entry_type != NULL) {
+ if (entry_type >= 0) {
counter_gc_consistent++;
- entry = val2entry(val, *entry_type);
+ entry = val2entry(val, entry_type);
}
/* TODO: perhaps improve some details around here:
* - rtt_state entries are considered gc_inconsistent;
* here as kr_inconsistent */
if (entry != NULL) {
info.valid = true;
- info.rrtype = *entry_type;
+ info.rrtype = entry_type;
info.expires_in = entry->time + entry->ttl - now;
- info.no_labels = entry_labels(&key, *entry_type);
+ info.no_labels = entry_labels(&key, entry_type);
}
counter_iter++;
counter_kr_consistent += info.valid;
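As a small aside on the expiry arithmetic above, a sketch assuming expires_in is kept in a signed, wider type so that already-expired records come out negative:

#include <stdint.h>
#include <stdio.h>

/* assumption: the info struct stores expires_in as a signed 64-bit value */
static int64_t expires_in(uint32_t time, uint32_t ttl, uint32_t now)
{
    return (int64_t)time + ttl - now;  /* negative = already expired */
}

int main(void)
{
    printf("%lld\n", (long long)expires_in(1000, 300, 1200));  /* 100 */
    printf("%lld\n", (long long)expires_in(1000, 300, 1400));  /* -100 */
    return 0;
}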
int kr_gc_cache_iter(knot_db_t * knot_db, const kr_cache_gc_cfg_t *cfg,
kr_gc_iter_callback callback, void *ctx);
-/** Return RR type corresponding to the key or NULL.
+/** Return RR type corresponding to the key or negative error code.
*
- * NULL is returned on unexpected values (those also trigger assertion)
+ * Error is returned on unexpected values (those also trigger assertion)
* and on other kinds of data in cache (e.g. struct rtt_state).
*/
-const uint16_t *kr_gc_key_consistent(knot_db_val_t key);
+int kr_gc_key_consistent(knot_db_val_t key);
/** Printf a *binary* string in a human-readable way. */
void debug_printbin(const char *str, unsigned int len);
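One possible shape of such a helper (illustrative only; the project's debug_printbin may differ): printable bytes pass through, everything else is hex-escaped.

#include <ctype.h>
#include <stdio.h>

static void printbin(const char *str, unsigned int len)  /* hypothetical */
{
    for (unsigned int i = 0; i < len; ++i) {
        unsigned char c = (unsigned char)str[i];
        if (isprint(c))
            putchar(c);
        else
            printf("\\x%02x", c);
    }
    putchar('\n');
}

int main(void)
{
    printbin("key\x01\x00" "E", 6);  /* prints: key\x01\x00E */
    return 0;
}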
switch (ret) {
case KNOT_EOK:
deleted_records++;
- const uint16_t *entry_type = kr_gc_key_consistent(**i);
- if (entry_type != NULL) // some "inconsistent" entries are OK
- rrtypelist_add(&deleted_rrtypes, *entry_type);
+ const int entry_type = kr_gc_key_consistent(**i);
+ if (entry_type >= 0) // some "inconsistent" entries are OK
+ rrtypelist_add(&deleted_rrtypes, entry_type);
break;
case KNOT_ENOENT:
already_gone++;