From: Vladimír Čunát
Date: Tue, 6 Apr 2021 15:28:52 +0000 (+0200)
Subject: treewide: fix unaligned access
X-Git-Tag: v5.3.2~10^2~4
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=44f67cf4b0c98c92d5dfd5cdcad72e10d0820c89;p=thirdparty%2Fknot-resolver.git

treewide: fix unaligned access

Some less common HW (not x86, usually ARM) doesn't tolerate unaligned
access to memory, and it is undefined behavior in C as well.  It's
easiest to check with meson's -Db_sanitize=undefined (on any HW).
I pushed millions of real-life QNAME+QTYPE queries over UDP in the
default mode and the sanitizer now comes up clean.
---

diff --git a/lib/cache/impl.h b/lib/cache/impl.h
index b884c361a..fca8653ca 100644
--- a/lib/cache/impl.h
+++ b/lib/cache/impl.h
@@ -45,7 +45,10 @@ struct entry_h {
 	bool has_optout : 1;	/**< Only for packets; persisted DNSSEC_OPTOUT. */
 	uint8_t _pad;		/**< We need even alignment for data now. */
 	uint8_t data[];
-};
+/* Well, we don't really need packing or alignment changes,
+ * but due to LMDB the whole structure may not be stored at an aligned address,
+ * and we need compilers (for non-x86) to know it to avoid SIGBUS (test: UBSAN). */
+} __attribute__ ((packed,aligned(1)));
 struct entry_apex;
 
 /** Check basic consistency of entry_h for 'E' entries, not looking into ->data.
@@ -303,10 +306,13 @@ static inline int rdataset_dematerialized_size(const uint8_t *data, uint16_t *rd
 	assert(sizeof(count) == KR_CACHE_RR_COUNT_SIZE);
 	memcpy(&count, data, sizeof(count));
 	const uint8_t *rdata = data + sizeof(count);
-	if (rdataset_count)
-		*rdataset_count = count;
-	for (int i = 0; i < count; ++i)
-		rdata += knot_rdata_size(((knot_rdata_t *)rdata)->len);
+	if (rdataset_count) // memcpy is safe for unaligned case (on non-x86)
+		memcpy(rdataset_count, &count, sizeof(count));
+	for (int i = 0; i < count; ++i) {
+		__typeof__(((knot_rdata_t *)NULL)->len) len; // memcpy as above
+		memcpy(&len, rdata + offsetof(knot_rdata_t, len), sizeof(len));
+		rdata += knot_rdata_size(len);
+	}
 	return rdata - (data + sizeof(count));
 }
 
diff --git a/lib/cache/nsec1.c b/lib/cache/nsec1.c
index 3985ca3b1..ed242ca84 100644
--- a/lib/cache/nsec1.c
+++ b/lib/cache/nsec1.c
@@ -197,8 +197,14 @@ static const char * find_leq_NSEC1(struct kr_cache *cache, const struct kr_query
 	/* We know it starts before sname, so let's check the other end.
 	 * 1. construct the key for the next name - kwz_hi. */
 	/* it's *full* name ATM */
-	const knot_rdata_t *next = (const knot_rdata_t *)
-		(eh->data + KR_CACHE_RR_COUNT_SIZE);
+	/* Technical complication: memcpy is safe for unaligned case (on non-x86) */
+	__typeof__(((knot_rdata_t *)NULL)->len) next_len;
+	const uint8_t *next_data;
+	{ /* next points to knot_rdata_t but possibly unaligned */
+		const uint8_t *next = eh->data + KR_CACHE_RR_COUNT_SIZE;
+		memcpy(&next_len, next + offsetof(knot_rdata_t, len), sizeof(next_len));
+		next_data = next + offsetof(knot_rdata_t, data);
+	}
 	if (KR_CACHE_RR_COUNT_SIZE != 2 || get_uint16(eh->data) == 0) {
 		assert(false);
 		return "ERROR";
@@ -220,8 +226,8 @@ static const char * find_leq_NSEC1(struct kr_cache *cache, const struct kr_query
 	/* Lower-case chs; see also RFC 6840 5.1.
 	 * LATER(optim.): we do lots of copying etc. */
 	knot_dname_t lower_buf[KNOT_DNAME_MAXLEN];
-	ret = knot_dname_to_wire(lower_buf, next->data,
-				 MIN(next->len, KNOT_DNAME_MAXLEN));
+	ret = knot_dname_to_wire(lower_buf, next_data,
+				 MIN(next_len, KNOT_DNAME_MAXLEN));
 	if (ret < 0) { /* _ESPACE */
 		return "range search found record with incorrect contents";
 	}
diff --git a/lib/layer/iterate.c b/lib/layer/iterate.c
index 65fce45da..cf29f35cd 100644
--- a/lib/layer/iterate.c
+++ b/lib/layer/iterate.c
@@ -132,7 +132,9 @@ static bool is_valid_addr(const uint8_t *addr, size_t len)
 {
 	if (len == sizeof(struct in_addr)) {
 		/* Filter ANY and 127.0.0.0/8 */
-		uint32_t ip_host = ntohl(*(const uint32_t *)(addr));
+		uint32_t ip_host; /* Memcpy is safe for unaligned case (on non-x86) */
+		memcpy(&ip_host, addr, sizeof(ip_host));
+		ip_host = ntohl(ip_host);
 		if (ip_host == 0 || (ip_host & 0xff000000) == 0x7f000000) {
 			return false;
 		}
diff --git a/lib/selection.c b/lib/selection.c
index 56530cba7..4b516bb8a 100644
--- a/lib/selection.c
+++ b/lib/selection.c
@@ -155,8 +155,8 @@ struct rtt_state get_rtt_state(const uint8_t *ip, size_t len,
 	} else if (value.len != sizeof(struct rtt_state)) {
 		assert(false); // shouldn't happen but let's be more robust
 		state = default_rtt_state;
-	} else {
-		state = *(struct rtt_state *)value.data;
+	} else { // memcpy is safe for unaligned case (on non-x86)
+		memcpy(&state, value.data, sizeof(state));
 	}
 
 	free(key.data);
diff --git a/utils/cache_gc/db.c b/utils/cache_gc/db.c
index 1acd37e68..b6bfaacab 100644
--- a/utils/cache_gc/db.c
+++ b/utils/cache_gc/db.c
@@ -64,10 +64,8 @@ void kr_gc_cache_close(struct kr_cache *kres_db, knot_db_t * knot_db)
 	kr_cache_close(kres_db);
 }
 
-const uint16_t *kr_gc_key_consistent(knot_db_val_t key)
+int kr_gc_key_consistent(knot_db_val_t key)
 {
-	const static uint16_t NSEC1 = KNOT_RRTYPE_NSEC;
-	const static uint16_t NSEC3 = KNOT_RRTYPE_NSEC3;
 	const uint8_t *kd = key.data;
 	ssize_t i;
 	/* CACHE_KEY_DEF */
@@ -80,7 +78,7 @@ const uint16_t *kr_gc_key_consistent(knot_db_val_t key)
 		for (i = 2; kd[i - 1] || kd[i - 2]; ++i) {
 			if (i >= key.len) {
 				// TODO: assert(!EINVAL) -> kr_assume()
-				return NULL;
+				return kr_error(EINVAL);
 			}
 		}
 	}
@@ -89,18 +87,20 @@ const uint16_t *kr_gc_key_consistent(knot_db_val_t key)
 	case 'E':
 		if (i + 1 + sizeof(uint16_t) > key.len) {
 			assert(!EINVAL);
-			return NULL;
+			return kr_error(EINVAL);
 		}
-		return (uint16_t *) & kd[i + 1];
+		uint16_t type;
+		memcpy(&type, kd + i + 1, sizeof(type));
+		return type;
 	case '1':
-		return &NSEC1;
+		return KNOT_RRTYPE_NSEC;
 	case '3':
-		return &NSEC3;
+		return KNOT_RRTYPE_NSEC3;
 	case 'S':
 		// the rtt_state entries are considered inconsistent, at least for now
-		return NULL;
+		return -1;
 	default:
 		assert(!EINVAL);
-		return NULL;
+		return kr_error(EINVAL);
 	}
 }
@@ -203,12 +203,11 @@ int kr_gc_cache_iter(knot_db_t * knot_db, const kr_cache_gc_cfg_t *cfg,
 
 		info.entry_size = key.len + val.len;
 		info.valid = false;
-		const uint16_t *entry_type =
-		    ret == KNOT_EOK ? kr_gc_key_consistent(key) : NULL;
+		const int entry_type = ret == KNOT_EOK ? kr_gc_key_consistent(key) : -1;
 		const struct entry_h *entry = NULL;
-		if (entry_type != NULL) {
+		if (entry_type >= 0) {
 			counter_gc_consistent++;
-			entry = val2entry(val, *entry_type);
+			entry = val2entry(val, entry_type);
 		}
 		/* TODO: perhaps improve some details around here:
 		 * - rtt_state entries are considered gc_inconsistent;
@@ -219,9 +218,9 @@ int kr_gc_cache_iter(knot_db_t * knot_db, const kr_cache_gc_cfg_t *cfg,
 		 * here as kr_inconsistent */
 		if (entry != NULL) {
 			info.valid = true;
-			info.rrtype = *entry_type;
+			info.rrtype = entry_type;
 			info.expires_in = entry->time + entry->ttl - now;
-			info.no_labels = entry_labels(&key, *entry_type);
+			info.no_labels = entry_labels(&key, entry_type);
 		}
 		counter_iter++;
 		counter_kr_consistent += info.valid;
diff --git a/utils/cache_gc/db.h b/utils/cache_gc/db.h
index 15d26caa1..48dc101a8 100644
--- a/utils/cache_gc/db.h
+++ b/utils/cache_gc/db.h
@@ -21,12 +21,12 @@ typedef int (*kr_gc_iter_callback)(const knot_db_val_t * key,
 
 int kr_gc_cache_iter(knot_db_t * knot_db, const kr_cache_gc_cfg_t *cfg,
 		     kr_gc_iter_callback callback, void *ctx);
 
-/** Return RR type corresponding to the key or NULL.
+/** Return RR type corresponding to the key or negative error code.
  *
- * NULL is returned on unexpected values (those also trigger assertion)
+ * Error is returned on unexpected values (those also trigger assertion)
  * and on other kinds of data in cache (e.g. struct rtt_state). */
-const uint16_t *kr_gc_key_consistent(knot_db_val_t key);
+int kr_gc_key_consistent(knot_db_val_t key);
 
 /** Printf a *binary* string in a human-readable way. */
 void debug_printbin(const char *str, unsigned int len);
diff --git a/utils/cache_gc/kr_cache_gc.c b/utils/cache_gc/kr_cache_gc.c
index 3843d5ca6..068c457e5 100644
--- a/utils/cache_gc/kr_cache_gc.c
+++ b/utils/cache_gc/kr_cache_gc.c
@@ -276,9 +276,9 @@ int kr_cache_gc(kr_cache_gc_cfg_t *cfg, kr_cache_gc_state_t **state)
 		switch (ret) {
 		case KNOT_EOK:
 			deleted_records++;
-			const uint16_t *entry_type = kr_gc_key_consistent(**i);
-			if (entry_type != NULL) // some "inconsistent" entries are OK
-				rrtypelist_add(&deleted_rrtypes, *entry_type);
+			const int entry_type = kr_gc_key_consistent(**i);
+			if (entry_type >= 0) // some "inconsistent" entries are OK
+				rrtypelist_add(&deleted_rrtypes, entry_type);
 			break;
 		case KNOT_ENOENT:
 			already_gone++;
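
A minimal self-contained sketch of the pattern the patch applies, namely reading a
possibly unaligned value via memcpy instead of a pointer cast. The helper names and
the test buffer are hypothetical, not taken from the repository. Compiling with
-fsanitize=undefined (what meson's -Db_sanitize=undefined enables) reports the
cast-based read as a misaligned load, while the memcpy variant stays clean.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Undefined behavior when p is not aligned for uint16_t; may SIGBUS on some
 * non-x86 CPUs and is flagged by UBSAN's alignment check. */
static uint16_t read_u16_cast(const uint8_t *p)
{
	return *(const uint16_t *)p;
}

/* The pattern used throughout the patch: memcpy is valid for any alignment,
 * and compilers lower it to a plain load where unaligned access is cheap. */
static uint16_t read_u16_memcpy(const uint8_t *p)
{
	uint16_t v;
	memcpy(&v, p, sizeof(v));
	return v;
}

int main(void)
{
	/* Over-align the buffer so buf + 1 is guaranteed misaligned for uint16_t. */
	_Alignas(uint16_t) uint8_t buf[4] = { 0x00, 0x12, 0x34, 0x00 };
	printf("0x%04x\n", read_u16_memcpy(buf + 1)); /* fine on any HW */
	printf("0x%04x\n", read_u16_cast(buf + 1));   /* UBSAN: misaligned load */
	return 0;
}

For struct entry_h the commit takes the other route visible in the diff: marking the
whole struct __attribute__ ((packed,aligned(1))) tells the compiler not to assume
natural alignment for records that LMDB may hand back at odd addresses, while the
per-field memcpy pattern above covers the remaining direct reads.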