.threshold = chunk_size >> 1,
.last_big = &pool->last_big
};
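+ /* Under Valgrind, verify that the whole pool header is initialized before it is handed out (the same check guards each entry point annotated below). */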
+ VALGRIND_CHECK_MEM_IS_DEFINED(pool, sizeof(*pool)); // TODO
return pool;
}
if (pool == NULL) {
return;
}
+ VALGRIND_CHECK_MEM_IS_DEFINED(pool, sizeof(*pool)); // TODO
DBG("Deleting mempool %p", pool);
mp_free_big_chain(pool->state.last[1]);
mp_free_chain(pool->unused);
void
mp_flush(struct mempool *pool)
{
+ VALGRIND_CHECK_MEM_IS_DEFINED(pool, sizeof(*pool)); // TODO
mp_free_big_chain(pool->state.last[1]);
struct mempool_chunk *chunk = pool->state.last[0], *next;
while (chunk) {
pool->state.last[1] = NULL;
pool->state.free[1] = 0;
pool->last_big = &pool->last_big;
+ VALGRIND_CHECK_MEM_IS_DEFINED(pool, sizeof(*pool)); // TODO
}
static void
void
mp_stats(struct mempool *pool, struct mempool_stats *stats)
{
+ VALGRIND_CHECK_MEM_IS_DEFINED(pool, sizeof(*pool)); // TODO
bzero(stats, sizeof(*stats));
mp_stats_chain(pool->state.last[0], stats, 0);
mp_stats_chain(pool->state.last[1], stats, 1);
uint64_t
mp_total_size(struct mempool *pool)
{
+ VALGRIND_CHECK_MEM_IS_DEFINED(pool, sizeof(*pool)); // TODO
struct mempool_stats stats;
mp_stats(pool, &stats);
return stats.total_size;
static void *
mp_alloc_internal(struct mempool *pool, unsigned size)
{
+ VALGRIND_CHECK_MEM_IS_DEFINED(pool, sizeof(*pool)); // TODO
struct mempool_chunk *chunk;
if (size <= pool->threshold) {
pool->idx = 0;
void *
mp_alloc(struct mempool *pool, unsigned size)
{
+ VALGRIND_CHECK_MEM_IS_DEFINED(pool, sizeof(*pool)); // TODO
unsigned avail = pool->state.free[0] & ~(CPU_STRUCT_ALIGN - 1);
+ VALGRIND_CHECK_MEM_IS_DEFINED(&avail, sizeof(unsigned)); // TODO
void *ptr = NULL;
if (size <= avail) {
pool->state.free[0] = avail - size;
+ VALGRIND_CHECK_MEM_IS_DEFINED(pool->state.last[0], sizeof(uint8_t *));
ptr = (uint8_t*)pool->state.last[0] - avail;
VALGRIND_MEMPOOL_ALLOC(pool->state.last[0], ptr, size);
} else {
ptr = mp_alloc_internal(pool, size);
}
ASAN_UNPOISON_MEMORY_REGION(ptr, size);
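+ /* A freshly returned block only needs to be addressable; its contents are intentionally still undefined. */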
+ VALGRIND_CHECK_MEM_IS_ADDRESSABLE(ptr, size);
return ptr;
}
void *
mp_alloc_noalign(struct mempool *pool, unsigned size)
{
+ VALGRIND_CHECK_MEM_IS_DEFINED(pool, sizeof(*pool)); // TODO
void *ptr = NULL;
if (size <= pool->state.free[0]) {
ptr = (uint8_t*)pool->state.last[0] - pool->state.free[0];
ptr = mp_alloc_internal(pool, size);
}
ASAN_UNPOISON_MEMORY_REGION(ptr, size);
+ VALGRIND_CHECK_MEM_IS_ADDRESSABLE(ptr, size);
return ptr;
}
#include <contrib/macros.h>
#include <contrib/ucw/mempool.h>
#include <contrib/wire.h>
+#include <contrib/memcheck.h>
#if defined(__GLIBC__) && defined(_GNU_SOURCE)
#include <malloc.h>
#endif
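The wrapper header included above is not shown in this hunk; as a rough sketch (with a hypothetical HAVE_VALGRIND build switch), contrib/memcheck.h could simply forward the Valgrind client requests when the headers are present and stub them out otherwise:

/* contrib/memcheck.h -- hypothetical sketch, not part of this patch. */
#pragma once
#if defined(HAVE_VALGRIND)                           /* hypothetical build switch */
#include <valgrind/memcheck.h>   /* also pulls in valgrind.h for the mempool requests */
#else
#define VALGRIND_CHECK_MEM_IS_DEFINED(addr, len)     ((void)0)
#define VALGRIND_CHECK_MEM_IS_ADDRESSABLE(addr, len) ((void)0)
#define VALGRIND_MAKE_MEM_DEFINED(addr, len)         ((void)0)
#endif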
return ret;
}
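+/* Under Valgrind, verify that the rrset header and each rdata it holds are fully initialized. */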
+static void check_rrset(const knot_rrset_t *rrset)
+{
+ VALGRIND_CHECK_MEM_IS_DEFINED(rrset, sizeof(*rrset));
+ //VALGRIND_CHECK_MEM_IS_DEFINED(&rrset->rrs, sizeof(rrset->rrs));
+ uint16_t rr_count = rrset->rrs.count;
+ knot_rdata_t *rr = rrset->rrs.rdata;
+ // Loop over rdata in rrset.
+ for (uint16_t i = 0; i < rr_count; i++) {
+ VALGRIND_CHECK_MEM_IS_DEFINED(&rr->len, sizeof(rr->len));
+ if (rr->len > 0)
+ VALGRIND_CHECK_MEM_IS_DEFINED(rr->data, rr->len);
+
+ rr = knot_rdataset_next(rr);
+ }
+}
+
static int parse_packet(knot_pkt_t *query)
{
if (!query){
/* Parse query packet. */
int ret = knot_pkt_parse(query, 0);
if (ret == KNOT_ETRAIL) {
+ if (query->rrset_count > 0) // TODO
+ check_rrset(query->rr);
/* Extra data after message end. */
ret = kr_error(EMSGSIZE);
} else if (ret != KNOT_EOK) {
/* Malformed query. */
ret = kr_error(EPROTO);
} else {
+ if (query->rrset_count > 0) // TODO
+ check_rrset(query->rr);
ret = kr_ok();
}
#include "contrib/base32hex.h"
#include "contrib/cleanup.h"
#include "contrib/macros.h"
+#include "contrib/memcheck.h"
#include "lib/cache/api.h"
#include "lib/cache/cdb_lmdb.h"
#include "lib/defines.h"
eh->time = timestamp;
eh->ttl = MAX(MIN(ttl, cache->ttl_max), cache->ttl_min);
eh->rank = rank;
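+ /* The whole destination buffer of the new entry must be addressable before the rdatasets are serialized into it. */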
+ VALGRIND_CHECK_MEM_IS_ADDRESSABLE(val_new_entry.data, val_new_entry.len);
if (rdataset_dematerialize(&rr->rrs, eh->data)
|| rdataset_dematerialize(rds_sigs, eh->data + rr_ssize)) {
/* minimize the damage from incomplete write; TODO: better */
eh->rank = 0;
assert(false);
}
+ VALGRIND_CHECK_MEM_IS_DEFINED(val_new_entry.data, val_new_entry.len);
assert(entry_h_consistent_E(val_new_entry, rr->type));
+ VALGRIND_CHECK_MEM_IS_DEFINED(val_new_entry.data, val_new_entry.len);
#if 0 /* Occasionally useful when debugging some kinds of changes. */
{
);
} }
+ VALGRIND_CHECK_MEM_IS_DEFINED(val_new_entry.data, val_new_entry.len);
return (ssize_t) val_new_entry.len;
return_needs_pkt:
if (needs_pkt) *needs_pkt = true;
{
/* Cache doesn't require durability, we can be
* loose with the requirements as a tradeoff for speed. */
- const unsigned flags = MDB_WRITEMAP | MDB_MAPASYNC | MDB_NOTLS;
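+ /* Without MDB_WRITEMAP/MDB_MAPASYNC the cache file is updated via write(2) rather than through a shared writable map, which presumably keeps memcheck's view of the mapped region simpler while debugging. */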
+ const unsigned flags = MDB_NOTLS; // TODO
+ // const unsigned flags = MDB_WRITEMAP | MDB_MAPASYNC | MDB_NOTLS;
int ret = cdb_open_env(env, flags, path, mapsize, stats);
if (ret != 0) {
return ret;
}
return ret;
}
- VALGRIND_CHECK_MEM_IS_DEFINED(_val.mv_data, _val.mv_size);
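+ /* Treat whatever comes out of the cache map as defined from here on; undefined bytes should instead be caught at the point where entries are written. */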
+ VALGRIND_MAKE_MEM_DEFINED(_val.mv_data, _val.mv_size);
/* Update the result. */
val[i] = val_mdb2knot(_val);
}
#include "lib/cache/impl.h"
#include "lib/utils.h"
+#include "contrib/memcheck.h"
static int entry_h_len(knot_db_val_t val);
void entry_list_memcpy(struct entry_apex *ea, entry_list_t list)
{
assert(ea);
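+ /* Both the (data, len) descriptors and, below, each referenced payload must be fully defined before they are copied into the apex entry. */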
+ VALGRIND_CHECK_MEM_IS_DEFINED(list, sizeof(entry_list_t));
memset(ea, 0, offsetof(struct entry_apex, data));
ea->has_ns = list[EL_NS ].len;
ea->has_cname = list[EL_CNAME ].len;
uint8_t *it = ea->data;
for (int i = 0; i < EL_LENGTH; ++i) {
if (list[i].data) {
+ VALGRIND_CHECK_MEM_IS_DEFINED(list[i].data, list[i].len);
memcpy(it, list[i].data, list[i].len);
/* LATER(optim.): coalesce consecutive writes? */
} else {
*/
#include "lib/cache/impl.h"
+#include "contrib/memcheck.h"
int rdataset_dematerialize(const knot_rdataset_t *rds, uint8_t * restrict data)
{
data += sizeof(rr_count);
if (rr_count) {
size_t size = knot_rdataset_size(rds);
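+ /* The source rdata block has to be fully defined before it is copied into the cache entry. */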
+ VALGRIND_CHECK_MEM_IS_DEFINED(rds->rdata, size);
memcpy(data, rds->rdata, size);
data += size;
}