oldest one instead of the freshest one. This way even late memory corruptions
have a chance to be detected.
+Another non-destructive approach is to use "-dMbackup". A full copy of the
+object is made after its end, which eases inspection (e.g. of the parts
+scratched by the pool_item elements), and a comparison is made upon allocation
+of that object, just like with "-dMintegrity", causing a crash on mismatch. The
+initial 4 words corresponding to the list are ignored as well. Note that when
+both "-dMbackup" and "-dMintegrity" are used, the copy is performed before the
+area is scratched, and the comparison is done by "-dMintegrity" only.
+
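To make the mechanism concrete outside of the pool code, here is a minimal
standalone sketch of the general technique (not part of the patch);
slot_new(), slot_release() and slot_acquire() are hypothetical names, and the
exemption of the initial list words as well as the optional tag/caller word
are omitted for brevity:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* each slot is allocated twice as large as requested; the upper half
     * receives a snapshot of the lower half at release time.
     */
    struct slot {
        size_t size;          /* user-visible size */
        unsigned char mem[];  /* [0..size-1] object, [size..2*size-1] backup */
    };

    static struct slot *slot_new(size_t size)
    {
        struct slot *s = malloc(sizeof(*s) + 2 * size);

        if (s)
            s->size = size;
        return s;
    }

    /* snapshot the object into the reserved area after its end */
    static void slot_release(struct slot *s)
    {
        memcpy(s->mem + s->size, s->mem, s->size);
    }

    /* crash if anything changed while the object was "free" */
    static void slot_acquire(struct slot *s)
    {
        if (memcmp(s->mem, s->mem + s->size, s->size) != 0) {
            fprintf(stderr, "use-after-free detected\n");
            abort();
        }
    }

    int main(void)
    {
        struct slot *s = slot_new(64);

        if (!s)
            return 1;
        memset(s->mem, 'A', s->size);
        slot_release(s);
        s->mem[10] = 'X'; /* simulated use-after-free write */
        slot_acquire(s);  /* aborts here */
        free(s);
        return 0;
    }
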
When build option DEBUG_MEMORY_POOLS is set, or the boot-time option "-dMtag"
is passed on the executable's command line, pool objects are allocated with
one extra pointer compared to the requested size, so that the bytes that follow
last released. This works best with "no-merge", "cold-first" and "tag".
Enabling this option will slightly increase the CPU usage.
+ - backup / no-backup:
+ This option keeps a copy of each object at release time, allowing
+ developers to inspect released objects. It also performs a comparison
+ at allocation time to detect whether anything changed in between,
+ which would indicate a use-after-free condition. This doubles the
+ memory usage and slightly increases the CPU usage (similarly to
+ "integrity"). If combined with "integrity", it still duplicates the
+ contents but doesn't perform the comparison (which is then done by
+ "integrity"). Just like "integrity", it works best with "no-merge",
+ "cold-first" and "tag".
+
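As a usage illustration (assuming the comma-delimited syntax used by the other
"-dM" debugging options), the combination recommended above could be enabled
with a command line such as:

    haproxy -f haproxy.cfg -dMbackup,integrity,no-merge,cold-first,tag

"backup" and "integrity" may be combined, in which case the copy is still made
but the comparison is left to "integrity".
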
- no-global / global:
Depending on the operating system, a process-wide global memory cache
may be enabled if it is estimated that the standard allocator is too
{ POOL_DBG_TAG, "tag", "no-tag", "add tag at end of allocated objects" },
{ POOL_DBG_POISON, "poison", "no-poison", "poison newly allocated objects" },
{ POOL_DBG_UAF, "uaf", "no-uaf", "enable use-after-free checks (slow)" },
+ { POOL_DBG_BACKUP, "backup", "no-backup", "compare object contents on re-alloc" },
{ 0 /* end */ }
};
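For illustration only, here is a small assumption-based sketch (hypothetical
names, not the patch's actual parser) of how a set/clr table of this shape can
be scanned to handle a comma-delimited option string:

    #include <stdio.h>
    #include <string.h>

    struct dbg_opt {
        unsigned int flag;   /* bit to set or clear */
        const char *set;     /* keyword that sets the bit */
        const char *clr;     /* keyword that clears it */
    };

    static const struct dbg_opt dbg_opts[] = {
        { 0x01, "tag",    "no-tag"    },
        { 0x02, "poison", "no-poison" },
        { 0x04, "backup", "no-backup" },
        { 0, NULL, NULL } /* end */
    };

    /* parses <list> in place and updates <flags>; returns 0 on success
     * or -1 on the first unknown token.
     */
    static int parse_dbg_options(char *list, unsigned int *flags)
    {
        char *tok;
        int i;

        for (tok = strtok(list, ","); tok; tok = strtok(NULL, ",")) {
            for (i = 0; dbg_opts[i].set; i++) {
                if (strcmp(tok, dbg_opts[i].set) == 0) {
                    *flags |= dbg_opts[i].flag;
                    break;
                }
                if (strcmp(tok, dbg_opts[i].clr) == 0) {
                    *flags &= ~dbg_opts[i].flag;
                    break;
                }
            }
            if (!dbg_opts[i].set)
                return -1;
        }
        return 0;
    }

    int main(void)
    {
        char arg[] = "backup,no-poison";
        unsigned int flags = 0x02; /* "poison" initially set */

        if (parse_dbg_options(arg, &flags) == 0)
            printf("flags=0x%02x\n", flags); /* prints 0x04 */
        return 0;
    }
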
size = ((size + align - 1) & -align);
}
+ if (pool_debugging & POOL_DBG_BACKUP) {
+ /* keep a full backup of each object at release time. The backup
+ * needs a word-aligned size, which we have now that the size was
+ * rounded up just above.
+ */
+ extra += size;
+ }
+
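As a worked example of the sizing above (the 16-byte alignment is only an
assumption for illustration): a pool created for 56-byte objects first has its
size rounded up to 64 bytes by the alignment statement; with "backup" enabled,
<extra> then grows by those same 64 bytes, so each object effectively consumes
128 bytes plus whatever the optional tag/caller words add.
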
/* TODO: thread: we do not lock pool list for now because all pools are
* created during HAProxy startup (so before threads creation) */
start = &pools;
pool_put_to_os_nodec(pool, ptr);
}
+/* Copies the contents of <item> to the reserved area after it to keep a
+ * backup of the object. The item part is left untouched.
+ */
+void pool_copy_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size)
+{
+ ulong *ptr = (ulong *)item;
+ ulong extra;
+
+ if (size <= sizeof(*item))
+ return;
+
+ /* one extra word follows the area when tag and/or caller debugging
+ * is enabled; skip it before writing the backup
+ */
+ extra = !!(pool_debugging & (POOL_DBG_TAG|POOL_DBG_CALLER));
+ memcpy(&ptr[size/sizeof(*ptr) + extra], ptr, size);
+}
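As a concrete illustration of the arithmetic above (assuming 64-bit ulongs,
i.e. 8-byte words): for a 64-byte object with tag or caller debugging enabled,
size/sizeof(*ptr) is 8 and extra is 1, so the backup spans words 9..16 (byte
offset 72 onwards). The initial 4 words holding the list pointers are copied
too, but the comparison in pool_check_pattern() below starts at ofs == 4, so
e.g. ptr[4] is checked against ptr[13] and the list words end up ignored, as
described in the documentation above.
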
/* Updates <pch>'s fill_pattern and fills the free area after <item> with it,
* up to <size> bytes. The item part is left untouched.
}
/* check for a pool_cache_item integrity after extracting it from the cache. It
- * must have been previously initialized using pool_fill_pattern(). If any
- * corruption is detected, the function provokes an immediate crash.
+ * must have been previously initialized using either pool_fill_pattern() or
+ * pool_copy_pattern(). If any corruption is detected, the function provokes an
+ * immediate crash.
*/
void pool_check_pattern(struct pool_cache_head *pch, struct pool_head *pool, struct pool_cache_item *item, const void *caller)
{
if (size <= sizeof(*item))
return;
- /* let's check that all words past *item are equal */
- ofs = sizeof(*item) / sizeof(*ptr);
- u = ptr[ofs++];
- while (ofs < size / sizeof(*ptr)) {
- if (unlikely(ptr[ofs] != u)) {
- pool_inspect_item("cache corruption detected", pool, item, caller, ofs * sizeof(*ptr));
- ABORT_NOW();
+ if (pool_debugging & POOL_DBG_INTEGRITY) {
+ /* let's check that all words past *item are equal */
+ ofs = sizeof(*item) / sizeof(*ptr);
+ u = ptr[ofs++];
+ while (ofs < size / sizeof(*ptr)) {
+ if (unlikely(ptr[ofs] != u)) {
+ pool_inspect_item("cache corruption detected", pool, item, caller, ofs * sizeof(*ptr));
+ ABORT_NOW();
+ }
+ ofs++;
+ }
+ } else {
+ /* no fill pattern: the object was backed up by pool_copy_pattern()
+ * at release time, so compare each word against its backup copy,
+ * skipping the optional extra tag/caller word counted in <u> below
+ */
+ ofs = sizeof(*item) / sizeof(*ptr);
+ u = !!(pool_debugging & (POOL_DBG_TAG|POOL_DBG_CALLER));
+ while (ofs < size / sizeof(*ptr)) {
+ if (unlikely(ptr[ofs] != ptr[size/sizeof(*ptr) + u + ofs])) {
+ pool_inspect_item("cache corruption detected", pool, item, caller, ofs * sizeof(*ptr));
+ ABORT_NOW();
+ }
+ ofs++;
}
- ofs++;
}
}
while (released < count && !LIST_ISEMPTY(&ph->list)) {
item = LIST_PREV(&ph->list, typeof(item), by_pool);
BUG_ON(&item->by_pool == &ph->list);
- if (unlikely(pool_debugging & POOL_DBG_INTEGRITY))
+ if (unlikely(pool_debugging & (POOL_DBG_INTEGRITY|POOL_DBG_BACKUP)))
pool_check_pattern(ph, pool, item, caller);
LIST_DELETE(&item->by_pool);
LIST_DELETE(&item->by_lru);
LIST_INSERT(&th_ctx->pool_lru_head, &item->by_lru);
POOL_DEBUG_TRACE_CALLER(pool, item, caller);
ph->count++;
+ /* the backup must be taken before "integrity" scratches the area */
+ if (unlikely(pool_debugging & POOL_DBG_BACKUP))
+ pool_copy_pattern(ph, item, pool->size);
+
if (unlikely(pool_debugging & POOL_DBG_INTEGRITY))
pool_fill_pattern(ph, item, pool->size);
+
pool_cache_count++;
pool_cache_bytes += pool->size;