# DEBUG_MEM_STATS, DEBUG_DONT_SHARE_POOLS, DEBUG_FD, DEBUG_POOL_INTEGRITY,
# DEBUG_NO_POOLS, DEBUG_FAIL_ALLOC, DEBUG_STRICT_NOCRASH, DEBUG_HPACK,
# DEBUG_AUTH, DEBUG_SPOE, DEBUG_UAF, DEBUG_THREAD, DEBUG_STRICT, DEBUG_DEV,
-# DEBUG_TASK, DEBUG_MEMORY_POOLS.
+# DEBUG_TASK, DEBUG_MEMORY_POOLS, DEBUG_POOL_TRACING.
DEBUG =
#### Trace options
preference for cold cache instead of hot cache, though not as much as
with DEBUG_UAF. This option is meant to be usable in production.
+DEBUG_POOL_TRACING
+ When enabled, the callers of pool_alloc() and pool_free() will be
+ recorded into an extra memory area placed after the end of the object.
+ This may only be required by developers who want to get a few more
+ hints about code paths involved in some crashes, but will serve no
+ purpose outside of this. It remains compatible with (and complements)
+ DEBUG_POOL_INTEGRITY above. Such information becomes meaningless once
+ the objects leave the thread-local cache.
+
DEBUG_MEM_STATS
When enabled, all malloc/calloc/realloc/strdup/free calls are accounted
for per call place (file+line number), and may be displayed or reset on
#endif // DEBUG_MEMORY_POOLS
-# define POOL_EXTRA (POOL_EXTRA_MARK)
+/* It's possible to trace callers of pool_free() by placing their pointer
+ * after the end of the area and the optional mark above.
+ */
+#if defined(DEBUG_POOL_TRACING)
+# define POOL_EXTRA_CALLER (sizeof(void *))
+# define POOL_DEBUG_TRACE_CALLER(pool, item, caller) \
+ do { \
+ typeof(pool) __p = (pool); \
+ typeof(item) __i = (item); \
+ typeof(caller) __c = (caller); \
+ *(typeof(caller)*)(((char *)__i) + __p->size + POOL_EXTRA_MARK) = __c; \
+ } while (0)
+
+#else // DEBUG_POOL_TRACING
+
+# define POOL_EXTRA_CALLER (0)
+# define POOL_DEBUG_TRACE_CALLER(pool, item, caller) do { } while (0)
+
+#endif
+
+# define POOL_EXTRA (POOL_EXTRA_MARK + POOL_EXTRA_CALLER)
/* poison each newly allocated area with this byte if >= 0 */
extern int mem_poison_byte;
/* keep track of where the element was allocated from */
POOL_DEBUG_SET_MARK(pool, item);
+ POOL_DEBUG_TRACE_CALLER(pool, item, caller);
ph->count--;
pool_cache_bytes -= pool->size;
/* keep track of where the element was allocated from */
POOL_DEBUG_SET_MARK(pool, ptr);
+ POOL_DEBUG_TRACE_CALLER(pool, ptr, NULL);
return ptr;
}
/* Frees an object to the local cache, possibly pushing oldest objects to the
* shared cache, which itself may decide to release some of them to the OS.
* While it is unspecified what the object becomes past this point, it is
- * guaranteed to be released from the users' perpective.
+ * guaranteed to be released from the users' perspective. A caller address may
+ * be passed and stored into the area when DEBUG_POOL_TRACING is set.
*/
void pool_put_to_cache(struct pool_head *pool, void *ptr, const void *caller)
{
LIST_INSERT(&ph->list, &item->by_pool);
LIST_INSERT(&th_ctx->pool_lru_head, &item->by_lru);
+ POOL_DEBUG_TRACE_CALLER(pool, item, caller);
ph->count++;
pool_fill_pattern(ph, item, pool->size);
pool_cache_count++;
down = ret->down;
/* keep track of where the element was allocated from */
POOL_DEBUG_SET_MARK(pool, ret);
+ POOL_DEBUG_TRACE_CALLER(pool, ret, NULL);
item = (struct pool_cache_item *)ret;
LIST_INSERT(&pch->list, &item->by_pool);
return NULL;
#endif
+#if defined(DEBUG_POOL_TRACING)
+ caller = __builtin_return_address(0);
+#endif
if (!p)
p = pool_get_from_cache(pool, caller);
if (unlikely(!p))
{
const void *caller = NULL;
+#if defined(DEBUG_POOL_TRACING)
+ caller = __builtin_return_address(0);
+#endif
/* we'll get late corruption if we refill to the wrong pool or double-free */
POOL_DEBUG_CHECK_MARK(pool, ptr);