void *pool_get_from_os_noinc(struct pool_head *pool);
void pool_put_to_os_nodec(struct pool_head *pool, void *ptr);
-void *pool_alloc_nocache(struct pool_head *pool);
+void *pool_alloc_nocache(struct pool_head *pool, const void *caller);
void pool_free_nocache(struct pool_head *pool, void *ptr);
void dump_pools(void);
int pool_parse_debugging(const char *str, char **err);
	if (global.tune.buf_limit)
		pool_head_buffer->limit = global.tune.buf_limit;

	for (done = 0; done < pool_head_buffer->minavail - 1; done++) {
-		buffer = pool_alloc_nocache(pool_head_buffer);
+		buffer = pool_alloc_nocache(pool_head_buffer, init_buffer);
		if (!buffer)
			return 0;
		pool_free(pool_head_buffer, buffer);
	}
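For context: this loop in init_buffer() pre-faults minavail - 1 buffers straight from the OS and frees them right away, so they sit in the pool's free list before traffic starts; passing init_buffer itself as the caller tag keeps these warm-up objects attributable when caller tracing is enabled. Below is a minimal sketch of the same pattern as a reusable helper, assuming only the two functions shown above (warm_up_pool is a hypothetical name, not HAProxy code):

static int warm_up_pool(struct pool_head *pool, unsigned int count)
{
	void *obj;
	unsigned int i;

	for (i = 0; i < count; i++) {
		/* allocate straight from the OS, tagging this helper as the caller */
		obj = pool_alloc_nocache(pool, (const void *)warm_up_pool);
		if (!obj)
			return 0; /* allocation failure: report it to the caller */
		/* release immediately so the object lands in the pool's free list */
		pool_free(pool, obj);
	}
	return 1;
}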
/* Tries to allocate an object for the pool <pool> using the system's allocator
 * and directly returns it. The pool's counters are updated but the object is
 * never cached, so this is usable with and without local or shared caches.
 */
-void *pool_alloc_nocache(struct pool_head *pool)
+void *pool_alloc_nocache(struct pool_head *pool, const void *caller)
{
	void *ptr = NULL;
	uint bucket;

	ptr = pool_get_from_os_noinc(pool);
	if (!ptr)
		return NULL;

	bucket = pool_pbucket(ptr);
	swrate_add_scaled_opportunistic(&pool->buckets[bucket].needed_avg, POOL_AVG_SAMPLES,
	                                pool->buckets[bucket].used, POOL_AVG_SAMPLES/4);
	_HA_ATOMIC_INC(&pool->buckets[bucket].allocated);
	_HA_ATOMIC_INC(&pool->buckets[bucket].used);

	/* keep track of where the element was allocated from */
	POOL_DEBUG_SET_MARK(pool, ptr);
-	POOL_DEBUG_TRACE_CALLER(pool, (struct pool_cache_item *)ptr, NULL);
+	POOL_DEBUG_TRACE_CALLER(pool, (struct pool_cache_item *)ptr, caller);
	return ptr;
}
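POOL_DEBUG_TRACE_CALLER() is a no-op unless caller tracing is compiled in; conceptually it stamps the allocating call site into a pointer-sized slot reserved at the end of the object. A minimal self-contained sketch of that idea (trace_caller and the trailing-slot layout are illustrative assumptions, not HAProxy's exact macro):

#include <stddef.h>

/* Sketch: record the allocating call site in a trailing debug word.
 * Assumes alloc_sz includes one reserved pointer-sized slot at the
 * end of each object.
 */
static inline void trace_caller(void *obj, size_t alloc_sz, const void *caller)
{
	/* stamp the call site so a later dump can attribute the object */
	*(const void **)((char *)obj + alloc_sz - sizeof(void *)) = caller;
}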
	p = pool_get_from_cache(pool, caller);
	if (unlikely(!p))
-		p = pool_alloc_nocache(pool);
+		p = pool_alloc_nocache(pool, caller);

	if (likely(p)) {
#ifdef USE_MEMORY_PROFILING
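The context lines above come from the generic allocation path, where caller is captured once at the public entry point so both the cache hit and the OS fallback record the true call site. A sketch of that pattern (alloc_entry is a hypothetical wrapper; __builtin_return_address(0) is the GCC/Clang builtin returning the address the current function will return to, i.e. the external call site):

void *alloc_entry(struct pool_head *pool)
{
	const void *caller = __builtin_return_address(0);
	void *p;

	p = pool_get_from_cache(pool, caller);    /* fast path: local cache */
	if (!p)
		p = pool_alloc_nocache(pool, caller); /* fallback: allocate from the OS */
	return p;
}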