# not use them at all. Some even more obscure ones might also be available
# without appearing here. Currently defined DEBUG macros include DEBUG_FULL,
# DEBUG_MEM_STATS, DEBUG_DONT_SHARE_POOLS, DEBUG_NO_LOCKLESS_POOLS, DEBUG_FD,
-# DEBUG_NO_LOCAL_POOLS, DEBUG_FAIL_ALLOC, DEBUG_STRICT_NOCRASH, DEBUG_HPACK,
+# DEBUG_NO_POOLS, DEBUG_FAIL_ALLOC, DEBUG_STRICT_NOCRASH, DEBUG_HPACK,
# DEBUG_AUTH, DEBUG_SPOE, DEBUG_UAF, DEBUG_THREAD, DEBUG_STRICT, DEBUG_DEV,
# DEBUG_TASK.
DEBUG =
#include <haproxy/list-t.h>
#include <haproxy/thread-t.h>
+/* Pools are always enabled unless explicitly disabled. When disabled, the
+ * calls are directly passed to the underlying OS functions.
+ */
+#if !defined(DEBUG_NO_POOLS) && !defined(DEBUG_UAF) && !defined(DEBUG_FAIL_ALLOC)
+#define CONFIG_HAP_POOLS
+#endif
+
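/* Illustrative sketch (not part of the patch): when CONFIG_HAP_POOLS is not
 * defined, the pool entry points can collapse to plain allocator calls, which
 * is what "directly passed to the underlying OS functions" means above. The
 * names example_pool, example_pool_alloc and example_pool_free are
 * hypothetical stand-ins for the real pool API.
 */
#include <stdlib.h>

struct example_pool {
	size_t size;                 /* size of the objects served by this pool */
};

#ifndef CONFIG_HAP_POOLS
static inline void *example_pool_alloc(struct example_pool *pool)
{
	return malloc(pool->size);   /* no caching, no free list: plain malloc */
}

static inline void example_pool_free(struct example_pool *pool, void *ptr)
{
	(void)pool;
	free(ptr);                   /* object goes straight back to the OS */
}
#endif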
/* On architectures supporting threads and double-word CAS, we can implement
* lock-less memory pools. This isn't supported for debugging modes however.
*/
#define CONFIG_HAP_LOCKLESS_POOLS
#endif
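/* Illustrative sketch (not part of the patch): the reason double-word CAS
 * matters is that the free-list head can be paired with a generation counter,
 * so a pointer that was popped and pushed back between two observations (the
 * ABA problem) cannot satisfy the compare-and-swap. This is not HAProxy's
 * implementation; it assumes a 64-bit GCC/Clang target where 16-byte
 * __atomic operations are available (e.g. x86_64 with -mcx16 or libatomic).
 */
#include <stdbool.h>
#include <stddef.h>

struct free_obj {
	struct free_obj *next;
};

struct dw_head {
	struct free_obj *first;      /* top of the lock-less free list */
	unsigned long seq;           /* bumped on every update to defeat ABA */
} __attribute__((aligned(16)));

static struct dw_head free_list;

static void dw_push(struct free_obj *obj)
{
	struct dw_head old, new;

	__atomic_load(&free_list, &old, __ATOMIC_RELAXED);
	do {
		obj->next = old.first;
		new.first = obj;
		new.seq   = old.seq + 1;
		/* on failure, <old> is refreshed with the current head value */
	} while (!__atomic_compare_exchange(&free_list, &old, &new, false,
	                                    __ATOMIC_RELEASE, __ATOMIC_RELAXED));
}

static struct free_obj *dw_pop(void)
{
	struct dw_head old, new;

	__atomic_load(&free_list, &old, __ATOMIC_ACQUIRE);
	do {
		if (!old.first)
			return NULL;
		/* reading old.first->next may race with reuse of the object;
		 * real pool implementations must tolerate or prevent this.
		 */
		new.first = old.first->next;
		new.seq   = old.seq + 1;
	} while (!__atomic_compare_exchange(&free_list, &old, &new, false,
	                                    __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE));
	return old.first;
}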
-/* On architectures supporting threads we can amortize the locking cost using
- * local pools.
- */
-#if defined(USE_THREAD) && !defined(DEBUG_NO_LOCAL_POOLS) && !defined(DEBUG_UAF) && !defined(DEBUG_FAIL_ALLOC)
-#define CONFIG_HAP_LOCAL_POOLS
-#endif
-
/* On modern architectures with many threads, a fast memory allocator, and
* local pools, the global pools with their single list can be way slower than
* the standard allocator which already has its own per-thread arenas. In this
* case we disable global pools. The global pools may still be enforced
* using CONFIG_HAP_GLOBAL_POOLS though.
*/
-#if defined(USE_THREAD) && defined(HA_HAVE_FAST_MALLOC) && defined(CONFIG_HAP_LOCAL_POOLS) && !defined(CONFIG_HAP_GLOBAL_POOLS)
+#if defined(USE_THREAD) && defined(HA_HAVE_FAST_MALLOC) && defined(CONFIG_HAP_POOLS) && !defined(CONFIG_HAP_GLOBAL_POOLS)
#define CONFIG_HAP_NO_GLOBAL_POOLS
#endif
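/* Illustrative sketch (not part of the patch): what the switch above changes
 * when an object does not fit in the thread-local cache. The helper name
 * push_to_shared_list() is hypothetical; it stands for whatever queues the
 * object on the process-wide free list.
 */
#include <stdlib.h>

static void push_to_shared_list(void *ptr)
{
	(void)ptr;                   /* hypothetical: enqueue on a global list */
}

static void example_release(void *ptr)
{
#if defined(CONFIG_HAP_NO_GLOBAL_POOLS)
	/* no shared free list: let the allocator's own per-thread arenas
	 * absorb the object, avoiding contention on a single global list.
	 */
	free(ptr);
#else
	/* keep the object on the process-wide list for later reuse */
	push_to_shared_list(ptr);
#endif
}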
unsigned int failed; /* failed allocations */
struct list list; /* list of all known pools */
char name[12]; /* name of the pool */
-#ifdef CONFIG_HAP_LOCAL_POOLS
+#ifdef CONFIG_HAP_POOLS
struct pool_cache_head cache[MAX_THREADS]; /* pool caches */
#endif
} __attribute__((aligned(64)));
}
-#ifdef CONFIG_HAP_LOCAL_POOLS
+#ifdef CONFIG_HAP_POOLS
/****************** Thread-local cache management ******************/
pool_evict_from_cache();
}
-#endif // CONFIG_HAP_LOCAL_POOLS
+#endif // CONFIG_HAP_POOLS
#if defined(CONFIG_HAP_NO_GLOBAL_POOLS)
{
void *p;
-#ifdef CONFIG_HAP_LOCAL_POOLS
+#ifdef CONFIG_HAP_POOLS
if (likely(p = __pool_get_from_cache(pool)))
goto ret;
#endif
if (unlikely(mem_poison_byte >= 0))
memset(ptr, mem_poison_byte, pool->size);
-#ifdef CONFIG_HAP_LOCAL_POOLS
+#ifdef CONFIG_HAP_POOLS
/* put the object back into the cache only if there are not too
	 * many objects yet in this pool (no more than half of the cache
	 * is used or this pool uses no more than 1/8 of the cache size).
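/* Illustrative sketch (not part of the patch) of the admission check the
 * comment above describes. The names cache_used_bytes, pool_cached_bytes and
 * pool_cache_budget are hypothetical; in HAProxy the budget is a compile-time
 * cache size and the counters are per-thread totals.
 */
#include <stddef.h>

static inline int may_keep_in_cache(size_t cache_used_bytes,
                                    size_t pool_cached_bytes,
                                    size_t pool_cache_budget)
{
	/* keep the object if the whole cache is no more than half full, or if
	 * this pool alone accounts for no more than 1/8 of the cache budget.
	 */
	return cache_used_bytes <= pool_cache_budget / 2 ||
	       pool_cached_bytes <= pool_cache_budget / 8;
}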
unsigned int idle_pct; /* idle to total ratio over last sample (percent) */
unsigned int flags; /* thread info flags, TI_FL_* */
-#ifdef CONFIG_HAP_LOCAL_POOLS
+#ifdef CONFIG_HAP_POOLS
struct list pool_lru_head; /* oldest objects */
#endif
struct list buffer_wq; /* buffer waiters */
#include <haproxy/tools.h>
-#ifdef CONFIG_HAP_LOCAL_POOLS
+#ifdef CONFIG_HAP_POOLS
/* These ones are initialized per-thread on startup by init_pools() */
THREAD_LOCAL size_t pool_cache_bytes = 0; /* total cache size */
THREAD_LOCAL size_t pool_cache_count = 0; /* #cache objects */
pool->flags = flags;
LIST_ADDQ(start, &pool->list);
-#ifdef CONFIG_HAP_LOCAL_POOLS
+#ifdef CONFIG_HAP_POOLS
/* update per-thread pool cache if necessary */
for (thr = 0; thr < MAX_THREADS; thr++) {
LIST_INIT(&pool->cache[thr].list);
return pool;
}
-#ifdef CONFIG_HAP_LOCAL_POOLS
+#ifdef CONFIG_HAP_POOLS
/* Evicts some of the oldest objects from the local cache, pushing them to the
* global pool.
*/
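/* Illustrative sketch (not part of the patch) of the eviction idea described
 * above: walk the per-thread LRU list from its oldest end and return objects
 * to the shared pool until the cache is back under its target size. All
 * names (cache_item, lru_oldest, push_to_shared_list, the counters) are
 * hypothetical stand-ins for the real per-thread structures.
 */
#include <stddef.h>

struct cache_item {
	struct cache_item *lru_prev;   /* towards older entries */
	struct cache_item *lru_next;   /* towards newer entries */
	size_t size;                   /* bytes accounted for this object */
};

static struct cache_item *lru_oldest;   /* oldest end of the per-thread LRU */
static size_t cache_used_bytes;         /* current per-thread cache usage */

static void push_to_shared_list(struct cache_item *item)
{
	(void)item;                    /* hypothetical: hand back to the pool */
}

static void example_evict(size_t target_bytes)
{
	while (cache_used_bytes > target_bytes && lru_oldest) {
		struct cache_item *old = lru_oldest;

		/* unlink the oldest entry from the LRU */
		lru_oldest = old->lru_next;
		if (lru_oldest)
			lru_oldest->lru_prev = NULL;

		cache_used_bytes -= old->size;
		push_to_shared_list(old);
	}
}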
/* Initializes all per-thread arrays on startup */
static void init_pools()
{
-#ifdef CONFIG_HAP_LOCAL_POOLS
+#ifdef CONFIG_HAP_POOLS
int thr;
for (thr = 0; thr < MAX_THREADS; thr++) {