enum lock_label {
THREAD_SYNC_LOCK = 0,
+ POOL_LOCK,
LOCK_LABELS
};
struct lock_stat {
static inline void show_lock_stats()
{
- const char *labels[LOCK_LABELS] = {"THREAD_SYNC" };
+ const char *labels[LOCK_LABELS] = {"THREAD_SYNC", "POOL"};
int lbl;
for (lbl = 0; lbl < LOCK_LABELS; lbl++) {
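The new POOL_LOCK label gives the pool spinlock its own slot in the per-label lock statistics, so show_lock_stats() can report contention on the pools separately from the other locks. A minimal usage sketch of a labeled spinlock with these macros (assuming HA_SPINLOCK_T and the SPIN_* macros behave as in the hunks below; the lock and helper names are illustrative only):

#include <common/hathreads.h>

static HA_SPINLOCK_T example_lock;   /* illustrative lock, not part of the patch */

static void example_setup(void)
{
	SPIN_INIT(&example_lock);                /* no label needed at init time */
}

static void example_update_shared_state(void)
{
	SPIN_LOCK(POOL_LOCK, &example_lock);     /* the label selects the stats slot */
	/* ... modify state shared between threads ... */
	SPIN_UNLOCK(POOL_LOCK, &example_lock);
}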
#include <common/config.h>
#include <common/mini-clist.h>
+#include <common/hathreads.h>
#ifndef DEBUG_DONT_SHARE_POOLS
#define MEM_F_SHARED 0x1
struct pool_head {
void **free_list;
+#ifdef USE_THREAD
+ HA_SPINLOCK_T lock; /* the spin lock */
+#endif
struct list list; /* list of all known pools */
unsigned int used; /* how many chunks are currently in use */
unsigned int allocated; /* how many chunks have been allocated */
* A call to the garbage collector is performed at most once in case malloc()
* returns an error, before returning NULL.
*/
+void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail);
void *pool_refill_alloc(struct pool_head *pool, unsigned int avail);
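These two prototypes establish the convention used throughout this patch: functions prefixed with "__" assume the caller already holds pool->lock, while the unprefixed wrappers take and release it themselves. A hedged sketch of the two call styles (the helper name and flag are hypothetical):

/* Hypothetical helper illustrating the locking convention assumed here. */
static void *refill_example(struct pool_head *pool, int lock_already_held)
{
	void *p;

	if (lock_already_held) {
		/* caller owns pool->lock: use the lock-less variant */
		p = __pool_refill_alloc(pool, 0);
	}
	else {
		/* no lock held: the wrapper takes pool->lock itself */
		p = pool_refill_alloc(pool, 0);
	}
	return p;
}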
/* Try to find an existing shared pool with the same characteristics and
/*
* This function frees whatever can be freed in all pools, but respecting
* the minimum thresholds imposed by owners.
+ *
+ * <pool_ctx> is used when pool_gc2 is called from __pool_refill_alloc to
+ * release resources while allocating an element. Since <pool_ctx> is already
+ * locked by the caller, its lock must be skipped here; callers which hold no
+ * pool lock pass NULL.
*/
-void pool_gc2();
+void pool_gc2(struct pool_head *pool_ctx);
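Within the hunks shown here, only __pool_refill_alloc() passes a non-NULL <pool_ctx>, because it runs with its pool's lock held; every other call site passes NULL. A hedged sketch of choosing the argument (the function name is illustrative):

/* Illustrative only: picking the pool_gc2() argument under this patch. */
static void reclaim_memory_example(struct pool_head *locked_pool)
{
	if (locked_pool)
		pool_gc2(locked_pool); /* we hold locked_pool->lock: the GC must skip it */
	else
		pool_gc2(NULL);        /* no pool lock held (signal handlers, SSL retries) */
}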
/*
* This function destroys a pool by freeing it completely.
* available, otherwise returns NULL. No malloc() is attempted, and poisoning
* is never performed. The purpose is to get the fastest possible allocation.
*/
-static inline void *pool_get_first(struct pool_head *pool)
+static inline void *__pool_get_first(struct pool_head *pool)
{
void *p;
return p;
}
+static inline void *pool_get_first(struct pool_head *pool)
+{
+ void *ret;
+
+ SPIN_LOCK(POOL_LOCK, &pool->lock);
+ ret = __pool_get_first(pool);
+ SPIN_UNLOCK(POOL_LOCK, &pool->lock);
+ return ret;
+}
/*
* Returns a pointer to type <type> taken from the pool <pool_type> or
* dynamically allocated. In the first case, <pool_type> is updated to point to
{
void *p;
- if ((p = pool_get_first(pool)) == NULL)
- p = pool_refill_alloc(pool, 0);
-
+ SPIN_LOCK(POOL_LOCK, &pool->lock);
+ if ((p = __pool_get_first(pool)) == NULL)
+ p = __pool_refill_alloc(pool, 0);
+ SPIN_UNLOCK(POOL_LOCK, &pool->lock);
return p;
}
p = pool_alloc_dirty(pool);
#ifdef DEBUG_MEMORY_POOLS
if (p) {
+ SPIN_LOCK(POOL_LOCK, &pool->lock);
/* keep track of where the element was allocated from */
*POOL_LINK(pool, p) = (void *)pool;
+ SPIN_UNLOCK(POOL_LOCK, &pool->lock);
}
#endif
if (p && mem_poison_byte >= 0) {
static inline void pool_free2(struct pool_head *pool, void *ptr)
{
if (likely(ptr != NULL)) {
+ SPIN_LOCK(POOL_LOCK, &pool->lock);
#ifdef DEBUG_MEMORY_POOLS
/* we'll get late corruption if we refill to the wrong pool or double-free */
if (*POOL_LINK(pool, ptr) != (void *)pool)
*POOL_LINK(pool, ptr) = (void *)pool->free_list;
pool->free_list = (void *)ptr;
pool->used--;
+ SPIN_UNLOCK(POOL_LOCK, &pool->lock);
}
}
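Taken together, pool_alloc_dirty() and pool_free2() now form a lock-protected allocate/release pair, with all locking hidden inside the helpers. A hedged usage sketch with an illustrative pool and structure (struct item and pool2_item are not part of the patch):

/* Illustrative allocate/use/free cycle; names are hypothetical. */
struct item { int value; };
static struct pool_head *pool2_item;

static int use_pool_example(void)
{
	struct item *it = pool_alloc_dirty(pool2_item); /* may malloc() on a miss */

	if (!it)
		return -1;                 /* allocation failed even after a refill attempt */
	it->value = 42;                    /* memory is "dirty": never zeroed for us */
	pool_free2(pool2_item, it);        /* chunk goes back on the pool free list */
	return 0;
}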
-
-
#endif /* _COMMON_MEMORY_H */
/*
{
soft_stop();
signal_unregister_handler(sh);
- pool_gc2();
+ pool_gc2(NULL);
}
/*
static void sig_pause(struct sig_handler *sh)
{
pause_proxies();
- pool_gc2();
+ pool_gc2(NULL);
}
/*
{
/* dump memory usage then free everything possible */
dump_pools();
- pool_gc2();
+ pool_gc2(NULL);
}
/* This function checks if cfg_cfgfiles contains directories.
size = ((size + POOL_EXTRA + align - 1) & -align) - POOL_EXTRA;
}
+ /* TODO: thread: we do not lock the pool list for now because all pools are
+ * created during HAProxy startup (so before thread creation) */
start = &pools;
pool = NULL;
LIST_ADDQ(start, &pool->list);
}
pool->users++;
+ SPIN_INIT(&pool->lock);
return pool;
}
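Since the lock is initialized in create_pool() and, as the TODO above notes, pools are only created during startup before any thread exists, the pool list itself needs no locking. A hedged sketch of a typical startup-time pool declaration (the pool and structure names are illustrative):

/* Illustrative startup-time pool creation; runs before threads are started. */
struct example { int value; };
static struct pool_head *pool2_example;

static void init_example_pool(void)
{
	/* MEM_F_SHARED allows merging with an existing pool of compatible size */
	pool2_example = create_pool("example", sizeof(struct example), MEM_F_SHARED);
}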
* A call to the garbage collector is performed at most once in case malloc()
* returns an error, before returning NULL.
*/
-void *pool_refill_alloc(struct pool_head *pool, unsigned int avail)
+void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
void *ptr = NULL;
int failed = 0;
if (failed)
return NULL;
failed++;
- pool_gc2();
+ pool_gc2(pool);
continue;
}
if (++pool->allocated > avail)
#endif
return ptr;
}
+void *pool_refill_alloc(struct pool_head *pool, unsigned int avail)
+{
+ void *ptr;
+ SPIN_LOCK(POOL_LOCK, &pool->lock);
+ ptr = __pool_refill_alloc(pool, avail);
+ SPIN_UNLOCK(POOL_LOCK, &pool->lock);
+ return ptr;
+}
/*
* This function frees whatever can be freed in pool <pool>.
*/
if (!pool)
return;
+ SPIN_LOCK(POOL_LOCK, &pool->lock);
next = pool->free_list;
while (next) {
temp = next;
free(temp);
}
pool->free_list = next;
-
+ SPIN_UNLOCK(POOL_LOCK, &pool->lock);
/* here, we should have pool->allocated == pool->used */
}
* This function frees whatever can be freed in all pools, but respecting
* the minimum thresholds imposed by owners. It takes care of avoiding
* recursion because it may be called from a signal handler.
+ *
+ * <pool_ctx> is used when pool_gc2 is called from __pool_refill_alloc to
+ * release resources while allocating an element. Since <pool_ctx> is already
+ * locked by the caller, its lock must be skipped here; callers which hold no
+ * pool lock pass NULL.
*/
-void pool_gc2()
+void pool_gc2(struct pool_head *pool_ctx)
{
static int recurse;
+ int cur_recurse = 0;
struct pool_head *entry;
- if (recurse++)
- goto out;
+ if (recurse || !HA_ATOMIC_CAS(&recurse, &cur_recurse, 1))
+ return;
list_for_each_entry(entry, &pools, list) {
void *temp, *next;
//qfprintf(stderr, "Flushing pool %s\n", entry->name);
+ if (entry != pool_ctx)
+ SPIN_LOCK(POOL_LOCK, &entry->lock);
next = entry->free_list;
while (next &&
(int)(entry->allocated - entry->used) > (int)entry->minavail) {
free(temp);
}
entry->free_list = next;
+ if (entry != pool_ctx)
+ SPIN_UNLOCK(POOL_LOCK, &entry->lock);
}
- out:
- recurse--;
+
+ HA_ATOMIC_STORE(&recurse, 0);
}
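The guard above relies on HA_ATOMIC_CAS so that concurrent or re-entrant calls (for example from a signal handler) simply return instead of recursing into the pools. A self-contained sketch of the same single-entry pattern using GCC's atomic builtins, which is presumably the kind of primitive HA_ATOMIC_CAS wraps (names are illustrative):

/* Stand-alone illustration of the single-entry guard used by pool_gc2(). */
static int gc_running;

static void guarded_gc_example(void)
{
	int expected = 0;

	/* only one caller wins the 0 -> 1 transition; everyone else returns */
	if (gc_running ||
	    !__atomic_compare_exchange_n(&gc_running, &expected, 1, 0,
	                                 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
		return;

	/* ... scan and trim the pools ... */

	__atomic_store_n(&gc_running, 0, __ATOMIC_SEQ_CST); /* reopen the gate */
}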
/*
pool->users--;
if (!pool->users) {
LIST_DEL(&pool->list);
+ SPIN_DESTROY(&pool->lock);
free(pool);
}
}
allocated = used = nbpools = 0;
chunk_printf(&trash, "Dumping pools usage. Use SIGQUIT to flush them.\n");
list_for_each_entry(entry, &pools, list) {
+ SPIN_LOCK(POOL_LOCK, &entry->lock);
chunk_appendf(&trash, " - Pool %s (%d bytes) : %d allocated (%u bytes), %d used, %d failures, %d users%s\n",
entry->name, entry->size, entry->allocated,
entry->size * entry->allocated, entry->used, entry->failed,
allocated += entry->allocated * entry->size;
used += entry->used * entry->size;
nbpools++;
+ SPIN_UNLOCK(POOL_LOCK, &entry->lock);
}
chunk_appendf(&trash, "Total: %d pools, %lu bytes allocated, %lu used.\n",
nbpools, allocated, used);
p->id, p->fe_counters.cum_conn, p->be_counters.cum_conn);
stop_proxy(p);
/* try to free more memory */
- pool_gc2();
+ pool_gc2(NULL);
}
else {
next = tick_first(next, p->stop_time);
if (unlikely(stopping && p->state == PR_STSTOPPED && p->table.current)) {
if (!p->table.syncing) {
stktable_trash_oldest(&p->table, p->table.current);
- pool_gc2();
+ pool_gc2(NULL);
}
if (p->table.current) {
/* some entries still remain, let's recheck in one second */
conn->xprt_ctx = SSL_new(objt_server(conn->target)->ssl_ctx.ctx);
if (!conn->xprt_ctx) {
if (may_retry--) {
- pool_gc2();
+ pool_gc2(NULL);
goto retry_connect;
}
conn->err_code = CO_ER_SSL_NO_MEM;
SSL_free(conn->xprt_ctx);
conn->xprt_ctx = NULL;
if (may_retry--) {
- pool_gc2();
+ pool_gc2(NULL);
goto retry_connect;
}
conn->err_code = CO_ER_SSL_NO_MEM;
SSL_free(conn->xprt_ctx);
conn->xprt_ctx = NULL;
if (may_retry--) {
- pool_gc2();
+ pool_gc2(NULL);
goto retry_connect;
}
conn->err_code = CO_ER_SSL_NO_MEM;
conn->xprt_ctx = SSL_new(objt_listener(conn->target)->bind_conf->initial_ctx);
if (!conn->xprt_ctx) {
if (may_retry--) {
- pool_gc2();
+ pool_gc2(NULL);
goto retry_accept;
}
conn->err_code = CO_ER_SSL_NO_MEM;
SSL_free(conn->xprt_ctx);
conn->xprt_ctx = NULL;
if (may_retry--) {
- pool_gc2();
+ pool_gc2(NULL);
goto retry_accept;
}
conn->err_code = CO_ER_SSL_NO_MEM;
SSL_free(conn->xprt_ctx);
conn->xprt_ctx = NULL;
if (may_retry--) {
- pool_gc2();
+ pool_gc2(NULL);
goto retry_accept;
}
conn->err_code = CO_ER_SSL_NO_MEM;
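All of these SSL paths follow the same recovery pattern: on an allocation failure, flush the pools once with pool_gc2(NULL), since no pool lock is held there, and retry before reporting CO_ER_SSL_NO_MEM. A hedged, generic sketch of that pattern (allocate_object() is a placeholder, not a real function):

/* Generic retry-after-flush pattern used by the call sites above. */
extern void *allocate_object(void);   /* placeholder for the failing allocation */

static void *alloc_with_retry_example(void)
{
	int may_retry = 1;
	void *obj;

 retry:
	obj = allocate_object();
	if (!obj && may_retry--) {
		pool_gc2(NULL);       /* release whatever the pools can spare */
		goto retry;
	}
	return obj;                   /* NULL if even the retry failed */
}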