From: Willy Tarreau
Date: Fri, 29 May 2020 15:23:05 +0000 (+0200)
Subject: CLEANUP: pools: use the regular lock for the flush operation on lockless pools
X-Git-Tag: v2.2-dev9~171
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=21072b94806501c26a6e286ed4e46467afd557fe;p=thirdparty%2Fhaproxy.git

CLEANUP: pools: use the regular lock for the flush operation on lockless pools

Commit 04f5fe87d3d introduced an rwlock in the pools to deal with the
risk that pool_flush() dereferences an area being freed, and commit
899fb8abdcd turned it into a spinlock. The pools already contain a
spinlock in the case of locked pools, so let's use the same one and
simplify the code by removing ifdefs.

At this point I really suspect that if pool_flush() instead relied on
__pool_get_first() to pick entries from the pool, the concurrency
problem could never happen, since only one user would get a given entry
at once and it therefore could not be freed by another user. It's not
certain this would be faster, however, because of the number of atomic
ops needed to retrieve one entry compared to a locked batch.
---

diff --git a/include/common/memory.h b/include/common/memory.h
index 84ec18e14d..d3950ca5df 100644
--- a/include/common/memory.h
+++ b/include/common/memory.h
@@ -81,14 +81,15 @@ struct pool_free_list {
 };
 #endif
 
+/* Note below, in case of lockless pools, we still need the lock only for
+ * the flush() operation.
+ */
 struct pool_head {
 	void **free_list;
 #ifdef CONFIG_HAP_LOCKLESS_POOLS
 	uintptr_t seq;
-	HA_SPINLOCK_T flush_lock;
-#else
-	__decl_hathreads(HA_SPINLOCK_T lock); /* the spin lock */
 #endif
+	__decl_hathreads(HA_SPINLOCK_T lock); /* the spin lock */
 	unsigned int used;	/* how many chunks are currently in use */
 	unsigned int needed_avg;/* floating indicator between used and allocated */
 	unsigned int allocated;	/* how many chunks have been allocated */
diff --git a/src/memory.c b/src/memory.c
index 4552e4559b..b442f0a108 100644
--- a/src/memory.c
+++ b/src/memory.c
@@ -139,11 +139,7 @@ struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
 			for (thr = 0; thr < MAX_THREADS; thr++)
 				pool_cache[thr][idx].size = size;
 		}
-#ifndef CONFIG_HAP_LOCKLESS_POOLS
 		HA_SPIN_INIT(&pool->lock);
-#else
-		HA_SPIN_INIT(&pool->flush_lock);
-#endif
 	}
 	pool->users++;
 	return pool;
@@ -227,7 +223,7 @@ void pool_flush(struct pool_head *pool)
 
 	if (!pool)
 		return;
-	HA_SPIN_LOCK(POOL_LOCK, &pool->flush_lock);
+	HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
 	do {
 		cmp.free_list = pool->free_list;
 		cmp.seq = pool->seq;
@@ -235,7 +231,7 @@ void pool_flush(struct pool_head *pool)
 		new.seq = cmp.seq + 1;
 	} while (!_HA_ATOMIC_DWCAS(&pool->free_list, &cmp, &new));
 	__ha_barrier_atomic_store();
-	HA_SPIN_UNLOCK(POOL_LOCK, &pool->flush_lock);
+	HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
 	next = cmp.free_list;
 	while (next) {
 		temp = next;
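
For readers tracing the idea in the second paragraph of the message, below is a rough,
untested sketch (not part of the commit) of what a flush built on __pool_get_first()
might look like. The function name pool_flush_by_get_first() is invented for this
illustration; the helpers and fields are the ones visible in the diff above and in
common/memory.h at this point in 2.2-dev. It makes the trade-off concrete: one DWCAS
per popped entry instead of a single locked batch detach.

#include <common/memory.h>

/* Hypothetical sketch only: drain the free list one entry at a time through
 * __pool_get_first(), so each entry is owned by exactly one caller before it
 * is released and can never be dereferenced while another thread frees it.
 */
void pool_flush_by_get_first(struct pool_head *pool)
{
	void *entry;

	if (!pool)
		return;

	while ((entry = __pool_get_first(pool)) != NULL) {
		/* __pool_get_first() accounted the entry as used; undo that
		 * before handing the area back to the system.
		 */
		_HA_ATOMIC_SUB(&pool->used, 1);
		_HA_ATOMIC_SUB(&pool->allocated, 1);
		pool_free_area(entry, pool->size + POOL_EXTRA);
	}
}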