CLEANUP: pools: use the regular lock for the flush operation on lockless pools
author    Willy Tarreau <w@1wt.eu>
          Fri, 29 May 2020 15:23:05 +0000 (17:23 +0200)
committer Willy Tarreau <w@1wt.eu>
          Fri, 29 May 2020 15:28:04 +0000 (17:28 +0200)
Commit 04f5fe87d3d introduced an rwlock in the pools to deal with the risk
that pool_flush() dereferences an area being freed, and commit 899fb8abdcd
turned it into a spinlock. The pools already contain a spinlock in the case of
locked pools, so let's use that one and simplify the code by removing the ifdefs.

At this point I strongly suspect that if pool_flush() instead relied on
__pool_get_first() to pick entries from the pool, the concurrency problem
could never happen, since only one user would get a given entry at a time
and it could thus not be freed by another user. It is not certain this would
be faster, however, because of the number of atomic operations needed to
retrieve one entry compared to a locked batch.
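
To make that alternative concrete, here is a minimal sketch, not part of this
commit, of what such a pool_flush() could look like for lockless pools. It
assumes __pool_get_first() keeps its current behaviour of accounting a
successfully popped entry as used, and it reuses pool_free_area() and
POOL_EXTRA as in the existing flush path; the accounting details are only
illustrative:

    /* Hypothetical sketch only -- not the code from this commit. Instead of
     * detaching the whole free_list under the lock, pop entries one at a
     * time via __pool_get_first(): once the DWCAS inside it succeeds, the
     * entry belongs exclusively to this thread, so no other user can free
     * it concurrently and no lock is needed here. The cost is one DWCAS
     * per entry instead of a single locked batch detach.
     */
    void pool_flush(struct pool_head *pool)
    {
            void *entry;

            if (!pool)
                    return;

            while ((entry = __pool_get_first(pool)) != NULL) {
                    /* __pool_get_first() counted the entry as used; undo
                     * that, and drop it from the allocated count since it
                     * is being released back to the system.
                     */
                    _HA_ATOMIC_SUB(&pool->used, 1);
                    _HA_ATOMIC_SUB(&pool->allocated, 1);
                    pool_free_area(entry, pool->size + POOL_EXTRA);
            }
    }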

include/common/memory.h
src/memory.c

diff --git a/include/common/memory.h b/include/common/memory.h
index 84ec18e14dc4780d161a9b21ec309da68f65bcb7..d3950ca5df2c739d9bbcc68628e3e4462733ace2 100644
--- a/include/common/memory.h
+++ b/include/common/memory.h
@@ -81,14 +81,15 @@ struct pool_free_list {
 };
 #endif
 
+/* Note below, in case of lockless pools, we still need the lock only for
+ * the flush() operation.
+ */
 struct pool_head {
        void **free_list;
 #ifdef CONFIG_HAP_LOCKLESS_POOLS
        uintptr_t seq;
-       HA_SPINLOCK_T flush_lock;
-#else
-       __decl_hathreads(HA_SPINLOCK_T lock); /* the spin lock */
 #endif
+       __decl_hathreads(HA_SPINLOCK_T lock); /* the spin lock */
        unsigned int used;      /* how many chunks are currently in use */
        unsigned int needed_avg;/* floating indicator between used and allocated */
        unsigned int allocated; /* how many chunks have been allocated */
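
For readability, here is the relevant part of struct pool_head once this hunk
is applied, reconstructed from the lines above with the remaining fields
elided:

    struct pool_head {
            void **free_list;
    #ifdef CONFIG_HAP_LOCKLESS_POOLS
            uintptr_t seq;
    #endif
            __decl_hathreads(HA_SPINLOCK_T lock); /* the spin lock (flush only for lockless pools) */
            unsigned int used;      /* how many chunks are currently in use */
            unsigned int needed_avg;/* floating indicator between used and allocated */
            unsigned int allocated; /* how many chunks have been allocated */
            /* ... remaining fields unchanged ... */
    };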
diff --git a/src/memory.c b/src/memory.c
index 4552e4559b4133ede016cef5282cfb70e1a6f05e..b442f0a108594b255804d57239770b1af3833c55 100644
--- a/src/memory.c
+++ b/src/memory.c
@@ -139,11 +139,7 @@ struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
                        for (thr = 0; thr < MAX_THREADS; thr++)
                                pool_cache[thr][idx].size = size;
                }
-#ifndef CONFIG_HAP_LOCKLESS_POOLS
                HA_SPIN_INIT(&pool->lock);
-#else
-               HA_SPIN_INIT(&pool->flush_lock);
-#endif
        }
        pool->users++;
        return pool;
@@ -227,7 +223,7 @@ void pool_flush(struct pool_head *pool)
 
        if (!pool)
                return;
-       HA_SPIN_LOCK(POOL_LOCK, &pool->flush_lock);
+       HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
        do {
                cmp.free_list = pool->free_list;
                cmp.seq = pool->seq;
@@ -235,7 +231,7 @@ void pool_flush(struct pool_head *pool)
                new.seq = cmp.seq + 1;
        } while (!_HA_ATOMIC_DWCAS(&pool->free_list, &cmp, &new));
        __ha_barrier_atomic_store();
-       HA_SPIN_UNLOCK(POOL_LOCK, &pool->flush_lock);
+       HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
        next = cmp.free_list;
        while (next) {
                temp = next;