git.ipfire.org Git - thirdparty/haproxy.git/commitdiff
MINOR: memory: Change the flush_lock to a spinlock, and don't get it in alloc.
author    Olivier Houchard <cognet@ci0.org>  Wed, 18 Mar 2020 14:48:29 +0000 (15:48 +0100)
committer Olivier Houchard <cognet@ci0.org>  Wed, 18 Mar 2020 14:55:35 +0000 (15:55 +0100)
The flush_lock was introduced mostly to be sure that pool_gc() will never
dereference a pointer that has been free'd. __pool_get_first() was acquiring
the lock too; the fear was that otherwise the pointer could get free'd later,
and then pool_gc() would attempt to dereference it. However, that cannot
happen, because the only functions that can free a pointer, when using
lockless pools, are pool_gc() and pool_flush(), and as long as those two
are mutually exclusive, nobody will be able to free the pointer while
pool_gc() attempts to access it.
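
A minimal sketch of that invariant (not the HAProxy code: the names here are
made up, pthread/GCC-builtin primitives stand in for the HA_SPIN_*() and
_HA_ATOMIC_*() macros, and the real free list also carries a sequence counter,
shown in the diff below, which this sketch omits):

#include <pthread.h>
#include <stdlib.h>

struct entry { struct entry *next; };

struct pool {
    struct entry *free_list;       /* lockless LIFO of cached objects */
    pthread_spinlock_t flush_lock; /* serializes the two freeing paths */
};

/* flush: atomically detach the whole list under the lock, then free it.
 * Holding flush_lock guarantees gc() is not walking the list meanwhile. */
static void flush(struct pool *p)
{
    struct entry *head, *next;

    pthread_spin_lock(&p->flush_lock);
    head = __atomic_exchange_n(&p->free_list, NULL, __ATOMIC_SEQ_CST);
    pthread_spin_unlock(&p->flush_lock);

    while (head) {      /* detached entries are private now; free at leisure */
        next = head->next;
        free(head);
        head = next;
    }
}

/* gc: pop and free entries one by one under the same lock, so flush()
 * cannot free the entry being dereferenced here. Allocators may still
 * pop concurrently, hence the CAS rather than a plain store. */
static void gc(struct pool *p)
{
    struct entry *head;

    pthread_spin_lock(&p->flush_lock);
    head = __atomic_load_n(&p->free_list, __ATOMIC_ACQUIRE);
    while (head) {
        if (__atomic_compare_exchange_n(&p->free_list, &head, head->next,
                                        0, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)) {
            free(head);
            head = __atomic_load_n(&p->free_list, __ATOMIC_ACQUIRE);
        }
        /* on CAS failure, head was reloaded by the builtin; just retry */
    }
    pthread_spin_unlock(&p->flush_lock);
}

Every free() of a cached object happens in one of these two functions, and
both run under the same spinlock, so neither can free an entry out from
under the other.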
So change the flush_lock to a spinlock, and don't bother acquiring/releasing
it in __pool_get_first(); that way callers of __pool_get_first() won't have
to wait while the pool is being flushed. The worst that can happen is that we
call __pool_refill_alloc() while the pool is getting flushed, and memory gets
allocated just to be free'd.
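
The consumer side, after this change, never touches flush_lock at all; it
relies on the double-width CAS over the {free_list, seq} pair. A sketch of
that pattern, with the same caveats as above (made-up names; GCC/Clang
16-byte atomics, which typically need -mcx16 and/or -latomic on x86-64):

#include <stdlib.h>

struct entry { struct entry *next; };

/* {pointer, sequence} pair swapped as one unit: the sequence is bumped
 * on every update, so a stale snapshot can never pass the CAS even if
 * the pointer value itself got recycled (the ABA problem). */
struct fl {
    struct entry *ptr;
    unsigned long seq;
} __attribute__((aligned(16)));     /* cmpxchg16b needs 16-byte alignment */

static struct entry *get_first(struct fl *head)
{
    struct fl cmp, new;

    __atomic_load(head, &cmp, __ATOMIC_ACQUIRE);
    do {
        if (!cmp.ptr)             /* empty, possibly because a flush just  */
            return NULL;          /* emptied it: the caller then allocates */
        new.ptr = cmp.ptr->next;  /* a racing flush bumps seq, so our CAS  */
        new.seq = cmp.seq + 1;    /* fails and the loop simply retries     */
    } while (!__atomic_compare_exchange(head, &cmp, &new, 0,
                                        __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE));
    return cmp.ptr;
}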

This may help with GitHub issue #552.

This may be backported to 2.1, 2.0 and 1.9.

include/common/memory.h
src/memory.c

index 5433a7373c29e2a8ea1af036721c2539d09005e7..44a9dcb57609b503bc60c9f9a8b56a4c5e949bc2 100644 (file)
@@ -78,7 +78,7 @@ struct pool_head {
        void **free_list;
 #ifdef CONFIG_HAP_LOCKLESS_POOLS
        uintptr_t seq;
-       HA_RWLOCK_T flush_lock;
+       HA_SPINLOCK_T flush_lock;
 #else
        __decl_hathreads(HA_SPINLOCK_T lock); /* the spin lock */
 #endif
@@ -222,19 +222,15 @@ static inline void *__pool_get_first(struct pool_head *pool)
        cmp.seq = pool->seq;
        __ha_barrier_load();
 
-       HA_RWLOCK_RDLOCK(POOL_LOCK, &pool->flush_lock);
        cmp.free_list = pool->free_list;
        do {
-               if (cmp.free_list == NULL) {
-                       HA_RWLOCK_RDUNLOCK(POOL_LOCK, &pool->flush_lock);
+               if (cmp.free_list == NULL)
                        return NULL;
-               }
                new.seq = cmp.seq + 1;
                __ha_barrier_load();
                new.free_list = *POOL_LINK(pool, cmp.free_list);
        } while (HA_ATOMIC_DWCAS((void *)&pool->free_list, (void *)&cmp, (void *)&new) == 0);
        __ha_barrier_atomic_store();
-       HA_RWLOCK_RDUNLOCK(POOL_LOCK, &pool->flush_lock);
 
        _HA_ATOMIC_ADD(&pool->used, 1);
 #ifdef DEBUG_MEMORY_POOLS
index b0512aaece5ddfc2a1f93086240243e2bc25021b..4a37381e045f82e0c3c2a60fbbebb9256420fb69 100644 (file)
@@ -142,7 +142,7 @@ struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
 #ifndef CONFIG_HAP_LOCKLESS_POOLS
                HA_SPIN_INIT(&pool->lock);
 #else
-               HA_RWLOCK_INIT(&pool->flush_lock);
+               HA_SPIN_INIT(&pool->flush_lock);
 #endif
        }
        pool->users++;
@@ -225,7 +225,7 @@ void pool_flush(struct pool_head *pool)
 
        if (!pool)
                return;
-       HA_RWLOCK_WRLOCK(POOL_LOCK, &pool->flush_lock);
+       HA_SPIN_LOCK(POOL_LOCK, &pool->flush_lock);
        do {
                cmp.free_list = pool->free_list;
                cmp.seq = pool->seq;
@@ -233,7 +233,7 @@ void pool_flush(struct pool_head *pool)
                new.seq = cmp.seq + 1;
        } while (!_HA_ATOMIC_DWCAS(&pool->free_list, &cmp, &new));
        __ha_barrier_atomic_store();
-       HA_RWLOCK_WRUNLOCK(POOL_LOCK, &pool->flush_lock);
+       HA_SPIN_UNLOCK(POOL_LOCK, &pool->flush_lock);
        next = cmp.free_list;
        while (next) {
                temp = next;
@@ -263,7 +263,7 @@ void pool_gc(struct pool_head *pool_ctx)
                return;
 
        list_for_each_entry(entry, &pools, list) {
-               HA_RWLOCK_WRLOCK(POOL_LOCK, &entry->flush_lock);
+               HA_SPIN_LOCK(POOL_LOCK, &entry->flush_lock);
                while ((int)((volatile int)entry->allocated - (volatile int)entry->used) > (int)entry->minavail) {
                        struct pool_free_list cmp, new;
 
@@ -280,7 +280,7 @@ void pool_gc(struct pool_head *pool_ctx)
                        free(cmp.free_list);
                        _HA_ATOMIC_SUB(&entry->allocated, 1);
                }
-               HA_RWLOCK_WRUNLOCK(POOL_LOCK, &entry->flush_lock);
+               HA_SPIN_UNLOCK(POOL_LOCK, &entry->flush_lock);
        }
 
        _HA_ATOMIC_STORE(&recurse, 0);