MINOR: pools: always use atomic ops to maintain counters
author     Willy Tarreau <w@1wt.eu>
           Sat, 17 Apr 2021 16:06:57 +0000 (18:06 +0200)
committer  Willy Tarreau <w@1wt.eu>
           Mon, 19 Apr 2021 13:24:33 +0000 (15:24 +0200)
Part of the code cannot be factored out because it still uses non-atomic
inc/dec on pool->used and pool->allocated, as these are updated under the
pool's lock. While that can make sense in terms of bus cycles, it does not
make sense in terms of code normalization. Furthermore, some operations
were still performed under a lock that can be removed entirely by using
atomic ops.
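
As a rough illustration of the pattern (a minimal sketch with made-up
names, using C11 atomics and pthread spinlocks instead of HAProxy's
_HA_ATOMIC_*/HA_SPIN_* macros), the counter update moves out of the
critical section while the list manipulation stays inside:

	#include <pthread.h>
	#include <stdatomic.h>

	struct demo_pool {
		pthread_spinlock_t lock;
		void *free_list;        /* each free area stores the next pointer */
		atomic_uint used;
		atomic_uint allocated;
	};

	void *demo_get(struct demo_pool *pool)
	{
		void *p;

		pthread_spin_lock(&pool->lock);
		if ((p = pool->free_list) != NULL)
			pool->free_list = *(void **)p;    /* unlink under the lock */
		pthread_spin_unlock(&pool->lock);

		if (p)
			atomic_fetch_add(&pool->used, 1); /* no longer needs the lock */
		return p;
	}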

One occurrence remains in the locked variant of pool_put_to_shared_cache(),
where pool_free_area() is still called under the lock; this absolutely must
be fixed.
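
The usual shape of such a fix (again a hypothetical sketch building on the
demo_pool type above, not the actual patch) is to unlink the area under the
lock but defer the expensive release until after the unlock:

	#include <stdlib.h>   /* free() stands in for pool_free_area() */

	void demo_put(struct demo_pool *pool, void *ptr, int crowded)
	{
		void *to_free = NULL;

		atomic_fetch_sub(&pool->used, 1);
		pthread_spin_lock(&pool->lock);
		if (crowded) {
			to_free = ptr;                    /* defer the release... */
		} else {
			*(void **)ptr = pool->free_list;  /* ...or chain it back */
			pool->free_list = ptr;
		}
		pthread_spin_unlock(&pool->lock);

		if (to_free) {
			free(to_free);                    /* expensive call runs unlocked */
			atomic_fetch_sub(&pool->allocated, 1);
		}
	}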

include/haproxy/pool.h
src/pool.c

index 19e596552dd98b1927e7218b8dee22152fc0c373..a44062556f3b5a2eec908f0775addfbacc7c766f 100644
--- a/include/haproxy/pool.h
+++ b/include/haproxy/pool.h
@@ -217,11 +217,11 @@ static inline void *pool_get_from_shared_cache(struct pool_head *pool)
        void *p;
 
        HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
-       if ((p = pool->free_list) != NULL) {
+       if ((p = pool->free_list) != NULL)
                pool->free_list = *POOL_LINK(pool, p);
-               pool->used++;
-       }
        HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
+       if (p)
+               _HA_ATOMIC_INC(&pool->used);
 
 #ifdef DEBUG_MEMORY_POOLS
        if (p) {
@@ -238,11 +238,11 @@ static inline void *pool_get_from_shared_cache(struct pool_head *pool)
 static inline void pool_put_to_shared_cache(struct pool_head *pool, void *ptr)
 {
 #ifndef DEBUG_UAF /* normal pool behaviour */
+       _HA_ATOMIC_DEC(&pool->used);
        HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
-       pool->used--;
        if (pool_is_crowded(pool)) {
                pool_free_area(ptr, pool->size + POOL_EXTRA);
-               pool->allocated--;
+               _HA_ATOMIC_DEC(&pool->allocated);
        } else {
                *POOL_LINK(pool, ptr) = (void *)pool->free_list;
                pool->free_list = (void *)ptr;
@@ -253,11 +253,9 @@ static inline void pool_put_to_shared_cache(struct pool_head *pool, void *ptr)
        /* ensure we crash on double free or free of a const area*/
        *(uint32_t *)ptr = 0xDEADADD4;
        pool_free_area(ptr, pool->size + POOL_EXTRA);
-       HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
-       pool->allocated--;
-       pool->used--;
+       _HA_ATOMIC_DEC(&pool->allocated);
+       _HA_ATOMIC_DEC(&pool->used);
        swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
-       HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
 #endif /* DEBUG_UAF */
 }
 
index f309b848799a8d6743417fd88cb94eec7f8a9d5c..f98092225b347ec32ce0cefd83a01505911a6014 100644
--- a/src/pool.c
+++ b/src/pool.c
@@ -215,7 +215,6 @@ void pool_flush(struct pool_head *pool)
 {
        struct pool_free_list cmp, new;
        void **next, *temp;
-       int removed = 0;
 
        if (!pool)
                return;
@@ -232,11 +231,10 @@ void pool_flush(struct pool_head *pool)
        while (next) {
                temp = next;
                next = *POOL_LINK(pool, temp);
-               removed++;
                pool_free_area(temp, pool->size + POOL_EXTRA);
+               _HA_ATOMIC_DEC(&pool->allocated);
        }
        pool->free_list = next;
-       _HA_ATOMIC_SUB(&pool->allocated, removed);
 	/* here, we should have pool->allocated == pool->used */
 }
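
The hunk above also shows the bus-cycles versus normalization trade-off
mentioned in the commit message: the old code counted removals locally and
issued a single _HA_ATOMIC_SUB(), while the new code issues one
_HA_ATOMIC_DEC() per element. A sketch of the two accounting styles,
reusing the demo_pool type from the earlier sketch and ignoring the
list-detachment synchronization the real pool_flush() performs:

	static void flush_batched(struct demo_pool *pool)
	{
		unsigned int removed = 0;
		void *next = pool->free_list, *temp;

		while (next) {
			temp = next;
			next = *(void **)temp;
			free(temp);
			removed++;                               /* count locally */
		}
		pool->free_list = NULL;
		atomic_fetch_sub(&pool->allocated, removed); /* one atomic op total */
	}

	static void flush_per_item(struct demo_pool *pool)
	{
		void *next = pool->free_list, *temp;

		while (next) {
			temp = next;
			next = *(void **)temp;
			free(temp);
			atomic_fetch_sub(&pool->allocated, 1);   /* one atomic op per item */
		}
		pool->free_list = NULL;
	}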
 
@@ -300,9 +298,9 @@ void pool_flush(struct pool_head *pool)
                        break;
                }
                pool->free_list = *POOL_LINK(pool, temp);
-               pool->allocated--;
                HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
                pool_free_area(temp, pool->size + POOL_EXTRA);
+               _HA_ATOMIC_DEC(&pool->allocated);
        }
        /* here, we should have pool->allocated == pool->used */
 }
@@ -327,8 +325,8 @@ void pool_gc(struct pool_head *pool_ctx)
                       (int)(entry->allocated - entry->used) > (int)entry->minavail) {
                        temp = entry->free_list;
                        entry->free_list = *POOL_LINK(entry, temp);
-                       entry->allocated--;
                        pool_free_area(temp, entry->size + POOL_EXTRA);
+                       _HA_ATOMIC_DEC(&entry->allocated);
                }
        }