git.ipfire.org Git - thirdparty/haproxy.git/commitdiff
MINOR: pools: factor the release code into pool_put_to_os()
author     Willy Tarreau <w@1wt.eu>
           Sat, 17 Apr 2021 15:48:40 +0000 (17:48 +0200)
committer  Willy Tarreau <w@1wt.eu>
           Mon, 19 Apr 2021 13:24:33 +0000 (15:24 +0200)
There are two levels of freeing to the OS:
  - code that wants to keep the pool's usage counters updated uses
    pool_free_area() and handles the counters itself. That's what
    pool_put_to_shared_cache() does in the no-global-pools case.
  - code that does not want to update the counters, because they were
    already updated, and which therefore only calls pool_free_area().

Let's extract these calls to establish the symmetry with pool_get_from_os()
and pool_alloc_nocache(), resulting in pool_put_to_os() (which only updates
the allocated counter) and pool_free_nocache() (which also updates the used
counter). This will later allow the generic code to be simplified.
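
As a rough sketch (not part of the patch), here is how a caller might pick
between the two new helpers; example_release() and its counters_done flag are
hypothetical, while the helpers and pool_head fields are the ones visible in
the hunks below:

    void example_release(struct pool_head *pool, void *ptr, int counters_done)
    {
        if (counters_done) {
            /* "used" and the needed_avg rate were already updated by the
             * caller: only return the area to the OS and drop the
             * "allocated" counter.
             */
            pool_put_to_os(pool, ptr);
        } else {
            /* full release: decrement "used", feed the needed_avg sliding
             * average, then fall through to pool_put_to_os().
             */
            pool_free_nocache(pool, ptr);
        }
    }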

include/haproxy/pool.h
src/pool.c

diff --git a/include/haproxy/pool.h b/include/haproxy/pool.h
index 8fce5932d7cff6fcd9cab6118fface99549f3206..c1faa279374c436b28a6ed9ac03c275a0b0665a2 100644
@@ -49,7 +49,9 @@
 extern int mem_poison_byte;
 
 void *pool_get_from_os(struct pool_head *pool);
+void pool_put_to_os(struct pool_head *pool, void *ptr);
 void *pool_alloc_nocache(struct pool_head *pool);
+void pool_free_nocache(struct pool_head *pool, void *ptr);
 void dump_pools_to_trash();
 void dump_pools(void);
 int pool_total_failures();
@@ -139,10 +141,7 @@ static inline void *pool_get_from_shared_cache(struct pool_head *pool)
 
 static inline void pool_put_to_shared_cache(struct pool_head *pool, void *ptr)
 {
-       _HA_ATOMIC_DEC(&pool->used);
-       _HA_ATOMIC_DEC(&pool->allocated);
-       swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
-       pool_free_area(ptr, pool->size + POOL_EXTRA);
+       pool_free_nocache(pool, ptr);
 }
 
 #elif defined(CONFIG_HAP_LOCKLESS_POOLS)
@@ -190,8 +189,7 @@ static inline void pool_put_to_shared_cache(struct pool_head *pool, void *ptr)
        _HA_ATOMIC_DEC(&pool->used);
 
        if (unlikely(pool_is_crowded(pool))) {
-               pool_free_area(ptr, pool->size + POOL_EXTRA);
-               _HA_ATOMIC_DEC(&pool->allocated);
+               pool_put_to_os(pool, ptr);
        } else {
                do {
                        *POOL_LINK(pool, ptr) = (void *)free_list;
@@ -258,8 +256,7 @@ static inline void pool_put_to_shared_cache(struct pool_head *pool, void *ptr)
 
        if (ptr) {
                /* still not freed */
-               pool_free_area(ptr, pool->size + POOL_EXTRA);
-               _HA_ATOMIC_DEC(&pool->allocated);
+               pool_put_to_os(pool, ptr);
        }
        swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
 }
diff --git a/src/pool.c b/src/pool.c
index f98092225b347ec32ce0cefd83a01505911a6014..0956976eea13677e680b0e5d7f4875f6ce97fb2d 100644
@@ -139,6 +139,15 @@ void *pool_get_from_os(struct pool_head *pool)
 
 }
 
+/* Releases a pool item back to the operating system and atomically updates
+ * the allocation counter.
+ */
+void pool_put_to_os(struct pool_head *pool, void *ptr)
+{
+       pool_free_area(ptr, pool->size + POOL_EXTRA);
+       _HA_ATOMIC_DEC(&pool->allocated);
+}
+
 #ifdef CONFIG_HAP_POOLS
 /* Evicts some of the oldest objects from the local cache, pushing them to the
  * global pool.
@@ -190,6 +199,17 @@ void *pool_alloc_nocache(struct pool_head *pool)
        return ptr;
 }
 
+/* Releases a pool item back to the OS and keeps the pool's counters up to
+ * date. This is always defined even when pools are not enabled, so that
+ * the usage stats keep being maintained.
+ */
+void pool_free_nocache(struct pool_head *pool, void *ptr)
+{
+       _HA_ATOMIC_DEC(&pool->used);
+       swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
+       pool_put_to_os(pool, ptr);
+}
+
 
 #if defined(CONFIG_HAP_NO_GLOBAL_POOLS)
 
@@ -231,8 +251,7 @@ void pool_flush(struct pool_head *pool)
        while (next) {
                temp = next;
                next = *POOL_LINK(pool, temp);
-               pool_free_area(temp, pool->size + POOL_EXTRA);
-               _HA_ATOMIC_DEC(&pool->allocated);
+               pool_put_to_os(pool, temp);
        }
        pool->free_list = next;
        /* here, we should have pool->allocated == pool->used */
@@ -265,8 +284,7 @@ void pool_gc(struct pool_head *pool_ctx)
                        new.seq = cmp.seq + 1;
                        if (HA_ATOMIC_DWCAS(&entry->free_list, &cmp, &new) == 0)
                                continue;
-                       pool_free_area(cmp.free_list, entry->size + POOL_EXTRA);
-                       _HA_ATOMIC_DEC(&entry->allocated);
+                       pool_put_to_os(entry, cmp.free_list);
                }
        }
 
@@ -299,8 +317,7 @@ void pool_flush(struct pool_head *pool)
                }
                pool->free_list = *POOL_LINK(pool, temp);
                HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
-               pool_free_area(temp, pool->size + POOL_EXTRA);
-               _HA_ATOMIC_DEC(&pool->allocated);
+               pool_put_to_os(pool, temp);
        }
        /* here, we should have pool->allocated == pool->used */
 }
@@ -325,8 +342,7 @@ void pool_gc(struct pool_head *pool_ctx)
                       (int)(entry->allocated - entry->used) > (int)entry->minavail) {
                        temp = entry->free_list;
                        entry->free_list = *POOL_LINK(entry, temp);
-                       pool_free_area(temp, entry->size + POOL_EXTRA);
-                       _HA_ATOMIC_DEC(&entry->allocated);
+                       pool_put_to_os(entry, temp);
                }
        }
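
On a related note, the "allocated == used" comments in both pool_flush()
variants follow directly from the symmetry this patch establishes. A minimal
walk-through (assuming a single thread and only the counter updates shown
above):

    void *p = pool_alloc_nocache(pool); /* pool_get_from_os(): allocated++, then used++ */
    pool_free_nocache(pool, p);         /* used--, then pool_put_to_os(): allocated-- */

Each free-list entry is counted in "allocated" but not in "used", so once
pool_flush() has pushed every entry through pool_put_to_os(), the remaining
difference between the two counters is exactly the number of objects still
held by callers, and the counters coincide once everything has been released.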