From: Willy Tarreau
Date: Thu, 15 Apr 2021 16:20:12 +0000 (+0200)
Subject: CLEANUP: pools: re-merge pool_refill_alloc() and __pool_refill_alloc()
X-Git-Tag: v2.4-dev17~74
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=8fe726f1183c1c0cb16e4724ced89482712260ef;p=thirdparty%2Fhaproxy.git

CLEANUP: pools: re-merge pool_refill_alloc() and __pool_refill_alloc()

They were strictly equivalent, so let's re-merge them and rename the result
to pool_alloc_nocache(), as it is the call that performs a real allocation
without checking nor updating the cache. The only difference in the past was
that the former took the lock while the latter did not, but the lock is not
needed anymore at this stage since the pool's list is not touched. In
addition, given that the "avail" argument is no longer used by the function
nor by its callers, let's drop it.
---

diff --git a/include/haproxy/pool.h b/include/haproxy/pool.h
index d01c6d7c80..a62a93b39e 100644
--- a/include/haproxy/pool.h
+++ b/include/haproxy/pool.h
@@ -48,8 +48,7 @@
 /* poison each newly allocated area with this byte if >= 0 */
 extern int mem_poison_byte;
 
-void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail);
-void *pool_refill_alloc(struct pool_head *pool, unsigned int avail);
+void *pool_alloc_nocache(struct pool_head *pool);
 void dump_pools_to_trash();
 void dump_pools(void);
 int pool_total_failures();
@@ -279,7 +278,7 @@ static inline void *__pool_alloc(struct pool_head *pool, unsigned int flags)
 	HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
 #endif
 	if ((p = __pool_get_first(pool)) == NULL)
-		p = __pool_refill_alloc(pool, 0);
+		p = pool_alloc_nocache(pool);
 #if !defined(CONFIG_HAP_LOCKLESS_POOLS) && !defined(CONFIG_HAP_NO_GLOBAL_POOLS)
 	HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
 #endif
diff --git a/src/dynbuf.c b/src/dynbuf.c
index 84e1ca2cde..0dce2101ce 100644
--- a/src/dynbuf.c
+++ b/src/dynbuf.c
@@ -49,7 +49,7 @@ int init_buffer()
 	pool_head_buffer->limit = global.tune.buf_limit;
 
 	for (done = 0; done < pool_head_buffer->minavail - 1; done++) {
-		buffer = pool_refill_alloc(pool_head_buffer, 1);
+		buffer = pool_alloc_nocache(pool_head_buffer);
 		if (!buffer)
 			return 0;
 		pool_free(pool_head_buffer, buffer);
diff --git a/src/pool.c b/src/pool.c
index cf1dd8630e..61b7a7ad1b 100644
--- a/src/pool.c
+++ b/src/pool.c
@@ -146,9 +146,13 @@ void pool_evict_from_cache()
 }
 #endif
 
-/* simply fall back on the default OS' allocator */
-
-void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
+/* Tries to allocate an object for the pool using the system's allocator
+ * and directly returns it. The pool's counters are updated but the object is
+ * never cached, so this is usable with and without local or shared caches.
+ * This may be called with or without the pool lock held, so it must not use
+ * the pool's lock.
+ */
+void *pool_alloc_nocache(struct pool_head *pool)
 {
 	int allocated = pool->allocated;
 	int limit = pool->limit;
@@ -182,14 +186,6 @@
 	return ptr;
 }
 
-/* legacy stuff */
-void *pool_refill_alloc(struct pool_head *pool, unsigned int avail)
-{
-	void *ptr;
-
-	ptr = __pool_refill_alloc(pool, avail);
-	return ptr;
-}
 
 #if defined(CONFIG_HAP_NO_GLOBAL_POOLS)
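
Editor's note: for readers who want the gist of the merged function without
opening src/pool.c, below is a minimal, self-contained sketch of what
pool_alloc_nocache() boils down to after this patch: allocate straight from
the system, honour the pool's limit, bump the counter and optionally poison
the area, without ever touching a cache or taking the pool lock. The struct
fields, the plain malloc() call and the non-atomic counter update are
simplifications assumed for illustration only; the real code uses HAProxy's
own allocation helpers and thread-safe counters.

#include <stdlib.h>
#include <string.h>

/* simplified stand-in for HAProxy's struct pool_head (illustration only) */
struct pool_head {
	size_t size;            /* size of one object served by this pool */
	unsigned int allocated; /* objects obtained from the system allocator */
	unsigned int limit;     /* max objects, 0 means unlimited */
};

/* poison each newly allocated area with this byte if >= 0 */
static int mem_poison_byte = -1;

/* Sketch: allocate one object directly from the system allocator, update the
 * pool's counter, never consult nor fill any cache, and never take the pool
 * lock, so it may be called with or without that lock held.
 */
static void *pool_alloc_nocache(struct pool_head *pool)
{
	void *ptr;

	if (pool->limit && pool->allocated >= pool->limit)
		return NULL;            /* configured limit reached */

	ptr = malloc(pool->size);
	if (!ptr)
		return NULL;

	pool->allocated++;
	if (mem_poison_byte >= 0)
		memset(ptr, mem_poison_byte, pool->size);
	return ptr;
}

int main(void)
{
	struct pool_head buffers = { .size = 16384, .allocated = 0, .limit = 0 };
	void *buf = pool_alloc_nocache(&buffers);

	/* a real caller would hand the object back with pool_free() */
	free(buf);
	return buf ? 0 : 1;
}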