From: Willy Tarreau
Date: Thu, 30 Dec 2021 16:37:33 +0000 (+0100)
Subject: MINOR: pool: check for pool's fullness outside of pool_put_to_shared_cache()
X-Git-Tag: v2.6-dev1~199
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=b46674a2839950769113c298d880ed01ca12339a;p=thirdparty%2Fhaproxy.git

MINOR: pool: check for pool's fullness outside of pool_put_to_shared_cache()

Instead of letting pool_put_to_shared_cache() pass the object to the
underlying OS layer when there's no more room, let's have the caller
check if the pool is full and either call pool_put_to_shared_cache()
or call pool_free_nocache(). Doing this noticeably simplifies the
code, as this function now only has to deal with a pool and an item,
and only for cases where there are local caches and shared caches.

As the code was simplified and the calls more isolated, the function
was moved to pool.c. Note that it's only called from
pool_evict_from_local_cache{,s}() and that a part of its logic might
very well move there when dealing with batches.
---

diff --git a/include/haproxy/pool.h b/include/haproxy/pool.h
index 1f9ca21f31..ec73cbcb0e 100644
--- a/include/haproxy/pool.h
+++ b/include/haproxy/pool.h
@@ -123,14 +123,15 @@ static inline void pool_refill_local_from_shared(struct pool_head *pool, struct
 	/* ignored without shared pools */
 }
 
-static inline void pool_put_to_shared_cache(struct pool_head *pool, void *ptr)
+static inline void pool_put_to_shared_cache(struct pool_head *pool, void *item)
 {
-	pool_free_nocache(pool, ptr);
+	/* ignored without shared pools */
 }
 
 #else /* CONFIG_HAP_NO_GLOBAL_POOLS */
 
 void pool_refill_local_from_shared(struct pool_head *pool, struct pool_cache_head *pch);
+void pool_put_to_shared_cache(struct pool_head *pool, void *item);
 
 /* returns true if the pool is considered to have too many free objects */
 static inline int pool_is_crowded(const struct pool_head *pool)
@@ -139,32 +140,6 @@ static inline int pool_is_crowded(const struct pool_head *pool)
 	       (int)(pool->allocated - pool->used) >= pool->minavail;
 }
 
-/* Locklessly add item <ptr> to pool <pool>, then update the pool used count.
- * Both the pool and the pointer must be valid. Use pool_free() for normal
- * operations.
- */
-static inline void pool_put_to_shared_cache(struct pool_head *pool, void *ptr)
-{
-	void **free_list;
-
-	if (unlikely(pool_is_crowded(pool))) {
-		pool_free_nocache(pool, ptr);
-		return;
-	}
-
-	_HA_ATOMIC_DEC(&pool->used);
-	free_list = _HA_ATOMIC_LOAD(&pool->free_list);
-	do {
-		while (unlikely(free_list == POOL_BUSY)) {
-			__ha_cpu_relax();
-			free_list = _HA_ATOMIC_LOAD(&pool->free_list);
-		}
-		_HA_ATOMIC_STORE((void **)ptr, (void *)free_list);
-		__ha_barrier_atomic_store();
-	} while (!_HA_ATOMIC_CAS(&pool->free_list, &free_list, ptr));
-	__ha_barrier_atomic_store();
-	swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
-}
 
 #endif /* CONFIG_HAP_NO_GLOBAL_POOLS */

diff --git a/src/pool.c b/src/pool.c
index 45241fe11a..7b7dd998ec 100644
--- a/src/pool.c
+++ b/src/pool.c
@@ -320,7 +320,11 @@ void pool_evict_from_local_cache(struct pool_head *pool)
 		pool_cache_count--;
 		LIST_DELETE(&item->by_pool);
 		LIST_DELETE(&item->by_lru);
-		pool_put_to_shared_cache(pool, item);
+
+		if (unlikely(pool_is_crowded(pool)))
+			pool_free_nocache(pool, item);
+		else
+			pool_put_to_shared_cache(pool, item);
 	}
 }
 
@@ -345,7 +349,10 @@ void pool_evict_from_local_caches()
 		ph->count--;
 		pool_cache_count--;
 		pool_cache_bytes -= pool->size;
-		pool_put_to_shared_cache(pool, item);
+		if (unlikely(pool_is_crowded(pool)))
+			pool_free_nocache(pool, item);
+		else
+			pool_put_to_shared_cache(pool, item);
 	} while (pool_cache_bytes > CONFIG_HAP_POOL_CACHE_SIZE * 7 / 8);
 }
 
@@ -434,6 +441,29 @@ void pool_refill_local_from_shared(struct pool_head *pool, struct pool_cache_hea
 	pool_cache_bytes += pool->size;
 }
 
+/* Adds cache item entry <item> to the shared cache. The caller is advised to
+ * first check using pool_is_crowded() if it's wise to add this object there.
+ * Both the pool and the item must be valid. Use pool_free() for normal
+ * operations.
+ */
+void pool_put_to_shared_cache(struct pool_head *pool, void *item)
+{
+	void **free_list;
+
+	_HA_ATOMIC_DEC(&pool->used);
+	free_list = _HA_ATOMIC_LOAD(&pool->free_list);
+	do {
+		while (unlikely(free_list == POOL_BUSY)) {
+			__ha_cpu_relax();
+			free_list = _HA_ATOMIC_LOAD(&pool->free_list);
+		}
+		_HA_ATOMIC_STORE((void **)item, (void *)free_list);
+		__ha_barrier_atomic_store();
+	} while (!_HA_ATOMIC_CAS(&pool->free_list, &free_list, item));
+	__ha_barrier_atomic_store();
+	swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
+}
+
 /*
  * This function frees whatever can be freed in pool <pool>.
  */
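
For readers unfamiliar with the CAS loop above: the shared cache is an
intrusive lock-free LIFO in which each freed object's first word is reused
as the "next" link, and POOL_BUSY is a sentinel a consumer stores into the
head while it detaches the whole list. Below is a minimal standalone sketch
of the same push pattern written with C11 atomics; all names here (lifo,
lifo_push, LIFO_BUSY) are illustrative only and are not HAProxy's, which
uses its own _HA_ATOMIC_* wrappers and explicit memory barriers instead.

#include <stdatomic.h>
#include <stdio.h>

#define LIFO_BUSY ((void *)1)	/* sentinel: head temporarily detached */

struct lifo {
	_Atomic(void *) head;	/* top of the intrusive free list */
};

/* Push <item> onto <l>: reuse the item's first word as the next link,
 * then publish it as the new head with a CAS, retrying on contention.
 */
static void lifo_push(struct lifo *l, void *item)
{
	void *next = atomic_load(&l->head);

	do {
		/* spin while a consumer holds the head with the sentinel */
		while (next == LIFO_BUSY)
			next = atomic_load(&l->head);
		/* link the current head behind <item> before publishing */
		*(void **)item = next;
	} while (!atomic_compare_exchange_weak(&l->head, &next, item));
}

int main(void)
{
	struct lifo l = { .head = NULL };
	void *slots[3];
	int i;

	for (i = 0; i < 3; i++)
		lifo_push(&l, &slots[i]);

	/* items come back in reverse push order (LIFO) */
	for (void *p = atomic_load(&l.head); p; p = *(void **)p)
		printf("slot at %p\n", p);
	return 0;
}

Note that the sketch omits what makes the sentinel necessary in HAProxy:
a consumer refilling a local cache swaps POOL_BUSY into the head so it can
take the entire list atomically, which is why the producer side must spin
rather than chain behind the sentinel value.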