From feeda4132b5677e01e68eec6eab895a4806105a4 Mon Sep 17 00:00:00 2001
From: Willy Tarreau
Date: Mon, 24 Jul 2023 15:12:31 +0200
Subject: [PATCH] OPTIM: pools: use exponential back-off on shared pool
 allocation/release

Running a stick-table stress with -dMglobal under 56 threads shows
extreme contention on the pool's free_list because it has to be
processed in two phases, and the retry path only performs a
cpu_relax().

Let's at least implement exponential back-off here to limit the
neighbor's noise and reduce the time needed to successfully acquire
the pointer. Just doing so shows there's still contention, but it
almost doubles the performance, from 1.1 to 2.1M req/s.
---
 src/pool.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/src/pool.c b/src/pool.c
index 292b27d133..a2edaab1fc 100644
--- a/src/pool.c
+++ b/src/pool.c
@@ -632,8 +632,7 @@ void pool_refill_local_from_shared(struct pool_head *pool, struct pool_cache_hea
 	ret = _HA_ATOMIC_LOAD(&pool->free_list);
 	do {
 		while (unlikely(ret == POOL_BUSY)) {
-			__ha_cpu_relax();
-			ret = _HA_ATOMIC_LOAD(&pool->free_list);
+			ret = (void*)pl_wait_new_long((ulong*)&pool->free_list, (ulong)ret);
 		}
 		if (ret == NULL)
 			return;
@@ -678,8 +677,7 @@ void pool_put_to_shared_cache(struct pool_head *pool, struct pool_item *item, ui
 	free_list = _HA_ATOMIC_LOAD(&pool->free_list);
 	do {
 		while (unlikely(free_list == POOL_BUSY)) {
-			__ha_cpu_relax();
-			free_list = _HA_ATOMIC_LOAD(&pool->free_list);
+			free_list = (void*)pl_wait_new_long((ulong*)&pool->free_list, (ulong)free_list);
 		}
 		_HA_ATOMIC_STORE(&item->next, free_list);
 		__ha_barrier_atomic_store();
-- 
2.47.3
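
For context, pl_wait_new_long() is provided by the plock library bundled
with HAProxy. A minimal sketch of such an exponential back-off wait,
assuming a GCC/Clang toolchain and using an illustrative helper name
rather than the actual plock implementation, could look like this:

/* Illustrative sketch only: spin until *ptr differs from <old>, doubling
 * the number of relax cycles between reloads so that contending threads
 * progressively spread their accesses apart. Returns the new value
 * observed. The cap of 65536 relax cycles per round is an arbitrary
 * choice for this example.
 */
static inline void cpu_relax_hint(void)
{
#if defined(__x86_64__) || defined(__i386__)
	__asm__ volatile("pause");
#else
	__asm__ volatile("");
#endif
}

static unsigned long wait_new_long(const unsigned long *ptr, unsigned long old)
{
	unsigned long curr;
	unsigned int loops = 1;

	while (1) {
		curr = __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
		if (curr != old)
			return curr;
		for (unsigned int i = 0; i < loops; i++)
			cpu_relax_hint();
		if (loops < 65536)
			loops <<= 1;	/* exponential back-off */
	}
}

In the patch above, the caller passes the POOL_BUSY marker as <old>, so
such a wait returns as soon as another thread has restored the list head,
instead of hammering the cache line with back-to-back atomic loads.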