From: Willy Tarreau
Date: Sat, 27 Jun 2020 22:54:27 +0000 (+0200)
Subject: MINOR: pools: move the LRU cache heads to thread_info
X-Git-Tag: v2.2-dev12~61
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=20dc3cd4a60d7b1895d8cca954a00da71b1dca3e;p=thirdparty%2Fhaproxy.git

MINOR: pools: move the LRU cache heads to thread_info

The LRU cache head was an array of lists, which causes false sharing
between 4 to 8 threads in the same cache line. Let's move it to the
thread_info structure instead. There's no need to do the same for the
pool_cache[] array since it's already quite large (32 pointers each).
By doing this the request rate increased by 1% on a 16-thread machine.
---

diff --git a/include/haproxy/pool.h b/include/haproxy/pool.h
index a62d06efad..c647bbdd28 100644
--- a/include/haproxy/pool.h
+++ b/include/haproxy/pool.h
@@ -78,7 +78,6 @@ static inline int pool_is_crowded(const struct pool_head *pool)
 extern struct pool_head pool_base_start[MAX_BASE_POOLS];
 extern unsigned int pool_base_count;
 extern struct pool_cache_head pool_cache[][MAX_BASE_POOLS];
-extern struct list pool_lru_head[MAX_THREADS];
 extern THREAD_LOCAL size_t pool_cache_bytes; /* total cache size */
 extern THREAD_LOCAL size_t pool_cache_count; /* #cache objects */
 
@@ -134,7 +133,7 @@ static inline void pool_put_to_cache(struct pool_head *pool, void *ptr, ssize_t
 	struct pool_cache_head *ph = &pool_cache[tid][idx];
 
 	LIST_ADD(&ph->list, &item->by_pool);
-	LIST_ADD(&pool_lru_head[tid], &item->by_lru);
+	LIST_ADD(&ti->pool_lru_head, &item->by_lru);
 	ph->count++;
 	pool_cache_count++;
 	pool_cache_bytes += ph->size;
diff --git a/include/haproxy/tinfo-t.h b/include/haproxy/tinfo-t.h
index b272661282..f3badbd88e 100644
--- a/include/haproxy/tinfo-t.h
+++ b/include/haproxy/tinfo-t.h
@@ -24,6 +24,7 @@
 #include
 #include
+#include
 
 /* thread info flags, for ha_thread_info[].flags */
 #define TI_FL_STUCK 0x00000001
 
@@ -40,6 +41,10 @@ struct thread_info {
 	uint64_t prev_mono_time;     /* previous system wide monotonic time */
 	unsigned int idle_pct;       /* idle to total ratio over last sample (percent) */
 	unsigned int flags;          /* thread info flags, TI_FL_* */
+
+#ifdef CONFIG_HAP_LOCAL_POOLS
+	struct list pool_lru_head;   /* oldest objects */
+#endif
 	/* pad to cache line (64B) */
 	char __pad[0];               /* unused except to check remaining room */
 	char __end[0] __attribute__((aligned(64)));
diff --git a/src/pool.c b/src/pool.c
index 0680c8bb7b..321f8bc677 100644
--- a/src/pool.c
+++ b/src/pool.c
@@ -36,7 +36,6 @@ unsigned int pool_base_count = 0;
 
 /* These ones are initialized per-thread on startup by init_pools() */
 struct pool_cache_head pool_cache[MAX_THREADS][MAX_BASE_POOLS];
-struct list pool_lru_head[MAX_THREADS]; /* oldest objects */
 THREAD_LOCAL size_t pool_cache_bytes = 0; /* total cache size */
 THREAD_LOCAL size_t pool_cache_count = 0; /* #cache objects */
 #endif
@@ -156,7 +155,7 @@ void pool_evict_from_cache()
 	struct pool_cache_head *ph;
 
 	do {
-		item = LIST_PREV(&pool_lru_head[tid], struct pool_cache_item *, by_lru);
+		item = LIST_PREV(&ti->pool_lru_head, struct pool_cache_item *, by_lru);
 
 		/* note: by definition we remove oldest objects so they also are the
 		 * oldest in their own pools, thus their next is the pool's head. */
@@ -581,7 +580,7 @@ static void init_pools()
 			LIST_INIT(&pool_cache[thr][idx].list);
 			pool_cache[thr][idx].size = 0;
 		}
-		LIST_INIT(&pool_lru_head[thr]);
+		LIST_INIT(&ha_thread_info[thr].pool_lru_head);
 	}
 #endif
 }