From: Willy Tarreau Date: Mon, 1 Jun 2020 16:16:57 +0000 (+0200) Subject: MEDIUM: memory: don't let pool_put_to_cache() free the objects itself X-Git-Tag: v2.2-dev9~112 X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=fb117e6a8;p=thirdparty%2Fhaproxy.git MEDIUM: memory: don't let pool_put_to_cache() free the objects itself Just as for the allocation path, the release path was not symmetrical. It was not logical to have pool_put_to_cache() free the objects itself, it was pool_free's job. In addition, just because of a variable export issue, the insertion of the object to free back into the local cache couldn't be inlined while it was very cheap. This patch just slightly reorganizes this code path by making pool_free() decide whether or not to put the object back into the cache via pool_put_to_cache() otherwise place it back to the global pool using __pool_free(). Then pool_put_to_cache() adds the item to the local cache and only calls pool_evict_from_cache() if the cache is too big. --- diff --git a/include/common/memory.h b/include/common/memory.h index 2eaa6234b9..e19cd1bbc2 100644 --- a/include/common/memory.h +++ b/include/common/memory.h @@ -32,6 +32,7 @@ #include extern struct pool_cache_head pool_cache[][MAX_BASE_POOLS]; +extern struct list pool_lru_head[MAX_THREADS]; extern THREAD_LOCAL size_t pool_cache_bytes; /* total cache size */ extern THREAD_LOCAL size_t pool_cache_count; /* #cache objects */ extern struct pool_head pool_base_start[MAX_BASE_POOLS]; @@ -250,27 +251,25 @@ static inline void __pool_free(struct pool_head *pool, void *ptr) swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used); } -/* frees an object to the local cache, possibly pushing oldest objects to the +void pool_evict_from_cache(); + +/* Frees an object to the local cache, possibly pushing oldest objects to the * global pool. 
*/ -void __pool_put_to_cache(struct pool_head *pool, void *ptr, ssize_t idx); -static inline void pool_put_to_cache(struct pool_head *pool, void *ptr) +static inline void pool_put_to_cache(struct pool_head *pool, void *ptr, ssize_t idx) { - ssize_t idx = pool_get_index(pool); + struct pool_cache_item *item = (struct pool_cache_item *)ptr; + struct pool_cache_head *ph = &pool_cache[tid][idx]; - /* pool not in cache or too many objects for this pool (more than - * half of the cache is used and this pool uses more than 1/8 of - * the cache size). - */ - if (idx < 0 || - (pool_cache_bytes > CONFIG_HAP_POOL_CACHE_SIZE * 3 / 4 && - pool_cache[tid][idx].count >= 16 + pool_cache_count / 8)) { - __pool_free(pool, ptr); - return; - } - __pool_put_to_cache(pool, ptr, idx); -} + LIST_ADD(&ph->list, &item->by_pool); + LIST_ADD(&pool_lru_head[tid], &item->by_lru); + ph->count++; + pool_cache_count++; + pool_cache_bytes += ph->size; + if (unlikely(pool_cache_bytes > CONFIG_HAP_POOL_CACHE_SIZE)) + pool_evict_from_cache(pool, ptr, idx); +} /* * Puts a memory area back to the corresponding pool. * Items are chained directly through a pointer that @@ -283,6 +282,8 @@ static inline void pool_put_to_cache(struct pool_head *pool, void *ptr) static inline void pool_free(struct pool_head *pool, void *ptr) { if (likely(ptr != NULL)) { + ssize_t idx __maybe_unused; + #ifdef DEBUG_MEMORY_POOLS /* we'll get late corruption if we refill to the wrong pool or double-free */ if (*POOL_LINK(pool, ptr) != (void *)pool) @@ -290,7 +291,20 @@ static inline void pool_free(struct pool_head *pool, void *ptr) #endif if (mem_poison_byte >= 0) memset(ptr, mem_poison_byte, pool->size); - pool_put_to_cache(pool, ptr); + + /* put the object back into the cache only if there are not too + * many objects yet in this pool (no more than half of the cache + * is used or this pool uses no more than 1/8 of the cache size). 
+ */ + idx = pool_get_index(pool); + if (idx >= 0 && + (pool_cache_bytes <= CONFIG_HAP_POOL_CACHE_SIZE * 3 / 4 || + pool_cache[tid][idx].count < 16 + pool_cache_count / 8)) { + pool_put_to_cache(pool, ptr, idx); + return; + } + + __pool_free(pool, ptr); } } diff --git a/src/memory.c b/src/memory.c index 106b04a0b5..8589c7d69f 100644 --- a/src/memory.c +++ b/src/memory.c @@ -40,7 +40,7 @@ unsigned int pool_base_count = 0; /* These ones are initialized per-thread on startup by init_pools() */ struct pool_cache_head pool_cache[MAX_THREADS][MAX_BASE_POOLS]; -static struct list pool_lru_head[MAX_THREADS]; /* oldest objects */ +struct list pool_lru_head[MAX_THREADS]; /* oldest objects */ THREAD_LOCAL size_t pool_cache_bytes = 0; /* total cache size */ THREAD_LOCAL size_t pool_cache_count = 0; /* #cache objects */ @@ -278,22 +278,13 @@ void pool_gc(struct pool_head *pool_ctx) thread_release(); } -/* frees an object to the local cache, possibly pushing oldest objects to the - * global pool. Must not be called directly. +/* Evicts some of the oldest objects from the local cache, pushing them to the + * global pool. */ -void __pool_put_to_cache(struct pool_head *pool, void *ptr, ssize_t idx) +void pool_evict_from_cache() { - struct pool_cache_item *item = (struct pool_cache_item *)ptr; - struct pool_cache_head *ph = &pool_cache[tid][idx]; - - LIST_ADD(&ph->list, &item->by_pool); - LIST_ADD(&pool_lru_head[tid], &item->by_lru); - ph->count++; - pool_cache_count++; - pool_cache_bytes += ph->size; - - if (pool_cache_bytes <= CONFIG_HAP_POOL_CACHE_SIZE) - return; + struct pool_cache_item *item; + struct pool_cache_head *ph; do { item = LIST_PREV(&pool_lru_head[tid], struct pool_cache_item *, by_lru);