git.ipfire.org Git - thirdparty/haproxy.git/commitdiff
MINOR: pool: check for pool's fullness outside of pool_put_to_shared_cache()
author: Willy Tarreau <w@1wt.eu>
Thu, 30 Dec 2021 16:37:33 +0000 (17:37 +0100)
committer: Willy Tarreau <w@1wt.eu>
Sun, 2 Jan 2022 18:35:26 +0000 (19:35 +0100)
Instead of letting pool_put_to_shared_cache() pass the object to the
underlying OS layer when there's no more room, let's have the caller
check if the pool is full and either call pool_put_to_shared_cache()
or call pool_free_nocache().

Doing this noticeably simplifies the code, as this function now only has
to deal with a pool and an item, and only for cases where there are
local caches and shared caches. As the code was simplified and the
calls more isolated, the function was moved to pool.c.

Note that it's only called from pool_evict_from_local_cache{,s}() and
that a part of its logic might very well move there when dealing with
batches.

include/haproxy/pool.h
src/pool.c

index 1f9ca21f3193ac6d5820a36e46809e9211f1a23b..ec73cbcb0e071017550a96ec62e408441ad611dc 100644 (file)
@@ -123,14 +123,15 @@ static inline void pool_refill_local_from_shared(struct pool_head *pool, struct
        /* ignored without shared pools */
 }
 
-static inline void pool_put_to_shared_cache(struct pool_head *pool, void *ptr)
+static inline void pool_put_to_shared_cache(struct pool_head *pool, void *item)
 {
-       pool_free_nocache(pool, ptr);
+       /* ignored without shared pools */
 }
 
 #else /* CONFIG_HAP_NO_GLOBAL_POOLS */
 
 void pool_refill_local_from_shared(struct pool_head *pool, struct pool_cache_head *pch);
+void pool_put_to_shared_cache(struct pool_head *pool, void *item);
 
 /* returns true if the pool is considered to have too many free objects */
 static inline int pool_is_crowded(const struct pool_head *pool)
@@ -139,32 +140,6 @@ static inline int pool_is_crowded(const struct pool_head *pool)
               (int)(pool->allocated - pool->used) >= pool->minavail;
 }
 
-/* Locklessly add item <ptr> to pool <pool>, then update the pool used count.
- * Both the pool and the pointer must be valid. Use pool_free() for normal
- * operations.
- */
-static inline void pool_put_to_shared_cache(struct pool_head *pool, void *ptr)
-{
-       void **free_list;
-
-       if (unlikely(pool_is_crowded(pool))) {
-               pool_free_nocache(pool, ptr);
-               return;
-       }
-
-       _HA_ATOMIC_DEC(&pool->used);
-       free_list = _HA_ATOMIC_LOAD(&pool->free_list);
-       do {
-               while (unlikely(free_list == POOL_BUSY)) {
-                       __ha_cpu_relax();
-                       free_list = _HA_ATOMIC_LOAD(&pool->free_list);
-               }
-               _HA_ATOMIC_STORE((void **)ptr, (void *)free_list);
-               __ha_barrier_atomic_store();
-       } while (!_HA_ATOMIC_CAS(&pool->free_list, &free_list, ptr));
-       __ha_barrier_atomic_store();
-       swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
-}
 
 #endif /* CONFIG_HAP_NO_GLOBAL_POOLS */
 
index 45241fe11a3a9fe970a714b8ffdaa7b8674178f3..7b7dd998ec6da7b80b990a0ee466e826d71b1bfa 100644 (file)
@@ -320,7 +320,11 @@ void pool_evict_from_local_cache(struct pool_head *pool)
                pool_cache_count--;
                LIST_DELETE(&item->by_pool);
                LIST_DELETE(&item->by_lru);
-               pool_put_to_shared_cache(pool, item);
+
+               if (unlikely(pool_is_crowded(pool)))
+                       pool_free_nocache(pool, item);
+               else
+                       pool_put_to_shared_cache(pool, item);
        }
 }
 
@@ -345,7 +349,10 @@ void pool_evict_from_local_caches()
                ph->count--;
                pool_cache_count--;
                pool_cache_bytes -= pool->size;
-               pool_put_to_shared_cache(pool, item);
+               if (unlikely(pool_is_crowded(pool)))
+                       pool_free_nocache(pool, item);
+               else
+                       pool_put_to_shared_cache(pool, item);
        } while (pool_cache_bytes > CONFIG_HAP_POOL_CACHE_SIZE * 7 / 8);
 }
 
@@ -434,6 +441,29 @@ void pool_refill_local_from_shared(struct pool_head *pool, struct pool_cache_hea
        pool_cache_bytes += pool->size;
 }
 
+/* Adds cache item entry <item> to the shared cache. The caller is advised to
+ * first check using pool_is_crowded() if it's wise to add this object there.
+ * Both the pool and the item must be valid. Use pool_free() for normal
+ * operations.
+ */
+void pool_put_to_shared_cache(struct pool_head *pool, void *item)
+{
+       void **free_list;
+
+       _HA_ATOMIC_DEC(&pool->used);
+       free_list = _HA_ATOMIC_LOAD(&pool->free_list);
+       do {
+               while (unlikely(free_list == POOL_BUSY)) {
+                       __ha_cpu_relax();
+                       free_list = _HA_ATOMIC_LOAD(&pool->free_list);
+               }
+               _HA_ATOMIC_STORE((void **)item, (void *)free_list);
+               __ha_barrier_atomic_store();
+       } while (!_HA_ATOMIC_CAS(&pool->free_list, &free_list, item));
+       __ha_barrier_atomic_store();
+       swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
+}
+
 /*
  * This function frees whatever can be freed in pool <pool>.
  */