git.ipfire.org Git - thirdparty/haproxy.git/commitdiff
MINOR: pools: move the failed allocation counter over a few buckets
author    Willy Tarreau <w@1wt.eu>
          Mon, 24 Jul 2023 14:38:09 +0000 (16:38 +0200)
committer Willy Tarreau <w@1wt.eu>
          Sat, 12 Aug 2023 17:04:34 +0000 (19:04 +0200)
The failed allocation counter cannot be indexed by a pointer: when an
allocation fails, there is no pointer to hash. But since it's a
perpetually increasing counter and not a gauge, we don't care in which
bucket it's incremented. Thus we hash on the TID instead. There's no
contention on this counter anyway, but it's better not to waste room in
the pool's head and to move it alongside the other per-bucket counters.
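
The key property is that only the sum across buckets is ever reported,
so a monotonic counter may be incremented in any bucket without losing
accuracy; a gauge, by contrast, would need its increments and decrements
to land in the same bucket. A minimal standalone sketch of the idea,
using plain C11 atomics rather than HAProxy's HA_ATOMIC_* wrappers and a
hypothetical bucket count of 8:

    #include <stdatomic.h>

    #define POOL_BUCKETS 8  /* stand-in for CONFIG_HAP_POOL_BUCKETS */

    static _Atomic unsigned int failed[POOL_BUCKETS];

    /* a monotonic counter may land in any bucket: spreading by thread
     * id (the equivalent of hashing on the TID) keeps concurrent
     * threads on different cache lines and avoids contention.
     */
    static void note_failure(unsigned int tid)
    {
            atomic_fetch_add(&failed[tid % POOL_BUCKETS], 1);
    }

    /* readers sum all buckets; the total equals the number of
     * note_failure() calls regardless of which bucket each one hit.
     */
    static unsigned int total_failures(void)
    {
            unsigned int sum = 0;

            for (int b = 0; b < POOL_BUCKETS; b++)
                    sum += atomic_load(&failed[b]);
            return sum;
    }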

include/haproxy/pool-t.h
include/haproxy/pool.h
src/pool.c

diff --git a/include/haproxy/pool-t.h b/include/haproxy/pool-t.h
index 3cbab8bf16f512be81e2bae809f1a801af880105..49eef3382fe83f32ea9e86b4b5b391397c79833d 100644
@@ -122,7 +122,6 @@ struct pool_head {
        THREAD_ALIGN(64);
 
        struct pool_item *free_list; /* list of free shared objects */
-       unsigned int failed;    /* failed allocations */
 
        /* these entries depend on the pointer value, they're used to reduce
         * the contention on fast-changing values. The alignment here is
@@ -133,6 +132,7 @@ struct pool_head {
                unsigned int allocated; /* how many chunks have been allocated */
                unsigned int used;      /* how many chunks are currently in use */
                unsigned int needed_avg;/* floating indicator between used and allocated */
+               unsigned int failed;    /* failed allocations (indexed by hash of TID) */
        } buckets[CONFIG_HAP_POOL_BUCKETS];
 
        struct pool_cache_head cache[MAX_THREADS] THREAD_ALIGNED(64); /* pool caches */
diff --git a/include/haproxy/pool.h b/include/haproxy/pool.h
index b999c392d6a69892142afce7566a8ef52f708567..8c480d3f63e3076bf64e0512fa188189be202232 100644
@@ -171,6 +171,17 @@ static inline uint pool_needed_avg(const struct pool_head *pool)
        return ret;
 }
 
+/* returns the total number of failed allocations for a pool across all buckets */
+static inline uint pool_failed(const struct pool_head *pool)
+{
+       int bucket;
+       uint ret;
+
+       for (bucket = ret = 0; bucket < CONFIG_HAP_POOL_BUCKETS; bucket++)
+               ret += HA_ATOMIC_LOAD(&pool->buckets[bucket].failed);
+       return ret;
+}
+
 /* Returns the max number of entries that may be brought back to the pool
  * before it's considered as full. Note that it is only usable for releasing
  * objects, hence the function assumes that no more than ->used entries will
diff --git a/src/pool.c b/src/pool.c
index e2f2e7e4c04d81dd52ed25a32484269286aa55d3..0d2a840ab2ffd29927da99bfde7df5f16fdb3305 100644
@@ -395,7 +395,7 @@ void *pool_get_from_os_noinc(struct pool_head *pool)
                        ptr = pool_alloc_area(pool->alloc_sz);
                if (ptr)
                        return ptr;
-               _HA_ATOMIC_INC(&pool->failed);
+               _HA_ATOMIC_INC(&pool->buckets[pool_tbucket()].failed);
        }
        activity[tid].pool_fail++;
        return NULL;
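
pool_tbucket() used above is not part of this diff; a plausible sketch
of what it does, assuming it simply maps the calling thread's id into
the bucket range (the exact definition in pool.h may differ):

    /* hypothetical: pick the current thread's bucket; any uniform
     * mapping of tid onto [0, CONFIG_HAP_POOL_BUCKETS) works, since
     * only the sum of the per-bucket counters is ever reported.
     */
    static inline uint pool_tbucket(void)
    {
            return tid % CONFIG_HAP_POOL_BUCKETS;
    }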
@@ -985,7 +985,7 @@ void dump_pools_to_trash(int by_what, int max, const char *pfx)
                pool_info[nbpools].used_items = pool_used(entry);
                pool_info[nbpools].cached_items = cached;
                pool_info[nbpools].need_avg = swrate_avg(pool_needed_avg(entry), POOL_AVG_SAMPLES);
-               pool_info[nbpools].failed_items = entry->failed;
+               pool_info[nbpools].failed_items = pool_failed(entry);
                nbpools++;
        }
 
@@ -1040,7 +1040,7 @@ int pool_total_failures()
        int failed = 0;
 
        list_for_each_entry(entry, &pools, list)
-               failed += entry->failed;
+               failed += pool_failed(entry);
        return failed;
 }