#include <unistd.h>
#include <haproxy/api.h>
+#include <haproxy/freq_ctr.h>
#include <haproxy/list.h>
#include <haproxy/thread.h>
#define MAX_BASE_POOLS 32
+#define POOL_AVG_SAMPLES 1024
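+/* POOL_AVG_SAMPLES is the window of the sliding weighted sum kept in
+ * pool->needed_avg by the swrate_* helpers from <haproxy/freq_ctr.h>.
+ * A rough sketch of their behaviour (see freq_ctr.h for the authoritative
+ * definitions; the first and last match the local copies removed below):
+ *
+ *   swrate_add(&sum, n, v)           : sum = sum - ceil(sum/n) + v
+ *   swrate_add_scaled(&sum, n, v, s) : roughly swrate_add() repeated <s>
+ *                                      times, i.e. <v> weighs ~s/n of sum
+ *   swrate_avg(sum, n)               : ceil(sum/n), the current average
+ *
+ * The sum thus converges towards n times the average of the recent samples.
+ */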
+
struct pool_cache_head {
struct list list; /* head of objects in this pool */
size_t size; /* size of an object */
return idx;
}
-/* The two functions below were copied from freq_ctr.h's swrate_add, impossible
- * to use here due to include dependency hell again!
- */
-#define POOL_AVG_SAMPLES 1024
-
-static inline unsigned int pool_avg_add(unsigned int *sum, unsigned int v)
-{
- unsigned int new_sum, old_sum;
- unsigned int n = POOL_AVG_SAMPLES;
-
- old_sum = *sum;
- do {
- new_sum = old_sum - (old_sum + n - 1) / n + v;
- } while (!_HA_ATOMIC_CAS(sum, &old_sum, new_sum));
- return new_sum;
-}
-
-/* make the new value <v> count for 1/4 of the total sum */
-static inline unsigned int pool_avg_bump(unsigned int *sum, unsigned int v)
-{
- unsigned int new_sum, old_sum;
- unsigned int n = POOL_AVG_SAMPLES;
-
- old_sum = *sum;
- do {
- new_sum = old_sum - (old_sum + 3) / 4;
- new_sum += (n * v + 3) / 4;
- } while (!_HA_ATOMIC_CAS(sum, &old_sum, new_sum));
- return new_sum;
-}
-
-static inline unsigned int pool_avg(unsigned int sum)
-{
- unsigned int n = POOL_AVG_SAMPLES;
-
- return (sum + n - 1) / n;
-}
-
/* returns true if the pool is considered to have too many free objects */
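+/* "Crowded" here means the pool holds more objects than it recently needed:
+ * allocated reaches ~125% of the sliding average need (needed_avg is the
+ * sliding sum, so it is averaged with an extra 1/4 margin) while at least
+ * ->minavail objects sit unused.
+ */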
static inline int pool_is_crowded(const struct pool_head *pool)
{
- return pool->allocated >= pool_avg(pool->needed_avg + pool->needed_avg / 4) &&
+ return pool->allocated >= swrate_avg(pool->needed_avg + pool->needed_avg / 4, POOL_AVG_SAMPLES) &&
(int)(pool->allocated - pool->used) >= pool->minavail;
}
} while (!_HA_ATOMIC_CAS(&pool->free_list, &free_list, ptr));
__ha_barrier_atomic_store();
}
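+ /* each release samples the current number of used objects, so that
+  * needed_avg tracks how many objects were needed over the recent past */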
- pool_avg_add(&pool->needed_avg, pool->used);
+ swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
}
/* frees an object to the local cache, possibly pushing oldest objects to the
*POOL_LINK(pool, ptr) = (void *)pool->free_list;
pool->free_list = (void *)ptr;
}
- pool_avg_add(&pool->needed_avg, pool->used);
+ swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
#else /* release the entry for real to detect use after free */
/* ensure we crash on double free or free of a const area */
HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
pool->allocated--;
pool->used--;
- pool_avg_add(&pool->needed_avg, pool->used);
+ swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
#endif /* DEBUG_UAF */
}
return NULL;
}
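+ /* growing the pool: enter the allocated count with a weight of 1/4 of
+  * the window (POOL_AVG_SAMPLES/4) so the average ramps up quickly on
+  * allocation while still decaying slowly on release */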
- pool_avg_bump(&pool->needed_avg, pool->allocated);
+ swrate_add_scaled(&pool->needed_avg, POOL_AVG_SAMPLES, pool->allocated, POOL_AVG_SAMPLES/4);
ptr = malloc(size + POOL_EXTRA);
if (!ptr) {
return NULL;
}
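+ /* same fast-ramp sampling as above */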
- pool_avg_bump(&pool->needed_avg, pool->allocated);
+ swrate_add_scaled(&pool->needed_avg, POOL_AVG_SAMPLES, pool->allocated, POOL_AVG_SAMPLES/4);
HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
ptr = pool_alloc_area(pool->size + POOL_EXTRA);
#ifdef DEBUG_MEMORY_POOLS
chunk_appendf(&trash, " - Pool %s (%u bytes) : %u allocated (%u bytes), %u used, needed_avg %u, %u failures, %u users, @%p=%02d%s\n",
entry->name, entry->size, entry->allocated,
entry->size * entry->allocated, entry->used,
- pool_avg(entry->needed_avg), entry->failed,
+ swrate_avg(entry->needed_avg, POOL_AVG_SAMPLES), entry->failed,
entry->users, entry, (int)pool_get_index(entry),
(entry->flags & MEM_F_SHARED) ? " [SHARED]" : "");