void pool_refill_local_from_shared(struct pool_head *pool, struct pool_cache_head *pch);
void pool_put_to_shared_cache(struct pool_head *pool, struct pool_item *item, uint count);
+/* returns the total number of allocated entries for a pool across all buckets */
+static inline uint pool_allocated(const struct pool_head *pool)
+{
+	int bucket;
+	uint ret;
+
+	for (bucket = ret = 0; bucket < CONFIG_HAP_POOL_BUCKETS; bucket++)
+		ret += HA_ATOMIC_LOAD(&pool->buckets[bucket].allocated);
+	return ret;
+}
+
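The helper above is the read side of a sharded counter: the hot `allocated` field is split into CONFIG_HAP_POOL_BUCKETS per-bucket slots so that concurrent allocations and frees spread their atomic updates instead of all hitting one field, and the bucket an object belongs to is derived from its pointer via pool_pbucket(), which is why the increment at allocation time and the decrement at release time in the hunks below always land on the same slot. A minimal self-contained sketch of the pattern, using C11 atomics and a toy pointer hash as stand-ins for HA_ATOMIC_* and pool_pbucket() (whose real implementation is not shown in this patch):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NB_BUCKETS 8	/* stand-in for CONFIG_HAP_POOL_BUCKETS */

struct demo_pool {
	struct {
		atomic_uint allocated;	/* this bucket's share of the total */
	} buckets[NB_BUCKETS];
};

/* toy pointer-to-bucket mapping; the real pool_pbucket() may hash differently */
static unsigned demo_pbucket(const void *ptr)
{
	return ((uintptr_t)ptr >> 6) % NB_BUCKETS;
}

/* sums all buckets, the equivalent of pool_allocated() above */
static unsigned demo_allocated(struct demo_pool *pool)
{
	unsigned total = 0;

	for (int b = 0; b < NB_BUCKETS; b++)
		total += atomic_load(&pool->buckets[b].allocated);
	return total;
}

int main(void)
{
	static struct demo_pool pool;
	void *obj[100];

	for (int i = 0; i < 100; i++) {
		obj[i] = malloc(64);
		atomic_fetch_add(&pool.buckets[demo_pbucket(obj[i])].allocated, 1);
	}
	printf("allocated=%u\n", demo_allocated(&pool));	/* prints 100 */

	for (int i = 0; i < 100; i++) {
		/* same pointer => same bucket, so the decrement undoes the increment */
		atomic_fetch_sub(&pool.buckets[demo_pbucket(obj[i])].allocated, 1);
		free(obj[i]);
	}
	printf("allocated=%u\n", demo_allocated(&pool));	/* prints 0 */
	return 0;
}

Writers touch a single bucket, while readers pay a walk over all buckets; the rest of the patch applies that trade-off consistently by summing only where a total is actually needed.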
/* Returns the max number of entries that may be brought back to the pool
 * before it's considered as full. Note that it is only usable for releasing
 * objects, hence the function assumes that no more than ->used entries will
	if (unlikely(pool_debugging & (POOL_DBG_NO_CACHE|POOL_DBG_NO_GLOBAL)))
		return 0;
-	alloc = HA_ATOMIC_LOAD(&pool->allocated);
+	alloc = pool_allocated(pool);
	used = HA_ATOMIC_LOAD(&pool->used);
	if (used < alloc)
		used = alloc;
 */
void *pool_get_from_os_noinc(struct pool_head *pool)
{
-	if (!pool->limit || pool->allocated < pool->limit) {
+	if (!pool->limit || pool_allocated(pool) < pool->limit) {
		void *ptr;
		if (pool_debugging & POOL_DBG_UAF)
void *pool_alloc_nocache(struct pool_head *pool)
{
	void *ptr = NULL;
+	uint bucket;
	ptr = pool_get_from_os_noinc(pool);
	if (!ptr)
		return NULL;
+	bucket = pool_pbucket(ptr);
	swrate_add_scaled_opportunistic(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used, POOL_AVG_SAMPLES/4);
-	_HA_ATOMIC_INC(&pool->allocated);
+	_HA_ATOMIC_INC(&pool->buckets[bucket].allocated);
	_HA_ATOMIC_INC(&pool->used);
	/* keep track of where the element was allocated from */
 */
void pool_free_nocache(struct pool_head *pool, void *ptr)
{
+	uint bucket = pool_pbucket(ptr);
+
	_HA_ATOMIC_DEC(&pool->used);
-	_HA_ATOMIC_DEC(&pool->allocated);
+	_HA_ATOMIC_DEC(&pool->buckets[bucket].allocated);
+
	swrate_add_opportunistic(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
	pool_put_to_os_nodec(pool, ptr);
}
void pool_flush(struct pool_head *pool)
{
	struct pool_item *next, *temp, *down;
-	int released = 0;
	if (!pool || (pool_debugging & (POOL_DBG_NO_CACHE|POOL_DBG_NO_GLOBAL)))
		return;
		temp = next;
		next = temp->next;
		for (; temp; temp = down) {
+			uint bucket = pool_pbucket(temp);
+
			down = temp->down;
-			released++;
+			_HA_ATOMIC_DEC(&pool->buckets[bucket].allocated);
+
			pool_put_to_os_nodec(pool, temp);
		}
	}
-
-	HA_ATOMIC_SUB(&pool->allocated, released);
-	/* here, we should have pool->allocated == pool->used */
+	/* here, we should have pool_allocated(pool) == pool->used */
}
	list_for_each_entry(entry, &pools, list) {
		struct pool_item *temp, *down;
-		int released = 0;
+		uint allocated = pool_allocated(entry);
		while (entry->free_list &&
-		       (int)(entry->allocated - entry->used) > (int)entry->minavail) {
+		       (int)(allocated - entry->used) > (int)entry->minavail) {
			temp = entry->free_list;
			entry->free_list = temp->next;
			for (; temp; temp = down) {
+				uint bucket = pool_pbucket(temp);
				down = temp->down;
-				released++;
+				allocated--;
+				_HA_ATOMIC_DEC(&entry->buckets[bucket].allocated);
				pool_put_to_os_nodec(entry, temp);
			}
		}
-
-		_HA_ATOMIC_SUB(&entry->allocated, released);
	}
	trim_all_pools();
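In both release paths above the bookkeeping changes shape: the old code counted released objects locally and applied one bulk HA_ATOMIC_SUB() to pool->allocated at the end, which has no single destination once the counter is spread over buckets, so each object now decrements the counter of its own bucket as the free list is walked. pool_gc() additionally keeps the pool_allocated() sum in a local `allocated` snapshot and decrements it in step, so the while() condition does not have to re-sum all buckets on every iteration. A small self-contained sketch of that snapshot-and-decrement loop (names and the hash are illustrative, not HAProxy's):

#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

#define NB_BUCKETS 8	/* stand-in for CONFIG_HAP_POOL_BUCKETS */

struct demo_item {
	struct demo_item *next;
};

struct demo_pool {
	struct demo_item *free_list;
	unsigned used;
	unsigned minavail;
	struct { atomic_uint allocated; } buckets[NB_BUCKETS];
};

static unsigned demo_pbucket(const void *ptr)
{
	return ((uintptr_t)ptr >> 6) % NB_BUCKETS;	/* toy hash for the example */
}

static unsigned demo_allocated(struct demo_pool *p)
{
	unsigned total = 0;

	for (int b = 0; b < NB_BUCKETS; b++)
		total += atomic_load(&p->buckets[b].allocated);
	return total;
}

/* Releases surplus free objects the way the pool_gc() hunk does: sum the
 * buckets once into a local snapshot, then decrement both the snapshot and
 * the owning bucket for every object handed back, so the loop condition
 * never needs to re-sum the buckets. */
static void demo_release_surplus(struct demo_pool *p)
{
	unsigned allocated = demo_allocated(p);	/* one-time snapshot */

	while (p->free_list && (int)(allocated - p->used) > (int)p->minavail) {
		struct demo_item *it = p->free_list;

		p->free_list = it->next;
		allocated--;
		atomic_fetch_sub(&p->buckets[demo_pbucket(it)].allocated, 1);
		free(it);	/* the real code hands the object back to the OS here */
	}
}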
	int nbpools, i;
	unsigned long long cached_bytes = 0;
	uint cached = 0;
+	uint alloc_items;
	allocated = used = nbpools = 0;
		if (nbpools >= POOLS_MAX_DUMPED_ENTRIES)
			break;
+		alloc_items = pool_allocated(entry);
		/* do not dump unused entries when sorting by usage */
-		if (by_what == 3 && !entry->allocated)
+		if (by_what == 3 && !alloc_items)
			continue;
		/* verify the pool name if a prefix is requested */
				cached += entry->cache[i].count;
		}
		pool_info[nbpools].entry = entry;
-		pool_info[nbpools].alloc_items = entry->allocated;
-		pool_info[nbpools].alloc_bytes = (ulong)entry->size * entry->allocated;
+		pool_info[nbpools].alloc_items = alloc_items;
+		pool_info[nbpools].alloc_bytes = (ulong)entry->size * alloc_items;
		pool_info[nbpools].used_items = entry->used;
		pool_info[nbpools].cached_items = cached;
		pool_info[nbpools].need_avg = swrate_avg(entry->needed_avg, POOL_AVG_SAMPLES);
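One small point in the dump hunk above: the bucket sum is taken once per pool into alloc_items and reused for both the item and the byte figures, so each dump line stays internally consistent even if other threads allocate or free while the buckets are being walked, and the walk itself is not paid twice. The same read-once-then-derive pattern in a tiny self-contained form (field and function names are illustrative, not HAProxy's):

#include <stdatomic.h>
#include <stdio.h>

#define NB_BUCKETS 8	/* stand-in for CONFIG_HAP_POOL_BUCKETS */

struct demo_pool {
	const char *name;
	unsigned size;	/* object size in bytes */
	struct { atomic_uint allocated; } buckets[NB_BUCKETS];
};

static unsigned demo_allocated(struct demo_pool *p)
{
	unsigned total = 0;

	for (int b = 0; b < NB_BUCKETS; b++)
		total += atomic_load(&p->buckets[b].allocated);
	return total;
}

static void demo_dump_line(struct demo_pool *p)
{
	unsigned alloc_items = demo_allocated(p);	/* single snapshot per pool */

	/* items and bytes are derived from the same snapshot, so they cannot
	 * disagree with each other even under concurrent allocations */
	printf("%-12s items=%u bytes=%lu\n",
	       p->name, alloc_items, (unsigned long)p->size * alloc_items);
}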
	unsigned long long allocated = 0;
	list_for_each_entry(entry, &pools, list)
-		allocated += entry->allocated * (ullong)entry->size;
+		allocated += pool_allocated(entry) * (ullong)entry->size;
	return allocated;
}