void pool_fill_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size);
void pool_check_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size);
void pool_refill_local_from_shared(struct pool_head *pool, struct pool_cache_head *pch);
-void pool_put_to_shared_cache(struct pool_head *pool, struct pool_item *item, uint count);
+void pool_put_to_shared_cache(struct pool_head *pool, struct pool_item *item);
/* returns the total number of allocated entries for a pool across all buckets */
static inline uint pool_allocated(const struct pool_head *pool)
return ret;
}
+/* returns the total number of used entries for a pool across all buckets */
+static inline uint pool_used(const struct pool_head *pool)
+{
+ int bucket;
+ uint ret;
+
+ for (bucket = ret = 0; bucket < CONFIG_HAP_POOL_BUCKETS; bucket++)
+ ret += HA_ATOMIC_LOAD(&pool->buckets[bucket].used);
+ return ret;
+}
+
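For readers new to this scheme: the patch shards the old pool->used counter across CONFIG_HAP_POOL_BUCKETS per-bucket counters, so writers only update the bucket their object hashes to and readers sum all buckets. A minimal standalone sketch of the idea, using C11 stdatomic as a stand-in for the HA_ATOMIC_* macros; NB_BUCKETS and pbucket() are simplified assumptions here, not HAProxy's CONFIG_HAP_POOL_BUCKETS or pool_pbucket():

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NB_BUCKETS 8

    struct bucket { atomic_uint allocated; atomic_uint used; };
    struct pool   { struct bucket buckets[NB_BUCKETS]; };

    /* derive a bucket index from the object's address (toy hash) */
    static unsigned pbucket(const void *ptr)
    {
        return ((uintptr_t)ptr >> 6) % NB_BUCKETS;
    }

    /* writer: touches only its object's bucket, no single contended line */
    static void inc_used(struct pool *p, const void *ptr)
    {
        atomic_fetch_add(&p->buckets[pbucket(ptr)].used, 1);
    }

    /* reader: pays one load per bucket and sums, like pool_used() above */
    static unsigned read_used(struct pool *p)
    {
        unsigned ret = 0;
        for (int b = 0; b < NB_BUCKETS; b++)
            ret += atomic_load(&p->buckets[b].used);
        return ret;
    }

    int main(void)
    {
        static int objs[64];
        struct pool p = {0};

        for (int i = 0; i < 64; i++)
            inc_used(&p, &objs[i]);
        printf("used=%u\n", read_used(&p)); /* prints used=64 */
        return 0;
    }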
/* Returns the max number of entries that may be brought back to the pool
* before it's considered as full. Note that it is only usable for releasing
* objects, hence the function assumes that no more than ->used entries will
return 0;
alloc = pool_allocated(pool);
- used = HA_ATOMIC_LOAD(&pool->used);
+ used = pool_used(pool);
if (used < alloc)
used = alloc;
return NULL;
bucket = pool_pbucket(ptr);
- swrate_add_scaled_opportunistic(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used, POOL_AVG_SAMPLES/4);
+ swrate_add_scaled_opportunistic(&pool->needed_avg, POOL_AVG_SAMPLES, pool_used(pool), POOL_AVG_SAMPLES/4);
_HA_ATOMIC_INC(&pool->buckets[bucket].allocated);
- _HA_ATOMIC_INC(&pool->used);
+ _HA_ATOMIC_INC(&pool->buckets[bucket].used);
/* keep track of where the element was allocated from */
POOL_DEBUG_SET_MARK(pool, ptr);
{
uint bucket = pool_pbucket(ptr);
- _HA_ATOMIC_DEC(&pool->used);
+ _HA_ATOMIC_DEC(&pool->buckets[bucket].used);
_HA_ATOMIC_DEC(&pool->buckets[bucket].allocated);
- swrate_add_opportunistic(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
+ swrate_add_opportunistic(&pool->needed_avg, POOL_AVG_SAMPLES, pool_used(pool));
pool_put_to_os_nodec(pool, ptr);
}
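The two hunks above keep the increment and its matching decrement in the same bucket because both derive the index from the object's own address via pool_pbucket(). A hedged sketch of that pairing, reusing the types from the previous example; obj_alloc()/obj_free() are illustrative names, not HAProxy functions:

    #include <stdlib.h>

    /* allocation: account the object in the bucket its address hashes to */
    static void *obj_alloc(struct pool *p, size_t size)
    {
        void *ptr = malloc(size);
        if (ptr) {
            unsigned b = pbucket(ptr);
            atomic_fetch_add(&p->buckets[b].allocated, 1);
            atomic_fetch_add(&p->buckets[b].used, 1);
        }
        return ptr;
    }

    /* release: the same address gives the same bucket, so the counters stay
     * balanced even when alloc and free happen on different threads
     */
    static void obj_free(struct pool *p, void *ptr)
    {
        unsigned b = pbucket(ptr);
        atomic_fetch_sub(&p->buckets[b].used, 1);
        atomic_fetch_sub(&p->buckets[b].allocated, 1);
        free(ptr);
    }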
if (to_free_max > released || cluster) {
/* will never match when global pools are disabled */
+ uint bucket = pool_pbucket(item);
+ _HA_ATOMIC_DEC(&pool->buckets[bucket].used);
+ swrate_add_opportunistic(&pool->needed_avg, POOL_AVG_SAMPLES, pool_used(pool));
+
pi = (struct pool_item *)item;
pi->next = NULL;
pi->down = head;
head = pi;
cluster++;
if (cluster >= CONFIG_HAP_POOL_CLUSTER_SIZE) {
/* enough to make a cluster */
- pool_put_to_shared_cache(pool, head, cluster);
+ pool_put_to_shared_cache(pool, head);
cluster = 0;
head = NULL;
}
/* incomplete cluster left */
if (cluster)
- pool_put_to_shared_cache(pool, head, cluster);
+ pool_put_to_shared_cache(pool, head);
ph->count -= released;
pool_cache_count -= released;
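Context for the hunk above: evicted items are first chained locally through ->down, and the shared free list is only touched once a full cluster is assembled, so contention on the shared head drops by a factor of CONFIG_HAP_POOL_CLUSTER_SIZE. A simplified standalone sketch; CLUSTER_SIZE is a stand-in, and push_cluster() is an assumed shared-list push, sketched after pool_put_to_shared_cache() below:

    #define CLUSTER_SIZE 4 /* stand-in for CONFIG_HAP_POOL_CLUSTER_SIZE */

    struct pool_item {
        struct pool_item *next; /* chains cluster heads in the shared free list */
        struct pool_item *down; /* chains the items inside one cluster */
    };

    void push_cluster(struct pool_item *head); /* assumed shared-list push */

    static void evict(struct pool_item **items, int n)
    {
        struct pool_item *head = NULL;
        int cluster = 0;

        for (int i = 0; i < n; i++) {
            struct pool_item *pi = items[i];

            pi->next = NULL;
            pi->down = head;        /* stack onto the current cluster */
            head = pi;
            if (++cluster >= CLUSTER_SIZE) {
                push_cluster(head); /* one shared-list operation per cluster */
                cluster = 0;
                head = NULL;
            }
        }
        if (cluster)                /* incomplete cluster left */
            push_cluster(head);
    }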
POOL_DEBUG_TRACE_CALLER(pool, item, NULL);
LIST_INSERT(&pch->list, &item->by_pool);
LIST_INSERT(&th_ctx->pool_lru_head, &item->by_lru);
+ _HA_ATOMIC_INC(&pool->buckets[pool_pbucket(item)].used);
count++;
if (unlikely(pool_debugging & POOL_DBG_INTEGRITY))
pool_fill_pattern(pch, item, pool->size);
}
- HA_ATOMIC_ADD(&pool->used, count);
pch->count += count;
pool_cache_count += count;
pool_cache_bytes += count * pool->size;
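Note on the hunk above: the removed HA_ATOMIC_ADD(&pool->used, count) cannot be replaced by one bulk add on a bucket, because the items of a single refill batch may hash to different buckets; each item must be accounted in its own bucket, which is why the increment moved inside the loop. In sketch form, reusing struct pool and pbucket() from the first example:

    /* per-item accounting: one batch can span several buckets, so a single
     * bulk add on a shared counter is no longer possible
     */
    static void account_batch(struct pool *p, void **items, int n)
    {
        for (int i = 0; i < n; i++)
            atomic_fetch_add(&p->buckets[pbucket(items[i])].used, 1);
    }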
* it's wise to add this series of objects there. Both the pool and the item's
* head must be valid.
*/
-void pool_put_to_shared_cache(struct pool_head *pool, struct pool_item *item, uint count)
+void pool_put_to_shared_cache(struct pool_head *pool, struct pool_item *item)
{
struct pool_item *free_list;
- _HA_ATOMIC_SUB(&pool->used, count);
free_list = _HA_ATOMIC_LOAD(&pool->free_list);
do {
while (unlikely(free_list == POOL_BUSY)) {
__ha_barrier_atomic_store();
} while (!_HA_ATOMIC_CAS(&pool->free_list, &free_list, item));
__ha_barrier_atomic_store();
- swrate_add_opportunistic(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
}
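For reference, the push itself is a classic lock-free LIFO: snapshot the head, link the new cluster beneath it, and retry the CAS until no concurrent pusher got in between (the POOL_BUSY spin additionally waits out a sentinel installed during list takeovers). A standalone sketch of that push with C11 atomics, leaving the sentinel out:

    #include <stdatomic.h>

    struct pool_item { struct pool_item *next; struct pool_item *down; };

    static _Atomic(struct pool_item *) shared_free_list;

    void push_cluster(struct pool_item *head)
    {
        struct pool_item *old = atomic_load(&shared_free_list);

        do {
            head->next = old; /* link the new cluster below the current top */
        } while (!atomic_compare_exchange_weak(&shared_free_list, &old, head));
        /* on failure the CAS refreshes `old` with the winner's head, the link
         * is redone and the push retried, so no concurrent push is lost
         */
    }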
/*
list_for_each_entry(entry, &pools, list) {
struct pool_item *temp, *down;
uint allocated = pool_allocated(entry);
+ uint used = pool_used(entry);
while (entry->free_list &&
- (int)(allocated - entry->used) > (int)entry->minavail) {
+ (int)(allocated - used) > (int)entry->minavail) {
temp = entry->free_list;
entry->free_list = temp->next;
for (; temp; temp = down) {
pool_evict_from_local_cache(pool, 1);
pool_flush(pool);
- if (pool->used)
+ if (pool_used(pool))
return pool;
pool->users--;
if (!pool->users) {
pool_info[nbpools].entry = entry;
pool_info[nbpools].alloc_items = alloc_items;
pool_info[nbpools].alloc_bytes = (ulong)entry->size * alloc_items;
- pool_info[nbpools].used_items = entry->used;
+ pool_info[nbpools].used_items = pool_used(entry);
pool_info[nbpools].cached_items = cached;
pool_info[nbpools].need_avg = swrate_avg(entry->needed_avg, POOL_AVG_SAMPLES);
pool_info[nbpools].failed_items = entry->failed;
unsigned long long used = 0;
list_for_each_entry(entry, &pools, list)
- used += entry->used * (ullong)entry->size;
+ used += pool_used(entry) * (ullong)entry->size;
return used;
}