THREAD_ALIGN(64);
struct pool_item *free_list; /* list of free shared objects */
- unsigned int needed_avg;/* floating indicator between used and allocated */
unsigned int failed; /* failed allocations */
/* these entries depend on the pointer value, they're used to reduce
THREAD_ALIGN(64);
unsigned int allocated; /* how many chunks have been allocated */
unsigned int used; /* how many chunks are currently in use */
+ unsigned int needed_avg;/* floating indicator between used and allocated */
} buckets[CONFIG_HAP_POOL_BUCKETS];
struct pool_cache_head cache[MAX_THREADS] THREAD_ALIGNED(64); /* pool caches */
return ret;
}
+/* Returns the raw total number of needed entries across all buckets. It must
+ * be passed to swrate_avg() to get something usable.
+ */
+static inline uint pool_needed_avg(const struct pool_head *pool)
+{
+ int bucket;
+ uint ret;
+
+ for (bucket = ret = 0; bucket < CONFIG_HAP_POOL_BUCKETS; bucket++)
+ ret += HA_ATOMIC_LOAD(&pool->buckets[bucket].needed_avg);
+ return ret;
+}
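
For illustration only (not part of the patch), a minimal sketch of how the raw sum is meant to be consumed: the accumulators are kept in swrate's internal scale, so callers apply swrate_avg() once over the sum before comparing it with object counts, as pool_releasable() and the stats dump do below. The wrapper name pool_needed_avg_smoothed is hypothetical.

static inline uint pool_needed_avg_smoothed(const struct pool_head *pool)
{
	/* hypothetical helper: convert the raw per-bucket sum into an
	 * object count by applying the sliding-window average once
	 */
	return swrate_avg(pool_needed_avg(pool), POOL_AVG_SAMPLES);
}
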
+
/* Returns the max number of entries that may be brought back to the pool
* before it's considered as full. Note that it is only usable for releasing
* objects, hence the function assumes that no more than ->used entries will
static inline uint pool_releasable(const struct pool_head *pool)
{
uint alloc, used;
+ uint needed_raw;
if (unlikely(pool_debugging & (POOL_DBG_NO_CACHE|POOL_DBG_NO_GLOBAL)))
return 0;
if (used < alloc)
used = alloc;
- if (alloc < swrate_avg(pool->needed_avg + pool->needed_avg / 4, POOL_AVG_SAMPLES))
+ needed_raw = pool_needed_avg(pool);
+ if (alloc < swrate_avg(needed_raw + needed_raw / 4, POOL_AVG_SAMPLES))
return used; // less than needed is allocated, can release everything
if ((uint)(alloc - used) < pool->minavail)
return NULL;
bucket = pool_pbucket(ptr);
- swrate_add_scaled_opportunistic(&pool->needed_avg, POOL_AVG_SAMPLES, pool_used(pool), POOL_AVG_SAMPLES/4);
+ swrate_add_scaled_opportunistic(&pool->buckets[bucket].needed_avg, POOL_AVG_SAMPLES, pool->buckets[bucket].used, POOL_AVG_SAMPLES/4);
_HA_ATOMIC_INC(&pool->buckets[bucket].allocated);
_HA_ATOMIC_INC(&pool->buckets[bucket].used);
_HA_ATOMIC_DEC(&pool->buckets[bucket].used);
_HA_ATOMIC_DEC(&pool->buckets[bucket].allocated);
- swrate_add_opportunistic(&pool->needed_avg, POOL_AVG_SAMPLES, pool_used(pool));
+ swrate_add_opportunistic(&pool->buckets[bucket].needed_avg, POOL_AVG_SAMPLES, pool->buckets[bucket].used);
pool_put_to_os_nodec(pool, ptr);
}
/* will never match when global pools are disabled */
uint bucket = pool_pbucket(item);
_HA_ATOMIC_DEC(&pool->buckets[bucket].used);
- swrate_add_opportunistic(&pool->needed_avg, POOL_AVG_SAMPLES, pool_used(pool));
+ swrate_add_opportunistic(&pool->buckets[bucket].needed_avg, POOL_AVG_SAMPLES, pool->buckets[bucket].used);
pi = (struct pool_item *)item;
pi->next = NULL;
pool_info[nbpools].alloc_bytes = (ulong)entry->size * alloc_items;
pool_info[nbpools].used_items = pool_used(entry);
pool_info[nbpools].cached_items = cached;
- pool_info[nbpools].need_avg = swrate_avg(entry->needed_avg, POOL_AVG_SAMPLES);
+ pool_info[nbpools].need_avg = swrate_avg(pool_needed_avg(entry), POOL_AVG_SAMPLES);
pool_info[nbpools].failed_items = entry->failed;
nbpools++;
}
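
A closing note on why the split preserves the reported value (reasoning, not part of the patch): each swrate accumulator converges to roughly POOL_AVG_SAMPLES times the value it is fed, so, assuming the buckets are updated at comparable rates,

	sum over b of buckets[b].needed_avg  ~=  POOL_AVG_SAMPLES * pool_used()

which is the same quantity the removed per-pool needed_avg tracked. One swrate_avg() call over the sum therefore still yields a usable estimate, while each atomic update now only touches its own bucket's cache line, consistent with the THREAD_ALIGN(64) per-bucket layout above.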