        THREAD_ALIGN(64);
        struct pool_item *free_list; /* list of free shared objects */
-       unsigned int failed; /* failed allocations */
        /* these entries depend on the pointer value, they're used to reduce
         * the contention on fast-changing values. The alignment here is
                unsigned int allocated; /* how many chunks have been allocated */
                unsigned int used; /* how many chunks are currently in use */
                unsigned int needed_avg;/* floating indicator between used and allocated */
+               unsigned int failed; /* failed allocations (indexed by hash of TID) */
        } buckets[CONFIG_HAP_POOL_BUCKETS];
        struct pool_cache_head cache[MAX_THREADS] THREAD_ALIGNED(64); /* pool caches */
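The per-bucket failed counter is bumped from the allocation path further down through pool_tbucket(), whose definition is not part of this excerpt; per the field comment it is derived from the calling thread's ID. A minimal sketch only of such a thread-to-bucket mapping, assuming tid holds the current thread ID (the real helper may hash the TID rather than take a plain modulo):

/* Sketch only, not from this patch: pick a bucket for the current thread
 * so that concurrent failure accounting from different threads tends to
 * land on different cache lines.
 */
static inline uint pool_tbucket(void)
{
        return tid % CONFIG_HAP_POOL_BUCKETS;
}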
        return ret;
}
+/* returns the total number of failed allocations for a pool across all buckets */
+static inline uint pool_failed(const struct pool_head *pool)
+{
+       int bucket;
+       uint ret;
+
+       for (bucket = ret = 0; bucket < CONFIG_HAP_POOL_BUCKETS; bucket++)
+               ret += HA_ATOMIC_LOAD(&pool->buckets[bucket].failed);
+       return ret;
+}
+
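The new helper mirrors the per-bucket accessors already called in the dump code below (pool_used(), pool_needed_avg()), which are assumed to sum their own bucket field in the same way; a sketch of pool_used() under that assumption:

/* Assumed sketch, not part of this patch: objects currently in use,
 * summed across all buckets, following the same pattern as pool_failed().
 */
static inline uint pool_used(const struct pool_head *pool)
{
        int bucket;
        uint ret;

        for (bucket = ret = 0; bucket < CONFIG_HAP_POOL_BUCKETS; bucket++)
                ret += HA_ATOMIC_LOAD(&pool->buckets[bucket].used);
        return ret;
}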
/* Returns the max number of entries that may be brought back to the pool
 * before it's considered as full. Note that it is only usable for releasing
 * objects, hence the function assumes that no more than ->used entries will
                ptr = pool_alloc_area(pool->alloc_sz);
                if (ptr)
                        return ptr;
-               _HA_ATOMIC_INC(&pool->failed);
+               _HA_ATOMIC_INC(&pool->buckets[pool_tbucket()].failed);
        }
        activity[tid].pool_fail++;
        return NULL;
                pool_info[nbpools].used_items = pool_used(entry);
                pool_info[nbpools].cached_items = cached;
                pool_info[nbpools].need_avg = swrate_avg(pool_needed_avg(entry), POOL_AVG_SAMPLES);
-               pool_info[nbpools].failed_items = entry->failed;
+               pool_info[nbpools].failed_items = pool_failed(entry);
                nbpools++;
        }
        int failed = 0;
        list_for_each_entry(entry, &pools, list)
-               failed += entry->failed;
+               failed += pool_failed(entry);
        return failed;
}