#define POOL_DBG_FAIL_ALLOC 0x00000001 // randomly fail memory allocations
#define POOL_DBG_DONT_MERGE 0x00000002 // do not merge same-size pools
#define POOL_DBG_COLD_FIRST 0x00000004 // pick cold objects first
+#define POOL_DBG_INTEGRITY 0x00000008 // perform integrity checks on cache
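/* Illustrative note (not part of the patch): these flags are bits of the
 * runtime variable <pool_debugging>, tested on the allocation/release paths
 * as in the hunks below, e.g.:
 *
 *   if (unlikely(pool_debugging & POOL_DBG_INTEGRITY))
 *       pool_check_pattern(ph, item, pool->size);
 *
 * Presumably the point is that the integrity checks no longer require a
 * rebuild with DEBUG_POOL_INTEGRITY; that macro now only selects the
 * default value of the flag.
 */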
/* This is the head of a thread-local cache */
unsigned int count; /* number of objects in this pool */
unsigned int tid; /* thread id, for debugging only */
struct pool_head *pool; /* assigned pool, for debugging only */
-#if defined(DEBUG_POOL_INTEGRITY)
ulong fill_pattern; /* pattern used to fill the area on free */
-#endif
} THREAD_ALIGNED(64);
/* This represents one item stored in the thread-local cache. <by_pool> links
void pool_evict_from_local_cache(struct pool_head *pool, int full);
void pool_evict_from_local_caches(void);
void pool_put_to_cache(struct pool_head *pool, void *ptr, const void *caller);
+void pool_fill_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size);
+void pool_check_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size);
#if defined(CONFIG_HAP_NO_GLOBAL_POOLS)
* cache first, then from the second level if it exists.
*/
-#if defined(DEBUG_POOL_INTEGRITY)
-
-/* Updates <pch>'s fill_pattern and fills the free area after <item> with it,
- * up to <size> bytes. The item part is left untouched.
- */
-static inline void pool_fill_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size)
-{
- ulong *ptr = (ulong *)item;
- uint ofs;
- ulong u;
-
- if (size <= sizeof(*item))
- return;
-
- /* Upgrade the fill_pattern to change about half of the bits
- * (to be sure to catch static flag corruption), and apply it.
- */
- u = pch->fill_pattern += ~0UL / 3; // 0x55...55
- ofs = sizeof(*item) / sizeof(*ptr);
- while (ofs < size / sizeof(*ptr))
- ptr[ofs++] = u;
-}
-
-/* check for a pool_cache_item integrity after extracting it from the cache. It
- * must have been previously initialized using pool_fill_pattern(). If any
- * corruption is detected, the function provokes an immediate crash.
- */
-static inline void pool_check_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size)
-{
- const ulong *ptr = (const ulong *)item;
- uint ofs;
- ulong u;
-
- if (size <= sizeof(*item))
- return;
-
- /* let's check that all words past *item are equal */
- ofs = sizeof(*item) / sizeof(*ptr);
- u = ptr[ofs++];
- while (ofs < size / sizeof(*ptr)) {
- if (unlikely(ptr[ofs] != u))
- ABORT_NOW();
- ofs++;
- }
-}
-
-#else
-
-static inline void pool_fill_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size)
-{
-}
-
-static inline void pool_check_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size)
-{
-}
-
-#endif
-
/* Tries to retrieve an object from the local pool cache corresponding to pool
* <pool>. If none is available, tries to allocate from the shared cache, and
* returns NULL if nothing is available.
return NULL;
}
- if (unlikely(pool_debugging & POOL_DBG_COLD_FIRST)) {
+ /* allocate hottest objects first */
+ item = LIST_NEXT(&ph->list, typeof(item), by_pool);
+
+ if (unlikely(pool_debugging & (POOL_DBG_COLD_FIRST|POOL_DBG_INTEGRITY))) {
/* allocate oldest objects first so as to keep them as long as possible
 * in the cache before being reused and maximize the chance to detect
* an overwrite.
*/
- item = LIST_PREV(&ph->list, typeof(item), by_pool);
- } else {
- /* allocate hottest objects first */
- item = LIST_NEXT(&ph->list, typeof(item), by_pool);
+ if (pool_debugging & POOL_DBG_COLD_FIRST)
+ item = LIST_PREV(&ph->list, typeof(item), by_pool);
+
+ if (pool_debugging & POOL_DBG_INTEGRITY)
+ pool_check_pattern(ph, item, pool->size);
}
-#if defined(DEBUG_POOL_INTEGRITY)
- pool_check_pattern(ph, item, pool->size);
-#endif
+
BUG_ON(&item->by_pool == &ph->list);
LIST_DELETE(&item->by_pool);
LIST_DELETE(&item->by_lru);
#endif
#ifdef DEBUG_POOL_INTEGRITY
POOL_DBG_COLD_FIRST |
+#endif
+#ifdef DEBUG_POOL_INTEGRITY
+ POOL_DBG_INTEGRITY |
#endif
0;
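/* Worked example (illustration only): if DEBUG_POOL_INTEGRITY is the only
 * debug option defined at build time, the initializer above reduces to
 *
 *   POOL_DBG_COLD_FIRST | POOL_DBG_INTEGRITY | 0  ==  0x0000000c
 *
 * so the former compile-time behaviour (cold-first allocation plus
 * integrity checks) is kept as the default flag set, while remaining a
 * plain runtime value.
 */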
#ifdef CONFIG_HAP_POOLS
+/* Updates <pch>'s fill_pattern and fills the free area after <item> with it,
+ * up to <size> bytes. The item part is left untouched.
+ */
+void pool_fill_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size)
+{
+ ulong *ptr = (ulong *)item;
+ uint ofs;
+ ulong u;
+
+ if (size <= sizeof(*item))
+ return;
+
+ /* Upgrade the fill_pattern to change about half of the bits
+ * (to be sure to catch static flag corruption), and apply it.
+ */
+ u = pch->fill_pattern += ~0UL / 3; // 0x55...55
+ ofs = sizeof(*item) / sizeof(*ptr);
+ while (ofs < size / sizeof(*ptr))
+ ptr[ofs++] = u;
+}
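/* Minimal standalone sketch (illustration only, not part of the patch):
 * shows how the "~0UL / 3" increment used above makes consecutive fill
 * patterns differ in roughly half or more of their bits. A starting value
 * of 0 is assumed here, matching a zero-initialized pool_cache_head.
 */
#include <stdio.h>

int main(void)
{
	unsigned long pattern = 0;
	int i;

	for (i = 1; i <= 6; i++) {
		pattern += ~0UL / 3;   /* 0x5555555555555555 on 64-bit */
		printf("fill #%d: %#018lx\n", i, pattern);
	}
	/* 64-bit output: 0x5555...55, 0xaaaa...aa, 0xffff...ff,
	 * 0x5555...54, 0xaaaa...a9, 0xffff...fe, ...
	 */
	return 0;
}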
+
+/* Checks a pool_cache_item's integrity after extracting it from the cache. It
+ * must have been previously initialized using pool_fill_pattern(). If any
+ * corruption is detected, the function provokes an immediate crash.
+ */
+void pool_check_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size)
+{
+ const ulong *ptr = (const ulong *)item;
+ uint ofs;
+ ulong u;
+
+ if (size <= sizeof(*item))
+ return;
+
+ /* let's check that all words past *item are equal */
+ ofs = sizeof(*item) / sizeof(*ptr);
+ u = ptr[ofs++];
+ while (ofs < size / sizeof(*ptr)) {
+ if (unlikely(ptr[ofs] != u))
+ ABORT_NOW();
+ ofs++;
+ }
+}
+
/* removes up to <count> items from the end of the local pool cache <ph> for
* pool <pool>. The shared pool is refilled with these objects in the limit
* of the number of acceptable objects, and the rest will be released to the
while (released < count && !LIST_ISEMPTY(&ph->list)) {
item = LIST_PREV(&ph->list, typeof(item), by_pool);
BUG_ON(&item->by_pool == &ph->list);
- pool_check_pattern(ph, item, pool->size);
+ if (unlikely(pool_debugging & POOL_DBG_INTEGRITY))
+ pool_check_pattern(ph, item, pool->size);
LIST_DELETE(&item->by_pool);
LIST_DELETE(&item->by_lru);
LIST_INSERT(&th_ctx->pool_lru_head, &item->by_lru);
POOL_DEBUG_TRACE_CALLER(pool, item, caller);
ph->count++;
- pool_fill_pattern(ph, item, pool->size);
+ if (unlikely(pool_debugging & POOL_DBG_INTEGRITY))
+ pool_fill_pattern(ph, item, pool->size);
pool_cache_count++;
pool_cache_bytes += pool->size;
LIST_INSERT(&pch->list, &item->by_pool);
LIST_INSERT(&th_ctx->pool_lru_head, &item->by_lru);
count++;
- pool_fill_pattern(pch, item, pool->size);
+ if (unlikely(pool_debugging & POOL_DBG_INTEGRITY))
+ pool_fill_pattern(pch, item, pool->size);
}
HA_ATOMIC_ADD(&pool->used, count);
pch->count += count;