struct list by_lru; /* link to objects by LRU order */
};
-extern THREAD_LOCAL struct pool_cache_head pool_cache[MAX_BASE_POOLS];
-extern THREAD_LOCAL struct list pool_lru_head; /* oldest objects */
+extern struct pool_cache_head pool_cache[][MAX_BASE_POOLS];
extern THREAD_LOCAL size_t pool_cache_bytes; /* total cache size */
extern THREAD_LOCAL size_t pool_cache_count; /* #cache objects */
{
ssize_t idx = pool_get_index(pool);
struct pool_cache_item *item;
+ struct pool_cache_head *ph;
/* pool not in cache */
if (idx < 0)
return NULL;
- /* never allocated or empty */
- if (pool_cache[idx].list.n == NULL || LIST_ISEMPTY(&pool_cache[idx].list))
- return NULL;
+ ph = &pool_cache[tid][idx];
+ if (LIST_ISEMPTY(&ph->list))
+ return NULL; // empty
- item = LIST_NEXT(&pool_cache[idx].list, typeof(item), by_pool);
- pool_cache[idx].count--;
- pool_cache_bytes -= pool_cache[idx].size;
+ item = LIST_NEXT(&ph->list, typeof(item), by_pool);
+ ph->count--;
+ pool_cache_bytes -= ph->size;
pool_cache_count--;
LIST_DEL(&item->by_pool);
LIST_DEL(&item->by_lru);
*/
if (idx < 0 ||
(pool_cache_bytes > CONFIG_HAP_POOL_CACHE_SIZE * 3 / 4 &&
- pool_cache[idx].count >= 16 + pool_cache_count / 8)) {
+ pool_cache[tid][idx].count >= 16 + pool_cache_count / 8)) {
__pool_free(pool, ptr);
return;
}
struct pool_head pool_base_start[MAX_BASE_POOLS] = { };
unsigned int pool_base_count = 0;
-THREAD_LOCAL struct pool_cache_head pool_cache[MAX_BASE_POOLS] = { };
-THREAD_LOCAL struct list pool_lru_head = { }; /* oldest objects */
+/* These ones are initialized per-thread on startup by init_pools() */
+struct pool_cache_head pool_cache[MAX_THREADS][MAX_BASE_POOLS];
+static struct list pool_lru_head[MAX_THREADS]; /* oldest objects */
THREAD_LOCAL size_t pool_cache_bytes = 0; /* total cache size */
THREAD_LOCAL size_t pool_cache_count = 0; /* #cache objects */
void __pool_put_to_cache(struct pool_head *pool, void *ptr, ssize_t idx)
{
struct pool_cache_item *item = (struct pool_cache_item *)ptr;
- struct pool_cache_head *ph = &pool_cache[idx];
-
- /* never allocated or empty */
- if (unlikely(ph->list.n == NULL)) {
- LIST_INIT(&ph->list);
- ph->size = pool->size;
- if (pool_lru_head.n == NULL)
- LIST_INIT(&pool_lru_head);
- }
+ struct pool_cache_head *ph = &pool_cache[tid][idx];
LIST_ADD(&ph->list, &item->by_pool);
- LIST_ADD(&pool_lru_head, &item->by_lru);
+ LIST_ADD(&pool_lru_head[tid], &item->by_lru);
ph->count++;
pool_cache_count++;
pool_cache_bytes += ph->size;
return;
do {
- item = LIST_PREV(&pool_lru_head, struct pool_cache_item *, by_lru);
+ item = LIST_PREV(&pool_lru_head[tid], struct pool_cache_item *, by_lru);
/* note: by definition we remove oldest objects so they also are the
* oldest in their own pools, thus their next is the pool's head.
*/
ph->count--;
pool_cache_count--;
pool_cache_bytes -= ph->size;
- __pool_free(pool_base_start + (ph - pool_cache), item);
+ __pool_free(pool_base_start + (ph - pool_cache[tid]), item);
} while (pool_cache_bytes > CONFIG_HAP_POOL_CACHE_SIZE * 7 / 8);
}
}
}
+/* Initializes the per-thread pool caches at startup: resets every
+ * pool_cache[thr][idx] list head and size, and every per-thread LRU head
+ * in pool_lru_head[], so no thread ever observes an uninitialized list.
+ * Registered via INITCALL0 below to run during the STG_PREPARE stage,
+ * i.e. before any pool allocation happens.
+ */
+static void init_pools(void)
+{
+	int thr, idx;
+
+	for (thr = 0; thr < MAX_THREADS; thr++) {
+		for (idx = 0; idx < MAX_BASE_POOLS; idx++) {
+			LIST_INIT(&pool_cache[thr][idx].list);
+			pool_cache[thr][idx].size = 0;
+		}
+		LIST_INIT(&pool_lru_head[thr]);
+	}
+}
+
+INITCALL0(STG_PREPARE, init_pools);
/* register cli keywords */
static struct cli_kw_list cli_kws = {{ },{