struct list by_lru; /* link to objects by LRU order */
};
+/* This structure is used to represent an element in the pool's shared
+ * free_list.
+ */
+struct pool_item {
+ struct pool_item *next;
+};
+
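For illustration: the struct above works because a freed object's own storage doubles as the list cell, its first pointer-sized word becoming the next link. A minimal single-threaded sketch of such an intrusive free list (hypothetical names pool_head_sketch, push_free and pop_free; not taken from the patch) could look like this:

struct pool_head_sketch {
        struct pool_item *free_list;     /* head of the intrusive LIFO free list */
};

/* push: reuse the freed object's own memory as the list cell */
static void push_free(struct pool_head_sketch *p, void *obj)
{
        struct pool_item *it = obj;

        it->next = p->free_list;
        p->free_list = it;
}

/* pop: detach the head; its memory is handed back to the caller as-is */
static void *pop_free(struct pool_head_sketch *p)
{
        struct pool_item *it = p->free_list;

        if (it)
                p->free_list = it->next;
        return it;
}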
/* This describes a complete pool, with its status, usage statistics and the
* thread-local caches if any. Even if pools are disabled, these descriptors
* are valid and are used at least to get names and sizes. For small builds,
* alignment could be removed.
*/
struct pool_head {
- void **free_list;
+ struct pool_item *free_list; /* list of free shared objects */
unsigned int used; /* how many chunks are currently in use */
unsigned int needed_avg;/* floating indicator between used and allocated */
unsigned int allocated; /* how many chunks have been allocated */
/* ignored without shared pools */
}
-static inline void pool_put_to_shared_cache(struct pool_head *pool, void *item)
+static inline void pool_put_to_shared_cache(struct pool_head *pool, struct pool_item *item)
{
/* ignored without shared pools */
}
#else /* CONFIG_HAP_NO_GLOBAL_POOLS */
void pool_refill_local_from_shared(struct pool_head *pool, struct pool_cache_head *pch);
-void pool_put_to_shared_cache(struct pool_head *pool, void *item);
+void pool_put_to_shared_cache(struct pool_head *pool, struct pool_item *item);
/* returns true if the pool is considered to have too many free objects */
static inline int pool_is_crowded(const struct pool_head *pool)
if (unlikely(pool_is_crowded(pool)))
pool_free_nocache(pool, item);
else
- pool_put_to_shared_cache(pool, item);
+ pool_put_to_shared_cache(pool, (struct pool_item *)item);
}
}
if (unlikely(pool_is_crowded(pool)))
pool_free_nocache(pool, item);
else
- pool_put_to_shared_cache(pool, item);
+ pool_put_to_shared_cache(pool, (struct pool_item *)item);
} while (pool_cache_bytes > CONFIG_HAP_POOL_CACHE_SIZE * 7 / 8);
}
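As a rough worked example, assuming the common default of CONFIG_HAP_POOL_CACHE_SIZE = 524288 bytes (512 KiB): the loop above keeps evicting until pool_cache_bytes falls below 524288 * 7 / 8 = 458752 bytes (448 KiB), i.e. it releases roughly one eighth of the local cache before stopping.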
void pool_refill_local_from_shared(struct pool_head *pool, struct pool_cache_head *pch)
{
struct pool_cache_item *item;
- void *ret;
+ struct pool_item *ret;
/* we'll need to reference the first element to figure the next one. We
* must temporarily lock it so that nobody allocates then releases it,
}
/* this releases the lock */
- HA_ATOMIC_STORE(&pool->free_list, *(void **)ret);
+ HA_ATOMIC_STORE(&pool->free_list, ret->next);
HA_ATOMIC_INC(&pool->used);
/* keep track of where the element was allocated from */
POOL_DEBUG_SET_MARK(pool, ret);
/* now store the retrieved object into the local cache */
- item = ret;
+ item = (struct pool_cache_item *)ret;
LIST_INSERT(&pch->list, &item->by_pool);
LIST_INSERT(&th_ctx->pool_lru_head, &item->by_lru);
pch->count++;
* Both the pool and the item must be valid. Use pool_free() for normal
* operations.
*/
-void pool_put_to_shared_cache(struct pool_head *pool, void *item)
+void pool_put_to_shared_cache(struct pool_head *pool, struct pool_item *item)
{
- void **free_list;
+ struct pool_item *free_list;
_HA_ATOMIC_DEC(&pool->used);
free_list = _HA_ATOMIC_LOAD(&pool->free_list);
__ha_cpu_relax();
free_list = _HA_ATOMIC_LOAD(&pool->free_list);
}
- _HA_ATOMIC_STORE((void **)item, (void *)free_list);
+ _HA_ATOMIC_STORE(&item->next, free_list);
__ha_barrier_atomic_store();
} while (!_HA_ATOMIC_CAS(&pool->free_list, &free_list, item));
__ha_barrier_atomic_store();
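The CAS loop above is a classic lock-free LIFO push: link the item to the current head first, then publish it atomically, retrying whenever another thread moved the head in the meantime. As a rough sketch in standard C11 atomics rather than HAProxy's _HA_ATOMIC_* macros, reusing struct pool_item from the patch (hypothetical name push_item, and leaving out the retry/relax handling of a busy head seen above):

#include <stdatomic.h>

static void push_item(_Atomic(struct pool_item *) *head, struct pool_item *item)
{
        struct pool_item *old = atomic_load(head);

        do {
                item->next = old; /* link to the current head before publishing */
        } while (!atomic_compare_exchange_weak(head, &old, item));
        /* on failure, 'old' was reloaded with the new head and we retry */
}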
*/
void pool_flush(struct pool_head *pool)
{
- void *next, *temp;
+ struct pool_item *next, *temp;
if (!pool)
return;
while (next) {
temp = next;
- next = *(void **)temp;
+ next = temp->next;
pool_put_to_os(pool, temp);
}
/* here, we should have pool->allocated == pool->used */
thread_isolate();
list_for_each_entry(entry, &pools, list) {
- void *temp;
- //qfprintf(stderr, "Flushing pool %s\n", entry->name);
+ struct pool_item *temp;
+
while (entry->free_list &&
(int)(entry->allocated - entry->used) > (int)entry->minavail) {
temp = entry->free_list;
- entry->free_list = *(void **)temp;
+ entry->free_list = temp->next;
pool_put_to_os(entry, temp);
}
}