void *p;
HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
- if ((p = pool->free_list) != NULL) {
+ if ((p = pool->free_list) != NULL)
pool->free_list = *POOL_LINK(pool, p);
- pool->used++;
- }
HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
+ if (p)
+ _HA_ATOMIC_INC(&pool->used);
#ifdef DEBUG_MEMORY_POOLS
if (p) {
static inline void pool_put_to_shared_cache(struct pool_head *pool, void *ptr)
{
#ifndef DEBUG_UAF /* normal pool behaviour */
+ _HA_ATOMIC_DEC(&pool->used);
HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
- pool->used--;
if (pool_is_crowded(pool)) {
pool_free_area(ptr, pool->size + POOL_EXTRA);
- pool->allocated--;
+ _HA_ATOMIC_DEC(&pool->allocated);
} else {
*POOL_LINK(pool, ptr) = (void *)pool->free_list;
pool->free_list = (void *)ptr;
/* ensure we crash on double free or free of a const area */
*(uint32_t *)ptr = 0xDEADADD4;
pool_free_area(ptr, pool->size + POOL_EXTRA);
- HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
- pool->allocated--;
- pool->used--;
+ _HA_ATOMIC_DEC(&pool->allocated);
+ _HA_ATOMIC_DEC(&pool->used);
swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
- HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
#endif /* DEBUG_UAF */
}
{
struct pool_free_list cmp, new;
void **next, *temp;
- int removed = 0;
if (!pool)
return;
while (next) {
temp = next;
next = *POOL_LINK(pool, temp);
- removed++;
pool_free_area(temp, pool->size + POOL_EXTRA);
+ _HA_ATOMIC_DEC(&pool->allocated);
}
pool->free_list = next;
- _HA_ATOMIC_SUB(&pool->allocated, removed);
/* here, we should have pool->allocated == pool->used */
}
break;
}
pool->free_list = *POOL_LINK(pool, temp);
- pool->allocated--;
HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
pool_free_area(temp, pool->size + POOL_EXTRA);
+ _HA_ATOMIC_DEC(&pool->allocated);
}
/* here, we should have pool->allocated == pool->used */
}
(int)(entry->allocated - entry->used) > (int)entry->minavail) {
temp = entry->free_list;
entry->free_list = *POOL_LINK(entry, temp);
- entry->allocated--;
pool_free_area(temp, entry->size + POOL_EXTRA);
+ _HA_ATOMIC_DEC(&entry->allocated);
}
}