void **free_list;
#ifdef CONFIG_HAP_LOCKLESS_POOLS
uintptr_t seq;
- HA_RWLOCK_T flush_lock;
+ HA_SPINLOCK_T flush_lock;
#else
__decl_hathreads(HA_SPINLOCK_T lock); /* the spin lock */
#endif
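For context (this declaration is not part of the hunk above): the cmp and new values handled by the DWCAS calls further down pair the free-list head with this sequence counter so that both are compared and swapped as one unit. In HAProxy the pair is declared along these lines:

    struct pool_free_list {
        void **free_list;   /* snapshot of the free-list head */
        uintptr_t seq;      /* snapshot of the ABA-protection counter */
    };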
cmp.seq = pool->seq;
__ha_barrier_load();
- HA_RWLOCK_RDLOCK(POOL_LOCK, &pool->flush_lock);
cmp.free_list = pool->free_list;
do {
- if (cmp.free_list == NULL) {
- HA_RWLOCK_RDUNLOCK(POOL_LOCK, &pool->flush_lock);
+ if (cmp.free_list == NULL)
return NULL;
- }
new.seq = cmp.seq + 1;
__ha_barrier_load();
new.free_list = *POOL_LINK(pool, cmp.free_list);
} while (HA_ATOMIC_DWCAS((void *)&pool->free_list, (void *)&cmp, (void *)&new) == 0);
__ha_barrier_atomic_store();
- HA_RWLOCK_RDUNLOCK(POOL_LOCK, &pool->flush_lock);
_HA_ATOMIC_ADD(&pool->used, 1);
#ifdef DEBUG_MEMORY_POOLS
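This hunk removes the read lock from the lock-free allocation fast path: the head and the sequence counter are snapshotted, the next link is read through POOL_LINK(), and the double-word CAS retries until no other thread has modified the pair; bumping seq on every update is what prevents a recycled head pointer from being mistaken for an unchanged list (the ABA problem). Below is a stand-alone sketch of that versioned-head technique; to stay portable it packs a 32-bit node index and a 32-bit sequence into one 64-bit C11 atomic instead of using a true double-word CAS, and none of the names are HAProxy's:

    /* Illustrative versioned free-list head. Build with: cc -std=c11 example.c */
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NNODES 4
    #define NIL    0xffffffffu

    struct node { uint32_t next; int payload; };

    static struct node nodes[NNODES];
    static _Atomic uint64_t head;      /* high 32 bits: seq, low 32 bits: node index */

    static uint64_t pack(uint32_t seq, uint32_t idx)
    {
        return ((uint64_t)seq << 32) | idx;
    }

    /* push: make <idx> the new head; the sequence is bumped on every update */
    static void push(uint32_t idx)
    {
        uint64_t old = atomic_load(&head), new;

        do {
            nodes[idx].next = (uint32_t)old;
            new = pack((uint32_t)(old >> 32) + 1, idx);
        } while (!atomic_compare_exchange_weak(&head, &old, new));
    }

    /* pop: same shape as the allocation fast path above: snapshot the
     * {head, seq} pair, read the next link, and retry the CAS until nobody
     * raced with us. A stale head cannot be reinstalled unnoticed because
     * its sequence number no longer matches. */
    static int pop(uint32_t *idx)
    {
        uint64_t old = atomic_load(&head), new;

        do {
            if ((uint32_t)old == NIL)
                return 0;              /* free list is empty */
            new = pack((uint32_t)(old >> 32) + 1, nodes[(uint32_t)old].next);
        } while (!atomic_compare_exchange_weak(&head, &old, new));
        *idx = (uint32_t)old;
        return 1;
    }

    int main(void)
    {
        uint32_t i, idx;

        atomic_store(&head, pack(0, NIL));
        for (i = 0; i < NNODES; i++) {
            nodes[i].payload = (int)i;
            push(i);
        }
        while (pop(&idx))
            printf("popped node %u (payload %d)\n", (unsigned)idx, nodes[idx].payload);
        return 0;
    }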
#ifndef CONFIG_HAP_LOCKLESS_POOLS
HA_SPIN_INIT(&pool->lock);
#else
- HA_RWLOCK_INIT(&pool->flush_lock);
+ HA_SPIN_INIT(&pool->flush_lock);
#endif
}
pool->users++;
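The initialization hunk follows the type change from the first hunk: the flush lock is now set up with HA_SPIN_INIT() when the pool is created, before its user count is bumped. Conceptually a spinlock is just a test-and-set busy-wait loop; a generic C11 sketch of the primitive (illustrative only, not HAProxy's HA_SPIN_* implementation, which also carries debugging instrumentation) could look like this:

    #include <stdatomic.h>

    typedef atomic_flag example_spinlock_t;     /* illustrative name, not HAProxy's type */

    static inline void example_spin_init(example_spinlock_t *l)
    {
        atomic_flag_clear(l);
    }

    static inline void example_spin_lock(example_spinlock_t *l)
    {
        /* busy-wait until the flag is acquired */
        while (atomic_flag_test_and_set_explicit(l, memory_order_acquire))
            ;
    }

    static inline void example_spin_unlock(example_spinlock_t *l)
    {
        atomic_flag_clear_explicit(l, memory_order_release);
    }

    int main(void)
    {
        example_spinlock_t fl_lock;             /* stands in for pool->flush_lock */

        example_spin_init(&fl_lock);
        example_spin_lock(&fl_lock);
        /* ... detach or trim the free list here ... */
        example_spin_unlock(&fl_lock);
        return 0;
    }

The POOL_LOCK argument passed to HA_SPIN_LOCK()/HA_SPIN_UNLOCK() in the hunks below is a lock-class label used by the lock instrumentation; the lock word itself is the pool's flush_lock field.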
if (!pool)
return;
- HA_RWLOCK_WRLOCK(POOL_LOCK, &pool->flush_lock);
+ HA_SPIN_LOCK(POOL_LOCK, &pool->flush_lock);
do {
cmp.free_list = pool->free_list;
cmp.seq = pool->seq;
new.free_list = NULL;
new.seq = cmp.seq + 1;
} while (!_HA_ATOMIC_DWCAS(&pool->free_list, &cmp, &new));
__ha_barrier_atomic_store();
- HA_RWLOCK_WRUNLOCK(POOL_LOCK, &pool->flush_lock);
+ HA_SPIN_UNLOCK(POOL_LOCK, &pool->flush_lock);
next = cmp.free_list;
while (next) {
temp = next;
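In the flush path the spinlock now protects only the detach step: the DWCAS installs a NULL head and a bumped sequence, so cmp.free_list walks away with the whole chain, and the walk begun at the end of this hunk then frees each object after the unlock by following its POOL_LINK(). Below is a stand-alone sketch of that detach-then-free idea; it uses a single atomic exchange on a plain pointer, whereas HAProxy swaps the {head, seq} pair with a DWCAS while holding flush_lock, and every name here is illustrative:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct item { struct item *next; };

    static struct item *_Atomic list_head;

    static void flush_all(void)
    {
        /* one atomic exchange detaches every queued item at once */
        struct item *next = atomic_exchange(&list_head, NULL);

        while (next) {
            struct item *temp = next;   /* same walk shape as in the hunk above */

            next = temp->next;
            free(temp);
        }
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct item *it = malloc(sizeof(*it));

            if (!it)
                break;
            /* single-threaded setup, a plain load/store pair is enough here */
            it->next = atomic_load(&list_head);
            atomic_store(&list_head, it);
        }
        flush_all();
        return 0;
    }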
return;
list_for_each_entry(entry, &pools, list) {
- HA_RWLOCK_WRLOCK(POOL_LOCK, &entry->flush_lock);
+ HA_SPIN_LOCK(POOL_LOCK, &entry->flush_lock);
while ((int)((volatile int)entry->allocated - (volatile int)entry->used) > (int)entry->minavail) {
struct pool_free_list cmp, new;
free(cmp.free_list);
_HA_ATOMIC_SUB(&entry->allocated, 1);
}
- HA_RWLOCK_WRUNLOCK(POOL_LOCK, &entry->flush_lock);
+ HA_SPIN_UNLOCK(POOL_LOCK, &entry->flush_lock);
}
_HA_ATOMIC_STORE(&recurse, 0);
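The return at the top of this last hunk and the final _HA_ATOMIC_STORE(&recurse, 0) bracket the garbage-collection pass with an atomic re-entrancy guard: a caller only proceeds if it manages to flip recurse from 0 to 1, and clears it again when done. (The trimming loop body between the cmp/new declaration and the free(cmp.free_list) call is not shown in this excerpt; it pops one object off entry->free_list with the same sequence-protected DWCAS as the allocation path before freeing it.) A stand-alone C11 illustration of the guard pattern, with illustrative names rather than HAProxy's macros:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int gc_running;          /* 0: idle, 1: a trim pass is in progress */

    static bool try_gc(void)
    {
        int expected = 0;

        /* only proceed if we are the one flipping the flag from 0 to 1 */
        if (atomic_load(&gc_running) || !atomic_compare_exchange_strong(&gc_running, &expected, 1))
            return false;

        /* ... walk the pools and release spare objects here ... */

        atomic_store(&gc_running, 0);      /* matches _HA_ATOMIC_STORE(&recurse, 0) above */
        return true;
    }

    int main(void)
    {
        return try_gc() ? 0 : 1;
    }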