__ha_barrier_load();
new.free_list = *POOL_LINK(pool, cmp.free_list);
} while (__ha_cas_dw((void *)&pool->free_list, (void *)&cmp, (void *)&new) == 0);
+ __ha_barrier_atomic_store();
- HA_ATOMIC_ADD(&pool->used, 1);
+ _HA_ATOMIC_ADD(&pool->used, 1);
#ifdef DEBUG_MEMORY_POOLS
/* keep track of where the element was allocated from */
*POOL_LINK(pool, cmp.free_list) = (void *)pool;
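The hunk above is the consumer side of the lockless pool: the list head travels together with a sequence counter through a double-word CAS (__ha_cas_dw), so a pop racing with a free-and-reuse of the same node fails instead of corrupting the list (the classic ABA problem), and the new __ha_barrier_atomic_store() keeps the relaxed CAS ordered before the relaxed counter update that follows. A minimal standalone sketch of that pattern in C11 atomics, with illustrative types and names rather than HAProxy's:

    #include <stdatomic.h>
    #include <stddef.h>

    struct node { struct node *next; };

    struct head {
        struct node *first;
        unsigned long seq;      /* bumped on every successful pop */
    };

    /* double-width head; the CAS below needs e.g. cmpxchg16b on x86-64
     * (link with -latomic under GCC/Clang) */
    static _Atomic struct head stack_head;

    static struct node *pop(void)
    {
        struct head cmp = atomic_load(&stack_head);
        struct head new;

        do {
            if (cmp.first == NULL)
                return NULL;
            /* cmp.first may already have been recycled by another thread;
             * reading ->next is tolerable only because the memory stays
             * mapped, and a stale read makes the seq check fail the CAS */
            new.first = cmp.first->next;
            new.seq   = cmp.seq + 1;
        } while (!atomic_compare_exchange_weak(&stack_head, &cmp, new));
        return cmp.first;
    }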
do {
*POOL_LINK(pool, ptr) = (void *)free_list;
__ha_barrier_store();
- } while (!HA_ATOMIC_CAS(&pool->free_list, &free_list, ptr));
- HA_ATOMIC_SUB(&pool->used, 1);
+ } while (!_HA_ATOMIC_CAS(&pool->free_list, &free_list, ptr));
+ __ha_barrier_atomic_store();
+ _HA_ATOMIC_SUB(&pool->used, 1);
}
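On the producer side, the freed object's link must be visible to other threads before the object itself becomes the new head; that is the job of __ha_barrier_store() ahead of the CAS, and the patch then swaps the CAS and the counter update to their relaxed _HA_ variants with an explicit fence in between. The same publish-with-release idea in plain C11, as a standalone sketch rather than HAProxy's macros:

    #include <stdatomic.h>

    struct node { struct node *next; };

    static void push(struct node *_Atomic *head, struct node *n)
    {
        struct node *first = atomic_load_explicit(head, memory_order_relaxed);

        do {
            n->next = first;    /* link the node before publishing it */
        } while (!atomic_compare_exchange_weak_explicit(head, &first, n,
                                                        memory_order_release,
                                                        memory_order_relaxed));
    }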
/* frees an object to the local cache, possibly pushing oldest objects to the
 * global pool.
 */
while (1) {
if (limit && allocated >= limit) {
- HA_ATOMIC_ADD(&pool->allocated, allocated - allocated_orig);
+ _HA_ATOMIC_ADD(&pool->allocated, allocated - allocated_orig);
return NULL;
}
ptr = malloc(size + POOL_EXTRA);
if (!ptr) {
- HA_ATOMIC_ADD(&pool->failed, 1);
+ _HA_ATOMIC_ADD(&pool->failed, 1);
if (failed)
return NULL;
failed++;
do {
*POOL_LINK(pool, ptr) = free_list;
__ha_barrier_store();
- } while (HA_ATOMIC_CAS(&pool->free_list, &free_list, ptr) == 0);
+ } while (_HA_ATOMIC_CAS(&pool->free_list, &free_list, ptr) == 0);
}
+ __ha_barrier_atomic_store();
- HA_ATOMIC_ADD(&pool->allocated, allocated - allocated_orig);
- HA_ATOMIC_ADD(&pool->used, 1);
+ _HA_ATOMIC_ADD(&pool->allocated, allocated - allocated_orig);
+ _HA_ATOMIC_ADD(&pool->used, 1);
#ifdef DEBUG_MEMORY_POOLS
/* keep track of where the element was allocated from */
*POOL_LINK(pool, ptr) = (void *)pool;
#endif
return ptr;
}
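One detail of __pool_refill_alloc worth calling out: allocated is a plain local inside the loop, and the shared counter only absorbs the difference once, through a single relaxed add on each exit path. A toy version of that batching, with hypothetical names:

    #include <stdatomic.h>

    static _Atomic int pool_allocated;      /* shared counter, hypothetical */

    static void refill(int n)
    {
        int allocated = atomic_load_explicit(&pool_allocated,
                                             memory_order_relaxed);
        int allocated_orig = allocated;

        while (n--)
            allocated++;        /* stands in for one malloc() plus enqueue */

        /* one relaxed read-modify-write publishes the whole batch */
        atomic_fetch_add_explicit(&pool_allocated, allocated - allocated_orig,
                                  memory_order_relaxed);
    }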
do {
next = pool->free_list;
- } while (!HA_ATOMIC_CAS(&pool->free_list, &next, NULL));
+ } while (!_HA_ATOMIC_CAS(&pool->free_list, &next, NULL));
+ __ha_barrier_atomic_store();
while (next) {
temp = next;
next = *POOL_LINK(pool, temp);
removed++;
free(temp);
}
pool->free_list = next;
- HA_ATOMIC_SUB(&pool->allocated, removed);
+ _HA_ATOMIC_SUB(&pool->allocated, removed);
/* here, we should have pool->allocated == pool->used */
}
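pool_flush relies on another standard lockless idiom: steal the whole free list with one atomic swap of the head against NULL, then walk the now-private chain with no further synchronization. A compact C11 equivalent, where an atomic exchange plays the role of the CAS loop above (names illustrative):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct node { struct node *next; };

    static int flush(struct node *_Atomic *head)
    {
        int removed = 0;
        /* acquire pairs with the release publish on the push side, so the
         * nodes' link fields are visible once the head has been observed */
        struct node *next = atomic_exchange_explicit(head, NULL,
                                                     memory_order_acquire);

        while (next) {
            struct node *temp = next;
            next = temp->next;
            removed++;
            free(temp);
        }
        return removed;
    }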
int cur_recurse = 0;
struct pool_head *entry;
- if (recurse || !HA_ATOMIC_CAS(&recurse, &cur_recurse, 1))
+ if (recurse || !_HA_ATOMIC_CAS(&recurse, &cur_recurse, 1))
return;
list_for_each_entry(entry, &pools, list) {
if (__ha_cas_dw(&entry->free_list, &cmp, &new) == 0)
continue;
free(cmp.free_list);
- HA_ATOMIC_SUB(&entry->allocated, 1);
+ _HA_ATOMIC_SUB(&entry->allocated, 1);
}
}
- HA_ATOMIC_STORE(&recurse, 0);
+ _HA_ATOMIC_STORE(&recurse, 0);
}
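Both pool_gc variants open with the same admission gate: a cheap plain read as the fast path, then one CAS from 0 to 1, so exactly one caller collects at a time and a reentrant call (for instance from the out-of-memory path) bails out immediately. A sketch of the gate with C11 atomics, where HAProxy itself spells it with its _HA_ATOMIC_CAS macro on a static int:

    #include <stdatomic.h>

    static _Atomic int recurse;

    static void gc(void)
    {
        int cur_recurse = 0;

        /* fast-path read, then a single CAS to take the gate */
        if (atomic_load_explicit(&recurse, memory_order_relaxed) ||
            !atomic_compare_exchange_strong(&recurse, &cur_recurse, 1))
            return;

        /* ... scan the pools and release idle objects ... */

        atomic_store_explicit(&recurse, 0, memory_order_release); /* reopen */
    }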
int cur_recurse = 0;
struct pool_head *entry;
- if (recurse || !HA_ATOMIC_CAS(&recurse, &cur_recurse, 1))
+ if (recurse || !_HA_ATOMIC_CAS(&recurse, &cur_recurse, 1))
return;
list_for_each_entry(entry, &pools, list) {
HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
}
- HA_ATOMIC_STORE(&recurse, 0);
+ _HA_ATOMIC_STORE(&recurse, 0);
}
#endif
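The common thread of the whole patch: every HA_ATOMIC_* call becomes its underscore-prefixed twin, and an explicit __ha_barrier_atomic_store() appears wherever ordering still matters. My reading, to be verified against hathreads.h, is that the plain macros are sequentially consistent while the _HA_ variants are relaxed, roughly:

    /* assumed mapping on a GCC >= 4.7 build; verify against hathreads.h */
    #define HA_ATOMIC_ADD(val, i)  __atomic_add_fetch((val), (i), __ATOMIC_SEQ_CST)
    #define _HA_ATOMIC_ADD(val, i) __atomic_add_fetch((val), (i), __ATOMIC_RELAXED)

    /* orders preceding atomic operations before later atomic stores */
    #define __ha_barrier_atomic_store() __atomic_thread_fence(__ATOMIC_RELEASE)

    static int used;

    static void demo(void)
    {
        _HA_ATOMIC_ADD(&used, 1);       /* relaxed bump, no implied fence */
        __ha_barrier_atomic_store();    /* explicit ordering point */
    }

The relaxed forms still execute atomically; what they drop is the full fence implied around every single operation, which is the cost this patch removes on weakly ordered CPUs.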