n = HA_ATOMIC_XCHG(&(lh)->n, LLIST_BUSY); \
if (n == LLIST_BUSY) \
continue; \
- __ha_barrier_store(); \
+ __ha_barrier_atomic_store(); \
p = HA_ATOMIC_XCHG(&n->p, LLIST_BUSY); \
if (p == LLIST_BUSY) { \
(lh)->n = n; \
/* Get the local pointer to the peer. */
local = HA_ATOMIC_XCHG(&xref->peer, XREF_BUSY);
- __ha_barrier_store();
+ __ha_barrier_atomic_store();
/* If the local pointer is NULL, the peer no longer exists. */
if (local == NULL) {
goto done;
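Note: the two hunks above (the locked-list macros and the xref peer lookup) share one idiom: a pointer is claimed by atomically exchanging it with a BUSY sentinel (LLIST_BUSY, XREF_BUSY), and the barrier right after the XCHG keeps the later accesses ordered behind the claim. A minimal standalone sketch of that idiom in C11 atomics follows; the names (claim_next, NODE_BUSY) are made up for illustration and are not from the patch.

    #include <stdatomic.h>
    #include <stddef.h>

    struct node { struct node *_Atomic next; };

    #define NODE_BUSY ((struct node *)1)   /* sentinel, like LLIST_BUSY/XREF_BUSY */

    /* Claim head->next for exclusive use, spinning while another thread
     * holds it; returns the previous pointer (possibly NULL). */
    static struct node *claim_next(struct node *head)
    {
            struct node *n;

            while ((n = atomic_exchange(&head->next, NODE_BUSY)) == NODE_BUSY)
                    ;       /* someone else owns it, retry */

            /* plays the role of __ha_barrier_atomic_store(): the exchange
             * above is already an atomic RMW, so a release fence suffices
             * to make the BUSY claim visible before our later writes to
             * the claimed node */
            atomic_thread_fence(memory_order_release);
            return n;
    }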
if (!HA_ATOMIC_CAS(&_GET_NEXT(fd, off), &next, -2))
goto redo_next;
- __ha_barrier_store();
+ __ha_barrier_atomic_store();
new = fd;
redo_last:
if (unlikely(!HA_ATOMIC_CAS(&_GET_PREV(fd, off), &prev, -2)))
goto lock_self_prev;
#endif
- __ha_barrier_store();
+ __ha_barrier_atomic_store();
/* Now, lock the entries of our neighbours */
if (likely(prev != -1)) {
#ifdef USE_THREAD
if (root == &rqueue) {
HA_ATOMIC_OR(&global_tasks_mask, t->thread_mask);
- __ha_barrier_store();
+ __ha_barrier_atomic_store();
}
#endif
old_active_mask = active_tasks_mask;
}
if (!(global_tasks_mask & tid_bit) && task_per_thread[tid].rqueue_size == 0) {
HA_ATOMIC_AND(&active_tasks_mask, ~tid_bit);
- __ha_barrier_load();
+ __ha_barrier_atomic_load();
if (global_tasks_mask & tid_bit)
HA_ATOMIC_OR(&active_tasks_mask, tid_bit);
}
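The two task.c hunks above form a Dekker-style handshake: the waker ORs the task's bit into global_tasks_mask and only then samples active_tasks_mask, while an idling thread ANDs its bit out of active_tasks_mask and only then re-reads global_tasks_mask; the barriers after the atomic OR/AND keep those two steps from being observed in the wrong order on either side, so a racing wakeup is not lost. A hedged sketch of the same handshake with C11 atomics; the mask names and helpers below are illustrative, not the scheduler's real API.

    #include <stdatomic.h>

    static _Atomic unsigned long global_mask;   /* stand-in for global_tasks_mask */
    static _Atomic unsigned long active_mask;   /* stand-in for active_tasks_mask */

    /* Waker side: publish work for a thread, then check whether it still
     * considers itself active. */
    static int publish_work(unsigned long thr_bit)
    {
            atomic_fetch_or(&global_mask, thr_bit);        /* like HA_ATOMIC_OR */
            /* role of __ha_barrier_atomic_store(): the OR must be visible
             * before we sample active_mask below */
            atomic_thread_fence(memory_order_seq_cst);
            return !(atomic_load(&active_mask) & thr_bit); /* 1 = needs a wakeup */
    }

    /* Idler side: withdraw from active_mask, then re-check global_mask so a
     * wakeup racing with the withdrawal is caught. */
    static void go_idle(unsigned long thr_bit)
    {
            atomic_fetch_and(&active_mask, ~thr_bit);      /* like HA_ATOMIC_AND */
            /* role of __ha_barrier_atomic_load(): the re-read below must not
             * be satisfied before the AND above is visible */
            atomic_thread_fence(memory_order_seq_cst);
            if (atomic_load(&global_mask) & thr_bit)
                    atomic_fetch_or(&active_mask, thr_bit); /* work arrived, stay up */
    }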
t = (struct task *)LIST_ELEM(task_per_thread[tid].task_list.n, struct tasklet *, list);
state = HA_ATOMIC_XCHG(&t->state, TASK_RUNNING);
- __ha_barrier_store();
+ __ha_barrier_atomic_store();
task_remove_from_task_list(t);
ctx = t->context;
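Taken together, every hunk in this patch applies the same rule: when the preceding operation is already one of the HA_ATOMIC_* read-modify-writes, the full __ha_barrier_store()/__ha_barrier_load() can be downgraded to the __ha_barrier_atomic_*() variants, which only have to order accesses around an atomic operation and are therefore much cheaper. As a rough sketch of how the two families might be expressed on top of the GCC __atomic builtins; this is an assumption for illustration, not copied from HAProxy's headers, so the my_* names are hypothetical.

    /* Plain barriers: must also order ordinary (non-atomic) loads and
     * stores, so they need a real fence (a seq_cst fence emits mfence on
     * x86-64 with GCC/Clang). */
    #define my_barrier_load()         __atomic_thread_fence(__ATOMIC_SEQ_CST)
    #define my_barrier_store()        __atomic_thread_fence(__ATOMIC_SEQ_CST)

    /* "Atomic" barriers: only used right after an atomic RMW, which already
     * carries most of the ordering, so an acquire/release fence is enough
     * (on x86-64 these compile down to a plain compiler barrier). */
    #define my_barrier_atomic_load()  __atomic_thread_fence(__ATOMIC_ACQUIRE)
    #define my_barrier_atomic_store() __atomic_thread_fence(__ATOMIC_RELEASE)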