if (HA_ATOMIC_BTS(&fdtab[fd].update_mask, tid))
return;
- oldupdt = HA_ATOMIC_ADD(&fd_nbupdt, 1) - 1;
+ oldupdt = _HA_ATOMIC_ADD(&fd_nbupdt, 1) - 1;
fd_updt[oldupdt] = fd;
} else {
unsigned long update_mask = fdtab[fd].update_mask;
do {
if (update_mask == fdtab[fd].thread_mask)
return;
- } while (!HA_ATOMIC_CAS(&fdtab[fd].update_mask, &update_mask,
+ } while (!_HA_ATOMIC_CAS(&fdtab[fd].update_mask, &update_mask,
fdtab[fd].thread_mask));
fd_add_to_fd_list(&update_list, fd, offsetof(struct fdtab, update));
}
{
unsigned long update_mask;
- update_mask = HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
+ update_mask = _HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
while ((update_mask & all_threads_mask) == 0) {
/* If we were the last one that had to update that entry, remove it from the list */
fd_rm_from_fd_list(&update_list, fd, offsetof(struct fdtab, update));
*/
static inline void fd_alloc_cache_entry(const int fd)
{
- HA_ATOMIC_OR(&fd_cache_mask, fdtab[fd].thread_mask);
+ _HA_ATOMIC_OR(&fd_cache_mask, fdtab[fd].thread_mask);
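/* fdtab[fd].thread_mask & (fdtab[fd].thread_mask - 1) is zero when at most
 * one thread bit is set, i.e. the fd is used by a single thread and can go
 * to that thread's local cache list.
 */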
if (!(fdtab[fd].thread_mask & (fdtab[fd].thread_mask - 1)))
fd_add_to_fd_list(&fd_cache_local[my_ffsl(fdtab[fd].thread_mask) - 1], fd, offsetof(struct fdtab, cache));
else
return;
new = old & ~FD_EV_ACTIVE_R;
new &= ~FD_EV_POLLED_R;
- } while (unlikely(!HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
+ } while (unlikely(!_HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
if ((old ^ new) & FD_EV_POLLED_R)
updt_fd_polling(fd);
return;
new = old & ~FD_EV_ACTIVE_W;
new &= ~FD_EV_POLLED_W;
- } while (unlikely(!HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
+ } while (unlikely(!_HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
if ((old ^ new) & FD_EV_POLLED_W)
updt_fd_polling(fd);
return;
new = old & ~FD_EV_ACTIVE_RW;
new &= ~FD_EV_POLLED_RW;
- } while (unlikely(!HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
+ } while (unlikely(!_HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
if ((old ^ new) & FD_EV_POLLED_RW)
updt_fd_polling(fd);
new = old & ~FD_EV_READY_R;
if (new & FD_EV_ACTIVE_R)
new |= FD_EV_POLLED_R;
- } while (unlikely(!HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
+ } while (unlikely(!_HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
if ((old ^ new) & FD_EV_POLLED_R)
updt_fd_polling(fd);
unsigned long locked;
/* marking ready never changes polled status */
- HA_ATOMIC_OR(&fdtab[fd].state, FD_EV_READY_R);
+ _HA_ATOMIC_OR(&fdtab[fd].state, FD_EV_READY_R);
locked = atleast2(fdtab[fd].thread_mask);
if (locked)
new = old & ~FD_EV_READY_R;
if (new & FD_EV_ACTIVE_R)
new |= FD_EV_POLLED_R;
- } while (unlikely(!HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
+ } while (unlikely(!_HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
if ((old ^ new) & FD_EV_POLLED_R)
updt_fd_polling(fd);
new = old & ~FD_EV_READY_W;
if (new & FD_EV_ACTIVE_W)
new |= FD_EV_POLLED_W;
- } while (unlikely(!HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
+ } while (unlikely(!_HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
if ((old ^ new) & FD_EV_POLLED_W)
updt_fd_polling(fd);
unsigned long locked;
/* marking ready never changes polled status */
- HA_ATOMIC_OR(&fdtab[fd].state, FD_EV_READY_W);
+ _HA_ATOMIC_OR(&fdtab[fd].state, FD_EV_READY_W);
locked = atleast2(fdtab[fd].thread_mask);
if (locked)
new = old | FD_EV_ACTIVE_R;
if (!(new & FD_EV_READY_R))
new |= FD_EV_POLLED_R;
- } while (unlikely(!HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
+ } while (unlikely(!_HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
if ((old ^ new) & FD_EV_POLLED_R)
updt_fd_polling(fd);
new = old | FD_EV_ACTIVE_W;
if (!(new & FD_EV_READY_W))
new |= FD_EV_POLLED_W;
- } while (unlikely(!HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
+ } while (unlikely(!_HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
if ((old ^ new) & FD_EV_POLLED_W)
updt_fd_polling(fd);
/* These are replacements for FD_SET, FD_CLR, FD_ISSET, working on uints */
static inline void hap_fd_set(int fd, unsigned int *evts)
{
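/* fd / (8*sizeof(*evts)) selects the word holding this fd's bit, and
 * fd & (8*sizeof(*evts) - 1) is the bit's position within that word.
 */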
- HA_ATOMIC_OR(&evts[fd / (8*sizeof(*evts))], 1U << (fd & (8*sizeof(*evts) - 1)));
+ _HA_ATOMIC_OR(&evts[fd / (8*sizeof(*evts))], 1U << (fd & (8*sizeof(*evts) - 1)));
}
static inline void hap_fd_clr(int fd, unsigned int *evts)
{
- HA_ATOMIC_AND(&evts[fd / (8*sizeof(*evts))], ~(1U << (fd & (8*sizeof(*evts) - 1))));
+ _HA_ATOMIC_AND(&evts[fd / (8*sizeof(*evts))], ~(1U << (fd & (8*sizeof(*evts) - 1))));
}
static inline unsigned int hap_fd_isset(int fd, unsigned int *evts)
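The body of hap_fd_isset() lies outside this hunk. The standalone sketch below, which assumes hap_fd_isset() simply tests the same word and bit that hap_fd_set() and hap_fd_clr() manipulate, shows how the three helpers are expected to work together; the atomic OR/AND from the patch is replaced by plain operations to keep it self-contained.

/* Sketch (not part of the patch): minimal FD_SET/FD_ISSET replacements on
 * uints, assuming hap_fd_isset() tests the bit that hap_fd_set() sets.
 */
#include <stdio.h>

static unsigned int evts[4];	/* room for 4 * 8 * sizeof(unsigned int) fds */

static inline void sketch_fd_set(int fd, unsigned int *e)
{
	e[fd / (8 * sizeof(*e))] |= 1U << (fd & (8 * sizeof(*e) - 1));
}

static inline unsigned int sketch_fd_isset(int fd, unsigned int *e)
{
	return e[fd / (8 * sizeof(*e))] & (1U << (fd & (8 * sizeof(*e) - 1)));
}

int main(void)
{
	sketch_fd_set(37, evts);
	printf("fd 37: %d, fd 38: %d\n",
	       sketch_fd_isset(37, evts) != 0, sketch_fd_isset(38, evts) != 0);
	return 0;
}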
/* Check that we're not already in the cache, and if not, lock us. */
if (next >= -2)
goto done;
- if (!HA_ATOMIC_CAS(&_GET_NEXT(fd, off), &next, -2))
+ if (!_HA_ATOMIC_CAS(&_GET_NEXT(fd, off), &next, -2))
goto redo_next;
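/* the CAS above is the relaxed variant, so the ordering it needs with
 * respect to what follows is supplied by the explicit barrier below.
 */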
__ha_barrier_atomic_store();
if (unlikely(last == -1)) {
/* list is empty, try to add ourselves alone so that list->last=fd */
- if (unlikely(!HA_ATOMIC_CAS(&list->last, &old, new)))
+ if (unlikely(!_HA_ATOMIC_CAS(&list->last, &old, new)))
goto redo_last;
/* list->first was necessarily -1, we're guaranteed to be alone here */
* The CAS will only succeed if its next is -1,
* which means it's in the cache, and the last element.
*/
- if (unlikely(!HA_ATOMIC_CAS(&_GET_NEXT(last, off), &old, new)))
+ if (unlikely(!_HA_ATOMIC_CAS(&_GET_NEXT(last, off), &old, new)))
goto redo_last;
/* Then, update the last entry */
goto lock_self;
} while (
#ifdef HA_CAS_IS_8B
- unlikely(!HA_ATOMIC_CAS(((void **)(void *)&_GET_NEXT(fd, off)), ((void **)(void *)&cur_list), (*(void **)(void *)&next_list))))
+ unlikely(!_HA_ATOMIC_CAS(((void **)(void *)&_GET_NEXT(fd, off)), ((void **)(void *)&cur_list), (*(void **)(void *)&next_list))))
#else
unlikely(!__ha_cas_dw((void *)&_GET_NEXT(fd, off), (void *)&cur_list, (void *)&next_list)))
#endif
goto lock_self_next;
if (next <= -3)
goto done;
- if (unlikely(!HA_ATOMIC_CAS(&_GET_NEXT(fd, off), &next, -2)))
+ if (unlikely(!_HA_ATOMIC_CAS(&_GET_NEXT(fd, off), &next, -2)))
goto lock_self_next;
lock_self_prev:
prev = ({ volatile int *prev = &_GET_PREV(fd, off); *prev; });
if (prev == -2)
goto lock_self_prev;
- if (unlikely(!HA_ATOMIC_CAS(&_GET_PREV(fd, off), &prev, -2)))
+ if (unlikely(!_HA_ATOMIC_CAS(&_GET_PREV(fd, off), &prev, -2)))
goto lock_self_prev;
#endif
__ha_barrier_atomic_store();
redo_prev:
old = fd;
- if (unlikely(!HA_ATOMIC_CAS(&_GET_NEXT(prev, off), &old, new))) {
+ if (unlikely(!_HA_ATOMIC_CAS(&_GET_NEXT(prev, off), &old, new))) {
if (unlikely(old == -2)) {
/* Neighbour already locked, give up and
* retry again once he's done
if (likely(next != -1)) {
redo_next:
old = fd;
- if (unlikely(!HA_ATOMIC_CAS(&_GET_PREV(next, off), &old, new))) {
+ if (unlikely(!_HA_ATOMIC_CAS(&_GET_PREV(next, off), &old, new))) {
if (unlikely(old == -2)) {
/* Neighbour already locked, give up and
* retry again once he's done
list->first = next;
__ha_barrier_store();
last = list->last;
- while (unlikely(last == fd && (!HA_ATOMIC_CAS(&list->last, &last, prev))))
+ while (unlikely(last == fd && (!_HA_ATOMIC_CAS(&list->last, &last, prev))))
__ha_compiler_barrier();
/* Make sure we let other threads know we're no longer in cache,
* before releasing our neighbours.
if (fdtab[fd].cache.next < -3)
continue;
- HA_ATOMIC_OR(&fd_cache_mask, tid_bit);
+ _HA_ATOMIC_OR(&fd_cache_mask, tid_bit);
locked = atleast2(fdtab[fd].thread_mask);
if (locked && HA_SPIN_TRYLOCK(FD_LOCK, &fdtab[fd].lock)) {
activity[tid].fd_lock++;
*/
void fd_process_cached_events()
{
- HA_ATOMIC_AND(&fd_cache_mask, ~tid_bit);
+ _HA_ATOMIC_AND(&fd_cache_mask, ~tid_bit);
fdlist_process_cached_events(&fd_cache_local[tid]);
fdlist_process_cached_events(&fd_cache);
}
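Every hunk above applies the same substitution: an HA_ATOMIC_* call becomes its _HA_ATOMIC_* counterpart, with ordering supplied by explicit barriers such as __ha_barrier_atomic_store() where it matters. The standalone sketch below illustrates that assumed contrast between the two families; the macro definitions are illustrative stand-ins, not HAProxy's own.

/* Standalone sketch (not HAProxy code), under the assumption that
 * HA_ATOMIC_* operations are fully ordered while _HA_ATOMIC_* operations
 * are relaxed and leave ordering to explicit barriers.
 */
#include <stdio.h>

static unsigned long fd_cache_mask;

#define ATOMIC_OR_SEQ(ptr, val)	__atomic_or_fetch((ptr), (val), __ATOMIC_SEQ_CST)
#define ATOMIC_OR_RLX(ptr, val)	__atomic_or_fetch((ptr), (val), __ATOMIC_RELAXED)
#define BARRIER_ATOMIC_STORE()	__atomic_thread_fence(__ATOMIC_RELEASE)

int main(void)
{
	ATOMIC_OR_SEQ(&fd_cache_mask, 0x1UL);	/* ordered by itself */

	ATOMIC_OR_RLX(&fd_cache_mask, 0x2UL);	/* relaxed: cheaper on weakly
						 * ordered CPUs, but ... */
	BARRIER_ATOMIC_STORE();			/* ... ordering is added back
						 * explicitly where needed */

	printf("fd_cache_mask = %#lx\n", fd_cache_mask);
	return 0;
}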