*/
static inline long fd_clr_running(int fd)
{
- return _HA_ATOMIC_AND_FETCH(&fdtab[fd].running_mask, ~tid_bit);
+ return _HA_ATOMIC_AND_FETCH(&fdtab[fd].running_mask, ~ti->ltid_bit);
}
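For reference, a minimal standalone sketch (plain C11 atomics, not HAProxy code) of the same clear-my-bit-and-check-what-remains pattern used by fd_clr_running(): C11's atomic_fetch_and() returns the previous value, so the remaining bits are recomputed locally, whereas _HA_ATOMIC_AND_FETCH() returns the new value directly.

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long running_mask;

/* clears <bit> from running_mask and returns the bits still set afterwards */
static unsigned long clr_running(unsigned long bit)
{
    return atomic_fetch_and(&running_mask, ~bit) & ~bit;
}

int main(void)
{
    unsigned long my_bit = 1UL << 3;        /* hypothetical per-thread bit */

    atomic_fetch_or(&running_mask, my_bit); /* mark ourselves running */

    if (clr_running(my_bit) == 0)
        printf("last runner gone, the resource may be released\n");
    return 0;
}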
/* Prepares <fd> for being polled on all permitted threads of this group ID
 * ...
 */

/* ...
 * safely delete the FD. Most of the time it will be the current thread.
 */
- HA_ATOMIC_OR(&fdtab[fd].running_mask, tid_bit);
+ HA_ATOMIC_OR(&fdtab[fd].running_mask, ti->ltid_bit);
HA_ATOMIC_STORE(&fdtab[fd].thread_mask, 0);
if (fd_clr_running(fd) == 0)
_fd_delete_orphan(fd);
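The sequence above (add our bit to running_mask, zero thread_mask so no new thread can pick the FD up, then drop our bit and free the entry if nothing remains) can be sketched in isolation as follows; struct entry, delete_entry() and release_entry() are hypothetical names, not HAProxy's:

#include <stdatomic.h>
#include <stdio.h>

struct entry {
    _Atomic unsigned long running_mask; /* threads currently using the entry */
    _Atomic unsigned long thread_mask;  /* threads allowed to use the entry */
};

static void release_entry(struct entry *e)
{
    printf("entry %p released\n", (void *)e);
}

static void delete_entry(struct entry *e, unsigned long my_bit)
{
    /* make ourselves visible as a user so a concurrent thread cannot
     * free the entry under us
     */
    atomic_fetch_or(&e->running_mask, my_bit);

    /* forbid any new thread from picking up the entry */
    atomic_store(&e->thread_mask, 0);

    /* drop our bit; if nothing remains we were the last user and are
     * responsible for the final release
     */
    if ((atomic_fetch_and(&e->running_mask, ~my_bit) & ~my_bit) == 0)
        release_entry(e);
}

int main(void)
{
    struct entry e = { .running_mask = 0, .thread_mask = 0x3 };

    delete_entry(&e, 1UL << 0);
    return 0;
}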
return -1;
old = 0;
- if (!HA_ATOMIC_CAS(&fdtab[fd].running_mask, &old, tid_bit)) {
+ if (!HA_ATOMIC_CAS(&fdtab[fd].running_mask, &old, ti->ltid_bit)) {
fd_drop_tgid(fd);
return -1;
}
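The takeover attempt above succeeds only if a single compare-and-swap moves running_mask from 0 to this thread's bit, i.e. nobody else currently has the FD marked running. A self-contained sketch of that claim-if-idle pattern, with assumed simplified types (try_take_over() is illustrative only):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic unsigned long running_mask;

static bool try_take_over(unsigned long my_bit)
{
    unsigned long old = 0;

    /* succeeds only when running_mask was exactly 0 */
    return atomic_compare_exchange_strong(&running_mask, &old, my_bit);
}

int main(void)
{
    if (try_take_over(1UL << 2))
        printf("idle entry claimed\n");
    else
        printf("entry busy, giving up\n");
    return 0;
}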
fd_stop_recv(fd);
/* we're done with it */
- HA_ATOMIC_AND(&fdtab[fd].running_mask, ~tid_bit);
+ HA_ATOMIC_AND(&fdtab[fd].running_mask, ~ti->ltid_bit);
/* no more changes planned */
fd_drop_tgid(fd);
fd_drop_tgid(fd);
return FD_UPDT_MIGRATED;
}
- } while (!HA_ATOMIC_CAS(&fdtab[fd].running_mask, &rmask, rmask | tid_bit));
+ } while (!HA_ATOMIC_CAS(&fdtab[fd].running_mask, &rmask, rmask | ti->ltid_bit));
/* with running we're safe now, we can drop the reference */
fd_drop_tgid(fd);
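The loop above retries the compare-and-swap against a freshly observed running_mask until this thread's bit is merged in, so concurrent updates from other threads are never overwritten. A standalone sketch of the same retry pattern (set_running() is an illustrative name, not HAProxy's API):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long running_mask;

static void set_running(unsigned long my_bit)
{
    unsigned long rmask = atomic_load(&running_mask);

    /* on failure, atomic_compare_exchange_weak() reloads rmask with the
     * current value, so the next attempt is based on fresh state
     */
    while (!atomic_compare_exchange_weak(&running_mask, &rmask,
                                         rmask | my_bit))
        ;
}

int main(void)
{
    set_running(1UL << 1);
    printf("running_mask = %#lx\n", atomic_load(&running_mask));
    return 0;
}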
* This is detected by both thread_mask and running_mask being 0 after
* we remove ourselves last.
*/
- if ((fdtab[fd].running_mask & tid_bit) &&
+ if ((fdtab[fd].running_mask & ti->ltid_bit) &&
fd_clr_running(fd) == 0 && !fdtab[fd].thread_mask) {
_fd_delete_orphan(fd);
return FD_UPDT_CLOSED;