Use the new _HA_ATOMIC_* macros and add barriers where needed.
if (!(fdtab[fd].thread_mask & tid_bit) || !(en & FD_EV_POLLED_RW)) {
/* fd removed from poll list */
opcode = EPOLL_CTL_DEL;
- HA_ATOMIC_AND(&polled_mask[fd], ~tid_bit);
+ _HA_ATOMIC_AND(&polled_mask[fd], ~tid_bit);
}
else {
/* fd status changed */
else if ((fdtab[fd].thread_mask & tid_bit) && (en & FD_EV_POLLED_RW)) {
/* new fd in the poll list */
opcode = EPOLL_CTL_ADD;
- HA_ATOMIC_OR(&polled_mask[fd], tid_bit);
+ _HA_ATOMIC_OR(&polled_mask[fd], tid_bit);
}
else {
return;
for (updt_idx = 0; updt_idx < fd_nbupdt; updt_idx++) {
fd = fd_updt[updt_idx];
- HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
+ _HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
if (!fdtab[fd].owner) {
activity[tid].poll_drop++;
continue;
/* FD has been migrated */
activity[tid].poll_skip++;
epoll_ctl(epoll_fd[tid], EPOLL_CTL_DEL, fd, &ev);
- HA_ATOMIC_AND(&polled_mask[fd], ~tid_bit);
+ _HA_ATOMIC_AND(&polled_mask[fd], ~tid_bit);
continue;
}
/* always remap RDHUP to HUP as they're used similarly */
if (e & EPOLLRDHUP) {
- HA_ATOMIC_OR(&cur_poller.flags, HAP_POLL_F_RDHUP);
+ _HA_ATOMIC_OR(&cur_poller.flags, HAP_POLL_F_RDHUP);
n |= FD_POLL_HUP;
}
fd_update_events(fd, n);
/* fd totally removed from poll list */
EV_SET(&kev[changes++], fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
EV_SET(&kev[changes++], fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL);
- HA_ATOMIC_AND(&polled_mask[fd], ~tid_bit);
+ _HA_ATOMIC_AND(&polled_mask[fd], ~tid_bit);
}
else {
/* OK fd has to be monitored, it was either added or changed */
else if (polled_mask[fd] & tid_bit)
EV_SET(&kev[changes++], fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL);
- HA_ATOMIC_OR(&polled_mask[fd], tid_bit);
+ _HA_ATOMIC_OR(&polled_mask[fd], tid_bit);
}
return changes;
}
for (updt_idx = 0; updt_idx < fd_nbupdt; updt_idx++) {
fd = fd_updt[updt_idx];
- HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
+ _HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
if (!fdtab[fd].owner) {
activity[tid].poll_drop++;
continue;
/* fd totally removed from poll list */
hap_fd_clr(fd, fd_evts[DIR_RD]);
hap_fd_clr(fd, fd_evts[DIR_WR]);
- HA_ATOMIC_AND(&polled_mask[fd], 0);
+ _HA_ATOMIC_AND(&polled_mask[fd], 0);
}
else {
/* OK fd has to be monitored, it was either added or changed */
else
hap_fd_set(fd, fd_evts[DIR_WR]);
- HA_ATOMIC_OR(&polled_mask[fd], tid_bit);
+ _HA_ATOMIC_OR(&polled_mask[fd], tid_bit);
if (fd > *max_add_fd)
*max_add_fd = fd;
}
for (updt_idx = 0; updt_idx < fd_nbupdt; updt_idx++) {
fd = fd_updt[updt_idx];
- HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
+ _HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
if (!fdtab[fd].owner) {
activity[tid].poll_drop++;
continue;
* we don't need every thread to take care of the
* update.
*/
- HA_ATOMIC_AND(&fdtab[fd].update_mask, ~all_threads_mask);
+ _HA_ATOMIC_AND(&fdtab[fd].update_mask, ~all_threads_mask);
done_update_polling(fd);
} else
continue;
/* maybe we added at least one fd larger than maxfd */
for (old_maxfd = maxfd; old_maxfd <= max_add_fd; ) {
- if (HA_ATOMIC_CAS(&maxfd, &old_maxfd, max_add_fd + 1))
+ if (_HA_ATOMIC_CAS(&maxfd, &old_maxfd, max_add_fd + 1))
break;
}
new_maxfd--;
if (new_maxfd >= old_maxfd)
break;
- } while (!HA_ATOMIC_CAS(&maxfd, &old_maxfd, new_maxfd));
+ } while (!_HA_ATOMIC_CAS(&maxfd, &old_maxfd, new_maxfd));
thread_harmless_now();
/* always remap RDHUP to HUP as they're used similarly */
if (e & POLLRDHUP) {
- HA_ATOMIC_OR(&cur_poller.flags, HAP_POLL_F_RDHUP);
+ _HA_ATOMIC_OR(&cur_poller.flags, HAP_POLL_F_RDHUP);
n |= FD_POLL_HUP;
}
fd_update_events(fd, n);
/* fd totally removed from poll list */
hap_fd_clr(fd, fd_evts[DIR_RD]);
hap_fd_clr(fd, fd_evts[DIR_WR]);
- HA_ATOMIC_AND(&polled_mask[fd], 0);
+ _HA_ATOMIC_AND(&polled_mask[fd], 0);
}
else {
/* OK fd has to be monitored, it was either added or changed */
else
hap_fd_set(fd, fd_evts[DIR_WR]);
- HA_ATOMIC_OR(&polled_mask[fd], tid_bit);
+ _HA_ATOMIC_OR(&polled_mask[fd], tid_bit);
if (fd > *max_add_fd)
*max_add_fd = fd;
}
for (updt_idx = 0; updt_idx < fd_nbupdt; updt_idx++) {
fd = fd_updt[updt_idx];
- HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
+ _HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
if (!fdtab[fd].owner) {
activity[tid].poll_drop++;
continue;
* we don't need every thread to take care of the
* update.
*/
- HA_ATOMIC_AND(&fdtab[fd].update_mask, ~all_threads_mask);
+ _HA_ATOMIC_AND(&fdtab[fd].update_mask, ~all_threads_mask);
done_update_polling(fd);
} else
continue;
/* maybe we added at least one fd larger than maxfd */
for (old_maxfd = maxfd; old_maxfd <= max_add_fd; ) {
- if (HA_ATOMIC_CAS(&maxfd, &old_maxfd, max_add_fd + 1))
+ if (_HA_ATOMIC_CAS(&maxfd, &old_maxfd, max_add_fd + 1))
break;
}
new_maxfd--;
if (new_maxfd >= old_maxfd)
break;
- } while (!HA_ATOMIC_CAS(&maxfd, &old_maxfd, new_maxfd));
+ } while (!_HA_ATOMIC_CAS(&maxfd, &old_maxfd, new_maxfd));
thread_harmless_now();