for (count = 0; count < status; count++) {
unsigned int n, e;
- int ret;
e = epoll_events[count].events;
fd = epoll_events[count].data.fd;
((e & EPOLLHUP) ? FD_EV_SHUT_RW : 0) |
((e & EPOLLERR) ? FD_EV_ERR_RW : 0);
- ret = fd_update_events(fd, n);
-
- if (ret == FD_UPDT_MIGRATED) {
- /* FD has been migrated */
- if (!HA_ATOMIC_BTS(&fdtab[fd].update_mask, tid))
- fd_updt[fd_nbupdt++] = fd;
- }
+ fd_update_events(fd, n);
}
/* the caller will take care of cached events */
}
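For reference, the context lines kept above only translate the epoll(7) event bits into HAProxy's internal FD_EV_* flags before handing them to fd_update_events(). Below is a minimal standalone sketch of that mapping; the FD_EV_* values and the main() driver are placeholders for illustration only, and the EPOLLIN/EPOLLOUT/EPOLLRDHUP cases are assumed to sit just above the lines quoted in the hunk.

/* standalone sketch: map epoll bits to internal event flags
 * (placeholder flag values, not HAProxy's real definitions)
 */
#include <stdio.h>
#include <sys/epoll.h>

#define FD_EV_READY_R  0x01  /* data ready to read */
#define FD_EV_READY_W  0x02  /* ready to write */
#define FD_EV_SHUT_R   0x04  /* read side shut (RDHUP) */
#define FD_EV_SHUT_RW  0x0c  /* both sides shut (HUP) */
#define FD_EV_ERR_RW   0x30  /* error reported on the FD */

static unsigned int decode_epoll_events(unsigned int e)
{
	return ((e & EPOLLIN)    ? FD_EV_READY_R : 0) |
	       ((e & EPOLLOUT)   ? FD_EV_READY_W : 0) |
	       ((e & EPOLLRDHUP) ? FD_EV_SHUT_R  : 0) |
	       ((e & EPOLLHUP)   ? FD_EV_SHUT_RW : 0) |
	       ((e & EPOLLERR)   ? FD_EV_ERR_RW  : 0);
}

int main(void)
{
	unsigned int e = EPOLLIN | EPOLLHUP;

	printf("events 0x%x -> flags 0x%x\n", e, decode_epoll_events(e));
	return 0;
}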
*/
ret = fd_update_events(fd, n);
- /* disable polling on this instance if the FD was migrated */
- if (ret == FD_UPDT_MIGRATED) {
- if (!HA_ATOMIC_BTS(&fdtab[fd].update_mask, tid))
- fd_updt[fd_nbupdt++] = fd;
+ /* the FD was migrated; fd_update_events() already queued it for de-polling, so just skip it */
+ if (ret == FD_UPDT_MIGRATED)
continue;
- }
/*
* This file descriptor was closed during the processing of
for (count = 0; count < status; count++) {
unsigned int n = 0;
- int ret;
fd = kev[count].ident;
n |= FD_EV_ERR_RW;
}
- ret = fd_update_events(fd, n);
-
- if (ret == FD_UPDT_MIGRATED) {
- /* FD was migrated, let's stop polling it */
- if (!HA_ATOMIC_BTS(&fdtab[fd].update_mask, tid))
- fd_updt[fd_nbupdt++] = fd;
- }
+ fd_update_events(fd, n);
}
}
for (count = 0; status > 0 && count < nbfd; count++) {
unsigned int n;
- int ret;
int e = poll_events[count].revents;
+
fd = poll_events[count].fd;
if ((e & POLLRDHUP) && !(cur_poller.flags & HAP_POLL_F_RDHUP))
((e & POLLHUP) ? FD_EV_SHUT_RW : 0) |
((e & POLLERR) ? FD_EV_ERR_RW : 0);
- ret = fd_update_events(fd, n);
-
- if (ret == FD_UPDT_MIGRATED) {
- /* FD was migrated, let's stop polling it */
- if (!HA_ATOMIC_BTS(&fdtab[fd].update_mask, tid))
- fd_updt[fd_nbupdt++] = fd;
- }
+ fd_update_events(fd, n);
}
}
if (!(tmask & tid_bit)) {
/* a takeover has started */
activity[tid].poll_skip_fd++;
+
+ /* let the poller know this FD was migrated to another thread so it stops polling it */
+ if (!HA_ATOMIC_BTS(&fdtab[fd].update_mask, tid))
+ fd_updt[fd_nbupdt++] = fd;
return FD_UPDT_MIGRATED;
}
} while (!HA_ATOMIC_CAS(&fdtab[fd].running_mask, &rmask, rmask | tid_bit));
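The hunk above is the core of the change: instead of every poller repeating the same bookkeeping, fd_update_events() itself now flags a migrated FD for de-polling. HA_ATOMIC_BTS() is an atomic bit test-and-set on the FD's update_mask, so the FD is appended to the thread-local fd_updt list only when this thread's bit was not already set, i.e. at most once. A rough standalone equivalent of that idiom using C11 atomics is sketched below; the names mirror the patch but the sizes, types and helpers are simplified stand-ins, not HAProxy's real structures.

/* standalone sketch: set this thread's bit in the FD's update mask and
 * queue the FD only if the bit was previously clear (queue-once idiom)
 */
#include <stdatomic.h>
#include <stdio.h>

#define MAX_FD    16
#define MAX_UPDT  1024

static _Atomic unsigned long update_mask[MAX_FD]; /* one mask per FD */
static int fd_updt[MAX_UPDT];                     /* thread-local update list */
static int fd_nbupdt;                             /* number of queued updates */

/* like HA_ATOMIC_BTS(&fdtab[fd].update_mask, tid): atomically set bit <tid>
 * and return its previous value
 */
static int update_mask_bts(int fd, int tid)
{
	unsigned long bit = 1UL << tid;

	return (atomic_fetch_or(&update_mask[fd], bit) & bit) != 0;
}

/* queue <fd> for a polling update unless this thread already did */
static void queue_update(int fd, int tid)
{
	if (!update_mask_bts(fd, tid))
		fd_updt[fd_nbupdt++] = fd;
}

int main(void)
{
	queue_update(7, 0);
	queue_update(7, 0); /* second call is a no-op: bit already set */
	printf("queued %d update(s)\n", fd_nbupdt); /* prints 1 */
	return 0;
}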