int prev;
} ALIGNED(8);
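For context, the entry type this tail belongs to (its opening was trimmed from the excerpt) can be sketched as follows; this is a reconstruction for illustration, the authoritative definition lives in HAProxy's fd-t.h:

/* Reconstructed for illustration: each fd embeds its linkage in the
 * update list as fd numbers rather than pointers; -1 means "none". */
struct fdlist_entry {
	int next;	/* next fd in the list, or -1 */
	int prev;	/* previous fd in the list, or -1 */
} ALIGNED(8);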
-/* head of the fd cache */
+/* head of the fd cache, per-group */
struct fdlist {
int first;
int last;
-} ALIGNED(8);
+} ALIGNED(64);
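The alignment bump from 8 to 64 bytes is the companion of making the head per-group: padded to a typical cache-line size, each group's {first,last} pair occupies its own line, so groups updating their own list heads do not false-share. A standalone sketch of the effect (names and the group count are illustrative only):

/* Illustrative only: with 64-byte alignment the array stride becomes
 * one cache line, so sketch_list[g] and sketch_list[g+1] never share
 * a line and cross-group atomic traffic stays isolated. */
#define SKETCH_TGROUPS 16			/* assumed group count */

struct sketch_fdlist {
	int first;
	int last;
} __attribute__((aligned(64)));			/* sizeof == 64 here */

static struct sketch_fdlist sketch_list[SKETCH_TGROUPS];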
/* info about one given fd. Note: only align on cache lines when using threads;
 * 32-bit small archs can put everything in 32-bytes when threads are disabled.
 */
extern int totalconn; /* total # of terminated sessions */
extern int actconn; /* # of active sessions */
-extern volatile struct fdlist update_list;
+extern volatile struct fdlist update_list[MAX_TGROUPS];
extern struct polled_mask *polled_mask;
extern THREAD_LOCAL int *fd_updt; // FD updates list
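MAX_TGROUPS, which now sizes update_list, is HAProxy's build-time cap on thread groups (it comes from the project's default settings headers). Compiling a standalone excerpt would need a placeholder along these lines; the value is an assumption for illustration, not HAProxy's authoritative one:

#ifndef MAX_TGROUPS
#define MAX_TGROUPS 16	/* assumed cap, illustration only */
#endif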
update_mask = _HA_ATOMIC_AND_FETCH(&fdtab[fd].update_mask, ~tid_bit);
while ((update_mask & all_threads_mask) == 0) {
/* If we were the last one that had to update that entry, remove it from the list */
- fd_rm_from_fd_list(&update_list, fd);
+ fd_rm_from_fd_list(&update_list[tgid - 1], fd);
update_mask = (volatile unsigned long)fdtab[fd].update_mask;
if ((update_mask & all_threads_mask) != 0) {
/* Maybe it's been re-updated in the meanwhile, and we
* wrongly removed it from the list, if so, re-add it
*/
- fd_add_to_fd_list(&update_list, fd);
+ fd_add_to_fd_list(&update_list[tgid - 1], fd);
update_mask = (volatile unsigned long)(fdtab[fd].update_mask);
/* And then check again, just in case after all it
 * should be removed, even if it's very unlikely, given
 * the current thread wouldn't have been able to take
 * care of it yet */
} else
break;
-
}
}
}
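Every converted call site indexes the array as update_list[tgid - 1]: thread group ids are 1-based while C arrays are 0-based. Were this pattern worth factoring out, a hypothetical helper (not part of the patch) might read:

/* Hypothetical accessor, illustration only: the calling thread's
 * group-local update list. Assumes tgid >= 1 as in HAProxy. */
static inline volatile struct fdlist *my_group_update_list(void)
{
	return &update_list[tgid - 1];
}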
fd_nbupdt = 0;
/* Scan the global update list */
- for (old_fd = fd = update_list.first; fd != -1; fd = fdtab[fd].update.next) {
+ for (old_fd = fd = update_list[tgid - 1].first; fd != -1; fd = fdtab[fd].update.next) {
if (fd == -2) {
fd = old_fd;
continue;
}
fd_nbupdt = 0;
/* Scan the global update list */
- for (old_fd = fd = update_list.first; fd != -1; fd = fdtab[fd].update.next) {
+ for (old_fd = fd = update_list[tgid - 1].first; fd != -1; fd = fdtab[fd].update.next) {
if (fd == -2) {
fd = old_fd;
continue;
changes = _update_fd(fd, changes);
}
/* Scan the global update list */
- for (old_fd = fd = update_list.first; fd != -1; fd = fdtab[fd].update.next) {
+ for (old_fd = fd = update_list[tgid - 1].first; fd != -1; fd = fdtab[fd].update.next) {
if (fd == -2) {
fd = old_fd;
continue;
}
/* Now scan the global update list */
- for (old_fd = fd = update_list.first; fd != -1; fd = fdtab[fd].update.next) {
+ for (old_fd = fd = update_list[tgid - 1].first; fd != -1; fd = fdtab[fd].update.next) {
if (fd == -2) {
fd = old_fd;
continue;
_update_fd(fd, &max_add_fd);
}
/* Now scan the global update list */
- for (old_fd = fd = update_list.first; fd != -1; fd = fdtab[fd].update.next) {
+ for (old_fd = fd = update_list[tgid - 1].first; fd != -1; fd = fdtab[fd].update.next) {
if (fd == -2) {
fd = old_fd;
continue;
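All of the pollers above share one lock-free traversal idiom, so it is worth spelling out once: a next value of -2 marks an entry that another thread currently holds locked for insertion or removal, and the walker reacts by rewinding to the last stable fd (old_fd) and re-reading its next pointer until it stabilizes. A condensed sketch of that skeleton, with a hypothetical per-entry handler do_update(fd):

/* Condensed sketch of the shared scan skeleton (illustrative): */
for (old_fd = fd = update_list[tgid - 1].first; fd != -1; fd = fdtab[fd].update.next) {
	if (fd == -2) {
		/* entry locked by a concurrent add/remove: back up and retry */
		fd = old_fd;
		continue;
	}
	old_fd = fd;		/* last position known to be stable */
	do_update(fd);		/* hypothetical per-fd work */
}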
struct poller cur_poller __read_mostly;
int nbpollers = 0;
-volatile struct fdlist update_list; // Global update list
+volatile struct fdlist update_list[MAX_TGROUPS]; // Global update list, one per thread group
THREAD_LOCAL int *fd_updt = NULL; // FD updates list
THREAD_LOCAL int fd_nbupdt = 0; // number of updates in the list
cur_poller.clo(fd);
/* we don't want this FD anymore in the global list */
- fd_rm_from_fd_list(&update_list, fd);
+ fd_rm_from_fd_list(&update_list[tgid - 1], fd);
/* no more updates on this FD are relevant anymore */
HA_ATOMIC_STORE(&fdtab[fd].update_mask, 0);
return;
} while (!_HA_ATOMIC_CAS(&fdtab[fd].update_mask, &update_mask, fdtab[fd].thread_mask));
- fd_add_to_fd_list(&update_list, fd);
+ fd_add_to_fd_list(&update_list[tgid - 1], fd);
if (fd_active(fd) && !(fdtab[fd].thread_mask & tid_bit)) {
/* we need to wake up another thread to handle it immediately, any will fit,
goto fail_info;
}
- update_list.first = update_list.last = -1;
+ for (p = 0; p < MAX_TGROUPS; p++)
+ update_list[p].first = update_list[p].last = -1;
for (p = 0; p < global.maxsock; p++) {
/* Mark the fd as out of the fd cache */