if ((old ^ new) & FD_EV_POLLED_R)
updt_fd_polling(fd);
- HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+ if (atleast2(fdtab[fd].thread_mask))
+ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
fd_update_cache(fd); /* need an update entry to change the state */
- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ if (atleast2(fdtab[fd].thread_mask))
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
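Every hunk in this patch applies the same transformation: the per-FD spinlock is taken and released only when the FD is shared between threads, i.e. when its thread_mask has more than one bit set. A minimal sketch of the idiom, assuming atleast2() is the usual more-than-one-bit-set test (something like ((x) & ((x) - 1))) and reusing the fdtab/HA_SPIN_* names from the hunks above:

/* Hedged sketch, not HAProxy code: wrap fd_update_cache() in the lock only
 * when two or more threads can touch this FD concurrently.
 */
#define atleast2(x) ((x) & ((x) - 1))	/* assumed: non-zero iff >= 2 bits set */

static inline void fd_update_cache_locked(int fd)
{
	if (atleast2(fdtab[fd].thread_mask))	/* shared FD: lock required */
		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);

	fd_update_cache(fd);	/* need an update entry to change the state */

	if (atleast2(fdtab[fd].thread_mask))
		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}

A single-threaded FD (exactly one bit set in thread_mask) can never race with another thread on its table entry, so eliding the lock is safe and saves two atomic operations per state change.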
/* Disable processing send events on fd <fd> */
if ((old ^ new) & FD_EV_POLLED_W)
updt_fd_polling(fd);
- HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+ if (atleast2(fdtab[fd].thread_mask))
+ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
fd_update_cache(fd); /* need an update entry to change the state */
- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ if (atleast2(fdtab[fd].thread_mask))
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* Disable processing of events on fd <fd> for both directions. */
if ((old ^ new) & FD_EV_POLLED_RW)
updt_fd_polling(fd);
- HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+ if (atleast2(fdtab[fd].thread_mask))
+ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
fd_update_cache(fd); /* need an update entry to change the state */
- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ if (atleast2(fdtab[fd].thread_mask))
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* Report that FD <fd> cannot receive anymore without polling (EAGAIN detected). */
if ((old ^ new) & FD_EV_POLLED_R)
updt_fd_polling(fd);
- HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+ if (atleast2(fdtab[fd].thread_mask))
+ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
fd_update_cache(fd); /* need an update entry to change the state */
- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ if (atleast2(fdtab[fd].thread_mask))
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* Report that FD <fd> can receive again without polling. */
/* marking ready never changes polled status */
HA_ATOMIC_OR(&fdtab[fd].state, FD_EV_READY_R);
- HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+ if (atleast2(fdtab[fd].thread_mask))
+ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
fd_update_cache(fd); /* need an update entry to change the state */
- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ if (atleast2(fdtab[fd].thread_mask))
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* Disable readiness when polled. This is useful to interrupt reading when it
if ((old ^ new) & FD_EV_POLLED_R)
updt_fd_polling(fd);
- HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+ if (atleast2(fdtab[fd].thread_mask))
+ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
fd_update_cache(fd); /* need an update entry to change the state */
- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ if (atleast2(fdtab[fd].thread_mask))
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* Report that FD <fd> cannot send anymore without polling (EAGAIN detected). */
if ((old ^ new) & FD_EV_POLLED_W)
updt_fd_polling(fd);
- HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+ if (atleast2(fdtab[fd].thread_mask))
+ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
fd_update_cache(fd); /* need an update entry to change the state */
- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ if (atleast2(fdtab[fd].thread_mask))
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* Report that FD <fd> can send again without polling. */
/* marking ready never changes polled status */
HA_ATOMIC_OR(&fdtab[fd].state, FD_EV_READY_W);
- HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+ if (atleast2(fdtab[fd].thread_mask))
+ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
fd_update_cache(fd); /* need an update entry to change the state */
- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ if (atleast2(fdtab[fd].thread_mask))
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* Prepare FD <fd> to try to receive */
if ((old ^ new) & FD_EV_POLLED_R)
updt_fd_polling(fd);
- HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+ if (atleast2(fdtab[fd].thread_mask))
+ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
fd_update_cache(fd); /* need an update entry to change the state */
- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ if (atleast2(fdtab[fd].thread_mask))
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* Prepare FD <fd> to try to send */
if ((old ^ new) & FD_EV_POLLED_W)
updt_fd_polling(fd);
- HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+ if (atleast2(fdtab[fd].thread_mask))
+ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
fd_update_cache(fd); /* need an update entry to change the state */
- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ if (atleast2(fdtab[fd].thread_mask))
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* Update events seen for FD <fd> and its state if needed. This should be called
* by the poller to set FD_POLL_* flags. */
static inline void fd_update_events(int fd, int evts)
{
- HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+ if (atleast2(fdtab[fd].thread_mask))
+ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
fdtab[fd].ev &= FD_POLL_STICKY;
fdtab[fd].ev |= evts;
- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ if (atleast2(fdtab[fd].thread_mask))
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
if (fdtab[fd].ev & (FD_POLL_IN | FD_POLL_HUP | FD_POLL_ERR))
fd_may_recv(fd);
/* Prepares <fd> for being polled */
static inline void fd_insert(int fd, void *owner, void (*iocb)(int fd), unsigned long thread_mask)
{
- HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+ if (atleast2(thread_mask))
+ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
fdtab[fd].owner = owner;
fdtab[fd].iocb = iocb;
fdtab[fd].ev = 0;
/* note: do not reset polled_mask here as it indicates which poller
* still knows this FD from a possible previous round.
*/
- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ if (atleast2(thread_mask))
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
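fd_insert() is the one place where the decision is made from the thread_mask argument rather than fdtab[fd].thread_mask: the entry is only being set up, so the table does not necessarily hold the final mask yet. A sketch under that assumption (the thread_mask assignment belongs to the full function and is not shown in the hunk above):

static inline void fd_insert_sketch(int fd, unsigned long thread_mask)
{
	if (atleast2(thread_mask))	/* decide from the caller's mask */
		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);

	fdtab[fd].thread_mask = thread_mask;	/* assumed from the full code */

	if (atleast2(thread_mask))
		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}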
/* These are replacements for FD_SET, FD_CLR, FD_ISSET, working on uints */
static void fd_dodelete(int fd, int do_close)
{
- HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+ unsigned long locked = atleast2(fdtab[fd].thread_mask);
+
+ if (locked)
+ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
if (fdtab[fd].linger_risk) {
/* this is generally set when connecting to servers */
setsockopt(fd, SOL_SOCKET, SO_LINGER,
polled_mask[fd] = 0;
close(fd);
}
- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ if (locked)
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
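Unlike the other hunks, fd_dodelete() evaluates atleast2() once into the locked variable and reuses it for the unlock. That matters if thread_mask can change while the lock is held, which is presumably the case here since deleting an FD detaches it from its threads; re-testing at unlock time could then skip the unlock and leave the spinlock held forever. A sketch of the hazard, with the thread_mask clearing assumed rather than shown in the hunk:

static void fd_dodelete_sketch(int fd)
{
	unsigned long locked = atleast2(fdtab[fd].thread_mask);

	if (locked)
		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);

	fdtab[fd].thread_mask = 0;	/* assumption: FD leaves all threads here */

	if (locked)	/* cached decision still matches the earlier lock */
		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}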
/* Deletes an FD from the fdsets.
continue;
HA_ATOMIC_OR(&fd_cache_mask, tid_bit);
- if (HA_SPIN_TRYLOCK(FD_LOCK, &fdtab[fd].lock)) {
+ if (atleast2(fdtab[fd].thread_mask) && HA_SPIN_TRYLOCK(FD_LOCK, &fdtab[fd].lock)) {
activity[tid].fd_lock++;
continue;
}
fdtab[fd].ev |= FD_POLL_OUT;
if (fdtab[fd].iocb && fdtab[fd].owner && fdtab[fd].ev) {
- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ if (atleast2(fdtab[fd].thread_mask))
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
fdtab[fd].iocb(fd);
}
else {
fd_release_cache_entry(fd);
- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ if (atleast2(fdtab[fd].thread_mask))
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
}
}
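For completeness, a self-contained toy (illustration only, not HAProxy code) showing how the more-than-one-bit test behaves on a few thread masks:

#include <stdio.h>

#define atleast2(x) ((x) & ((x) - 1))	/* non-zero iff more than one bit set */

int main(void)
{
	unsigned long masks[] = { 0x0UL, 0x1UL, 0x4UL, 0x5UL, 0xffUL };
	int i;

	for (i = 0; i < 5; i++)
		printf("mask=0x%02lx -> %s\n", masks[i],
		       atleast2(masks[i]) ? "shared, lock taken"
		                          : "single thread, lock elided");
	return 0;
}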