{
appctx->st0 = appctx->st1 = appctx->st2 = 0;
appctx->io_release = NULL;
- appctx->process_mask = thread_mask;
+ appctx->thread_mask = thread_mask;
appctx->state = APPLET_SLEEPING;
}
fdtab[fd].linger_risk = 0;
fdtab[fd].cloned = 0;
fdtab[fd].cache = 0;
- fdtab[fd].process_mask = thread_mask;
+ fdtab[fd].thread_mask = thread_mask;
SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
SPIN_LOCK(FDTAB_LOCK, &fdtab_lock);
return t;
}
+/* change the thread affinity of a task to <thread_mask> */
static inline void task_set_affinity(struct task *t, unsigned long thread_mask)
{
-
- t->process_mask = thread_mask;
+ t->thread_mask = thread_mask;
}
+
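For illustration only (not part of this patch), a minimal standalone sketch of how a setter like task_set_affinity() above can be used; struct task, tid_bit and main() below are simplified stand-ins rather than HAProxy code:

#include <stdio.h>

/* illustrative stand-in: only the thread_mask field of the real struct task */
struct task {
	unsigned long thread_mask;   /* bits of the threads allowed to run the task */
};

static inline void task_set_affinity(struct task *t, unsigned long thread_mask)
{
	t->thread_mask = thread_mask;
}

int main(void)
{
	struct task t = { 0 };
	unsigned long tid_bit = 1UL << 3;   /* pretend the calling thread is thread 3 */

	task_set_affinity(&t, tid_bit);     /* pin the task to that thread only */
	printf("pinned mask: %#lx\n", t.thread_mask);

	task_set_affinity(&t, ~0UL);        /* let any thread run it */
	printf("open mask:   %#lx\n", t.thread_mask);
	return 0;
}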
/*
* Unlink the task from the wait queue, and possibly update the last_timer
* pointer. A pointer to the task itself is returned. The task *must* already
t->wq.node.leaf_p = NULL;
t->rq.node.leaf_p = NULL;
t->pending_state = t->state = TASK_SLEEPING;
- t->process_mask = thread_mask;
+ t->thread_mask = thread_mask;
t->nice = 0;
t->calls = 0;
t->expire = TICK_ETERNITY;
if the command is terminated or the session released */
int cli_severity_output; /* used within the cli_io_handler to format severity output of informational feedback */
struct buffer_wait buffer_wait; /* position in the list of objects waiting for a buffer */
- unsigned long process_mask; /* mask of thread IDs authorized to process the applet */
+ unsigned long thread_mask; /* mask of thread IDs authorized to process the applet */
union {
struct {
struct fdtab {
void (*iocb)(int fd); /* I/O handler */
void *owner; /* the connection or listener associated with this fd, NULL if closed */
- unsigned long process_mask; /* mask of thread IDs authorized to process the task */
+ unsigned long thread_mask; /* mask of thread IDs authorized to process the fd */
#ifdef USE_THREAD
HA_SPINLOCK_T lock;
#endif
void *context; /* the task's context */
struct eb32_node wq; /* ebtree node used to hold the task in the wait queue */
int expire; /* next expiration date for this task, in ticks */
- unsigned long process_mask; /* mask of thread IDs authorized to process the task */
+ unsigned long thread_mask; /* mask of thread IDs authorized to process the task */
};
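As a hedged aside, the renamed thread_mask fields in the applet, fd and task structures above all follow the same convention: one bit per thread, with the calling thread's own bit typically held in tid_bit. A standalone sketch of that convention, with purely illustrative values:

#include <stdio.h>

int main(void)
{
	int tid = 5;                        /* illustrative thread id */
	unsigned long tid_bit = 1UL << tid; /* one bit per thread */

	/* an object that only threads 2 and 5 are allowed to touch */
	unsigned long thread_mask = (1UL << 2) | (1UL << 5);

	if (thread_mask & tid_bit)
		printf("thread %d may process this object\n", tid);
	else
		printf("thread %d must leave it to another thread\n", tid);
	return 0;
}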
/*
curr = LIST_NEXT(&applet_active_queue, typeof(curr), runq);
while (&curr->runq != &applet_active_queue) {
next = LIST_NEXT(&curr->runq, typeof(next), runq);
- if (curr->process_mask & tid_bit) {
+ if (curr->thread_mask & tid_bit) {
LIST_DEL(&curr->runq);
curr->state = APPLET_RUNNING;
LIST_ADDQ(&applet_cur_queue, &curr->runq);
unsigned int e = epoll_events[count].events;
fd = epoll_events[count].data.fd;
- if (!fdtab[fd].owner || !(fdtab[fd].process_mask & tid_bit))
+ if (!fdtab[fd].owner || !(fdtab[fd].thread_mask & tid_bit))
continue;
/* it looks complicated but gcc can optimize it away when constants
unsigned int n = 0;
fd = kev[count].ident;
- if (!fdtab[fd].owner || !(fdtab[fd].process_mask & tid_bit))
+ if (!fdtab[fd].owner || !(fdtab[fd].thread_mask & tid_bit))
continue;
if (kev[count].filter == EVFILT_READ) {
for (count = 0, fd = fds * 8*sizeof(**fd_evts); count < 8*sizeof(**fd_evts) && fd < maxfd; count++, fd++) {
- if (!fdtab[fd].owner || !(fdtab[fd].process_mask & tid_bit))
+ if (!fdtab[fd].owner || !(fdtab[fd].thread_mask & tid_bit))
continue;
sr = (rn >> count) & 1;
/* if we specify read first, the accepts and zero reads will be
* seen first. Moreover, system buffers will be flushed faster.
*/
- if (!fdtab[fd].owner || !(fdtab[fd].process_mask & tid_bit))
+ if (!fdtab[fd].owner || !(fdtab[fd].thread_mask & tid_bit))
continue;
if (FD_ISSET(fd, tmp_evts[DIR_RD]))
fdtab[fd].owner = NULL;
fdtab[fd].updated = 0;
fdtab[fd].new = 0;
- fdtab[fd].process_mask = 0;
+ fdtab[fd].thread_mask = 0;
if (do_close)
close(fd);
SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
for (entry = 0; entry < fd_cache_num; ) {
fd = fd_cache[entry];
- if (!(fdtab[fd].process_mask & tid_bit))
+ if (!(fdtab[fd].thread_mask & tid_bit))
goto next;
if (SPIN_TRYLOCK(FD_LOCK, &fdtab[fd].lock))
goto next;
while (local_tasks_count < 16) {
t = eb32_entry(rq_next, struct task, rq);
rq_next = eb32_next(rq_next);
- if (t->process_mask & tid_bit) {
+ if (t->thread_mask & tid_bit) {
/* detach the task from the queue */
__task_unlink_rq(t);
t->state |= TASK_RUNNING;
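Purely as an illustration of the pattern now shared by the pollers, the fd cache scan and the run queue above: walk the shared entries and have each thread silently skip those whose thread_mask does not contain its tid_bit. The entry array and field names below are stand-ins, not HAProxy structures:

#include <stdio.h>

struct entry {
	int used;                  /* stand-in for fdtab[fd].owner being set */
	unsigned long thread_mask; /* threads allowed to process this entry */
};

int main(void)
{
	unsigned long tid_bit = 1UL << 0;   /* pretend we are thread 0 */
	struct entry entries[3] = {
		{ 1, 1UL << 0 },            /* ours: processed */
		{ 1, 1UL << 1 },            /* owned by thread 1: skipped */
		{ 0, ~0UL },                /* unused slot: skipped */
	};
	int i;

	for (i = 0; i < 3; i++) {
		if (!entries[i].used || !(entries[i].thread_mask & tid_bit))
			continue;           /* not ours (or not in use): skip silently */
		printf("processing entry %d on this thread\n", i);
	}
	return 0;
}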