{
struct thread_ctx *ctx = &ha_thread_ctx[thr];
- if (sleeping_thread_mask & (1UL << thr) &&
- (_HA_ATOMIC_LOAD(&ctx->flags) & TH_FL_NOTIFIED) == 0) {
+ if ((_HA_ATOMIC_FETCH_OR(&ctx->flags, TH_FL_NOTIFIED) & (TH_FL_SLEEPING|TH_FL_NOTIFIED)) == TH_FL_SLEEPING) {
char c = 'c';
- _HA_ATOMIC_OR(&ctx->flags, TH_FL_NOTIFIED);
DISGUISE(write(poller_wr_pipe[thr], &c, 1));
}
}
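The waker path above relies on a single atomic fetch-or: it publishes TH_FL_NOTIFIED and, in the same operation, reads back the previous flags, so only the first caller that finds the target thread sleeping and not yet notified pays for the pipe write. Below is a minimal standalone sketch of that notify-once pattern, using plain GCC atomics instead of the _HA_ATOMIC_* wrappers; the names demo_flags and demo_wake are hypothetical and only illustrate the flag logic, not the real poller wakeup.

/* notify-once sketch: the fetch-or both sets NOTIFIED and returns the
 * previous state, so concurrent wakers elect exactly one writer.
 */
#include <stdio.h>

#define DEMO_FL_NOTIFIED  0x04
#define DEMO_FL_SLEEPING  0x08

static unsigned int demo_flags = DEMO_FL_SLEEPING;

static void demo_wake(void)
{
	unsigned int prev = __atomic_fetch_or(&demo_flags, DEMO_FL_NOTIFIED, __ATOMIC_SEQ_CST);

	if ((prev & (DEMO_FL_SLEEPING | DEMO_FL_NOTIFIED)) == DEMO_FL_SLEEPING)
		puts("first waker: would write one byte to the poller pipe");
	else
		puts("already notified or not sleeping: nothing to do");
}

int main(void)
{
	demo_wake();   /* prints the "first waker" message */
	demo_wake();   /* NOTIFIED already set: no second pipe write */
	return 0;
}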
extern char hostname[MAX_HOSTNAME_LEN];
extern char *localpeer;
extern unsigned int warned; /* bitfield of a few warnings to emit just once */
-extern volatile unsigned long sleeping_thread_mask;
extern struct list proc_list; /* list of process in mworker mode */
extern int master; /* 1 if in master, 0 otherwise */
extern unsigned int rlim_fd_cur_at_boot;
#define TH_FL_STUCK 0x00000001
#define TH_FL_TASK_PROFILING 0x00000002
#define TH_FL_NOTIFIED 0x00000004 /* task was notified about the need to wake up */
+#define TH_FL_SLEEPING 0x00000008 /* thread won't check its task list before next wakeup */
/* Thread group information. This defines a base and a count of global thread
/* mark the current thread as stuck to detect it upon next invocation
* if it didn't move.
*/
- if (!((threads_harmless_mask|sleeping_thread_mask) & tid_bit))
+ if (!(threads_harmless_mask & tid_bit) &&
+ !(_HA_ATOMIC_LOAD(&th_ctx->flags) & TH_FL_SLEEPING))
_HA_ATOMIC_OR(&th_ctx->flags, TH_FL_STUCK);
}
thread_harmless_end();
thread_idle_end();
- if (sleeping_thread_mask & tid_bit)
- _HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);
+ _HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_SLEEPING);
}
/* disable the specified poller */
static struct list cfg_cfgfiles = LIST_HEAD_INIT(cfg_cfgfiles);
int pid; /* current process id */
-volatile unsigned long sleeping_thread_mask = 0; /* Threads that are about to sleep in poll() */
volatile unsigned long stopping_thread_mask = 0; /* Threads acknowledged stopping */
/* global options */
if (thread_has_tasks())
activity[tid].wake_tasks++;
else {
- _HA_ATOMIC_OR(&sleeping_thread_mask, tid_bit);
+ _HA_ATOMIC_OR(&th_ctx->flags, TH_FL_SLEEPING);
_HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_NOTIFIED);
__ha_barrier_atomic_store();
if (thread_has_tasks()) {
activity[tid].wake_tasks++;
- _HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);
+ _HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_SLEEPING);
} else
wake = 0;
}
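On the sleeper side the ordering matters: the thread first publishes TH_FL_SLEEPING and drops TH_FL_NOTIFIED, issues a store barrier, then re-checks its run queues. Without that re-check, a task queued (and a wakeup attempted) after the first thread_has_tasks() test but before the flag became visible could be missed, leaving the thread asleep with work pending. Below is a minimal standalone sketch of that sequence with plain GCC atomics; demo_flags, demo_has_tasks, demo_poll and demo_enter_poll are hypothetical stand-ins for the real scheduler and poller hooks.

/* sleep-side sketch: publish SLEEPING, clear NOTIFIED, fence, then re-check
 * for work before actually blocking in the poller.
 */
#include <stdio.h>
#include <stdbool.h>

#define DEMO_FL_NOTIFIED  0x04
#define DEMO_FL_SLEEPING  0x08

static unsigned int demo_flags;

static bool demo_has_tasks(void) { return false; } /* stand-in for thread_has_tasks() */
static void demo_poll(void)      { puts("sleeping in poll()"); }

static void demo_enter_poll(void)
{
	if (demo_has_tasks()) {
		puts("tasks pending: stay awake");
		return;
	}

	__atomic_fetch_or(&demo_flags, DEMO_FL_SLEEPING, __ATOMIC_SEQ_CST);
	__atomic_fetch_and(&demo_flags, ~DEMO_FL_NOTIFIED, __ATOMIC_SEQ_CST);
	__atomic_thread_fence(__ATOMIC_SEQ_CST); /* counterpart of __ha_barrier_atomic_store() */

	if (demo_has_tasks()) {
		/* work slipped in while SLEEPING was being published: back out */
		__atomic_fetch_and(&demo_flags, ~DEMO_FL_SLEEPING, __ATOMIC_SEQ_CST);
		puts("late task: stay awake");
		return;
	}

	demo_poll();
	__atomic_fetch_and(&demo_flags, ~DEMO_FL_SLEEPING, __ATOMIC_SEQ_CST);
}

int main(void)
{
	demo_enter_poll();   /* no tasks in this sketch, so it "sleeps" once */
	return 0;
}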
if (!p || n - p < 1000000000UL)
goto update_and_leave;
- if ((threads_harmless_mask|sleeping_thread_mask|threads_to_dump) & (1UL << thr)) {
+ if ((_HA_ATOMIC_LOAD(&ha_thread_ctx[thr].flags) & TH_FL_SLEEPING) ||
+     ((threads_harmless_mask|threads_to_dump) & (1UL << thr))) {
/* This thread is currently doing exactly nothing
* waiting in the poll loop (unlikely but possible),
* waiting for all other threads to join the rendez-vous