*/
enum { all_threads_mask = 1UL };
enum { all_tgroups_mask = 1UL };
-enum { threads_harmless_mask = 0 };
-enum { threads_idle_mask = 0 };
-enum { threads_want_rdv_mask = 0 };
enum { tid_bit = 1UL };
enum { tid = 0 };
enum { tgid = 1 };
extern volatile unsigned long all_threads_mask;
extern volatile unsigned long all_tgroups_mask;
-extern volatile unsigned long threads_harmless_mask;
-extern volatile unsigned long threads_idle_mask;
-extern volatile unsigned long threads_want_rdv_mask;
extern THREAD_LOCAL unsigned long tid_bit; /* The bit corresponding to the thread id */
extern THREAD_LOCAL unsigned int tid; /* The thread id */
extern THREAD_LOCAL unsigned int tgid; /* The thread group id (starts at 1) */
-/* explanation for threads_want_rdv_mask, and threads_harmless_mask:
- * - threads_want_rdv_mask is a bit field indicating all threads that have
+/* explanation for tg_ctx->threads_want_rdv, and tg_ctx->threads_harmless:
+ * - tg_ctx->threads_want_rdv is a bit field indicating all threads that have
* requested a rendez-vous of other threads using thread_isolate().
- * - threads_harmless_mask is a bit field indicating all threads that are
+ * - tg_ctx->threads_harmless is a bit field indicating all threads that are
* currently harmless in that they promise not to access a shared resource.
*
* For a given thread, its bits in want_rdv and harmless can be translated like
tgid = thr->tgid;
tid_bit = 1UL << tid; /* FIXME: must become thr->ltid_bit */
th_ctx = &ha_thread_ctx[tid];
+ tg_ctx = &ha_tgroup_ctx[tgid-1];
} else {
tgid = 1;
tid = 0;
ti = &ha_thread_info[0];
tg = &ha_tgroup_info[0];
th_ctx = &ha_thread_ctx[0];
+ tg_ctx = &ha_tgroup_ctx[0];
}
}
*/
static inline void thread_idle_now()
{
- HA_ATOMIC_OR(&threads_idle_mask, tid_bit);
+ HA_ATOMIC_OR(&tg_ctx->threads_idle, ti->ltid_bit);
}
/* Ends the harmless period started by thread_idle_now(), i.e. the thread is
*/
static inline void thread_idle_end()
{
- HA_ATOMIC_AND(&threads_idle_mask, ~tid_bit);
+ HA_ATOMIC_AND(&tg_ctx->threads_idle, ~ti->ltid_bit);
}
*/
static inline void thread_harmless_now()
{
- HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
+ HA_ATOMIC_OR(&tg_ctx->threads_harmless, ti->ltid_bit);
}
/* Ends the harmless period started by thread_harmless_now(). Usually this is
static inline void thread_harmless_end()
{
while (1) {
- HA_ATOMIC_AND(&threads_harmless_mask, ~tid_bit);
- if (likely((threads_want_rdv_mask & all_threads_mask & ~tid_bit) == 0))
+		HA_ATOMIC_AND(&tg_ctx->threads_harmless, ~ti->ltid_bit);
+ if (likely((_HA_ATOMIC_LOAD(&tg_ctx->threads_want_rdv) &
+ tg->threads_enabled & ~ti->ltid_bit) == 0))
break;
thread_harmless_till_end();
}
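
[Illustrative sketch, not part of the patch] The harmless pair above is typically wrapped around code that blocks without touching shared state, so that a concurrent thread_isolate() is not held up by this thread; do_slow_blocking_wait() below is a hypothetical placeholder.

/* Hedged usage sketch: declare ourselves harmless around a blocking wait.
 * do_slow_blocking_wait() is hypothetical and stands for any operation that
 * does not touch shared memory.
 */
static void wait_while_harmless(void)
{
	thread_harmless_now();      /* sets our bit in tg_ctx->threads_harmless */
	do_slow_blocking_wait();    /* hypothetical: no shared state accessed */
	thread_harmless_end();      /* clears the bit; may wait in
	                             * thread_harmless_till_end() if another
	                             * thread requested a rendez-vous */
}
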
/* an isolated thread has harmless cleared and want_rdv set */
static inline unsigned long thread_isolated()
{
- return threads_want_rdv_mask & ~threads_harmless_mask & tid_bit;
+ return _HA_ATOMIC_LOAD(&tg_ctx->threads_want_rdv) &
+ ~_HA_ATOMIC_LOAD(&tg_ctx->threads_harmless) & ti->ltid_bit;
}
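
[Illustrative sketch] thread_isolated() lends itself to sanity checks in code that must only run under isolation; BUG_ON() is assumed to be HAProxy's usual assertion macro, and struct my_obj / my_obj_free() are hypothetical names.

/* Hedged sketch: a destructor that is only legal while the caller is
 * isolated. BUG_ON() is assumed available (a plain assert() works too);
 * struct my_obj and my_obj_free() are hypothetical.
 */
static void my_obj_destroy(struct my_obj *obj)
{
	BUG_ON(!thread_isolated());   /* caller must hold the rendez-vous */
	my_obj_free(obj);             /* hypothetical release of shared data */
}
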
/* Returns 1 if the cpu set is currently restricted for the process else 0.
char __end[0] __attribute__((aligned(64)));
};
+/* This structure describes the group-specific context (e.g. active threads
+ * etc). Each group's context is padded to its own cache line to limit false sharing.
+ */
+struct tgroup_ctx {
+	ulong threads_want_rdv; /* mask of threads that want a rendez-vous */
+ ulong threads_harmless; /* mask of threads that are not modifying anything */
+ ulong threads_idle; /* mask of threads idling in the poller */
+ /* pad to cache line (64B) */
+ char __pad[0]; /* unused except to check remaining room */
+ char __end[0] __attribute__((aligned(64)));
+};
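
[Illustrative sketch] The __pad[]/__end[] pair exists so that startup code can check the remaining room in the cache line; a minimal compile-time equivalent of that check might be:

/* Hedged sketch: with __end aligned on 64 bytes, the structure can only be a
 * multiple of 64 bytes long, so requiring exactly 64 verifies that the
 * members fit in a single cache line.
 */
_Static_assert(sizeof(struct tgroup_ctx) == 64,
               "struct tgroup_ctx must fit in one cache line");
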
+
/* This structure describes all the per-thread info we need. When threads are
* disabled, it contains the same info for the single running thread. This is
* stable across all of a thread's life, and is being pointed to by the
*/
struct thread_info {
const struct tgroup_info *tg; /* config of the thread-group this thread belongs to */
+ struct tgroup_ctx *tg_ctx; /* context of the thread-group this thread belongs to */
uint tid, ltid; /* process-wide and group-wide thread ID (start at 0) */
ulong ltid_bit; /* bit masks for the tid/ltid */
uint tgid; /* ID of the thread group this thread belongs to (starts at 1; 0=unset) */
extern struct thread_info ha_thread_info[MAX_THREADS];
extern THREAD_LOCAL const struct thread_info *ti; /* thread_info for the current thread */
+extern struct tgroup_ctx ha_tgroup_ctx[MAX_TGROUPS];
+extern THREAD_LOCAL struct tgroup_ctx *tg_ctx; /* ha_tgroup_ctx for the current thread */
+
extern struct thread_ctx ha_thread_ctx[MAX_THREADS];
extern THREAD_LOCAL struct thread_ctx *th_ctx; /* ha_thread_ctx for the current thread */
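
[Illustrative sketch] Code that inspects an arbitrary thread (the debugger and watchdog changes below) now has to locate that thread's group context first; the helper name below is mine, not part of the patch, but the expression matches the pattern used in the rest of the diff.

/* Hedged sketch: resolve the group context owning global thread <thr>,
 * relying only on ha_thread_info[].tgid starting at 1 as documented above.
 */
static inline struct tgroup_ctx *thr_group_ctx(int thr)
{
	return &ha_tgroup_ctx[ha_thread_info[thr].tgid - 1];
}

/* e.g.: is thread <thr> currently harmless?
 *   _HA_ATOMIC_LOAD(&thr_group_ctx(thr)->threads_harmless) & ha_thread_info[thr].ltid_bit
 */
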
unsigned long long p = ha_thread_ctx[thr].prev_cpu_time;
unsigned long long n = now_cpu_time_thread(thr);
int stuck = !!(ha_thread_ctx[thr].flags & TH_FL_STUCK);
+ int tgrp = ha_thread_info[thr].tgid;
chunk_appendf(buf,
"%c%cThread %-2u: id=0x%llx act=%d glob=%d wq=%d rq=%d tl=%d tlsz=%d rqsz=%d\n"
chunk_appendf(buf,
" harmless=%d wantrdv=%d",
- !!(threads_harmless_mask & thr_bit),
+ !!(_HA_ATOMIC_LOAD(&ha_tgroup_ctx[tgrp-1].threads_harmless) & thr_bit),
!!(th_ctx->flags & TH_FL_TASK_PROFILING));
chunk_appendf(buf, "\n");
/* mark the current thread as stuck to detect it upon next invocation
* if it didn't move.
*/
- if (!(threads_harmless_mask & tid_bit) &&
+ if (!(_HA_ATOMIC_LOAD(&tg_ctx->threads_harmless) & ti->ltid_bit) &&
!(_HA_ATOMIC_LOAD(&th_ctx->flags) & TH_FL_SLEEPING))
_HA_ATOMIC_OR(&th_ctx->flags, TH_FL_STUCK);
}
struct thread_info ha_thread_info[MAX_THREADS] = { };
THREAD_LOCAL const struct thread_info *ti = &ha_thread_info[0];
+struct tgroup_ctx ha_tgroup_ctx[MAX_TGROUPS] = { };
+THREAD_LOCAL struct tgroup_ctx *tg_ctx = &ha_tgroup_ctx[0];
+
struct thread_ctx ha_thread_ctx[MAX_THREADS] = { };
THREAD_LOCAL struct thread_ctx *th_ctx = &ha_thread_ctx[0];
#ifdef USE_THREAD
-volatile unsigned long threads_want_rdv_mask __read_mostly = 0;
-volatile unsigned long threads_harmless_mask = 0;
-volatile unsigned long threads_idle_mask = 0;
volatile unsigned long all_threads_mask __read_mostly = 1; // nbthread 1 assumed by default
volatile unsigned long all_tgroups_mask __read_mostly = 1; // nbtgroup 1 assumed by default
THREAD_LOCAL unsigned int tgid = 1; // thread group ID starts at 1
*/
void thread_harmless_till_end()
{
- _HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
- while (threads_want_rdv_mask & all_threads_mask & ~tid_bit) {
+ _HA_ATOMIC_OR(&tg_ctx->threads_harmless, ti->ltid_bit);
+ while (_HA_ATOMIC_LOAD(&tg_ctx->threads_want_rdv) & tg->threads_enabled & ~ti->ltid_bit) {
ha_thread_relax();
}
}
/* Isolates the current thread : request the ability to work while all other
* threads are harmless, as defined by thread_harmless_now() (i.e. they're not
* going to touch any visible memory area). Only returns once all of them are
- * harmless, with the current thread's bit in threads_harmless_mask cleared.
+ * harmless, with the current thread's bit in tg_ctx->threads_harmless cleared.
* Needs to be completed using thread_release().
*/
void thread_isolate()
{
unsigned long old;
- _HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
+ _HA_ATOMIC_OR(&tg_ctx->threads_harmless, ti->ltid_bit);
__ha_barrier_atomic_store();
- _HA_ATOMIC_OR(&threads_want_rdv_mask, tid_bit);
+ _HA_ATOMIC_OR(&tg_ctx->threads_want_rdv, ti->ltid_bit);
/* wait for all threads to become harmless */
- old = threads_harmless_mask;
+ old = _HA_ATOMIC_LOAD(&tg_ctx->threads_harmless);
while (1) {
- if (unlikely((old & all_threads_mask) != all_threads_mask))
- old = threads_harmless_mask;
- else if (_HA_ATOMIC_CAS(&threads_harmless_mask, &old, old & ~tid_bit))
+ if (unlikely((old & tg->threads_enabled) != tg->threads_enabled))
+ old = _HA_ATOMIC_LOAD(&tg_ctx->threads_harmless);
+ else if (_HA_ATOMIC_CAS(&tg_ctx->threads_harmless, &old, old & ~ti->ltid_bit))
break;
ha_thread_relax();
/* Isolates the current thread : request the ability to work while all other
* threads are idle, as defined by thread_idle_now(). It only returns once
* all of them are both harmless and idle, with the current thread's bit in
- * threads_harmless_mask and idle_mask cleared. Needs to be completed using
+ * tg_ctx->threads_harmless and tg_ctx->threads_idle cleared. Needs to be completed using
* thread_release(). By doing so the thread also engages in being safe against
* any actions that other threads might be about to start under the same
* conditions. This specifically targets destruction of any internal structure,
{
unsigned long old;
- _HA_ATOMIC_OR(&threads_idle_mask, tid_bit);
- _HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
+ _HA_ATOMIC_OR(&tg_ctx->threads_idle, ti->ltid_bit);
+ _HA_ATOMIC_OR(&tg_ctx->threads_harmless, ti->ltid_bit);
__ha_barrier_atomic_store();
- _HA_ATOMIC_OR(&threads_want_rdv_mask, tid_bit);
+ _HA_ATOMIC_OR(&tg_ctx->threads_want_rdv, ti->ltid_bit);
/* wait for all threads to become harmless */
- old = threads_harmless_mask;
+ old = _HA_ATOMIC_LOAD(&tg_ctx->threads_harmless);
while (1) {
- unsigned long idle = _HA_ATOMIC_LOAD(&threads_idle_mask);
+ unsigned long idle = _HA_ATOMIC_LOAD(&tg_ctx->threads_idle);
- if (unlikely((old & all_threads_mask) != all_threads_mask))
- old = _HA_ATOMIC_LOAD(&threads_harmless_mask);
- else if ((idle & all_threads_mask) == all_threads_mask &&
- _HA_ATOMIC_CAS(&threads_harmless_mask, &old, old & ~tid_bit))
+ if (unlikely((old & tg->threads_enabled) != tg->threads_enabled))
+ old = _HA_ATOMIC_LOAD(&tg_ctx->threads_harmless);
+ else if ((idle & tg->threads_enabled) == tg->threads_enabled &&
+ _HA_ATOMIC_CAS(&tg_ctx->threads_harmless, &old, old & ~ti->ltid_bit))
break;
ha_thread_relax();
* condition will need to wait until out next pass to the poller, or
* our next call to thread_isolate_full().
*/
- _HA_ATOMIC_AND(&threads_idle_mask, ~tid_bit);
+ _HA_ATOMIC_AND(&tg_ctx->threads_idle, ~ti->ltid_bit);
}
/* Cancels the effect of thread_isolate() by releasing the current thread's bit
- * in threads_want_rdv_mask. This immediately allows other threads to expect be
+ * in tg_ctx->threads_want_rdv. This immediately allows other threads to expect to be
* executed, though they will first have to wait for this thread to become
* harmless again (possibly by reaching the poller again).
*/
void thread_release()
{
- _HA_ATOMIC_AND(&threads_want_rdv_mask, ~tid_bit);
+ _HA_ATOMIC_AND(&tg_ctx->threads_want_rdv, ~ti->ltid_bit);
}
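
[Illustrative sketch] thread_isolate() and thread_release() are meant to be used as a pair around a global update; update_global_state() below is hypothetical.

/* Hedged sketch of the intended pairing: between the two calls, every other
 * enabled thread of the group is parked as harmless, so the update needs no
 * additional locking. update_global_state() is hypothetical.
 */
static void safe_global_update(void)
{
	thread_isolate();        /* returns once all other threads are harmless */
	update_global_state();   /* hypothetical: exclusive access to shared data */
	thread_release();        /* drops our bit in tg_ctx->threads_want_rdv */
}
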
/* Sets up threads, signals and masks, and starts threads 2 and above.
ha_tgroup_info[g].count++;
ha_thread_info[t].tgid = g + 1;
ha_thread_info[t].tg = &ha_tgroup_info[g];
+ ha_thread_info[t].tg_ctx = &ha_tgroup_ctx[g];
ut--;
/* switch to next unassigned thread */
if (ha_thread_info[tnum-1].tg == &ha_tgroup_info[tgroup-1]) {
ha_thread_info[tnum-1].tg = NULL;
ha_thread_info[tnum-1].tgid = 0;
+ ha_thread_info[tnum-1].tg_ctx = NULL;
}
}
ha_tgroup_info[tgroup-1].count = ha_tgroup_info[tgroup-1].base = 0;
ha_thread_info[tnum-1].tgid = tgroup;
ha_thread_info[tnum-1].tg = &ha_tgroup_info[tgroup-1];
+ ha_thread_info[tnum-1].tg_ctx = &ha_tgroup_ctx[tgroup-1];
tot++;
}
}
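
[Illustrative sketch] For reference, the group-local identifiers used throughout the patch are assumed to derive from the global thread id and the group's base as follows; the helper is hypothetical and only restates the convention documented in struct thread_info.

/* Hedged sketch of the assumed id convention: a thread's group-local id is
 * its offset from the group's first thread, and ltid_bit is the matching bit
 * in the group's masks.
 */
static void set_local_ids(struct thread_info *t)
{
	t->ltid     = t->tid - t->tg->base;
	t->ltid_bit = 1UL << t->ltid;
}
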
{
unsigned long long n, p;
ulong thr_bit;
- int thr;
+ int thr, tgrp;
switch (si->si_code) {
case SI_TIMER:
if (thr < 0 || thr >= global.nbthread)
break;
+ tgrp = ha_thread_info[thr].tgid;
thr_bit = ha_thread_info[thr].ltid_bit;
p = ha_thread_ctx[thr].prev_cpu_time;
n = now_cpu_time_thread(thr);
goto update_and_leave;
if ((_HA_ATOMIC_LOAD(&th_ctx->flags) & TH_FL_SLEEPING) &&
- ((threads_harmless_mask|threads_to_dump) & thr_bit)) {
+ ((_HA_ATOMIC_LOAD(&ha_tgroup_ctx[tgrp-1].threads_harmless) | threads_to_dump) & thr_bit)) {
/* This thread is currently doing exactly nothing
* waiting in the poll loop (unlikely but possible),
* waiting for all other threads to join the rendez-vous