TL_CLASSES /* must be last */
};
-/* thread info flags, for ha_thread_info[].flags */
-#define TI_FL_STUCK 0x00000001
+/* thread_ctx flags, for ha_thread_ctx[].flags */
+#define TH_FL_STUCK 0x00000001
/* This structure describes all the per-thread info we need. When threads are
* disabled, it contains the same info for the single running thread.
*/
struct thread_info {
- unsigned int flags; /* thread info flags, TI_FL_* */
-
struct thread_ctx { /* per-thread context; the flags land here, per ha_thread_ctx[].flags above */
#ifdef CONFIG_HAP_POOLS
struct list pool_lru_head; /* oldest objects */
#endif
unsigned int rqueue_ticks; /* Insertion counter for the run queue */
int current_queue; /* points to current tasklet list being run, -1 if none */
unsigned int nb_tasks; /* number of tasks allocated on this thread */
+ uint flags; /* thread flags, TH_FL_* */
uint8_t tl_class_mask; /* bit mask of non-empty tasklets classes */
- // 11 bytes hole here
+ // 7 bytes hole here
ALWAYS_ALIGN(2*sizeof(void*));
struct list tasklets[TL_CLASSES]; /* tasklets (and/or tasks) to run, by class */
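The hole arithmetic in the comments above checks out on a common LP64 target, where ALWAYS_ALIGN(2*sizeof(void*)) forces a 16-byte boundary: assuming the preceding members end on such a boundary, nb_tasks (4) + flags (4) + tl_class_mask (1) occupy 9 bytes, leaving 7 bytes of padding (11 before flags moved in). A minimal standalone sketch with a hypothetical struct, not HAProxy's:

#include <assert.h>   /* static_assert (C11) */
#include <stddef.h>   /* offsetof */
#include <stdint.h>

/* Hypothetical layout mirroring the tail of the struct above: the last
 * partial 16-byte block holds nb_tasks (4) + flags (4) + tl_class_mask (1)
 * = 9 bytes, so 16 - 9 = 7 bytes of padding precede the aligned member.
 * Without flags it held only 5 bytes, hence the former 11-byte hole.
 */
struct pad_demo {
    unsigned int nb_tasks;
    unsigned int flags;
    uint8_t tl_class_mask;
    /* 7 bytes hole here */
    _Alignas(2 * sizeof(void *)) struct { void *n, *p; } tasklets[4];
};

static_assert(offsetof(struct pad_demo, tasklets) == 16,
              "expect a 7-byte hole after tl_class_mask on LP64");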
unsigned long thr_bit = 1UL << thr;
unsigned long long p = ha_thread_ctx[thr].prev_cpu_time;
unsigned long long n = now_cpu_time_thread(thr);
- int stuck = !!(ha_thread_info[thr].flags & TI_FL_STUCK);
+ int stuck = !!(ha_thread_ctx[thr].flags & TH_FL_STUCK);
chunk_appendf(buf,
"%c%cThread %-2u: id=0x%llx act=%d glob=%d wq=%d rq=%d tl=%d tlsz=%d rqsz=%d\n"
* if it didn't move.
*/
if (!((threads_harmless_mask|sleeping_thread_mask) & tid_bit))
- ti->flags |= TI_FL_STUCK;
+ th_ctx->flags |= TH_FL_STUCK;
}
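threads_harmless_mask and sleeping_thread_mask each hold one bit per thread: a thread that is neither harmless (parked on purpose, e.g. for thread isolation) nor asleep in the poller is expected to make forward progress, so only those threads may be flagged stuck. A simplified sketch of that test, with illustrative names rather than HAProxy's exact API:

#include <stdatomic.h>

static _Atomic unsigned long harmless_mask; /* threads parked on purpose */
static _Atomic unsigned long sleeping_mask; /* threads waiting in the poller */

/* Only threads that are neither harmless nor sleeping are expected to
 * make forward progress and may be flagged as stuck.
 */
static int thread_is_suspect(int tid)
{
    unsigned long bit = 1UL << tid;

    return !((atomic_load(&harmless_mask) |
              atomic_load(&sleeping_mask)) & bit);
}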
static int init_debug_per_thread()
uint new_flags, must_stop;
ulong rmask, tmask;
- ti->flags &= ~TI_FL_STUCK; // this thread is still running
+ th_ctx->flags &= ~TH_FL_STUCK; // this thread is still running
/* do nothing if the FD was taken over under us */
do {
}
#endif
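The truncated do-loop above is the takeover guard named in the comment: the masks describing the FD's ownership are re-read until they form a coherent pair, and the caller does nothing if the FD no longer belongs to this thread. A sketch of that pattern with C11 atomics; the field names and coherence condition are illustrative, modeled on the comment rather than the elided loop body:

#include <stdatomic.h>

struct fd_entry {
    _Atomic unsigned long thread_mask;  /* threads allowed to use the FD */
    _Atomic unsigned long running_mask; /* threads currently inside it   */
};

/* Returns 1 if this thread still owns the FD, 0 if it was taken over.
 * The two masks are re-read until they form a coherent pair, i.e. no
 * thread appears running without being allowed.
 */
static int fd_still_ours(struct fd_entry *e, unsigned long tid_bit)
{
    unsigned long tmask, rmask;

    do {
        tmask = atomic_load(&e->thread_mask);
        rmask = atomic_load(&e->running_mask);
    } while (rmask & ~tmask);

    return !!(tmask & tid_bit);
}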
- ti->flags &= ~TI_FL_STUCK; // this thread is still running
+ th_ctx->flags &= ~TH_FL_STUCK; // this thread is still running
} /* end of for (max_accept--) */
end:
t = (struct task *)LIST_ELEM(tl_queues[queue].n, struct tasklet *, list);
state = t->state & (TASK_SHARED_WQ|TASK_SELF_WAKING|TASK_HEAVY|TASK_F_TASKLET|TASK_KILLED|TASK_F_USR1);
- ti->flags &= ~TI_FL_STUCK; // this thread is still running
+ th_ctx->flags &= ~TH_FL_STUCK; // this thread is still running
activity[tid].ctxsw++;
ctx = t->context;
process = t->process;
int heavy_queued = 0;
int budget;
- ti->flags &= ~TI_FL_STUCK; // this thread is still running
+ th_ctx->flags &= ~TH_FL_STUCK; // this thread is still running
if (!thread_has_tasks()) {
activity[tid].empty_rq++;
* certain that we're not witnessing an exceptional spike of
* CPU usage due to a configuration issue (like running tens
* of thousands of tasks in a single loop), we'll check if the
- * scheduler is still alive by setting the TI_FL_STUCK flag
+ * scheduler is still alive by setting the TH_FL_STUCK flag
* that the scheduler clears when switching to the next task.
* If it's already set, then it's our second call with no
* progress and the thread is dead.
*/
- if (!(ha_thread_info[thr].flags & TI_FL_STUCK)) {
- _HA_ATOMIC_OR(&ha_thread_info[thr].flags, TI_FL_STUCK);
+ if (!(ha_thread_ctx[thr].flags & TH_FL_STUCK)) {
+ _HA_ATOMIC_OR(&ha_thread_ctx[thr].flags, TH_FL_STUCK);
goto update_and_leave;
}
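A condensed sketch of the two-strike protocol the comment describes, using C11 atomics instead of HAProxy's _HA_ATOMIC_* wrappers (the function names are illustrative): the watchdog arms TH_FL_STUCK on the first expiry, the scheduler clears it on every task switch, and a second expiry with the bit still set means no progress was made in between.

#include <stdatomic.h>

#define TH_FL_STUCK 0x00000001

struct th_ctx_demo {
    _Atomic unsigned int flags;
};

/* called by the scheduler each time it dequeues a task */
static void sched_progress(struct th_ctx_demo *ctx)
{
    atomic_fetch_and(&ctx->flags, ~TH_FL_STUCK); /* still running */
}

/* called on each watchdog expiry; returns 1 when the thread is stuck */
static int watchdog_tick(struct th_ctx_demo *ctx)
{
    if (atomic_load(&ctx->flags) & TH_FL_STUCK)
        return 1;                              /* second strike: no progress */
    atomic_fetch_or(&ctx->flags, TH_FL_STUCK); /* first strike: arm the flag */
    return 0;
}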