int tasks_in_list; /* Number of tasks in the per-thread tasklets list */
uint idle_pct; /* idle to total ratio over last sample (percent) */
uint flags; /* thread flags, TH_FL_*, atomic! */
- /* 32-bit hole here */
+ uint active_checks; /* number of active health checks on this thread, incl migrated */
uint32_t sched_wake_date; /* current task/tasklet's wake date or 0 */
uint32_t sched_call_date; /* current task/tasklet's call date (valid if sched_wake_date > 0) */
__decl_thread(HA_SPINLOCK_T rqsh_lock); /* lock protecting the shared runqueue */
struct freq_ctr out_32bps; /* #of 32-byte blocks emitted per second */
+ uint running_checks; /* number of health checks currently running on this thread */
+
unsigned long long out_bytes; /* total #of bytes emitted */
unsigned long long spliced_out_bytes; /* total #of bytes emitted through a kernel pipe */
struct buffer *thread_dump_buffer; /* NULL out of dump, valid during a dump, 0x01 once done */
case __LINE__: SHOW_VAL("check_adopted:",activity[thr].check_adopted, _tot); break;
#endif
case __LINE__: SHOW_VAL("check_started:",activity[thr].check_started, _tot); break;
+ case __LINE__: SHOW_VAL("check_active:", _HA_ATOMIC_LOAD(&ha_thread_ctx[thr].active_checks), _tot); break;
+ case __LINE__: SHOW_VAL("check_running:",_HA_ATOMIC_LOAD(&ha_thread_ctx[thr].running_checks), _tot); break;
#if defined(DEBUG_DEV)
/* keep these ones at the end */
uint my_load = HA_ATOMIC_LOAD(&th_ctx->rq_total);
if (check->state & CHK_ST_READY) {
- /* check was migrated */
+ /* check was migrated, active already counted */
activity[tid].check_adopted++;
}
else if (my_load >= 2) {
* foreign thread. The recipient will restore the expiration.
*/
check->state |= CHK_ST_READY;
+ HA_ATOMIC_INC(&ha_thread_ctx[new_tid].active_checks);
task_unlink_wq(t);
t->expire = TICK_ETERNITY;
task_set_thread(t, new_tid);
TRACE_LEAVE(CHK_EV_TASK_WAKE, check);
return t;
}
+ /* check just woke up, count it as active */
+ _HA_ATOMIC_INC(&th_ctx->active_checks);
+ }
+ else {
+ /* check just woke up, count it as active */
+ _HA_ATOMIC_INC(&th_ctx->active_checks);
}
/* OK we're keeping it so this check is ours now */
/* OK let's run, now we cannot roll back anymore */
check->state |= CHK_ST_READY;
activity[tid].check_started++;
+ _HA_ATOMIC_INC(&th_ctx->running_checks);
}
/* at this point, CHK_ST_SLEEPING = 0 and CHK_ST_READY = 1 */
check_release_buf(check, &check->bi);
check_release_buf(check, &check->bo);
+ _HA_ATOMIC_DEC(&th_ctx->running_checks);
+ _HA_ATOMIC_DEC(&th_ctx->active_checks);
check->state &= ~(CHK_ST_INPROGRESS|CHK_ST_IN_ALLOC|CHK_ST_OUT_ALLOC);
check->state &= ~CHK_ST_READY;
check->state |= CHK_ST_SLEEPING;