unsigned int long_rq; // process_runnable_tasks() left with tasks in the run queue
unsigned int cpust_total; // sum of half-ms stolen per thread
unsigned int fd_takeover; // number of times this thread stole another one's FD
+ unsigned int check_adopted;// number of times a check was migrated to this thread
ALWAYS_ALIGN(64);
struct freq_ctr cpust_1s; // avg amount of half-ms stolen over last second
unsigned int accq_full; // accept queue connection not pushed because full
unsigned int pool_fail; // failed a pool allocation
unsigned int buf_wait; // waited on a buffer allocation
+ unsigned int check_started;// number of times a check was started on this thread
#if defined(DEBUG_DEV)
/* keep these ones at the end */
unsigned int ctr0; // general purpose debug counter
#ifdef USE_THREAD
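/* each counter below is dumped from its own "case __LINE__:" label, so
 * entries can be added or reordered without renumbering anything
 */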
case __LINE__: SHOW_VAL("accq_ring:", accept_queue_ring_len(&accept_queue_rings[thr]), _tot); break;
case __LINE__: SHOW_VAL("fd_takeover:", activity[thr].fd_takeover, _tot); break;
+ case __LINE__: SHOW_VAL("check_adopted:",activity[thr].check_adopted, _tot); break;
#endif
+ case __LINE__: SHOW_VAL("check_started:",activity[thr].check_started, _tot); break;
#if defined(DEBUG_DEV)
/* keep these ones at the end */
*/
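/* rq_total counts the tasks queued on the local thread's run queue and is
 * used below as a cheap estimate of its current load
 */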
uint my_load = HA_ATOMIC_LOAD(&th_ctx->rq_total);
- if (!(check->state & CHK_ST_READY) && my_load >= 2) {
+ if (check->state & CHK_ST_READY) {
+ /* check was migrated */
+ activity[tid].check_adopted++;
+ }
+ else if (my_load >= 2) {
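/* probe one randomly chosen thread and compare run queue sizes before
 * deciding where the check should run
 */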
uint new_tid = statistical_prng_range(global.nbthread);
uint new_load = HA_ATOMIC_LOAD(&ha_thread_ctx[new_tid].rq_total);
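/* a lower new_load means the randomly probed thread is a better candidate
 * to take over the check; a check migrated this way is later counted as
 * "check_adopted" on the receiving thread
 */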
/* OK let's run, now we cannot roll back anymore */
check->state |= CHK_ST_READY;
+ activity[tid].check_started++;
}
/* at this point, CHK_ST_SLEEPING = 0 and CHK_ST_READY = 1 */