struct freq_ctr cpust_1s; // avg amount of half-ms stolen over last second
struct freq_ctr_period cpust_15s; // avg amount of half-ms stolen over last 15s
unsigned int avg_loop_us; // average run time per loop over last 1024 runs
+ unsigned int accq_pushed; // accept queue connections pushed
+ unsigned int accq_full; // accept queue connections not pushed because full
char __pad[0]; // unused except to check remaining room
char __end[0] __attribute__((aligned(64))); // align size to 64.
};
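
(The zero-sized __end[0] member aligned to 64 is what rounds sizeof(struct activity)
up to a cache-line multiple, so each thread's counters sit on their own cache line
and the atomic updates below never false-share with a neighbouring thread's slot.
A minimal standalone sketch of the trick, assuming GNU C -- zero-length arrays and
the aligned attribute are GCC/Clang extensions; the demo struct below is
illustrative, not HAProxy's:

struct demo {
	unsigned int a, b;                          /* actual payload */
	char __pad[0];                              /* unused, marks remaining room */
	char __end[0] __attribute__((aligned(64))); /* rounds sizeof() up to 64 */
};

/* the aligned zero-length member pads the struct out to whole cache lines */
_Static_assert(sizeof(struct demo) % 64 == 0, "must fill whole cache lines");
)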
chunk_appendf(&trash, "\ncpust_ms_1s:"); for (thr = 0; thr < global.nbthread; thr++) chunk_appendf(&trash, " %u", read_freq_ctr(&activity[thr].cpust_1s)/2);
chunk_appendf(&trash, "\ncpust_ms_15s:"); for (thr = 0; thr < global.nbthread; thr++) chunk_appendf(&trash, " %u", read_freq_ctr_period(&activity[thr].cpust_15s, 15000)/2);
chunk_appendf(&trash, "\navg_loop_us:"); for (thr = 0; thr < global.nbthread; thr++) chunk_appendf(&trash, " %u", swrate_avg(activity[thr].avg_loop_us, TIME_STATS_SAMPLES));
+ chunk_appendf(&trash, "\naccq_pushed:"); for (thr = 0; thr < global.nbthread; thr++) chunk_appendf(&trash, " %u", activity[thr].accq_pushed);
+ chunk_appendf(&trash, "\naccq_full:"); for (thr = 0; thr < global.nbthread; thr++) chunk_appendf(&trash, " %u", activity[thr].accq_full);
chunk_appendf(&trash, "\n");
*/
ring = &accept_queue_rings[t1];
if (accept_queue_push_mp(ring, cfd, l, &addr, laddr)) {
+ HA_ATOMIC_ADD(&activity[t1].accq_pushed, 1);
task_wakeup(ring->task, TASK_WOKEN_IO);
continue;
}
/* If the ring is full we do a synchronous accept on
* the local thread here.
- * FIXME: we should update some stats here.
*/
+ HA_ATOMIC_ADD(&activity[t1].accq_full, 1);
}
#endif // USE_THREAD
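
For reference, here is a standalone sketch of the counter pattern these hunks
implement. Any accepting thread may bump the target thread t1's slot, so the
increment must be atomic (HA_ATOMIC_ADD is HAProxy's portable wrapper; plain C11
atomics stand in for it here), and the dump side simply walks the per-thread array
the way the "show activity" loops above do. MAX_THREADS, worker() and the simulated
ring-full rate are illustrative only, not HAProxy code:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define MAX_THREADS 4

static struct {
	atomic_uint accq_pushed; /* connections pushed to this thread's ring */
	atomic_uint accq_full;   /* pushes refused because the ring was full */
} act[MAX_THREADS];

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		int t1 = i % MAX_THREADS;   /* target thread picked by the balancer */
		int pushed = (i % 10 != 0); /* pretend 1 push in 10 finds the ring full */

		if (pushed)
			atomic_fetch_add(&act[t1].accq_pushed, 1);
		else
			atomic_fetch_add(&act[t1].accq_full, 1);
	}
	return NULL;
}

int main(void)
{
	pthread_t th[MAX_THREADS];

	for (int t = 0; t < MAX_THREADS; t++)
		pthread_create(&th[t], NULL, worker, NULL);
	for (int t = 0; t < MAX_THREADS; t++)
		pthread_join(th[t], NULL);

	printf("accq_pushed:"); /* one column per thread, as in "show activity" */
	for (int t = 0; t < MAX_THREADS; t++)
		printf(" %u", atomic_load(&act[t].accq_pushed));
	printf("\naccq_full:");
	for (int t = 0; t < MAX_THREADS; t++)
		printf(" %u", atomic_load(&act[t].accq_full));
	printf("\n");
	return 0;
}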