From: Willy Tarreau <w@1wt.eu>
Date: Wed, 24 Apr 2019 06:10:57 +0000 (+0200)
Subject: MINOR: tasks/activity: report the context switch and task wakeup rates
X-Git-Tag: v2.0-dev3~179
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=0212fadd658bfcade0342dea708e398b47e31188;p=thirdparty%2Fhaproxy.git

MINOR: tasks/activity: report the context switch and task wakeup rates

Seeing this is particularly useful to spot runaway tasks. The context
switch rate covers all tasklet calls (tasks and I/O handlers) while the
task wakeups only cover tasks picked from the run queue to be executed.
High values there will indicate either intense traffic or a bug that
makes a task go wild.
---

diff --git a/include/types/activity.h b/include/types/activity.h
index 329513a39e..0c23f76f63 100644
--- a/include/types/activity.h
+++ b/include/types/activity.h
@@ -50,6 +50,8 @@ struct activity {
 	/* one cache line */
 	struct freq_ctr cpust_1s;  // avg amount of half-ms stolen over last second
 	struct freq_ctr_period cpust_15s; // avg amount of half-ms stolen over last 15s
+	struct freq_ctr ctxsw_rate;// context switching rate over last second
+	struct freq_ctr tasks_rate;// task wakeup rate over last second
 	unsigned int avg_loop_us; // average run time per loop over last 1024 runs
 	unsigned int accepted;    // accepted incoming connections
 	unsigned int accq_pushed; // accept queue connections pushed
diff --git a/src/cli.c b/src/cli.c
index 843c3d043f..9581369c23 100644
--- a/src/cli.c
+++ b/src/cli.c
@@ -1087,6 +1087,8 @@ static int cli_io_handler_show_activity(struct appctx *appctx)
 	chunk_appendf(&trash, "\nstream:");     for (thr = 0; thr < global.nbthread; thr++) chunk_appendf(&trash, " %u", activity[thr].stream);
 	chunk_appendf(&trash, "\nempty_rq:");   for (thr = 0; thr < global.nbthread; thr++) chunk_appendf(&trash, " %u", activity[thr].empty_rq);
 	chunk_appendf(&trash, "\nlong_rq:");    for (thr = 0; thr < global.nbthread; thr++) chunk_appendf(&trash, " %u", activity[thr].long_rq);
+	chunk_appendf(&trash, "\nctxsw_rate:"); for (thr = 0; thr < global.nbthread; thr++) chunk_appendf(&trash, " %u", read_freq_ctr(&activity[thr].ctxsw_rate));
+	chunk_appendf(&trash, "\ntasks_rate:"); for (thr = 0; thr < global.nbthread; thr++) chunk_appendf(&trash, " %u", read_freq_ctr(&activity[thr].tasks_rate));
 	chunk_appendf(&trash, "\ncpust_ms_tot:"); for (thr = 0; thr < global.nbthread; thr++) chunk_appendf(&trash, " %u", activity[thr].cpust_total/2);
 	chunk_appendf(&trash, "\ncpust_ms_1s:");  for (thr = 0; thr < global.nbthread; thr++) chunk_appendf(&trash, " %u", read_freq_ctr(&activity[thr].cpust_1s)/2);
 	chunk_appendf(&trash, "\ncpust_ms_15s:"); for (thr = 0; thr < global.nbthread; thr++) chunk_appendf(&trash, " %u", read_freq_ctr_period(&activity[thr].cpust_15s, 15000)/2);
diff --git a/src/task.c b/src/task.c
index de6eda1e72..04476fe5aa 100644
--- a/src/task.c
+++ b/src/task.c
@@ -20,10 +20,11 @@
 #include <eb32sctree.h>
 #include <eb32tree.h>
 
+#include <proto/fd.h>
+#include <proto/freq_ctr.h>
 #include <proto/proxy.h>
 #include <proto/stream.h>
 #include <proto/task.h>
-#include <proto/fd.h>
 
 DECLARE_POOL(pool_head_task,    "task",    sizeof(struct task));
 DECLARE_POOL(pool_head_tasklet, "tasklet", sizeof(struct tasklet));
@@ -278,6 +279,8 @@ void process_runnable_tasks()
 	struct eb32sc_node *lrq = NULL; // next local run queue entry
 	struct eb32sc_node *grq = NULL; // next global run queue entry
 	struct task *t;
+	int to_process;
+	int wakeups;
 	int max_processed;
 
 	if (!(active_tasks_mask & tid_bit)) {
@@ -292,6 +295,9 @@
 	if (likely(niced_tasks))
 		max_processed = (max_processed + 3) / 4;
 
+	to_process = max_processed;
+	wakeups = 0;
+
 	/* Note: the grq lock is always held when grq is not null */
 	while (task_per_thread[tid].task_list_size < max_processed) {
@@ -344,6 +350,7 @@
 
 		/* And add it to the local task list */
 		task_insert_into_tasklet_list(t);
+		wakeups++;
 	}
 
 	/* release the rqueue lock */
@@ -419,6 +426,11 @@
 		_HA_ATOMIC_OR(&active_tasks_mask, tid_bit);
 		activity[tid].long_rq++;
 	}
+
+	if (wakeups)
+		update_freq_ctr(&activity[tid].tasks_rate, wakeups);
+	if (to_process - max_processed)
+		update_freq_ctr(&activity[tid].ctxsw_rate, to_process - max_processed);
 }
 
 /*
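
Example: with this patch applied, the two new lines appear in the output
of the CLI's "show activity" command, one value per thread, formatted by
the chunk_appendf() calls above. A hypothetical excerpt for a process
running two threads (the numbers below are invented for illustration):

  ctxsw_rate: 58014 57322
  tasks_rate: 294 310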
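
For readers unfamiliar with the freq_ctr mechanism, below is a minimal,
self-contained C sketch of the idea: events accumulate into the current
one-second period, which is rotated when the second changes, and the
scheduler feeds the counter once per pass with the whole batch count
(wakeups, or to_process - max_processed) rather than once per event.
This is only an illustration, not HAProxy's implementation; every name
prefixed with "toy_" is invented for this example.

  #include <stdio.h>
  #include <time.h>

  struct toy_freq_ctr {
  	time_t curr_sec;        /* the second covered by curr_ctr */
  	unsigned int curr_ctr;  /* events counted in the current second */
  	unsigned int prev_ctr;  /* events counted in the previous second */
  };

  /* add <inc> events, rotating the periods when a new second starts */
  static void toy_update_freq_ctr(struct toy_freq_ctr *f, unsigned int inc)
  {
  	time_t now = time(NULL);

  	if (now != f->curr_sec) {
  		/* keep the finished second only if it immediately precedes <now> */
  		f->prev_ctr = (now == f->curr_sec + 1) ? f->curr_ctr : 0;
  		f->curr_sec = now;
  		f->curr_ctr = 0;
  	}
  	f->curr_ctr += inc;
  }

  /* report the rate over the last complete second; the real read_freq_ctr()
   * also weights in the elapsed fraction of the current second.
   */
  static unsigned int toy_read_freq_ctr(const struct toy_freq_ctr *f)
  {
  	return f->prev_ctr;
  }

  int main(void)
  {
  	struct toy_freq_ctr ctxsw_rate = { 0, 0, 0 };
  	int to_process, max_processed;

  	/* one scheduler pass: start from a budget and consume part of it */
  	to_process = max_processed = 200;
  	max_processed -= 37;    /* pretend 37 tasklets were executed */

  	/* single batched update, mirroring the last hunk of src/task.c */
  	if (to_process - max_processed)
  		toy_update_freq_ctr(&ctxsw_rate, to_process - max_processed);

  	printf("events this second: %u, last full second: %u\n",
  	       ctxsw_rate.curr_ctr, toy_read_freq_ctr(&ctxsw_rate));
  	return 0;
  }

Batching the update this way costs at most two freq_ctr updates per
scheduler pass, regardless of how many tasklets were executed.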