From: Willy Tarreau
Date: Wed, 22 Jun 2022 07:19:46 +0000 (+0200)
Subject: MINOR: thread: only use atomic ops to touch the flags
X-Git-Tag: v2.7-dev2~136
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=bdcd32598f4f10a9cdf266f5375b1600b9f8a9a3;p=thirdparty%2Fhaproxy.git

MINOR: thread: only use atomic ops to touch the flags

The thread flags are occasionally modified by other threads (e.g. the
STUCK flag may be set by another one) and occasionally read by them as
well. As such, only atomic ops must be used to manipulate them. Most
places were already doing so; this patch generalizes the practice.
Only ha_thread_dump() does not change because it runs under thread
isolation.
---

diff --git a/include/haproxy/task.h b/include/haproxy/task.h
index da5a684367..a79319eb59 100644
--- a/include/haproxy/task.h
+++ b/include/haproxy/task.h
@@ -367,7 +367,7 @@ static inline void _tasklet_wakeup_on(struct tasklet *tl, int thr, const char *f
 	tl->debug.caller_idx = !tl->debug.caller_idx;
 	tl->debug.caller_file[tl->debug.caller_idx] = file;
 	tl->debug.caller_line[tl->debug.caller_idx] = line;
-	if (th_ctx->flags & TH_FL_TASK_PROFILING)
+	if (_HA_ATOMIC_LOAD(&th_ctx->flags) & TH_FL_TASK_PROFILING)
 		tl->call_date = now_mono_time();
 #endif
 	__tasklet_wakeup_on(tl, thr);
@@ -421,7 +421,7 @@ static inline void _task_instant_wakeup(struct task *t, unsigned int f, const ch
 	tl->debug.caller_idx = !tl->debug.caller_idx;
 	tl->debug.caller_file[tl->debug.caller_idx] = file;
 	tl->debug.caller_line[tl->debug.caller_idx] = line;
-	if (th_ctx->flags & TH_FL_TASK_PROFILING)
+	if (_HA_ATOMIC_LOAD(&th_ctx->flags) & TH_FL_TASK_PROFILING)
 		tl->call_date = now_mono_time();
 #endif
 	__tasklet_wakeup_on(tl, thr);
diff --git a/src/activity.c b/src/activity.c
index 82364dd0a4..ff9effbde3 100644
--- a/src/activity.c
+++ b/src/activity.c
@@ -383,7 +383,7 @@ void activity_count_runtime(uint32_t run_time)
 	 * profiling to "on" when automatic, and going back below the "down"
 	 * threshold switches to off. The forced modes don't check the load.
 	 */
-	if (!(th_ctx->flags & TH_FL_TASK_PROFILING)) {
+	if (!(_HA_ATOMIC_LOAD(&th_ctx->flags) & TH_FL_TASK_PROFILING)) {
 		if (unlikely((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_ON ||
 		             ((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_AON &&
 		              swrate_avg(run_time, TIME_STATS_SAMPLES) >= up)))
diff --git a/src/debug.c b/src/debug.c
index a55d087b05..4207d13099 100644
--- a/src/debug.c
+++ b/src/debug.c
@@ -154,6 +154,7 @@ void ha_backtrace_to_stderr(void)
  * The calling thread ID needs to be passed in to display a star
  * in front of the calling thread's line (usually it's tid). Any stuck thread
  * is also prefixed with a '>'.
+ * It must be called under thread isolation.
  */
 void ha_thread_dump(struct buffer *buf, int thr, int calling_tid)
 {
@@ -1365,7 +1366,7 @@ void debug_handler(int sig, siginfo_t *si, void *arg)
 	 * if it didn't move.
 	 */
 	if (!((threads_harmless_mask|sleeping_thread_mask) & tid_bit))
-		th_ctx->flags |= TH_FL_STUCK;
+		_HA_ATOMIC_OR(&th_ctx->flags, TH_FL_STUCK);
 }
 
 static int init_debug_per_thread()
diff --git a/src/fd.c b/src/fd.c
index 96d77bd55f..99c8c4e5b3 100644
--- a/src/fd.c
+++ b/src/fd.c
@@ -472,7 +472,7 @@ int fd_update_events(int fd, uint evts)
 	uint new_flags, must_stop;
 	ulong rmask, tmask;
 
-	th_ctx->flags &= ~TH_FL_STUCK; // this thread is still running
+	_HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_STUCK); // this thread is still running
 
 	/* do nothing if the FD was taken over under us */
 	do {
diff --git a/src/listener.c b/src/listener.c
index 65bb766927..8a033fdc9e 100644
--- a/src/listener.c
+++ b/src/listener.c
@@ -1135,7 +1135,7 @@ void listener_accept(struct listener *l)
 		}
 #endif
 
-		th_ctx->flags &= ~TH_FL_STUCK; // this thread is still running
+		_HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_STUCK); // this thread is still running
 	} /* end of for (max_accept--) */
 
  end:
diff --git a/src/task.c b/src/task.c
index 76a7432737..00b2887c09 100644
--- a/src/task.c
+++ b/src/task.c
@@ -241,7 +241,7 @@ void __task_wakeup(struct task *t)
 		t->rq.key += offset;
 	}
 
-	if (th_ctx->flags & TH_FL_TASK_PROFILING)
+	if (_HA_ATOMIC_LOAD(&th_ctx->flags) & TH_FL_TASK_PROFILING)
 		t->call_date = now_mono_time();
 
 	eb32_insert(root, &t->rq);
@@ -568,7 +568,7 @@ unsigned int run_tasks_from_lists(unsigned int budgets[])
 		process = t->process;
 		t->calls++;
 		th_ctx->current = t;
-		th_ctx->flags &= ~TH_FL_STUCK; // this thread is still running
+		_HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_STUCK); // this thread is still running
 
 		_HA_ATOMIC_DEC(&th_ctx->rq_total);
 
@@ -578,7 +578,7 @@ unsigned int run_tasks_from_lists(unsigned int budgets[])
 			LIST_DEL_INIT(&((struct tasklet *)t)->list);
 			__ha_barrier_store();
 
-			if (unlikely(th_ctx->flags & TH_FL_TASK_PROFILING)) {
+			if (unlikely(_HA_ATOMIC_LOAD(&th_ctx->flags) & TH_FL_TASK_PROFILING)) {
 				profile_entry = sched_activity_entry(sched_activity, t->process);
 				before = now_mono_time();
 #ifdef DEBUG_TASK
@@ -603,7 +603,7 @@ unsigned int run_tasks_from_lists(unsigned int budgets[])
 				continue;
 			}
 
-			if (unlikely(th_ctx->flags & TH_FL_TASK_PROFILING)) {
+			if (unlikely(_HA_ATOMIC_LOAD(&th_ctx->flags) & TH_FL_TASK_PROFILING)) {
 				HA_ATOMIC_INC(&profile_entry->calls);
 				HA_ATOMIC_ADD(&profile_entry->cpu_time, now_mono_time() - before);
 			}
@@ -734,7 +734,7 @@ void process_runnable_tasks()
 	int heavy_queued = 0;
 	int budget;
 
-	th_ctx->flags &= ~TH_FL_STUCK; // this thread is still running
+	_HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_STUCK); // this thread is still running
 
 	if (!thread_has_tasks()) {
 		activity[tid].empty_rq++;
diff --git a/src/wdt.c b/src/wdt.c
index 6bb7d85a6d..dc5c1dde50 100644
--- a/src/wdt.c
+++ b/src/wdt.c
@@ -99,7 +99,7 @@ void wdt_handler(int sig, siginfo_t *si, void *arg)
 	 * If it's already set, then it's our second call with no
 	 * progress and the thread is dead.
 	 */
-	if (!(ha_thread_ctx[thr].flags & TH_FL_STUCK)) {
+	if (!(_HA_ATOMIC_LOAD(&ha_thread_ctx[thr].flags) & TH_FL_STUCK)) {
 		_HA_ATOMIC_OR(&ha_thread_ctx[thr].flags, TH_FL_STUCK);
 		goto update_and_leave;
 	}
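For readers unfamiliar with the pattern, the following standalone sketch
illustrates the race this patch closes. It assumes the _HA_ATOMIC_LOAD,
_HA_ATOMIC_OR and _HA_ATOMIC_AND macros behave like the GCC/Clang __atomic
builtins used below; the TH_FL_* values and the watchdog thread here are
hypothetical, made up purely for illustration.

/* Minimal sketch (not HAProxy code) of atomic flag manipulation on a
 * word shared between threads, like th_ctx->flags in the patch above.
 * Assumption: the _HA_ATOMIC_* macros map to __atomic builtins.
 */
#include <stdio.h>
#include <pthread.h>

#define TH_FL_STUCK           0x01  /* hypothetical flag values */
#define TH_FL_TASK_PROFILING  0x02

static unsigned int th_flags;  /* shared, like th_ctx->flags */

/* another thread (e.g. a watchdog) may set STUCK on us at any time */
static void *watchdog(void *arg)
{
	__atomic_or_fetch(&th_flags, TH_FL_STUCK, __ATOMIC_SEQ_CST);
	return NULL;
}

int main(void)
{
	pthread_t wdt;

	pthread_create(&wdt, NULL, watchdog, NULL);

	/* Atomic read-modify-write: a concurrent OR from the watchdog
	 * cannot be lost, whereas a plain "th_flags &= ~X" compiles to
	 * separate load/modify/store instructions and can drop it.
	 */
	__atomic_and_fetch(&th_flags, ~TH_FL_TASK_PROFILING, __ATOMIC_SEQ_CST);

	pthread_join(wdt, NULL);

	/* atomic load when testing a flag that other threads may write */
	if (__atomic_load_n(&th_flags, __ATOMIC_SEQ_CST) & TH_FL_STUCK)
		puts("STUCK was set by the watchdog and not lost");

	return 0;
}

Build with e.g. "gcc -pthread sketch.c". The key point is the same one the
patch makes: every access to a flags word that other threads may touch,
whether a test (TH_FL_TASK_PROFILING), a set (wdt_handler() marking a
thread STUCK) or a clear (the schedulers proving they still run), must go
through an atomic op, or concurrent updates can be torn or silently lost.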