From: Amaury Denoyelle
Date: Tue, 17 Dec 2024 15:28:16 +0000 (+0100)
Subject: MINOR: trace: implement tracing disabling API
X-Git-Tag: v3.2-dev2~38
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=9d155ca7064290d3280621c85443221fa13334b2;p=thirdparty%2Fhaproxy.git

MINOR: trace: implement tracing disabling API

Define a set of functions to temporarily disable/reactivate tracing for
the current thread. This can be useful to quickly suppress tracing
output for some code parts.

The API relies on a disable/resume pair of functions backed by a
thread-local counter. This counter is tested in __trace_enabled(). It is
cumulative, so the same number of resume calls must be issued after
several disable calls. It is also possible to forcefully reset the
counter to 0 and later restore the previous value.

This should be backported up to 3.1.
---

diff --git a/include/haproxy/tinfo-t.h b/include/haproxy/tinfo-t.h
index 3db77e56f1..991786fc27 100644
--- a/include/haproxy/tinfo-t.h
+++ b/include/haproxy/tinfo-t.h
@@ -136,8 +136,9 @@ struct thread_ctx {
 	unsigned int nb_tasks;          /* number of tasks allocated on this thread */
 	uint8_t tl_class_mask;          /* bit mask of non-empty tasklets classes */
 	uint8_t bufq_map;               /* one bit per non-empty buffer_wq */
+	uint8_t trc_disable_ctr;        /* cumulative counter to temporarily disable tracing */
 
-	// 2 bytes hole here
+	// 1 byte hole here
 	unsigned int nb_rhttp_conns;    /* count of current conns used for active reverse HTTP */
 	struct sched_activity *sched_profile_entry; /* profile entry in use by the current task/tasklet, only if sched_wake_date>0 */
 
diff --git a/include/haproxy/trace.h b/include/haproxy/trace.h
index ce94289154..cab11299cf 100644
--- a/include/haproxy/trace.h
+++ b/include/haproxy/trace.h
@@ -206,6 +206,48 @@ static inline char trace_event_char(uint64_t conf, uint64_t ev)
 	return (conf & ev) ? '+' : '-';
 }
 
+/* Temporarily disable tracing using a cumulative counter. If called multiple
+ * times, as many trace_resume() calls are needed to reactivate tracing.
+ *
+ * Returns the incremented counter value, or 0 if already at the maximum value.
+ */
+static inline uint8_t trace_disable(void)
+{
+	if (unlikely(th_ctx->trc_disable_ctr == UCHAR_MAX))
+		return 0;
+	return ++th_ctx->trc_disable_ctr;
+}
+
+/* Resume tracing after a temporary disabling. It may be called several times
+ * as the disable operation is cumulative.
+ */
+static inline void trace_resume(void)
+{
+	if (th_ctx->trc_disable_ctr)
+		--th_ctx->trc_disable_ctr;
+}
+
+/* Resume tracing immediately, even after multiple disable operations.
+ *
+ * Returns the old counter value. Useful to restore trace disabling to the
+ * previous level afterwards.
+ */
+static inline uint8_t trace_force_resume(void)
+{
+	const int val = th_ctx->trc_disable_ctr;
+	th_ctx->trc_disable_ctr = 0;
+	return val;
+}
+
+/* Set the trace disabling counter to <disable>. Mostly useful with the value
+ * returned from trace_force_resume() to restore the tracing disable status to
+ * its previous level.
+ */
+static inline void trace_reset_disable(uint8_t disable)
+{
+	th_ctx->trc_disable_ctr = disable;
+}
+
 #endif /* _HAPROXY_TRACE_H */
 
 /*
diff --git a/src/trace.c b/src/trace.c
index cc49ed98b1..64af3705f3 100644
--- a/src/trace.c
+++ b/src/trace.c
@@ -97,9 +97,13 @@ int __trace_enabled(enum trace_level level, uint64_t mask, struct trace_source *
 	/* in case we also follow another one (e.g. session) */
 	origin = HA_ATOMIC_LOAD(&src->follow);
 
+	/* Tracing can be temporarily disabled via trace_disable(). */
 	if (likely(src->state == TRACE_STATE_STOPPED) && !origin)
 		return 0;
 
+	if (th_ctx->trc_disable_ctr)
+		return 0;
+
 	/* check that at least one action is interested by this event */
 	if (((src->report_events | src->start_events | src->pause_events | src->stop_events) & mask) == 0)
 		return 0;
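
Usage illustration only (not part of the patch): the sketch below shows how a
caller might bracket a noisy code path with this API on the current thread.
The helpers process_noisy_part() and do_traced_part() are hypothetical
placeholders for caller code.

    #include <haproxy/trace.h>

    /* Hypothetical helpers standing in for arbitrary caller code. */
    extern void process_noisy_part(void);
    extern void do_traced_part(void);

    static void tracing_disable_example(void)
    {
    	uint8_t prev;

    	/* Cumulative form: each trace_disable() must be matched by one
    	 * trace_resume() before traces reappear on this thread.
    	 */
    	trace_disable();
    	process_noisy_part();
    	trace_resume();

    	/* Save/restore form: clear any pending disable so traces are no
    	 * longer suppressed by the counter, then restore the previous
    	 * disable level.
    	 */
    	prev = trace_force_resume();
    	do_traced_part();
    	trace_reset_disable(prev);
    }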