extern uint64_t now_mono_time(void); \
if (_LK_ != _LK_UN) { \
th_ctx->lock_level += bal; \
- if (unlikely(th_ctx->flags & TH_FL_TASK_PROFILING)) \
+ if (unlikely((th_ctx->flags & (TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_L)) == \
+ (TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_L))) \
lock_start = now_mono_time(); \
} \
(void)(expr); \
if (_LK_ == _LK_UN) { \
th_ctx->lock_level += bal; \
- if (th_ctx->lock_level == 0 && unlikely(th_ctx->flags & TH_FL_TASK_PROFILING)) \
+ if (th_ctx->lock_level == 0 &&\
+ unlikely((th_ctx->flags & (TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_L)) == \
+ (TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_L))) \
th_ctx->locked_total += now_mono_time() - th_ctx->lock_start_date; \
- } else if (unlikely(th_ctx->flags & TH_FL_TASK_PROFILING)) { \
+ } else if (unlikely((th_ctx->flags & (TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_L)) == \
+ (TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_L))) { \
uint64_t now = now_mono_time(); \
if (lock_start) \
th_ctx->lock_wait_total += now - lock_start; \
typeof(expr) _expr = (expr); \
if (_expr == 0) { \
th_ctx->lock_level += bal; \
- if (unlikely(th_ctx->flags & TH_FL_TASK_PROFILING)) { \
+ if (unlikely((th_ctx->flags & (TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_L)) == \
+ (TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_L))) { \
if (_LK_ == _LK_UN && th_ctx->lock_level == 0) \
th_ctx->locked_total += now_mono_time() - th_ctx->lock_start_date; \
else if (_LK_ != _LK_UN && th_ctx->lock_level == 1) \
#define TH_FL_IN_DBG_HANDLER 0x00000100 /* thread currently in the debug signal handler */
#define TH_FL_IN_WDT_HANDLER 0x00000200 /* thread currently in the wdt signal handler */
#define TH_FL_IN_ANY_HANDLER 0x00000380 /* mask to test if the thread is in any signal handler; NOTE(review): 0x380 also covers bit 0x80, defined outside this view — confirm it is a handler flag */
+#define TH_FL_TASK_PROFILING_L 0x00000400 /* task profiling in locks (also requires TASK_PROFILING); tested together with TASK_PROFILING in the lock macros */
+#define TH_FL_TASK_PROFILING_M 0x00000800 /* task profiling in mem alloc (also requires TASK_PROFILING); tested together with TASK_PROFILING in the pool alloc/free paths */
/* we have 4 buffer-wait queues, in highest to lowest emergency order */
#define DYNBUF_NBQ 4
if (!(_HA_ATOMIC_LOAD(&th_ctx->flags) & TH_FL_TASK_PROFILING)) {
if (unlikely((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_ON ||
((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_AON &&
- swrate_avg(run_time, TIME_STATS_SAMPLES) >= up)))
+ swrate_avg(run_time, TIME_STATS_SAMPLES) >= up))) {
+
+ if (profiling & HA_PROF_TASKS_LOCK)
+ _HA_ATOMIC_OR(&th_ctx->flags, TH_FL_TASK_PROFILING_L);
+ else
+ _HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_TASK_PROFILING_L);
+
+ if (profiling & HA_PROF_TASKS_MEM)
+ _HA_ATOMIC_OR(&th_ctx->flags, TH_FL_TASK_PROFILING_M);
+ else
+ _HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_TASK_PROFILING_M);
+
_HA_ATOMIC_OR(&th_ctx->flags, TH_FL_TASK_PROFILING);
+ }
} else {
if (unlikely((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_OFF ||
((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_AOFF &&
if (unlikely(pool_cache_bytes > global.tune.pool_cache_size * 3 / 4)) {
uint64_t mem_wait_start = 0;
- if (unlikely(th_ctx->flags & TH_FL_TASK_PROFILING))
+ if (unlikely((th_ctx->flags & (TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_M)) ==
+ (TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_M)))
mem_wait_start = now_mono_time();
if (ph->count >= 16 + pool_cache_count / 8 + CONFIG_HAP_POOL_CLUSTER_SIZE)
uint64_t mem_wait_start = 0;
int isolated = thread_isolated();
- if (unlikely(th_ctx->flags & TH_FL_TASK_PROFILING))
+ if (unlikely((th_ctx->flags & (TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_M)) ==
+ (TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_M)))
mem_wait_start = now_mono_time();
if (!isolated)
/* count allocation time only for cache misses */
uint64_t mem_wait_start = 0;
- if (unlikely(th_ctx->flags & TH_FL_TASK_PROFILING))
+ if (unlikely((th_ctx->flags & (TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_M)) ==
+ (TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_M)))
mem_wait_start = now_mono_time();
p = pool_alloc_nocache(pool, caller);
global.tune.pool_cache_size < pool->size)) {
uint64_t mem_wait_start = 0;
- if (unlikely(th_ctx->flags & TH_FL_TASK_PROFILING))
+ if (unlikely((th_ctx->flags & (TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_M)) ==
+ (TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_M)))
mem_wait_start = now_mono_time();
pool_free_nocache(pool, ptr);