/* When thread debugging is enabled, we remap HA_SPINLOCK_T and HA_RWLOCK_T to
* complex structures which embed debugging info.
*/
-#if (DEBUG_THREAD < 1) && !defined(DEBUG_FULL)
+#if (DEBUG_THREAD < 2) && !defined(DEBUG_FULL)
#define HA_SPINLOCK_T __HA_SPINLOCK_T
#define HA_RWLOCK_T __HA_RWLOCK_T
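/* Illustrative only (not part of this patch; field names are assumptions): when
 * debugging is enabled, the plain lock types above are instead mapped to wrapper
 * structures that keep the raw lock word together with bookkeeping about holders
 * and waiters, roughly of this shape:
 *
 *   struct ha_spinlock {
 *       __HA_SPINLOCK_T lock;    // the underlying lock word
 *       unsigned long owner;     // one bit per thread currently holding it
 *       unsigned long waiters;   // one bit per thread spinning on it
 *   };
 */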
return _HA_ATOMIC_LOAD(&isolated_thread) == tid;
}
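/* Illustrative usage only: this check is normally meaningful between the
 * thread_isolate() / thread_release() pair, e.g.:
 *
 *   thread_isolate();       // wait until all other threads are parked
 *   ...                     // work requiring exclusive access
 *   thread_release();       // let the other threads resume
 */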
-#if (DEBUG_THREAD < 1) && !defined(DEBUG_FULL)
+#if (DEBUG_THREAD < 2) && !defined(DEBUG_FULL)
/* Thread debugging is DISABLED, these are the regular locking functions */
#define HA_RWLOCK_TRYRDTOSK(lbl, l) (!pl_try_rtos(l)) /* R -?> S */
#define HA_RWLOCK_TRYRDTOWR(lbl, l) (!pl_try_rtow(l)) /* R -?> W */
-#else /* (DEBUG_THREAD < 1) && !defined(DEBUG_FULL) */
+#else /* (DEBUG_THREAD < 2) && !defined(DEBUG_FULL) */
/* Thread debugging is ENABLED, these are the instrumented functions */
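/* Illustrative only (helper and parameter names are assumptions): in the
 * instrumented build, each macro typically forwards to a wrapper that records
 * the call site and updates the per-label counters, e.g.:
 *
 *   #define HA_RWLOCK_TRYRDTOWR(lbl, l) \
 *       __ha_rwlock_tryrdtowr(lbl, l, __func__, __FILE__, __LINE__)
 */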
for (i = 1; i < global.nbthread; i++)
pthread_join(ha_pthread[i], NULL);
-#if (DEBUG_THREAD > 0) || defined(DEBUG_FULL)
+#if (DEBUG_THREAD > 1) || defined(DEBUG_FULL)
show_lock_stats();
#endif
}
#if (DEBUG_THREAD > 0) || defined(DEBUG_FULL)
-struct lock_stat lock_stats_rd[LOCK_LABELS] = { };
-struct lock_stat lock_stats_sk[LOCK_LABELS] = { };
-struct lock_stat lock_stats_wr[LOCK_LABELS] = { };
-
-/* this is only used below */
-static const char *lock_label(enum lock_label label)
+const char *lock_label(enum lock_label label)
{
switch (label) {
case TASK_RQ_LOCK: return "TASK_RQ";
	/* the only way to get here is as the result of an internal bug */
abort();
}
+#endif
+
+#if (DEBUG_THREAD > 1) || defined(DEBUG_FULL)
+
+struct lock_stat lock_stats_rd[LOCK_LABELS] = { };
+struct lock_stat lock_stats_sk[LOCK_LABELS] = { };
+struct lock_stat lock_stats_wr[LOCK_LABELS] = { };
/* returns the number of read/seek/write operations for a given label by summing its buckets */
static uint64_t get_lock_stat_num_read(int lbl)
HA_ATOMIC_INC(&lock_stats_sk[lbl].num_unlocked);
}
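/* Illustrative only (bucket count and field names are assumptions): the summing
 * helpers referenced above are expected to walk a label's per-wait-time buckets
 * and accumulate them, roughly as in:
 *
 *   static uint64_t get_lock_stat_num_read(int lbl)
 *   {
 *       uint64_t ret = 0;
 *       int bucket;
 *
 *       for (bucket = 0; bucket < 32; bucket++)
 *           ret += _HA_ATOMIC_LOAD(&lock_stats_rd[lbl].buckets[bucket]);
 *       return ret;
 *   }
 */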
-#endif // (DEBUG_THREAD > 0) || defined(DEBUG_FULL)
+#endif // (DEBUG_THREAD > 1) || defined(DEBUG_FULL)
#if defined(USE_PTHREAD_EMULATION)