trace2/tr2_tmr.c
#include "git-compat-util.h"
#include "thread-utils.h"
#include "trace2/tr2_tgt.h"
#include "trace2/tr2_tls.h"
#include "trace2/tr2_tmr.h"
#include "trace.h"

#define MY_MAX(a, b) ((a) > (b) ? (a) : (b))
#define MY_MIN(a, b) ((a) < (b) ? (a) : (b))

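/*
 * Note: like most simple min/max macros, MY_MAX() and MY_MIN()
 * evaluate their arguments more than once.  Every use in this file
 * passes plain variables or struct fields, so that is harmless here.
 */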
/*
 * A global timer block to aggregate the partial sums from each
 * thread.
 */
static struct tr2_timer_block final_timer_block; /* access under tr2tls_mutex */

/*
 * Define metadata for each stopwatch timer.
 *
 * This array must match "enum trace2_timer_id" and the values
 * in "struct tr2_timer_block.timer[*]".
 */
static struct tr2_timer_metadata tr2_timer_metadata[TRACE2_NUMBER_OF_TIMERS] = {
	[TRACE2_TIMER_ID_TEST1] = {
		.category = "test",
		.name = "test1",
		.want_per_thread_events = 0,
	},
	[TRACE2_TIMER_ID_TEST2] = {
		.category = "test",
		.name = "test2",
		.want_per_thread_events = 1,
	},

	/* Add additional metadata before here. */
};
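/*
 * Sketch (illustrative, not an entry in the real table): defining a
 * new stopwatch timer means adding an id to "enum trace2_timer_id"
 * (before TRACE2_NUMBER_OF_TIMERS) and a matching metadata entry
 * above.  The id, category, and name below are hypothetical:
 *
 *	[TRACE2_TIMER_ID_MY_PHASE] = {
 *		.category = "my-category",
 *		.name = "my-phase",
 *		.want_per_thread_events = 0,
 *	},
 */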

void tr2_start_timer(enum trace2_timer_id tid)
{
	struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
	struct tr2_timer *t = &ctx->timer_block.timer[tid];

	t->recursion_count++;
	if (t->recursion_count > 1)
		return; /* ignore recursive starts */

	t->start_ns = getnanotime();
}
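/*
 * Because of the recursion_count bookkeeping above, nested starts and
 * stops of the same timer id collapse into one measurement: only the
 * outermost start/stop pair defines the interval.  Illustrative call
 * sequence (not code in this file):
 *
 *	tr2_start_timer(tid);     <- interval begins
 *	  tr2_start_timer(tid);   <- ignored (recursion_count == 2)
 *	  tr2_stop_timer(tid);    <- ignored (recursion_count back to 1)
 *	tr2_stop_timer(tid);      <- interval ends and is accumulated
 */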

void tr2_stop_timer(enum trace2_timer_id tid)
{
	struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
	struct tr2_timer *t = &ctx->timer_block.timer[tid];
	uint64_t ns_now;
	uint64_t ns_interval;

	assert(t->recursion_count > 0);

	t->recursion_count--;
	if (t->recursion_count)
		return; /* still in recursive call(s) */

	ns_now = getnanotime();
	ns_interval = ns_now - t->start_ns;

	t->total_ns += ns_interval;

	/*
	 * min_ns was initialized to zero (in the xcalloc()) rather
	 * than UINT_MAX when the block of timers was allocated,
	 * so we should always set both the min_ns and max_ns values
	 * the first time that the timer is used.
	 */
	if (!t->interval_count) {
		t->min_ns = ns_interval;
		t->max_ns = ns_interval;
	} else {
		t->min_ns = MY_MIN(ns_interval, t->min_ns);
		t->max_ns = MY_MAX(ns_interval, t->max_ns);
	}

	t->interval_count++;

	ctx->used_any_timer = 1;
	if (tr2_timer_metadata[tid].want_per_thread_events)
		ctx->used_any_per_thread_timer = 1;
}
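/*
 * Usage sketch (hypothetical, not part of this file): a caller times a
 * region by bracketing it with a start/stop pair for the same timer id.
 * Real callers reach these helpers through the public trace2 API rather
 * than calling them directly:
 *
 *	static void example_timed_region(void)
 *	{
 *		tr2_start_timer(TRACE2_TIMER_ID_TEST1);
 *
 *		... do the work being measured ...
 *
 *		tr2_stop_timer(TRACE2_TIMER_ID_TEST1);
 *	}
 */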

void tr2_update_final_timers(void)
{
	struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
	enum trace2_timer_id tid;

	if (!ctx->used_any_timer)
		return;

	/*
	 * Accessing `final_timer_block` requires holding `tr2tls_mutex`.
	 * We assume that our caller is holding the lock.
	 */

	for (tid = 0; tid < TRACE2_NUMBER_OF_TIMERS; tid++) {
		struct tr2_timer *t_final = &final_timer_block.timer[tid];
		struct tr2_timer *t = &ctx->timer_block.timer[tid];

		if (t->recursion_count) {
			/*
			 * The current thread is exiting with
			 * timer[tid] still running.
			 *
			 * Technically, this is a bug, but it is only a
			 * bookkeeping error and not worth a die() or
			 * even a warning() yet, so we silently ignore
			 * it.
			 *
			 * The downside is that total_ns won't include
			 * the current open interval (now - start_ns),
			 * which we can live with.
			 */
		}

		if (!t->interval_count)
			continue; /* this timer was not used by this thread */

		t_final->total_ns += t->total_ns;

		/*
		 * final_timer_block.timer[tid].min_ns was initialized
		 * to zero rather than UINT_MAX, so we should always
		 * set both the min_ns and max_ns values the first time
		 * that we add a partial sum into it.
		 */
		if (!t_final->interval_count) {
			t_final->min_ns = t->min_ns;
			t_final->max_ns = t->max_ns;
		} else {
			t_final->min_ns = MY_MIN(t_final->min_ns, t->min_ns);
			t_final->max_ns = MY_MAX(t_final->max_ns, t->max_ns);
		}

		t_final->interval_count += t->interval_count;
	}
}
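/*
 * Caller sketch (illustrative; the helper names are assumptions, not
 * functions defined in this file): each thread's partial sums are
 * folded into `final_timer_block` as the thread shuts down, with the
 * caller serializing access via `tr2tls_mutex`:
 *
 *	hold_tr2tls_mutex();          <- hypothetical lock helper
 *	tr2_update_final_timers();
 *	release_tr2tls_mutex();       <- hypothetical unlock helper
 */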

void tr2_emit_per_thread_timers(tr2_tgt_evt_timer_t *fn_apply)
{
	struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
	enum trace2_timer_id tid;

	if (!ctx->used_any_per_thread_timer)
		return;

	/*
	 * For each timer, if the timer wants per-thread events and
	 * this thread used it, emit it.
	 */
	for (tid = 0; tid < TRACE2_NUMBER_OF_TIMERS; tid++)
		if (tr2_timer_metadata[tid].want_per_thread_events &&
		    ctx->timer_block.timer[tid].interval_count)
			fn_apply(&tr2_timer_metadata[tid],
				 &ctx->timer_block.timer[tid],
				 0);
}

void tr2_emit_final_timers(tr2_tgt_evt_timer_t *fn_apply)
{
	enum trace2_timer_id tid;

	/*
	 * Accessing `final_timer_block` requires holding `tr2tls_mutex`.
	 * We assume that our caller is holding the lock.
	 */

	for (tid = 0; tid < TRACE2_NUMBER_OF_TIMERS; tid++)
		if (final_timer_block.timer[tid].interval_count)
			fn_apply(&tr2_timer_metadata[tid],
				 &final_timer_block.timer[tid],
				 1);
}
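/*
 * Callback sketch (illustrative): judging from the call sites above, a
 * `tr2_tgt_evt_timer_t` callback receives the timer's metadata, the
 * accumulated values, and an is_final_data flag (0 for a per-thread
 * emission, 1 for the aggregated totals).  A hypothetical target
 * implementation might look like:
 *
 *	static void fn_timer_example(const struct tr2_timer_metadata *meta,
 *				     const struct tr2_timer *timer,
 *				     int is_final_data)
 *	{
 *		trace_printf("timer %s/%s total %"PRIuMAX" ns (final=%d)",
 *			     meta->category, meta->name,
 *			     (uintmax_t)timer->total_ns, is_final_data);
 *	}
 */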