//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_interface_atomic.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"

using namespace __tsan;  // NOLINT

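// RAII helper for the interface functions below: enters the runtime via the
// ScopedInRtl member and logs the intercepted call in debug builds.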
class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const char *func)
      : thr_(thr) {
    CHECK_EQ(thr_->in_rtl, 1);  // 1 due to our own ScopedInRtl member.
    DPrintf("#%d: %s\n", thr_->tid, func);
  }
  ~ScopedAtomic() {
    CHECK_EQ(thr_->in_rtl, 1);
  }
 private:
  ThreadState *thr_;
  ScopedInRtl in_rtl_;
};

// Some shortcuts.
typedef __tsan_memory_order morder;
typedef __tsan_atomic8 a8;
typedef __tsan_atomic16 a16;
typedef __tsan_atomic32 a32;
typedef __tsan_atomic64 a64;
const morder mo_relaxed = __tsan_memory_order_relaxed;
const morder mo_consume = __tsan_memory_order_consume;
const morder mo_acquire = __tsan_memory_order_acquire;
const morder mo_release = __tsan_memory_order_release;
const morder mo_acq_rel = __tsan_memory_order_acq_rel;
const morder mo_seq_cst = __tsan_memory_order_seq_cst;

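// Updates the statistics counters for a single atomic operation: the total
// count, the per-operation counter 't', and the per-size and per-memory-order
// breakdowns.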
static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             :             StatAtomic8);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

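// Predicates over memory orders: which orders are valid for loads and stores,
// and which imply acquire/release semantics for the race detector.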
static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

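// Some callers encode memory orders as single-bit flags offset by 100500.
// Map such values back to the plain enum and verify the result is in range.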
static morder ConvertOrder(morder mo) {
  if (mo > (morder)100500) {
    mo = morder(mo - 100500);
    if (mo == morder(1 << 0))
      mo = mo_relaxed;
    else if (mo == morder(1 << 1))
      mo = mo_consume;
    else if (mo == morder(1 << 2))
      mo = mo_acquire;
    else if (mo == morder(1 << 3))
      mo = mo_release;
    else if (mo == morder(1 << 4))
      mo = mo_acq_rel;
    else if (mo == morder(1 << 5))
      mo = mo_seq_cst;
  }
  CHECK_GE(mo, mo_relaxed);
  CHECK_LE(mo, mo_seq_cst);
  return mo;
}

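// Common prologue for every __tsan_atomic* entry point: normalize the memory
// order, honor the force_seq_cst_atomics flag, capture the caller's PC,
// update the statistics and dispatch to the matching Atomic##func template.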
#define SCOPED_ATOMIC(func, ...) \
    mo = ConvertOrder(mo); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    ProcessPendingSignals(thr); \
    const uptr pc = (uptr)__builtin_return_address(0); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, pc, __FUNCTION__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/

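// The templates below do the real work: they perform the memory access (a
// plain access for load/store, a __sync builtin for read-modify-write ops)
// and report the acquire/release semantics to the detector via Acquire() and
// Release()/ReleaseStore().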
template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  T v = *a;
  if (IsAcquireOrder(mo))
    Acquire(thr, pc, (uptr)a);
  return v;
}

template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  if (IsReleaseOrder(mo))
    ReleaseStore(thr, pc, (uptr)a);
  *a = v;
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  if (IsReleaseOrder(mo))
    Release(thr, pc, (uptr)a);
  v = __sync_lock_test_and_set(a, v);
  if (IsAcquireOrder(mo))
    Acquire(thr, pc, (uptr)a);
  return v;
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  if (IsReleaseOrder(mo))
    Release(thr, pc, (uptr)a);
  v = __sync_fetch_and_add(a, v);
  if (IsAcquireOrder(mo))
    Acquire(thr, pc, (uptr)a);
  return v;
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  if (IsReleaseOrder(mo))
    Release(thr, pc, (uptr)a);
  v = __sync_fetch_and_sub(a, v);
  if (IsAcquireOrder(mo))
    Acquire(thr, pc, (uptr)a);
  return v;
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  if (IsReleaseOrder(mo))
    Release(thr, pc, (uptr)a);
  v = __sync_fetch_and_and(a, v);
  if (IsAcquireOrder(mo))
    Acquire(thr, pc, (uptr)a);
  return v;
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  if (IsReleaseOrder(mo))
    Release(thr, pc, (uptr)a);
  v = __sync_fetch_and_or(a, v);
  if (IsAcquireOrder(mo))
    Acquire(thr, pc, (uptr)a);
  return v;
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  if (IsReleaseOrder(mo))
    Release(thr, pc, (uptr)a);
  v = __sync_fetch_and_xor(a, v);
  if (IsAcquireOrder(mo))
    Acquire(thr, pc, (uptr)a);
  return v;
}

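// Strong compare-and-swap. On failure the current value of *a is written back
// to *c, mirroring the C11/C++11 compare_exchange interface.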
template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo) {
  if (IsReleaseOrder(mo))
    Release(thr, pc, (uptr)a);
  T cc = *c;
  T pr = __sync_val_compare_and_swap(a, cc, v);
  if (IsAcquireOrder(mo))
    Acquire(thr, pc, (uptr)a);
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}

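// Value-returning flavor, used by the *_compare_exchange_val entry points.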
template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo) {
  AtomicCAS(thr, pc, a, &c, v, mo);
  return c;
}

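// Fences issue a full hardware barrier regardless of the requested memory
// order.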
static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  __sync_synchronize();
}

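// Exported __tsan_atomic* entry points. Each one expands the SCOPED_ATOMIC
// prologue and forwards to the template instantiation of the matching width.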
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

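// Both the _strong and _weak compare-exchange entry points map to the same
// (strong) AtomicCAS implementation, so the weak variants never fail
// spuriously here.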
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo);
}

int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo);
}

int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo);
}

int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo);
}

int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo);
}

int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo);
}

int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo);
}

int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo);
}

a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo);
}

a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo);
}

a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo);
}

a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo);
}

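// The dummy 'a' below exists only so that sizeof(*a) inside SCOPED_ATOMIC
// compiles; it is never dereferenced. The signal fence is left as a no-op.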
void __tsan_atomic_thread_fence(morder mo) {
  char* a;
  SCOPED_ATOMIC(Fence, mo);
}

void __tsan_atomic_signal_fence(morder mo) {
}