//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11/C1x standards.
// For background see the C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/

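// Rough illustration (a sketch, not part of the interface contract): with
// -fsanitize=thread the compiler replaces C/C++11 atomic operations with
// calls to the __tsan_atomic* entry points defined in this file, so
//   std::atomic<int> flag;
//   flag.store(1, std::memory_order_release);
// is lowered to approximately
//   __tsan_atomic32_store((volatile a32 *)&flag, 1,
//                         __tsan_memory_order_release);
// The exact lowering depends on the compiler and its version.
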
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_interface_atomic.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"

using namespace __tsan;  // NOLINT

#define SCOPED_ATOMIC(func, ...) \
    const uptr callpc = (uptr)__builtin_return_address(0); \
    uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
    mo = ConvertOrder(mo); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, callpc, a, mo, __FUNCTION__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/

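// For reference, SCOPED_ATOMIC expands roughly as follows inside, e.g.,
// __tsan_atomic32_load(a, mo):
//   const uptr callpc = (uptr)__builtin_return_address(0);
//   uptr pc = __sanitizer::StackTrace::GetCurrentPc();
//   mo = ConvertOrder(mo);
//   mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo;
//   ThreadState *const thr = cur_thread();
//   AtomicStatInc(thr, sizeof(*a), mo, StatAtomicLoad);
//   ScopedAtomic sa(thr, callpc, a, mo, "__tsan_atomic32_load");
//   return AtomicLoad(thr, pc, a, mo);
// Note that callpc (the caller's return address) is used for the function
// frame pushed by ScopedAtomic, while pc is used for the memory access itself.
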
// Some shortcuts.
typedef __tsan_memory_order morder;
typedef __tsan_atomic8 a8;
typedef __tsan_atomic16 a16;
typedef __tsan_atomic32 a32;
typedef __tsan_atomic64 a64;
typedef __tsan_atomic128 a128;
const morder mo_relaxed = __tsan_memory_order_relaxed;
const morder mo_consume = __tsan_memory_order_consume;
const morder mo_acquire = __tsan_memory_order_acquire;
const morder mo_release = __tsan_memory_order_release;
const morder mo_acq_rel = __tsan_memory_order_acq_rel;
const morder mo_seq_cst = __tsan_memory_order_seq_cst;

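// RAII helper used by every entry point below: it marks the thread as being
// inside the runtime (in_rtl), delivers pending signals, and pushes a
// function frame via FuncEntry so the atomic operation appears in reports;
// the destructor undoes all of this.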
class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
               morder mo, const char *func)
      : thr_(thr) {
    CHECK_EQ(thr_->in_rtl, 0);
    ProcessPendingSignals(thr);
    FuncEntry(thr_, pc);
    DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
    thr_->in_rtl++;
  }
  ~ScopedAtomic() {
    thr_->in_rtl--;
    CHECK_EQ(thr_->in_rtl, 0);
    FuncExit(thr_);
  }
 private:
  ThreadState *thr_;
};

static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             : size == 8 ? StatAtomic8
             :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

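// Normalizes the memory order argument. Values above 100500 carry the order
// as a one-hot bit (e.g. 100500 + (1 << 2) maps to mo_acquire), apparently an
// older instrumentation encoding; they are converted to the plain enum values
// used throughout the runtime.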
static morder ConvertOrder(morder mo) {
  if (mo > (morder)100500) {
    mo = morder(mo - 100500);
    if (mo == morder(1 << 0))
      mo = mo_relaxed;
    else if (mo == morder(1 << 1))
      mo = mo_consume;
    else if (mo == morder(1 << 2))
      mo = mo_acquire;
    else if (mo == morder(1 << 3))
      mo = mo_release;
    else if (mo == morder(1 << 4))
      mo = mo_acq_rel;
    else if (mo == morder(1 << 5))
      mo = mo_seq_cst;
  }
  CHECK_GE(mo, mo_relaxed);
  CHECK_LE(mo, mo_seq_cst);
  return mo;
}

template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain a full barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// Atomic ops are executed under the tsan internal mutex;
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
a128 func_xchg(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

template<typename T>
static int SizeLog() {
  if (sizeof(T) <= 1)
    return kSizeLog1;
  else if (sizeof(T) <= 2)
    return kSizeLog2;
  else if (sizeof(T) <= 4)
    return kSizeLog4;
  else
    return kSizeLog8;
  // For 16-byte atomics we also use 8-byte memory access,
  // which leads to false negatives only in very obscure cases.
}

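// Note on the fast paths in AtomicLoad/AtomicStore below: sizeof(a) is the
// size of the pointer argument, so the "sizeof(T) <= sizeof(a)" check admits
// accesses up to the native word size, which are assumed to be atomic in
// hardware; 16-byte accesses always take the slow path under the SyncVar
// mutex.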
template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a)) {
    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
    return *a;  // as if atomic
  }
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
  AcquireImpl(thr, pc, &s->clock);
  T v = *a;
  s->mtx.ReadUnlock();
  __sync_synchronize();
  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
  return v;
}

template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so we must reset the clock.
  if (!IsReleaseOrder(mo) && sizeof(T) <= sizeof(a)) {
    *a = v;  // as if atomic
    return;
  }
  __sync_synchronize();
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  *a = v;
  s->mtx.Unlock();
  // Trailing memory barrier to provide sequential consistency
  // for Dekker-like store-load synchronization.
  __sync_synchronize();
}

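// The "Dekker-like" pattern referenced above is, roughly:
//   thread 1: store(x, 1, seq_cst);  r1 = load(y, seq_cst);
//   thread 2: store(y, 1, seq_cst);  r2 = load(x, seq_cst);
// Sequential consistency forbids the outcome r1 == 0 && r2 == 0, which is why
// the non-fast-path store ends with a full memory barrier.
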
template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  if (mo != mo_relaxed) {
    s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  v = F(a, v);
  if (s)
    s->mtx.Unlock();
  return v;
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because llvm does not pass it yet.
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  if (mo != mo_relaxed) {
    s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  T cc = *c;
  T pr = func_cas(a, cc, v);
  if (s)
    s->mtx.Unlock();
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}

a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif

void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif

a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif

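// The compare_exchange entry points below all funnel into the AtomicCAS
// helpers above: the _strong/_weak variants return non-zero on success and,
// on failure, write the observed value back through *c (the C11/C++11
// compare_exchange contract), while the _val variants return the previous
// value of *a. Note that _weak is implemented with the same full CAS as
// _strong, and the failure order fmo is currently ignored.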
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

void __tsan_atomic_thread_fence(morder mo) {
  char* a = 0;
  SCOPED_ATOMIC(Fence, mo);
}

void __tsan_atomic_signal_fence(morder mo) {
}