//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11/C1x standards.
// For background see the C++11 standard. A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
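//
// For example, compiler instrumentation is expected to lower an operation
// such as x.load(std::memory_order_acquire) on a 32-bit std::atomic into a
// call along the lines of
//   __tsan_atomic32_load(addr, __tsan_memory_order_acquire);
// (illustrative sketch only; the exact lowering is up to the
// instrumentation pass).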

#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_interface_atomic.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"

using namespace __tsan;  // NOLINT

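// SCOPED_ATOMIC() is the common prologue of the __tsan_atomic* entry points
// below: it captures the caller PC, normalizes the memory order (optionally
// forcing seq_cst when the force_seq_cst_atomics flag is set), updates the
// statistics, opens a ScopedAtomic scope for the current thread and then
// dispatches to the corresponding Atomic##func() implementation.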
#define SCOPED_ATOMIC(func, ...) \
    const uptr callpc = (uptr)__builtin_return_address(0); \
    uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
    mo = ConvertOrder(mo); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, callpc, a, mo, __FUNCTION__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/

// Some shortcuts.
typedef __tsan_memory_order morder;
typedef __tsan_atomic8 a8;
typedef __tsan_atomic16 a16;
typedef __tsan_atomic32 a32;
typedef __tsan_atomic64 a64;
typedef __tsan_atomic128 a128;
const morder mo_relaxed = __tsan_memory_order_relaxed;
const morder mo_consume = __tsan_memory_order_consume;
const morder mo_acquire = __tsan_memory_order_acquire;
const morder mo_release = __tsan_memory_order_release;
const morder mo_acq_rel = __tsan_memory_order_acq_rel;
const morder mo_seq_cst = __tsan_memory_order_seq_cst;

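// RAII helper used by SCOPED_ATOMIC(): processes pending signals, emits
// function entry/exit events for the current thread and marks that the
// thread is executing inside the runtime (thr->in_rtl).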
class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
               morder mo, const char *func)
      : thr_(thr) {
    CHECK_EQ(thr_->in_rtl, 0);
    ProcessPendingSignals(thr);
    FuncEntry(thr_, pc);
    DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
    thr_->in_rtl++;
  }
  ~ScopedAtomic() {
    thr_->in_rtl--;
    CHECK_EQ(thr_->in_rtl, 0);
    FuncExit(thr_);
  }
 private:
  ThreadState *thr_;
};

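// Bumps the per-thread statistics for an atomic operation: the total
// counter, the per-operation counter, and counters bucketed by access size
// and memory order.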
static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             : size == 8 ? StatAtomic8
             :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

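// Maps an alternative, bitmask-style encoding of memory orders (values
// offset by the magic constant 100500, presumably produced by older
// instrumentation) back to the plain enumeration used in this file;
// already-plain values pass through unchanged, subject to the range checks.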
static morder ConvertOrder(morder mo) {
  if (mo > (morder)100500) {
    mo = morder(mo - 100500);
    if (mo == morder(1 << 0))
      mo = mo_relaxed;
    else if (mo == morder(1 << 1))
      mo = mo_consume;
    else if (mo == morder(1 << 2))
      mo = mo_acquire;
    else if (mo == morder(1 << 3))
      mo = mo_release;
    else if (mo == morder(1 << 4))
      mo = mo_acq_rel;
    else if (mo == morder(1 << 5))
      mo = mo_seq_cst;
  }
  CHECK_GE(mo, mo_relaxed);
  CHECK_LE(mo, mo_seq_cst);
  return mo;
}

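// The func_* helpers below perform the actual hardware atomic operation via
// the __sync builtins; AtomicRMW() plugs them in as its update function F.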
template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain a full barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand,
  // so emulate it with a compare-and-swap loop.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// Atomic ops are executed under the tsan internal mutex,
// so here we assume that the atomic variables are not accessed
// from non-instrumented code.
#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
a128 func_xchg(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

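// Maps sizeof(T) to the kSizeLogN constant expected by the
// MemoryReadAtomic/MemoryWriteAtomic instrumentation calls.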
template<typename T>
static int SizeLog() {
  if (sizeof(T) <= 1)
    return kSizeLog1;
  else if (sizeof(T) <= 2)
    return kSizeLog2;
  else if (sizeof(T) <= 4)
    return kSizeLog4;
  else
    return kSizeLog8;
  // For 16-byte atomics we also use 8-byte memory access,
  // this leads to false negatives only in very obscure cases.
}

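// Atomic load: non-acquire loads of pointer size or smaller only record the
// memory access; acquire loads additionally acquire the sync clock
// associated with the address.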
template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a)) {
    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
    return *a;  // as if atomic
  }
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
  AcquireImpl(thr, pc, &s->clock);
  T v = *a;
  s->mtx.ReadUnlock();
  __sync_synchronize();
  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
  return v;
}

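// Atomic store: non-release stores of pointer size or smaller only record
// the memory access; release stores publish the thread's clock on the sync
// object for the address and end with a full barrier for Dekker-style
// store-load ordering.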
template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so we must reset the clock.
  if (!IsReleaseOrder(mo) && sizeof(T) <= sizeof(a)) {
    *a = v;  // as if atomic
    return;
  }
  __sync_synchronize();
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  *a = v;
  s->mtx.Unlock();
  // Trailing memory barrier to provide sequential consistency
  // for Dekker-like store-load synchronization.
  __sync_synchronize();
}

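// Generic read-modify-write: for non-relaxed orders it performs the
// acquire/release bookkeeping on the sync object for the address, then
// applies the update function F while holding that object's mutex.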
template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  if (mo != mo_relaxed) {
    s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  v = F(a, v);
  if (s)
    s->mtx.Unlock();
  return v;
}

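// Thin wrappers mapping each exchange/fetch-and-X flavor onto AtomicRMW()
// with the matching func_* primitive.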
template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

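// Compare-and-swap: the pointer-based overload backs the strong/weak
// compare_exchange entry points (updating *c with the observed value on
// failure); the value-based overload backs *_compare_exchange_val and
// returns the previously stored value.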
template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because llvm does not pass it yet.
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  if (mo != mo_relaxed) {
    s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  T cc = *c;
  T pr = func_cas(a, cc, v);
  if (s)
    s->mtx.Unlock();
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}

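// Interface entry points. Each function simply expands SCOPED_ATOMIC(),
// which dispatches to the corresponding AtomicXxx() implementation above.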
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif

void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif

a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif

int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

void __tsan_atomic_thread_fence(morder mo) {
  char* a = 0;
  SCOPED_ATOMIC(Fence, mo);
}

void __tsan_atomic_signal_fence(morder mo) {
}