//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11/C1x standards.
// For background see the C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
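//
// Illustrative sketch only (not the exact compiler output): with TSan
// instrumentation enabled, a C++11 atomic access such as
//   std::atomic<int> x;
//   int v = x.load(std::memory_order_acquire);
// is expected to end up in one of the entry points defined below, roughly as
//   a32 v = __tsan_atomic32_load((const volatile a32*)&x,
//                                __tsan_memory_order_acquire);
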
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_interface_atomic.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"

using namespace __tsan;  // NOLINT

#define SCOPED_ATOMIC(func, ...) \
    const uptr callpc = (uptr)__builtin_return_address(0); \
    uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
    mo = ConvertOrder(mo); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, callpc, a, mo, __FUNCTION__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/

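// Every __tsan_atomic* entry point below expands SCOPED_ATOMIC.  As a rough
// sketch of the preprocessed form, __tsan_atomic32_load(a, mo) becomes:
//   const uptr callpc = (uptr)__builtin_return_address(0);
//   uptr pc = __sanitizer::StackTrace::GetCurrentPc();
//   mo = ConvertOrder(mo);
//   mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo;
//   ThreadState *const thr = cur_thread();
//   AtomicStatInc(thr, sizeof(*a), mo, StatAtomicLoad);
//   ScopedAtomic sa(thr, callpc, a, mo, __FUNCTION__);
//   return AtomicLoad(thr, pc, a, mo);
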
typedef __tsan_memory_order morder;
typedef __tsan_atomic8 a8;
typedef __tsan_atomic16 a16;
typedef __tsan_atomic32 a32;
typedef __tsan_atomic64 a64;
typedef __tsan_atomic128 a128;
const morder mo_relaxed = __tsan_memory_order_relaxed;
const morder mo_consume = __tsan_memory_order_consume;
const morder mo_acquire = __tsan_memory_order_acquire;
const morder mo_release = __tsan_memory_order_release;
const morder mo_acq_rel = __tsan_memory_order_acq_rel;
const morder mo_seq_cst = __tsan_memory_order_seq_cst;

class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
               morder mo, const char *func)
      : thr_(thr) {
    CHECK_EQ(thr_->in_rtl, 0);
    ProcessPendingSignals(thr);
    FuncEntry(thr_, pc);
    DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
    thr_->in_rtl++;
  }
  ~ScopedAtomic() {
    thr_->in_rtl--;
    CHECK_EQ(thr_->in_rtl, 0);
    FuncExit(thr_);
  }
 private:
  ThreadState *thr_;
};

static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             : size == 8 ? StatAtomic8
             :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume
      || mo == mo_acquire || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

static morder ConvertOrder(morder mo) {
  if (mo > (morder)100500) {
    mo = morder(mo - 100500);
    if (mo == morder(1 << 0))
      mo = mo_relaxed;
    else if (mo == morder(1 << 1))
      mo = mo_consume;
    else if (mo == morder(1 << 2))
      mo = mo_acquire;
    else if (mo == morder(1 << 3))
      mo = mo_release;
    else if (mo == morder(1 << 4))
      mo = mo_acq_rel;
    else if (mo == morder(1 << 5))
      mo = mo_seq_cst;
  }
  CHECK_GE(mo, mo_relaxed);
  CHECK_LE(mo, mo_seq_cst);
  return mo;
}

template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain a full barrier.
  __sync_synchronize();
  return res;
}

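// Per the GCC builtin documentation __sync_lock_test_and_set is only an
// acquire barrier on most targets, which is presumably why the explicit
// __sync_synchronize() above is used to get full exchange() semantics.
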
template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

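// The loop above emulates fetch-nand with compare-and-swap: on a successful
// swap it returns the previous value (cmp), matching the convention of the
// other fetch_* helpers; otherwise it retries with the freshly read value.
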
template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// Atomic ops are executed under tsan internal mutex,
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
a128 func_xchg(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

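// Note: the fallbacks above are plain, non-atomic read-modify-write
// sequences; per the comment above this is only safe because atomic ops run
// under the tsan-internal SyncVar mutex and 128-bit atomics are assumed not
// to be touched from non-instrumented code.
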
template<typename T>
static int SizeLog() {
  if (sizeof(T) <= 1)
    return kSizeLog1;
  else if (sizeof(T) <= 2)
    return kSizeLog2;
  else if (sizeof(T) <= 4)
    return kSizeLog4;
  else
    return kSizeLog8;
  // For 16-byte atomics we also use 8-byte memory access,
  // this leads to false negatives only in very obscure cases.
}

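// SizeLog<T>() only determines the shadow access size passed to
// MemoryReadAtomic/MemoryWriteAtomic below, so (as the comment above notes)
// a 16-byte atomic is tracked as an 8-byte access.
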
template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a)) {
    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
    return *a;  // as if atomic
  }
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
  AcquireImpl(thr, pc, &s->clock);
  T v = *a;
  s->mtx.ReadUnlock();
  __sync_synchronize();
  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
  return v;
}

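// Sketch of what the two paths above buy the race detector: the fast path
// only records an atomic read in shadow memory, while the slow path also
// acquires the clock of the SyncVar for this address, so reads ordered after
// an acquire-load are not reported as racing with writes released before the
// matching store.
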
template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so must reset the clock.
  if (!IsReleaseOrder(mo) && sizeof(T) <= sizeof(a)) {
    *a = v;  // as if atomic
    return;
  }
  __sync_synchronize();
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  *a = v;
  s->mtx.Unlock();
  // Trailing memory barrier to provide sequential consistency
  // for Dekker-like store-load synchronization.
  __sync_synchronize();
}

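// The trailing barrier matters for Dekker-like code, e.g. (sketch):
//   Thread 1: __tsan_atomic32_store(&x, 1, mo_seq_cst);
//             r1 = __tsan_atomic32_load(&y, mo_seq_cst);
//   Thread 2: __tsan_atomic32_store(&y, 1, mo_seq_cst);
//             r2 = __tsan_atomic32_load(&x, mo_seq_cst);
// Sequential consistency requires that r1 and r2 cannot both be 0, which is
// only guaranteed with a full store-load fence after each store.
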
template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  if (mo != mo_relaxed) {
    s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  v = F(a, v);
  if (s)
    s->mtx.Unlock();
  return v;
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because llvm does not pass it yet.
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  if (mo != mo_relaxed) {
    s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  T cc = *c;
  T pr = func_cas(a, cc, v);
  if (s)
    s->mtx.Unlock();
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}

a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif

void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif

a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif

int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

void __tsan_atomic_thread_fence(morder mo) {
  char* a = 0;
  SCOPED_ATOMIC(Fence, mo);
}

void __tsan_atomic_signal_fence(morder mo) {
}