1 //===-- tsan_interface_atomic.cc ------------------------------------------===//
3 // This file is distributed under the University of Illinois Open Source
4 // License. See LICENSE.TXT for details.
6 //===----------------------------------------------------------------------===//
8 // This file is a part of ThreadSanitizer (TSan), a race detector.
10 //===----------------------------------------------------------------------===//
12 #include "sanitizer_common/sanitizer_placement_new.h"
13 #include "tsan_interface_atomic.h"
14 #include "tsan_flags.h"
17 using namespace __tsan
; // NOLINT
21 ScopedAtomic(ThreadState
*thr
, uptr pc
, const char *func
)
23 CHECK_EQ(thr_
->in_rtl
, 1); // 1 due to our own ScopedInRtl member.
24 DPrintf("#%d: %s\n", thr_
->tid
, func
);
27 CHECK_EQ(thr_
->in_rtl
, 1);
35 typedef __tsan_memory_order morder
;
36 typedef __tsan_atomic8 a8
;
37 typedef __tsan_atomic16 a16
;
38 typedef __tsan_atomic32 a32
;
39 typedef __tsan_atomic64 a64
;
40 const morder mo_relaxed
= __tsan_memory_order_relaxed
;
41 const morder mo_consume
= __tsan_memory_order_consume
;
42 const morder mo_acquire
= __tsan_memory_order_acquire
;
43 const morder mo_release
= __tsan_memory_order_release
;
44 const morder mo_acq_rel
= __tsan_memory_order_acq_rel
;
45 const morder mo_seq_cst
= __tsan_memory_order_seq_cst
;
47 static void AtomicStatInc(ThreadState
*thr
, uptr size
, morder mo
, StatType t
) {
48 StatInc(thr
, StatAtomic
);
50 StatInc(thr
, size
== 1 ? StatAtomic1
51 : size
== 2 ? StatAtomic2
52 : size
== 4 ? StatAtomic4
54 StatInc(thr
, mo
== mo_relaxed
? StatAtomicRelaxed
55 : mo
== mo_consume
? StatAtomicConsume
56 : mo
== mo_acquire
? StatAtomicAcquire
57 : mo
== mo_release
? StatAtomicRelease
58 : mo
== mo_acq_rel
? StatAtomicAcq_Rel
62 static bool IsLoadOrder(morder mo
) {
63 return mo
== mo_relaxed
|| mo
== mo_consume
64 || mo
== mo_acquire
|| mo
== mo_seq_cst
;
67 static bool IsStoreOrder(morder mo
) {
68 return mo
== mo_relaxed
|| mo
== mo_release
|| mo
== mo_seq_cst
;
71 static bool IsReleaseOrder(morder mo
) {
72 return mo
== mo_release
|| mo
== mo_acq_rel
|| mo
== mo_seq_cst
;
75 static bool IsAcquireOrder(morder mo
) {
76 return mo
== mo_consume
|| mo
== mo_acquire
77 || mo
== mo_acq_rel
|| mo
== mo_seq_cst
;
80 static morder
ConvertOrder(morder mo
) {
81 if (mo
> (morder
)100500) {
82 mo
= morder(mo
- 100500);
83 if (mo
== morder(1 << 0))
85 else if (mo
== morder(1 << 1))
87 else if (mo
== morder(1 << 2))
89 else if (mo
== morder(1 << 3))
91 else if (mo
== morder(1 << 4))
93 else if (mo
== morder(1 << 5))
96 CHECK_GE(mo
, mo_relaxed
);
97 CHECK_LE(mo
, mo_seq_cst
);
// Common prologue for every __tsan_atomic* entry point: normalizes the
// memory order, optionally upgrades it to seq_cst (force_seq_cst_atomics
// flag), delivers pending signals, bumps the statistics, enters the runtime
// via ScopedAtomic, and dispatches to the matching Atomic##func template.
// Relies on `a` and `mo` being in scope at the expansion site.
#define SCOPED_ATOMIC(func, ...) \
    mo = ConvertOrder(mo); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    ProcessPendingSignals(thr); \
    const uptr pc = (uptr)__builtin_return_address(0); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, pc, __FUNCTION__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/
113 static T
AtomicLoad(ThreadState
*thr
, uptr pc
, const volatile T
*a
,
115 CHECK(IsLoadOrder(mo
));
117 if (IsAcquireOrder(mo
))
118 Acquire(thr
, pc
, (uptr
)a
);
123 static void AtomicStore(ThreadState
*thr
, uptr pc
, volatile T
*a
, T v
,
125 CHECK(IsStoreOrder(mo
));
126 if (IsReleaseOrder(mo
))
127 ReleaseStore(thr
, pc
, (uptr
)a
);
132 static T
AtomicExchange(ThreadState
*thr
, uptr pc
, volatile T
*a
, T v
,
134 if (IsReleaseOrder(mo
))
135 Release(thr
, pc
, (uptr
)a
);
136 v
= __sync_lock_test_and_set(a
, v
);
137 if (IsAcquireOrder(mo
))
138 Acquire(thr
, pc
, (uptr
)a
);
143 static T
AtomicFetchAdd(ThreadState
*thr
, uptr pc
, volatile T
*a
, T v
,
145 if (IsReleaseOrder(mo
))
146 Release(thr
, pc
, (uptr
)a
);
147 v
= __sync_fetch_and_add(a
, v
);
148 if (IsAcquireOrder(mo
))
149 Acquire(thr
, pc
, (uptr
)a
);
154 static T
AtomicFetchSub(ThreadState
*thr
, uptr pc
, volatile T
*a
, T v
,
156 if (IsReleaseOrder(mo
))
157 Release(thr
, pc
, (uptr
)a
);
158 v
= __sync_fetch_and_sub(a
, v
);
159 if (IsAcquireOrder(mo
))
160 Acquire(thr
, pc
, (uptr
)a
);
165 static T
AtomicFetchAnd(ThreadState
*thr
, uptr pc
, volatile T
*a
, T v
,
167 if (IsReleaseOrder(mo
))
168 Release(thr
, pc
, (uptr
)a
);
169 v
= __sync_fetch_and_and(a
, v
);
170 if (IsAcquireOrder(mo
))
171 Acquire(thr
, pc
, (uptr
)a
);
176 static T
AtomicFetchOr(ThreadState
*thr
, uptr pc
, volatile T
*a
, T v
,
178 if (IsReleaseOrder(mo
))
179 Release(thr
, pc
, (uptr
)a
);
180 v
= __sync_fetch_and_or(a
, v
);
181 if (IsAcquireOrder(mo
))
182 Acquire(thr
, pc
, (uptr
)a
);
187 static T
AtomicFetchXor(ThreadState
*thr
, uptr pc
, volatile T
*a
, T v
,
189 if (IsReleaseOrder(mo
))
190 Release(thr
, pc
, (uptr
)a
);
191 v
= __sync_fetch_and_xor(a
, v
);
192 if (IsAcquireOrder(mo
))
193 Acquire(thr
, pc
, (uptr
)a
);
198 static bool AtomicCAS(ThreadState
*thr
, uptr pc
,
199 volatile T
*a
, T
*c
, T v
, morder mo
) {
200 if (IsReleaseOrder(mo
))
201 Release(thr
, pc
, (uptr
)a
);
203 T pr
= __sync_val_compare_and_swap(a
, cc
, v
);
204 if (IsAcquireOrder(mo
))
205 Acquire(thr
, pc
, (uptr
)a
);
213 static T
AtomicCAS(ThreadState
*thr
, uptr pc
,
214 volatile T
*a
, T c
, T v
, morder mo
) {
215 AtomicCAS(thr
, pc
, a
, &c
, v
, mo
);
219 static void AtomicFence(ThreadState
*thr
, uptr pc
, morder mo
) {
220 __sync_synchronize();
223 a8
__tsan_atomic8_load(const volatile a8
*a
, morder mo
) {
224 SCOPED_ATOMIC(Load
, a
, mo
);
227 a16
__tsan_atomic16_load(const volatile a16
*a
, morder mo
) {
228 SCOPED_ATOMIC(Load
, a
, mo
);
231 a32
__tsan_atomic32_load(const volatile a32
*a
, morder mo
) {
232 SCOPED_ATOMIC(Load
, a
, mo
);
235 a64
__tsan_atomic64_load(const volatile a64
*a
, morder mo
) {
236 SCOPED_ATOMIC(Load
, a
, mo
);
239 void __tsan_atomic8_store(volatile a8
*a
, a8 v
, morder mo
) {
240 SCOPED_ATOMIC(Store
, a
, v
, mo
);
243 void __tsan_atomic16_store(volatile a16
*a
, a16 v
, morder mo
) {
244 SCOPED_ATOMIC(Store
, a
, v
, mo
);
247 void __tsan_atomic32_store(volatile a32
*a
, a32 v
, morder mo
) {
248 SCOPED_ATOMIC(Store
, a
, v
, mo
);
251 void __tsan_atomic64_store(volatile a64
*a
, a64 v
, morder mo
) {
252 SCOPED_ATOMIC(Store
, a
, v
, mo
);
255 a8
__tsan_atomic8_exchange(volatile a8
*a
, a8 v
, morder mo
) {
256 SCOPED_ATOMIC(Exchange
, a
, v
, mo
);
259 a16
__tsan_atomic16_exchange(volatile a16
*a
, a16 v
, morder mo
) {
260 SCOPED_ATOMIC(Exchange
, a
, v
, mo
);
263 a32
__tsan_atomic32_exchange(volatile a32
*a
, a32 v
, morder mo
) {
264 SCOPED_ATOMIC(Exchange
, a
, v
, mo
);
267 a64
__tsan_atomic64_exchange(volatile a64
*a
, a64 v
, morder mo
) {
268 SCOPED_ATOMIC(Exchange
, a
, v
, mo
);
271 a8
__tsan_atomic8_fetch_add(volatile a8
*a
, a8 v
, morder mo
) {
272 SCOPED_ATOMIC(FetchAdd
, a
, v
, mo
);
275 a16
__tsan_atomic16_fetch_add(volatile a16
*a
, a16 v
, morder mo
) {
276 SCOPED_ATOMIC(FetchAdd
, a
, v
, mo
);
279 a32
__tsan_atomic32_fetch_add(volatile a32
*a
, a32 v
, morder mo
) {
280 SCOPED_ATOMIC(FetchAdd
, a
, v
, mo
);
283 a64
__tsan_atomic64_fetch_add(volatile a64
*a
, a64 v
, morder mo
) {
284 SCOPED_ATOMIC(FetchAdd
, a
, v
, mo
);
287 a8
__tsan_atomic8_fetch_sub(volatile a8
*a
, a8 v
, morder mo
) {
288 SCOPED_ATOMIC(FetchSub
, a
, v
, mo
);
291 a16
__tsan_atomic16_fetch_sub(volatile a16
*a
, a16 v
, morder mo
) {
292 SCOPED_ATOMIC(FetchSub
, a
, v
, mo
);
295 a32
__tsan_atomic32_fetch_sub(volatile a32
*a
, a32 v
, morder mo
) {
296 SCOPED_ATOMIC(FetchSub
, a
, v
, mo
);
299 a64
__tsan_atomic64_fetch_sub(volatile a64
*a
, a64 v
, morder mo
) {
300 SCOPED_ATOMIC(FetchSub
, a
, v
, mo
);
303 a8
__tsan_atomic8_fetch_and(volatile a8
*a
, a8 v
, morder mo
) {
304 SCOPED_ATOMIC(FetchAnd
, a
, v
, mo
);
307 a16
__tsan_atomic16_fetch_and(volatile a16
*a
, a16 v
, morder mo
) {
308 SCOPED_ATOMIC(FetchAnd
, a
, v
, mo
);
311 a32
__tsan_atomic32_fetch_and(volatile a32
*a
, a32 v
, morder mo
) {
312 SCOPED_ATOMIC(FetchAnd
, a
, v
, mo
);
315 a64
__tsan_atomic64_fetch_and(volatile a64
*a
, a64 v
, morder mo
) {
316 SCOPED_ATOMIC(FetchAnd
, a
, v
, mo
);
319 a8
__tsan_atomic8_fetch_or(volatile a8
*a
, a8 v
, morder mo
) {
320 SCOPED_ATOMIC(FetchOr
, a
, v
, mo
);
323 a16
__tsan_atomic16_fetch_or(volatile a16
*a
, a16 v
, morder mo
) {
324 SCOPED_ATOMIC(FetchOr
, a
, v
, mo
);
327 a32
__tsan_atomic32_fetch_or(volatile a32
*a
, a32 v
, morder mo
) {
328 SCOPED_ATOMIC(FetchOr
, a
, v
, mo
);
331 a64
__tsan_atomic64_fetch_or(volatile a64
*a
, a64 v
, morder mo
) {
332 SCOPED_ATOMIC(FetchOr
, a
, v
, mo
);
335 a8
__tsan_atomic8_fetch_xor(volatile a8
*a
, a8 v
, morder mo
) {
336 SCOPED_ATOMIC(FetchXor
, a
, v
, mo
);
339 a16
__tsan_atomic16_fetch_xor(volatile a16
*a
, a16 v
, morder mo
) {
340 SCOPED_ATOMIC(FetchXor
, a
, v
, mo
);
343 a32
__tsan_atomic32_fetch_xor(volatile a32
*a
, a32 v
, morder mo
) {
344 SCOPED_ATOMIC(FetchXor
, a
, v
, mo
);
347 a64
__tsan_atomic64_fetch_xor(volatile a64
*a
, a64 v
, morder mo
) {
348 SCOPED_ATOMIC(FetchXor
, a
, v
, mo
);
351 int __tsan_atomic8_compare_exchange_strong(volatile a8
*a
, a8
*c
, a8 v
,
353 SCOPED_ATOMIC(CAS
, a
, c
, v
, mo
);
356 int __tsan_atomic16_compare_exchange_strong(volatile a16
*a
, a16
*c
, a16 v
,
358 SCOPED_ATOMIC(CAS
, a
, c
, v
, mo
);
361 int __tsan_atomic32_compare_exchange_strong(volatile a32
*a
, a32
*c
, a32 v
,
363 SCOPED_ATOMIC(CAS
, a
, c
, v
, mo
);
366 int __tsan_atomic64_compare_exchange_strong(volatile a64
*a
, a64
*c
, a64 v
,
368 SCOPED_ATOMIC(CAS
, a
, c
, v
, mo
);
371 int __tsan_atomic8_compare_exchange_weak(volatile a8
*a
, a8
*c
, a8 v
,
373 SCOPED_ATOMIC(CAS
, a
, c
, v
, mo
);
376 int __tsan_atomic16_compare_exchange_weak(volatile a16
*a
, a16
*c
, a16 v
,
378 SCOPED_ATOMIC(CAS
, a
, c
, v
, mo
);
381 int __tsan_atomic32_compare_exchange_weak(volatile a32
*a
, a32
*c
, a32 v
,
383 SCOPED_ATOMIC(CAS
, a
, c
, v
, mo
);
386 int __tsan_atomic64_compare_exchange_weak(volatile a64
*a
, a64
*c
, a64 v
,
388 SCOPED_ATOMIC(CAS
, a
, c
, v
, mo
);
391 a8
__tsan_atomic8_compare_exchange_val(volatile a8
*a
, a8 c
, a8 v
,
393 SCOPED_ATOMIC(CAS
, a
, c
, v
, mo
);
395 a16
__tsan_atomic16_compare_exchange_val(volatile a16
*a
, a16 c
, a16 v
,
397 SCOPED_ATOMIC(CAS
, a
, c
, v
, mo
);
400 a32
__tsan_atomic32_compare_exchange_val(volatile a32
*a
, a32 c
, a32 v
,
402 SCOPED_ATOMIC(CAS
, a
, c
, v
, mo
);
405 a64
__tsan_atomic64_compare_exchange_val(volatile a64
*a
, a64 c
, a64 v
,
407 SCOPED_ATOMIC(CAS
, a
, c
, v
, mo
);
410 void __tsan_atomic_thread_fence(morder mo
) {
412 SCOPED_ATOMIC(Fence
, mo
);
415 void __tsan_atomic_signal_fence(morder mo
) {