//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main internal TSan header file.
//
// Ground rules:
// - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//   function-scope locals)
// - All functions/classes/etc reside in namespace __tsan, except for those
//   declared in tsan_interface.h.
// - Platform-specific files should be used instead of ifdefs (*).
// - No system headers included in header files (*).
// - Platform-specific headers are included only into platform-specific
//   files (*).
//
// (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//

#ifndef TSAN_RTL_H
#define TSAN_RTL_H

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_asm.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common/sanitizer_libignore.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_vector.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_mman.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_report.h"
#include "tsan_platform.h"
#include "tsan_mutexset.h"
#include "tsan_ignoreset.h"
#include "tsan_stack_trace.h"

#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
#endif

namespace __tsan {

#if !SANITIZER_GO
struct MapUnmapCallback;
#if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__)

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = 0;
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = LocalAddressSpaceView;
  typedef __tsan::MapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#else
struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = Mapping::kHeapMemBeg;
  static const uptr kSpaceSize = Mapping::kHeapMemEnd - Mapping::kHeapMemBeg;
  static const uptr kMetadataSize = 0;
  typedef DefaultSizeClassMap SizeClassMap;
  typedef __tsan::MapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;
Allocator *allocator();
#endif

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2);

const u64 kShadowRodata = (u64)-1;  // .rodata shadow marker

// FastState (from most significant bit):
//   ignore          : 1
//   tid             : kTidBits
//   unused          : -
//   history_size    : 3
//   epoch           : kClkBits
class FastState {
 public:
  FastState(u64 tid, u64 epoch) {
    x_ = tid << kTidShift;
    x_ |= epoch;
    DCHECK_EQ(tid, this->tid());
    DCHECK_EQ(epoch, this->epoch());
    DCHECK_EQ(GetIgnoreBit(), false);
  }

  explicit FastState(u64 x)
      : x_(x) {
  }

  u64 raw() const {
    return x_;
  }

  u64 tid() const {
    u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
    return res;
  }

  u64 TidWithIgnore() const {
    u64 res = x_ >> kTidShift;
    return res;
  }

  u64 epoch() const {
    u64 res = x_ & ((1ull << kClkBits) - 1);
    return res;
  }

  void IncrementEpoch() {
    u64 old_epoch = epoch();
    x_ += 1;
    DCHECK_EQ(old_epoch + 1, epoch());
    (void)old_epoch;
  }

  void SetIgnoreBit() { x_ |= kIgnoreBit; }
  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
  bool GetIgnoreBit() const { return (s64)x_ < 0; }

  void SetHistorySize(int hs) {
    CHECK_GE(hs, 0);
    CHECK_LE(hs, 7);
    x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
  }

  ALWAYS_INLINE
  int GetHistorySize() const {
    return (int)((x_ >> kHistoryShift) & kHistoryMask);
  }

  void ClearHistorySize() {
    SetHistorySize(0);
  }

  ALWAYS_INLINE
  u64 GetTracePos() const {
    const int hs = GetHistorySize();
    // When hs == 0, the trace consists of 2 parts.
    const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
    return epoch() & mask;
  }

 private:
  friend class Shadow;
  static const int kTidShift = 64 - kTidBits - 1;
  static const u64 kIgnoreBit = 1ull << 63;
  static const u64 kFreedBit = 1ull << 63;
  static const u64 kHistoryShift = kClkBits;
  static const u64 kHistoryMask = 7;
  u64 x_;
};
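
// Illustrative sketch (not part of the original header): how a FastState
// word behaves under the operations above, assuming the tid and epoch fit
// the kTidBits/kClkBits fields from tsan_defs.h:
//
//   FastState fs(/*tid=*/5, /*epoch=*/100);
//   CHECK_EQ(fs.tid(), 5);
//   CHECK_EQ(fs.epoch(), 100);
//   fs.IncrementEpoch();         // x_ += 1 bumps the low kClkBits field
//   CHECK_EQ(fs.epoch(), 101);
//   fs.SetIgnoreBit();           // sets the msb; tid() masks it back out
//   CHECK_EQ(fs.tid(), 5);
//   fs.SetHistorySize(2);        // trace mask: 2^(kTracePartSizeBits+3) - 1
//   u64 pos = fs.GetTracePos();  // == fs.epoch() & that mask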

// Shadow (from most significant bit):
//   freed           : 1
//   tid             : kTidBits
//   is_atomic       : 1
//   is_read         : 1
//   size_log        : 2
//   addr0           : 3
//   epoch           : kClkBits
class Shadow : public FastState {
 public:
  explicit Shadow(u64 x)
      : FastState(x) {
  }

  explicit Shadow(const FastState &s)
      : FastState(s.x_) {
    ClearHistorySize();
  }

  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ((x_ >> kClkBits) & 31, 0);
    DCHECK_LE(addr0, 7);
    DCHECK_LE(kAccessSizeLog, 3);
    x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
    DCHECK_EQ(kAccessSizeLog, size_log());
    DCHECK_EQ(addr0, this->addr0());
  }

  void SetWrite(unsigned kAccessIsWrite) {
    DCHECK_EQ(x_ & kReadBit, 0);
    if (!kAccessIsWrite)
      x_ |= kReadBit;
    DCHECK_EQ(kAccessIsWrite, IsWrite());
  }

  void SetAtomic(bool kIsAtomic) {
    DCHECK(!IsAtomic());
    if (kIsAtomic)
      x_ |= kAtomicBit;
    DCHECK_EQ(IsAtomic(), kIsAtomic);
  }

  bool IsAtomic() const {
    return x_ & kAtomicBit;
  }

  bool IsZero() const {
    return x_ == 0;
  }

  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
    return shifted_xor == 0;
  }

  static ALWAYS_INLINE
  bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
    u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
    return masked_xor == 0;
  }

  static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
                                               unsigned kS2AccessSize) {
    bool res = false;
    u64 diff = s1.addr0() - s2.addr0();
    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0
      // if (s1.addr0() + size1) > s2.addr0()) return true;
      if (s1.size() > -diff)
        res = true;
    } else {
      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
      if (kS2AccessSize > diff)
        res = true;
    }
    DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
    DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
    return res;
  }

  u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
  u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
  bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
  bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }

  // The idea behind the freed bit is as follows.
  // When the memory is freed (or otherwise made inaccessible), we write
  // shadow values with the tid/epoch of the free and with the freed bit set.
  // During memory access processing the freed bit is treated as the msb of
  // the tid, so any access races with a shadow value that has the freed bit
  // set (it is as if the access races with a write from a thread we have
  // never synchronized with). This allows us to detect accesses to freed
  // memory without additional overhead in memory access processing, and at
  // the same time to restore the tid/epoch of the free.
  void MarkAsFreed() {
    x_ |= kFreedBit;
  }

  bool IsFreed() const {
    return x_ & kFreedBit;
  }

  bool GetFreedAndReset() {
    bool res = x_ & kFreedBit;
    x_ &= ~kFreedBit;
    return res;
  }

  bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
    bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift)
        | (u64(kIsAtomic) << kAtomicShift));
    DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
    return v;
  }

  bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
    return v;
  }

  bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
    return v;
  }

 private:
  static const u64 kReadShift = 5 + kClkBits;
  static const u64 kReadBit = 1ull << kReadShift;
  static const u64 kAtomicShift = 6 + kClkBits;
  static const u64 kAtomicBit = 1ull << kAtomicShift;

  u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }

  static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
    if (s1.addr0() == s2.addr0()) return true;
    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
      return true;
    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
      return true;
    return false;
  }
};
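
// Illustrative sketch (not part of the original header): encoding one access
// into a Shadow value and testing overlap, using the kSizeLog* constants
// declared later in this file:
//
//   Shadow cur(FastState(/*tid=*/1, /*epoch=*/10));
//   cur.SetAddr0AndSizeLog(/*addr0=*/2, kSizeLog4);  // 4-byte access at +2
//   cur.SetWrite(true);
//   cur.SetAtomic(false);
//   // Against an old 2-byte access at offset 0 ([0,2) vs [2,6)):
//   // TwoRangesIntersect(cur, old, 2) == false -- the ranges only touch.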

struct ThreadSignalContext;

struct JmpBuf {
  uptr sp;
  int int_signal_send;
  bool in_blocking_func;
  uptr in_signal_handler;
  uptr *shadow_stack_pos;
};

// A Processor represents a physical thread, or a P for Go.
// It is used to store internal resources like the allocator cache, and does
// not participate in race-detection logic (it is invisible to the end user).
// In C++ it is tied to an OS thread just like ThreadState, however ideally
// it should be tied to a CPU (this way we will have fewer allocator caches).
// In Go it is tied to a P, so there are significantly fewer Processors than
// ThreadStates (which are tied to Gs).
// A ThreadState must be wired with a Processor to handle events.
struct Processor {
  ThreadState *thr;  // currently wired thread, or nullptr
#if !SANITIZER_GO
  AllocatorCache alloc_cache;
  InternalAllocatorCache internal_alloc_cache;
#endif
  DenseSlabAllocCache block_cache;
  DenseSlabAllocCache sync_cache;
  DenseSlabAllocCache clock_cache;
  DDPhysicalThread *dd_pt;
};

#if !SANITIZER_GO
// ScopedGlobalProcessor temporarily sets up a global processor for the
// current thread, if it does not have one. Intended for interceptors that
// can run at the very end of a thread, when we have already destroyed the
// thread's processor.
struct ScopedGlobalProcessor {
  ScopedGlobalProcessor();
  ~ScopedGlobalProcessor();
};
#endif

// This struct is stored in TLS.
struct ThreadState {
  FastState fast_state;
  // Synch epoch represents the thread's epoch before the last synchronization
  // action. It allows us to reduce the number of shadow state updates.
  // For example, if fast_synch_epoch=100 and the last write to addr X was at
  // epoch=150, and we are processing a write to X from the same thread at
  // epoch=200, we do nothing, because both writes happen in the same 'synch
  // epoch'. That is, if another memory access does not race with the former
  // write, it does not race with the latter as well.
  // QUESTION: can we squeeze this into ThreadState::Fast?
  // E.g. ThreadState::Fast is 44 bits, 32 are taken by synch_epoch and 12 are
  // taken by epoch between synchs.
  // This way we can save one load from tls.
  u64 fast_synch_epoch;
  // Technically `current` should be a separate THREADLOCAL variable;
  // but it is placed here in order to share cache line with previous fields.
  ThreadState* current;
  // This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
  // We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  int ignore_sync;
  int suppress_reports;
  // Go does not support ignores.
#if !SANITIZER_GO
  IgnoreSet mop_ignore_set;
  IgnoreSet sync_ignore_set;
#endif
  // C/C++ uses a fixed size shadow stack embedded into Trace.
  // Go uses a malloc-allocated shadow stack with dynamic size.
  uptr *shadow_stack;
  uptr *shadow_stack_end;
  uptr *shadow_stack_pos;
  u64 *racy_shadow_addr;
  u64 racy_state[2];
  MutexSet mset;
  ThreadClock clock;
#if !SANITIZER_GO
  Vector<JmpBuf> jmp_bufs;
  int ignore_interceptors;
#endif
#if TSAN_COLLECT_STATS
  u64 stat[StatCnt];
#endif
  const int tid;
  const int unique_id;
  bool in_symbolizer;
  bool in_ignored_lib;
  bool is_inited;
  bool is_dead;
  bool is_freeing;
  bool is_vptr_access;
  const uptr stk_addr;
  const uptr stk_size;
  const uptr tls_addr;
  const uptr tls_size;
  ThreadContext *tctx;

#if SANITIZER_DEBUG && !SANITIZER_GO
  InternalDeadlockDetector internal_deadlock_detector;
#endif
  DDLogicalThread *dd_lt;

  // Current wired Processor, or nullptr. Required to handle any events.
  Processor *proc1;
#if !SANITIZER_GO
  Processor *proc() { return proc1; }
#else
  Processor *proc();
#endif

  atomic_uintptr_t in_signal_handler;
  ThreadSignalContext *signal_ctx;

#if !SANITIZER_GO
  u32 last_sleep_stack_id;
  ThreadClock last_sleep_clock;
#endif

  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  const ReportDesc *current_report;

  explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                       unsigned reuse_count,
                       uptr stk_addr, uptr stk_size,
                       uptr tls_addr, uptr tls_size);
};

#if !SANITIZER_GO
#if SANITIZER_MAC || SANITIZER_ANDROID
ThreadState *cur_thread();
void set_cur_thread(ThreadState *thr);
void cur_thread_finalize();
INLINE void cur_thread_init() { }
#else
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
INLINE ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current;
}
INLINE void cur_thread_init() {
  ThreadState *thr = reinterpret_cast<ThreadState *>(cur_thread_placeholder);
  if (UNLIKELY(!thr->current))
    thr->current = thr;
}
INLINE void set_cur_thread(ThreadState *thr) {
  reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current = thr;
}
INLINE void cur_thread_finalize() { }
#endif  // SANITIZER_MAC || SANITIZER_ANDROID
#endif  // SANITIZER_GO
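
// Illustrative sketch (not part of the original header): runtime code
// obtains the calling thread's state through the accessors above, e.g.:
//
//   cur_thread_init();                 // wire ->current on first use
//   ThreadState *thr = cur_thread();
//   if (thr->ignore_interceptors)      // set by ScopedIgnoreInterceptors
//     return;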

class ThreadContext : public ThreadContextBase {
 public:
  explicit ThreadContext(int tid);
  ~ThreadContext();
  ThreadState *thr;
  u32 creation_stack_id;
  SyncClock sync;
  // Epoch at which the thread had started.
  // If we see an event from the thread stamped by an older epoch,
  // the event is from a dead thread that shared tid with this thread.
  u64 epoch0;
  u64 epoch1;

  // Override superclass callbacks.
  void OnDead() override;
  void OnJoined(void *arg) override;
  void OnFinished() override;
  void OnStarted(void *arg) override;
  void OnCreated(void *arg) override;
  void OnReset() override;
  void OnDetached(void *arg) override;
};

struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const {
    if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
      return true;
    if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
      return true;
    return false;
  }
};

struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};

struct FiredSuppression {
  ReportType type;
  uptr pc_or_addr;
  Suppression *supp;
};

struct Context {
  Context();

  bool initialized;
#if !SANITIZER_GO
  bool after_multithreaded_fork;
#endif

  MetaMap metamap;

  Mutex report_mtx;
  int nreported;
  int nmissed_expected;
  atomic_uint64_t last_symbolize_time_ns;

  void *background_thread;
  atomic_uint32_t stop_background_thread;

  ThreadRegistry *thread_registry;

  Mutex racy_mtx;
  Vector<RacyStacks> racy_stacks;
  Vector<RacyAddress> racy_addresses;
  // The number of fired suppressions may be large.
  Mutex fired_suppressions_mtx;
  InternalMmapVector<FiredSuppression> fired_suppressions;
  DDetector *dd;

  ClockAlloc clock_alloc;

  Flags flags;

  u64 stat[StatCnt];
  u64 int_alloc_cnt[MBlockTypeCount];
  u64 int_alloc_siz[MBlockTypeCount];
};

extern Context *ctx;  // The one and the only global runtime context.

ALWAYS_INLINE Flags *flags() {
  return &ctx->flags;
}

struct ScopedIgnoreInterceptors {
  ScopedIgnoreInterceptors() {
#if !SANITIZER_GO
    cur_thread()->ignore_interceptors++;
#endif
  }

  ~ScopedIgnoreInterceptors() {
#if !SANITIZER_GO
    cur_thread()->ignore_interceptors--;
#endif
  }
};

const char *GetObjectTypeFromTag(uptr tag);
const char *GetReportHeaderFromTag(uptr tag);
uptr TagFromShadowStackFrame(uptr pc);

class ScopedReportBase {
 public:
  void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, StackTrace stack,
                       const MutexSet *mset);
  void AddStack(StackTrace stack, bool suppressable = false);
  void AddThread(const ThreadContext *tctx, bool suppressable = false);
  void AddThread(int unique_tid, bool suppressable = false);
  void AddUniqueTid(int unique_tid);
  void AddMutex(const SyncVar *s);
  u64 AddMutex(u64 id);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(u32 stack_id);
  void SetCount(int count);

  const ReportDesc *GetReport() const;

 protected:
  ScopedReportBase(ReportType typ, uptr tag);
  ~ScopedReportBase();

 private:
  ReportDesc *rep_;
  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore_interceptors_;

  void AddDeadMutex(u64 id);

  ScopedReportBase(const ScopedReportBase &) = delete;
  void operator=(const ScopedReportBase &) = delete;
};

class ScopedReport : public ScopedReportBase {
 public:
  explicit ScopedReport(ReportType typ, uptr tag = kExternalTagNone);
  ~ScopedReport();

 private:
  ScopedErrorReportLock lock_;
};

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);
void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset, uptr *tag = nullptr);

// The stack could look like:
//   <start> | <main> | <foo> | tag | <bar>
// This will extract the tag and keep:
//   <start> | <main> | <foo> | <bar>
template<typename StackTraceTy>
void ExtractTagFromStack(StackTraceTy *stack, uptr *tag = nullptr) {
  if (stack->size < 2) return;
  uptr possible_tag_pc = stack->trace[stack->size - 2];
  uptr possible_tag = TagFromShadowStackFrame(possible_tag_pc);
  if (possible_tag == kExternalTagNone) return;
  stack->trace_buffer[stack->size - 2] = stack->trace_buffer[stack->size - 1];
  stack->size -= 1;
  if (tag) *tag = possible_tag;
}
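
// Illustrative sketch (not part of the original header): the in-place edit
// performed by ExtractTagFromStack on a tagged stack:
//
//   // stack.trace: <start> | <main> | <foo> | tag | <bar>
//   uptr tag = kExternalTagNone;
//   ExtractTagFromStack(&stack, &tag);
//   // stack.trace: <start> | <main> | <foo> | <bar>; tag now holds the
//   // value decoded by TagFromShadowStackFrame. If the second-to-last
//   // frame carries no tag, the stack is left unchanged.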

template<typename StackTraceTy>
void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack,
                        uptr *tag = nullptr) {
  uptr size = thr->shadow_stack_pos - thr->shadow_stack;
  uptr start = 0;
  if (size + !!toppc > kStackTraceMax) {
    start = size + !!toppc - kStackTraceMax;
    size = kStackTraceMax - !!toppc;
  }
  stack->Init(&thr->shadow_stack[start], size, toppc);
  ExtractTagFromStack(stack, tag);
}

#define GET_STACK_TRACE_FATAL(thr, pc) \
  VarSizeStackTrace stack; \
  ObtainCurrentStack(thr, pc, &stack); \
  stack.ReverseOrder();

#if TSAN_COLLECT_STATS
void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);
#endif

void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
#if TSAN_COLLECT_STATS
  thr->stat[typ] += n;
#endif
}
void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
#if TSAN_COLLECT_STATS
  thr->stat[typ] = n;
#endif
}

void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size, const char *name);
void DontNeedShadowFor(uptr addr, uptr size);
void UnmapShadow(ThreadState *thr, uptr addr, uptr size);
void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();

void ForkBefore(ThreadState *thr, uptr pc);
void ForkParentAfter(ThreadState *thr, uptr pc);
void ForkChildAfter(ThreadState *thr, uptr pc);

void ReportRace(ThreadState *thr);
bool OutputReport(ThreadState *thr, const ScopedReport &srep);
bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
bool IsExpectedReport(uptr addr, uptr size);
void PrintMatchedBenignRaces();

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 Printf
#else
# define DPrintf2(...)
#endif

u32 CurrentStackId(ThreadState *thr, uptr pc);
ReportStack *SymbolizeStackId(u32 stack_id);
void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow(uptr pc);  // uses libunwind

void Initialize(ThreadState *thr);
void MaybeSpawnBackgroundThread();
int Finalize(ThreadState *thr);

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur);
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
    uptr size, bool is_write);
void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
    uptr size, uptr step, bool is_write);
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic);

const int kSizeLog1 = 0;
const int kSizeLog2 = 1;
const int kSizeLog4 = 2;
const int kSizeLog8 = 3;

void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
                              uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
}

void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
                               uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
}

void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
                                    uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
}

void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
                                     uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
}
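
// Illustrative sketch (not part of the original header): compiler-inserted
// instrumentation reduces plain loads/stores to these helpers; an 8-byte
// read of `p` and a 4-byte write to `q` become roughly:
//
//   MemoryRead(thr, pc, (uptr)p, kSizeLog8);
//   MemoryWrite(thr, pc, (uptr)q, kSizeLog4);
//
// Unaligned or odd-sized accesses go through UnalignedMemoryAccess or
// MemoryAccessRange instead.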
759 | ||
cd0be65c WM |
760 | void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size); |
761 | void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size); | |
762 | void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size); | |
3ca75cd5 ML |
763 | void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr, |
764 | uptr size); | |
df77f0e4 | 765 | |
5d3805fc | 766 | void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack = true); |
df77f0e4 | 767 | void ThreadIgnoreEnd(ThreadState *thr, uptr pc); |
5d3805fc | 768 | void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack = true); |
df77f0e4 | 769 | void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc); |
cd0be65c WM |
770 | |
771 | void FuncEntry(ThreadState *thr, uptr pc); | |
772 | void FuncExit(ThreadState *thr); | |
773 | ||
774 | int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached); | |
b667dd70 ML |
775 | void ThreadStart(ThreadState *thr, int tid, tid_t os_id, |
776 | ThreadType thread_type); | |
cd0be65c | 777 | void ThreadFinish(ThreadState *thr); |
3c6331c2 | 778 | int ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid); |
cd0be65c WM |
779 | void ThreadJoin(ThreadState *thr, uptr pc, int tid); |
780 | void ThreadDetach(ThreadState *thr, uptr pc, int tid); | |
781 | void ThreadFinalize(ThreadState *thr); | |
a0408454 | 782 | void ThreadSetName(ThreadState *thr, const char *name); |
e297eb60 KS |
783 | int ThreadCount(ThreadState *thr); |
784 | void ProcessPendingSignals(ThreadState *thr); | |
b667dd70 | 785 | void ThreadNotJoined(ThreadState *thr, uptr pc, int tid, uptr uid); |
cd0be65c | 786 | |
10189819 MO |
787 | Processor *ProcCreate(); |
788 | void ProcDestroy(Processor *proc); | |
789 | void ProcWire(Processor *proc, ThreadState *thr); | |
790 | void ProcUnwire(Processor *proc, ThreadState *thr); | |
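
// Illustrative sketch (not part of the original header): the Processor
// lifecycle implied by the comment on struct Processor above:
//
//   Processor *proc = ProcCreate();
//   ProcWire(proc, thr);    // thr->proc() now returns proc
//   // ... thr handles events using proc's caches ...
//   ProcUnwire(proc, thr);
//   ProcDestroy(proc);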

// Note: the parameter is called flagz, because flags is already taken
// by the global function that returns flags.
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0,
                   int rec = 1);
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr);  // call on EOWNERDEAD
void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr);
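
// Illustrative sketch (not part of the original header): a pthread-style
// mutex interceptor typically brackets the real call with the Pre/Post pair
// (REAL(...) stands for the intercepted function):
//
//   MutexPreLock(thr, pc, (uptr)m);
//   REAL(pthread_mutex_lock)(m);
//   MutexPostLock(thr, pc, (uptr)m);   // acquires the mutex's sync clock
//   ...
//   MutexUnlock(thr, pc, (uptr)m);     // releases the mutex's sync clock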

void Acquire(ThreadState *thr, uptr pc, uptr addr);
// AcquireGlobal synchronizes the current thread with all other threads.
// In terms of happens-before relation, it draws a HB edge from all threads
// (where they happen to execute right now) to the current thread. We use it to
// handle Go finalizers. Namely, finalizer goroutine executes AcquireGlobal
// right before executing finalizers. This provides a coarse, but simple
// approximation of the actual required synchronization.
void AcquireGlobal(ThreadState *thr, uptr pc);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseStoreAcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);

// The hacky call uses custom calling convention and an assembly thunk.
// It is considerably faster than a normal call for the caller
// if it is not executed (it is intended for slow paths from hot functions).
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use a normal call.
#if !SANITIZER_DEBUG && defined(__x86_64__) && !SANITIZER_MAC
// The caller may not create the stack frame for itself at all,
// so we create a reserve stack frame for it (1024b must be enough).
#define HACKY_CALL(f) \
  __asm__ __volatile__("sub $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(1024) \
                       ".hidden " #f "_thunk;" \
                       "call " #f "_thunk;" \
                       "add $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(-1024) \
                       ::: "memory", "cc");
#else
#define HACKY_CALL(f) f()
#endif

void TraceSwitch(ThreadState *thr);
uptr TraceTopPC(ThreadState *thr);
uptr TraceSize();
uptr TraceParts();
Trace *ThreadTrace(int tid);

extern "C" void __tsan_trace_switch();
void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
                                 EventType typ, u64 addr) {
  if (!kCollectHistory)
    return;
  DCHECK_GE((int)typ, 0);
  DCHECK_LE((int)typ, 7);
  DCHECK_EQ(GetLsb(addr, kEventPCBits), addr);
  StatInc(thr, StatEvents);
  u64 pos = fs.GetTracePos();
  if (UNLIKELY((pos % kTracePartSize) == 0)) {
#if !SANITIZER_GO
    HACKY_CALL(__tsan_trace_switch);
#else
    TraceSwitch(thr);
#endif
  }
  Event *trace = (Event*)GetThreadTrace(fs.tid());
  Event *evp = &trace[pos];
  Event ev = (u64)addr | ((u64)typ << kEventPCBits);
  *evp = ev;
}

#if !SANITIZER_GO
uptr ALWAYS_INLINE HeapEnd() {
  return HeapMemEnd() + PrimaryAllocator::AdditionalSize();
}
#endif

ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags);
void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber);
void FiberSwitch(ThreadState *thr, uptr pc, ThreadState *fiber, unsigned flags);

// These need to match __tsan_switch_to_fiber_* flags defined in
// tsan_interface.h. See documentation there as well.
enum FiberSwitchFlags {
  FiberSwitchFlagNoSync = 1 << 0,  // __tsan_switch_to_fiber_no_sync
};
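
// Illustrative sketch (not part of the original header): how the
// __tsan_*_fiber entry points map onto the functions above:
//
//   ThreadState *fiber = FiberCreate(thr, pc, /*flags=*/0);
//   FiberSwitch(thr, pc, fiber, /*flags=*/0);            // synchronizing
//   FiberSwitch(fiber, pc, thr, FiberSwitchFlagNoSync);  // skips the HB edge
//   FiberDestroy(thr, pc, fiber);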

}  // namespace __tsan

#endif  // TSAN_RTL_H