//===-- tsan_rtl.cc -------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "ubsan/ubsan_init.h"

#ifdef __SSE3__
// <emmintrin.h> transitively includes <stdlib.h>,
// and it's prohibited to include std headers into tsan runtime.
// So we do this dirty trick.
#define _MM_MALLOC_H_INCLUDED
#include <emmintrin.h>
typedef __m128i m128;
#endif
volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {
#if !SANITIZER_GO && !SANITIZER_MAC
__attribute__((tls_model("initial-exec")))
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
Context *ctx;
// Can be overridden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
void OnInitialize();
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
  return failed;
}
SANITIZER_WEAK_CXX_DEFAULT_IMPL
void OnInitialize() {}
#endif
static char thread_registry_placeholder[sizeof(ThreadRegistry)];

static ThreadContextBase *CreateThreadContext(u32 tid) {
  // Map thread trace when context is created.
  char name[50];
  internal_snprintf(name, sizeof(name), "trace %u", tid);
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event), name);
  const uptr hdr = GetThreadTraceHeader(tid);
  internal_snprintf(name, sizeof(name), "trace header %u", tid);
  MapThreadTrace(hdr, sizeof(Trace), name);
  new((void*)hdr) Trace();
  // We are going to use only a small part of the trace with the default
  // value of history_size. However, the constructor writes to the whole trace.
  // Unmap the unused part.
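  // (TraceParts() is derived from flags()->history_size, while the Trace
  // object is laid out for the compile-time maximum kTraceParts, so the
  // trailing headers are never used again once the constructor has run.)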
  uptr hdr_end = hdr + sizeof(Trace);
  hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts());
  hdr_end = RoundUp(hdr_end, GetPageSizeCached());
  if (hdr_end < hdr + sizeof(Trace))
    UnmapOrDie((void*)hdr_end, hdr + sizeof(Trace) - hdr_end);
  void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
  return new(mem) ThreadContext(tid);
}
#if !SANITIZER_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif

Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
      CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse))
  , racy_mtx(MutexTypeRacy, StatMtxRacy)
  , fired_suppressions_mtx(MutexTypeFired, StatMtxFired)
  , clock_alloc("clock allocator") {
  fired_suppressions.reserve(8);
}
// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         unsigned reuse_count,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , ignore_reads_and_writes()
  // , ignore_interceptors()
  , clock(tid, reuse_count)
  , tid(tid)
  , unique_id(unique_id)
  , stk_addr(stk_addr)
  , stk_size(stk_size)
  , tls_addr(tls_addr)
  , tls_size(tls_size)
#if !SANITIZER_GO
  , last_sleep_clock(tid)
#endif
{
}
static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
  uptr n_threads;
  uptr n_running_threads;
  ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
  InternalMmapVector<char> buf(4096);
  WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads);
  WriteToFile(fd, buf.data(), internal_strlen(buf.data()));
}
static void BackgroundThread(void *arg) {
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
  // shutdown code).
  cur_thread()->ignore_interceptors++;
  const u64 kMs2Ns = 1000 * 1000;

  fd_t mprof_fd = kInvalidFd;
  if (flags()->profile_memory && flags()->profile_memory[0]) {
    if (internal_strcmp(flags()->profile_memory, "stdout") == 0) {
      mprof_fd = 1;
    } else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) {
      mprof_fd = 2;
    } else {
      InternalScopedString filename(kMaxPathLength);
      filename.append("%s.%d", flags()->profile_memory, (int)internal_getpid());
      fd_t fd = OpenFile(filename.data(), WrOnly);
      if (fd == kInvalidFd) {
        Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
               filename.data());
      } else {
        mprof_fd = fd;
      }
    }
  }

  u64 last_flush = NanoTime();
  uptr last_rss = 0;
  for (int i = 0;
      atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
      i++) {
    SleepForMillis(100);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        last_flush = NanoTime();
      }
    }
    // GetRSS can be expensive on huge programs, so don't do it every 100ms.
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      VPrintf(1, "ThreadSanitizer: memory flush check"
                 " RSS=%llu LAST=%llu LIMIT=%llu\n",
              (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
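      // Flush when the growth since the previous check exceeds the remaining
      // headroom below the limit: 2 * rss > limit + last_rss is the same as
      // rss - last_rss > limit - rss.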
      if (2 * rss > limit + last_rss) {
        VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss >> 20);
      }
      last_rss = rss;
    }

    // Write memory profile if requested.
    if (mprof_fd != kInvalidFd)
      MemoryProfiler(ctx, mprof_fd, i);

    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        ScopedErrorReportLock l2;
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
  }
}
static void StartBackgroundThread() {
  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}

static void StopBackgroundThread() {
  atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
  internal_join_thread(ctx->background_thread);
  ctx->background_thread = 0;
}
void DontNeedShadowFor(uptr addr, uptr size) {
  ReleaseMemoryPagesToOS(MemToShadow(addr), MemToShadow(addr + size));
}
void MapShadow(uptr addr, uptr size) {
  // Global data is not 64K aligned, but there are no adjacent mappings,
  // so we can get away with unaligned mapping.
  // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  const uptr kPageSize = GetPageSizeCached();
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
  if (!MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
    Die();

  // Meta shadow is 2:1, so tread carefully.
  static bool data_mapped = false;
  static uptr mapped_meta_end = 0;
  uptr meta_begin = (uptr)MemToMeta(addr);
  uptr meta_end = (uptr)MemToMeta(addr + size);
  meta_begin = RoundDownTo(meta_begin, 64 << 10);
  meta_end = RoundUpTo(meta_end, 64 << 10);

  if (!data_mapped) {
    // First call maps data+bss.
    data_mapped = true;
    if (!MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"))
      Die();
  } else {
    // Mapping continuous heap.
    // Windows wants 64K alignment.
    meta_begin = RoundDownTo(meta_begin, 64 << 10);
    meta_end = RoundUpTo(meta_end, 64 << 10);
    if (meta_end <= mapped_meta_end)
      return;
    if (meta_begin < mapped_meta_end)
      meta_begin = mapped_meta_end;
    if (!MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"))
      Die();
    mapped_meta_end = meta_end;
  }
  VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n",
      addr, addr+size, meta_begin, meta_end);
}
void MapThreadTrace(uptr addr, uptr size, const char *name) {
  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, TraceMemBeg());
  CHECK_LE(addr + size, TraceMemEnd());
  CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  if (!MmapFixedNoReserve(addr, size, name)) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p)\n",
        addr, size);
    Die();
  }
}
static void CheckShadowMapping() {
  uptr beg, end;
  for (int i = 0; GetUserRegion(i, &beg, &end); i++) {
    // Skip cases for empty regions (heap definition for architectures that
    // do not use 64-bit allocator).
    if (beg == end)
      continue;
    VPrintf(3, "checking shadow region %p-%p\n", beg, end);
    uptr prev = 0;
    for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
      for (int x = -(int)kShadowCell; x <= (int)kShadowCell; x += kShadowCell) {
        const uptr p = RoundDown(p0 + x, kShadowCell);
        if (p < beg || p >= end)
          continue;
        const uptr s = MemToShadow(p);
        const uptr m = (uptr)MemToMeta(p);
        VPrintf(3, "  checking pointer %p: shadow=%p meta=%p\n", p, s, m);
        CHECK(IsAppMem(p));
        CHECK(IsShadowMem(s));
        CHECK_EQ(p, ShadowToMem(s));
        CHECK(IsMetaMem(m));
        if (prev) {
          // Ensure that shadow and meta mappings are linear within a single
          // user range. Lots of code that processes memory ranges assumes it.
          const uptr prev_s = MemToShadow(prev);
          const uptr prev_m = (uptr)MemToMeta(prev);
          CHECK_EQ(s - prev_s, (p - prev) * kShadowMultiplier);
          CHECK_EQ((m - prev_m) / kMetaShadowSize,
                   (p - prev) / kMetaShadowCell);
        }
        prev = p;
      }
    }
  }
}
static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  uptr top = 0;
  uptr bottom = 0;
  bool fast = common_flags()->fast_unwind_on_fatal;
  if (fast) GetThreadStackTopAndBottom(false, &top, &bottom);
  stack->Unwind(kStackTraceMax, sig.pc, sig.bp, sig.context, top, bottom, fast);
}

static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
  HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
}
void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  // We are not ready to handle interceptors yet.
  ScopedIgnoreInterceptors ignore;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  ctx = new(ctx_placeholder) Context;
  const char *options = GetEnv(SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS");
  InitializeFlags(&ctx->flags, options);
  AvoidCVE_2016_2143();
  InitializePlatformEarly();
#if !SANITIZER_GO
  // Re-exec ourselves if we need to set additional env or command line args.
  MaybeReexec();

  InitializeAllocator();
  ReplaceSystemMalloc();
#endif
  if (common_flags()->detect_deadlocks)
    ctx->dd = DDetector::Create(flags());
  Processor *proc = ProcCreate();
  ProcWire(proc, thr);
  InitializeInterceptors();
  CheckShadowMapping();
  InitializePlatform();
  InitializeDynamicAnnotations();
#if !SANITIZER_GO
  InitializeShadowMemory();
  InitializeAllocatorLate();
  InstallDeadlySignalHandlers(TsanOnDeadlySignal);
#endif
  // Setup correct file descriptor for error reports.
  __sanitizer_set_report_path(common_flags()->log_path);
  InitializeSuppressions();
#if !SANITIZER_GO
  InitializeLibIgnore();
  Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif

  VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
          (int)internal_getpid());

  // Initialize thread 0.
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, GetTid(), /*workerthread*/ false);
#if TSAN_CONTAINS_UBSAN
  __ubsan::InitAsPlugin();
#endif
  ctx->initialized = true;

#if !SANITIZER_GO
  Symbolizer::LateInitialize();
#endif

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }

  OnInitialize();
}
void MaybeSpawnBackgroundThread() {
  // On MIPS, TSan initialization is run before
  // __pthread_initialize_minimal_internal() is finished, so we can not spawn
  // new threads.
#if !SANITIZER_GO && !defined(__mips__)
  static atomic_uint32_t bg_thread = {};
  if (atomic_load(&bg_thread, memory_order_relaxed) == 0 &&
      atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) {
    StartBackgroundThread();
    SetSandboxingCallback(StopBackgroundThread);
  }
#endif
}
int Finalize(ThreadState *thr) {
  bool failed = false;

  if (common_flags()->print_module_map == 1) PrintModuleMap();

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  { ScopedErrorReportLock l; }
  ctx->report_mtx.Unlock();

#if !SANITIZER_GO
  if (Verbosity()) AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#if !SANITIZER_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
        ctx->nmissed_expected);
  }

  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
#if !SANITIZER_GO
  if (flags()->print_benign)
    PrintMatchedBenignRaces();
#endif

  failed = OnFinalize(failed);

#if TSAN_COLLECT_STATS
  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
#endif

  return failed ? common_flags()->exitcode : 0;
}
void ForkBefore(ThreadState *thr, uptr pc) {
  ctx->thread_registry->Lock();
  ctx->report_mtx.Lock();
}

void ForkParentAfter(ThreadState *thr, uptr pc) {
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();
}

void ForkChildAfter(ThreadState *thr, uptr pc) {
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();

  uptr nthread = 0;
  ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */);
  VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
      " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
  if (nthread == 1) {
    StartBackgroundThread();
  } else {
    // We've just forked a multi-threaded process. We cannot reasonably function
    // after that (some mutexes may be locked before fork). So just enable
    // ignores for everything in the hope that we will exec soon.
    ctx->after_multithreaded_fork = true;
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, pc);
    ThreadIgnoreSyncBegin(thr, pc);
  }
}
void GrowShadowStack(ThreadState *thr) {
  const int sz = thr->shadow_stack_end - thr->shadow_stack;
  const int newsz = 2 * sz;
  uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
      newsz * sizeof(uptr));
  internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
  internal_free(thr->shadow_stack);
  thr->shadow_stack = newstack;
  thr->shadow_stack_pos = newstack + sz;
  thr->shadow_stack_end = newstack + newsz;
}
u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (!thr->is_inited)  // May happen during bootstrap.
    return 0;
  if (pc != 0) {
#if !SANITIZER_GO
    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
    if (thr->shadow_stack_pos == thr->shadow_stack_end)
      GrowShadowStack(thr);
#endif
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(
      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
  if (pc != 0)
    thr->shadow_stack_pos--;
  return id;
}
void TraceSwitch(ThreadState *thr) {
  if (ctx->after_multithreaded_fork)
    return;
  thr->nomalloc++;
  Trace *thr_trace = ThreadTrace(thr->tid);
  Lock l(&thr_trace->mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr_trace->headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  ObtainCurrentStack(thr, 0, &hdr->stack0);
  hdr->mset0 = thr->mset;
  thr->nomalloc--;
}

Trace *ThreadTrace(int tid) {
  return (Trace*)GetThreadTraceHeader(tid);
}
uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
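// Shadow cells are read and written with relaxed atomics: concurrent updates
// from other threads may be observed in any order, which the detector
// tolerates (see the NOTE in ContainsSameAccess below).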
Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}

void HandleRace(ThreadState *thr, u64 *shadow_mem,
                Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#if !SANITIZER_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}
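// Returns true if the access recorded in the old shadow value is ordered
// before the current thread's position, i.e. this thread's vector clock
// entry for old's thread has already reached old's epoch.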
static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}
void MemoryAccessImpl1(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();

  // scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // we consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. initially I considered
  // larger and smaller as well, it allowed to replace some
  // 'candidates' with 'same' or 'replace', but I think
  // it's just not worth it (performance- and complexity-wise).

  Shadow old(0);

  // In release mode we manually unroll the loop,
  // because empirically gcc generates better code this way.
  // However, we can't afford unrolling in debug mode, because the function
  // consumes almost 4K of stack. Gtest gives only 4K of stack to death test
  // threads, which is not enough for the unrolled loop.
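  // Each inclusion of tsan_update_shadow_word_inl.h processes shadow slot
  // `idx`: it compares that slot with `cur` and either reuses the slot
  // (clearing store_word via StoreIfNotYetStored), leaves it alone, or jumps
  // to RACE below on a conflicting unordered access.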
#if SANITIZER_DEBUG
  for (int idx = 0; idx < 4; idx++) {
#include "tsan_update_shadow_word_inl.h"
  }
#else
  int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  idx = 1;
#include "tsan_update_shadow_word_inl.h"
  idx = 2;
#include "tsan_update_shadow_word_inl.h"
  idx = 3;
#include "tsan_update_shadow_word_inl.h"
#endif

  // we did not find any races and had already stored
  // the current access info, so we are done
  if (LIKELY(store_word == 0))
    return;
  // choose a random candidate slot and replace it
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic) {
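  // E.g. a 5-byte access starting at an address with addr % 8 == 3 is
  // reported as a 4-byte access at addr followed by a 1-byte access at
  // addr + 4.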
  while (size) {
    int size1 = 1;
    int kAccessSizeLog = kSizeLog1;
    if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
      size1 = 8;
      kAccessSizeLog = kSizeLog8;
    } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
      size1 = 4;
      kAccessSizeLog = kSizeLog4;
    } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
      size1 = 2;
      kAccessSizeLog = kSizeLog2;
    }
    MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
    addr += size1;
    size -= size1;
  }
}
bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  Shadow cur(a);
  for (uptr i = 0; i < kShadowCnt; i++) {
    Shadow old(LoadShadow(&s[i]));
    if (Shadow::Addr0AndSizeAreEqual(cur, old) &&
        old.TidWithIgnore() == cur.TidWithIgnore() &&
        old.epoch() > sync_epoch &&
        old.IsAtomic() == cur.IsAtomic() &&
        old.IsRead() <= cur.IsRead())
      return true;
  }
  return false;
}
#if defined(__SSE3__)
#define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \
    _mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \
    (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
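// SHUF(v0, v1, i0, i1, i2, i3) builds a vector whose low two 32-bit lanes
// are lanes i0 and i1 of v0 and whose high two lanes are lanes i2 and i3 of
// v1; the arithmetic above just encodes the four 2-bit indices of the
// _mm_shuffle_ps immediate.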

bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  // This is an optimized version of ContainsSameAccessSlow.
  // load current access into access[0:63]
  const m128 access = _mm_cvtsi64_si128(a);
  // duplicate high part of access in addr0:
  // addr0[0:31]   = access[32:63]
  // addr0[32:63]  = access[32:63]
  // addr0[64:95]  = access[32:63]
  // addr0[96:127] = access[32:63]
  const m128 addr0 = SHUF(access, access, 1, 1, 1, 1);
  // load 4 shadow slots
  const m128 shadow0 = _mm_load_si128((__m128i*)s);
  const m128 shadow1 = _mm_load_si128((__m128i*)s + 1);
  // load high parts of 4 shadow slots into addr_vect:
  //   addr_vect[0:31]   = shadow0[32:63]
  //   addr_vect[32:63]  = shadow0[96:127]
  //   addr_vect[64:95]  = shadow1[32:63]
  //   addr_vect[96:127] = shadow1[96:127]
  m128 addr_vect = SHUF(shadow0, shadow1, 1, 3, 1, 3);
  if (!is_write) {
    // set IsRead bit in addr_vect
    const m128 rw_mask1 = _mm_cvtsi64_si128(1<<15);
    const m128 rw_mask = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0);
    addr_vect = _mm_or_si128(addr_vect, rw_mask);
  }
  // addr0 == addr_vect?
  const m128 addr_res = _mm_cmpeq_epi32(addr0, addr_vect);
  // epoch1[0:63] = sync_epoch
  const m128 epoch1 = _mm_cvtsi64_si128(sync_epoch);
  // epoch[0:31]   = sync_epoch[0:31]
  // epoch[32:63]  = sync_epoch[0:31]
  // epoch[64:95]  = sync_epoch[0:31]
  // epoch[96:127] = sync_epoch[0:31]
  const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0);
  // load low parts of shadow cell epochs into epoch_vect:
  //   epoch_vect[0:31]   = shadow0[0:31]
  //   epoch_vect[32:63]  = shadow0[64:95]
  //   epoch_vect[64:95]  = shadow1[0:31]
  //   epoch_vect[96:127] = shadow1[64:95]
  const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2);
  // epoch_vect >= sync_epoch?
  const m128 epoch_res = _mm_cmpgt_epi32(epoch_vect, epoch);
  // addr_res & epoch_res
  const m128 res = _mm_and_si128(addr_res, epoch_res);
  // mask[0] = res[7]
  // mask[1] = res[15]
  // ...
  // mask[15] = res[127]
  const int mask = _mm_movemask_epi8(res);
  return mask != 0;
}
#endif
bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
#if defined(__SSE3__)
  bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
  // NOTE: this check can fail if the shadow is concurrently mutated
  // by other threads. But it still can be useful if you modify
  // ContainsSameAccessFast and want to ensure that it's not completely broken.
  // DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
  return res;
#else
  return ContainsSameAccessSlow(s, a, sync_epoch, is_write);
#endif
}
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
#if SANITIZER_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  if (!SANITIZER_GO && *shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit()) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopIgnored);
    return;
  }

  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);
  cur.SetAtomic(kIsAtomic);

  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
      thr->fast_synch_epoch, kAccessIsWrite))) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopSame);
    return;
  }

  if (kCollectHistory) {
    fast_state.IncrementEpoch();
    thr->fast_state = fast_state;
    TraceAddEvent(thr, fast_state, EventTypeMop, pc);
    cur.IncrementEpoch();
  }

  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}
// Called by MemoryAccessRange in tsan_rtl_thread.cc
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
      thr->fast_synch_epoch, kAccessIsWrite))) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopSame);
    return;
  }

  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}
static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  (void)thr;
  (void)pc;
  if (size == 0)
    return;
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  // Don't want to touch lots of shadow memory.
  // If a program maps 10MB stack, there is no need to reset the whole range.
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
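  // Each kShadowCell-sized cell of application memory owns kShadowCnt
  // consecutive u64 shadow slots; the loops below write val into the first
  // slot of each cell and zero the remaining slots.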
  // UnmapOrDie/MmapFixedNoReserve does not work on Windows.
  if (SANITIZER_WINDOWS || size < common_flags()->clear_shadow_mmap_threshold) {
    u64 *p = (u64*)MemToShadow(addr);
    CHECK(IsShadowMem((uptr)p));
    CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
    // FIXME: may overwrite a part outside the region
    for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
      p[i++] = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        p[i++] = 0;
    }
  } else {
    // The region is big, reset only beginning and end.
    const uptr kPageSize = GetPageSizeCached();
    u64 *begin = (u64*)MemToShadow(addr);
    u64 *end = begin + size / kShadowCell * kShadowCnt;
    u64 *p = begin;
    // Set at least first kPageSize/2 to page boundary.
    while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
    // Reset middle part.
    u64 *p1 = p;
    p = RoundDown(end, kPageSize);
    UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
    if (!MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1))
      Die();
    // Set the ending.
    while (p < end) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
  }
}
void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  // Processing more than 1k (4k of shadow) is expensive,
  // can cause excessive memory consumption (user does not necessarily touch
  // the whole range) and most likely unnecessary.
  if (size > 1024)
    size = 1024;
  CHECK_EQ(thr->is_freeing, false);
  thr->is_freeing = true;
  MemoryAccessRange(thr, pc, addr, size, true);
  thr->is_freeing = false;
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}
void FuncEntry(ThreadState *thr, uptr pc) {
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
  }

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
  DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end)
    GrowShadowStack(thr);
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}
void FuncExit(ThreadState *thr) {
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
  }

  DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
  thr->shadow_stack_pos--;
}
void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
#if !SANITIZER_GO
  if (save_stack && !ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->ignore_reads_and_writes--;
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
#if !SANITIZER_GO
    thr->mop_ignore_set.Reset();
#endif
  }
}
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __tsan_testonly_shadow_stack_current_size() {
  ThreadState *thr = cur_thread();
  return thr->shadow_stack_pos - thr->shadow_stack;
}
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
#if !SANITIZER_GO
  if (save_stack && !ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  CHECK_GT(thr->ignore_sync, 0);
  thr->ignore_sync--;
#if !SANITIZER_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
#endif
}
bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}
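// Exactly one symbol from each of the following pairs is emitted, so that a
// mismatch between the runtime's build configuration and the rest of the
// build (debug vs. release, stats vs. nostats) surfaces as a link-time error
// rather than silent misbehavior.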
#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

}  // namespace __tsan

// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"