//===-- tsan_rtl.cpp ------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "ubsan/ubsan_init.h"
#ifdef __SSE3__
// <emmintrin.h> transitively includes <stdlib.h>,
// and it's prohibited to include std headers into tsan runtime.
// So we do this dirty trick.
#define _MM_MALLOC_H_INCLUDED
#define __MM_MALLOC_H
#include <emmintrin.h>
typedef __m128i m128;
#endif
volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}
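
// With the stop_on_start flag the runtime spins on __tsan_resumed during
// Initialize(); the process (or an attached debugger) is expected to call
// __tsan_resume() to let startup continue, e.g. `call __tsan_resume()` from
// gdb.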

namespace __tsan {

#if !SANITIZER_GO && !SANITIZER_MAC
__attribute__((tls_model("initial-exec")))
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
Context *ctx;
// Can be overridden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
void OnInitialize();
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
  return failed;
}
SANITIZER_WEAK_CXX_DEFAULT_IMPL
void OnInitialize() {}
#endif
static char thread_registry_placeholder[sizeof(ThreadRegistry)];
static ThreadContextBase *CreateThreadContext(u32 tid) {
  // Map thread trace when context is created.
  char name[50];
  internal_snprintf(name, sizeof(name), "trace %u", tid);
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event), name);
  const uptr hdr = GetThreadTraceHeader(tid);
  internal_snprintf(name, sizeof(name), "trace header %u", tid);
  MapThreadTrace(hdr, sizeof(Trace), name);
  new((void*)hdr) Trace();
  // We are going to use only a small part of the trace with the default
  // value of history_size. However, the constructor writes to the whole trace.
  // Unmap the unused part.
  uptr hdr_end = hdr + sizeof(Trace);
  hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts());
  hdr_end = RoundUp(hdr_end, GetPageSizeCached());
  if (hdr_end < hdr + sizeof(Trace))
    UnmapOrDie((void*)hdr_end, hdr + sizeof(Trace) - hdr_end);
  void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
  return new(mem) ThreadContext(tid);
}
#if !SANITIZER_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif
Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
      CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse))
  , racy_mtx(MutexTypeRacy, StatMtxRacy)
  , racy_stacks()
  , racy_addresses()
  , fired_suppressions_mtx(MutexTypeFired, StatMtxFired)
  , clock_alloc("clock allocator") {
  fired_suppressions.reserve(8);
}
// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         unsigned reuse_count,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , ignore_reads_and_writes()
  // , ignore_interceptors()
  , clock(tid, reuse_count)
#if !SANITIZER_GO
  , jmp_bufs()
#endif
  , tid(tid)
  , unique_id(unique_id)
  , stk_addr(stk_addr)
  , stk_size(stk_size)
  , tls_addr(tls_addr)
  , tls_size(tls_size)
#if !SANITIZER_GO
  , last_sleep_clock(tid)
#endif
{
}
static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
  uptr n_threads;
  uptr n_running_threads;
  ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
  InternalMmapVector<char> buf(4096);
  WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads);
  WriteToFile(fd, buf.data(), internal_strlen(buf.data()));
}
static void *BackgroundThread(void *arg) {
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
  // shutdown code).
  cur_thread()->ignore_interceptors++;
  const u64 kMs2Ns = 1000 * 1000;
  fd_t mprof_fd = kInvalidFd;
  if (flags()->profile_memory && flags()->profile_memory[0]) {
    if (internal_strcmp(flags()->profile_memory, "stdout") == 0) {
      mprof_fd = 1;
    } else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) {
      mprof_fd = 2;
    } else {
      InternalScopedString filename(kMaxPathLength);
      filename.append("%s.%d", flags()->profile_memory, (int)internal_getpid());
      fd_t fd = OpenFile(filename.data(), WrOnly);
      if (fd == kInvalidFd) {
        Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
               filename.data());
      } else {
        mprof_fd = fd;
      }
    }
  }

  u64 last_flush = NanoTime();
  uptr last_rss = 0;
  for (int i = 0;
      atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
      i++) {
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        last_flush = NanoTime();
      }
    }
    // GetRSS can be expensive on huge programs, so don't do it every 100ms.
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      VPrintf(1, "ThreadSanitizer: memory flush check"
                 " RSS=%llu LAST=%llu LIMIT=%llu\n",
              (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
      if (2 * rss > limit + last_rss) {
        VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss >> 20);
      }
      last_rss = rss;
    }
    // Write memory profile if requested.
    if (mprof_fd != kInvalidFd)
      MemoryProfiler(ctx, mprof_fd, i);
    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        ScopedErrorReportLock l2;
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
  }
  return nullptr;
}
static void StartBackgroundThread() {
  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}
static void StopBackgroundThread() {
  atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
  internal_join_thread(ctx->background_thread);
  ctx->background_thread = 0;
}
void DontNeedShadowFor(uptr addr, uptr size) {
  ReleaseMemoryPagesToOS(MemToShadow(addr), MemToShadow(addr + size));
}
void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
  if (size == 0) return;
  DontNeedShadowFor(addr, size);
  ScopedGlobalProcessor sgp;
  ctx->metamap.ResetRange(thr->proc(), addr, size);
}
void MapShadow(uptr addr, uptr size) {
  // Global data is not 64K aligned, but there are no adjacent mappings,
  // so we can get away with unaligned mapping.
  // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  const uptr kPageSize = GetPageSizeCached();
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
  if (!MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
    Die();

  // Meta shadow is 2:1, so tread carefully.
  static bool data_mapped = false;
  static uptr mapped_meta_end = 0;
  uptr meta_begin = (uptr)MemToMeta(addr);
  uptr meta_end = (uptr)MemToMeta(addr + size);
  meta_begin = RoundDownTo(meta_begin, 64 << 10);
  meta_end = RoundUpTo(meta_end, 64 << 10);
  if (!data_mapped) {
    // First call maps data+bss.
    data_mapped = true;
    if (!MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"))
      Die();
  } else {
    // Mapping continuous heap.
    // Windows wants 64K alignment.
    meta_begin = RoundDownTo(meta_begin, 64 << 10);
    meta_end = RoundUpTo(meta_end, 64 << 10);
    if (meta_end <= mapped_meta_end)
      return;
    if (meta_begin < mapped_meta_end)
      meta_begin = mapped_meta_end;
    if (!MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"))
      Die();
    mapped_meta_end = meta_end;
  }
  VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n",
      addr, addr + size, meta_begin, meta_end);
}
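
// Rough sketch of the two mappings set up above: every kShadowCell (8) bytes
// of application memory own kShadowCnt shadow cells of kShadowSize bytes
// (so the shadow range is kShadowMultiplier times the app range), while the
// meta shadow keeps kMetaShadowSize bytes per kMetaShadowCell app bytes.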
void MapThreadTrace(uptr addr, uptr size, const char *name) {
  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, TraceMemBeg());
  CHECK_LE(addr + size, TraceMemEnd());
  CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  if (!MmapFixedNoReserve(addr, size, name)) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p)\n",
           addr, size);
    Die();
  }
}
static void CheckShadowMapping() {
  uptr beg, end;
  for (int i = 0; GetUserRegion(i, &beg, &end); i++) {
    // Skip cases for empty regions (heap definition for architectures that
    // do not use 64-bit allocator).
    if (beg == end)
      continue;
    VPrintf(3, "checking shadow region %p-%p\n", beg, end);
    uptr prev = 0;
    for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
      for (int x = -(int)kShadowCell; x <= (int)kShadowCell; x += kShadowCell) {
        const uptr p = RoundDown(p0 + x, kShadowCell);
        if (p < beg || p >= end)
          continue;
        const uptr s = MemToShadow(p);
        const uptr m = (uptr)MemToMeta(p);
        VPrintf(3, "  checking pointer %p: shadow=%p meta=%p\n", p, s, m);
        CHECK(IsAppMem(p));
        CHECK(IsShadowMem(s));
        CHECK_EQ(p, ShadowToMem(s));
        CHECK(IsMetaMem(m));
        if (prev) {
          // Ensure that shadow and meta mappings are linear within a single
          // user range. Lots of code that processes memory ranges assumes it.
          const uptr prev_s = MemToShadow(prev);
          const uptr prev_m = (uptr)MemToMeta(prev);
          CHECK_EQ(s - prev_s, (p - prev) * kShadowMultiplier);
          CHECK_EQ((m - prev_m) / kMetaShadowSize,
                   (p - prev) / kMetaShadowCell);
        }
        prev = p;
      }
    }
  }
}
static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
  HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
}
void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  // We are not ready to handle interceptors yet.
  ScopedIgnoreInterceptors ignore;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  ctx = new(ctx_placeholder) Context;
  const char *env_name = SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS";
  const char *options = GetEnv(env_name);
  InitializeFlags(&ctx->flags, options, env_name);
  AvoidCVE_2016_2143();
  __sanitizer::InitializePlatformEarly();
  __tsan::InitializePlatformEarly();

#if !SANITIZER_GO
  // Re-exec ourselves if we need to set additional env or command line args.
  MaybeReexec();

  InitializeAllocator();
  ReplaceSystemMalloc();
#endif
  if (common_flags()->detect_deadlocks)
    ctx->dd = DDetector::Create(flags());
  Processor *proc = ProcCreate();
  ProcWire(proc, thr);
  InitializeInterceptors();
  CheckShadowMapping();
  InitializePlatform();
  InitializeDynamicAnnotations();
#if !SANITIZER_GO
  InitializeShadowMemory();
  InitializeAllocatorLate();
  InstallDeadlySignalHandlers(TsanOnDeadlySignal);
#endif
  // Setup correct file descriptor for error reports.
  __sanitizer_set_report_path(common_flags()->log_path);
  InitializeSuppressions();
#if !SANITIZER_GO
  InitializeLibIgnore();
  Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif

  VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
          (int)internal_getpid());

  // Initialize thread 0.
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
#if TSAN_CONTAINS_UBSAN
  __ubsan::InitAsPlugin();
#endif
  ctx->initialized = true;

#if !SANITIZER_GO
  Symbolizer::LateInitialize();
#endif

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }

  OnInitialize();
}
void MaybeSpawnBackgroundThread() {
  // On MIPS, TSan initialization is run before
  // __pthread_initialize_minimal_internal() is finished, so we can not spawn
  // new threads.
#if !SANITIZER_GO && !defined(__mips__)
  static atomic_uint32_t bg_thread = {};
  if (atomic_load(&bg_thread, memory_order_relaxed) == 0 &&
      atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) {
    StartBackgroundThread();
    SetSandboxingCallback(StopBackgroundThread);
  }
#endif
}
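
// The relaxed load before the exchange above is only an optimization that
// keeps repeated calls from dirtying the cache line; the atomic_exchange is
// what guarantees that exactly one caller starts the background thread.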
int Finalize(ThreadState *thr) {
  bool failed = false;

  if (common_flags()->print_module_map == 1) PrintModuleMap();

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  { ScopedErrorReportLock l; }
  ctx->report_mtx.Unlock();

#if !SANITIZER_GO
  if (Verbosity()) AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#if !SANITIZER_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
           ctx->nmissed_expected);
  }

  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
#if !SANITIZER_GO
  if (flags()->print_benign)
    PrintMatchedBenignRaces();
#endif

  failed = OnFinalize(failed);

#if TSAN_COLLECT_STATS
  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
#endif

  return failed ? common_flags()->exitcode : 0;
}
void ForkBefore(ThreadState *thr, uptr pc) {
  ctx->thread_registry->Lock();
  ctx->report_mtx.Lock();
  // Ignore memory accesses in the pthread_atfork callbacks.
  // If any of them triggers a data race we will deadlock
  // on the report_mtx.
  // We could ignore interceptors and sync operations as well,
  // but so far it's unclear if it will do more good or harm.
  // Unnecessarily ignoring things can lead to false positives later.
  ThreadIgnoreBegin(thr, pc);
}
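
// ForkParentAfter/ForkChildAfter below must undo this in reverse: end the
// ignore region and release report_mtx before thread_registry.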
void ForkParentAfter(ThreadState *thr, uptr pc) {
  ThreadIgnoreEnd(thr, pc);  // Begin is in ForkBefore.
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();
}
void ForkChildAfter(ThreadState *thr, uptr pc) {
  ThreadIgnoreEnd(thr, pc);  // Begin is in ForkBefore.
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();

  uptr nthread = 0;
  ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */);
  VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
      " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
  if (nthread == 1) {
    StartBackgroundThread();
  } else {
    // We've just forked a multi-threaded process. We cannot reasonably function
    // after that (some mutexes may be locked before fork). So just enable
    // ignores for everything in the hope that we will exec soon.
    ctx->after_multithreaded_fork = true;
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, pc);
    ThreadIgnoreSyncBegin(thr, pc);
  }
}
void GrowShadowStack(ThreadState *thr) {
  const int sz = thr->shadow_stack_end - thr->shadow_stack;
  const int newsz = 2 * sz;
  uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
      newsz * sizeof(uptr));
  internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
  internal_free(thr->shadow_stack);
  thr->shadow_stack = newstack;
  thr->shadow_stack_pos = newstack + sz;
  thr->shadow_stack_end = newstack + newsz;
}
u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (!thr->is_inited)  // May happen during bootstrap.
    return 0;
  if (pc != 0) {
#if !SANITIZER_GO
    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
    if (thr->shadow_stack_pos == thr->shadow_stack_end)
      GrowShadowStack(thr);
#endif
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(
      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
  if (pc != 0)
    thr->shadow_stack_pos--;
  return id;
}
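
// Note that the caller-provided pc is only pushed temporarily: the deposited
// trace ends at the exact instrumentation point, but the shadow stack itself
// is left unchanged for the caller.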
void TraceSwitch(ThreadState *thr) {
#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork)
    return;
#endif
  thr->nomalloc++;
  Trace *thr_trace = ThreadTrace(thr->tid);
  Lock l(&thr_trace->mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr_trace->headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  ObtainCurrentStack(thr, 0, &hdr->stack0);
  hdr->mset0 = thr->mset;
  thr->nomalloc--;
}
Trace *ThreadTrace(int tid) {
  return (Trace*)GetThreadTraceHeader(tid);
}
uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}
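
// TraceSize() grows exponentially with the history_size flag (each increment
// doubles the number of per-thread trace events), and TraceParts() splits the
// trace into kTracePartSize-event parts, each described by its own
// TraceHeader (see TraceSwitch above).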
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}
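
// Shadow cells are accessed with relaxed atomics on purpose: the algorithm
// tolerates reading a slightly stale shadow word (at worst a race is reported
// on a later access), so no ordering beyond the application's own
// synchronization is needed here.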
void HandleRace(ThreadState *thr, u64 *shadow_mem,
                Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#if !SANITIZER_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}
static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}
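
// In vector clock terms: the old access (thread T at epoch E, as packed in
// the shadow word) happens-before the current one iff this thread has
// acquired T's clock up to at least E. E.g. old = {tid 3, epoch 7} is ordered
// (not racing) when thr->clock.get(3) >= 7.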
void MemoryAccessImpl1(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();
  bool stored = false;

  // scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // we consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. initially I considered
  // larger and smaller as well, it allowed to replace some
  // 'candidates' with 'same' or 'replace', but I think
  // it's just not worth it (performance- and complexity-wise).

  Shadow old(0);

  // In release mode we manually unroll the loop,
  // because empirically gcc generates better code this way.
  // However, we can't afford unrolling in debug mode, because the function
  // consumes almost 4K of stack. Gtest gives only 4K of stack to death test
  // threads, which is not enough for the unrolled loop.
#if SANITIZER_DEBUG
  for (int idx = 0; idx < 4; idx++) {
#include "tsan_update_shadow_word_inl.h"
  }
#else
  int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  idx = 1;
  if (stored) {
#include "tsan_update_shadow_word_inl.h"
  } else {
#include "tsan_update_shadow_word_inl.h"
  }
  idx = 2;
  if (stored) {
#include "tsan_update_shadow_word_inl.h"
  } else {
#include "tsan_update_shadow_word_inl.h"
  }
  idx = 3;
  if (stored) {
#include "tsan_update_shadow_word_inl.h"
  } else {
#include "tsan_update_shadow_word_inl.h"
  }
#endif

  // we did not find any races and had already stored
  // the current access info, so we are done
  if (LIKELY(stored))
    return;
  // choose a random candidate slot and replace it
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic) {
  while (size) {
    int size1 = 1;
    int kAccessSizeLog = kSizeLog1;
    if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
      size1 = 8;
      kAccessSizeLog = kSizeLog8;
    } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
      size1 = 4;
      kAccessSizeLog = kSizeLog4;
    } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
      size1 = 2;
      kAccessSizeLog = kSizeLog2;
    }
    MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
    addr += size1;
    size -= size1;
  }
}
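
// The branches above pick the largest chunk that stays within one 8-byte
// shadow cell. For example, a 7-byte access starting 5 bytes into a cell is
// recorded as a 2-byte, a 1-byte and then a 4-byte access.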
bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  Shadow cur(a);
  for (uptr i = 0; i < kShadowCnt; i++) {
    Shadow old(LoadShadow(&s[i]));
    if (Shadow::Addr0AndSizeAreEqual(cur, old) &&
        old.TidWithIgnore() == cur.TidWithIgnore() &&
        old.epoch() > sync_epoch &&
        old.IsAtomic() == cur.IsAtomic() &&
        old.IsRead() <= cur.IsRead())
      return true;
  }
  return false;
}

#if defined(__SSE3__)
#define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \
    _mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \
    (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
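// SHUF(v0, v1, i0, i1, i2, i3) packs the four 2-bit lane selectors into the
// _mm_shuffle_ps immediate (i0 | i1<<2 | i2<<4 | i3<<6, spelled *1/*4/*16/*64):
// the first two result lanes come from v0, the last two from v1. The
// vectorized check below compares the high (tid/addr/size) halves and the low
// (epoch) halves of all four shadow cells against the current access in
// parallel.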
bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  // This is an optimized version of ContainsSameAccessSlow.
  // load current access into access[0:63]
  const m128 access = _mm_cvtsi64_si128(a);
  // duplicate high part of access in addr0:
  // addr0[0:31]   = access[32:63]
  // addr0[32:63]  = access[32:63]
  // addr0[64:95]  = access[32:63]
  // addr0[96:127] = access[32:63]
  const m128 addr0 = SHUF(access, access, 1, 1, 1, 1);
  // load 4 shadow slots
  const m128 shadow0 = _mm_load_si128((__m128i*)s);
  const m128 shadow1 = _mm_load_si128((__m128i*)s + 1);
  // load high parts of 4 shadow slots into addr_vect:
  // addr_vect[0:31]   = shadow0[32:63]
  // addr_vect[32:63]  = shadow0[96:127]
  // addr_vect[64:95]  = shadow1[32:63]
  // addr_vect[96:127] = shadow1[96:127]
  m128 addr_vect = SHUF(shadow0, shadow1, 1, 3, 1, 3);
  if (!is_write) {
    // set IsRead bit in addr_vect
    const m128 rw_mask1 = _mm_cvtsi64_si128(1<<15);
    const m128 rw_mask = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0);
    addr_vect = _mm_or_si128(addr_vect, rw_mask);
  }
  // addr0 == addr_vect?
  const m128 addr_res = _mm_cmpeq_epi32(addr0, addr_vect);
  // epoch1[0:63] = sync_epoch
  const m128 epoch1 = _mm_cvtsi64_si128(sync_epoch);
  // epoch[0:31]   = sync_epoch[0:31]
  // epoch[32:63]  = sync_epoch[0:31]
  // epoch[64:95]  = sync_epoch[0:31]
  // epoch[96:127] = sync_epoch[0:31]
  const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0);
  // load low parts of shadow cell epochs into epoch_vect:
  // epoch_vect[0:31]   = shadow0[0:31]
  // epoch_vect[32:63]  = shadow0[64:95]
  // epoch_vect[64:95]  = shadow1[0:31]
  // epoch_vect[96:127] = shadow1[64:95]
  const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2);
  // epoch_vect >= sync_epoch?
  const m128 epoch_res = _mm_cmpgt_epi32(epoch_vect, epoch);
  // addr_res & epoch_res
  const m128 res = _mm_and_si128(addr_res, epoch_res);
  // mask[0] = res[7]
  // mask[1] = res[15]
  // ...
  // mask[15] = res[127]
  const int mask = _mm_movemask_epi8(res);
  return (mask & 0x8888) != 0;
}
#endif
bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
#if defined(__SSE3__)
  bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
  // NOTE: this check can fail if the shadow is concurrently mutated
  // by other threads. But it still can be useful if you modify
  // ContainsSameAccessFast and want to ensure that it's not completely broken.
  // DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
  return res;
#else
  return ContainsSameAccessSlow(s, a, sync_epoch, is_write);
#endif
}

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
#if SANITIZER_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  if (!SANITIZER_GO && !kAccessIsWrite && *shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (UNLIKELY(fast_state.GetIgnoreBit())) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopIgnored);
    return;
  }

  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);
  cur.SetAtomic(kIsAtomic);

  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
      thr->fast_synch_epoch, kAccessIsWrite))) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopSame);
    return;
  }

  if (kCollectHistory) {
    fast_state.IncrementEpoch();
    thr->fast_state = fast_state;
    TraceAddEvent(thr, fast_state, EventTypeMop, pc);
    cur.IncrementEpoch();
  }

  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}
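
// The early returns above are ordered roughly by frequency: .rodata reads,
// accesses with the ignore bit set, and accesses already present in the
// shadow (ContainsSameAccess) all skip the full MemoryAccessImpl1 update.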
// Called by MemoryAccessRange in tsan_rtl_thread.cpp
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
      thr->fast_synch_epoch, kAccessIsWrite))) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopSame);
    return;
  }

  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}
static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  (void)thr;
  (void)pc;
  if (size == 0)
    return;
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  // Don't want to touch lots of shadow memory.
  // If a program maps 10MB stack, there is no need to reset the whole range.
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  // UnmapOrDie/MmapFixedNoReserve does not work on Windows.
  if (SANITIZER_WINDOWS || size < common_flags()->clear_shadow_mmap_threshold) {
    u64 *p = (u64*)MemToShadow(addr);
    CHECK(IsShadowMem((uptr)p));
    CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
    // FIXME: may overwrite a part outside the region
    for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
      p[i++] = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        p[i++] = 0;
    }
  } else {
    // The region is big, reset only beginning and end.
    const uptr kPageSize = GetPageSizeCached();
    u64 *begin = (u64*)MemToShadow(addr);
    u64 *end = begin + size / kShadowCell * kShadowCnt;
    u64 *p = begin;
    // Set at least first kPageSize/2 to page boundary.
    while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
    // Reset middle part.
    u64 *p1 = p;
    p = RoundDown(end, kPageSize);
    UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
    if (!MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1))
      Die();
    // Set the ending.
    while (p < end) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
  }
}
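
// For a large range the middle of the shadow is not written at all: it is
// unmapped and remapped, so it reads back as zero. That matches val == 0
// (MemoryResetRange); for a non-zero val the middle simply becomes "no prior
// accesses", which is an acceptable approximation for huge regions.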
void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  // Processing more than 1k (4k of shadow) is expensive,
  // can cause excessive memory consumption (user does not necessarily touch
  // the whole range) and is most likely unnecessary.
  if (size > 1024)
    size = 1024;
  CHECK_EQ(thr->is_freeing, false);
  thr->is_freeing = true;
  MemoryAccessRange(thr, pc, addr, size, true);
  thr->is_freeing = false;
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}
void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
                                         uptr size) {
  if (thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, addr, size);
  else
    MemoryResetRange(thr, pc, addr, size);
}
void FuncEntry(ThreadState *thr, uptr pc) {
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
  }

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
  DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end)
    GrowShadowStack(thr);
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}
void FuncExit(ThreadState *thr) {
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
  }

  DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
  thr->shadow_stack_pos--;
}
void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
#if !SANITIZER_GO
  if (save_stack && !ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}
*thr
, uptr pc
) {
1067 DPrintf("#%d: ThreadIgnoreEnd\n", thr
->tid
);
1068 CHECK_GT(thr
->ignore_reads_and_writes
, 0);
1069 thr
->ignore_reads_and_writes
--;
1070 if (thr
->ignore_reads_and_writes
== 0) {
1071 thr
->fast_state
.ClearIgnoreBit();
1073 thr
->mop_ignore_set
.Reset();
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __tsan_testonly_shadow_stack_current_size() {
  ThreadState *thr = cur_thread();
  return thr->shadow_stack_pos - thr->shadow_stack;
}
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
#if !SANITIZER_GO
  if (save_stack && !ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}
void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  CHECK_GT(thr->ignore_sync, 0);
  thr->ignore_sync--;
#if !SANITIZER_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
#endif
}
bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}
#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif
}  // namespace __tsan
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"