//===-- tsan_rtl.cc -------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "ubsan/ubsan_init.h"

#ifdef __SSE3__
// <emmintrin.h> transitively includes <stdlib.h>,
// and it's prohibited to include std headers into tsan runtime.
// So we do this dirty trick.
#define _MM_MALLOC_H_INCLUDED
#define __MM_MALLOC_H
#include <emmintrin.h>
typedef __m128i m128;
#endif

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

#if !SANITIZER_GO && !SANITIZER_MAC
__attribute__((tls_model("initial-exec")))
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
Context *ctx;

// Can be overridden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
void OnInitialize();
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
  return failed;
}
SANITIZER_WEAK_CXX_DEFAULT_IMPL
void OnInitialize() {}
#endif

static char thread_registry_placeholder[sizeof(ThreadRegistry)];

static ThreadContextBase *CreateThreadContext(u32 tid) {
  // Map thread trace when context is created.
  char name[50];
  internal_snprintf(name, sizeof(name), "trace %u", tid);
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event), name);
  const uptr hdr = GetThreadTraceHeader(tid);
  internal_snprintf(name, sizeof(name), "trace header %u", tid);
  MapThreadTrace(hdr, sizeof(Trace), name);
  new((void*)hdr) Trace();
  // We are going to use only a small part of the trace with the default
  // value of history_size. However, the constructor writes to the whole trace.
  // Unmap the unused part.
  uptr hdr_end = hdr + sizeof(Trace);
  hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts());
  hdr_end = RoundUp(hdr_end, GetPageSizeCached());
  if (hdr_end < hdr + sizeof(Trace))
    UnmapOrDie((void*)hdr_end, hdr + sizeof(Trace) - hdr_end);
  void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
  return new(mem) ThreadContext(tid);
}
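
// A worked example of the unmap arithmetic above (the numbers are
// illustrative, not the real constants from tsan_defs.h): if Trace ends with
// an array of kTraceParts == 8 TraceHeaders but only TraceParts() == 4 parts
// are used at the current history_size, hdr_end is pulled back by
// 4 * sizeof(TraceHeader), rounded up to a page boundary, and everything
// from there up to hdr + sizeof(Trace) is returned to the OS.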

#if !SANITIZER_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif

Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
      CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse))
  , racy_mtx(MutexTypeRacy, StatMtxRacy)
  , racy_stacks()
  , racy_addresses()
  , fired_suppressions_mtx(MutexTypeFired, StatMtxFired)
  , clock_alloc("clock allocator") {
  fired_suppressions.reserve(8);
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         unsigned reuse_count,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , ignore_reads_and_writes()
  // , ignore_interceptors()
  , clock(tid, reuse_count)
#if !SANITIZER_GO
  , jmp_bufs()
#endif
  , tid(tid)
  , unique_id(unique_id)
  , stk_addr(stk_addr)
  , stk_size(stk_size)
  , tls_addr(tls_addr)
  , tls_size(tls_size)
#if !SANITIZER_GO
  , last_sleep_clock(tid)
#endif
{
}

#if !SANITIZER_GO
static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
  uptr n_threads;
  uptr n_running_threads;
  ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
  InternalMmapVector<char> buf(4096);
  WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads);
  WriteToFile(fd, buf.data(), internal_strlen(buf.data()));
}

static void BackgroundThread(void *arg) {
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
  // shutdown code).
  cur_thread()->ignore_interceptors++;
  const u64 kMs2Ns = 1000 * 1000;

  fd_t mprof_fd = kInvalidFd;
  if (flags()->profile_memory && flags()->profile_memory[0]) {
    if (internal_strcmp(flags()->profile_memory, "stdout") == 0) {
      mprof_fd = 1;
    } else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) {
      mprof_fd = 2;
    } else {
      InternalScopedString filename(kMaxPathLength);
      filename.append("%s.%d", flags()->profile_memory, (int)internal_getpid());
      fd_t fd = OpenFile(filename.data(), WrOnly);
      if (fd == kInvalidFd) {
        Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
            &filename[0]);
      } else {
        mprof_fd = fd;
      }
    }
  }

  u64 last_flush = NanoTime();
  uptr last_rss = 0;
  for (int i = 0;
      atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
      i++) {
    SleepForMillis(100);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        last_flush = NanoTime();
      }
    }
    // GetRSS can be expensive on huge programs, so don't do it every 100ms.
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      VPrintf(1, "ThreadSanitizer: memory flush check"
                 " RSS=%llu LAST=%llu LIMIT=%llu\n",
              (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
      if (2 * rss > limit + last_rss) {
        VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
      }
      last_rss = rss;
    }

    // Write memory profile if requested.
    if (mprof_fd != kInvalidFd)
      MemoryProfiler(ctx, mprof_fd, i);

    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        ScopedErrorReportLock l2;
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
  }
}

static void StartBackgroundThread() {
  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}

#ifndef __mips__
static void StopBackgroundThread() {
  atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
  internal_join_thread(ctx->background_thread);
  ctx->background_thread = 0;
}
#endif
#endif

void DontNeedShadowFor(uptr addr, uptr size) {
  ReleaseMemoryPagesToOS(MemToShadow(addr), MemToShadow(addr + size));
}

void MapShadow(uptr addr, uptr size) {
  // Global data is not 64K aligned, but there are no adjacent mappings,
  // so we can get away with unaligned mapping.
  // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  const uptr kPageSize = GetPageSizeCached();
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
  if (!MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
    Die();

  // Meta shadow is 2:1 (application : meta), so tread carefully.
  static bool data_mapped = false;
  static uptr mapped_meta_end = 0;
  uptr meta_begin = (uptr)MemToMeta(addr);
  uptr meta_end = (uptr)MemToMeta(addr + size);
  meta_begin = RoundDownTo(meta_begin, 64 << 10);
  meta_end = RoundUpTo(meta_end, 64 << 10);
  if (!data_mapped) {
    // First call maps data+bss.
    data_mapped = true;
    if (!MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"))
      Die();
  } else {
    // Mapping continuous heap.
    // Windows wants 64K alignment.
    meta_begin = RoundDownTo(meta_begin, 64 << 10);
    meta_end = RoundUpTo(meta_end, 64 << 10);
    if (meta_end <= mapped_meta_end)
      return;
    if (meta_begin < mapped_meta_end)
      meta_begin = mapped_meta_end;
    if (!MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"))
      Die();
    mapped_meta_end = meta_end;
  }
  VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n",
      addr, addr+size, meta_begin, meta_end);
}
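
// To make the ratios above concrete (these are the default constants from
// tsan_defs.h, stated here as an assumption): each 8-byte app cell
// (kShadowCell) is backed by kShadowCnt == 4 shadow words of 8 bytes each,
// so MapShadow reserves 4x the app size for shadow, while the meta shadow
// needs only 4 bytes per 8 app bytes (hence the 2:1 comment above).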

void MapThreadTrace(uptr addr, uptr size, const char *name) {
  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, TraceMemBeg());
  CHECK_LE(addr + size, TraceMemEnd());
  CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  if (!MmapFixedNoReserve(addr, size, name)) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p)\n",
        addr, size);
    Die();
  }
}

static void CheckShadowMapping() {
  uptr beg, end;
  for (int i = 0; GetUserRegion(i, &beg, &end); i++) {
    // Skip cases for empty regions (heap definition for architectures that
    // do not use 64-bit allocator).
    if (beg == end)
      continue;
    VPrintf(3, "checking shadow region %p-%p\n", beg, end);
    uptr prev = 0;
    for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
      for (int x = -(int)kShadowCell; x <= (int)kShadowCell; x += kShadowCell) {
        const uptr p = RoundDown(p0 + x, kShadowCell);
        if (p < beg || p >= end)
          continue;
        const uptr s = MemToShadow(p);
        const uptr m = (uptr)MemToMeta(p);
        VPrintf(3, "  checking pointer %p: shadow=%p meta=%p\n", p, s, m);
        CHECK(IsAppMem(p));
        CHECK(IsShadowMem(s));
        CHECK_EQ(p, ShadowToMem(s));
        CHECK(IsMetaMem(m));
        if (prev) {
          // Ensure that shadow and meta mappings are linear within a single
          // user range. Lots of code that processes memory ranges assumes it.
          const uptr prev_s = MemToShadow(prev);
          const uptr prev_m = (uptr)MemToMeta(prev);
          CHECK_EQ(s - prev_s, (p - prev) * kShadowMultiplier);
          CHECK_EQ((m - prev_m) / kMetaShadowSize,
                   (p - prev) / kMetaShadowCell);
        }
        prev = p;
      }
    }
  }
}

#if !SANITIZER_GO
static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  uptr top = 0;
  uptr bottom = 0;
  bool fast = common_flags()->fast_unwind_on_fatal;
  if (fast) GetThreadStackTopAndBottom(false, &top, &bottom);
  stack->Unwind(kStackTraceMax, sig.pc, sig.bp, sig.context, top, bottom, fast);
}

static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
  HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
}
#endif

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  // We are not ready to handle interceptors yet.
  ScopedIgnoreInterceptors ignore;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  ctx = new(ctx_placeholder) Context;
  const char *options = GetEnv(SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS");
  CacheBinaryName();
  CheckASLR();
  InitializeFlags(&ctx->flags, options);
  AvoidCVE_2016_2143();
  InitializePlatformEarly();
#if !SANITIZER_GO
  // Re-exec ourselves if we need to set additional env or command line args.
  MaybeReexec();

  InitializeAllocator();
  ReplaceSystemMalloc();
#endif
  if (common_flags()->detect_deadlocks)
    ctx->dd = DDetector::Create(flags());
  Processor *proc = ProcCreate();
  ProcWire(proc, thr);
  InitializeInterceptors();
  CheckShadowMapping();
  InitializePlatform();
  InitializeMutex();
  InitializeDynamicAnnotations();
#if !SANITIZER_GO
  InitializeShadowMemory();
  InitializeAllocatorLate();
  InstallDeadlySignalHandlers(TsanOnDeadlySignal);
#endif
  // Setup correct file descriptor for error reports.
  __sanitizer_set_report_path(common_flags()->log_path);
  InitializeSuppressions();
#if !SANITIZER_GO
  InitializeLibIgnore();
  Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif

  VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
          (int)internal_getpid());

  // Initialize thread 0.
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, GetTid(), /*workerthread*/ false);
#if TSAN_CONTAINS_UBSAN
  __ubsan::InitAsPlugin();
#endif
  ctx->initialized = true;

#if !SANITIZER_GO
  Symbolizer::LateInitialize();
#endif

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }

  OnInitialize();
}

void MaybeSpawnBackgroundThread() {
  // On MIPS, TSan initialization is run before
  // __pthread_initialize_minimal_internal() is finished, so we cannot spawn
  // new threads.
#if !SANITIZER_GO && !defined(__mips__)
  static atomic_uint32_t bg_thread = {};
  if (atomic_load(&bg_thread, memory_order_relaxed) == 0 &&
      atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) {
    StartBackgroundThread();
    SetSandboxingCallback(StopBackgroundThread);
  }
#endif
}
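
// The load + exchange pair above is a cheap "run once" guard: the relaxed
// load keeps the common already-spawned path to a single atomic read, and
// the exchange guarantees that exactly one caller observes the 0 -> 1
// transition and spawns the thread, even if several threads race here.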


int Finalize(ThreadState *thr) {
  bool failed = false;

  if (common_flags()->print_module_map == 1) PrintModuleMap();

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  { ScopedErrorReportLock l; }
  ctx->report_mtx.Unlock();

#if !SANITIZER_GO
  if (Verbosity()) AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#if !SANITIZER_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
        ctx->nmissed_expected);
  }

  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
#if !SANITIZER_GO
  if (flags()->print_benign)
    PrintMatchedBenignRaces();
#endif

  failed = OnFinalize(failed);

#if TSAN_COLLECT_STATS
  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
#endif

  return failed ? common_flags()->exitcode : 0;
}

#if !SANITIZER_GO
void ForkBefore(ThreadState *thr, uptr pc) {
  ctx->thread_registry->Lock();
  ctx->report_mtx.Lock();
}

void ForkParentAfter(ThreadState *thr, uptr pc) {
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();
}

void ForkChildAfter(ThreadState *thr, uptr pc) {
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();

  uptr nthread = 0;
  ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */);
  VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
      " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
  if (nthread == 1) {
    StartBackgroundThread();
  } else {
    // We've just forked a multi-threaded process. We cannot reasonably function
    // after that (some mutexes may be locked before fork). So just enable
    // ignores for everything in the hope that we will exec soon.
    ctx->after_multithreaded_fork = true;
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, pc);
    ThreadIgnoreSyncBegin(thr, pc);
  }
}
#endif

#if SANITIZER_GO
NOINLINE
void GrowShadowStack(ThreadState *thr) {
  const int sz = thr->shadow_stack_end - thr->shadow_stack;
  const int newsz = 2 * sz;
  uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
      newsz * sizeof(uptr));
  internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
  internal_free(thr->shadow_stack);
  thr->shadow_stack = newstack;
  thr->shadow_stack_pos = newstack + sz;
  thr->shadow_stack_end = newstack + newsz;
}
#endif

u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (!thr->is_inited)  // May happen during bootstrap.
    return 0;
  if (pc != 0) {
#if !SANITIZER_GO
    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
    if (thr->shadow_stack_pos == thr->shadow_stack_end)
      GrowShadowStack(thr);
#endif
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(
      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
  if (pc != 0)
    thr->shadow_stack_pos--;
  return id;
}
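
// CurrentStackId temporarily pushes pc so that the current location is part
// of the captured trace; StackDepotPut deduplicates the whole shadow stack
// and returns a stable u32 id that reports later resolve back to frames.
// The push is undone before returning, so the shadow stack is left unchanged.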

void TraceSwitch(ThreadState *thr) {
#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork)
    return;
#endif
  thr->nomalloc++;
  Trace *thr_trace = ThreadTrace(thr->tid);
  Lock l(&thr_trace->mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr_trace->headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  ObtainCurrentStack(thr, 0, &hdr->stack0);
  hdr->mset0 = thr->mset;
  thr->nomalloc--;
}

Trace *ThreadTrace(int tid) {
  return (Trace*)GetThreadTraceHeader(tid);
}

uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}
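
// Rough sizing: TraceSize() doubles with each increment of the history_size
// flag. Per the flag's documentation, history_size == 0 corresponds to 32K
// events per thread and history_size == 7 to 4M events; assuming
// sizeof(Event) == 8, that spans roughly 256KB to 32MB of trace per thread
// (our arithmetic, not a statement from this file).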

#if !SANITIZER_GO
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif

ALWAYS_INLINE
Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

ALWAYS_INLINE
void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}
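
// Shadow words are read and written with relaxed atomics on purpose:
// concurrent updates to the same shadow cell only ever race with other
// shadow updates, and occasionally losing one of them is acceptable for the
// analysis. StoreIfNotYetStored zeroes *s after the first store so that the
// unrolled scan in MemoryAccessImpl1 stores the current access at most once.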

ALWAYS_INLINE
void HandleRace(ThreadState *thr, u64 *shadow_mem,
                Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#if !SANITIZER_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}

static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}
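
// HappensBefore is the classic vector-clock check. Example: if thread T1
// wrote at its epoch 10 and then released a mutex that the current thread
// has since acquired, the current thread's clock entry for T1 is >= 10 and
// the old access is ordered before the current one; otherwise the two
// accesses are concurrent and a race candidate.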

ALWAYS_INLINE
void MemoryAccessImpl1(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i);
  // void _mm_storel_epi64(__m128i*, __m128i);
  u64 store_word = cur.raw();

  // Scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // We consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. Initially I considered
  // larger and smaller as well; it allowed replacing some
  // 'candidates' with 'same' or 'replace', but I think
  // it's just not worth it (performance- and complexity-wise).

  Shadow old(0);

  // In release mode we manually unroll the loop,
  // because empirically gcc generates better code this way.
  // However, we can't afford unrolling in debug mode, because the function
  // consumes almost 4K of stack. Gtest gives only 4K of stack to death test
  // threads, which is not enough for the unrolled loop.
#if SANITIZER_DEBUG
  for (int idx = 0; idx < 4; idx++) {
#include "tsan_update_shadow_word_inl.h"
  }
#else
  int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  idx = 1;
#include "tsan_update_shadow_word_inl.h"
  idx = 2;
#include "tsan_update_shadow_word_inl.h"
  idx = 3;
#include "tsan_update_shadow_word_inl.h"
#endif

  // we did not find any races and had already stored
  // the current access info, so we are done
  if (LIKELY(store_word == 0))
    return;
  // choose a random candidate slot and replace it
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}

void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
                           int size, bool kAccessIsWrite, bool kIsAtomic) {
  while (size) {
    int size1 = 1;
    int kAccessSizeLog = kSizeLog1;
    if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
      size1 = 8;
      kAccessSizeLog = kSizeLog8;
    } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
      size1 = 4;
      kAccessSizeLog = kSizeLog4;
    } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
      size1 = 2;
      kAccessSizeLog = kSizeLog2;
    }
    MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
    addr += size1;
    size -= size1;
  }
}
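
// The masks above test whether a chunk stays inside one 8-byte shadow cell:
// (addr & ~7) == ((addr + k) & ~7) holds iff addr and addr + k round down
// to the same cell. For example, addr == 0x1004 admits a 4-byte chunk
// (0x1004..0x1007 share the cell at 0x1000) but not an 8-byte one, so the
// range is carved into the largest cell-aligned pieces available.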

ALWAYS_INLINE
bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  Shadow cur(a);
  for (uptr i = 0; i < kShadowCnt; i++) {
    Shadow old(LoadShadow(&s[i]));
    if (Shadow::Addr0AndSizeAreEqual(cur, old) &&
        old.TidWithIgnore() == cur.TidWithIgnore() &&
        old.epoch() > sync_epoch &&
        old.IsAtomic() == cur.IsAtomic() &&
        old.IsRead() <= cur.IsRead())
      return true;
  }
  return false;
}

#if defined(__SSE3__)
#define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \
    _mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \
    (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
ALWAYS_INLINE
bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  // This is an optimized version of ContainsSameAccessSlow.
  // load current access into access[0:63]
  const m128 access = _mm_cvtsi64_si128(a);
  // duplicate high part of access in addr0:
  // addr0[0:31]   = access[32:63]
  // addr0[32:63]  = access[32:63]
  // addr0[64:95]  = access[32:63]
  // addr0[96:127] = access[32:63]
  const m128 addr0 = SHUF(access, access, 1, 1, 1, 1);
  // load 4 shadow slots
  const m128 shadow0 = _mm_load_si128((__m128i*)s);
  const m128 shadow1 = _mm_load_si128((__m128i*)s + 1);
  // load high parts of 4 shadow slots into addr_vect:
  // addr_vect[0:31]   = shadow0[32:63]
  // addr_vect[32:63]  = shadow0[96:127]
  // addr_vect[64:95]  = shadow1[32:63]
  // addr_vect[96:127] = shadow1[96:127]
  m128 addr_vect = SHUF(shadow0, shadow1, 1, 3, 1, 3);
  if (!is_write) {
    // set IsRead bit in addr_vect
    const m128 rw_mask1 = _mm_cvtsi64_si128(1<<15);
    const m128 rw_mask = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0);
    addr_vect = _mm_or_si128(addr_vect, rw_mask);
  }
  // addr0 == addr_vect?
  const m128 addr_res = _mm_cmpeq_epi32(addr0, addr_vect);
  // epoch1[0:63] = sync_epoch
  const m128 epoch1 = _mm_cvtsi64_si128(sync_epoch);
  // epoch[0:31]   = sync_epoch[0:31]
  // epoch[32:63]  = sync_epoch[0:31]
  // epoch[64:95]  = sync_epoch[0:31]
  // epoch[96:127] = sync_epoch[0:31]
  const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0);
  // load low parts of shadow cell epochs into epoch_vect:
  // epoch_vect[0:31]   = shadow0[0:31]
  // epoch_vect[32:63]  = shadow0[64:95]
  // epoch_vect[64:95]  = shadow1[0:31]
  // epoch_vect[96:127] = shadow1[64:95]
  const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2);
  // epoch_vect > sync_epoch? (signed compare of the low 32 epoch bits,
  // matching the 'old.epoch() > sync_epoch' test in the slow version)
  const m128 epoch_res = _mm_cmpgt_epi32(epoch_vect, epoch);
  // addr_res & epoch_res
  const m128 res = _mm_and_si128(addr_res, epoch_res);
  // mask[0] = res[7]
  // mask[1] = res[15]
  // ...
  // mask[15] = res[127]
  const int mask = _mm_movemask_epi8(res);
  return mask != 0;
}
#endif
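
// SHUF composes _mm_shuffle_ps's immediate from 2-bit lane indices: lanes
// i0 and i1 are taken from v0, lanes i2 and i3 from v1. For instance,
// SHUF(shadow0, shadow1, 1, 3, 1, 3) gathers the odd (high) 32-bit lanes of
// both shadow registers, i.e. the halves holding tid/addr0/size of all four
// shadow words, so a single _mm_cmpeq_epi32 compares all four slots at once.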

ALWAYS_INLINE
bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
#if defined(__SSE3__)
  bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
  // NOTE: this check can fail if the shadow is concurrently mutated
  // by other threads. But it still can be useful if you modify
  // ContainsSameAccessFast and want to ensure that it's not completely broken.
  // DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
  return res;
#else
  return ContainsSameAccessSlow(s, a, sync_epoch, is_write);
#endif
}

ALWAYS_INLINE USED
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
#if SANITIZER_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  if (!SANITIZER_GO && *shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit()) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopIgnored);
    return;
  }

  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);
  cur.SetAtomic(kIsAtomic);

  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
      thr->fast_synch_epoch, kAccessIsWrite))) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopSame);
    return;
  }

  if (kCollectHistory) {
    fast_state.IncrementEpoch();
    thr->fast_state = fast_state;
    TraceAddEvent(thr, fast_state, EventTypeMop, pc);
    cur.IncrementEpoch();
  }

  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}

// Called by MemoryAccessRange in tsan_rtl_thread.cc
ALWAYS_INLINE USED
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
      thr->fast_synch_epoch, kAccessIsWrite))) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopSame);
    return;
  }

  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}

static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  (void)thr;
  (void)pc;
  if (size == 0)
    return;
  // FIXME: fix me.
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  // Don't want to touch lots of shadow memory.
  // If a program maps a 10MB stack, there is no need to reset the whole range.
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  // UnmapOrDie/MmapFixedNoReserve does not work on Windows.
  if (SANITIZER_WINDOWS || size < common_flags()->clear_shadow_mmap_threshold) {
    u64 *p = (u64*)MemToShadow(addr);
    CHECK(IsShadowMem((uptr)p));
    CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
    // FIXME: may overwrite a part outside the region
    for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
      p[i++] = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        p[i++] = 0;
    }
  } else {
    // The region is big, reset only beginning and end.
    const uptr kPageSize = GetPageSizeCached();
    u64 *begin = (u64*)MemToShadow(addr);
    u64 *end = begin + size / kShadowCell * kShadowCnt;
    u64 *p = begin;
    // Set at least the first kPageSize/2 bytes, and continue until a page
    // boundary.
    while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
    // Reset middle part.
    u64 *p1 = p;
    p = RoundDown(end, kPageSize);
    UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
    if (!MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1))
      Die();
    // Set the ending.
    while (p < end) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
  }
}
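
// Shadow layout assumed by MemoryRangeSet (default constants, stated as an
// assumption): each 8-byte app cell owns kShadowCnt == 4 consecutive u64
// shadow slots. Writing val into slot 0 and zeroing slots 1..3 leaves the
// cell holding exactly one access record, so a 1KB app range touches
// 1024 / 8 * 4 == 512 shadow words (4KB of shadow).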

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  // Processing more than 1k (4k of shadow) is expensive,
  // can cause excessive memory consumption (user does not necessarily touch
  // the whole range) and most likely unnecessary.
  if (size > 1024)
    size = 1024;
  CHECK_EQ(thr->is_freeing, false);
  thr->is_freeing = true;
  MemoryAccessRange(thr, pc, addr, size, true);
  thr->is_freeing = false;
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

ALWAYS_INLINE USED
void FuncEntry(ThreadState *thr, uptr pc) {
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
  }

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
  DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end)
    GrowShadowStack(thr);
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

ALWAYS_INLINE USED
void FuncExit(ThreadState *thr) {
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
  }

  DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
  thr->shadow_stack_pos--;
}

void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
#if !SANITIZER_GO
  if (save_stack && !ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->ignore_reads_and_writes--;
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
#if !SANITIZER_GO
    thr->mop_ignore_set.Reset();
#endif
  }
}

#if !SANITIZER_GO
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __tsan_testonly_shadow_stack_current_size() {
  ThreadState *thr = cur_thread();
  return thr->shadow_stack_pos - thr->shadow_stack;
}
#endif

void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
#if !SANITIZER_GO
  if (save_stack && !ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  CHECK_GT(thr->ignore_sync, 0);
  thr->ignore_sync--;
#if !SANITIZER_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
#endif
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

}  // namespace __tsan

#if !SANITIZER_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif