]>
Commit | Line | Data |
---|---|---|
9cf75457 | 1 | //===-- tsan_rtl_report.cc ------------------------------------------------===// |
2 | // | |
3 | // This file is distributed under the University of Illinois Open Source | |
4 | // License. See LICENSE.TXT for details. | |
5 | // | |
6 | //===----------------------------------------------------------------------===// | |
7 | // | |
8 | // This file is a part of ThreadSanitizer (TSan), a race detector. | |
9 | // | |
10 | //===----------------------------------------------------------------------===// | |
11 | ||
12 | #include "sanitizer_common/sanitizer_libc.h" | |
13 | #include "sanitizer_common/sanitizer_placement_new.h" | |
14 | #include "sanitizer_common/sanitizer_stackdepot.h" | |
f5ed5428 | 15 | #include "sanitizer_common/sanitizer_common.h" |
7da89e80 | 16 | #include "sanitizer_common/sanitizer_stacktrace.h" |
9cf75457 | 17 | #include "tsan_platform.h" |
18 | #include "tsan_rtl.h" | |
19 | #include "tsan_suppressions.h" | |
20 | #include "tsan_symbolize.h" | |
21 | #include "tsan_report.h" | |
22 | #include "tsan_sync.h" | |
23 | #include "tsan_mman.h" | |
24 | #include "tsan_flags.h" | |
f5ed5428 | 25 | #include "tsan_fd.h" |
9cf75457 | 26 | |
27 | namespace __tsan { | |
28 | ||
f5ed5428 | 29 | using namespace __sanitizer; // NOLINT |
30 | ||
0328398d | 31 | static ReportStack *SymbolizeStack(StackTrace trace); |
7da89e80 | 32 | |
// Called when a CHECK() inside the TSan runtime itself fails: prints the
// failing location/condition with both check operands, dumps the current
// stack via the slow unwinder, and terminates the process.
void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2) {
  // There is high probability that interceptors will check-fail as well,
  // on the other hand there is no sense in processing interceptors
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
  Printf("FATAL: ThreadSanitizer CHECK failed: "
         "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
         file, line, cond, (uptr)v1, (uptr)v2);
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
  Die();
}
45 | ||
// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_INTERFACE_ATTRIBUTE
bool WEAK OnReport(const ReportDesc *rep, bool suppressed) {
  // Default (weak) implementation: keep whatever suppression decision the
  // runtime already made. An override may return true to suppress the
  // report or false to force printing.
  (void)rep;
  return suppressed;
}
#endif
56 | ||
// Strips the bottom-most frames belonging to process/thread startup
// (frames below main, the tsan thread trampoline, or global ctor init)
// so that reported stacks end at user code. Walks the singly-linked
// frame list to find the last two frames, then cuts the list.
static void StackStripMain(ReportStack *stack) {
  ReportStack *last_frame = 0;
  ReportStack *last_frame2 = 0;  // second-to-last frame
  for (ReportStack *ent = stack; ent; ent = ent->next) {
    last_frame2 = last_frame;
    last_frame = ent;
  }

  // Fewer than two frames: nothing to strip.
  if (last_frame2 == 0)
    return;
  const char *last = last_frame->info.function;
#ifndef TSAN_GO
  const char *last2 = last_frame2->info.function;
  // Strip frame above 'main'
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame2->next = 0;
  // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame2->next = 0;
  // Strip global ctors init.
  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
    last_frame2->next = 0;
  // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered stack completely. Trimmed stack
    // can actually happen if we do not instrument some code,
    // so it's only a debug print. However we must try hard to not miss it
    // due to our fault.
    DPrintf("Bottom stack frame of stack %zx is missed\n", stack->pc);
  }
#else
  // The last frame always point into runtime (gosched0, goexit0, runtime.main).
  last_frame2->next = 0;
  (void)last;
#endif
}
93 | ||
4fc7b5ac | 94 | ReportStack *SymbolizeStackId(u32 stack_id) { |
7d752f28 | 95 | if (stack_id == 0) |
96 | return 0; | |
0328398d | 97 | StackTrace stack = StackDepotGet(stack_id); |
98 | if (stack.trace == nullptr) | |
99 | return nullptr; | |
100 | return SymbolizeStack(stack); | |
4fc7b5ac | 101 | } |
4fc7b5ac | 102 | |
// Symbolizes every pc of the trace into a linked list of ReportStack
// frames (innermost frame first) and strips the startup frames via
// StackStripMain. Returns 0 for an empty trace.
static ReportStack *SymbolizeStack(StackTrace trace) {
  if (trace.size == 0)
    return 0;
  ReportStack *stack = 0;
  // Iterate outermost-to-innermost; each symbolized entry is prepended,
  // so the final list starts with the innermost frame.
  for (uptr si = 0; si < trace.size; si++) {
    const uptr pc = trace.trace[si];
#ifndef TSAN_GO
    // We obtain the return address, that is, address of the next instruction,
    // so offset it by 1 byte.
    const uptr pc1 = StackTrace::GetPreviousInstructionPc(pc);
#else
    // FIXME(dvyukov): Go sometimes uses address of a function as top pc.
    uptr pc1 = pc;
    if (si != trace.size - 1)
      pc1 -= 1;
#endif
    // SymbolizeCode may return a chain of frames (e.g. due to inlining);
    // stamp the original pc on each of them.
    ReportStack *ent = SymbolizeCode(pc1);
    CHECK_NE(ent, 0);
    ReportStack *last = ent;
    while (last->next) {
      last->info.address = pc;  // restore original pc for report
      last = last->next;
    }
    last->info.address = pc;  // restore original pc for report
    last->next = stack;
    stack = ent;
  }
  StackStripMain(stack);
  return stack;
}
133 | ||
// Allocates an empty ReportDesc of the given type and takes the report
// mutexes for the lifetime of this object.
ScopedReport::ScopedReport(ReportType typ) {
  // The caller must already hold the thread registry lock: threads are
  // looked up while the report is being populated.
  ctx->thread_registry->CheckLocked();
  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
  rep_ = new(mem) ReportDesc;
  rep_->typ = typ;
  // Lock order: tsan's own report mutex first, then the cross-sanitizer
  // report mutex. The destructor unlocks in reverse order.
  ctx->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
}
142 | ||
ScopedReport::~ScopedReport() {
  // Unlock in reverse order of the constructor's Lock() calls.
  CommonSanitizerReportMutex.Unlock();
  ctx->report_mtx.Unlock();
  // Runs ReportDesc's destructor and returns the memory to the internal
  // allocator.
  DestroyAndFree(rep_);
}
148 | ||
0328398d | 149 | void ScopedReport::AddStack(StackTrace stack, bool suppressable) { |
9cf75457 | 150 | ReportStack **rs = rep_->stacks.PushBack(); |
0328398d | 151 | *rs = SymbolizeStack(stack); |
a9586c9c | 152 | (*rs)->suppressable = suppressable; |
9cf75457 | 153 | } |
154 | ||
// Appends a memory-access description to the report: the accessing
// thread, exact address/size (base addr plus the shadow's offset),
// access kind, the symbolized stack, and the set of mutexes held.
void ScopedReport::AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
                                   const MutexSet *mset) {
  void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
  ReportMop *mop = new(mem) ReportMop;
  rep_->mops.PushBack(mop);
  mop->tid = s.tid();
  mop->addr = addr + s.addr0();
  mop->size = s.size();
  mop->write = s.IsWrite();
  mop->atomic = s.IsAtomic();
  mop->stack = SymbolizeStack(stack);
  // SymbolizeStack returns 0 for empty traces.
  if (mop->stack)
    mop->stack->suppressable = true;
  // Record each held mutex; AddMutex deduplicates and returns the id to
  // reference it by in the report.
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    u64 mid = this->AddMutex(d.id);
    ReportMopMutex mtx = {mid, d.write};
    mop->mset.PushBack(mtx);
  }
}
175 | ||
// Appends a thread's unique id to the report's unique_tids list.
void ScopedReport::AddUniqueTid(int unique_tid) {
  rep_->unique_tids.PushBack(unique_tid);
}
179 | ||
a9586c9c | 180 | void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) { |
9cf75457 | 181 | for (uptr i = 0; i < rep_->threads.Size(); i++) { |
1e80ce41 | 182 | if ((u32)rep_->threads[i]->id == tctx->tid) |
9cf75457 | 183 | return; |
184 | } | |
185 | void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread)); | |
186 | ReportThread *rt = new(mem) ReportThread(); | |
187 | rep_->threads.PushBack(rt); | |
188 | rt->id = tctx->tid; | |
189 | rt->pid = tctx->os_id; | |
190 | rt->running = (tctx->status == ThreadStatusRunning); | |
7d752f28 | 191 | rt->name = internal_strdup(tctx->name); |
1e80ce41 | 192 | rt->parent_tid = tctx->parent_tid; |
193 | rt->stack = 0; | |
4fc7b5ac | 194 | rt->stack = SymbolizeStackId(tctx->creation_stack_id); |
a9586c9c | 195 | if (rt->stack) |
196 | rt->stack->suppressable = suppressable; | |
9cf75457 | 197 | } |
198 | ||
199 | #ifndef TSAN_GO | |
1e80ce41 | 200 | static ThreadContext *FindThreadByUidLocked(int unique_id) { |
1e80ce41 | 201 | ctx->thread_registry->CheckLocked(); |
9cf75457 | 202 | for (unsigned i = 0; i < kMaxTid; i++) { |
1e80ce41 | 203 | ThreadContext *tctx = static_cast<ThreadContext*>( |
204 | ctx->thread_registry->GetThreadLocked(i)); | |
205 | if (tctx && tctx->unique_id == (u32)unique_id) { | |
9cf75457 | 206 | return tctx; |
207 | } | |
208 | } | |
209 | return 0; | |
210 | } | |
4a2c1ffc | 211 | |
// Direct registry lookup by tid. Requires the registry lock to be held.
static ThreadContext *FindThreadByTidLocked(int tid) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
}
217 | ||
218 | static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) { | |
219 | uptr addr = (uptr)arg; | |
220 | ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base); | |
221 | if (tctx->status != ThreadStatusRunning) | |
222 | return false; | |
223 | ThreadState *thr = tctx->thr; | |
224 | CHECK(thr); | |
225 | return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) || | |
226 | (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size)); | |
227 | } | |
228 | ||
// Finds the running thread whose stack or TLS contains addr.
// Returns its context (or 0) and sets *is_stack to distinguish a stack
// hit from a TLS hit. Requires the registry lock to be held.
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
                                                    (void*)addr));
  if (!tctx)
    return 0;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}
9cf75457 | 241 | #endif |
242 | ||
// Convenience overload: resolves a unique thread id to its context and
// adds the thread to the report. No-op for Go (no registry scan there)
// or when the thread is not found.
void ScopedReport::AddThread(int unique_tid, bool suppressable) {
#ifndef TSAN_GO
  if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
    AddThread(tctx, suppressable);
#endif
}
249 | ||
// Adds a live mutex description (uid, address, creation stack) to the
// report, deduplicating by uid.
void ScopedReport::AddMutex(const SyncVar *s) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == s->uid)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  rm->id = s->uid;
  rm->addr = s->addr;
  rm->destroyed = false;
  rm->stack = SymbolizeStackId(s->creation_stack_id);
}
263 | ||
// Adds a mutex by its packed id (address + uid). If the mutex is still
// alive it is recorded fully and its uid is returned; otherwise a
// "dead mutex" placeholder is recorded and the original id is returned.
u64 ScopedReport::AddMutex(u64 id) {
  u64 uid = 0;
  u64 mid = id;
  uptr addr = SyncVar::SplitId(id, &uid);
  // GetIfExistsAndLock returns the SyncVar with its mtx held (if any);
  // unlocked below.
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
  // Check that the mutex is still alive.
  // Another mutex can be created at the same address,
  // so check uid as well.
  if (s && s->CheckId(uid)) {
    mid = s->uid;
    AddMutex(s);
  } else {
    AddDeadMutex(id);
  }
  if (s)
    s->mtx.Unlock();
  return mid;
}
282 | ||
283 | void ScopedReport::AddDeadMutex(u64 id) { | |
f5ed5428 | 284 | for (uptr i = 0; i < rep_->mutexes.Size(); i++) { |
285 | if (rep_->mutexes[i]->id == id) | |
286 | return; | |
287 | } | |
288 | void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex)); | |
289 | ReportMutex *rm = new(mem) ReportMutex(); | |
290 | rep_->mutexes.PushBack(rm); | |
291 | rm->id = id; | |
7d752f28 | 292 | rm->addr = 0; |
f5ed5428 | 293 | rm->destroyed = true; |
294 | rm->stack = 0; | |
295 | } | |
296 | ||
// Classifies the racy address and attaches a location description to
// the report. Tried in order: file descriptor, heap block, thread
// stack/TLS, then symbolized global data.
// NOTE(review): the size parameter is currently unused in this body.
void ScopedReport::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#ifndef TSAN_GO
  // 1) File descriptor?
  int fd = -1;
  int creat_tid = -1;
  u32 creat_stack = 0;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
    ReportLocation *loc = ReportLocation::New(ReportLocationFD);
    loc->fd = fd;
    loc->tid = creat_tid;
    loc->stack = SymbolizeStackId(creat_stack);
    rep_->locs.PushBack(loc);
    ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  // 2) Heap block owned by tsan's allocator?
  MBlock *b = 0;
  Allocator *a = allocator();
  if (a->PointerIsMine((void*)addr)) {
    void *block_begin = a->GetBlockBegin((void*)addr);
    if (block_begin)
      b = ctx->metamap.GetBlock((uptr)block_begin);
  }
  if (b != 0) {
    // The allocating thread may already be gone; fall back to the tid
    // recorded in the block.
    ThreadContext *tctx = FindThreadByTidLocked(b->tid);
    ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
    loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
    loc->heap_chunk_size = b->siz;
    loc->tid = tctx ? tctx->tid : b->tid;
    loc->stack = SymbolizeStackId(b->stk);
    rep_->locs.PushBack(loc);
    if (tctx)
      AddThread(tctx);
    return;
  }
  // 3) Some thread's stack or TLS?
  // NOTE(review): unlike the branches above, this one does not return,
  // so a symbolized data location may additionally be appended below —
  // confirm this fall-through is intentional.
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    ReportLocation *loc =
        ReportLocation::New(is_stack ? ReportLocationStack : ReportLocationTLS);
    loc->tid = tctx->tid;
    rep_->locs.PushBack(loc);
    AddThread(tctx);
  }
  // 4) Global/data-section symbol?
  if (ReportLocation *loc = SymbolizeData(addr)) {
    loc->suppressable = true;
    rep_->locs.PushBack(loc);
    return;
  }
#endif
}
349 | ||
#ifndef TSAN_GO
// Attaches the symbolized stack of the last sleep-related event to the
// report (rep_->sleep).
void ScopedReport::AddSleep(u32 stack_id) {
  rep_->sleep = SymbolizeStackId(stack_id);
}
#endif
355 | ||
// Stores the occurrence count in the report.
void ScopedReport::SetCount(int count) {
  rep_->count = count;
}
359 | ||
// Read-only accessor for the assembled report description.
const ReportDesc *ScopedReport::GetReport() const {
  return rep_;
}
363 | ||
// Reconstructs the stack trace (and optionally the mutex set) a thread
// had at a past epoch.
void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset) {
  // This function restores stack trace and mutex set for the thread/epoch.
  // It does so by getting stack trace and mutex set at the beginning of
  // trace part, and then replaying the trace till the given epoch.
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
  if (tctx == 0)
    return;
  if (tctx->status != ThreadStatusRunning
      && tctx->status != ThreadStatusFinished
      && tctx->status != ThreadStatusDead)
    return;
  Trace* trace = ThreadTrace(tctx->tid);
  Lock l(&trace->mtx);
  const int partidx = (epoch / kTracePartSize) % TraceParts();
  TraceHeader* hdr = &trace->headers[partidx];
  // The requested epoch predates this trace part: the data has been
  // overwritten and cannot be restored.
  if (epoch < hdr->epoch0)
    return;
  const u64 epoch0 = RoundDown(epoch, TraceSize());
  const u64 eend = epoch % TraceSize();
  const u64 ebegin = RoundDown(eend, kTracePartSize);
  DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
          tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
  InternalScopedBuffer<uptr> stack(kShadowStackSize);
  // Seed with the stack captured at the start of the trace part.
  for (uptr i = 0; i < hdr->stack0.size; i++) {
    stack[i] = hdr->stack0.trace[i];
    DPrintf2("  #%02lu: pc=%zx\n", i, stack[i]);
  }
  if (mset)
    *mset = hdr->mset0;
  uptr pos = hdr->stack0.size;
  Event *events = (Event*)GetThreadTrace(tid);
  // Replay events up to the target epoch. An event packs its type in
  // the top 3 bits and the pc in the low 61 bits.
  for (uptr i = ebegin; i <= eend; i++) {
    Event ev = events[i];
    EventType typ = (EventType)(ev >> 61);
    uptr pc = (uptr)(ev & ((1ull << 61) - 1));
    DPrintf2("  %zu typ=%d pc=%zx\n", i, typ, pc);
    if (typ == EventTypeMop) {
      // Memory op: overwrite the current top-of-stack pc.
      stack[pos] = pc;
    } else if (typ == EventTypeFuncEnter) {
      stack[pos++] = pc;
    } else if (typ == EventTypeFuncExit) {
      if (pos > 0)
        pos--;
    }
    if (mset) {
      if (typ == EventTypeLock) {
        mset->Add(pc, true, epoch0 + i);
      } else if (typ == EventTypeUnlock) {
        mset->Del(pc, true);
      } else if (typ == EventTypeRLock) {
        mset->Add(pc, false, epoch0 + i);
      } else if (typ == EventTypeRUnlock) {
        mset->Del(pc, false);
      }
    }
    for (uptr j = 0; j <= pos; j++)
      DPrintf2("      #%zu: %zx\n", j, stack[j]);
  }
  // Nothing restored: empty stack with no recorded pc.
  if (pos == 0 && stack[0] == 0)
    return;
  pos++;
  stk->Init(stack.data(), pos);
}
430 | ||
// Deduplicates race reports: returns true (suppress) when the pair of
// stacks or the address range was already reported, per the
// suppress_equal_stacks / suppress_equal_addresses flags. On a partial
// match, records the missing half so future duplicates are caught too.
static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                             uptr addr_min, uptr addr_max) {
  bool equal_stack = false;
  RacyStacks hash;
  if (flags()->suppress_equal_stacks) {
    // Compare stacks by md5 of the raw pc arrays.
    hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
    for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
      if (hash == ctx->racy_stacks[i]) {
        DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n");
        equal_stack = true;
        break;
      }
    }
  }
  bool equal_address = false;
  RacyAddress ra0 = {addr_min, addr_max};
  if (flags()->suppress_equal_addresses) {
    for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
      RacyAddress ra2 = ctx->racy_addresses[i];
      // Overlapping [min,max) ranges count as the same address.
      uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
      uptr minend = min(ra0.addr_max, ra2.addr_max);
      if (maxbeg < minend) {
        DPrintf("ThreadSanitizer: suppressing report as doubled (addr)\n");
        equal_address = true;
        break;
      }
    }
  }
  if (equal_stack || equal_address) {
    // Record whichever half did not match, so the next occurrence of
    // either is suppressed.
    if (!equal_stack)
      ctx->racy_stacks.PushBack(hash);
    if (!equal_address)
      ctx->racy_addresses.PushBack(ra0);
    return true;
  }
  return false;
}
469 | ||
// Records the stacks/address range of a just-printed race so that
// HandleRacyStacks suppresses future duplicates of the same race.
static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                          uptr addr_min, uptr addr_max) {
  if (flags()->suppress_equal_stacks) {
    RacyStacks hash;
    hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
    ctx->racy_stacks.PushBack(hash);
  }
  if (flags()->suppress_equal_addresses) {
    RacyAddress ra0 = {addr_min, addr_max};
    ctx->racy_addresses.PushBack(ra0);
  }
}
483 | ||
// Final gate for a report: runs suppression matching over all of its
// stacks/threads/locations, gives the OnReport hook a chance to veto,
// then prints. Returns true iff the report was actually printed.
bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
  atomic_store(&ctx->last_symbolize_time_ns, NanoTime(), memory_order_relaxed);
  const ReportDesc *rep = srep.GetReport();
  Suppression *supp = 0;
  // First matching suppression wins; suppress_pc != 0 means suppressed.
  uptr suppress_pc = 0;
  for (uptr i = 0; suppress_pc == 0 && i < rep->mops.Size(); i++)
    suppress_pc = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
  for (uptr i = 0; suppress_pc == 0 && i < rep->stacks.Size(); i++)
    suppress_pc = IsSuppressed(rep->typ, rep->stacks[i], &supp);
  for (uptr i = 0; suppress_pc == 0 && i < rep->threads.Size(); i++)
    suppress_pc = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
  for (uptr i = 0; suppress_pc == 0 && i < rep->locs.Size(); i++)
    suppress_pc = IsSuppressed(rep->typ, rep->locs[i], &supp);
  if (suppress_pc != 0) {
    // Remember the fired suppression so later reports of the same
    // type/pc are dropped cheaply (see IsFiredSuppression).
    FiredSuppression s = {srep.GetReport()->typ, suppress_pc, supp};
    ctx->fired_suppressions.push_back(s);
  }
  {
    // Temporarily clear is_freeing around the user callback so that any
    // memory accesses it makes are not misattributed.
    bool old_is_freeing = thr->is_freeing;
    thr->is_freeing = false;
    bool suppressed = OnReport(rep, suppress_pc != 0);
    thr->is_freeing = old_is_freeing;
    if (suppressed)
      return false;
  }
  PrintReport(rep);
  ctx->nreported++;
  if (flags()->halt_on_error)
    internal__exit(flags()->exitcode);
  return true;
}
515 | ||
0328398d | 516 | bool IsFiredSuppression(Context *ctx, const ScopedReport &srep, |
517 | StackTrace trace) { | |
1e80ce41 | 518 | for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) { |
9cf75457 | 519 | if (ctx->fired_suppressions[k].type != srep.GetReport()->typ) |
520 | continue; | |
0328398d | 521 | for (uptr j = 0; j < trace.size; j++) { |
1e80ce41 | 522 | FiredSuppression *s = &ctx->fired_suppressions[k]; |
0328398d | 523 | if (trace.trace[j] == s->pc) { |
1e80ce41 | 524 | if (s->supp) |
525 | s->supp->hit_count++; | |
9cf75457 | 526 | return true; |
1e80ce41 | 527 | } |
528 | } | |
529 | } | |
530 | return false; | |
531 | } | |
532 | ||
// Address-based variant: returns true if addr itself equals the pc of
// an already-fired suppression of this report type.
static bool IsFiredSuppression(Context *ctx,
                               const ScopedReport &srep,
                               uptr addr) {
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
      continue;
    FiredSuppression *s = &ctx->fired_suppressions[k];
    if (addr == s->pc) {
      if (s->supp)
        s->supp->hit_count++;
      return true;
    }
  }
  return false;
}
548 | ||
7da89e80 | 549 | bool FrameIsInternal(const ReportStack *frame) { |
0328398d | 550 | if (frame == 0) |
551 | return false; | |
552 | const char *file = frame->info.file; | |
553 | return file != 0 && | |
554 | (internal_strstr(file, "tsan_interceptors.cc") || | |
555 | internal_strstr(file, "sanitizer_common_interceptors.inc") || | |
556 | internal_strstr(file, "tsan_interface_")); | |
7da89e80 | 557 | } |
558 | ||
// Decides whether this race should be reported even when pure atomic
// races are disabled: true if neither access is atomic, or if the
// atomic access races with a free (either already-freed shadow or a
// free currently in progress on this thread).
static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
  Shadow s0(thr->racy_state[0]);
  Shadow s1(thr->racy_state[1]);
  // Two atomic accesses never race with each other.
  CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
  if (!s0.IsAtomic() && !s1.IsAtomic())
    return true;
  if (s0.IsAtomic() && s1.IsFreed())
    return true;
  if (s1.IsAtomic() && thr->is_freeing)
    return true;
  return false;
}
571 | ||
// Builds and emits a data-race report from the two racy shadow states
// stored in thr->racy_state: computes the affected address range,
// classifies the report type, restores both stacks, applies
// suppression/deduplication, and hands the result to OutputReport.
void ReportRace(ThreadState *thr) {
  // Must not hold any internal mutexes while reporting.
  CheckNoLocks(thr);

  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore;

  if (!flags()->report_bugs)
    return;
  if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
    return;

  // Consume the "freed" bit from the second shadow state.
  bool freed = false;
  {
    Shadow s(thr->racy_state[1]);
    freed = s.GetFreedAndReset();
    thr->racy_state[1] = s.raw();
  }

  // Compute the union [addr_min, addr_max) of the two accesses.
  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
  uptr addr_min = 0;
  uptr addr_max = 0;
  {
    uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
    uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
    uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
    uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
    addr_min = min(a0, a1);
    addr_max = max(e0, e1);
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;
  }

  // Held for the whole report construction (ScopedReport requires it).
  ThreadRegistryLock l0(ctx->thread_registry);

  ReportType typ = ReportTypeRace;
  if (thr->is_vptr_access && freed)
    typ = ReportTypeVptrUseAfterFree;
  else if (thr->is_vptr_access)
    typ = ReportTypeVptrRace;
  else if (freed)
    typ = ReportTypeUseAfterFree;
  ScopedReport rep(typ);
  if (IsFiredSuppression(ctx, rep, addr))
    return;
  const uptr kMop = 2;
  VarSizeStackTrace traces[kMop];
  // Current (first) access: unwind this thread directly.
  const uptr toppc = TraceTopPC(thr);
  ObtainCurrentStack(thr, toppc, &traces[0]);
  if (IsFiredSuppression(ctx, rep, traces[0]))
    return;
  // Previous (second) access: replay the other thread's trace to
  // recover its stack and mutex set at the racy epoch.
  InternalScopedBuffer<MutexSet> mset2(1);
  new(mset2.data()) MutexSet();
  Shadow s2(thr->racy_state[1]);
  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2.data());
  if (IsFiredSuppression(ctx, rep, traces[1]))
    return;

  if (HandleRacyStacks(thr, traces, addr_min, addr_max))
    return;

  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, s, traces[i],
                        i == 0 ? &thr->mset : mset2.data());
  }

  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(s.tid()));
    // Skip a thread whose context has been reused for another epoch
    // range.
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
      continue;
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#ifndef TSAN_GO
  {  // NOLINT
    // Annotate the report with the last sleep stack if the old access
    // happened before that sleep (per this thread's sleep clock).
    Shadow s(thr->racy_state[1]);
    if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
      rep.AddSleep(thr->last_sleep_stack_id);
  }
#endif

  if (!OutputReport(thr, rep))
    return;

  AddRacyStacks(thr, traces, addr_min, addr_max);
}
663 | ||
// Unwinds the current thread's stack via ObtainCurrentStack and prints
// it symbolized.
void PrintCurrentStack(ThreadState *thr, uptr pc) {
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  PrintStack(SymbolizeStack(trace));
}
669 | ||
// Prints the current stack using the slow unwinder (no ThreadState
// required, so usable from check-failure paths). No-op for Go.
void PrintCurrentStackSlow(uptr pc) {
#ifndef TSAN_GO
  // NOTE(review): ptrace is never freed here — presumably acceptable
  // since this path is used when the process is about to die; confirm.
  BufferedStackTrace *ptrace =
      new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
          BufferedStackTrace();
  ptrace->Unwind(kStackTraceMax, pc, 0, 0, 0, 0, false);
  // Reverse the unwound trace in place before symbolization.
  for (uptr i = 0; i < ptrace->size / 2; i++) {
    uptr tmp = ptrace->trace_buffer[i];
    ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
    ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
  }
  PrintStack(SymbolizeStack(*ptrace));
#endif
}
684 | ||
9cf75457 | 685 | } // namespace __tsan |
0328398d | 686 | |
687 | using namespace __tsan; | |
688 | ||
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
// Public sanitizer-interface entry point: prints the caller's stack
// using the slow unwinder.
void __sanitizer_print_stack_trace() {
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
}  // extern "C"