]>
Commit | Line | Data |
---|---|---|
9cf75457 | 1 | //===-- tsan_rtl_report.cc ------------------------------------------------===// |
2 | // | |
3 | // This file is distributed under the University of Illinois Open Source | |
4 | // License. See LICENSE.TXT for details. | |
5 | // | |
6 | //===----------------------------------------------------------------------===// | |
7 | // | |
8 | // This file is a part of ThreadSanitizer (TSan), a race detector. | |
9 | // | |
10 | //===----------------------------------------------------------------------===// | |
11 | ||
12 | #include "sanitizer_common/sanitizer_libc.h" | |
13 | #include "sanitizer_common/sanitizer_placement_new.h" | |
14 | #include "sanitizer_common/sanitizer_stackdepot.h" | |
f5ed5428 | 15 | #include "sanitizer_common/sanitizer_common.h" |
9cf75457 | 16 | #include "tsan_platform.h" |
17 | #include "tsan_rtl.h" | |
18 | #include "tsan_suppressions.h" | |
19 | #include "tsan_symbolize.h" | |
20 | #include "tsan_report.h" | |
21 | #include "tsan_sync.h" | |
22 | #include "tsan_mman.h" | |
23 | #include "tsan_flags.h" | |
f5ed5428 | 24 | #include "tsan_fd.h" |
9cf75457 | 25 | |
26 | namespace __tsan { | |
27 | ||
f5ed5428 | 28 | using namespace __sanitizer; // NOLINT |
29 | ||
// Called by the runtime's CHECK_* macros when an internal invariant fails.
// Prints the failing location/condition plus two auxiliary values, then
// terminates the process via Die().
void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2) {
  ScopedInRtl in_rtl;  // mark that we are executing inside the runtime
  Printf("FATAL: ThreadSanitizer CHECK failed: "
         "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
         file, line, cond, (uptr)v1, (uptr)v2);
  Die();  // does not return
}
38 | ||
// Can be overriden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
// Default (weak) implementation: does not inspect the report, just echoes
// the suppression decision already made by the runtime. Returning true
// means "do not print this report".
SANITIZER_INTERFACE_ATTRIBUTE
bool WEAK OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif
49 | ||
// Post-processes a symbolized stack in place:
//  - strips the "__interceptor_" prefix from function names;
//  - strips flags()->strip_path_prefix and a leading "./" from file names;
//  - cuts the bottom frame below main()/the thread start routine/global
//    ctors init (or below "schedunlock" in TSAN_GO builds).
static void StackStripMain(ReportStack *stack) {
  ReportStack *last_frame = 0;   // bottom-most frame seen so far
  ReportStack *last_frame2 = 0;  // frame just above last_frame
  const char *prefix = "__interceptor_";
  uptr prefix_len = internal_strlen(prefix);
  const char *path_prefix = flags()->strip_path_prefix;
  uptr path_prefix_len = internal_strlen(path_prefix);
  char *pos;
  for (ReportStack *ent = stack; ent; ent = ent->next) {
    if (ent->func && 0 == internal_strncmp(ent->func, prefix, prefix_len))
      ent->func += prefix_len;
    if (ent->file && (pos = internal_strstr(ent->file, path_prefix)))
      ent->file = pos + path_prefix_len;
    if (ent->file && ent->file[0] == '.' && ent->file[1] == '/')
      ent->file += 2;
    last_frame2 = last_frame;
    last_frame = ent;
  }

  if (last_frame2 == 0)
    return;  // fewer than two frames — nothing to strip
  const char *last = last_frame->func;
#ifndef TSAN_GO
  const char *last2 = last_frame2->func;
  // Strip frame above 'main'
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame2->next = 0;
  // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame2->next = 0;
  // Strip global ctors init.
  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
    last_frame2->next = 0;
  // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered stack completely. Trimmed stack
    // can actually happen if we do not instrument some code,
    // so it's only a debug print. However we must try hard to not miss it
    // due to our fault.
    DPrintf("Bottom stack frame of stack %zx is missed\n", stack->pc);
  }
#else
  if (last && 0 == internal_strcmp(last, "schedunlock"))
    last_frame2->next = 0;
#endif
}
96 | ||
// Converts a raw StackTrace into a linked list of symbolized ReportStack
// frames and strips runtime-internal bottom frames (see StackStripMain).
// Returns 0 for an empty trace.
static ReportStack *SymbolizeStack(const StackTrace& trace) {
  if (trace.IsEmpty())
    return 0;
  ReportStack *stack = 0;
  for (uptr si = 0; si < trace.Size(); si++) {
    // We obtain the return address, that is, address of the next instruction,
    // so offset it by 1 byte.
    bool is_last = (si == trace.Size() - 1);
    ReportStack *ent = SymbolizeCode(trace.Get(si) - !is_last);
    CHECK_NE(ent, 0);
    // SymbolizeCode may return a chain of frames; undo the 1-byte offset
    // on every pc in the chain so the report shows the original addresses.
    ReportStack *last = ent;
    while (last->next) {
      last->pc += !is_last;
      last = last->next;
    }
    last->pc += !is_last;
    // Prepend, so the resulting list is in reverse order of the trace.
    last->next = stack;
    stack = ent;
  }
  StackStripMain(stack);
  return stack;
}
119 | ||
// Starts building a report of the given type. Requires thread_mtx to be
// held by the caller; additionally acquires the global report_mtx for the
// lifetime of the object, serializing report generation.
ScopedReport::ScopedReport(ReportType typ) {
  ctx_ = CTX();
  ctx_->thread_mtx.CheckLocked();
  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
  rep_ = new(mem) ReportDesc;
  rep_->typ = typ;
  ctx_->report_mtx.Lock();
}
128 | ||
// Releases the report lock and destroys/frees the report description.
ScopedReport::~ScopedReport() {
  ctx_->report_mtx.Unlock();
  DestroyAndFree(rep_);
}
133 | ||
134 | void ScopedReport::AddStack(const StackTrace *stack) { | |
135 | ReportStack **rs = rep_->stacks.PushBack(); | |
136 | *rs = SymbolizeStack(*stack); | |
137 | } | |
138 | ||
// Records one of the racing memory accesses in the report: thread, address,
// size, read/write kind, symbolized stack, and the set of mutexes held at
// the time of the access.
void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
    const StackTrace *stack, const MutexSet *mset) {
  void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
  ReportMop *mop = new(mem) ReportMop;
  rep_->mops.PushBack(mop);
  mop->tid = s.tid();
  mop->addr = addr + s.addr0();  // addr0() is the access offset stored in the shadow
  mop->size = s.size();
  mop->write = s.is_write();
  mop->stack = SymbolizeStack(*stack);
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    u64 uid = 0;
    uptr addr = SyncVar::SplitId(d.id, &uid);
    SyncVar *s = ctx_->synctab.GetIfExistsAndLock(addr, false);
    // Check that the mutex is still alive.
    // Another mutex can be created at the same address,
    // so check uid as well.
    if (s && s->CheckId(uid)) {
      ReportMopMutex mtx = {s->uid, d.write};
      mop->mset.PushBack(mtx);
      AddMutex(s);
    } else {
      // The mutex is gone (or was replaced): report it by id, as destroyed.
      ReportMopMutex mtx = {d.id, d.write};
      mop->mset.PushBack(mtx);
      AddMutex(d.id);
    }
    if (s)
      s->mtx.ReadUnlock();  // taken by GetIfExistsAndLock above
  }
}
170 | ||
171 | void ScopedReport::AddThread(const ThreadContext *tctx) { | |
172 | for (uptr i = 0; i < rep_->threads.Size(); i++) { | |
173 | if (rep_->threads[i]->id == tctx->tid) | |
174 | return; | |
175 | } | |
176 | void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread)); | |
177 | ReportThread *rt = new(mem) ReportThread(); | |
178 | rep_->threads.PushBack(rt); | |
179 | rt->id = tctx->tid; | |
180 | rt->pid = tctx->os_id; | |
181 | rt->running = (tctx->status == ThreadStatusRunning); | |
4ab070fc | 182 | rt->name = tctx->name ? internal_strdup(tctx->name) : 0; |
f5ed5428 | 183 | rt->parent_tid = tctx->creation_tid; |
9cf75457 | 184 | rt->stack = SymbolizeStack(tctx->creation_stack); |
185 | } | |
186 | ||
187 | #ifndef TSAN_GO | |
188 | static ThreadContext *FindThread(int unique_id) { | |
4a2c1ffc | 189 | Context *ctx = CTX(); |
190 | ctx->thread_mtx.CheckLocked(); | |
9cf75457 | 191 | for (unsigned i = 0; i < kMaxTid; i++) { |
4a2c1ffc | 192 | ThreadContext *tctx = ctx->threads[i]; |
9cf75457 | 193 | if (tctx && tctx->unique_id == unique_id) { |
194 | return tctx; | |
195 | } | |
196 | } | |
197 | return 0; | |
198 | } | |
4a2c1ffc | 199 | |
200 | ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) { | |
201 | Context *ctx = CTX(); | |
202 | ctx->thread_mtx.CheckLocked(); | |
203 | for (unsigned i = 0; i < kMaxTid; i++) { | |
204 | ThreadContext *tctx = ctx->threads[i]; | |
205 | if (tctx == 0 || tctx->status != ThreadStatusRunning) | |
206 | continue; | |
207 | ThreadState *thr = tctx->thr; | |
208 | CHECK(thr); | |
209 | if (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) { | |
210 | *is_stack = true; | |
211 | return tctx; | |
212 | } | |
213 | if (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size) { | |
214 | *is_stack = false; | |
215 | return tctx; | |
216 | } | |
217 | } | |
218 | return 0; | |
219 | } | |
9cf75457 | 220 | #endif |
221 | ||
222 | void ScopedReport::AddMutex(const SyncVar *s) { | |
f5ed5428 | 223 | for (uptr i = 0; i < rep_->mutexes.Size(); i++) { |
224 | if (rep_->mutexes[i]->id == s->uid) | |
225 | return; | |
226 | } | |
9cf75457 | 227 | void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex)); |
228 | ReportMutex *rm = new(mem) ReportMutex(); | |
229 | rep_->mutexes.PushBack(rm); | |
f5ed5428 | 230 | rm->id = s->uid; |
231 | rm->destroyed = false; | |
9cf75457 | 232 | rm->stack = SymbolizeStack(s->creation_stack); |
233 | } | |
234 | ||
f5ed5428 | 235 | void ScopedReport::AddMutex(u64 id) { |
236 | for (uptr i = 0; i < rep_->mutexes.Size(); i++) { | |
237 | if (rep_->mutexes[i]->id == id) | |
238 | return; | |
239 | } | |
240 | void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex)); | |
241 | ReportMutex *rm = new(mem) ReportMutex(); | |
242 | rep_->mutexes.PushBack(rm); | |
243 | rm->id = id; | |
244 | rm->destroyed = true; | |
245 | rm->stack = 0; | |
246 | } | |
247 | ||
// Describes the memory location [addr, addr+size) in the report. Tried in
// order: file descriptor, heap block, thread stack/TLS (which does not stop
// the search), global data. No-op in TSAN_GO builds and for addr == 0.
void ScopedReport::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#ifndef TSAN_GO
  int fd = -1;
  int creat_tid = -1;
  u32 creat_stack = 0;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)
      || FdLocation(AlternativeAddress(addr), &fd, &creat_tid, &creat_stack)) {
    // The address belongs to a file descriptor's bookkeeping.
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = ReportLocationFD;
    loc->fd = fd;
    loc->tid = creat_tid;
    // Symbolize the fd creation stack if it is still in the stack depot.
    uptr ssz = 0;
    const uptr *stack = StackDepotGet(creat_stack, &ssz);
    if (stack) {
      StackTrace trace;
      trace.Init(stack, ssz);
      loc->stack = SymbolizeStack(trace);
    }
    ThreadContext *tctx = FindThread(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  if (allocator()->PointerIsMine((void*)addr)) {
    // Heap block: report block bounds, allocating thread and alloc stack.
    MBlock *b = user_mblock(0, (void*)addr);
    ThreadContext *tctx = FindThread(b->alloc_tid);
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = ReportLocationHeap;
    loc->addr = (uptr)allocator()->GetBlockBegin((void*)addr);
    loc->size = b->size;
    loc->tid = tctx ? tctx->tid : b->alloc_tid;
    loc->name = 0;
    loc->file = 0;
    loc->line = 0;
    loc->stack = 0;
    uptr ssz = 0;
    const uptr *stack = StackDepotGet(b->alloc_stack_id, &ssz);
    if (stack) {
      StackTrace trace;
      trace.Init(stack, ssz);
      loc->stack = SymbolizeStack(trace);
    }
    if (tctx)
      AddThread(tctx);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    // Thread stack or TLS. Note: no early return here — global data
    // symbolization below may still add a location as well.
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = is_stack ? ReportLocationStack : ReportLocationTLS;
    loc->tid = tctx->tid;
    AddThread(tctx);
  }
  // Global/static data.
  ReportLocation *loc = SymbolizeData(addr);
  if (loc) {
    rep_->locs.PushBack(loc);
    return;
  }
#endif
}
316 | ||
#ifndef TSAN_GO
// Attaches the stack of the last sleep-like operation (fetched from the
// stack depot by id) to the report; silently skipped if the id is unknown.
void ScopedReport::AddSleep(u32 stack_id) {
  uptr ssz = 0;
  const uptr *stack = StackDepotGet(stack_id, &ssz);
  if (stack) {
    StackTrace trace;
    trace.Init(stack, ssz);
    rep_->sleep = SymbolizeStack(trace);
  }
}
#endif
328 | ||
// Read-only accessor to the report being built.
const ReportDesc *ScopedReport::GetReport() const {
  return rep_;
}
332 | ||
f5ed5428 | 333 | void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) { |
334 | // This function restores stack trace and mutex set for the thread/epoch. | |
335 | // It does so by getting stack trace and mutex set at the beginning of | |
336 | // trace part, and then replaying the trace till the given epoch. | |
9cf75457 | 337 | ThreadContext *tctx = CTX()->threads[tid]; |
338 | if (tctx == 0) | |
339 | return; | |
340 | Trace* trace = 0; | |
341 | if (tctx->status == ThreadStatusRunning) { | |
342 | CHECK(tctx->thr); | |
343 | trace = &tctx->thr->trace; | |
344 | } else if (tctx->status == ThreadStatusFinished | |
345 | || tctx->status == ThreadStatusDead) { | |
346 | if (tctx->dead_info == 0) | |
347 | return; | |
348 | trace = &tctx->dead_info->trace; | |
349 | } else { | |
350 | return; | |
351 | } | |
352 | Lock l(&trace->mtx); | |
4ab070fc | 353 | const int partidx = (epoch / kTracePartSize) % TraceParts(); |
9cf75457 | 354 | TraceHeader* hdr = &trace->headers[partidx]; |
355 | if (epoch < hdr->epoch0) | |
356 | return; | |
f5ed5428 | 357 | const u64 epoch0 = RoundDown(epoch, TraceSize()); |
4ab070fc | 358 | const u64 eend = epoch % TraceSize(); |
359 | const u64 ebegin = RoundDown(eend, kTracePartSize); | |
9cf75457 | 360 | DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n", |
361 | tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx); | |
362 | InternalScopedBuffer<uptr> stack(1024); // FIXME: de-hardcode 1024 | |
363 | for (uptr i = 0; i < hdr->stack0.Size(); i++) { | |
364 | stack[i] = hdr->stack0.Get(i); | |
365 | DPrintf2(" #%02lu: pc=%zx\n", i, stack[i]); | |
366 | } | |
f5ed5428 | 367 | if (mset) |
368 | *mset = hdr->mset0; | |
9cf75457 | 369 | uptr pos = hdr->stack0.Size(); |
4ab070fc | 370 | Event *events = (Event*)GetThreadTrace(tid); |
9cf75457 | 371 | for (uptr i = ebegin; i <= eend; i++) { |
4ab070fc | 372 | Event ev = events[i]; |
9cf75457 | 373 | EventType typ = (EventType)(ev >> 61); |
f5ed5428 | 374 | uptr pc = (uptr)(ev & ((1ull << 61) - 1)); |
9cf75457 | 375 | DPrintf2(" %zu typ=%d pc=%zx\n", i, typ, pc); |
376 | if (typ == EventTypeMop) { | |
377 | stack[pos] = pc; | |
378 | } else if (typ == EventTypeFuncEnter) { | |
379 | stack[pos++] = pc; | |
380 | } else if (typ == EventTypeFuncExit) { | |
381 | if (pos > 0) | |
382 | pos--; | |
383 | } | |
f5ed5428 | 384 | if (mset) { |
385 | if (typ == EventTypeLock) { | |
386 | mset->Add(pc, true, epoch0 + i); | |
387 | } else if (typ == EventTypeUnlock) { | |
388 | mset->Del(pc, true); | |
389 | } else if (typ == EventTypeRLock) { | |
390 | mset->Add(pc, false, epoch0 + i); | |
391 | } else if (typ == EventTypeRUnlock) { | |
392 | mset->Del(pc, false); | |
393 | } | |
394 | } | |
9cf75457 | 395 | for (uptr j = 0; j <= pos; j++) |
396 | DPrintf2(" #%zu: %zx\n", j, stack[j]); | |
397 | } | |
398 | if (pos == 0 && stack[0] == 0) | |
399 | return; | |
400 | pos++; | |
401 | stk->Init(stack.data(), pos); | |
402 | } | |
403 | ||
404 | static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2], | |
405 | uptr addr_min, uptr addr_max) { | |
406 | Context *ctx = CTX(); | |
407 | bool equal_stack = false; | |
4a2c1ffc | 408 | RacyStacks hash; |
9cf75457 | 409 | if (flags()->suppress_equal_stacks) { |
410 | hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr)); | |
411 | hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr)); | |
412 | for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) { | |
413 | if (hash == ctx->racy_stacks[i]) { | |
414 | DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n"); | |
415 | equal_stack = true; | |
416 | break; | |
417 | } | |
418 | } | |
419 | } | |
420 | bool equal_address = false; | |
421 | RacyAddress ra0 = {addr_min, addr_max}; | |
422 | if (flags()->suppress_equal_addresses) { | |
423 | for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) { | |
424 | RacyAddress ra2 = ctx->racy_addresses[i]; | |
425 | uptr maxbeg = max(ra0.addr_min, ra2.addr_min); | |
426 | uptr minend = min(ra0.addr_max, ra2.addr_max); | |
427 | if (maxbeg < minend) { | |
428 | DPrintf("ThreadSanitizer: suppressing report as doubled (addr)\n"); | |
429 | equal_address = true; | |
430 | break; | |
431 | } | |
432 | } | |
433 | } | |
434 | if (equal_stack || equal_address) { | |
435 | if (!equal_stack) | |
436 | ctx->racy_stacks.PushBack(hash); | |
437 | if (!equal_address) | |
438 | ctx->racy_addresses.PushBack(ra0); | |
439 | return true; | |
440 | } | |
441 | return false; | |
442 | } | |
443 | ||
444 | static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2], | |
445 | uptr addr_min, uptr addr_max) { | |
446 | Context *ctx = CTX(); | |
447 | if (flags()->suppress_equal_stacks) { | |
448 | RacyStacks hash; | |
449 | hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr)); | |
450 | hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr)); | |
451 | ctx->racy_stacks.PushBack(hash); | |
452 | } | |
453 | if (flags()->suppress_equal_addresses) { | |
454 | RacyAddress ra0 = {addr_min, addr_max}; | |
455 | ctx->racy_addresses.PushBack(ra0); | |
456 | } | |
457 | } | |
458 | ||
459 | bool OutputReport(Context *ctx, | |
460 | const ScopedReport &srep, | |
461 | const ReportStack *suppress_stack) { | |
462 | const ReportDesc *rep = srep.GetReport(); | |
463 | const uptr suppress_pc = IsSuppressed(rep->typ, suppress_stack); | |
464 | if (suppress_pc != 0) { | |
465 | FiredSuppression supp = {srep.GetReport()->typ, suppress_pc}; | |
466 | ctx->fired_suppressions.PushBack(supp); | |
467 | } | |
468 | if (OnReport(rep, suppress_pc != 0)) | |
469 | return false; | |
470 | PrintReport(rep); | |
471 | CTX()->nreported++; | |
472 | return true; | |
473 | } | |
474 | ||
475 | bool IsFiredSuppression(Context *ctx, | |
476 | const ScopedReport &srep, | |
477 | const StackTrace &trace) { | |
478 | for (uptr k = 0; k < ctx->fired_suppressions.Size(); k++) { | |
479 | if (ctx->fired_suppressions[k].type != srep.GetReport()->typ) | |
480 | continue; | |
481 | for (uptr j = 0; j < trace.Size(); j++) { | |
482 | if (trace.Get(j) == ctx->fired_suppressions[k].pc) | |
483 | return true; | |
484 | } | |
485 | } | |
486 | return false; | |
487 | } | |
488 | ||
// On programs that use Java we see weird reports like:
// WARNING: ThreadSanitizer: data race (pid=22512)
//   Read of size 8 at 0x7d2b00084318 by thread 100:
//     #0 memcpy tsan_interceptors.cc:406 (foo+0x00000d8dfae3)
//     #1 <null> <null>:0 (0x7f7ad9b40193)
//   Previous write of size 8 at 0x7d2b00084318 by thread 105:
//     #0 strncpy tsan_interceptors.cc:501 (foo+0x00000d8e0919)
//     #1 <null> <null>:0 (0x7f7ad9b42707)
// Heuristic: the top frame is a known libc string/memory/alloc function and
// its caller frame could not be symbolized at all (JITted code, presumably).
static bool IsJavaNonsense(const ReportDesc *rep) {
  for (uptr i = 0; i < rep->mops.Size(); i++) {
    ReportMop *mop = rep->mops[i];
    ReportStack *frame = mop->stack;
    if (frame != 0 && frame->func != 0
        && (internal_strcmp(frame->func, "memset") == 0
          || internal_strcmp(frame->func, "memcpy") == 0
          || internal_strcmp(frame->func, "memmove") == 0
          || internal_strcmp(frame->func, "strcmp") == 0
          || internal_strcmp(frame->func, "strncpy") == 0
          || internal_strcmp(frame->func, "strlen") == 0
          || internal_strcmp(frame->func, "free") == 0
          || internal_strcmp(frame->func, "pthread_mutex_lock") == 0)) {
      frame = frame->next;
      // Caller frame is absent or completely unsymbolized.
      if (frame == 0
          || (frame->func == 0 && frame->file == 0 && frame->line == 0
            && frame->module == 0)) {
        if (frame) {
          // Remember the unsymbolizable pc so IsFiredSuppression() drops
          // future reports through it without rerunning this heuristic.
          FiredSuppression supp = {rep->typ, frame->pc};
          CTX()->fired_suppressions.PushBack(supp);
        }
        return true;
      }
    }
  }
  return false;
}
524 | ||
// Entry point for reporting a data race (or race on freed memory) that was
// just detected on `thr`. Collects both stacks, mutex sets, involved
// threads and the memory location description, runs the duplicate and
// suppression filters, and prints the report.
void ReportRace(ThreadState *thr) {
  if (!flags()->report_bugs)
    return;
  ScopedInRtl in_rtl;

  if (thr->in_signal_handler)
    Printf("ThreadSanitizer: printing report from signal handler."
           " Can crash or hang.\n");

  // If the old access touched already-freed memory, report use-after-free;
  // clear the freed bit so the shadow decoding below works uniformly.
  bool freed = false;
  {
    Shadow s(thr->racy_state[1]);
    freed = s.GetFreedAndReset();
    thr->racy_state[1] = s.raw();
  }

  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
  uptr addr_min = 0;
  uptr addr_max = 0;
  // Compute the union of the two accessed byte ranges.
  {
    uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
    uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
    uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
    uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
    addr_min = min(a0, a1);
    addr_max = max(e0, e1);
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;  // race was annotated as expected (e.g. in a test)
  }

  Context *ctx = CTX();
  Lock l0(&ctx->thread_mtx);  // required by ScopedReport and thread lookups

  ScopedReport rep(freed ? ReportTypeUseAfterFree : ReportTypeRace);
  const uptr kMop = 2;  // a race always involves exactly two accesses
  StackTrace traces[kMop];
  const uptr toppc = TraceTopPC(thr);
  traces[0].ObtainCurrent(thr, toppc);
  if (IsFiredSuppression(ctx, rep, traces[0]))
    return;
  InternalScopedBuffer<MutexSet> mset2(1);
  new(mset2.data()) MutexSet();
  // The second access happened in the past: restore its stack and mutex
  // set by replaying that thread's trace up to the racy epoch.
  Shadow s2(thr->racy_state[1]);
  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2.data());

  if (HandleRacyStacks(thr, traces, addr_min, addr_max))
    return;  // duplicate of an already-reported race

  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, s, &traces[i],
                        i == 0 ? &thr->mset : mset2.data());
  }

  if (flags()->suppress_java && IsJavaNonsense(rep.GetReport()))
    return;

  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = ctx->threads[s.tid()];
    // Skip if the epoch falls outside this context's lifetime in the tid
    // slot (the tid may have since been reused by another thread).
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
      continue;
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#ifndef TSAN_GO
  { // NOLINT
    // If the old access predates the last sleep on that tid's clock,
    // attach the sleep stack to the report.
    Shadow s(thr->racy_state[1]);
    if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
      rep.AddSleep(thr->last_sleep_stack_id);
  }
#endif

  if (!OutputReport(ctx, rep, rep.GetReport()->mops[0]->stack))
    return;

  AddRacyStacks(thr, traces, addr_min, addr_max);
}
605 | ||
606 | void PrintCurrentStack(ThreadState *thr, uptr pc) { | |
607 | StackTrace trace; | |
608 | trace.ObtainCurrent(thr, pc); | |
609 | PrintStack(SymbolizeStack(trace)); | |
610 | } | |
611 | ||
612 | } // namespace __tsan |