//===-- tsan_rtl_report.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_report.h"
#include "tsan_sync.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_fd.h"

namespace __tsan {

using namespace __sanitizer;  // NOLINT

static ReportStack *SymbolizeStack(StackTrace trace);

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2) {
  // There is a high probability that interceptors will check-fail as well,
  // on the other hand there is no sense in processing interceptors
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
#if !SANITIZER_GO
  cur_thread()->ignore_sync++;
  cur_thread()->ignore_reads_and_writes++;
#endif
  Printf("FATAL: ThreadSanitizer CHECK failed: "
         "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
         file, line, cond, (uptr)v1, (uptr)v2);
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
  Die();
}

// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif

SANITIZER_WEAK_DEFAULT_IMPL
void __tsan_on_report(const ReportDesc *rep) {
  (void)rep;
}

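// Strips the bottom-most frames that do not belong to user code: the frame
// above main(), TSan's internal thread start routine, or the global ctors
// trampoline. On Go the last frame always belongs to the runtime and is
// stripped unconditionally.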
static void StackStripMain(SymbolizedStack *frames) {
  SymbolizedStack *last_frame = nullptr;
  SymbolizedStack *last_frame2 = nullptr;
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    last_frame2 = last_frame;
    last_frame = cur;
  }

  if (last_frame2 == 0)
    return;
#if !SANITIZER_GO
  const char *last = last_frame->info.function;
  const char *last2 = last_frame2->info.function;
  // Strip frame above 'main'
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // Strip global ctors init.
  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered the stack completely. A trimmed stack
    // can actually happen if we do not instrument some code,
    // so it's only a debug print. However we must try hard not to miss it
    // due to our fault.
    DPrintf("Bottom stack frame is missed\n");
  }
#else
  // The last frame always points into the runtime (gosched0, goexit0,
  // runtime.main).
  last_frame->ClearAll();
  last_frame2->next = nullptr;
#endif
}

ReportStack *SymbolizeStackId(u32 stack_id) {
  if (stack_id == 0)
    return 0;
  StackTrace stack = StackDepotGet(stack_id);
  if (stack.trace == nullptr)
    return nullptr;
  return SymbolizeStack(stack);
}

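// Symbolizes a raw trace into a linked list of report frames. Captured PCs
// are return addresses, so for non-external PCs the previous instruction
// (the call site) is symbolized, while the original PC is kept in the
// resulting frames. Inline frames returned by SymbolizeCode are preserved.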
static ReportStack *SymbolizeStack(StackTrace trace) {
  if (trace.size == 0)
    return 0;
  SymbolizedStack *top = nullptr;
  for (uptr si = 0; si < trace.size; si++) {
    const uptr pc = trace.trace[si];
    uptr pc1 = pc;
    // We obtain the return address, but we're interested in the previous
    // instruction.
    if ((pc & kExternalPCBit) == 0)
      pc1 = StackTrace::GetPreviousInstructionPc(pc);
    SymbolizedStack *ent = SymbolizeCode(pc1);
    CHECK_NE(ent, 0);
    SymbolizedStack *last = ent;
    while (last->next) {
      last->info.address = pc;  // restore original pc for report
      last = last->next;
    }
    last->info.address = pc;  // restore original pc for report
    last->next = top;
    top = ent;
  }
  StackStripMain(top);

  ReportStack *stack = ReportStack::New();
  stack->frames = top;
  return stack;
}

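// A ScopedReportBase owns one ReportDesc and holds ctx->report_mtx for its
// entire lifetime, serializing report construction and printing. Callers
// must already hold the thread registry lock.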
ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) {
  ctx->thread_registry->CheckLocked();
  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
  rep_ = new(mem) ReportDesc;
  rep_->typ = typ;
  rep_->tag = tag;
  ctx->report_mtx.Lock();
}

ScopedReportBase::~ScopedReportBase() {
  ctx->report_mtx.Unlock();
  DestroyAndFree(rep_);
}

void ScopedReportBase::AddStack(StackTrace stack, bool suppressable) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(stack);
  (*rs)->suppressable = suppressable;
}

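// Converts one racy shadow value into a ReportMop. The shadow encodes the
// access offset within its 8-byte granule (addr0), the access size, the tid,
// and the write/atomic bits; the mutexes held during the access are resolved
// from the given MutexSet.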
void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
                                       StackTrace stack, const MutexSet *mset) {
  void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
  ReportMop *mop = new(mem) ReportMop;
  rep_->mops.PushBack(mop);
  mop->tid = s.tid();
  mop->addr = addr + s.addr0();
  mop->size = s.size();
  mop->write = s.IsWrite();
  mop->atomic = s.IsAtomic();
  mop->stack = SymbolizeStack(stack);
  mop->external_tag = external_tag;
  if (mop->stack)
    mop->stack->suppressable = true;
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    u64 mid = this->AddMutex(d.id);
    ReportMopMutex mtx = {mid, d.write};
    mop->mset.PushBack(mtx);
  }
}

void ScopedReportBase::AddUniqueTid(int unique_tid) {
  rep_->unique_tids.PushBack(unique_tid);
}

void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if ((u32)rep_->threads[i]->id == tctx->tid)
      return;
  }
  void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
  ReportThread *rt = new(mem) ReportThread;
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->os_id = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = internal_strdup(tctx->name);
  rt->parent_tid = tctx->parent_tid;
  rt->thread_type = tctx->thread_type;
  rt->stack = 0;
  rt->stack = SymbolizeStackId(tctx->creation_stack_id);
  if (rt->stack)
    rt->stack->suppressable = suppressable;
}

#if !SANITIZER_GO
static bool FindThreadByUidLockedCallback(ThreadContextBase *tctx, void *arg) {
  int unique_id = *(int *)arg;
  return tctx->unique_id == (u32)unique_id;
}

static ThreadContext *FindThreadByUidLocked(int unique_id) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext *>(
      ctx->thread_registry->FindThreadContextLocked(
          FindThreadByUidLockedCallback, &unique_id));
}

static ThreadContext *FindThreadByTidLocked(int tid) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
}

static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  uptr addr = (uptr)arg;
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status != ThreadStatusRunning)
    return false;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
          (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
}

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
                                                    (void*)addr));
  if (!tctx)
    return 0;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}
#endif

void ScopedReportBase::AddThread(int unique_tid, bool suppressable) {
#if !SANITIZER_GO
  if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
    AddThread(tctx, suppressable);
#endif
}

void ScopedReportBase::AddMutex(const SyncVar *s) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == s->uid)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex;
  rep_->mutexes.PushBack(rm);
  rm->id = s->uid;
  rm->addr = s->addr;
  rm->destroyed = false;
  rm->stack = SymbolizeStackId(s->creation_stack_id);
}

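// A mutex id packs the mutex address together with a uid that distinguishes
// reincarnations of a mutex at the same address. If the SyncVar is still
// alive and the uid matches, the live mutex is added and its uid returned;
// otherwise the mutex is recorded as dead.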
u64 ScopedReportBase::AddMutex(u64 id) {
  u64 uid = 0;
  u64 mid = id;
  uptr addr = SyncVar::SplitId(id, &uid);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
  // Check that the mutex is still alive.
  // Another mutex can be created at the same address,
  // so check uid as well.
  if (s && s->CheckId(uid)) {
    mid = s->uid;
    AddMutex(s);
  } else {
    AddDeadMutex(id);
  }
  if (s)
    s->mtx.Unlock();
  return mid;
}

void ScopedReportBase::AddDeadMutex(u64 id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == id)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex;
  rep_->mutexes.PushBack(rm);
  rm->id = id;
  rm->addr = 0;
  rm->destroyed = true;
  rm->stack = 0;
}

void ScopedReportBase::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#if !SANITIZER_GO
  int fd = -1;
  int creat_tid = kInvalidTid;
  u32 creat_stack = 0;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
    ReportLocation *loc = ReportLocation::New(ReportLocationFD);
    loc->fd = fd;
    loc->tid = creat_tid;
    loc->stack = SymbolizeStackId(creat_stack);
    rep_->locs.PushBack(loc);
    ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  MBlock *b = 0;
  Allocator *a = allocator();
  if (a->PointerIsMine((void*)addr)) {
    void *block_begin = a->GetBlockBegin((void*)addr);
    if (block_begin)
      b = ctx->metamap.GetBlock((uptr)block_begin);
  }
  if (b != 0) {
    ThreadContext *tctx = FindThreadByTidLocked(b->tid);
    ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
    loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
    loc->heap_chunk_size = b->siz;
    loc->external_tag = b->tag;
    loc->tid = tctx ? tctx->tid : b->tid;
    loc->stack = SymbolizeStackId(b->stk);
    rep_->locs.PushBack(loc);
    if (tctx)
      AddThread(tctx);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    ReportLocation *loc =
        ReportLocation::New(is_stack ? ReportLocationStack : ReportLocationTLS);
    loc->tid = tctx->tid;
    rep_->locs.PushBack(loc);
    AddThread(tctx);
  }
#endif
  if (ReportLocation *loc = SymbolizeData(addr)) {
    loc->suppressable = true;
    rep_->locs.PushBack(loc);
    return;
  }
}

#if !SANITIZER_GO
void ScopedReportBase::AddSleep(u32 stack_id) {
  rep_->sleep = SymbolizeStackId(stack_id);
}
#endif

void ScopedReportBase::SetCount(int count) { rep_->count = count; }

const ReportDesc *ScopedReportBase::GetReport() const { return rep_; }

ScopedReport::ScopedReport(ReportType typ, uptr tag)
    : ScopedReportBase(typ, tag) {}

ScopedReport::~ScopedReport() {}

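// A trace event packs the event type into the top bits and the PC into the
// low kEventPCBits. Replaying maintains a simulated call stack: FuncEnter
// pushes a PC, FuncExit pops one, and a memory-access event overwrites the
// top slot, so after replaying up to the target epoch the vector holds the
// call stack at the moment of the access.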
void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset, uptr *tag) {
  // This function restores stack trace and mutex set for the thread/epoch.
  // It does so by getting stack trace and mutex set at the beginning of
  // trace part, and then replaying the trace till the given epoch.
  Trace* trace = ThreadTrace(tid);
  ReadLock l(&trace->mtx);
  const int partidx = (epoch / kTracePartSize) % TraceParts();
  TraceHeader* hdr = &trace->headers[partidx];
  if (epoch < hdr->epoch0 || epoch >= hdr->epoch0 + kTracePartSize)
    return;
  CHECK_EQ(RoundDown(epoch, kTracePartSize), hdr->epoch0);
  const u64 epoch0 = RoundDown(epoch, TraceSize());
  const u64 eend = epoch % TraceSize();
  const u64 ebegin = RoundDown(eend, kTracePartSize);
  DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
          tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
  Vector<uptr> stack;
  stack.Resize(hdr->stack0.size + 64);
  for (uptr i = 0; i < hdr->stack0.size; i++) {
    stack[i] = hdr->stack0.trace[i];
    DPrintf2("  #%02zu: pc=%zx\n", i, stack[i]);
  }
  if (mset)
    *mset = hdr->mset0;
  uptr pos = hdr->stack0.size;
  Event *events = (Event*)GetThreadTrace(tid);
  for (uptr i = ebegin; i <= eend; i++) {
    Event ev = events[i];
    EventType typ = (EventType)(ev >> kEventPCBits);
    uptr pc = (uptr)(ev & ((1ull << kEventPCBits) - 1));
    DPrintf2("  %zu typ=%d pc=%zx\n", i, typ, pc);
    if (typ == EventTypeMop) {
      stack[pos] = pc;
    } else if (typ == EventTypeFuncEnter) {
      if (stack.Size() < pos + 2)
        stack.Resize(pos + 2);
      stack[pos++] = pc;
    } else if (typ == EventTypeFuncExit) {
      if (pos > 0)
        pos--;
    }
    if (mset) {
      if (typ == EventTypeLock) {
        mset->Add(pc, true, epoch0 + i);
      } else if (typ == EventTypeUnlock) {
        mset->Del(pc, true);
      } else if (typ == EventTypeRLock) {
        mset->Add(pc, false, epoch0 + i);
      } else if (typ == EventTypeRUnlock) {
        mset->Del(pc, false);
      }
    }
    for (uptr j = 0; j <= pos; j++)
      DPrintf2("      #%zu: %zx\n", j, stack[j]);
  }
  if (pos == 0 && stack[0] == 0)
    return;
  pos++;
  stk->Init(&stack[0], pos);
  ExtractTagFromStack(stk, tag);
}

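// Suppresses duplicated reports. With suppress_equal_stacks two races are
// considered equal if the md5 hashes of both stack pairs match; with
// suppress_equal_addresses they are equal if the racy address ranges
// overlap. Returns true if the race is a duplicate of an already reported
// one.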
static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                             uptr addr_min, uptr addr_max) {
  bool equal_stack = false;
  RacyStacks hash;
  bool equal_address = false;
  RacyAddress ra0 = {addr_min, addr_max};
  {
    ReadLock lock(&ctx->racy_mtx);
    if (flags()->suppress_equal_stacks) {
      hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
      hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
      for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
        if (hash == ctx->racy_stacks[i]) {
          VPrintf(2,
              "ThreadSanitizer: suppressing report as doubled (stack)\n");
          equal_stack = true;
          break;
        }
      }
    }
    if (flags()->suppress_equal_addresses) {
      for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
        RacyAddress ra2 = ctx->racy_addresses[i];
        uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
        uptr minend = min(ra0.addr_max, ra2.addr_max);
        if (maxbeg < minend) {
          VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
          equal_address = true;
          break;
        }
      }
    }
  }
  if (!equal_stack && !equal_address)
    return false;
  if (!equal_stack) {
    Lock lock(&ctx->racy_mtx);
    ctx->racy_stacks.PushBack(hash);
  }
  if (!equal_address) {
    Lock lock(&ctx->racy_mtx);
    ctx->racy_addresses.PushBack(ra0);
  }
  return true;
}

static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                          uptr addr_min, uptr addr_max) {
  Lock lock(&ctx->racy_mtx);
  if (flags()->suppress_equal_stacks) {
    RacyStacks hash;
    hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
    ctx->racy_stacks.PushBack(hash);
  }
  if (flags()->suppress_equal_addresses) {
    RacyAddress ra0 = {addr_min, addr_max};
    ctx->racy_addresses.PushBack(ra0);
  }
}

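// Matches the report against suppressions (memory-access stacks first, then
// extra stacks, thread creation stacks, and locations; the first match
// wins), gives the OnReport hook a chance to veto it, and finally prints
// the report. Returns true if the report was actually printed.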
bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
  if (!flags()->report_bugs || thr->suppress_reports)
    return false;
  atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
  const ReportDesc *rep = srep.GetReport();
  CHECK_EQ(thr->current_report, nullptr);
  thr->current_report = rep;
  Suppression *supp = 0;
  uptr pc_or_addr = 0;
  for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp);
  if (pc_or_addr != 0) {
    Lock lock(&ctx->fired_suppressions_mtx);
    FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp};
    ctx->fired_suppressions.push_back(s);
  }
  {
    bool old_is_freeing = thr->is_freeing;
    thr->is_freeing = false;
    bool suppressed = OnReport(rep, pc_or_addr != 0);
    thr->is_freeing = old_is_freeing;
    if (suppressed) {
      thr->current_report = nullptr;
      return false;
    }
  }
  PrintReport(rep);
  __tsan_on_report(rep);
  ctx->nreported++;
  if (flags()->halt_on_error)
    Die();
  thr->current_report = nullptr;
  return true;
}

bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    for (uptr j = 0; j < trace.size; j++) {
      FiredSuppression *s = &ctx->fired_suppressions[k];
      if (trace.trace[j] == s->pc_or_addr) {
        if (s->supp)
          atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
        return true;
      }
    }
  }
  return false;
}

static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    FiredSuppression *s = &ctx->fired_suppressions[k];
    if (addr == s->pc_or_addr) {
      if (s->supp)
        atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
      return true;
    }
  }
  return false;
}

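// Used when report_atomic_races is off: the race is still reported if
// neither access is atomic, or if an atomic access races with a free
// (a use-after-free of an atomic variable).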
static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
  Shadow s0(thr->racy_state[0]);
  Shadow s1(thr->racy_state[1]);
  CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
  if (!s0.IsAtomic() && !s1.IsAtomic())
    return true;
  if (s0.IsAtomic() && s1.IsFreed())
    return true;
  if (s1.IsAtomic() && thr->is_freeing)
    return true;
  return false;
}

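// Entry point for race reporting. Computes the racy address range from the
// two saved shadow values, classifies the report type, restores the other
// thread's stack and mutex set by replaying its trace, applies suppressions
// and deduplication, and assembles the report under the thread registry
// lock.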
void ReportRace(ThreadState *thr) {
  CheckNoLocks(thr);

  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore;

  if (!flags()->report_bugs)
    return;
  if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
    return;

  bool freed = false;
  {
    Shadow s(thr->racy_state[1]);
    freed = s.GetFreedAndReset();
    thr->racy_state[1] = s.raw();
  }

  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
  uptr addr_min = 0;
  uptr addr_max = 0;
  {
    uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
    uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
    uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
    uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
    addr_min = min(a0, a1);
    addr_max = max(e0, e1);
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;
  }

  ReportType typ = ReportTypeRace;
  if (thr->is_vptr_access && freed)
    typ = ReportTypeVptrUseAfterFree;
  else if (thr->is_vptr_access)
    typ = ReportTypeVptrRace;
  else if (freed)
    typ = ReportTypeUseAfterFree;

  if (IsFiredSuppression(ctx, typ, addr))
    return;

  const uptr kMop = 2;
  VarSizeStackTrace traces[kMop];
  uptr tags[kMop] = {kExternalTagNone};
  uptr toppc = TraceTopPC(thr);
  if (toppc >> kEventPCBits) {
    // This is a work-around for a known issue.
    // The scenario where this happens is rather elaborate and requires
    // an instrumented __sanitizer_report_error_summary callback and
    // a __tsan_symbolize_external callback and a race during a range memory
    // access larger than 8 bytes. MemoryAccessRange adds the current PC to
    // the trace and starts processing memory accesses. A first memory access
    // triggers a race, we report it and call the instrumented
    // __sanitizer_report_error_summary, which adds more stuff to the trace
    // since it is instrumented. Then a second memory access in
    // MemoryAccessRange also triggers a race and we get here and call
    // TraceTopPC to get the current PC, however now it contains some
    // unrelated events from the callback. Most likely, TraceTopPC will now
    // return an EventTypeFuncExit event. Later we subtract 1 from it
    // (in GetPreviousInstructionPc) and the resulting PC has kExternalPCBit
    // set, so we pass it to __tsan_symbolize_external_ex.
    // __tsan_symbolize_external_ex is within its rights to crash since the
    // PC is completely bogus.
    // test/tsan/double_race.cpp contains a test case for this.
    toppc = 0;
  }
  ObtainCurrentStack(thr, toppc, &traces[0], &tags[0]);
  if (IsFiredSuppression(ctx, typ, traces[0]))
    return;

  // MutexSet is too large to live on stack.
  Vector<u64> mset_buffer;
  mset_buffer.Resize(sizeof(MutexSet) / sizeof(u64) + 1);
  MutexSet *mset2 = new(&mset_buffer[0]) MutexSet();

  Shadow s2(thr->racy_state[1]);
  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2, &tags[1]);
  if (IsFiredSuppression(ctx, typ, traces[1]))
    return;

  if (HandleRacyStacks(thr, traces, addr_min, addr_max))
    return;

  // If any of the accesses has a tag, treat this as an "external" race.
  uptr tag = kExternalTagNone;
  for (uptr i = 0; i < kMop; i++) {
    if (tags[i] != kExternalTagNone) {
      typ = ReportTypeExternalRace;
      tag = tags[i];
      break;
    }
  }

  ThreadRegistryLock l0(ctx->thread_registry);
  ScopedReport rep(typ, tag);
  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, tags[i], s, traces[i],
                        i == 0 ? &thr->mset : mset2);
  }

  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(s.tid()));
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
      continue;
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#if !SANITIZER_GO
  {  // NOLINT
    Shadow s(thr->racy_state[1]);
    if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
      rep.AddSleep(thr->last_sleep_stack_id);
  }
#endif

  if (!OutputReport(thr, rep))
    return;

  AddRacyStacks(thr, traces, addr_min, addr_max);
}

void PrintCurrentStack(ThreadState *thr, uptr pc) {
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  PrintStack(SymbolizeStack(trace));
}

// Always inlining PrintCurrentStackSlow, because LocatePcInTrace assumes
// __sanitizer_print_stack_trace exists in the actual unwound stack, but a
// tail-call to PrintCurrentStackSlow breaks this assumption because
// __sanitizer_print_stack_trace disappears after the tail-call.
// However, this solution is not reliable enough, please see dvyukov's comment
// http://reviews.llvm.org/D19148#406208
// Also see PR27280 comments 2 and 3 for breaking examples and analysis.
ALWAYS_INLINE
void PrintCurrentStackSlow(uptr pc) {
#if !SANITIZER_GO
  uptr bp = GET_CURRENT_FRAME();
  BufferedStackTrace *ptrace =
      new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
          BufferedStackTrace();
  ptrace->Unwind(pc, bp, nullptr, false);

  // Unwind() returns frames innermost-first; reverse them into the
  // outermost-first order that SymbolizeStack expects.
  for (uptr i = 0; i < ptrace->size / 2; i++) {
    uptr tmp = ptrace->trace_buffer[i];
    ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
    ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
  }
  PrintStack(SymbolizeStack(*ptrace));
#endif
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
}  // extern "C"