//===-- tsan_rtl_report.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_report.h"
#include "tsan_sync.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_fd.h"

namespace __tsan {

using namespace __sanitizer;  // NOLINT

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2) {
  ScopedInRtl in_rtl;
  Printf("FATAL: ThreadSanitizer CHECK failed: "
         "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
         file, line, cond, (uptr)v1, (uptr)v2);
  Die();
}

// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_INTERFACE_ATTRIBUTE
bool WEAK OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif

static void StackStripMain(ReportStack *stack) {
  ReportStack *last_frame = 0;
  ReportStack *last_frame2 = 0;
  const char *prefix = "__interceptor_";
  uptr prefix_len = internal_strlen(prefix);
  const char *path_prefix = flags()->strip_path_prefix;
  uptr path_prefix_len = internal_strlen(path_prefix);
  char *pos;
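  // Strip the "__interceptor_" prefix and the configured path prefix from
  // every frame, and remember the last two frames for the trimming below.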
  for (ReportStack *ent = stack; ent; ent = ent->next) {
    if (ent->func && 0 == internal_strncmp(ent->func, prefix, prefix_len))
      ent->func += prefix_len;
    if (ent->file && (pos = internal_strstr(ent->file, path_prefix)))
      ent->file = pos + path_prefix_len;
    if (ent->file && ent->file[0] == '.' && ent->file[1] == '/')
      ent->file += 2;
    last_frame2 = last_frame;
    last_frame = ent;
  }

  if (last_frame2 == 0)
    return;
  const char *last = last_frame->func;
#ifndef TSAN_GO
  const char *last2 = last_frame2->func;
  // Strip frame above 'main'
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame2->next = 0;
  // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame2->next = 0;
  // Strip global ctors init.
  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
    last_frame2->next = 0;
  // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered the stack completely. A trimmed stack
    // can legitimately happen if we do not instrument some code,
    // so this is only a debug print. However, we must try hard not to
    // miss it when it is our fault.
    DPrintf("Bottom stack frame of stack %zx is missing\n", stack->pc);
  }
#else
  if (last && 0 == internal_strcmp(last, "schedunlock"))
    last_frame2->next = 0;
#endif
}

static ReportStack *SymbolizeStack(const StackTrace& trace) {
  if (trace.IsEmpty())
    return 0;
  ReportStack *stack = 0;
  for (uptr si = 0; si < trace.Size(); si++) {
    // We obtain the return address, that is, the address of the next
    // instruction, so offset it by 1 byte.
    bool is_last = (si == trace.Size() - 1);
    ReportStack *ent = SymbolizeCode(trace.Get(si) - !is_last);
    CHECK_NE(ent, 0);
    ReportStack *last = ent;
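    // SymbolizeCode may return a chain of frames (e.g. for inlined
    // functions); undo the 1-byte adjustment on each frame's pc so the
    // report shows the original return address.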
    while (last->next) {
      last->pc += !is_last;
      last = last->next;
    }
    last->pc += !is_last;
    last->next = stack;
    stack = ent;
  }
  StackStripMain(stack);
  return stack;
}

ScopedReport::ScopedReport(ReportType typ) {
  ctx_ = CTX();
  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
  rep_ = new(mem) ReportDesc;
  rep_->typ = typ;
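  // Take the global report mutex here; it is released in the destructor,
  // so the report is built and printed atomically.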
  ctx_->report_mtx.Lock();
}

ScopedReport::~ScopedReport() {
  ctx_->report_mtx.Unlock();
  DestroyAndFree(rep_);
}

void ScopedReport::AddStack(const StackTrace *stack) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(*stack);
}

void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
    const StackTrace *stack, const MutexSet *mset) {
  void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
  ReportMop *mop = new(mem) ReportMop;
  rep_->mops.PushBack(mop);
  mop->tid = s.tid();
  mop->addr = addr + s.addr0();
  mop->size = s.size();
  mop->write = s.is_write();
  mop->stack = SymbolizeStack(*stack);
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    u64 uid = 0;
    uptr addr = SyncVar::SplitId(d.id, &uid);
    SyncVar *s = ctx_->synctab.GetIfExistsAndLock(addr, false);
    // Check that the mutex is still alive.
    // Another mutex can be created at the same address,
    // so check uid as well.
    if (s && s->CheckId(uid)) {
      ReportMopMutex mtx = {s->uid, d.write};
      mop->mset.PushBack(mtx);
      AddMutex(s);
    } else {
      ReportMopMutex mtx = {d.id, d.write};
      mop->mset.PushBack(mtx);
      AddMutex(d.id);
    }
    if (s)
      s->mtx.ReadUnlock();
  }
}

void ScopedReport::AddThread(const ThreadContext *tctx) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if (rep_->threads[i]->id == tctx->tid)
      return;
  }
  void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
  ReportThread *rt = new(mem) ReportThread();
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->pid = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = tctx->name ? internal_strdup(tctx->name) : 0;
  rt->parent_tid = tctx->creation_tid;
  rt->stack = SymbolizeStack(tctx->creation_stack);
}

#ifndef TSAN_GO
static ThreadContext *FindThread(int unique_id) {
  CTX()->thread_mtx.CheckLocked();
  for (unsigned i = 0; i < kMaxTid; i++) {
    ThreadContext *tctx = CTX()->threads[i];
    if (tctx && tctx->unique_id == unique_id) {
      return tctx;
    }
  }
  return 0;
}
#endif

void ScopedReport::AddMutex(const SyncVar *s) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == s->uid)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  rm->id = s->uid;
  rm->destroyed = false;
  rm->stack = SymbolizeStack(s->creation_stack);
}

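// Overload used when the SyncVar no longer exists (the mutex was destroyed
// or its address reused); only the id is known, so the mutex is recorded as
// destroyed and without a creation stack.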
void ScopedReport::AddMutex(u64 id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == id)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  rm->id = id;
  rm->destroyed = true;
  rm->stack = 0;
}

void ScopedReport::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#ifndef TSAN_GO
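  // Classify the address: first check for a file descriptor, then for a
  // heap block, and finally fall back to symbolizing it as a global.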
  int fd = -1;
  int creat_tid = -1;
  u32 creat_stack = 0;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)
      || FdLocation(AlternativeAddress(addr), &fd, &creat_tid, &creat_stack)) {
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = ReportLocationFD;
    loc->fd = fd;
    loc->tid = creat_tid;
    uptr ssz = 0;
    const uptr *stack = StackDepotGet(creat_stack, &ssz);
    if (stack) {
      StackTrace trace;
      trace.Init(stack, ssz);
      loc->stack = SymbolizeStack(trace);
    }
    ThreadContext *tctx = FindThread(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  if (allocator()->PointerIsMine((void*)addr)) {
    MBlock *b = user_mblock(0, (void*)addr);
    ThreadContext *tctx = FindThread(b->alloc_tid);
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = ReportLocationHeap;
    loc->addr = (uptr)allocator()->GetBlockBegin((void*)addr);
    loc->size = b->size;
    loc->tid = tctx ? tctx->tid : b->alloc_tid;
    loc->name = 0;
    loc->file = 0;
    loc->line = 0;
    loc->stack = 0;
    uptr ssz = 0;
    const uptr *stack = StackDepotGet(b->alloc_stack_id, &ssz);
    if (stack) {
      StackTrace trace;
      trace.Init(stack, ssz);
      loc->stack = SymbolizeStack(trace);
    }
    if (tctx)
      AddThread(tctx);
    return;
  }
#endif
  ReportStack *symb = SymbolizeData(addr);
  if (symb) {
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = ReportLocationGlobal;
    loc->addr = addr;
    loc->size = size;
    loc->module = symb->module ? internal_strdup(symb->module) : 0;
    loc->offset = symb->offset;
    loc->tid = 0;
    loc->name = symb->func ? internal_strdup(symb->func) : 0;
    loc->file = symb->file ? internal_strdup(symb->file) : 0;
    loc->line = symb->line;
    loc->stack = 0;
    internal_free(symb);
    return;
  }
}

#ifndef TSAN_GO
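// Record the stack of the thread's last sleep; the report can then point
// out that the two accesses may be ordered only by that sleep rather than
// by real synchronization.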
void ScopedReport::AddSleep(u32 stack_id) {
  uptr ssz = 0;
  const uptr *stack = StackDepotGet(stack_id, &ssz);
  if (stack) {
    StackTrace trace;
    trace.Init(stack, ssz);
    rep_->sleep = SymbolizeStack(trace);
  }
}
#endif

const ReportDesc *ScopedReport::GetReport() const {
  return rep_;
}

void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
  // This function restores the stack trace and mutex set for the given
  // thread/epoch. It does so by taking the stack trace and mutex set at the
  // beginning of the trace part and then replaying the trace up to the
  // given epoch.
  ThreadContext *tctx = CTX()->threads[tid];
  if (tctx == 0)
    return;
  Trace* trace = 0;
  if (tctx->status == ThreadStatusRunning) {
    CHECK(tctx->thr);
    trace = &tctx->thr->trace;
  } else if (tctx->status == ThreadStatusFinished
      || tctx->status == ThreadStatusDead) {
    if (tctx->dead_info == 0)
      return;
    trace = &tctx->dead_info->trace;
  } else {
    return;
  }
  Lock l(&trace->mtx);
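  // The trace is a circular buffer of events divided into fixed-size parts;
  // locate the part that contains the requested epoch.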
  const int partidx = (epoch / kTracePartSize) % TraceParts();
  TraceHeader* hdr = &trace->headers[partidx];
  if (epoch < hdr->epoch0)
    return;
  const u64 epoch0 = RoundDown(epoch, TraceSize());
  const u64 eend = epoch % TraceSize();
  const u64 ebegin = RoundDown(eend, kTracePartSize);
  DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
          tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
  InternalScopedBuffer<uptr> stack(1024);  // FIXME: de-hardcode 1024
  for (uptr i = 0; i < hdr->stack0.Size(); i++) {
    stack[i] = hdr->stack0.Get(i);
    DPrintf2(" #%02lu: pc=%zx\n", i, stack[i]);
  }
  if (mset)
    *mset = hdr->mset0;
  uptr pos = hdr->stack0.Size();
  Event *events = (Event*)GetThreadTrace(tid);
  for (uptr i = ebegin; i <= eend; i++) {
    Event ev = events[i];
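    // An event is a 64-bit word: the top 3 bits encode the event type and
    // the low 61 bits carry the payload (a PC for memory/function events,
    // a mutex identifier for lock/unlock events).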
9cf75457 354 EventType typ = (EventType)(ev >> 61);
f5ed5428 355 uptr pc = (uptr)(ev & ((1ull << 61) - 1));
9cf75457 356 DPrintf2(" %zu typ=%d pc=%zx\n", i, typ, pc);
357 if (typ == EventTypeMop) {
358 stack[pos] = pc;
359 } else if (typ == EventTypeFuncEnter) {
360 stack[pos++] = pc;
361 } else if (typ == EventTypeFuncExit) {
362 if (pos > 0)
363 pos--;
364 }
f5ed5428 365 if (mset) {
366 if (typ == EventTypeLock) {
367 mset->Add(pc, true, epoch0 + i);
368 } else if (typ == EventTypeUnlock) {
369 mset->Del(pc, true);
370 } else if (typ == EventTypeRLock) {
371 mset->Add(pc, false, epoch0 + i);
372 } else if (typ == EventTypeRUnlock) {
373 mset->Del(pc, false);
374 }
375 }
9cf75457 376 for (uptr j = 0; j <= pos; j++)
377 DPrintf2(" #%zu: %zx\n", j, stack[j]);
378 }
379 if (pos == 0 && stack[0] == 0)
380 return;
381 pos++;
382 stk->Init(stack.data(), pos);
383}
384
static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
    uptr addr_min, uptr addr_max) {
  Context *ctx = CTX();
  bool equal_stack = false;
  RacyStacks hash = {};
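  // Suppress the report if we have already printed a race with exactly the
  // same pair of stacks; stacks are compared by their MD5 hashes.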
  if (flags()->suppress_equal_stacks) {
    hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
    for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
      if (hash == ctx->racy_stacks[i]) {
        DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n");
        equal_stack = true;
        break;
      }
    }
  }
  bool equal_address = false;
  RacyAddress ra0 = {addr_min, addr_max};
  if (flags()->suppress_equal_addresses) {
    for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
      RacyAddress ra2 = ctx->racy_addresses[i];
      uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
      uptr minend = min(ra0.addr_max, ra2.addr_max);
      if (maxbeg < minend) {
        DPrintf("ThreadSanitizer: suppressing report as doubled (addr)\n");
        equal_address = true;
        break;
      }
    }
  }
  if (equal_stack || equal_address) {
    if (!equal_stack)
      ctx->racy_stacks.PushBack(hash);
    if (!equal_address)
      ctx->racy_addresses.PushBack(ra0);
    return true;
  }
  return false;
}

static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
    uptr addr_min, uptr addr_max) {
  Context *ctx = CTX();
  if (flags()->suppress_equal_stacks) {
    RacyStacks hash;
    hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
    ctx->racy_stacks.PushBack(hash);
  }
  if (flags()->suppress_equal_addresses) {
    RacyAddress ra0 = {addr_min, addr_max};
    ctx->racy_addresses.PushBack(ra0);
  }
}

bool OutputReport(Context *ctx,
                  const ScopedReport &srep,
                  const ReportStack *suppress_stack) {
  const ReportDesc *rep = srep.GetReport();
  const uptr suppress_pc = IsSuppressed(rep->typ, suppress_stack);
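  // Remember which suppression fired so that future reports matching it can
  // be short-circuited in IsFiredSuppression().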
  if (suppress_pc != 0) {
    FiredSuppression supp = {srep.GetReport()->typ, suppress_pc};
    ctx->fired_suppressions.PushBack(supp);
  }
  if (OnReport(rep, suppress_pc != 0))
    return false;
  PrintReport(rep);
  CTX()->nreported++;
  return true;
}

bool IsFiredSuppression(Context *ctx,
                        const ScopedReport &srep,
                        const StackTrace &trace) {
  for (uptr k = 0; k < ctx->fired_suppressions.Size(); k++) {
    if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
      continue;
    for (uptr j = 0; j < trace.Size(); j++) {
      if (trace.Get(j) == ctx->fired_suppressions[k].pc)
        return true;
    }
  }
  return false;
}

// In programs that use Java we see weird reports like:
// WARNING: ThreadSanitizer: data race (pid=22512)
//   Read of size 8 at 0x7d2b00084318 by thread 100:
//     #0 memcpy tsan_interceptors.cc:406 (foo+0x00000d8dfae3)
//     #1 <null> <null>:0 (0x7f7ad9b40193)
//   Previous write of size 8 at 0x7d2b00084318 by thread 105:
//     #0 strncpy tsan_interceptors.cc:501 (foo+0x00000d8e0919)
//     #1 <null> <null>:0 (0x7f7ad9b42707)
static bool IsJavaNonsense(const ReportDesc *rep) {
  for (uptr i = 0; i < rep->mops.Size(); i++) {
    ReportMop *mop = rep->mops[i];
    ReportStack *frame = mop->stack;
    if (frame != 0 && frame->func != 0
        && (internal_strcmp(frame->func, "memset") == 0
        || internal_strcmp(frame->func, "memcpy") == 0
        || internal_strcmp(frame->func, "memmove") == 0
        || internal_strcmp(frame->func, "strcmp") == 0
        || internal_strcmp(frame->func, "strncpy") == 0
        || internal_strcmp(frame->func, "strlen") == 0
        || internal_strcmp(frame->func, "free") == 0
        || internal_strcmp(frame->func, "pthread_mutex_lock") == 0)) {
      frame = frame->next;
      if (frame == 0
          || (frame->func == 0 && frame->file == 0 && frame->line == 0
          && frame->module == 0)) {
        if (frame) {
          FiredSuppression supp = {rep->typ, frame->pc};
          CTX()->fired_suppressions.PushBack(supp);
        }
        return true;
      }
    }
  }
  return false;
}

void ReportRace(ThreadState *thr) {
  if (!flags()->report_bugs)
    return;
  ScopedInRtl in_rtl;

  if (thr->in_signal_handler)
    Printf("ThreadSanitizer: printing report from signal handler."
           " Can crash or hang.\n");

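  // racy_state[0] describes the current access and racy_state[1] the
  // previous one; if the previous shadow has the freed bit set, report this
  // as a use-after-free instead of a plain race.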
  bool freed = false;
  {
    Shadow s(thr->racy_state[1]);
    freed = s.GetFreedAndReset();
    thr->racy_state[1] = s.raw();
  }

  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
  uptr addr_min = 0;
  uptr addr_max = 0;
  {
    uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
    uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
    uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
    uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
    addr_min = min(a0, a1);
    addr_max = max(e0, e1);
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;
  }

  Context *ctx = CTX();
  Lock l0(&ctx->thread_mtx);

  ScopedReport rep(freed ? ReportTypeUseAfterFree : ReportTypeRace);
  const uptr kMop = 2;
  StackTrace traces[kMop];
  const uptr toppc = TraceTopPC(thr);
  traces[0].ObtainCurrent(thr, toppc);
  if (IsFiredSuppression(ctx, rep, traces[0]))
    return;
  InternalScopedBuffer<MutexSet> mset2(1);
  new(mset2.data()) MutexSet();
  Shadow s2(thr->racy_state[1]);
  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2.data());

  if (HandleRacyStacks(thr, traces, addr_min, addr_max))
    return;

  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, s, &traces[i],
                        i == 0 ? &thr->mset : mset2.data());
  }

  if (flags()->suppress_java && IsJavaNonsense(rep.GetReport()))
    return;

  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = ctx->threads[s.tid()];
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
      continue;
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#ifndef TSAN_GO
  { // NOLINT
    Shadow s(thr->racy_state[1]);
    if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
      rep.AddSleep(thr->last_sleep_stack_id);
  }
#endif

  if (!OutputReport(ctx, rep, rep.GetReport()->mops[0]->stack))
    return;

  AddRacyStacks(thr, traces, addr_min, addr_max);
}

void PrintCurrentStack(ThreadState *thr, uptr pc) {
  StackTrace trace;
  trace.ObtainCurrent(thr, pc);
  PrintStack(SymbolizeStack(trace));
}

}  // namespace __tsan