//===-- tsan_rtl_thread.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_sync.h"

namespace __tsan {
// ThreadContext implementation.

ThreadContext::ThreadContext(int tid)
    : ThreadContextBase(tid), thr(), sync(), epoch0(), epoch1() {}
#if !SANITIZER_GO
ThreadContext::~ThreadContext() {
}
#endif

void ThreadContext::OnDead() {
  CHECK_EQ(sync.size(), 0);
}
void ThreadContext::OnJoined(void *arg) {
  ThreadState *caller_thr = static_cast<ThreadState *>(arg);
  AcquireImpl(caller_thr, 0, &sync);
  sync.Reset(&caller_thr->proc()->clock_cache);
}
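// OnCreatedArgs carries the creating thread's state and pc from ThreadCreate()
// into ThreadContext::OnCreated(), which releases the creator's clock into
// `sync` so that the new thread can acquire it when it starts.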
struct OnCreatedArgs {
  ThreadState *thr;
  uptr pc;
};
void ThreadContext::OnCreated(void *arg) {
  OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
  if (!args->thr)  // GCD workers don't have a parent thread.
    return;
  args->thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(args->thr, 0, &sync);
  creation_stack_id = CurrentStackId(args->thr, args->pc);
  StatInc(args->thr, StatThreadMaxTid);
}
void ThreadContext::OnReset() {
  CHECK_EQ(sync.size(), 0);
  uptr trace_p = GetThreadTrace(tid);
  ReleaseMemoryPagesToOS(trace_p, trace_p + TraceSize() * sizeof(Event));
  //!!! ReleaseMemoryToOS(GetThreadTraceHeader(tid), sizeof(Trace));
}
void ThreadContext::OnDetached(void *arg) {
  ThreadState *thr1 = static_cast<ThreadState *>(arg);
  sync.Reset(&thr1->proc()->clock_cache);
}
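// OnStartedArgs carries the stack and TLS ranges from ThreadStart() into
// ThreadContext::OnStarted(), which constructs the ThreadState in place,
// sets up the shadow stack and acquires the creator's clock from `sync`.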
struct OnStartedArgs {
  ThreadState *thr;
  uptr stk_addr;
  uptr stk_size;
  uptr tls_addr;
  uptr tls_size;
};
void ThreadContext::OnStarted(void *arg) {
  OnStartedArgs *args = static_cast<OnStartedArgs *>(arg);
  thr = args->thr;
  // RoundUp so that one trace part does not contain events
  // from different threads.
  epoch0 = RoundUp(epoch1 + 1, kTracePartSize);
  new(thr) ThreadState(ctx, tid, unique_id, epoch0, reuse_count,
      args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
#if !SANITIZER_GO
  thr->shadow_stack = &ThreadTrace(thr->tid)->shadow_stack[0];
  thr->shadow_stack_pos = thr->shadow_stack;
  thr->shadow_stack_end = thr->shadow_stack + kShadowStackSize;
#else
  // Setup dynamic shadow stack.
  const int kInitStackSize = 8;
  thr->shadow_stack = (uptr*)internal_alloc(MBlockShadowStack,
      kInitStackSize * sizeof(uptr));
  thr->shadow_stack_pos = thr->shadow_stack;
  thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
#endif
  if (common_flags()->detect_deadlocks)
    thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id);
  thr->fast_state.SetHistorySize(flags()->history_size);
  // Commit switch to the new part of the trace.
  // TraceAddEvent will reset stack0/mset0 in the new part for us.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);

  thr->fast_synch_epoch = epoch0;
  AcquireImpl(thr, 0, &sync);
  StatInc(thr, StatSyncAcquire);
  sync.Reset(&thr->proc()->clock_cache);
  thr->is_inited = true;
  DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
          "tls_addr=%zx tls_size=%zx\n",
          tid, (uptr)epoch0, args->stk_addr, args->stk_size,
          args->tls_addr, args->tls_size);
}
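// OnFinished runs in the finishing thread: it releases the thread's clock into
// `sync` (so a future joiner can acquire it), records the final epoch and
// tears down deadlock-detector and clock-cache state.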
void ThreadContext::OnFinished() {
#if SANITIZER_GO
  internal_free(thr->shadow_stack);
  thr->shadow_stack = nullptr;
  thr->shadow_stack_pos = nullptr;
  thr->shadow_stack_end = nullptr;
#endif
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, 0, &sync);
  epoch1 = thr->fast_state.epoch();

  if (common_flags()->detect_deadlocks)
    ctx->dd->DestroyLogicalThread(thr->dd_lt);
  thr->clock.ResetCached(&thr->proc()->clock_cache);
  thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
  PlatformCleanUpThreadState(thr);
#if TSAN_COLLECT_STATS
  StatAggregate(ctx->stat, thr->stat);
#endif
}
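// Thread-leak reporting: MaybeReportThreadLeak is run for every registered
// thread and collects threads that finished but were never joined, merging
// entries that share the same creation stack.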
struct ThreadLeak {
  ThreadContext *tctx;
  int count;
};

static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) {
  Vector<ThreadLeak> &leaks = *(Vector<ThreadLeak>*)arg;
  ThreadContext *tctx = static_cast<ThreadContext *>(tctx_base);
  if (tctx->detached || tctx->status != ThreadStatusFinished)
    return;
  for (uptr i = 0; i < leaks.Size(); i++) {
    if (leaks[i].tctx->creation_stack_id == tctx->creation_stack_id) {
      leaks[i].count++;
      return;
    }
  }
  ThreadLeak leak = {tctx, 1};
  leaks.PushBack(leak);
}
#if !SANITIZER_GO
static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
  if (tctx->tid == 0) {
    Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
  } else {
    Printf("ThreadSanitizer: thread T%d %s finished with ignores enabled,"
           " created at:\n", tctx->tid, tctx->name);
    PrintStack(SymbolizeStackId(tctx->creation_stack_id));
  }
  Printf(" One of the following ignores was not ended"
         " (in order of probability)\n");
  for (uptr i = 0; i < set->Size(); i++) {
    Printf(" Ignore was enabled at:\n");
    PrintStack(SymbolizeStackId(set->At(i)));
  }
}
static void ThreadCheckIgnore(ThreadState *thr) {
  if (ctx->after_multithreaded_fork)
    return;
  if (thr->ignore_reads_and_writes)
    ReportIgnoresEnabled(thr->tctx, &thr->mop_ignore_set);
  if (thr->ignore_sync)
    ReportIgnoresEnabled(thr->tctx, &thr->sync_ignore_set);
}
#else
static void ThreadCheckIgnore(ThreadState *thr) {}
#endif
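// ThreadFinalize runs at process shutdown and reports threads that finished
// without being joined or detached (if report_thread_leaks is enabled).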
void ThreadFinalize(ThreadState *thr) {
  ThreadCheckIgnore(thr);
  if (!flags()->report_thread_leaks)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  Vector<ThreadLeak> leaks;
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      MaybeReportThreadLeak, &leaks);
  for (uptr i = 0; i < leaks.Size(); i++) {
    ScopedReport rep(ReportTypeThreadLeak);
    rep.AddThread(leaks[i].tctx, true);
    rep.SetCount(leaks[i].count);
    OutputReport(thr, rep);
  }
}
int ThreadCount(ThreadState *thr) {
  uptr result;
  ctx->thread_registry->GetNumberOfThreads(0, 0, &result);
  return (int)result;
}
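// ThreadCreate registers a new thread in the thread registry and, via
// ThreadContext::OnCreated, releases the creator's clock so that the new
// thread observes everything that happened before its creation.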
int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
  StatInc(thr, StatThreadCreate);
  OnCreatedArgs args = { thr, pc };
  u32 parent_tid = thr ? thr->tid : kInvalidTid;  // No parent for GCD workers.
  int tid =
      ctx->thread_registry->CreateThread(uid, detached, parent_tid, &args);
  DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", parent_tid, tid, uid);
  StatSet(thr, StatThreadMaxAlive, ctx->thread_registry->GetMaxAliveThreads());
  return tid;
}
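// ThreadStart runs in the context of the new thread: it records the stack and
// TLS ranges, imitates writes to them (so stale shadow values there do not
// produce false reports) and starts the thread in the registry.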
void ThreadStart(ThreadState *thr, int tid, tid_t os_id,
                 ThreadType thread_type) {
  uptr stk_addr = 0;
  uptr stk_size = 0;
  uptr tls_addr = 0;
  uptr tls_size = 0;
  if (thread_type != ThreadType::Fiber)
    GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size);

  if (stk_addr && stk_size)
    MemoryRangeImitateWrite(thr, /*pc=*/ 1, stk_addr, stk_size);

  if (tls_addr && tls_size) ImitateTlsWrite(thr, tls_addr, tls_size);

  ThreadRegistry *tr = ctx->thread_registry;
  OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size };
  tr->StartThread(tid, os_id, thread_type, &args);

  tr->Lock();
  thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid);
  tr->Unlock();

  if (ctx->after_multithreaded_fork) {
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, 0);
    ThreadIgnoreSyncBegin(thr, 0);
  }
}
void ThreadFinish(ThreadState *thr) {
  ThreadCheckIgnore(thr);
  StatInc(thr, StatThreadFinish);
  if (thr->stk_addr && thr->stk_size)
    DontNeedShadowFor(thr->stk_addr, thr->stk_size);
  if (thr->tls_addr && thr->tls_size)
    DontNeedShadowFor(thr->tls_addr, thr->tls_size);
  ctx->thread_registry->FinishThread(thr->tid);
}
struct ConsumeThreadContext {
  uptr uid;
  ThreadContextBase *tctx;
};

static bool ConsumeThreadByUid(ThreadContextBase *tctx, void *arg) {
  ConsumeThreadContext *findCtx = (ConsumeThreadContext *)arg;
  if (tctx->user_id == findCtx->uid && tctx->status != ThreadStatusInvalid) {
    if (findCtx->tctx) {
      // Ensure that user_id is unique. If it's not the case we are screwed.
      // Something went wrong before, but now there is no way to recover.
      // Returning a wrong thread is not an option, it may lead to very hard
      // to debug false positives (e.g. if we join a wrong thread).
      Report("ThreadSanitizer: dup thread with used id 0x%zx\n", findCtx->uid);
      Die();
    }
    findCtx->tctx = tctx;
    tctx->user_id = 0;
  }
  return false;
}
int ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid) {
  ConsumeThreadContext findCtx = {uid, nullptr};
  ctx->thread_registry->FindThread(ConsumeThreadByUid, &findCtx);
  int tid = findCtx.tctx ? findCtx.tctx->tid : ThreadRegistry::kUnknownTid;
  DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, tid);
  return tid;
}
void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
  CHECK_LT(tid, kMaxTid);
  DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
  ctx->thread_registry->JoinThread(tid, thr);
}
void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
  CHECK_LT(tid, kMaxTid);
  ctx->thread_registry->DetachThread(tid, thr);
}
void ThreadNotJoined(ThreadState *thr, uptr pc, int tid, uptr uid) {
  CHECK_LT(tid, kMaxTid);
  ctx->thread_registry->SetThreadUserId(tid, uid);
}
void ThreadSetName(ThreadState *thr, const char *name) {
  ctx->thread_registry->SetThreadName(thr->tid, name);
}
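// MemoryAccessRange instruments an access to [addr, addr+size): the range is
// split into an unaligned head, whole 8-byte shadow cells and a tail, and one
// shadow cell group is updated per 8 application bytes.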
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
                       uptr size, bool is_write) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n",
           thr->tid, (void*)pc, (void*)addr,
           (int)size, is_write);

  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsAppMem(addr + size - 1)) {
    Printf("Access to non app mem %zx\n", addr + size - 1);
    DCHECK(IsAppMem(addr + size - 1));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
  if (!IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))) {
    Printf("Bad shadow addr %p (%zx)\n",
           shadow_mem + size * kShadowCnt / 8 - 1, addr + size - 1);
    DCHECK(IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1)));
  }

  StatInc(thr, StatMopRange);

  if (*shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMopRangeRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;

  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  bool unaligned = (addr % kShadowCell) != 0;

  // Handle unaligned beginning, if any.
  for (; addr % kShadowCell && size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
  }
  if (unaligned)
    shadow_mem += kShadowCnt;
  // Handle middle part, if any.
  for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell) {
    int const kAccessSizeLog = 3;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(0, kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
    shadow_mem += kShadowCnt;
  }
  // Handle ending, if any.
  for (; size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
  }
}
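// Fiber support: each fiber gets its own ThreadState; FiberSwitchImpl rewires
// the current Processor to the target state, and FiberSwitch optionally
// establishes happens-before between fibers via Release/Acquire on the fiber
// address (unless FiberSwitchFlagNoSync is set).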
void FiberSwitchImpl(ThreadState *from, ThreadState *to) {
  Processor *proc = from->proc();
  ProcUnwire(proc, from);
  ProcWire(proc, to);
  set_cur_thread(to);
}
ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags) {
  void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadState));
  ThreadState *fiber = static_cast<ThreadState *>(mem);
  internal_memset(fiber, 0, sizeof(*fiber));
  int tid = ThreadCreate(thr, pc, 0, true);
  FiberSwitchImpl(thr, fiber);
  ThreadStart(fiber, tid, 0, ThreadType::Fiber);
  FiberSwitchImpl(fiber, thr);
  return fiber;
}
void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber) {
  FiberSwitchImpl(thr, fiber);
  ThreadFinish(fiber);
  FiberSwitchImpl(fiber, thr);
  internal_free(fiber);
}
void FiberSwitch(ThreadState *thr, uptr pc,
                 ThreadState *fiber, unsigned flags) {
  if (!(flags & FiberSwitchFlagNoSync))
    Release(thr, pc, (uptr)fiber);
  FiberSwitchImpl(thr, fiber);
  if (!(flags & FiberSwitchFlagNoSync))
    Acquire(fiber, pc, (uptr)fiber);
}
}  // namespace __tsan