// (gitweb page banner removed — this file is the TSan runtime memory manager.)
//===-- tsan_mman.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"
18 // May be overriden by front-end.
19 extern "C" void WEAK
__tsan_malloc_hook(void *ptr
, uptr size
) {
24 extern "C" void WEAK
__tsan_free_hook(void *ptr
) {
30 static char allocator_placeholder
[sizeof(Allocator
)] ALIGNED(64);
31 Allocator
*allocator() {
32 return reinterpret_cast<Allocator
*>(&allocator_placeholder
);
35 void InitializeAllocator() {
39 void AlloctorThreadFinish(ThreadState
*thr
) {
40 allocator()->SwallowCache(&thr
->alloc_cache
);
43 static void SignalUnsafeCall(ThreadState
*thr
, uptr pc
) {
44 if (!thr
->in_signal_handler
|| !flags()->report_signal_unsafe
)
48 stack
.ObtainCurrent(thr
, pc
);
49 ScopedReport
rep(ReportTypeSignalUnsafe
);
50 if (!IsFiredSuppression(ctx
, rep
, stack
)) {
52 OutputReport(ctx
, rep
, rep
.GetReport()->stacks
[0]);
56 void *user_alloc(ThreadState
*thr
, uptr pc
, uptr sz
, uptr align
) {
57 CHECK_GT(thr
->in_rtl
, 0);
58 void *p
= allocator()->Allocate(&thr
->alloc_cache
, sz
, align
);
61 MBlock
*b
= new(allocator()->GetMetaData(p
)) MBlock
;
64 b
->alloc_tid
= thr
->unique_id
;
65 b
->alloc_stack_id
= CurrentStackId(thr
, pc
);
66 if (CTX() && CTX()->initialized
) {
67 MemoryRangeImitateWrite(thr
, pc
, (uptr
)p
, sz
);
69 DPrintf("#%d: alloc(%zu) = %p\n", thr
->tid
, sz
, p
);
70 SignalUnsafeCall(thr
, pc
);
74 void user_free(ThreadState
*thr
, uptr pc
, void *p
) {
75 CHECK_GT(thr
->in_rtl
, 0);
76 CHECK_NE(p
, (void*)0);
77 DPrintf("#%d: free(%p)\n", thr
->tid
, p
);
78 MBlock
*b
= (MBlock
*)allocator()->GetMetaData(p
);
81 for (SyncVar
*s
= b
->head
; s
;) {
84 StatInc(thr
, StatSyncDestroyed
);
91 if (CTX() && CTX()->initialized
&& thr
->in_rtl
== 1) {
92 MemoryRangeFreed(thr
, pc
, (uptr
)p
, b
->size
);
95 allocator()->Deallocate(&thr
->alloc_cache
, p
);
96 SignalUnsafeCall(thr
, pc
);
99 void *user_realloc(ThreadState
*thr
, uptr pc
, void *p
, uptr sz
) {
100 CHECK_GT(thr
->in_rtl
, 0);
102 // FIXME: Handle "shrinking" more efficiently,
103 // it seems that some software actually does this.
105 p2
= user_alloc(thr
, pc
, sz
);
109 MBlock
*b
= user_mblock(thr
, p
);
110 internal_memcpy(p2
, p
, min(b
->size
, sz
));
114 user_free(thr
, pc
, p
);
119 MBlock
*user_mblock(ThreadState
*thr
, void *p
) {
120 CHECK_NE(p
, (void*)0);
121 Allocator
*a
= allocator();
122 void *b
= a
->GetBlockBegin(p
);
124 return (MBlock
*)a
->GetMetaData(b
);
127 void invoke_malloc_hook(void *ptr
, uptr size
) {
128 Context
*ctx
= CTX();
129 ThreadState
*thr
= cur_thread();
130 if (ctx
== 0 || !ctx
->initialized
|| thr
->in_rtl
)
132 __tsan_malloc_hook(ptr
, size
);
135 void invoke_free_hook(void *ptr
) {
136 Context
*ctx
= CTX();
137 ThreadState
*thr
= cur_thread();
138 if (ctx
== 0 || !ctx
->initialized
|| thr
->in_rtl
)
140 __tsan_free_hook(ptr
);
143 void *internal_alloc(MBlockType typ
, uptr sz
) {
144 ThreadState
*thr
= cur_thread();
145 CHECK_GT(thr
->in_rtl
, 0);
147 thr
->nomalloc
= 0; // CHECK calls internal_malloc().
150 return InternalAlloc(sz
);
153 void internal_free(void *p
) {
154 ThreadState
*thr
= cur_thread();
155 CHECK_GT(thr
->in_rtl
, 0);
157 thr
->nomalloc
= 0; // CHECK calls internal_malloc().
163 } // namespace __tsan