//===-- sanitizer_allocator.cc --------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// This allocator is used inside run-times.
//===----------------------------------------------------------------------===//

#include "sanitizer_allocator.h"

#include "sanitizer_allocator_checks.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"

namespace __sanitizer {

// ThreadSanitizer for Go uses libc malloc/free.
#if SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
#  if !SANITIZER_GO
extern "C" void *__libc_memalign(uptr alignment, uptr size);
#  endif
extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" void __libc_free(void *ptr);
# else
#  include <stdlib.h>
#  define __libc_malloc malloc
#  if !SANITIZER_GO
static void *__libc_memalign(uptr alignment, uptr size) {
  void *p;
  uptr error = posix_memalign(&p, alignment, size);
  if (error) return nullptr;
  return p;
}
#  endif
#  define __libc_realloc realloc
#  define __libc_free free
# endif

static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  (void)cache;
#if !SANITIZER_GO
  if (alignment == 0)
    return __libc_malloc(size);
  else
    return __libc_memalign(alignment, size);
#else
  // Windows does not provide __libc_memalign/posix_memalign. It provides
  // _aligned_malloc, but the blocks it allocates can't be passed to free();
  // they need to be passed to _aligned_free. The InternalAlloc interface
  // does not account for such a requirement. Alignment does not seem to be
  // used anywhere in the runtime, so just call __libc_malloc for now.
  DCHECK_EQ(alignment, 0);
  return __libc_malloc(size);
#endif
}

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  (void)cache;
  return __libc_realloc(ptr, size);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  (void)cache;
  __libc_free(ptr);
}

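// In this configuration the Raw* functions above go straight to libc, so
// there is no InternalAllocator instance to expose.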
InternalAllocator *internal_allocator() {
  return 0;
}

#else  // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)

static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;

static InternalAllocatorCache internal_allocator_cache;
static StaticSpinMutex internal_allocator_cache_mu;

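// Returns the singleton InternalAllocator, lazily constructed in the static
// placeholder buffer via double-checked locking: the fast path is a single
// acquire load of internal_allocator_initialized, and the release store after
// Init() guarantees that any thread seeing the flag set also sees a fully
// initialized allocator.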
InternalAllocator *internal_allocator() {
  InternalAllocator *internal_allocator_instance =
      reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
  if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {
    SpinMutexLock l(&internal_alloc_init_mu);
    if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
        0) {
      internal_allocator_instance->Init(kReleaseToOSIntervalNever);
      atomic_store(&internal_allocator_initialized, 1, memory_order_release);
    }
  }
  return internal_allocator_instance;
}

static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  if (alignment == 0) alignment = 8;
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Allocate(&internal_allocator_cache, size,
                                          alignment);
  }
  return internal_allocator()->Allocate(cache, size, alignment);
}

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  uptr alignment = 8;
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Reallocate(&internal_allocator_cache, ptr,
                                            size, alignment);
  }
  return internal_allocator()->Reallocate(cache, ptr, size, alignment);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  if (!cache) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
  }
  internal_allocator()->Deallocate(cache, ptr);
}

#endif  // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)

const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;

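// The wrappers below prepend each block with an 8-byte header that stores
// kBlockMagic. The magic is verified (and cleared on free) to catch pointers
// that did not originate from InternalAlloc, and the
// `size + sizeof(u64) < size` checks reject requests that would overflow
// once the header size is added.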
void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
  if (size + sizeof(u64) < size)
    return nullptr;
  void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment);
  if (!p)
    return nullptr;
  ((u64*)p)[0] = kBlockMagic;
  return (char*)p + sizeof(u64);
}

void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
  if (!addr)
    return InternalAlloc(size, cache);
  if (size + sizeof(u64) < size)
    return nullptr;
  addr = (char*)addr - sizeof(u64);
  size = size + sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
  void *p = RawInternalRealloc(addr, size, cache);
  if (!p)
    return nullptr;
  return (char*)p + sizeof(u64);
}

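// For calloc-style allocation the count * size multiplication itself can
// overflow, so it is validated with CheckForCallocOverflow() before
// allocating.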
void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
  if (UNLIKELY(CheckForCallocOverflow(count, size)))
    return InternalAllocator::FailureHandler::OnBadRequest();
  void *p = InternalAlloc(count * size, cache);
  if (p) internal_memset(p, 0, count * size);
  return p;
}

void InternalFree(void *addr, InternalAllocatorCache *cache) {
  if (!addr)
    return;
  addr = (char*)addr - sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
  ((u64*)addr)[0] = 0;
  RawInternalFree(addr, cache);
}

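// Illustrative use of the API above (a sketch, not code from this file;
// assumes the cache object starts out zero-initialized, as sanitizer
// per-thread data does). Passing nullptr for the cache serializes on the
// shared cache mutex:
//
//   InternalAllocatorCache cache;          // hypothetical local cache
//   void *p = InternalAlloc(128, &cache);  // header is hidden from caller
//   p = InternalRealloc(p, 256, &cache);
//   InternalFree(p, &cache);
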
// LowLevelAllocator
static LowLevelAllocateCallback low_level_alloc_callback;

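// Allocate() below is a simple bump allocator: it carves chunks out of pages
// obtained with MmapOrDie and never returns memory to the OS. The optional
// callback lets a tool observe each new mapping.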
void *LowLevelAllocator::Allocate(uptr size) {
  // Align allocation size.
  size = RoundUpTo(size, 8);
  if (allocated_end_ - allocated_current_ < (sptr)size) {
    uptr size_to_allocate = Max(size, GetPageSizeCached());
    allocated_current_ =
        (char*)MmapOrDie(size_to_allocate, __func__);
    allocated_end_ = allocated_current_ + size_to_allocate;
    if (low_level_alloc_callback) {
      low_level_alloc_callback((uptr)allocated_current_,
                               size_to_allocate);
    }
  }
  CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
  void *res = allocated_current_;
  allocated_current_ += size;
  return res;
}

void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
  low_level_alloc_callback = callback;
}

static atomic_uint8_t allocator_out_of_memory = {0};
static atomic_uint8_t allocator_may_return_null = {0};

bool IsAllocatorOutOfMemory() {
  return atomic_load_relaxed(&allocator_out_of_memory);
}

// Prints an error message and kills the program.
void NORETURN ReportAllocatorCannotReturnNull() {
  Report("%s's allocator is terminating the process instead of returning 0\n",
         SanitizerToolName);
  Report("If you don't like this behavior set allocator_may_return_null=1\n");
  CHECK(0);
  Die();
}

bool AllocatorMayReturnNull() {
  return atomic_load(&allocator_may_return_null, memory_order_relaxed);
}

void SetAllocatorMayReturnNull(bool may_return_null) {
  atomic_store(&allocator_may_return_null, may_return_null,
               memory_order_relaxed);
}

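// Two failure-handling policies: ReturnNullOrDieOnFailure respects the
// allocator_may_return_null flag, while DieOnFailure always terminates. Both
// OnOOM() variants record the condition in allocator_out_of_memory first, so
// IsAllocatorOutOfMemory() reports it even when null is returned.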
void *ReturnNullOrDieOnFailure::OnBadRequest() {
  if (AllocatorMayReturnNull())
    return nullptr;
  ReportAllocatorCannotReturnNull();
}

void *ReturnNullOrDieOnFailure::OnOOM() {
  atomic_store_relaxed(&allocator_out_of_memory, 1);
  if (AllocatorMayReturnNull())
    return nullptr;
  ReportAllocatorCannotReturnNull();
}

void NORETURN *DieOnFailure::OnBadRequest() {
  ReportAllocatorCannotReturnNull();
}

void NORETURN *DieOnFailure::OnOOM() {
  atomic_store_relaxed(&allocator_out_of_memory, 1);
  ReportAllocatorCannotReturnNull();
}

}  // namespace __sanitizer