//===-- sanitizer_allocator.cc --------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// This allocator is used inside run-times.
//===----------------------------------------------------------------------===//

#include "sanitizer_allocator.h"

#include "sanitizer_allocator_checks.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"

namespace __sanitizer {

// Default allocator names.
const char *PrimaryAllocatorName = "SizeClassAllocator";
const char *SecondaryAllocatorName = "LargeMmapAllocator";

// ThreadSanitizer for Go uses libc malloc/free.
#if SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
# if !SANITIZER_GO
extern "C" void *__libc_memalign(uptr alignment, uptr size);
# endif
extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" void __libc_free(void *ptr);
# else
# include <stdlib.h>
# define __libc_malloc malloc
# if !SANITIZER_GO
static void *__libc_memalign(uptr alignment, uptr size) {
  void *p;
  uptr error = posix_memalign(&p, alignment, size);
  if (error) return nullptr;
  return p;
}
# endif
# define __libc_realloc realloc
# define __libc_free free
# endif

static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  (void)cache;
#if !SANITIZER_GO
  if (alignment == 0)
    return __libc_malloc(size);
  else
    return __libc_memalign(alignment, size);
#else
  // Windows does not provide __libc_memalign/posix_memalign. It provides
  // _aligned_malloc, but blocks allocated that way can't be passed to free();
  // they must be passed to _aligned_free. The InternalAlloc interface does
  // not account for such a requirement. Alignment does not seem to be used
  // anywhere in the runtime, so just call __libc_malloc for now.
  DCHECK_EQ(alignment, 0);
  return __libc_malloc(size);
#endif
}
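
// The aligned-allocation gap described above can, in principle, be bridged
// portably by over-allocating with plain malloc and stashing the original
// pointer just in front of the aligned region, so the block can still be
// released through the malloc-family free. This is only an illustrative
// sketch under that assumption; the helper names are hypothetical and the
// runtime does not use this scheme:
#if 0
static void *AlignedAllocViaMalloc(uptr alignment, uptr size) {
  // Requires a power-of-two alignment; reserve room for the alignment
  // padding plus the saved base pointer.
  char *base = (char *)__libc_malloc(size + alignment + sizeof(void *));
  if (!base) return nullptr;
  uptr aligned = RoundUpTo((uptr)base + sizeof(void *), alignment);
  ((void **)aligned)[-1] = base;  // Remember what malloc() actually returned.
  return (void *)aligned;
}

static void AlignedFreeViaMalloc(void *p) {
  if (p) __libc_free(((void **)p)[-1]);  // Release the saved base pointer.
}
#endif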

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  (void)cache;
  return __libc_realloc(ptr, size);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  (void)cache;
  __libc_free(ptr);
}

InternalAllocator *internal_allocator() {
  return 0;
}

#else  // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)

static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;

static InternalAllocatorCache internal_allocator_cache;
static StaticSpinMutex internal_allocator_cache_mu;

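// Returns the singleton internal allocator, lazily constructed in the
// placeholder buffer above. Initialization uses double-checked locking:
// the fast path is a single acquire load, and only the first caller takes
// internal_alloc_init_mu and publishes the instance with a release store.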
InternalAllocator *internal_allocator() {
  InternalAllocator *internal_allocator_instance =
      reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
  if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {
    SpinMutexLock l(&internal_alloc_init_mu);
    if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
        0) {
      internal_allocator_instance->Init(kReleaseToOSIntervalNever);
      atomic_store(&internal_allocator_initialized, 1, memory_order_release);
    }
  }
  return internal_allocator_instance;
}

static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  if (alignment == 0) alignment = 8;
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Allocate(&internal_allocator_cache, size,
                                          alignment);
  }
  return internal_allocator()->Allocate(cache, size, alignment);
}

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  uptr alignment = 8;
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Reallocate(&internal_allocator_cache, ptr,
                                            size, alignment);
  }
  return internal_allocator()->Reallocate(cache, ptr, size, alignment);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  if (!cache) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
  }
  internal_allocator()->Deallocate(cache, ptr);
}

#endif  // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)

const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;

static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {
  SetAllocatorOutOfMemory();
  Report("FATAL: %s: internal allocator is out of memory trying to allocate "
         "0x%zx bytes\n", SanitizerToolName, requested_size);
  Die();
}

void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
  if (size + sizeof(u64) < size)
    return nullptr;
  void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment);
  if (UNLIKELY(!p))
    ReportInternalAllocatorOutOfMemory(size + sizeof(u64));
  ((u64*)p)[0] = kBlockMagic;
  return (char*)p + sizeof(u64);
}

void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
  if (!addr)
    return InternalAlloc(size, cache);
  if (size + sizeof(u64) < size)
    return nullptr;
  addr = (char*)addr - sizeof(u64);
  size = size + sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
  void *p = RawInternalRealloc(addr, size, cache);
  if (UNLIKELY(!p))
    ReportInternalAllocatorOutOfMemory(size);
  return (char*)p + sizeof(u64);
}

void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
  if (UNLIKELY(CheckForCallocOverflow(count, size))) {
    Report("FATAL: %s: calloc parameters overflow: count * size (%zd * %zd) "
           "cannot be represented in type size_t\n", SanitizerToolName, count,
           size);
    Die();
  }
  void *p = InternalAlloc(count * size, cache);
  if (LIKELY(p))
    internal_memset(p, 0, count * size);
  return p;
}
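
// CheckForCallocOverflow() is provided by sanitizer_allocator_checks.h and
// rejects count/size pairs whose product does not fit in uptr. A minimal
// sketch of that kind of check, for illustration only (the shipped helper
// may be implemented differently):
#if 0
static bool CallocOverflowSketch(uptr count, uptr size) {
  if (size == 0) return false;       // count * 0 can never overflow.
  return count > ((uptr)-1) / size;  // True iff count * size would wrap.
}
#endif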

void InternalFree(void *addr, InternalAllocatorCache *cache) {
  if (!addr)
    return;
  addr = (char*)addr - sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
  ((u64*)addr)[0] = 0;
  RawInternalFree(addr, cache);
}
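
// InternalAlloc/InternalRealloc/InternalFree implement a small integrity
// check: every block carries a u64 header holding kBlockMagic, which
// InternalFree verifies and then clears, so freeing a foreign or stale
// pointer trips the CHECK above. A usage sketch, for illustration only
// (the example function is hypothetical):
#if 0
static void InternalAllocUsageSketch() {
  void *p = InternalAlloc(128, nullptr);   // Header sits 8 bytes before p.
  CHECK_EQ(kBlockMagic, ((u64 *)p)[-1]);
  p = InternalRealloc(p, 256, nullptr);    // Header is carried across realloc.
  InternalFree(p, nullptr);                // Verifies, then zeroes, the magic.
}
#endif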

// LowLevelAllocator
constexpr uptr kLowLevelAllocatorDefaultAlignment = 8;
static uptr low_level_alloc_min_alignment = kLowLevelAllocatorDefaultAlignment;
static LowLevelAllocateCallback low_level_alloc_callback;

void *LowLevelAllocator::Allocate(uptr size) {
  // Align allocation size.
  size = RoundUpTo(size, low_level_alloc_min_alignment);
  if (allocated_end_ - allocated_current_ < (sptr)size) {
    uptr size_to_allocate = Max(size, GetPageSizeCached());
    allocated_current_ =
        (char*)MmapOrDie(size_to_allocate, __func__);
    allocated_end_ = allocated_current_ + size_to_allocate;
    if (low_level_alloc_callback) {
      low_level_alloc_callback((uptr)allocated_current_,
                               size_to_allocate);
    }
  }
  CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
  void *res = allocated_current_;
  allocated_current_ += size;
  return res;
}

void SetLowLevelAllocateMinAlignment(uptr alignment) {
  CHECK(IsPowerOfTwo(alignment));
  low_level_alloc_min_alignment = Max(alignment, low_level_alloc_min_alignment);
}

void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
  low_level_alloc_callback = callback;
}
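
// LowLevelAllocator is a simple bump allocator: it carves allocations out of
// MmapOrDie'd chunks of at least one page, never frees, and is intended for
// small, long-lived runtime metadata. A usage sketch, for illustration only
// (the allocator object and function below are hypothetical):
#if 0
static LowLevelAllocator example_low_level_allocator;

static void LowLevelAllocatorUsageSketch() {
  SetLowLevelAllocateMinAlignment(16);  // Sizes now rounded up to 16 bytes.
  void *a = example_low_level_allocator.Allocate(40);  // Occupies 48 bytes.
  void *b = example_low_level_allocator.Allocate(8);   // Bumped past 'a' in the same chunk.
  (void)a;
  (void)b;  // There is no Deallocate(); the memory lives for the process lifetime.
}
#endif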

// Allocator OOM and other error-handling support.

static atomic_uint8_t allocator_out_of_memory = {0};
static atomic_uint8_t allocator_may_return_null = {0};

bool IsAllocatorOutOfMemory() {
  return atomic_load_relaxed(&allocator_out_of_memory);
}

void SetAllocatorOutOfMemory() {
  atomic_store_relaxed(&allocator_out_of_memory, 1);
}

bool AllocatorMayReturnNull() {
  return atomic_load(&allocator_may_return_null, memory_order_relaxed);
}

void SetAllocatorMayReturnNull(bool may_return_null) {
  atomic_store(&allocator_may_return_null, may_return_null,
               memory_order_relaxed);
}

void PrintHintAllocatorCannotReturnNull() {
  Report("HINT: if you don't care about these errors you may set "
         "allocator_may_return_null=1\n");
}
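
// Tools consult these knobs on allocation-failure paths: with
// allocator_may_return_null set they hand nullptr back to the caller,
// otherwise they print the hint above and abort. A sketch of that pattern,
// for illustration only (the function below is hypothetical):
#if 0
static void *HandleAllocationFailureSketch(uptr requested_size) {
  SetAllocatorOutOfMemory();  // Make IsAllocatorOutOfMemory() observable.
  if (AllocatorMayReturnNull())
    return nullptr;           // The caller sees an ordinary failed allocation.
  Report("ERROR: %s: out of memory allocating 0x%zx bytes\n",
         SanitizerToolName, requested_size);
  PrintHintAllocatorCannotReturnNull();
  Die();
}
#endif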

}  // namespace __sanitizer