//===-- sanitizer_allocator.cc --------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// This allocator is used inside run-times.
//===----------------------------------------------------------------------===//

#include "sanitizer_allocator.h"

#include "sanitizer_allocator_checks.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"

namespace __sanitizer {

// Default allocator names.
const char *PrimaryAllocatorName = "SizeClassAllocator";
const char *SecondaryAllocatorName = "LargeMmapAllocator";

// ThreadSanitizer for Go uses libc malloc/free.
#if SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
extern "C" void *__libc_memalign(uptr alignment, uptr size);
extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" void __libc_free(void *ptr);
# else
#  include <stdlib.h>
#  define __libc_malloc malloc
static void *__libc_memalign(uptr alignment, uptr size) {
  void *p = nullptr;
  uptr error = posix_memalign(&p, alignment, size);
  if (error) return nullptr;
  return p;
}
#  define __libc_realloc realloc
#  define __libc_free free
# endif

static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  (void)cache;
#if !SANITIZER_GO
  if (alignment == 0)
    return __libc_malloc(size);
  else
    return __libc_memalign(alignment, size);
#else
  // Windows does not provide __libc_memalign/posix_memalign. It provides
  // __aligned_malloc, but the allocated blocks can't be passed to free,
  // they need to be passed to __aligned_free. The InternalAlloc interface
  // does not account for such a requirement. Alignment does not seem to be
  // used anywhere in the runtime, so just call __libc_malloc for now.
  DCHECK_EQ(alignment, 0);
  return __libc_malloc(size);
#endif
}

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  (void)cache;
  return __libc_realloc(ptr, size);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  (void)cache;
  __libc_free(ptr);
}

InternalAllocator *internal_allocator() {
  return 0;
}

#else  // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)

static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;

static InternalAllocatorCache internal_allocator_cache;
static StaticSpinMutex internal_allocator_cache_mu;

InternalAllocator *internal_allocator() {
  InternalAllocator *internal_allocator_instance =
      reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
  if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {
    SpinMutexLock l(&internal_alloc_init_mu);
    if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
        0) {
      internal_allocator_instance->Init(kReleaseToOSIntervalNever);
      atomic_store(&internal_allocator_initialized, 1, memory_order_release);
    }
  }
  return internal_allocator_instance;
}

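// internal_allocator() above lazily constructs the allocator in static
// placeholder storage with double-checked locking, so Init() runs at most
// once. The Raw* helpers below accept an optional per-thread cache; when the
// caller passes a null cache, they fall back to the process-wide
// internal_allocator_cache, serialized by internal_allocator_cache_mu.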
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  if (alignment == 0) alignment = 8;
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Allocate(&internal_allocator_cache, size,
                                          alignment);
  }
  return internal_allocator()->Allocate(cache, size, alignment);
}

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  uptr alignment = 8;
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Reallocate(&internal_allocator_cache, ptr,
                                            size, alignment);
  }
  return internal_allocator()->Reallocate(cache, ptr, size, alignment);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  if (!cache) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
  }
  internal_allocator()->Deallocate(cache, ptr);
}

#endif  // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)

const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;

static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {
  SetAllocatorOutOfMemory();
  Report("FATAL: %s: internal allocator is out of memory trying to allocate "
         "0x%zx bytes\n", SanitizerToolName, requested_size);
  Die();
}

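// Each Internal*() allocation below carries an 8-byte header holding
// kBlockMagic; the caller gets the address just past the header. The magic is
// verified on realloc/free (and cleared on free), so a pointer that did not
// come from InternalAlloc, or that is freed twice, trips a CHECK instead of
// silently corrupting the internal allocator.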
void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
  if (size + sizeof(u64) < size)
    return nullptr;
  void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment);
  if (UNLIKELY(!p))
    ReportInternalAllocatorOutOfMemory(size + sizeof(u64));
  ((u64*)p)[0] = kBlockMagic;
  return (char*)p + sizeof(u64);
}

void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
  if (!addr)
    return InternalAlloc(size, cache);
  if (size + sizeof(u64) < size)
    return nullptr;
  addr = (char*)addr - sizeof(u64);
  size = size + sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
  void *p = RawInternalRealloc(addr, size, cache);
  if (UNLIKELY(!p))
    ReportInternalAllocatorOutOfMemory(size);
  return (char*)p + sizeof(u64);
}

void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
  if (UNLIKELY(CheckForCallocOverflow(count, size))) {
    Report("FATAL: %s: calloc parameters overflow: count * size (%zd * %zd) "
           "cannot be represented in type size_t\n", SanitizerToolName, count,
           size);
    Die();
  }
  void *p = InternalAlloc(count * size, cache);
  if (LIKELY(p))
    internal_memset(p, 0, count * size);
  return p;
}

void InternalFree(void *addr, InternalAllocatorCache *cache) {
  if (!addr)
    return;
  addr = (char*)addr - sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
  ((u64*)addr)[0] = 0;
  RawInternalFree(addr, cache);
}

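// Illustrative usage sketch (not part of the runtime): a caller with no
// per-thread cache can rely on the default cache/alignment arguments declared
// in sanitizer_allocator_internal.h and simply write:
//
//   void *buf = InternalAlloc(4096);  // shared cache, default alignment
//   // ... use buf ...
//   InternalFree(buf);
//
// Passing the same InternalAllocatorCache pointer to both calls avoids taking
// internal_allocator_cache_mu on hot paths.
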
constexpr uptr kLowLevelAllocatorDefaultAlignment = 8;
static uptr low_level_alloc_min_alignment = kLowLevelAllocatorDefaultAlignment;
static LowLevelAllocateCallback low_level_alloc_callback;

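// LowLevelAllocator is a bump-pointer allocator over pages obtained with
// MmapOrDie(); nothing is ever returned to the OS, so it is intended for
// small, long-lived allocations made early in the runtime (flag storage and
// the like).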
void *LowLevelAllocator::Allocate(uptr size) {
  // Align allocation size.
  size = RoundUpTo(size, low_level_alloc_min_alignment);
  if (allocated_end_ - allocated_current_ < (sptr)size) {
    uptr size_to_allocate = Max(size, GetPageSizeCached());
    allocated_current_ = (char*)MmapOrDie(size_to_allocate, __func__);
    allocated_end_ = allocated_current_ + size_to_allocate;
    if (low_level_alloc_callback) {
      low_level_alloc_callback((uptr)allocated_current_, size_to_allocate);
    }
  }
  CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
  void *res = allocated_current_;
  allocated_current_ += size;
  return res;
}

void SetLowLevelAllocateMinAlignment(uptr alignment) {
  CHECK(IsPowerOfTwo(alignment));
  low_level_alloc_min_alignment = Max(alignment, low_level_alloc_min_alignment);
}

void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
  low_level_alloc_callback = callback;
}

// Allocator's OOM and other errors handling support.

static atomic_uint8_t allocator_out_of_memory = {0};
static atomic_uint8_t allocator_may_return_null = {0};

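// allocator_out_of_memory is latched by SetAllocatorOutOfMemory() (see
// ReportInternalAllocatorOutOfMemory above) and queried via
// IsAllocatorOutOfMemory(). allocator_may_return_null is presumably driven by
// the common flag of the same name through SetAllocatorMayReturnNull().
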
bool IsAllocatorOutOfMemory() {
  return atomic_load_relaxed(&allocator_out_of_memory);
}

void SetAllocatorOutOfMemory() {
  atomic_store_relaxed(&allocator_out_of_memory, 1);
}

bool AllocatorMayReturnNull() {
  return atomic_load(&allocator_may_return_null, memory_order_relaxed);
}

void SetAllocatorMayReturnNull(bool may_return_null) {
  atomic_store(&allocator_may_return_null, may_return_null,
               memory_order_relaxed);
}

void PrintHintAllocatorCannotReturnNull() {
  Report("HINT: if you don't care about these errors you may set "
         "allocator_may_return_null=1\n");
}

}  // namespace __sanitizer