//===-- sanitizer_allocator_secondary.h -------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Fixed array to store LargeMmapAllocator chunks list, limited to 32K total
// allocated chunks. To be used in memory constrained or not memory hungry cases
// (currently, 32 bits and internal allocator).
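// A note on the footprint: with kMaxNumChunks == 1 << 15 the p_ array below
// occupies 32K * sizeof(uptr) bytes, i.e. 128 KB on 32-bit and 256 KB on
// 64-bit targets, allocated statically as part of the allocator object.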
class LargeMmapAllocatorPtrArrayStatic {
 public:
  INLINE void *Init() { return &p_[0]; }
  INLINE void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); }

 private:
  static const int kMaxNumChunks = 1 << 15;
  uptr p_[kMaxNumChunks];
};

// Much less restricted LargeMmapAllocator chunks list (compared to
// PtrArrayStatic). Backed by an mmaped memory region and can hold up to 1M
// chunks. ReservedAddressRange is used instead of just MAP_NORESERVE to
// achieve the same functionality on Fuchsia, which does not support
// MAP_NORESERVE.
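// The scheme below reserves kMaxNumChunks * sizeof(uptr) bytes of address
// space up front (8 MB on 64-bit targets) but commits it lazily in
// kChunksBlockCount-entry blocks (128 KB at a time on 64-bit), so physical
// memory is committed only as the chunk count grows.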
class LargeMmapAllocatorPtrArrayDynamic {
 public:
  INLINE void *Init() {
    uptr p = address_range_.Init(kMaxNumChunks * sizeof(uptr),
                                 SecondaryAllocatorName);
    CHECK(p);
    return reinterpret_cast<void*>(p);
  }

  INLINE void EnsureSpace(uptr n) {
    CHECK_LT(n, kMaxNumChunks);
    DCHECK(n <= n_reserved_);
    if (UNLIKELY(n == n_reserved_)) {
      address_range_.MapOrDie(
          reinterpret_cast<uptr>(address_range_.base()) +
              n_reserved_ * sizeof(uptr),
          kChunksBlockCount * sizeof(uptr));
      n_reserved_ += kChunksBlockCount;
    }
  }

 private:
  static const int kMaxNumChunks = 1 << 20;
  static const int kChunksBlockCount = 1 << 14;
  ReservedAddressRange address_range_;
  uptr n_reserved_;
};

#if SANITIZER_WORDSIZE == 32
typedef LargeMmapAllocatorPtrArrayStatic DefaultLargeMmapAllocatorPtrArray;
#else
typedef LargeMmapAllocatorPtrArrayDynamic DefaultLargeMmapAllocatorPtrArray;
#endif

// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
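// A minimal usage sketch (illustrative only; in the sanitizer runtimes this
// class is normally plugged in as the secondary allocator of a
// CombinedAllocator rather than called directly):
//
//   LargeMmapAllocator<> secondary;
//   AllocatorStats stats;
//   stats.Init();
//   secondary.Init();
//   void *p = secondary.Allocate(&stats, /*size=*/1 << 20, /*alignment=*/8);
//   ...
//   secondary.Deallocate(&stats, p);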
template <class MapUnmapCallback = NoOpMapUnmapCallback,
          class PtrArrayT = DefaultLargeMmapAllocatorPtrArray>
class LargeMmapAllocator {
 public:
  void InitLinkerInitialized() {
    page_size_ = GetPageSizeCached();
    chunks_ = reinterpret_cast<Header **>(ptr_array_.Init());
  }

  void Init() {
    internal_memset(this, 0, sizeof(*this));
    InitLinkerInitialized();
  }

  void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
    CHECK(IsPowerOfTwo(alignment));
    uptr map_size = RoundUpMapSize(size);
    if (alignment > page_size_)
      map_size += alignment;
    // Overflow.
    if (map_size < size) {
      Report("WARNING: %s: LargeMmapAllocator allocation overflow: "
             "0x%zx bytes with 0x%zx alignment requested\n",
             SanitizerToolName, map_size, alignment);
      return nullptr;
    }
    uptr map_beg = reinterpret_cast<uptr>(
        MmapOrDieOnFatalError(map_size, SecondaryAllocatorName));
    if (!map_beg)
      return nullptr;
    CHECK(IsAligned(map_beg, page_size_));
    MapUnmapCallback().OnMap(map_beg, map_size);
    uptr map_end = map_beg + map_size;
    uptr res = map_beg + page_size_;
    if (res & (alignment - 1))  // Align.
      res += alignment - (res & (alignment - 1));
    CHECK(IsAligned(res, alignment));
    CHECK(IsAligned(res, page_size_));
    CHECK_GE(res + size, map_beg);
    CHECK_LE(res + size, map_end);
    Header *h = GetHeader(res);
    h->size = size;
    h->map_beg = map_beg;
    h->map_size = map_size;
    uptr size_log = MostSignificantSetBitIndex(map_size);
    CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
    {
      SpinMutexLock l(&mutex_);
      ptr_array_.EnsureSpace(n_chunks_);
      uptr idx = n_chunks_++;
      h->chunk_idx = idx;
      chunks_[idx] = h;
      chunks_sorted_ = false;
      stats.n_allocs++;
      stats.currently_allocated += map_size;
      stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
      stats.by_size_log[size_log]++;
      stat->Add(AllocatorStatAllocated, map_size);
      stat->Add(AllocatorStatMapped, map_size);
    }
    return reinterpret_cast<void*>(res);
  }

  void Deallocate(AllocatorStats *stat, void *p) {
    Header *h = GetHeader(p);
    {
      SpinMutexLock l(&mutex_);
      uptr idx = h->chunk_idx;
      CHECK_EQ(chunks_[idx], h);
      CHECK_LT(idx, n_chunks_);
      chunks_[idx] = chunks_[--n_chunks_];
      chunks_[idx]->chunk_idx = idx;
      chunks_sorted_ = false;
      stats.n_frees++;
      stats.currently_allocated -= h->map_size;
      stat->Sub(AllocatorStatAllocated, h->map_size);
      stat->Sub(AllocatorStatMapped, h->map_size);
    }
    MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
    UnmapOrDie(reinterpret_cast<void *>(h->map_beg), h->map_size);
  }

  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (uptr i = 0; i < n_chunks_; i++) {
      Header *h = chunks_[i];
      CHECK_EQ(h->chunk_idx, i);
      res += RoundUpMapSize(h->size);
    }
    return res;
  }

  bool PointerIsMine(const void *p) {
    return GetBlockBegin(p) != nullptr;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    return RoundUpTo(GetHeader(p)->size, page_size_);
  }

  // At least page_size_/2 metadata bytes are available.
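  // (GetHeader() points one page before the user block, so the space between
  // the end of the Header struct and the user block is page_size_ minus a few
  // uptr fields, which is comfortably more than half a page.)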
  void *GetMetaData(const void *p) {
    // Too slow: CHECK_EQ(p, GetBlockBegin(p));
    if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
      Printf("%s: bad pointer %p\n", SanitizerToolName, p);
      CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
    }
    return GetHeader(p) + 1;
  }

  void *GetBlockBegin(const void *ptr) {
    uptr p = reinterpret_cast<uptr>(ptr);
    SpinMutexLock l(&mutex_);
    uptr nearest_chunk = 0;
    // Cache-friendly linear search.
    for (uptr i = 0; i < n_chunks_; i++) {
      uptr ch = reinterpret_cast<uptr>(chunks_[i]);
      if (p < ch) continue;  // p is to the left of this chunk, skip it.
      if (p - ch < p - nearest_chunk)
        nearest_chunk = ch;
    }
    if (!nearest_chunk)
      return nullptr;
    Header *h = reinterpret_cast<Header *>(nearest_chunk);
    CHECK_GE(nearest_chunk, h->map_beg);
    CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
    CHECK_LE(nearest_chunk, p);
    if (h->map_beg + h->map_size <= p)
      return nullptr;
    return GetUser(h);
  }

  void EnsureSortedChunks() {
    if (chunks_sorted_) return;
    Sort(reinterpret_cast<uptr *>(chunks_), n_chunks_);
    for (uptr i = 0; i < n_chunks_; i++)
      chunks_[i]->chunk_idx = i;
    chunks_sorted_ = true;
  }

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(void *ptr) {
    mutex_.CheckLocked();
    uptr p = reinterpret_cast<uptr>(ptr);
    uptr n = n_chunks_;
    if (!n) return nullptr;
    EnsureSortedChunks();
    auto min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
    auto max_mmap_ =
        reinterpret_cast<uptr>(chunks_[n - 1]) + chunks_[n - 1]->map_size;
    if (p < min_mmap_ || p >= max_mmap_)
      return nullptr;
    uptr beg = 0, end = n - 1;
    // This loop is a log(n) lower_bound. It does not check for the exact match
    // to avoid expensive cache-thrashing loads.
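    // For example, with sorted chunk addresses {A, B, C, D} and a pointer p
    // with B <= p < C, the loop narrows [beg, end] until beg indexes B; the
    // check below the loop then verifies that p really falls inside that
    // chunk's [map_beg, map_beg + map_size) range.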
    while (end - beg >= 2) {
      uptr mid = (beg + end) / 2;  // Invariant: mid >= beg + 1
      if (p < reinterpret_cast<uptr>(chunks_[mid]))
        end = mid - 1;  // We are not interested in chunks_[mid].
      else
        beg = mid;  // chunks_[mid] may still be what we want.
    }

    if (beg < end) {
      CHECK_EQ(beg + 1, end);
      // There are 2 chunks left, choose one.
      if (p >= reinterpret_cast<uptr>(chunks_[end]))
        beg = end;
    }

    Header *h = chunks_[beg];
    if (h->map_beg + h->map_size <= p || p < h->map_beg)
      return nullptr;
    return GetUser(h);
  }

247 Printf("Stats: LargeMmapAllocator: allocated %zd times, "
248 "remains %zd (%zd K) max %zd M; by size logs: ",
249 stats
.n_allocs
, stats
.n_allocs
- stats
.n_frees
,
250 stats
.currently_allocated
>> 10, stats
.max_allocated
>> 20);
251 for (uptr i
= 0; i
< ARRAY_SIZE(stats
.by_size_log
); i
++) {
252 uptr c
= stats
.by_size_log
[i
];
254 Printf("%zd:%zd; ", i
, c
);
  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() { mutex_.Lock(); }
  void ForceUnlock() { mutex_.Unlock(); }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    EnsureSortedChunks();  // Avoid doing the sort while iterating.
    for (uptr i = 0; i < n_chunks_; i++) {
      auto t = chunks_[i];
      callback(reinterpret_cast<uptr>(GetUser(t)), arg);
      // Consistency check: verify that the array did not change.
      CHECK_EQ(chunks_[i], t);
      CHECK_EQ(chunks_[i]->chunk_idx, i);
    }
  }

 private:
  struct Header {
    uptr map_beg;
    uptr map_size;
    uptr size;
    uptr chunk_idx;
  };

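  // Layout sketch for one allocation (res denotes the user pointer returned
  // by Allocate(); when the requested alignment exceeds page_size_, res may
  // start further into the mapping than map_beg + page_size_):
  //
  //   res - page_size_                res                res + size
  //   |  Header | metadata ...        |  user data ...   |
  //
  // GetHeader() steps back exactly one page from res, and GetMetaData()
  // returns the bytes right after the Header within that page.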
  Header *GetHeader(uptr p) {
    CHECK(IsAligned(p, page_size_));
    return reinterpret_cast<Header *>(p - page_size_);
  }
  Header *GetHeader(const void *p) {
    return GetHeader(reinterpret_cast<uptr>(p));
  }

  void *GetUser(Header *h) {
    CHECK(IsAligned((uptr)h, page_size_));
    return reinterpret_cast<void *>(reinterpret_cast<uptr>(h) + page_size_);
  }

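  // Note that RoundUpMapSize() below adds an extra page_size_ on top of the
  // page-rounded size; that extra page sits in front of the user block and
  // holds the Header plus per-chunk metadata, which is why GetUser() and
  // GetHeader() translate between the two by exactly one page.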
  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, page_size_) + page_size_;
  }

  uptr page_size_;
  Header **chunks_;
  PtrArrayT ptr_array_;
  uptr n_chunks_;
  bool chunks_sorted_;
  struct Stats {
    uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
  } stats;
  StaticSpinMutex mutex_;
};