//===-- sanitizer_allocator_secondary.h -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Fixed array to store LargeMmapAllocator chunks list, limited to 32K total
// allocated chunks. To be used in memory-constrained or not memory-hungry
// cases (currently, 32 bits and internal allocator).
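// The static footprint is kMaxNumChunks * sizeof(uptr) bytes, e.g. 128 KiB
// with 32-bit pointers.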
class LargeMmapAllocatorPtrArrayStatic {
 public:
  INLINE void *Init() { return &p_[0]; }
  INLINE void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); }
 private:
  static const int kMaxNumChunks = 1 << 15;
  uptr p_[kMaxNumChunks];
};

// Much less restricted LargeMmapAllocator chunks list (compared to
// PtrArrayStatic). Backed by an mmaped memory region and can hold up to 1M
// chunks. ReservedAddressRange is used instead of just MAP_NORESERVE to
// achieve the same functionality on Fuchsia, which does not support
// MAP_NORESERVE.
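// The full reservation is kMaxNumChunks * sizeof(uptr) bytes, e.g. 8 MiB
// with 64-bit pointers; pages are committed on demand in EnsureSpace().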
class LargeMmapAllocatorPtrArrayDynamic {
 public:
  INLINE void *Init() {
    uptr p = address_range_.Init(kMaxNumChunks * sizeof(uptr),
                                 SecondaryAllocatorName);
    CHECK(p);
    return reinterpret_cast<void*>(p);
  }

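  // Grow lazily: when the used prefix reaches the committed part of the
  // range, map the next kChunksBlockCount pointers from the reservation.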
  INLINE void EnsureSpace(uptr n) {
    CHECK_LT(n, kMaxNumChunks);
    DCHECK(n <= n_reserved_);
    if (UNLIKELY(n == n_reserved_)) {
      address_range_.MapOrDie(
          reinterpret_cast<uptr>(address_range_.base()) +
              n_reserved_ * sizeof(uptr),
          kChunksBlockCount * sizeof(uptr));
      n_reserved_ += kChunksBlockCount;
    }
  }

 private:
  static const int kMaxNumChunks = 1 << 20;
  static const int kChunksBlockCount = 1 << 14;
  ReservedAddressRange address_range_;
  uptr n_reserved_;
};

#if SANITIZER_WORDSIZE == 32
typedef LargeMmapAllocatorPtrArrayStatic DefaultLargeMmapAllocatorPtrArray;
#else
typedef LargeMmapAllocatorPtrArrayDynamic DefaultLargeMmapAllocatorPtrArray;
#endif

// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
template <class MapUnmapCallback = NoOpMapUnmapCallback,
          class PtrArrayT = DefaultLargeMmapAllocatorPtrArray,
          class AddressSpaceViewTy = LocalAddressSpaceView>
class LargeMmapAllocator {
 public:
  using AddressSpaceView = AddressSpaceViewTy;
  void InitLinkerInitialized() {
    page_size_ = GetPageSizeCached();
    chunks_ = reinterpret_cast<Header**>(ptr_array_.Init());
  }

  void Init() {
    internal_memset(this, 0, sizeof(*this));
    InitLinkerInitialized();
  }

  void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
    CHECK(IsPowerOfTwo(alignment));
    uptr map_size = RoundUpMapSize(size);
    if (alignment > page_size_)
      map_size += alignment;
    // Overflow.
    if (map_size < size) {
      Report("WARNING: %s: LargeMmapAllocator allocation overflow: "
             "0x%zx bytes with 0x%zx alignment requested\n",
             SanitizerToolName, map_size, alignment);
      return nullptr;
    }
    uptr map_beg = reinterpret_cast<uptr>(
        MmapOrDieOnFatalError(map_size, SecondaryAllocatorName));
    if (!map_beg)
      return nullptr;
    CHECK(IsAligned(map_beg, page_size_));
    MapUnmapCallback().OnMap(map_beg, map_size);
    uptr map_end = map_beg + map_size;
    uptr res = map_beg + page_size_;
    if (res & (alignment - 1))  // Align.
      res += alignment - (res & (alignment - 1));
    CHECK(IsAligned(res, alignment));
    CHECK(IsAligned(res, page_size_));
    CHECK_GE(res + size, map_beg);
    CHECK_LE(res + size, map_end);
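    // Layout: the page immediately preceding |res| holds the Header (with
    // the user metadata right after it); |res| is returned to the user.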
    Header *h = GetHeader(res);
    h->size = size;
    h->map_beg = map_beg;
    h->map_size = map_size;
    uptr size_log = MostSignificantSetBitIndex(map_size);
    CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
    {
      SpinMutexLock l(&mutex_);
      ptr_array_.EnsureSpace(n_chunks_);
      uptr idx = n_chunks_++;
      h->chunk_idx = idx;
      chunks_[idx] = h;
      chunks_sorted_ = false;
      stats.n_allocs++;
      stats.currently_allocated += map_size;
      stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
      stats.by_size_log[size_log]++;
      stat->Add(AllocatorStatAllocated, map_size);
      stat->Add(AllocatorStatMapped, map_size);
    }
    return reinterpret_cast<void*>(res);
  }

  void Deallocate(AllocatorStats *stat, void *p) {
    Header *h = GetHeader(p);
    {
      SpinMutexLock l(&mutex_);
      uptr idx = h->chunk_idx;
      CHECK_EQ(chunks_[idx], h);
      CHECK_LT(idx, n_chunks_);
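      // O(1) removal: move the last chunk into the freed slot. This breaks
      // any previously established sorted order.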
      chunks_[idx] = chunks_[--n_chunks_];
      chunks_[idx]->chunk_idx = idx;
      chunks_sorted_ = false;
      stats.n_frees++;
      stats.currently_allocated -= h->map_size;
      stat->Sub(AllocatorStatAllocated, h->map_size);
      stat->Sub(AllocatorStatMapped, h->map_size);
    }
    MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
    UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
  }

  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (uptr i = 0; i < n_chunks_; i++) {
      Header *h = chunks_[i];
      CHECK_EQ(h->chunk_idx, i);
      res += RoundUpMapSize(h->size);
    }
    return res;
  }

  bool PointerIsMine(const void *p) {
    return GetBlockBegin(p) != nullptr;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    return RoundUpTo(GetHeader(p)->size, page_size_);
  }

  // At least page_size_/2 metadata bytes are available.
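  // (The metadata lives in the header page, right after the Header struct.)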
  void *GetMetaData(const void *p) {
    // Too slow: CHECK_EQ(p, GetBlockBegin(p));
    if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
      Printf("%s: bad pointer %p\n", SanitizerToolName, p);
      CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
    }
    return GetHeader(p) + 1;
  }

  void *GetBlockBegin(const void *ptr) {
    uptr p = reinterpret_cast<uptr>(ptr);
    SpinMutexLock l(&mutex_);
    uptr nearest_chunk = 0;
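    // nearest_chunk == 0 means "no candidate yet": the unsigned distance
    // p - 0 can never beat the distance to a real chunk at or below p.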
    Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
    // Cache-friendly linear search.
    for (uptr i = 0; i < n_chunks_; i++) {
      uptr ch = reinterpret_cast<uptr>(chunks[i]);
      if (p < ch) continue;  // p is to the left of this chunk, skip it.
      if (p - ch < p - nearest_chunk)
        nearest_chunk = ch;
    }
    if (!nearest_chunk)
      return nullptr;
    const Header *h =
        AddressSpaceView::Load(reinterpret_cast<Header *>(nearest_chunk));
    Header *h_ptr = reinterpret_cast<Header *>(nearest_chunk);
    CHECK_GE(nearest_chunk, h->map_beg);
    CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
    CHECK_LE(nearest_chunk, p);
    if (h->map_beg + h->map_size <= p)
      return nullptr;
    return GetUser(h_ptr);
  }

  void EnsureSortedChunks() {
    if (chunks_sorted_) return;
    Header **chunks = AddressSpaceView::LoadWritable(chunks_, n_chunks_);
    Sort(reinterpret_cast<uptr *>(chunks), n_chunks_);
    for (uptr i = 0; i < n_chunks_; i++)
      AddressSpaceView::LoadWritable(chunks[i])->chunk_idx = i;
    chunks_sorted_ = true;
  }

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(void *ptr) {
    mutex_.CheckLocked();
    uptr p = reinterpret_cast<uptr>(ptr);
    uptr n = n_chunks_;
    if (!n) return nullptr;
    EnsureSortedChunks();
    Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
    auto min_mmap_ = reinterpret_cast<uptr>(chunks[0]);
    auto max_mmap_ = reinterpret_cast<uptr>(chunks[n - 1]) +
                     AddressSpaceView::Load(chunks[n - 1])->map_size;
    if (p < min_mmap_ || p >= max_mmap_)
      return nullptr;
    uptr beg = 0, end = n - 1;
    // This loop is a log(n) lower_bound. It does not check for the exact match
    // to avoid expensive cache-thrashing loads.
    while (end - beg >= 2) {
      uptr mid = (beg + end) / 2;  // Invariant: mid >= beg + 1
      if (p < reinterpret_cast<uptr>(chunks[mid]))
        end = mid - 1;  // We are not interested in chunks[mid].
      else
        beg = mid;  // chunks[mid] may still be what we want.
    }

    if (beg < end) {
      CHECK_EQ(beg + 1, end);
      // There are 2 chunks left, choose one.
      if (p >= reinterpret_cast<uptr>(chunks[end]))
        beg = end;
    }

    const Header *h = AddressSpaceView::Load(chunks[beg]);
    Header *h_ptr = chunks[beg];
    if (h->map_beg + h->map_size <= p || p < h->map_beg)
      return nullptr;
    return GetUser(h_ptr);
  }

  void PrintStats() {
    Printf("Stats: LargeMmapAllocator: allocated %zd times, "
           "remains %zd (%zd K) max %zd M; by size logs: ",
           stats.n_allocs, stats.n_allocs - stats.n_frees,
           stats.currently_allocated >> 10, stats.max_allocated >> 20);
    for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
      uptr c = stats.by_size_log[i];
      if (!c) continue;
      Printf("%zd:%zd; ", i, c);
    }
    Printf("\n");
  }

  // ForceLock() and ForceUnlock() are needed to implement the Darwin malloc
  // zone introspection API.
  void ForceLock() {
    mutex_.Lock();
  }

  void ForceUnlock() {
    mutex_.Unlock();
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    EnsureSortedChunks();  // Avoid doing the sort while iterating.
    const Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
    for (uptr i = 0; i < n_chunks_; i++) {
      const Header *t = chunks[i];
      callback(reinterpret_cast<uptr>(GetUser(t)), arg);
      // Consistency check: verify that the array did not change.
      CHECK_EQ(chunks[i], t);
      CHECK_EQ(AddressSpaceView::Load(chunks[i])->chunk_idx, i);
    }
  }

 private:
  struct Header {
    uptr map_beg;
    uptr map_size;
    uptr size;
    uptr chunk_idx;
  };

  Header *GetHeader(uptr p) {
    CHECK(IsAligned(p, page_size_));
    return reinterpret_cast<Header*>(p - page_size_);
  }
  Header *GetHeader(const void *p) {
    return GetHeader(reinterpret_cast<uptr>(p));
  }

  void *GetUser(const Header *h) {
    CHECK(IsAligned((uptr)h, page_size_));
    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
  }

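  // Each mapping needs one extra page in front of the user block for the
  // Header and metadata, hence the "+ page_size_" below.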
  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, page_size_) + page_size_;
  }

  uptr page_size_;
  Header **chunks_;
  PtrArrayT ptr_array_;
  uptr n_chunks_;
  bool chunks_sorted_;
  struct Stats {
    uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
  } stats;
  StaticSpinMutex mutex_;
};