//===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;

// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on 64-bit arch, but there it is less
// efficient than SizeClassAllocator64.
//
// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
// be returned by MmapOrDie().
//
// Region:
//   a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,
//                                                             kRegionSize).
// Since the regions are aligned to kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
// a ByteMap possible_regions to store the size classes of each Region.
// 0 size class means the region is not used by the allocator.
//
// One Region is used to allocate chunks of a single size class.
// A Region looks like this:
// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
//
// In order to avoid false sharing the objects of this class should be
// cache-line aligned.
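//
// For example (illustrative numbers only, not tied to a particular platform):
// with kSpaceSize == 2^32 and kRegionSizeLog == 20, kRegionSize is 1 MiB and
// kNumPossibleRegions == 4096, so possible_regions is a 4096-entry byte map
// from region index to the size class that region currently serves.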

struct SizeClassAllocator32FlagMasks {  // Bit masks.
  enum {
    kRandomShuffleChunks = 1,
    kUseSeparateSizeClassForBatch = 2,
  };
};

template <class Params>
class SizeClassAllocator32 {
 private:
  static const u64 kTwoLevelByteMapSize1 =
      (Params::kSpaceSize >> Params::kRegionSizeLog) >> 12;
  static const u64 kMinFirstMapSizeTwoLevelByteMap = 4;

 public:
  using AddressSpaceView = typename Params::AddressSpaceView;
  static const uptr kSpaceBeg = Params::kSpaceBeg;
  static const u64 kSpaceSize = Params::kSpaceSize;
  static const uptr kMetadataSize = Params::kMetadataSize;
  typedef typename Params::SizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = Params::kRegionSizeLog;
  typedef typename Params::MapUnmapCallback MapUnmapCallback;
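  // possible_regions is kept as a flat byte array when the region count is
  // small; otherwise a two-level map is used so that bookkeeping memory is
  // only committed for the parts of the address space actually in use.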
  using ByteMap = typename conditional<
      (kTwoLevelByteMapSize1 < kMinFirstMapSizeTwoLevelByteMap),
      FlatByteMap<(Params::kSpaceSize >> Params::kRegionSizeLog),
                  AddressSpaceView>,
      TwoLevelByteMap<kTwoLevelByteMapSize1, 1 << 12, AddressSpaceView>>::type;

  COMPILER_CHECK(!SANITIZER_SIGN_EXTENDED_ADDRESSES ||
                 (kSpaceSize & (kSpaceSize - 1)) == 0);

  static const bool kRandomShuffleChunks = Params::kFlags &
      SizeClassAllocator32FlagMasks::kRandomShuffleChunks;
  static const bool kUseSeparateSizeClassForBatch = Params::kFlags &
      SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;

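  // A TransferBatch is a fixed-capacity array of chunk pointers plus an
  // intrusive 'next' link; the per-class free lists below are lists of such
  // batches, and whole batches are handed to/from the thread-local caches.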
  struct TransferBatch {
    static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
    void SetFromArray(void *batch[], uptr count) {
      DCHECK_LE(count, kMaxNumCached);
      count_ = count;
      for (uptr i = 0; i < count; i++)
        batch_[i] = batch[i];
    }
    uptr Count() const { return count_; }
    void Clear() { count_ = 0; }
    void Add(void *ptr) {
      batch_[count_++] = ptr;
      DCHECK_LE(count_, kMaxNumCached);
    }
    void CopyToArray(void *to_batch[]) const {
      for (uptr i = 0, n = Count(); i < n; i++)
        to_batch[i] = batch_[i];
    }

    // How much memory do we need for a batch containing n elements.
    static uptr AllocationSizeRequiredForNElements(uptr n) {
      return sizeof(uptr) * 2 + sizeof(void *) * n;
    }
    static uptr MaxCached(uptr size) {
      return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(size));
    }

    TransferBatch *next;

   private:
    uptr count_;
    void *batch_[kMaxNumCached];
  };

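  // kMaxNumCached is kMaxNumCachedHint - 2 because count_ and next occupy two
  // of the kMaxNumCachedHint machine words; the checks below enforce that
  // sizeof(TransferBatch) matches this layout exactly.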
  static const uptr kBatchSize = sizeof(TransferBatch);
  COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
  COMPILER_CHECK(kBatchSize == SizeClassMap::kMaxNumCachedHint * sizeof(uptr));

  static uptr ClassIdToSize(uptr class_id) {
    return (class_id == SizeClassMap::kBatchClassID) ?
        kBatchSize : SizeClassMap::Size(class_id);
  }

  typedef SizeClassAllocator32<Params> ThisT;
  typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;

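  // heap_start is only meaningful for the 64-bit allocator; here it must be 0.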
  void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
    CHECK(!heap_start);
    possible_regions.Init();
    internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
  }

  s32 ReleaseToOSIntervalMs() const {
    return kReleaseToOSIntervalNever;
  }

  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
    // This is empty here. Currently only implemented in 64-bit allocator.
  }

  void ForceReleaseToOS() {
    // Currently implemented in 64-bit allocator only.
  }

  void *MapWithCallback(uptr size) {
    void *res = MmapOrDie(size, PrimaryAllocatorName);
    MapUnmapCallback().OnMap((uptr)res, size);
    return res;
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
      alignment <= SizeClassMap::kMaxSize;
  }

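  // Metadata for chunk n of a region is stored at the end of the region,
  // growing downwards: region_end - (n + 1) * kMetadataSize.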
  void *GetMetaData(const void *p) {
    CHECK(kMetadataSize);
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    uptr n = offset / (u32)size;  // 32-bit division
    uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
    return reinterpret_cast<void*>(meta);
  }

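  // AllocateBatch()/DeallocateBatch() move whole TransferBatches between the
  // thread-local caches and the central per-class free list under the
  // per-class spin lock; the free list is refilled from a new region on demand.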
  NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                        uptr class_id) {
    DCHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    if (sci->free_list.empty()) {
      if (UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
        return nullptr;
      DCHECK(!sci->free_list.empty());
    }
    TransferBatch *b = sci->free_list.front();
    sci->free_list.pop_front();
    return b;
  }

  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
                                TransferBatch *b) {
    DCHECK_LT(class_id, kNumClasses);
    CHECK_GT(b->Count(), 0);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    sci->free_list.push_front(b);
  }

  bool PointerIsMine(const void *p) {
    uptr mem = reinterpret_cast<uptr>(p);
    if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
      mem &= (kSpaceSize - 1);
    if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
      return false;
    return GetSizeClass(p) != 0;
  }

  uptr GetSizeClass(const void *p) {
    return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
  }

  void *GetBlockBegin(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    u32 n = offset / (u32)size;  // 32-bit division
    uptr res = beg + (n * (u32)size);
    return reinterpret_cast<void*>(res);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return ClassIdToSize(GetSizeClass(p));
  }

  static uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  uptr TotalMemoryUsed() {
    // No need to lock here.
    uptr res = 0;
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        res += kRegionSize;
    return res;
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        UnmapWithCallback((i * kRegionSize), kRegionSize);
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetSizeClassInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
    for (int i = kNumClasses - 1; i >= 0; i--) {
      GetSizeClassInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr region = 0; region < kNumPossibleRegions; region++)
      if (possible_regions[region]) {
        uptr chunk_size = ClassIdToSize(possible_regions[region]);
        uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
        uptr region_beg = region * kRegionSize;
        for (uptr chunk = region_beg;
             chunk < region_beg + max_chunks_in_region * chunk_size;
             chunk += chunk_size) {
          // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
          callback(chunk, arg);
        }
      }
  }

  void PrintStats() {}

  static uptr AdditionalSize() { return 0; }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;

 private:
  static const uptr kRegionSize = 1 << kRegionSizeLog;
  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;

  struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) SizeClassInfo {
    StaticSpinMutex mutex;
    IntrusiveList<TransferBatch> free_list;
    u32 rand_state;
  };
  COMPILER_CHECK(sizeof(SizeClassInfo) % kCacheLineSize == 0);

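  // A region's index is simply its address shifted right by kRegionSizeLog;
  // on targets with sign-extended addresses the top bits are masked off first.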
  uptr ComputeRegionId(uptr mem) const {
    if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
      mem &= (kSpaceSize - 1);
    const uptr res = mem >> kRegionSizeLog;
    CHECK_LT(res, kNumPossibleRegions);
    return res;
  }

  uptr ComputeRegionBeg(uptr mem) {
    return mem & ~(kRegionSize - 1);
  }

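  // Maps a fresh kRegionSize-aligned region for class_id and records its size
  // class in possible_regions; returns 0 if the mapping fails.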
  uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
    DCHECK_LT(class_id, kNumClasses);
    const uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
        kRegionSize, kRegionSize, PrimaryAllocatorName));
    if (UNLIKELY(!res))
      return 0;
    MapUnmapCallback().OnMap(res, kRegionSize);
    stat->Add(AllocatorStatMapped, kRegionSize);
    CHECK(IsAligned(res, kRegionSize));
    possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
    return res;
  }

  SizeClassInfo *GetSizeClassInfo(uptr class_id) {
    DCHECK_LT(class_id, kNumClasses);
    return &size_class_info_array[class_id];
  }

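  // Packs an array of chunk addresses into TransferBatches, appending each
  // full batch to the free list; a partially filled batch is handed back via
  // *current_batch so a later call can keep filling it.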
  bool PopulateBatches(AllocatorCache *c, SizeClassInfo *sci, uptr class_id,
                       TransferBatch **current_batch, uptr max_count,
                       uptr *pointers_array, uptr count) {
    // If using a separate class for batches, we do not need to shuffle it.
    if (kRandomShuffleChunks && (!kUseSeparateSizeClassForBatch ||
        class_id != SizeClassMap::kBatchClassID))
      RandomShuffle(pointers_array, count, &sci->rand_state);
    TransferBatch *b = *current_batch;
    for (uptr i = 0; i < count; i++) {
      if (!b) {
        b = c->CreateBatch(class_id, this, (TransferBatch*)pointers_array[i]);
        if (UNLIKELY(!b))
          return false;
        b->Clear();
      }
      b->Add((void*)pointers_array[i]);
      if (b->Count() == max_count) {
        sci->free_list.push_back(b);
        b = nullptr;
      }
    }
    *current_batch = b;
    return true;
  }

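  // Maps one new region, splits it into chunks of this size class and pushes
  // them onto the free list in TransferBatches, shuffling the chunk order in
  // kShuffleArraySize-sized groups when kRandomShuffleChunks is enabled.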
  bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                        SizeClassInfo *sci, uptr class_id) {
    const uptr region = AllocateRegion(stat, class_id);
    if (UNLIKELY(!region))
      return false;
    if (kRandomShuffleChunks)
      if (UNLIKELY(sci->rand_state == 0))
        // The random state is initialized from ASLR (PIE) and time.
        sci->rand_state = reinterpret_cast<uptr>(sci) ^ NanoTime();
    const uptr size = ClassIdToSize(class_id);
    const uptr n_chunks = kRegionSize / (size + kMetadataSize);
    const uptr max_count = TransferBatch::MaxCached(size);
    DCHECK_GT(max_count, 0);
    TransferBatch *b = nullptr;
    constexpr uptr kShuffleArraySize = 48;
    uptr shuffle_array[kShuffleArraySize];
    uptr count = 0;
    for (uptr i = region; i < region + n_chunks * size; i += size) {
      shuffle_array[count++] = i;
      if (count == kShuffleArraySize) {
        if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
                                      shuffle_array, count)))
          return false;
        count = 0;
      }
    }
    if (count) {
      if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
                                    shuffle_array, count)))
        return false;
    }
    if (b) {
      CHECK_GT(b->Count(), 0);
      sci->free_list.push_back(b);
    }
    return true;
  }

  ByteMap possible_regions;
  SizeClassInfo size_class_info_array[kNumClasses];
};