//===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;

// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on 64-bit arch, but there it is
// less efficient than SizeClassAllocator64.
//
// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
// be returned by MmapOrDie().
//
// Region:
//   a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,
//                                                             kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
// a ByteMap possible_regions to store the size classes of each Region.
// 0 size class means the region is not used by the allocator.
//
// One Region is used to allocate chunks of a single size class.
// A Region looks like this:
// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
//
// In order to avoid false sharing, the objects of this class should be
// cache-line aligned.
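//
// For a concrete feel (hypothetical parameters, not values mandated by this
// header): with kRegionSizeLog == 20 each region is 1 MiB, and covering a
// full 4 GiB 32-bit address space gives
//   kNumPossibleRegions == (1ULL << 32) / (1 << 20) == 4096,
// so possible_regions is a 4096-entry byte map; entry i holds the size class
// of the region starting at i * kRegionSize, or 0 if that region is not ours.
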
struct SizeClassAllocator32FlagMasks {  // Bit masks.
  enum {
    kRandomShuffleChunks = 1,
    kUseSeparateSizeClassForBatch = 2,
  };
};

template <class Params>
class SizeClassAllocator32 {
 public:
  static const uptr kSpaceBeg = Params::kSpaceBeg;
  static const u64 kSpaceSize = Params::kSpaceSize;
  static const uptr kMetadataSize = Params::kMetadataSize;
  typedef typename Params::SizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = Params::kRegionSizeLog;
  typedef typename Params::ByteMap ByteMap;
  typedef typename Params::MapUnmapCallback MapUnmapCallback;

  COMPILER_CHECK(!SANITIZER_SIGN_EXTENDED_ADDRESSES ||
                 (kSpaceSize & (kSpaceSize - 1)) == 0);

  static const bool kRandomShuffleChunks = Params::kFlags &
      SizeClassAllocator32FlagMasks::kRandomShuffleChunks;
  static const bool kUseSeparateSizeClassForBatch = Params::kFlags &
      SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;

  struct TransferBatch {
    static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
    void SetFromArray(void *batch[], uptr count) {
      DCHECK_LE(count, kMaxNumCached);
      count_ = count;
      for (uptr i = 0; i < count; i++)
        batch_[i] = batch[i];
    }
    uptr Count() const { return count_; }
    void Clear() { count_ = 0; }
    void Add(void *ptr) {
      batch_[count_++] = ptr;
      DCHECK_LE(count_, kMaxNumCached);
    }
    void CopyToArray(void *to_batch[]) const {
      for (uptr i = 0, n = Count(); i < n; i++)
        to_batch[i] = batch_[i];
    }

    // How much memory do we need for a batch containing n elements.
    static uptr AllocationSizeRequiredForNElements(uptr n) {
      return sizeof(uptr) * 2 + sizeof(void *) * n;
    }
    static uptr MaxCached(uptr size) {
      return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(size));
    }

    TransferBatch *next;

   private:
    uptr count_;
    void *batch_[kMaxNumCached];
  };

  static const uptr kBatchSize = sizeof(TransferBatch);
  COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
  COMPILER_CHECK(kBatchSize == SizeClassMap::kMaxNumCachedHint * sizeof(uptr));

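  // Worked arithmetic behind the checks above (assuming a hypothetical
  // SizeClassMap with kMaxNumCachedHint == 16 and a 64-bit uptr): the two
  // words subtracted in kMaxNumCached pay for the `next` and `count_`
  // header fields, so a full TransferBatch occupies
  //   AllocationSizeRequiredForNElements(14) == 2 * 8 + 14 * 8 == 128
  // bytes == kMaxNumCachedHint * sizeof(uptr), a power of two.
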
  static uptr ClassIdToSize(uptr class_id) {
    return (class_id == SizeClassMap::kBatchClassID) ?
        kBatchSize : SizeClassMap::Size(class_id);
  }

  typedef SizeClassAllocator32<Params> ThisT;
  typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;

  void Init(s32 release_to_os_interval_ms) {
    possible_regions.Init();
    internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
  }

  s32 ReleaseToOSIntervalMs() const {
    return kReleaseToOSIntervalNever;
  }

  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
    // This is empty here. Currently only implemented in the 64-bit allocator.
  }

  void ForceReleaseToOS() {
    // Currently implemented in the 64-bit allocator only.
  }

  void *MapWithCallback(uptr size) {
    void *res = MmapOrDie(size, PrimaryAllocatorName);
    MapUnmapCallback().OnMap((uptr)res, size);
    return res;
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
      alignment <= SizeClassMap::kMaxSize;
  }

  void *GetMetaData(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    uptr n = offset / (u32)size;  // 32-bit division
    uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
    return reinterpret_cast<void*>(meta);
  }

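  // Example of the math above (hypothetical numbers): in a 1 MiB region of
  // 64-byte chunks with kMetadataSize == 16, a pointer at offset 100 from
  // the region start has n == 100 / 64 == 1, so its metadata sits at
  // region_end - 2 * 16. Metadata records grow downwards from the region
  // end in reverse chunk order, matching the layout diagram at the top of
  // this file.
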
  NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                        uptr class_id) {
    DCHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    if (sci->free_list.empty()) {
      if (UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
        return nullptr;
      DCHECK(!sci->free_list.empty());
    }
    TransferBatch *b = sci->free_list.front();
    sci->free_list.pop_front();
    return b;
  }

  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
                                TransferBatch *b) {
    DCHECK_LT(class_id, kNumClasses);
    CHECK_GT(b->Count(), 0);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    sci->free_list.push_front(b);
  }

  bool PointerIsMine(const void *p) {
    uptr mem = reinterpret_cast<uptr>(p);
    if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
      mem &= (kSpaceSize - 1);
    if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
      return false;
    return GetSizeClass(p) != 0;
  }

  uptr GetSizeClass(const void *p) {
    return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
  }

  void *GetBlockBegin(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    u32 n = offset / (u32)size;  // 32-bit division
    uptr res = beg + (n * (u32)size);
    return reinterpret_cast<void*>(res);
  }

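  // For instance (same hypothetical 64-byte size class as above): an
  // interior pointer at region offset 100 yields n == 100 / 64 == 1, so
  // GetBlockBegin rounds it down to the chunk starting at region offset 64.
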
  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return ClassIdToSize(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  uptr TotalMemoryUsed() {
    // No need to lock here.
    uptr res = 0;
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        res += kRegionSize;
    return res;
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        UnmapWithCallback((i * kRegionSize), kRegionSize);
  }

  // ForceLock() and ForceUnlock() are needed to implement the Darwin malloc
  // zone introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetSizeClassInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = kNumClasses - 1; i >= 0; i--) {
      GetSizeClassInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr region = 0; region < kNumPossibleRegions; region++)
      if (possible_regions[region]) {
        uptr chunk_size = ClassIdToSize(possible_regions[region]);
        uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
        uptr region_beg = region * kRegionSize;
        for (uptr chunk = region_beg;
             chunk < region_beg + max_chunks_in_region * chunk_size;
             chunk += chunk_size) {
          // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
          callback(chunk, arg);
        }
      }
  }

  static uptr AdditionalSize() { return 0; }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;

 private:
  static const uptr kRegionSize = 1 << kRegionSizeLog;
  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;

  struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) SizeClassInfo {
    StaticSpinMutex mutex;
    IntrusiveList<TransferBatch> free_list;
    u32 rand_state;
  };
  COMPILER_CHECK(sizeof(SizeClassInfo) % kCacheLineSize == 0);

  uptr ComputeRegionId(uptr mem) {
    if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
      mem &= (kSpaceSize - 1);
    const uptr res = mem >> kRegionSizeLog;
    CHECK_LT(res, kNumPossibleRegions);
    return res;
  }

  uptr ComputeRegionBeg(uptr mem) {
    return mem & ~(kRegionSize - 1);
  }

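  // Bit-level example (hypothetical kRegionSizeLog == 20): for
  // mem == 0x12345678, ComputeRegionId gives 0x12345678 >> 20 == 0x123, and
  // ComputeRegionBeg gives 0x12345678 & ~0xFFFFF == 0x12300000.
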
  uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
    DCHECK_LT(class_id, kNumClasses);
    const uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
        kRegionSize, kRegionSize, PrimaryAllocatorName));
    if (UNLIKELY(!res))
      return 0;
    MapUnmapCallback().OnMap(res, kRegionSize);
    stat->Add(AllocatorStatMapped, kRegionSize);
    CHECK(IsAligned(res, kRegionSize));
    possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
    return res;
  }

  SizeClassInfo *GetSizeClassInfo(uptr class_id) {
    DCHECK_LT(class_id, kNumClasses);
    return &size_class_info_array[class_id];
  }

  bool PopulateBatches(AllocatorCache *c, SizeClassInfo *sci, uptr class_id,
                       TransferBatch **current_batch, uptr max_count,
                       uptr *pointers_array, uptr count) {
    // If using a separate class for batches, we do not need to shuffle it.
    if (kRandomShuffleChunks && (!kUseSeparateSizeClassForBatch ||
        class_id != SizeClassMap::kBatchClassID))
      RandomShuffle(pointers_array, count, &sci->rand_state);
    TransferBatch *b = *current_batch;
    for (uptr i = 0; i < count; i++) {
      if (!b) {
        b = c->CreateBatch(class_id, this, (TransferBatch*)pointers_array[i]);
        if (UNLIKELY(!b))
          return false;
        b->Clear();
      }
      b->Add((void*)pointers_array[i]);
      if (b->Count() == max_count) {
        sci->free_list.push_back(b);
        b = nullptr;
      }
    }
    *current_batch = b;
    return true;
  }

  bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                        SizeClassInfo *sci, uptr class_id) {
    const uptr region = AllocateRegion(stat, class_id);
    if (UNLIKELY(!region))
      return false;
    if (kRandomShuffleChunks)
      if (UNLIKELY(sci->rand_state == 0))
        // The random state is initialized from ASLR (PIE) and time.
        sci->rand_state = reinterpret_cast<uptr>(sci) ^ NanoTime();
    const uptr size = ClassIdToSize(class_id);
    const uptr n_chunks = kRegionSize / (size + kMetadataSize);
    const uptr max_count = TransferBatch::MaxCached(size);
    DCHECK_GT(max_count, 0);
    TransferBatch *b = nullptr;
    constexpr uptr kShuffleArraySize = 48;
    uptr shuffle_array[kShuffleArraySize];
    uptr count = 0;
    for (uptr i = region; i < region + n_chunks * size; i += size) {
      shuffle_array[count++] = i;
      if (count == kShuffleArraySize) {
        if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
                                      shuffle_array, count)))
          return false;
        count = 0;
      }
    }
    if (count) {
      if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
                                    shuffle_array, count)))
        return false;
    }
    if (b) {
      CHECK_GT(b->Count(), 0);
      sci->free_list.push_back(b);
    }
    return true;
  }

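  // To illustrate the carving above (hypothetical numbers): with a 1 MiB
  // region, size == 64 and kMetadataSize == 16, n_chunks == (1 << 20) /
  // (64 + 16) == 13107. The loop emits chunk addresses region, region + 64,
  // ..., filling shuffle_array 48 entries at a time and flushing each full
  // group into TransferBatches via PopulateBatches.
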
  ByteMap possible_regions;
  SizeClassInfo size_class_info_array[kNumClasses];
};