//===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// This class implements a complete memory allocator by using two
// internal allocators:
// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
// When allocating 2^x bytes it should return 2^x aligned chunk.
// PrimaryAllocator is used via a local AllocatorCache.
// SecondaryAllocator can allocate anything, but is not efficient.
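//
// A minimal usage sketch (illustrative only; it assumes some concrete
// PrimaryAllocator type is available, e.g. a SizeClassAllocator64
// instantiation, and passes -1 to disable periodic release to the OS):
//
//   using Allocator = CombinedAllocator<PrimaryAllocator>;
//   static Allocator allocator;
//   static Allocator::AllocatorCache cache;
//   allocator.Init(/*release_to_os_interval_ms=*/-1);
//   allocator.InitCache(&cache);
//   void *p = allocator.Allocate(&cache, /*size=*/128, /*alignment=*/8);
//   ...
//   allocator.Deallocate(&cache, p);
//   allocator.DestroyCache(&cache);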
template <class PrimaryAllocator,
          class LargeMmapAllocatorPtrArray = DefaultLargeMmapAllocatorPtrArray>
class CombinedAllocator {
 public:
  using AllocatorCache = typename PrimaryAllocator::AllocatorCache;
  using SecondaryAllocator =
      LargeMmapAllocator<typename PrimaryAllocator::MapUnmapCallback,
                         LargeMmapAllocatorPtrArray,
                         typename PrimaryAllocator::AddressSpaceView>;

  void InitLinkerInitialized(s32 release_to_os_interval_ms) {
    stats_.InitLinkerInitialized();
    primary_.Init(release_to_os_interval_ms);
    secondary_.InitLinkerInitialized();
  }

  void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
    stats_.Init();
    primary_.Init(release_to_os_interval_ms, heap_start);
    secondary_.Init();
  }

  void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
    // Returning 0 on malloc(0) may break a lot of code.
    if (size == 0)
      size = 1;
    if (size + alignment < size) {
      Report("WARNING: %s: CombinedAllocator allocation overflow: "
             "0x%zx bytes with 0x%zx alignment requested\n",
             SanitizerToolName, size, alignment);
      return nullptr;
    }
    uptr original_size = size;
    // If alignment requirements are to be fulfilled by the frontend allocator
    // rather than by the primary or secondary, passing an alignment lower than
    // or equal to 8 will prevent any further rounding up, as well as the later
    // alignment check.
    if (alignment > 8)
      size = RoundUpTo(size, alignment);
    // The primary allocator should return a 2^x aligned allocation when
    // requested 2^x bytes, hence using the rounded up 'size' when being
    // serviced by the primary (this is no longer true when the primary is
    // using a non-fixed base address). The secondary takes care of the
    // alignment without such requirement, and allocating 'size' would use
    // extraneous memory, so we employ 'original_size'.
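    // For example (illustrative only; actual size classes depend on the
    // primary's configuration): a 24-byte request with 32-byte alignment is
    // rounded up to 32 bytes and served from a 32-byte size class, whose
    // chunks are 32-byte aligned per the primary's contract, whereas a
    // request the primary cannot serve goes to the secondary, which handles
    // the alignment itself, so only 'original_size' bytes are requested.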
    void *res;
    if (primary_.CanAllocate(size, alignment))
      res = cache->Allocate(&primary_, primary_.ClassID(size));
    else
      res = secondary_.Allocate(&stats_, original_size, alignment);
    if (alignment > 8)
      CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
    return res;
  }

  s32 ReleaseToOSIntervalMs() const {
    return primary_.ReleaseToOSIntervalMs();
  }

  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
    primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
  }

  void ForceReleaseToOS() {
    primary_.ForceReleaseToOS();
  }

  void Deallocate(AllocatorCache *cache, void *p) {
    if (!p) return;
    if (primary_.PointerIsMine(p))
      cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
    else
      secondary_.Deallocate(&stats_, p);
  }

  void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
                   uptr alignment) {
    if (!p)
      return Allocate(cache, new_size, alignment);
    if (!new_size) {
      Deallocate(cache, p);
      return nullptr;
    }
    CHECK(PointerIsMine(p));
    uptr old_size = GetActuallyAllocatedSize(p);
    uptr memcpy_size = Min(new_size, old_size);
    void *new_p = Allocate(cache, new_size, alignment);
    if (new_p)
      internal_memcpy(new_p, p, memcpy_size);
    Deallocate(cache, p);
    return new_p;
  }

  bool PointerIsMine(void *p) {
    if (primary_.PointerIsMine(p))
      return true;
    return secondary_.PointerIsMine(p);
  }

  bool FromPrimary(void *p) {
    return primary_.PointerIsMine(p);
  }

  void *GetMetaData(const void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetMetaData(p);
    return secondary_.GetMetaData(p);
  }

  void *GetBlockBegin(const void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBegin(p);
  }

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
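  // (The primary lookup below is identical to GetBlockBegin(); only the
  // secondary provides a separate fast, lock-requiring path.)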
  void *GetBlockBeginFastLocked(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBeginFastLocked(p);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetActuallyAllocatedSize(p);
    return secondary_.GetActuallyAllocatedSize(p);
  }

  uptr TotalMemoryUsed() {
    return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
  }

  void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }

  void InitCache(AllocatorCache *cache) {
    cache->Init(&stats_);
  }

  void DestroyCache(AllocatorCache *cache) {
    cache->Destroy(&primary_, &stats_);
  }

  void SwallowCache(AllocatorCache *cache) {
    cache->Drain(&primary_);
  }

  void GetStats(AllocatorStatCounters s) const {
    stats_.Get(s);
  }

  void PrintStats() {
    primary_.PrintStats();
    secondary_.PrintStats();
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
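  // Note: ForceLock() acquires the primary's lock before the secondary's,
  // and ForceUnlock() releases them in the reverse order, keeping the lock
  // ordering consistent between the two calls.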
  void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
    primary_.ForceLock();
    secondary_.ForceLock();
  }

  void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
    secondary_.ForceUnlock();
    primary_.ForceUnlock();
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
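  // An illustrative (hypothetical) callback, assuming the standard
  // ForEachChunkCallback signature of void(uptr chunk, void *arg):
  //
  //   static void CountChunk(uptr chunk, void *arg) {
  //     ++*reinterpret_cast<uptr *>(arg);
  //   }
  //   uptr n = 0;
  //   allocator.ForceLock();
  //   allocator.ForEachChunk(CountChunk, &n);
  //   allocator.ForceUnlock();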
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    primary_.ForEachChunk(callback, arg);
    secondary_.ForEachChunk(callback, arg);
  }

 private:
  PrimaryAllocator primary_;
  SecondaryAllocator secondary_;
  AllocatorGlobalStats stats_;
};