libstdc++-v3/testsuite/20_util/unsynchronized_pool_resource/allocate.cc
// Copyright (C) 2018-2022 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3.  If not see
// <http://www.gnu.org/licenses/>.

// { dg-do run { target c++17 } }

#include <memory_resource>
#include <cstring>
#include <cstdint>  // std::uintptr_t (used in test04)
#include <limits>   // std::numeric_limits (used in test06 and test07)
#include <testsuite_allocator.h>
#include <testsuite_hooks.h>

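// Small allocations are fulfilled from the pools: the first allocation
// replenishes a pool from upstream, a second block of the same size reuses
// that memory, and returning single blocks does not release anything
// upstream until the resource itself is destroyed.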
void
test01()
{
  __gnu_test::memory_resource test_mr;
  {
    std::pmr::unsynchronized_pool_resource r(&test_mr);
    void* p1 = r.allocate(1, 1);
    VERIFY( p1 != nullptr );
    auto n = test_mr.number_of_active_allocations();
    VERIFY( n > 0 );
    // Ensure memory region can be written to (without corrupting heap!)
    std::memset(p1, 0xff, 1);
    void* p2 = r.allocate(1, 1);
    VERIFY( p2 != nullptr );
    VERIFY( p2 != p1 );
    VERIFY( test_mr.number_of_active_allocations() == n );
    std::memset(p2, 0xff, 1);
    r.deallocate(p1, 1, 1);
    // Returning single blocks to the pool doesn't return them upstream:
    VERIFY( test_mr.number_of_active_allocations() == n );
    r.deallocate(p2, 1, 1);
    VERIFY( test_mr.number_of_active_allocations() == n );
  }
  VERIFY( test_mr.number_of_active_allocations() == 0 );
}

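// Allocations that can be satisfied from an already-replenished pool must
// not touch the upstream resource; the upstream pointer is nulled out to
// prove that no upstream call is made.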
void
test02()
{
  struct nullable_memory_resource : public std::pmr::memory_resource
  {
    void*
    do_allocate(std::size_t bytes, std::size_t alignment) override
    { return upstream->allocate(bytes, alignment); }

    void
    do_deallocate(void* p, std::size_t bytes, std::size_t alignment) override
    { upstream->deallocate(p, bytes, alignment); }

    bool
    do_is_equal(const memory_resource& r) const noexcept override
    { return &r == this; }

    std::pmr::memory_resource* upstream = std::pmr::get_default_resource();
  };

  nullable_memory_resource test_mr;
  std::pmr::unsynchronized_pool_resource r(&test_mr);
  void* p1 = r.allocate(8, 1);
  VERIFY( p1 != nullptr );
  std::memset(p1, 0xff, 8);
  test_mr.upstream = nullptr;
  void* p2 = r.allocate(8, 1); // should not need to replenish
  VERIFY( p2 != nullptr );
  VERIFY( p2 != p1 );
  std::memset(p2, 0xff, 8);
  r.deallocate(p1, 8, 1); // should not use upstream
  r.deallocate(p2, 8, 1); // should not use upstream

  // Destructor will return memory upstream, so restore the upstream resource:
  test_mr.upstream = std::pmr::get_default_resource();
}

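// Allocations larger than largest_required_pool_block bypass the pools: they
// are requested from upstream directly, returned upstream on deallocation,
// and the destructor (via release()) frees everything still outstanding.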
void
test03()
{
  __gnu_test::memory_resource test_mr;
  {
    std::pmr::unsynchronized_pool_resource r({10, 16}, &test_mr);
    std::size_t largest_pool = r.options().largest_required_pool_block;
    void* p1 = r.allocate(2 * largest_pool);
    VERIFY( p1 != nullptr );
    const std::size_t n = test_mr.number_of_active_allocations();
    // Allocation of pools + allocation of pmr::vector + oversize allocation:
    VERIFY( n >= 1 );
    std::memset(p1, 0xff, 2 * largest_pool);
    void* p2 = r.allocate(3 * largest_pool);
    VERIFY( p2 != nullptr );
    VERIFY( p2 != p1 );
    VERIFY( test_mr.number_of_active_allocations() == n + 1 );
    std::memset(p2, 0xff, 3 * largest_pool);
    r.deallocate(p1, 2 * largest_pool);
    VERIFY( test_mr.number_of_active_allocations() == n );
    r.deallocate(p2, 3 * largest_pool);
    VERIFY( test_mr.number_of_active_allocations() == n - 1 );
  }
  VERIFY( test_mr.number_of_active_allocations() == 0 );
  {
    std::pmr::unsynchronized_pool_resource r({16, 16}, &test_mr);
    (void) r.allocate(2);
    (void) r.allocate(8);
    (void) r.allocate(16);
    (void) r.allocate(2);
    (void) r.allocate(8);
    (void) r.allocate(16);
    (void) r.allocate(2 * r.options().largest_required_pool_block);
    VERIFY( test_mr.number_of_active_allocations() != 0 );
    // Destructor calls release()
  }
  VERIFY( test_mr.number_of_active_allocations() == 0 );
}

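// Requested alignments are honoured, for both pooled and oversized
// allocations.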
void
test04()
{
  __gnu_test::memory_resource test_mr;
  std::pmr::unsynchronized_pool_resource r({256, 256}, &test_mr);
  // Check alignment
  void* p1 = r.allocate(2, 64);
  VERIFY( (std::uintptr_t)p1 % 64 == 0 );
  void* p2 = r.allocate(2, 128);
  VERIFY( (std::uintptr_t)p2 % 128 == 0 );
  void* p3 = r.allocate(2, 256);
  VERIFY( (std::uintptr_t)p3 % 256 == 0 );
  const std::size_t largest_pool = r.options().largest_required_pool_block;
  void* p4 = r.allocate(2 * largest_pool, 1024);
  VERIFY( (std::uintptr_t)p4 % 1024 == 0 );
  r.deallocate(p1, 2, 64);
  r.deallocate(p2, 2, 128);
  r.deallocate(p3, 2, 256);
  r.deallocate(p4, 2 * largest_pool, 1024);
}

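// Unpooled allocations with a variety of extended alignments.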
void
test05()
{
  __gnu_test::memory_resource test_mr;
  std::pmr::pool_options opts{};
  opts.max_blocks_per_chunk = 1;
  opts.largest_required_pool_block = 1;
  std::pmr::unsynchronized_pool_resource r(opts, &test_mr);
  opts = r.options();
  // Test unpooled allocations
  void** p = new void*[opts.largest_required_pool_block];
  for (unsigned a : {64, 128, 256, 512})
  {
    for (unsigned i = 0; i < opts.largest_required_pool_block; ++i)
      p[i] = r.allocate(i, a);
    for (unsigned i = 0; i < opts.largest_required_pool_block; ++i)
      r.deallocate(p[i], i, a);
  }
  delete[] p;
}

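// Check the size and alignment that oversized allocations request from the
// upstream resource, using an upstream resource that inspects its arguments.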
void
test06()
{
  struct checking_mr : std::pmr::memory_resource
  {
    size_t expected_size = 0;
    size_t expected_alignment = 0;

    struct bad_size { };
    struct bad_alignment { };

    void* do_allocate(std::size_t bytes, std::size_t align)
    {
      // Internal data structures in unsynchronized_pool_resource need to
      // allocate memory, so handle those normally:
      if (align <= alignof(std::max_align_t))
        return std::pmr::new_delete_resource()->allocate(bytes, align);

      // This is a large, unpooled allocation. Check the arguments:
      if (bytes < expected_size)
        throw bad_size();
      else if (align != expected_alignment)
      {
        if (bytes == std::numeric_limits<std::size_t>::max()
            && align == (1 + std::numeric_limits<std::size_t>::max() / 2))
        {
          // Pool resources request bytes=SIZE_MAX && align=bit_floor(SIZE_MAX)
          // when they are unable to meet an allocation request.
        }
        else
          throw bad_alignment();
      }
      // Else just throw, don't really try to allocate:
      throw std::bad_alloc();
    }

    void do_deallocate(void* p, std::size_t bytes, std::size_t align)
    { std::pmr::new_delete_resource()->deallocate(p, bytes, align); }

    bool do_is_equal(const memory_resource&) const noexcept
    { return false; }
  };

  checking_mr c;
  std::pmr::unsynchronized_pool_resource r({1, 1}, &c);
  std::pmr::pool_options opts = r.options();
  const std::size_t largest_pool = opts.largest_required_pool_block;
  const std::size_t large_alignment = 1024;
  // Ensure allocations won't fit in pools:
  VERIFY( largest_pool < large_alignment );

  // Ensure the vector of large allocations has some capacity
  // and won't need to reallocate:
  r.deallocate(r.allocate(largest_pool + 1, 1), largest_pool + 1, 1);

  // Try allocating various very large sizes and ensure the size requested
  // from the upstream allocator is at least as large as needed.
  for (int i = 0; i < std::numeric_limits<std::size_t>::digits; ++i)
  {
    for (auto b : { -63, -5, -1, 0, 1, 3, std::numeric_limits<int>::max() })
    {
      std::size_t bytes = std::size_t(1) << i;
      bytes += b; // For negative b this can wrap to a large positive value.
      c.expected_size = bytes;
      c.expected_alignment = large_alignment;
      bool caught_bad_alloc = false;
      try {
        (void) r.allocate(bytes, large_alignment);
      } catch (const std::bad_alloc&) {
        // expect to catch bad_alloc
        caught_bad_alloc = true;
      } catch (checking_mr::bad_size) {
        VERIFY( ! "allocation from upstream resource had expected size" );
      } catch (checking_mr::bad_alignment) {
        VERIFY( ! "allocation from upstream resource had expected alignment" );
      }
      VERIFY( caught_bad_alloc );
    }
  }
}

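// do_allocate must not throw when it cannot satisfy a request; instead it
// makes an upstream request that is certain to fail, and the upstream
// resource's exception propagates to the caller.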
void
test07()
{
  // Custom exception thrown on expected allocation failure.
  struct very_bad_alloc : std::bad_alloc { };

  struct careful_resource : __gnu_test::memory_resource
  {
    void* do_allocate(std::size_t bytes, std::size_t alignment)
    {
      // Need to allow normal allocations for the pool resource's internal
      // data structures:
      if (alignment < 1024)
        return __gnu_test::memory_resource::do_allocate(bytes, alignment);

      // pmr::unsynchronized_pool_resource::do_allocate is not allowed to
      // throw an exception when asked for an allocation it can't satisfy.
      // The libstdc++ implementation will ask upstream to allocate
      // bytes=SIZE_MAX and alignment=bit_floor(SIZE_MAX) instead of throwing.
      // Verify that we got those values:
      if (bytes != std::numeric_limits<size_t>::max())
        VERIFY( !"upstream allocation should request SIZE_MAX bytes" );
      if (alignment != (1 + std::numeric_limits<size_t>::max() / 2))
        VERIFY( !"upstream allocation should request SIZE_MAX/2 alignment" );

      // A successful failure:
      throw very_bad_alloc();
    }
  };

  careful_resource cr;
  std::pmr::unsynchronized_pool_resource upr(&cr);
  try
  {
    // Try to allocate a ridiculous size (and use a large extended alignment
    // so that careful_resource::do_allocate can distinguish this allocation
    // from any required for the pool resource's internal data structures):
    void* p = upr.allocate(std::size_t(-2), 1024);
    // Should not reach here!
    VERIFY( !"attempt to allocate SIZE_MAX-1 should not have succeeded" );
    throw p;
  }
  catch (const very_bad_alloc&)
  {
    // Should catch this exception from careful_resource::do_allocate
  }
  catch (const std::bad_alloc&)
  {
    VERIFY( !"unsynchronized_pool_resource::do_allocate is not allowed to throw" );
  }
}

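// Small max_blocks_per_chunk values must still yield usable memory
// (PR libstdc++/94160).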
void
test08()
{
  std::pmr::pool_options opts;
  opts.largest_required_pool_block = 64;

  // PR libstdc++/94160
  // max_blocks_per_chunk=1 causes pool resources to return null pointers
  for (int i = 0; i < 8; ++i)
  {
    opts.max_blocks_per_chunk = i;
    std::pmr::unsynchronized_pool_resource upr(opts);
    auto* p = (int*)upr.allocate(4);
    VERIFY( p != nullptr );
    *p = i;
    upr.deallocate(p, 4);
  }
}

int
main()
{
  test01();
  test02();
  test03();
  test04();
  test05();
  test06();
  test07();
  test08();
}