libstdc++-v3/src/c++17/memory_resource.cc
1 // <memory_resource> implementation -*- C++ -*-
2
3 // Copyright (C) 2018-2021 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
9 // any later version.
10
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
15
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
19
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
24
25 #include <memory_resource>
26 #include <algorithm> // lower_bound, rotate
27 #include <atomic>
28 #include <bit> // has_single_bit, bit_ceil, bit_width
29 #include <new>
30 #if ATOMIC_POINTER_LOCK_FREE != 2
31 # include <bits/std_mutex.h> // std::mutex, std::lock_guard
32 # include <bits/move.h> // std::exchange
33 #endif
34
35 namespace std _GLIBCXX_VISIBILITY(default)
36 {
37 _GLIBCXX_BEGIN_NAMESPACE_VERSION
38 namespace pmr
39 {
40 // This was defined inline in 9.1 and 9.2 so code compiled by those
41 // versions will not use this symbol.
42 memory_resource::~memory_resource() = default;
43
44 namespace
45 {
46 class newdel_res_t final : public memory_resource
47 {
48 void*
49 do_allocate(size_t __bytes, size_t __alignment) override
50 { return ::operator new(__bytes, std::align_val_t(__alignment)); }
51
52 void
53 do_deallocate(void* __p, size_t __bytes, size_t __alignment) noexcept
54 override
55 { ::operator delete(__p, __bytes, std::align_val_t(__alignment)); }
56
57 bool
58 do_is_equal(const memory_resource& __other) const noexcept override
59 { return &__other == this; }
60 };
61
62 class null_res_t final : public memory_resource
63 {
64 void*
65 do_allocate(size_t, size_t) override
66 { std::__throw_bad_alloc(); }
67
68 void
69 do_deallocate(void*, size_t, size_t) noexcept override
70 { }
71
72 bool
73 do_is_equal(const memory_resource& __other) const noexcept override
74 { return &__other == this; }
75 };
76
77 template<typename T>
78 struct constant_init
79 {
80 union {
81 unsigned char unused;
82 T obj;
83 };
84 constexpr constant_init() : obj() { }
85
86 template<typename U>
87 explicit constexpr constant_init(U arg) : obj(arg) { }
88
89 ~constant_init() { /* do nothing, union member is not destroyed */ }
90 };
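// The union above gives the wrapped object constant initialization while
// suppressing its destruction, so the resources defined below stay usable
// even during static destruction. A reduced sketch of the idea (hypothetical
// type `T`, not part of this file):
//
//   __constinit constant_init<T> global{};  // constant-initialized
//   T* p = &global.obj;                     // never destroyed, always valid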
91
92 __constinit constant_init<newdel_res_t> newdel_res{};
93 __constinit constant_init<null_res_t> null_res{};
94 #if ATOMIC_POINTER_LOCK_FREE == 2
95 using atomic_mem_res = atomic<memory_resource*>;
96 # define _GLIBCXX_ATOMIC_MEM_RES_CAN_BE_CONSTANT_INITIALIZED
97 #elif defined(_GLIBCXX_HAS_GTHREADS)
98 // Can't use pointer-width atomics, define a type using a mutex instead:
99 struct atomic_mem_res
100 {
101 # ifdef __GTHREAD_MUTEX_INIT
102 # define _GLIBCXX_ATOMIC_MEM_RES_CAN_BE_CONSTANT_INITIALIZED
103 // std::mutex has constexpr constructor
104 constexpr
105 # endif
106 atomic_mem_res(memory_resource* r) : val(r) { }
107
108 mutex mx;
109 memory_resource* val;
110
111 memory_resource* load()
112 {
113 lock_guard<mutex> lock(mx);
114 return val;
115 }
116
117 memory_resource* exchange(memory_resource* r)
118 {
119 lock_guard<mutex> lock(mx);
120 return std::exchange(val, r);
121 }
122 };
123 #else
124 # define _GLIBCXX_ATOMIC_MEM_RES_CAN_BE_CONSTANT_INITIALIZED
125 // Single-threaded, no need for synchronization
126 struct atomic_mem_res
127 {
128 constexpr
129 atomic_mem_res(memory_resource* r) : val(r) { }
130
131 memory_resource* val;
132
133 memory_resource* load() const
134 {
135 return val;
136 }
137
138 memory_resource* exchange(memory_resource* r)
139 {
140 return std::exchange(val, r);
141 }
142 };
143 #endif // ATOMIC_POINTER_LOCK_FREE == 2
144
145 #ifdef _GLIBCXX_ATOMIC_MEM_RES_CAN_BE_CONSTANT_INITIALIZED
146 __constinit constant_init<atomic_mem_res> default_res{&newdel_res.obj};
147 #else
148 # include "default_resource.h"
149 #endif
150 } // namespace
151
152 memory_resource*
153 new_delete_resource() noexcept
154 { return &newdel_res.obj; }
155
156 memory_resource*
157 null_memory_resource() noexcept
158 { return &null_res.obj; }
159
160 memory_resource*
161 set_default_resource(memory_resource* r) noexcept
162 {
163 if (r == nullptr)
164 r = new_delete_resource();
165 return default_res.obj.exchange(r);
166 }
167
168 memory_resource*
169 get_default_resource() noexcept
170 { return default_res.obj.load(); }
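// A minimal usage sketch of the three accessors above (illustrative only;
// `old` is a hypothetical name, not part of this file):
//
//   std::pmr::memory_resource* old
//     = std::pmr::set_default_resource(std::pmr::null_memory_resource());
//   // A default-constructed polymorphic_allocator now uses the null
//   // resource, so any allocation through it throws bad_alloc.
//   std::pmr::set_default_resource(old);  // restore the previous default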
171
172 // Member functions for std::pmr::monotonic_buffer_resource
173
174 // This was defined inline in 9.1 and 9.2 so code compiled by those
175 // versions will not use this symbol.
176 monotonic_buffer_resource::~monotonic_buffer_resource() { release(); }
177
178 namespace {
179
180 // aligned_size<N> stores the size and alignment of a memory allocation.
181 // The size must be a multiple of N, leaving the low log2(N) bits free
182 // to store the base-2 logarithm of the alignment.
183 // For example, allocate(1024, 32) is stored as 1024 + log2(32) = 1029.
184 template<unsigned N>
185 struct aligned_size
186 {
187 // N must be a power of two
188 static_assert( std::__popcount(N) == 1 );
189
190 static constexpr size_t _S_align_mask = N - 1;
191 static constexpr size_t _S_size_mask = ~_S_align_mask;
192
193 constexpr
194 aligned_size(size_t sz, size_t align) noexcept
195 : value(sz | (std::__bit_width(align) - 1u))
196 {
197 __glibcxx_assert(size() == sz); // sz must be a multiple of N
198 }
199
200 constexpr size_t
201 size() const noexcept
202 { return value & _S_size_mask; }
203
204 constexpr size_t
205 alignment() const noexcept
206 { return size_t(1) << (value & _S_align_mask); }
207
208 size_t value; // size | log2(alignment)
209 };
210
211 // Round n up to a multiple of alignment, which must be a power of two.
212 constexpr size_t aligned_ceil(size_t n, size_t alignment)
213 {
214 return (n + alignment - 1) & ~(alignment - 1);
215 }
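// Worked example of the two helpers above (values only):
//   aligned_ceil(1000, 64)     == 1024
//   aligned_size<64>(1024, 32) stores 1024 | (bit_width(32) - 1) == 1029,
//   so size() == 1024 and alignment() == (1 << 5) == 32.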
216
217 } // namespace
218
219 // Memory allocated by the upstream resource is managed in a linked list
220 // of _Chunk objects. A _Chunk object recording the size and alignment of
221 // the allocated block and a pointer to the previous chunk is placed
222 // at the end of the block.
223 class monotonic_buffer_resource::_Chunk
224 {
225 public:
226 // Return the address and size of a block of memory allocated from __r,
227 // of at least __size bytes and aligned to __align.
228 // Add a new _Chunk to the front of the linked list at __head.
229 static pair<void*, size_t>
230 allocate(memory_resource* __r, size_t __size, size_t __align,
231 _Chunk*& __head)
232 {
233 const size_t __orig_size = __size;
234
235 // Add space for the _Chunk object and round up to 64 bytes.
236 __size = aligned_ceil(__size + sizeof(_Chunk), 64);
237
238 // Check for unsigned wraparound
239 if (__size < __orig_size) [[unlikely]]
240 {
241 // monotonic_buffer_resource::do_allocate is not allowed to throw.
242 // If the required size is too large for size_t then ask the
243 // upstream resource for an impossibly large size and alignment.
244 __size = -1;
245 __align = ~(size_t(-1) >> 1);
246 }
247
248 void* __p = __r->allocate(__size, __align);
249
250 // Add a chunk defined by (__p, __size, __align) to linked list __head.
251 // We know the end of the buffer is suitably-aligned for a _Chunk
252 // because the caller ensured __align is at least alignof(max_align_t).
253 void* const __back = (char*)__p + __size - sizeof(_Chunk);
254 __head = ::new(__back) _Chunk(__size, __align, __head);
255 return { __p, __size - sizeof(_Chunk) };
256 }
257
258 // Return every chunk in linked list __head to resource __r.
259 static void
260 release(_Chunk*& __head, memory_resource* __r) noexcept
261 {
262 _Chunk* __next = __head;
263 __head = nullptr;
264 while (__next)
265 {
266 _Chunk* __ch = __next;
267 __next = __ch->_M_next;
268 size_t __size = __ch->_M_size.size();
269 size_t __align = __ch->_M_size.alignment();
270 void* __start = (char*)(__ch + 1) - __size;
271 __r->deallocate(__start, __size, __align);
272 }
273 }
274
275 private:
276 _Chunk(size_t __size, size_t __align, _Chunk* __next) noexcept
277 : _M_size(__size, __align), _M_next(__next)
278 { }
279
280 aligned_size<64> _M_size;
281 _Chunk* _M_next;
282 };
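// Layout sketch for one _Chunk allocation (illustrative numbers, 64-bit
// target): allocate(r, 1000, 16, head) rounds 1000 + sizeof(_Chunk) up to
// 1024, asks r for 1024 bytes, places the 16-byte _Chunk footer in the last
// 16 bytes of that buffer, and returns {p, 1008} for the caller to use.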
283
284 void
285 monotonic_buffer_resource::_M_new_buffer(size_t bytes, size_t alignment)
286 {
287 const size_t n = std::max(bytes, _M_next_bufsiz);
288 const size_t m = aligned_ceil(alignment, alignof(std::max_align_t));
289 auto [p, size] = _Chunk::allocate(_M_upstream, n, m, _M_head);
290 _M_current_buf = p;
291 _M_avail = size;
292 _M_next_bufsiz *= _S_growth_factor;
293 }
294
295 void
296 monotonic_buffer_resource::_M_release_buffers() noexcept
297 {
298 _Chunk::release(_M_head, _M_upstream);
299 }
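// A minimal usage sketch of monotonic_buffer_resource (illustrative only;
// `buf` and `names` are hypothetical names, not part of this file):
//
//   char buf[1024];
//   std::pmr::monotonic_buffer_resource mbr(buf, sizeof(buf));
//   std::pmr::vector<std::pmr::string> names(&mbr);
//   names.emplace_back("pooled");  // served from buf while space remains;
//                                  // later growth calls _M_new_buffer, which
//                                  // allocates a _Chunk from the upstream.
//   mbr.release();                 // hands every chunk back in one step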
300
301 // Helper types for synchronized_pool_resource & unsynchronized_pool_resource
302
303 namespace {
304
305 // Simple bitset with runtime size.
306 // Tracks which blocks in a pool chunk are used/unused.
307 struct bitset
308 {
309 using word = uint64_t;
310 using size_type // unsigned integer type with no more than 32 bits
311 = conditional_t<numeric_limits<size_t>::digits <= 32, size_t, uint32_t>;
312
313 static constexpr unsigned bits_per_word = numeric_limits<word>::digits;
314
315 // The bitset does not own p
316 bitset(void* p, size_type num_blocks)
317 : _M_words(static_cast<word*>(p)), _M_size(num_blocks),
318 _M_next_word(0)
319 {
320 const size_type last_word = num_blocks / bits_per_word;
321 __builtin_memset(_M_words, 0, last_word * sizeof(*_M_words));
322 // Set bits beyond _M_size, so they are not treated as free blocks:
323 if (const size_type extra_bits = num_blocks % bits_per_word)
324 _M_words[last_word] = word(-1) << extra_bits;
325 __glibcxx_assert( empty() );
326 __glibcxx_assert( free() == num_blocks );
327 }
328
329 bitset() = default;
330 ~bitset() = default;
331
332 // Number of blocks
333 size_type size() const noexcept { return _M_size; }
334
335 // Number of free blocks (unset bits)
336 size_type free() const noexcept
337 {
338 size_type n = 0;
339 for (size_type i = _M_next_word; i < nwords(); ++i)
340 n += (bits_per_word - std::__popcount(_M_words[i]));
341 return n;
342 }
343
344 // True if there are no free blocks (all bits are set)
345 bool full() const noexcept
346 {
347 if (_M_next_word >= nwords())
348 return true;
349 // For a bitset with size() > (max_blocks_per_chunk() - 64) we will
350 // have nwords() == (max_word_index() + 1) and so _M_next_word will
351 // never be equal to nwords().
352 // In that case, check if the last word is full:
353 if (_M_next_word == max_word_index())
354 return _M_words[_M_next_word] == word(-1);
355 return false;
356 }
357
358 // True if size() != 0 and all blocks are free (no bits are set).
359 bool empty() const noexcept
360 {
361 if (nwords() == 0)
362 return false;
363 if (_M_next_word != 0)
364 return false;
365 for (size_type i = 0; i < nwords() - 1; ++i)
366 if (_M_words[i] != 0)
367 return false;
368 word last = _M_words[nwords() - 1];
369 if (const size_type extra_bits = size() % bits_per_word)
370 last <<= (bits_per_word - extra_bits);
371 return last == 0;
372 }
373
374 void reset() noexcept
375 {
376 _M_words = nullptr;
377 _M_size = _M_next_word = 0;
378 }
379
380 bool operator[](size_type n) const noexcept
381 {
382 __glibcxx_assert( n < _M_size );
383 const size_type wd = n / bits_per_word;
384 const word bit = word(1) << (n % bits_per_word);
385 return _M_words[wd] & bit;
386 }
387
388 size_type get_first_unset() noexcept
389 {
390 const size_type wd = _M_next_word;
391 if (wd < nwords())
392 {
393 const size_type n = std::__countr_one(_M_words[wd]);
394 if (n < bits_per_word)
395 {
396 const word bit = word(1) << n;
397 _M_words[wd] |= bit;
398 update_next_word();
399 return (wd * bits_per_word) + n;
400 }
401 }
402 return size_type(-1);
403 }
404
405 void set(size_type n) noexcept
406 {
407 __glibcxx_assert( n < _M_size );
408 const size_type wd = n / bits_per_word;
409 const word bit = word(1) << (n % bits_per_word);
410 _M_words[wd] |= bit;
411 if (wd == _M_next_word)
412 update_next_word();
413 }
414
415 void clear(size_type n) noexcept
416 {
417 __glibcxx_assert( n < _M_size );
418 const size_type wd = n / bits_per_word;
419 const word bit = word(1) << (n % bits_per_word);
420 _M_words[wd] &= ~bit;
421 if (wd < _M_next_word)
422 _M_next_word = wd;
423 }
424
425 // Update _M_next_word to refer to the next word with an unset bit.
426 // The size of the _M_next_word bit-field means it cannot represent
427 // the maximum possible nwords() value. To avoid wraparound to zero
428 // this function saturates _M_next_word at max_word_index().
429 void update_next_word() noexcept
430 {
431 size_type next = _M_next_word;
432 while (_M_words[next] == word(-1) && ++next < nwords())
433 { }
434 _M_next_word = std::min(next, max_word_index());
435 }
436
437 void swap(bitset& b) noexcept
438 {
439 std::swap(_M_words, b._M_words);
440 size_type tmp = _M_size;
441 _M_size = b._M_size;
442 b._M_size = tmp;
443 tmp = _M_next_word;
444 _M_next_word = b._M_next_word;
445 b._M_next_word = tmp;
446 }
447
448 size_type nwords() const noexcept
449 { return (_M_size + bits_per_word - 1) / bits_per_word; }
450
451 // Maximum value that can be stored in bitset::_M_size member (approx 500k)
452 static constexpr size_type max_blocks_per_chunk() noexcept
453 { return (size_type(1) << _S_size_digits) - 1; }
454
455 // Maximum value that can be stored in bitset::_M_next_word member (8191).
456 static constexpr size_type max_word_index() noexcept
457 { return (max_blocks_per_chunk() + bits_per_word - 1) / bits_per_word; }
458
459 word* data() const noexcept { return _M_words; }
460
461 private:
462 static constexpr unsigned _S_size_digits
463 = (numeric_limits<size_type>::digits
464 + std::__bit_width(bits_per_word) - 1) / 2;
465
466 word* _M_words = nullptr;
467 // Number of blocks represented by the bitset:
468 size_type _M_size : _S_size_digits;
469 // Index of the first word with unset bits:
470 size_type _M_next_word : numeric_limits<size_type>::digits - _S_size_digits;
471 };
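// Sketch of how a chunk drives this bitset (illustrative only; `words` and
// `bs` are hypothetical names, not part of this file):
//
//   bitset::word words[2];          // room for up to 128 block bits
//   bitset bs(words, 100);          // track 100 blocks, all initially free
//   auto n = bs.get_first_unset();  // returns 0 and marks block 0 as used
//   bs.clear(n);                    // block 0 becomes free again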
472
473 // A "chunk" belonging to a pool.
474 // A chunk contains many blocks of the same size.
475 // Derived from bitset to reuse its tail-padding.
476 struct chunk : bitset
477 {
478 chunk() = default;
479
480 // p points to the start of a chunk of size bytes in length.
481 // The chunk has space for n blocks, followed by a bitset of size n
482 // that begins at address words.
483 // This object does not own p or words, the caller will free it.
484 chunk(void* p, uint32_t bytes, void* words, size_t n)
485 : bitset(words, n),
486 _M_bytes(bytes),
487 _M_p(static_cast<std::byte*>(p))
488 { __glibcxx_assert(bytes <= chunk::max_bytes_per_chunk()); }
489
490 chunk(chunk&& c) noexcept
491 : bitset(std::move(c)), _M_bytes(c._M_bytes), _M_p(c._M_p)
492 {
493 c._M_bytes = 0;
494 c._M_p = nullptr;
495 c.reset();
496 }
497
498 chunk& operator=(chunk&& c) noexcept
499 {
500 swap(c);
501 return *this;
502 }
503
504 // Allocated size of chunk:
505 uint32_t _M_bytes = 0;
506 // Start of allocated chunk:
507 std::byte* _M_p = nullptr;
508
510 // True if there are no free blocks in this chunk
510 using bitset::full;
511 // Number of blocks in this chunk
512 using bitset::size;
513
514 static constexpr uint32_t max_bytes_per_chunk() noexcept
515 { return numeric_limits<decltype(_M_bytes)>::max(); }
516
517 // Determine if block with address p and size block_size
518 // is contained within this chunk.
519 bool owns(void* p, size_t block_size)
520 {
521 std::less_equal<uintptr_t> less_equal;
522 return less_equal(reinterpret_cast<uintptr_t>(_M_p),
523 reinterpret_cast<uintptr_t>(p))
524 && less_equal(reinterpret_cast<uintptr_t>(p) + block_size,
525 reinterpret_cast<uintptr_t>(bitset::data()));
526 }
527
528 // Allocate next available block of block_size bytes from this chunk.
529 void* reserve(size_t block_size) noexcept
530 {
531 const size_type n = get_first_unset();
532 if (n == size_type(-1))
533 return nullptr;
534 return _M_p + (n * block_size);
535 }
536
537 // Deallocate a single block of block_size bytes
538 void release(void* vp, size_t block_size)
539 {
540 __glibcxx_assert( owns(vp, block_size) );
541 const size_t offset = static_cast<std::byte*>(vp) - _M_p;
542 // Pointer is correctly aligned for a block in this chunk:
543 __glibcxx_assert( (offset % block_size) == 0 );
544 // Block has been allocated:
545 __glibcxx_assert( (*this)[offset / block_size] == true );
546 bitset::clear(offset / block_size);
547 }
548
549 // Deallocate a single block if it belongs to this chunk.
550 bool try_release(void* p, size_t block_size)
551 {
552 if (!owns(p, block_size))
553 return false;
554 release(p, block_size);
555 return true;
556 }
557
558 void swap(chunk& c) noexcept
559 {
560 std::swap(_M_bytes, c._M_bytes);
561 std::swap(_M_p, c._M_p);
562 bitset::swap(c);
563 }
564
565 bool operator<(const chunk& c) const noexcept
566 { return std::less<const void*>{}(_M_p, c._M_p); }
567
568 friend void swap(chunk& l, chunk& r) { l.swap(r); }
569
570 friend bool operator<(const void* p, const chunk& c) noexcept
571 { return std::less<const void*>{}(p, c._M_p); }
572 };
573
574 // For 64-bit pointers this is the size of three pointers i.e. 24 bytes.
575 // For 32-bit and 20-bit pointers it's four pointers (16 bytes).
576 // For 16-bit pointers it's five pointers (10 bytes).
577 // TODO pad 64-bit to 4*sizeof(void*) to avoid splitting across cache lines?
578 static_assert(sizeof(chunk)
579 == sizeof(bitset::size_type) + sizeof(uint32_t) + 2 * sizeof(void*));
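// Layout sketch for one chunk created by _Pool::replenish below (illustrative
// numbers only): for a 64-byte block size and 16 blocks, the upstream
// allocation is laid out as
//
//   [ 16 * 64 bytes of blocks ][ one 8-byte bitset word ]
//    ^ _M_p                     ^ bitset::data()
//
// which is why chunk::owns() tests a pointer against [_M_p, data()).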
580
581 // An oversized allocation that doesn't fit in a pool.
582 struct big_block
583 {
584 // The minimum size of a big block.
585 // All big_block allocations will be a multiple of this value.
586 // Use bit_ceil to get a power of two even for e.g. 20-bit size_t.
587 static constexpr size_t min = __bit_ceil(numeric_limits<size_t>::digits);
588
589 constexpr
590 big_block(size_t bytes, size_t alignment)
591 : _M_size(alloc_size(bytes), alignment)
592 {
593 // Check for unsigned wraparound
594 if (size() < bytes) [[unlikely]]
595 {
596 // (sync|unsync)_pool_resource::do_allocate is not allowed to throw.
597 // If the required size is too large for size_t then ask the
598 // upstream resource for an impossibly large size and alignment.
599 _M_size.value = -1;
600 }
601 }
602
603 void* pointer = nullptr;
604 aligned_size<min> _M_size;
605
606 size_t size() const noexcept
607 {
608 if (_M_size.value == size_t(-1)) [[unlikely]]
609 return size_t(-1);
610 return _M_size.size();
611 }
612
613 size_t align() const noexcept
614 { return _M_size.alignment(); }
615
616 // Calculate size to be allocated instead of requested number of bytes.
617 // The requested value will be rounded up to a multiple of big_block::min,
618 // so the low bits are all zero and can be used to hold the alignment.
619 static constexpr size_t alloc_size(size_t bytes) noexcept
620 { return aligned_ceil(bytes, min); }
621
622 friend bool operator<(void* p, const big_block& b) noexcept
623 { return less<void*>{}(p, b.pointer); }
624
625 friend bool operator<(const big_block& b, void* p) noexcept
626 { return less<void*>{}(b.pointer, p); }
627 };
628
629 static_assert(sizeof(big_block) == (2 * sizeof(void*)));
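// Worked example of the encoding above (values only, 64-bit size_t, so
// big_block::min == 64): big_block(1000, 16) stores alloc_size(1000) == 1024
// together with log2(16) == 4 in _M_size, so size() == 1024 and align() == 16.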
630
631 } // namespace
632
633 // A pool that serves blocks of a particular size.
634 // Each pool manages a number of chunks.
635 // When a pool is full it is replenished by allocating another chunk.
636 struct __pool_resource::_Pool
637 {
638 // Smallest supported block size
639 static constexpr unsigned _S_min_block
640 = std::max(sizeof(void*), alignof(bitset::word));
641
642 _Pool(size_t __block_size, size_t __blocks_per_chunk)
643 : _M_chunks(),
644 _M_block_sz(__block_size),
645 _M_blocks_per_chunk(__blocks_per_chunk)
646 { }
647
648 // Must call release(r) before destruction!
649 ~_Pool() { __glibcxx_assert(_M_chunks.empty()); }
650
651 _Pool(_Pool&&) noexcept = default;
652 _Pool& operator=(_Pool&&) noexcept = default;
653
654 // Size of blocks in this pool
655 size_t block_size() const noexcept
656 { return _M_block_sz; }
657
658 // Allocate a block if the pool is not full, otherwise return null.
659 void* try_allocate() noexcept
660 {
661 const size_t blocksz = block_size();
662 if (!_M_chunks.empty())
663 {
664 auto& last = _M_chunks.back();
665 if (void* p = last.reserve(blocksz))
666 return p;
667 // TODO last is full, so move another chunk to the back instead?
668 for (auto it = _M_chunks.begin(); it != &last; ++it)
669 if (void* p = it->reserve(blocksz))
670 return p;
671 }
672 return nullptr;
673 }
674
675 // Allocate a block from the pool, replenishing from upstream if needed.
676 void* allocate(memory_resource* r, const pool_options& opts)
677 {
678 if (void* p = try_allocate())
679 return p;
680 replenish(r, opts);
681 return _M_chunks.back().reserve(block_size());
682 }
683
684 // Return a block to the pool.
685 bool deallocate(memory_resource*, void* p)
686 {
687 const size_t blocksz = block_size();
688 if (__builtin_expect(!_M_chunks.empty(), true))
689 {
690 auto& last = _M_chunks.back();
691 if (last.try_release(p, blocksz))
692 return true;
693 auto it = std::upper_bound(_M_chunks.begin(), &last, p);
694 if (it != _M_chunks.begin())
695 {
696 it--;
697 if (it->try_release(p, blocksz))
698 // If chunk is empty could return to upstream, but we don't
699 // currently do that. Pools only increase in size.
700 return true;
701 }
702 }
703 return false;
704 }
705
706 void replenish(memory_resource* __r, const pool_options& __opts)
707 {
708 using word = chunk::word;
709 const size_t __blocks = _M_blocks_per_chunk;
710 const auto __bits = chunk::bits_per_word;
711 const size_t __words = (__blocks + __bits - 1) / __bits;
712 const size_t __block_size = block_size();
713 size_t __bytes = __blocks * __block_size + __words * sizeof(word);
714 size_t __alignment = std::__bit_ceil(__block_size);
715 void* __p = __r->allocate(__bytes, __alignment);
716 __try
717 {
718 size_t __n = __blocks * __block_size;
719 void* __pwords = static_cast<char*>(__p) + __n;
720 _M_chunks.insert(chunk(__p, __bytes, __pwords, __blocks), __r);
721 }
722 __catch (...)
723 {
724 __r->deallocate(__p, __bytes, __alignment);
725 }
726 if (_M_blocks_per_chunk < __opts.max_blocks_per_chunk)
727 {
728 const size_t max_blocks
729 = (chunk::max_bytes_per_chunk() - sizeof(word))
730 / (__block_size + 0.125);
731 _M_blocks_per_chunk = std::min({
732 max_blocks,
733 __opts.max_blocks_per_chunk,
734 (size_t)_M_blocks_per_chunk * 2
735 });
736 }
737 }
738
739 void release(memory_resource* __r)
740 {
741 const size_t __alignment = std::__bit_ceil(block_size());
742 for (auto& __c : _M_chunks)
743 if (__c._M_p)
744 __r->deallocate(__c._M_p, __c._M_bytes, __alignment);
745 _M_chunks.clear(__r);
746 }
747
748 // A "resourceless vector" instead of pmr::vector, to save space.
749 // All resize operations need to be passed a memory resource, which
750 // obviously needs to be the same one every time.
751 // Chunks are kept sorted by address of their first block, except for
752 // the most recently-allocated Chunk which is at the end of the vector.
753 struct vector
754 {
755 using value_type = chunk;
756 using size_type = unsigned;
757 using iterator = value_type*;
758
759 // A vector owns its data pointer but not memory held by its elements.
760 chunk* data = nullptr;
761 size_type size = 0;
762 size_type capacity = 0;
763
764 vector() = default;
765
766 vector(size_type __n, memory_resource* __r)
767 : data(polymorphic_allocator<value_type>(__r).allocate(__n)),
768 capacity(__n)
769 { }
770
771 // Must call clear(r) before destruction!
772 ~vector() { __glibcxx_assert(data == nullptr); }
773
774 vector(vector&& __rval) noexcept
775 : data(__rval.data), size(__rval.size), capacity(__rval.capacity)
776 {
777 __rval.data = nullptr;
778 __rval.capacity = __rval.size = 0;
779 }
780
781 vector& operator=(vector&& __rval) noexcept
782 {
783 __glibcxx_assert(data == nullptr);
784 data = __rval.data;
785 size = __rval.size;
786 capacity = __rval.capacity;
787 __rval.data = nullptr;
788 __rval.capacity = __rval.size = 0;
789 return *this;
790 }
791
792 // void resize(size_type __n, memory_resource* __r);
793 // void reserve(size_type __n, memory_resource* __r);
794
795 void clear(memory_resource* __r)
796 {
797 if (!data)
798 return;
799 // Chunks must be individually freed before clearing the vector.
800 std::destroy(begin(), end());
801 polymorphic_allocator<value_type>(__r).deallocate(data, capacity);
802 data = nullptr;
803 capacity = size = 0;
804 }
805
806 // Sort existing elements then insert new one at the end.
807 iterator insert(chunk&& c, memory_resource* r)
808 {
809 if (size < capacity)
810 {
811 if (size > 1)
812 {
813 auto mid = end() - 1;
814 std::rotate(std::lower_bound(begin(), mid, *mid), mid, end());
815 }
816 }
817 else if (size > 0)
818 {
819 polymorphic_allocator<value_type> __alloc(r);
820 auto __mid = std::lower_bound(begin(), end() - 1, back());
821 auto __p = __alloc.allocate(capacity * 1.5);
822 // move [begin,__mid) to new storage
823 auto __p2 = std::move(begin(), __mid, __p);
824 // move end-1 to new storage
825 *__p2 = std::move(back());
826 // move [__mid,end-1) to new storage
827 std::move(__mid, end() - 1, ++__p2);
828 std::destroy(begin(), end());
829 __alloc.deallocate(data, capacity);
830 data = __p;
831 capacity *= 1.5;
832 }
833 else
834 {
835 polymorphic_allocator<value_type> __alloc(r);
836 data = __alloc.allocate(capacity = 8);
837 }
838 auto back = ::new (data + size) chunk(std::move(c));
839 __glibcxx_assert(std::is_sorted(begin(), back));
840 ++size;
841 return back;
842 }
843
844 iterator begin() const { return data; }
845 iterator end() const { return data + size; }
846
847 bool empty() const noexcept { return size == 0; }
848
849 value_type& back() { return data[size - 1]; }
850 };
851
852 vector _M_chunks;
853 unsigned _M_block_sz; // size of blocks allocated from this pool
854 unsigned _M_blocks_per_chunk; // number of blocks to allocate next
855 };
856
857 // An oversized allocation that doesn't fit in a pool.
858 struct __pool_resource::_BigBlock : big_block
859 {
860 using big_block::big_block;
861 };
862
863 namespace {
864
865 constexpr size_t pool_sizes[] = {
866 8, 16, 24,
867 32, 48,
868 64, 80, 96, 112,
869 128, 192,
870 256, 320, 384, 448,
871 512, 768,
872 #if __SIZE_WIDTH__ > 16
873 1024, 1536,
874 2048, 3072,
875 #if __SIZE_WIDTH__ > 20
876 1<<12, 1<<13, 1<<14,
877 1<<15, 1<<16, 1<<17,
878 1<<20, 1<<21, 1<<22 // 4MB should be enough for anybody
879 #endif
880 #endif
881 };
882
883 pool_options
884 munge_options(pool_options opts)
885 {
886 // The values in the returned struct may differ from those supplied
887 // to the pool resource constructor in that values of zero will be
888 // replaced with implementation-defined defaults, and sizes may be
889 // rounded to unspecified granularity.
890
891 // max_blocks_per_chunk sets the absolute maximum for the pool resource.
892 // Each pool might have a smaller maximum, because pools for very large
893 // objects might impose a smaller limit.
894 if (opts.max_blocks_per_chunk == 0)
895 {
896 // Pick a default that depends on the number of bits in size_t.
897 opts.max_blocks_per_chunk = __SIZE_WIDTH__ << 8;
898 }
899 else
900 {
901 // Round to preferred granularity.
902 if (opts.max_blocks_per_chunk < size_t(-4))
903 {
904 // round up
905 opts.max_blocks_per_chunk
906 = aligned_ceil(opts.max_blocks_per_chunk, 4);
907 }
908 else
909 {
910 // round down
911 opts.max_blocks_per_chunk &= ~size_t(3);
912 }
913 }
914
915 if (opts.max_blocks_per_chunk > chunk::max_blocks_per_chunk())
916 {
917 opts.max_blocks_per_chunk = chunk::max_blocks_per_chunk();
918 }
919
920 // largest_required_pool_block specifies the largest block size that will
921 // be allocated from a pool. Larger allocations will come directly from
922 // the upstream resource and so will not be pooled.
923 if (opts.largest_required_pool_block == 0)
924 {
925 // Pick a sensible default that depends on the number of bits in size_t
926 // (pools with larger block sizes must be explicitly requested by
927 // using a non-zero value for largest_required_pool_block).
928 opts.largest_required_pool_block = __SIZE_WIDTH__ << 6;
929 }
930 else
931 {
932 // Round to preferred granularity
933 static_assert(std::__has_single_bit(pool_sizes[0]));
934 opts.largest_required_pool_block
935 = aligned_ceil(opts.largest_required_pool_block, pool_sizes[0]);
936 }
937
938 if (opts.largest_required_pool_block < big_block::min)
939 {
940 opts.largest_required_pool_block = big_block::min;
941 }
942 else if (opts.largest_required_pool_block > std::end(pool_sizes)[-1])
943 {
944 // Setting _M_opts to the largest pool allows users to query it:
945 opts.largest_required_pool_block = std::end(pool_sizes)[-1];
946 }
947 return opts;
948 }
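// Worked example of the adjustments above (values only, 64-bit size_t):
// pool_options{} (both fields zero) becomes max_blocks_per_chunk == 64 << 8
// == 16384 and largest_required_pool_block == 64 << 6 == 4096, while a
// request of {100000000, 3} is clamped to chunk::max_blocks_per_chunk() and
// raised to big_block::min respectively.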
949
950 inline int
951 pool_index(size_t block_size, int npools)
952 {
953 auto p = std::lower_bound(pool_sizes, pool_sizes + npools, block_size);
954 int n = p - pool_sizes;
955 if (n != npools)
956 return n;
957 return -1;
958 }
959
960 inline int
961 select_num_pools(const pool_options& opts)
962 {
963 auto p = std::lower_bound(std::begin(pool_sizes), std::end(pool_sizes),
964 opts.largest_required_pool_block);
965 const int n = p - std::begin(pool_sizes);
966 if (p == std::end(pool_sizes))
967 return n;
968 return n + 1;
969 }
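// Worked example (values only, 64-bit target): pool_index(24, npools) == 2
// and pool_index(26, npools) == 3, because lower_bound picks the first pool
// size that is not smaller than the request (24 and 32 here). With the
// default largest_required_pool_block of 4096, select_num_pools returns 22,
// one pool per size from 8 up to and including 4096.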
970
971 #ifdef _GLIBCXX_HAS_GTHREADS
972 using shared_lock = std::shared_lock<shared_mutex>;
973 using exclusive_lock = lock_guard<shared_mutex>;
974 #endif
975
976 } // namespace
977
978 __pool_resource::
979 __pool_resource(const pool_options& opts, memory_resource* upstream)
980 : _M_opts(munge_options(opts)), _M_unpooled(upstream),
981 _M_npools(select_num_pools(_M_opts))
982 { }
983
984 __pool_resource::~__pool_resource() { release(); }
985
986 void
987 __pool_resource::release() noexcept
988 {
989 memory_resource* res = resource();
990 // deallocate oversize allocations
991 for (auto& b : _M_unpooled)
992 res->deallocate(b.pointer, b.size(), b.align());
993 pmr::vector<_BigBlock>{res}.swap(_M_unpooled);
994 }
995
996 void*
997 __pool_resource::allocate(size_t bytes, size_t alignment)
998 {
999 auto& b = _M_unpooled.emplace_back(bytes, alignment);
1000 __try {
1001 // N.B. need to allocate b.size(), which might be larger than bytes.
1002 // Also use b.align() instead of alignment parameter, which will be
1003 // an impossibly large value if (bytes+bookkeeping) > SIZE_MAX.
1004 void* p = resource()->allocate(b.size(), b.align());
1005 b.pointer = p;
1006 if (_M_unpooled.size() > 1)
1007 {
1008 const auto mid = _M_unpooled.end() - 1;
1009 // move to right position in vector
1010 std::rotate(std::lower_bound(_M_unpooled.begin(), mid, p),
1011 mid, _M_unpooled.end());
1012 }
1013 return p;
1014 } __catch(...) {
1015 _M_unpooled.pop_back();
1016 __throw_exception_again;
1017 }
1018 }
1019
1020 void
1021 __pool_resource::deallocate(void* p, size_t bytes [[maybe_unused]],
1022 size_t alignment [[maybe_unused]])
1023 {
1024 const auto it
1025 = std::lower_bound(_M_unpooled.begin(), _M_unpooled.end(), p);
1026 __glibcxx_assert(it != _M_unpooled.end() && it->pointer == p);
1027 if (it != _M_unpooled.end() && it->pointer == p) // [[likely]]
1028 {
1029 const auto b = *it;
1030 __glibcxx_assert(b.size() == b.alloc_size(bytes));
1031 __glibcxx_assert(b.align() == alignment);
1032 _M_unpooled.erase(it);
1033 // N.B. need to deallocate b.size(), which might be larger than bytes.
1034 resource()->deallocate(p, b.size(), b.align());
1035 }
1036 }
1037
1038 // Create array of pools, allocated from upstream resource.
1039 auto
1040 __pool_resource::_M_alloc_pools()
1041 -> _Pool*
1042 {
1043 polymorphic_allocator<_Pool> alloc{resource()};
1044 _Pool* p = alloc.allocate(_M_npools);
1045 for (int i = 0; i < _M_npools; ++i)
1046 {
1047 // For last pool use largest_required_pool_block
1048 const size_t block_size = (i+1 == _M_npools)
1049 ? _M_opts.largest_required_pool_block
1050 : pool_sizes[i];
1051
1052 // Decide on initial number of blocks per chunk.
1053 // At least 16 blocks per chunk seems reasonable,
1054 // more for smaller blocks:
1055 size_t blocks_per_chunk = std::max(size_t(16), 1024 / block_size);
1056 // But don't exceed the requested max_blocks_per_chunk:
1057 blocks_per_chunk
1058 = std::min(blocks_per_chunk, _M_opts.max_blocks_per_chunk);
1059 // Allow space for bitset to track which blocks are used/unused:
1060 blocks_per_chunk *= 1 - 1.0 / (__CHAR_BIT__ * block_size);
1061 // Construct a _Pool for the given block size and initial chunk size:
1062 alloc.construct(p + i, block_size, blocks_per_chunk);
1063 }
1064 return p;
1065 }
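// Worked example of the sizing above (values only, default options): for the
// 8-byte pool, blocks_per_chunk starts at max(16, 1024 / 8) == 128, stays
// below max_blocks_per_chunk, and is then scaled by 1 - 1/(8 * 8) to leave
// room for the bitset, giving 126 blocks in the first chunk.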
1066
1067 #ifdef _GLIBCXX_HAS_GTHREADS
1068 // synchronized_pool_resource members.
1069
1070 /* Notes on implementation and thread safety:
1071 *
1072 * Each synchronized_pool_resource manages a linked list of N+1 _TPools
1073 * objects, where N is the number of threads using the pool resource.
1074 * Each _TPools object has its own set of pools, with their own chunks.
1075 * The first element of the list, _M_tpools[0], can be used by any thread.
1076 * The rest of the list contains a _TPools object for each thread,
1077 * accessed via the thread-specific key _M_key (and referred to for
1078 * exposition as _M_tpools[_M_key]).
1079 * The first element, _M_tpools[0], contains "orphaned chunks" which were
1080 * allocated by a thread which has since exited, and so there is no
1081 * _M_tpools[_M_key] for that thread. Orphaned chunks are never reused,
1082 * they're only held in _M_tpools[0] so they can be deallocated.
1083 * A thread can access its own thread-specific set of pools via _M_key
1084 * while holding a shared lock on _M_mx. Accessing _M_impl._M_unpooled
1085 * or _M_tpools[0] or any other thread's _M_tpools[_M_key] requires an
1086 * exclusive lock.
1087 * The upstream_resource() pointer can be obtained without a lock, but
1088 * any dereference of that pointer requires an exclusive lock.
1089 * The _M_impl._M_opts and _M_impl._M_npools members are immutable,
1090 * and can safely be accessed concurrently.
1091 *
1092 * In a single-threaded program (i.e. __gthread_active_p() == false)
1093 * the pool resource only needs one set of pools and never has orphaned
1094 * chunks, so just uses _M_tpools[0] directly, and _M_tpools->next is null.
1095 */
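// A minimal usage sketch of the type described above (illustrative only;
// `pool` and `v` are hypothetical names, not part of this file):
//
//   std::pmr::synchronized_pool_resource pool;  // safe to share across threads
//   std::pmr::vector<int> v(&pool);
//   v.resize(1000);  // blocks come from the calling thread's _TPools entry
//   pool.release();  // returns every chunk to the upstream resource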
1096
1097 extern "C" {
1098 static void destroy_TPools(void*);
1099 }
1100
1101 struct synchronized_pool_resource::_TPools
1102 {
1103 // Exclusive lock must be held in the thread where this constructor runs.
1104 explicit
1105 _TPools(synchronized_pool_resource& owner, exclusive_lock&)
1106 : owner(owner), pools(owner._M_impl._M_alloc_pools())
1107 {
1108 // __builtin_printf("%p constructing\n", this);
1109 __glibcxx_assert(pools);
1110 }
1111
1112 // Exclusive lock must be held in the thread where this destructor runs.
1113 ~_TPools()
1114 {
1115 __glibcxx_assert(pools);
1116 if (pools)
1117 {
1118 memory_resource* r = owner.upstream_resource();
1119 for (int i = 0; i < owner._M_impl._M_npools; ++i)
1120 pools[i].release(r);
1121 std::destroy_n(pools, owner._M_impl._M_npools);
1122 polymorphic_allocator<__pool_resource::_Pool> a(r);
1123 a.deallocate(pools, owner._M_impl._M_npools);
1124 }
1125 if (prev)
1126 prev->next = next;
1127 if (next)
1128 next->prev = prev;
1129 }
1130
1131 // Exclusive lock must be held in the thread where this function runs.
1132 void move_nonempty_chunks()
1133 {
1134 __glibcxx_assert(pools);
1135 __glibcxx_assert(__gthread_active_p());
1136 if (!pools)
1137 return;
1138 memory_resource* const r = owner.upstream_resource();
1139 auto* const shared = owner._M_tpools->pools;
1140 // move all non-empty chunks to the shared _TPools
1141 for (int i = 0; i < owner._M_impl._M_npools; ++i)
1142 for (auto& c : pools[i]._M_chunks)
1143 if (!c.empty())
1144 shared[i]._M_chunks.insert(std::move(c), r);
1145 }
1146
1147 synchronized_pool_resource& owner;
1148 __pool_resource::_Pool* pools = nullptr;
1149 _TPools* prev = nullptr;
1150 _TPools* next = nullptr;
1151
1152 static void destroy(_TPools* p)
1153 {
1154 exclusive_lock l(p->owner._M_mx);
1155 // __glibcxx_assert(p != p->owner._M_tpools);
1156 p->move_nonempty_chunks();
1157 polymorphic_allocator<_TPools> a(p->owner.upstream_resource());
1158 p->~_TPools();
1159 a.deallocate(p, 1);
1160 }
1161 };
1162
1163 // Called when a thread exits
1164 extern "C" {
1165 static void destroy_TPools(void* p)
1166 {
1167 using _TPools = synchronized_pool_resource::_TPools;
1168 _TPools::destroy(static_cast<_TPools*>(p));
1169 }
1170 }
1171
1172 // Constructor
1173 synchronized_pool_resource::
1174 synchronized_pool_resource(const pool_options& opts,
1175 memory_resource* upstream)
1176 : _M_impl(opts, upstream)
1177 {
1178 if (__gthread_active_p())
1179 if (int err = __gthread_key_create(&_M_key, destroy_TPools))
1180 __throw_system_error(err);
1181 exclusive_lock l(_M_mx);
1182 _M_tpools = _M_alloc_shared_tpools(l);
1183 }
1184
1185 // Destructor
1186 synchronized_pool_resource::~synchronized_pool_resource()
1187 {
1188 release();
1189 if (__gthread_active_p())
1190 __gthread_key_delete(_M_key); // does not run destroy_TPools
1191 }
1192
1193 void
1194 synchronized_pool_resource::release()
1195 {
1196 exclusive_lock l(_M_mx);
1197 if (_M_tpools)
1198 {
1199 if (__gthread_active_p())
1200 {
1201 __gthread_key_delete(_M_key); // does not run destroy_TPools
1202 __gthread_key_create(&_M_key, destroy_TPools);
1203 }
1204 polymorphic_allocator<_TPools> a(upstream_resource());
1205 // destroy+deallocate each _TPools
1206 do
1207 {
1208 _TPools* p = _M_tpools;
1209 _M_tpools = _M_tpools->next;
1210 p->~_TPools();
1211 a.deallocate(p, 1);
1212 }
1213 while (_M_tpools);
1214 }
1215 // release unpooled memory
1216 _M_impl.release();
1217 }
1218
1219 // Caller must hold shared or exclusive lock to ensure the pointer
1220 // isn't invalidated before it can be used.
1221 auto
1222 synchronized_pool_resource::_M_thread_specific_pools() noexcept
1223 {
1224 __pool_resource::_Pool* pools = nullptr;
1225 __glibcxx_assert(__gthread_active_p());
1226 if (auto tp = static_cast<_TPools*>(__gthread_getspecific(_M_key)))
1227 {
1228 pools = tp->pools;
1229 // __glibcxx_assert(tp->pools);
1230 }
1231 return pools;
1232 }
1233
1234 // Override for memory_resource::do_allocate
1235 void*
1236 synchronized_pool_resource::
1237 do_allocate(size_t bytes, size_t alignment)
1238 {
1239 const auto block_size = std::max(bytes, alignment);
1240 const pool_options opts = _M_impl._M_opts;
1241 if (block_size <= opts.largest_required_pool_block)
1242 {
1243 const ptrdiff_t index = pool_index(block_size, _M_impl._M_npools);
1244 if (__gthread_active_p())
1245 {
1246 // Try to allocate from the thread-specific pool.
1247 shared_lock l(_M_mx);
1248 if (auto pools = _M_thread_specific_pools()) // [[likely]]
1249 {
1250 // Need exclusive lock to replenish so use try_allocate:
1251 if (void* p = pools[index].try_allocate())
1252 return p;
1253 // Need to take exclusive lock and replenish pool.
1254 }
1255 // Need to allocate or replenish thread-specific pools using
1256 // upstream resource, so need to hold exclusive lock.
1257 }
1258 else // single-threaded
1259 {
1260 if (!_M_tpools) // [[unlikely]]
1261 {
1262 exclusive_lock dummy(_M_mx);
1263 _M_tpools = _M_alloc_shared_tpools(dummy);
1264 }
1265 return _M_tpools->pools[index].allocate(upstream_resource(), opts);
1266 }
1267
1268 // N.B. Another thread could call release() now that the lock is not held.
1269 exclusive_lock excl(_M_mx);
1270 if (!_M_tpools) // [[unlikely]]
1271 _M_tpools = _M_alloc_shared_tpools(excl);
1272 auto pools = _M_thread_specific_pools();
1273 if (!pools)
1274 pools = _M_alloc_tpools(excl)->pools;
1275 return pools[index].allocate(upstream_resource(), opts);
1276 }
1277 exclusive_lock l(_M_mx);
1278 return _M_impl.allocate(bytes, alignment); // unpooled allocation
1279 }
1280
1281 // Override for memory_resource::do_deallocate
1282 void
1283 synchronized_pool_resource::
1284 do_deallocate(void* p, size_t bytes, size_t alignment)
1285 {
1286 size_t block_size = std::max(bytes, alignment);
1287 if (block_size <= _M_impl._M_opts.largest_required_pool_block)
1288 {
1289 const ptrdiff_t index = pool_index(block_size, _M_impl._M_npools);
1290 __glibcxx_assert(index != -1);
1291 if (__gthread_active_p())
1292 {
1293 shared_lock l(_M_mx);
1294 if (auto pools = _M_thread_specific_pools())
1295 {
1296 // No need to lock here, no other thread is accessing this pool.
1297 if (pools[index].deallocate(upstream_resource(), p))
1298 return;
1299 }
1300 // Block might have come from a different thread's pool,
1301 // take exclusive lock and check every pool.
1302 }
1303 else // single-threaded
1304 {
1305 __glibcxx_assert(_M_tpools != nullptr);
1306 if (_M_tpools) // [[likely]]
1307 _M_tpools->pools[index].deallocate(upstream_resource(), p);
1308 return;
1309 }
1310
1311 // TODO store {p, bytes, alignment} somewhere and defer returning
1312 // the block to the correct thread-specific pool until we next
1313 // take the exclusive lock.
1314
1315 exclusive_lock excl(_M_mx);
1316 auto my_pools = _M_thread_specific_pools();
1317 for (_TPools* t = _M_tpools; t != nullptr; t = t->next)
1318 {
1319 if (t->pools != my_pools)
1320 if (t->pools) // [[likely]]
1321 {
1322 if (t->pools[index].deallocate(upstream_resource(), p))
1323 return;
1324 }
1325 }
1326 // Not necessarily an error to reach here, release() could have been
1327 // called on another thread between releasing the shared lock and
1328 // acquiring the exclusive lock.
1329 return;
1330 }
1331 exclusive_lock l(_M_mx);
1332 _M_impl.deallocate(p, bytes, alignment);
1333 }
1334
1335 // Allocate a thread-specific _TPools object and add it to the linked list.
1336 auto
1337 synchronized_pool_resource::_M_alloc_tpools(exclusive_lock& l)
1338 -> _TPools*
1339 {
1340 __glibcxx_assert(_M_tpools != nullptr);
1341 __glibcxx_assert(__gthread_active_p());
1342 // dump_list(_M_tpools);
1343 polymorphic_allocator<_TPools> a(upstream_resource());
1344 _TPools* p = a.allocate(1);
1345 bool constructed = false;
1346 __try
1347 {
1348 a.construct(p, *this, l);
1349 constructed = true;
1350 // __glibcxx_assert(__gthread_getspecific(_M_key) == nullptr);
1351 if (int err = __gthread_setspecific(_M_key, p))
1352 __throw_system_error(err);
1353 }
1354 __catch(...)
1355 {
1356 if (constructed)
1357 a.destroy(p);
1358 a.deallocate(p, 1);
1359 __throw_exception_again;
1360 }
1361 p->prev = _M_tpools;
1362 p->next = _M_tpools->next;
1363 _M_tpools->next = p;
1364 if (p->next)
1365 p->next->prev = p;
1366 return p;
1367 }
1368
1369 // Allocate the shared _TPools object, _M_tpools[0]
1370 auto
1371 synchronized_pool_resource::_M_alloc_shared_tpools(exclusive_lock& l)
1372 -> _TPools*
1373 {
1374 __glibcxx_assert(_M_tpools == nullptr);
1375 polymorphic_allocator<_TPools> a(upstream_resource());
1376 _TPools* p = a.allocate(1);
1377 __try
1378 {
1379 a.construct(p, *this, l);
1380 }
1381 __catch(...)
1382 {
1383 a.deallocate(p, 1);
1384 __throw_exception_again;
1385 }
1386 // __glibcxx_assert(p->next == nullptr);
1387 // __glibcxx_assert(p->prev == nullptr);
1388 return p;
1389 }
1390 #endif // _GLIBCXX_HAS_GTHREADS
1391
1392 // unsynchronized_pool_resource member functions
1393
1394 // Constructor
1395 unsynchronized_pool_resource::
1396 unsynchronized_pool_resource(const pool_options& opts,
1397 memory_resource* upstream)
1398 : _M_impl(opts, upstream), _M_pools(_M_impl._M_alloc_pools())
1399 { }
1400
1401 // Destructor
1402 unsynchronized_pool_resource::~unsynchronized_pool_resource()
1403 { release(); }
1404
1405 // Return all memory to upstream resource.
1406 void
1407 unsynchronized_pool_resource::release()
1408 {
1409 // release pooled memory
1410 if (_M_pools)
1411 {
1412 memory_resource* res = upstream_resource();
1413 polymorphic_allocator<_Pool> alloc{res};
1414 for (int i = 0; i < _M_impl._M_npools; ++i)
1415 {
1416 _M_pools[i].release(res);
1417 alloc.destroy(_M_pools + i);
1418 }
1419 alloc.deallocate(_M_pools, _M_impl._M_npools);
1420 _M_pools = nullptr;
1421 }
1422
1423 // release unpooled memory
1424 _M_impl.release();
1425 }
1426
1427 // Find the right pool for a block of size block_size.
1428 auto
1429 unsynchronized_pool_resource::_M_find_pool(size_t block_size) noexcept
1430 {
1431 __pool_resource::_Pool* pool = nullptr;
1432 if (_M_pools) // [[likely]]
1433 {
1434 int index = pool_index(block_size, _M_impl._M_npools);
1435 if (index != -1)
1436 pool = _M_pools + index;
1437 }
1438 return pool;
1439 }
1440
1441 // Override for memory_resource::do_allocate
1442 void*
1443 unsynchronized_pool_resource::do_allocate(size_t bytes, size_t alignment)
1444 {
1445 const auto block_size = std::max(bytes, alignment);
1446 if (block_size <= _M_impl._M_opts.largest_required_pool_block)
1447 {
1448 // Recreate pools if release() has been called:
1449 if (__builtin_expect(_M_pools == nullptr, false))
1450 _M_pools = _M_impl._M_alloc_pools();
1451 if (auto pool = _M_find_pool(block_size))
1452 return pool->allocate(upstream_resource(), _M_impl._M_opts);
1453 }
1454 return _M_impl.allocate(bytes, alignment);
1455 }
1456
1457 // Override for memory_resource::do_deallocate
1458 void
1459 unsynchronized_pool_resource::
1460 do_deallocate(void* p, size_t bytes, size_t alignment)
1461 {
1462 size_t block_size = std::max(bytes, alignment);
1463 if (block_size <= _M_impl._M_opts.largest_required_pool_block)
1464 {
1465 if (auto pool = _M_find_pool(block_size))
1466 {
1467 pool->deallocate(upstream_resource(), p);
1468 return;
1469 }
1470 }
1471 _M_impl.deallocate(p, bytes, alignment);
1472 }
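// A minimal usage sketch of unsynchronized_pool_resource (illustrative only;
// `opts`, `pool`, `p` and `q` are hypothetical names, not part of this file):
//
//   std::pmr::pool_options opts;
//   opts.largest_required_pool_block = 512;  // pool block sizes 8..512 only
//   std::pmr::unsynchronized_pool_resource pool(opts);
//   void* p = pool.allocate(256, 16);   // served by the 256-byte pool
//   void* q = pool.allocate(4096);      // too large: unpooled, via _M_impl
//   pool.deallocate(p, 256, 16);
//   pool.deallocate(q, 4096);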
1473
1474 } // namespace pmr
1475 _GLIBCXX_END_NAMESPACE_VERSION
1476 } // namespace std