1// <memory_resource> implementation -*- C++ -*-
2
3// Copyright (C) 2018-2019 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25#include <memory_resource>
26#include <algorithm> // lower_bound, rotate
27#include <atomic>
28#include <bit> // __ceil2, __log2p1
29#include <new>
30#if ATOMIC_POINTER_LOCK_FREE != 2
31# include <bits/std_mutex.h> // std::mutex, std::lock_guard
32# include <bits/move.h> // std::exchange
33#endif
34
35namespace std _GLIBCXX_VISIBILITY(default)
36{
37_GLIBCXX_BEGIN_NAMESPACE_VERSION
38namespace pmr
39{
40 namespace
41 {
42 class newdel_res_t final : public memory_resource
43 {
44 void*
45 do_allocate(size_t __bytes, size_t __alignment) override
46 { return ::operator new(__bytes, std::align_val_t(__alignment)); }
47
48 void
49 do_deallocate(void* __p, size_t __bytes, size_t __alignment) noexcept
50 override
51 { ::operator delete(__p, __bytes, std::align_val_t(__alignment)); }
52
53 bool
54 do_is_equal(const memory_resource& __other) const noexcept override
55 { return &__other == this; }
56 };
57
58 class null_res_t final : public memory_resource
59 {
60 void*
61 do_allocate(size_t, size_t) override
62 { std::__throw_bad_alloc(); }
63
64 void
65 do_deallocate(void*, size_t, size_t) noexcept override
66 { }
67
68 bool
69 do_is_equal(const memory_resource& __other) const noexcept override
70 { return &__other == this; }
71 };
72
73 template<typename T>
74 struct constant_init
75 {
76 union {
77 unsigned char unused;
78 T obj;
79 };
80 constexpr constant_init() : obj() { }
81
82 template<typename U>
83 explicit constexpr constant_init(U arg) : obj(arg) { }
84
85 ~constant_init() { /* do nothing, union member is not destroyed */ }
86 };
87
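// constant_init above follows the usual libstdc++ "immortal global" idiom:
// because the object lives in a union member, the enclosing variable can be
// constant-initialized and its destructor is never run, so the resources
// defined below stay usable even during static destruction. Illustrative use
// (the name my_res is hypothetical, not part of this file):
//
//   constant_init<newdel_res_t> my_res{};
//   memory_resource* r = &my_res.obj;  // valid for the entire program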
88 constant_init<newdel_res_t> newdel_res{};
89 constant_init<null_res_t> null_res{};
90#if ATOMIC_POINTER_LOCK_FREE == 2
91 using atomic_mem_res = atomic<memory_resource*>;
92# define _GLIBCXX_ATOMIC_MEM_RES_CAN_BE_CONSTANT_INITIALIZED
93#elif defined(_GLIBCXX_HAS_GTHREADS)
94 // Can't use pointer-width atomics, define a type using a mutex instead:
95 struct atomic_mem_res
96 {
97# ifdef __GTHREAD_MUTEX_INIT
98# define _GLIBCXX_ATOMIC_MEM_RES_CAN_BE_CONSTANT_INITIALIZED
99 // std::mutex has constexpr constructor
100 constexpr
101# endif
102 atomic_mem_res(memory_resource* r) : val(r) { }
103
104 mutex mx;
105 memory_resource* val;
106
107 memory_resource* load()
108 {
109 lock_guard<mutex> lock(mx);
110 return val;
111 }
112
113 memory_resource* exchange(memory_resource* r)
114 {
115 lock_guard<mutex> lock(mx);
116 return std::exchange(val, r);
117 }
118 };
119#else
120# define _GLIBCXX_ATOMIC_MEM_RES_CAN_BE_CONSTANT_INITIALIZED
121 // Single-threaded, no need for synchronization
122 struct atomic_mem_res
123 {
124 constexpr
125 atomic_mem_res(memory_resource* r) : val(r) { }
126
127 memory_resource* val;
128
129 memory_resource* load() const
130 {
131 return val;
132 }
133
134 memory_resource* exchange(memory_resource* r)
135 {
136 return std::exchange(val, r);
137 }
138 };
139#endif // ATOMIC_POINTER_LOCK_FREE == 2
140
141#ifdef _GLIBCXX_ATOMIC_MEM_RES_CAN_BE_CONSTANT_INITIALIZED
142 constant_init<atomic_mem_res> default_res{&newdel_res.obj};
143#else
144# include "default_resource.h"
145#endif
146 } // namespace
147
148 memory_resource*
149 new_delete_resource() noexcept
150 { return &newdel_res.obj; }
151
152 memory_resource*
153 null_memory_resource() noexcept
154 { return &null_res.obj; }
155
156 memory_resource*
157 set_default_resource(memory_resource* r) noexcept
158 {
159 if (r == nullptr)
160 r = new_delete_resource();
161 return default_res.obj.exchange(r);
162 }
163
164 memory_resource*
165 get_default_resource() noexcept
166 { return default_res.obj.load(); }
167
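// Illustrative example (not compiled here; the names pool and v are
// hypothetical) of how a program uses the functions above:
//
//   #include <memory_resource>
//   #include <vector>
//
//   int main()
//   {
//     std::pmr::monotonic_buffer_resource pool;
//     std::pmr::set_default_resource(&pool);    // returns the previous default
//     std::pmr::vector<int> v;                  // picks up get_default_resource()
//     v.push_back(42);
//     std::pmr::set_default_resource(nullptr);  // restore new_delete_resource()
//   }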
168 // Member functions for std::pmr::monotonic_buffer_resource
169
170 // Memory allocated by the upstream resource is managed in a linked list
171 // of _Chunk objects. A _Chunk object recording the size and alignment of
172 // the allocated block and a pointer to the previous chunk is placed
173 // at the end of the block.
174 class monotonic_buffer_resource::_Chunk
175 {
176 public:
177 // Return the address and size of a block of memory allocated from __r,
178 // of at least __size bytes and aligned to __align.
179 // Add a new _Chunk to the front of the linked list at __head.
180 static pair<void*, size_t>
181 allocate(memory_resource* __r, size_t __size, size_t __align,
182 _Chunk*& __head)
183 {
184 __size = std::__ceil2(__size + sizeof(_Chunk));
185 void* __p = __r->allocate(__size, __align);
186 // Add a chunk defined by (__p, __size, __align) to linked list __head.
187 void* const __back = (char*)__p + __size - sizeof(_Chunk);
188 __head = ::new(__back) _Chunk(__size, __align, __head);
189 return { __p, __size - sizeof(_Chunk) };
190 }
191
192 // Return every chunk in linked list __head to resource __r.
193 static void
194 release(_Chunk*& __head, memory_resource* __r) noexcept
195 {
196 _Chunk* __next = __head;
197 __head = nullptr;
198 while (__next)
199 {
200 _Chunk* __ch = __next;
201 __builtin_memcpy(&__next, __ch->_M_next, sizeof(_Chunk*));
202
203 __glibcxx_assert(__ch->_M_canary != 0);
204 __glibcxx_assert(__ch->_M_canary == (__ch->_M_size|__ch->_M_align));
205
206 if (__ch->_M_canary != (__ch->_M_size | __ch->_M_align))
207 return; // buffer overflow detected!
208
209 size_t __size = (1u << __ch->_M_size);
210 size_t __align = (1u << __ch->_M_align);
211 void* __start = (char*)(__ch + 1) - __size;
212 __r->deallocate(__start, __size, __align);
213 }
214 }
215
216 private:
217 _Chunk(size_t __size, size_t __align, _Chunk* __next) noexcept
218 : _M_size(std::__log2p1(__size) - 1),
219 _M_align(std::__log2p1(__align) - 1)
220 {
221 __builtin_memcpy(_M_next, &__next, sizeof(__next));
222 _M_canary = _M_size | _M_align;
223 }
224
225 unsigned char _M_canary;
226 unsigned char _M_size;
227 unsigned char _M_align;
228 unsigned char _M_next[sizeof(_Chunk*)];
229 };
230
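// Illustrative sketch (not compiled here) of the block returned by
// _Chunk::allocate above, assuming a typical 64-bit target where
// sizeof(_Chunk) == 3 + sizeof(_Chunk*) == 11:
//
//   |<-------------- __ceil2(__size + sizeof(_Chunk)) -------------->|
//   | usable space returned to the caller ...    | _Chunk bookkeeping |
//   ^ __p                                        ^ __head
//
// For example, a request for 1000 bytes allocates __ceil2(1011) == 1024
// bytes from upstream and returns {__p, 1013} to the caller.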
231 void
232 monotonic_buffer_resource::_M_new_buffer(size_t bytes, size_t alignment)
233 {
234 // Need to check this somewhere, so put it here:
235 static_assert(alignof(monotonic_buffer_resource::_Chunk) == 1);
236
237 const size_t n = std::max(bytes, _M_next_bufsiz);
238 const size_t m = std::max(alignment, alignof(std::max_align_t));
239 auto [p, size] = _Chunk::allocate(_M_upstream, n, m, _M_head);
240 _M_current_buf = p;
241 _M_avail = size;
242 _M_next_bufsiz *= _S_growth_factor;
243 }
244
245 void
246 monotonic_buffer_resource::_M_release_buffers() noexcept
247 {
248 _Chunk::release(_M_head, _M_upstream);
249 }
250
251 // Helper types for synchronized_pool_resource & unsynchronized_pool_resource
252
253 namespace {
254
255 // Simple bitset with runtime size.
256 // Tracks which blocks in a pool chunk are used/unused.
257 struct bitset
258 {
259 using word = uint64_t;
260 using size_type // unsigned integer type with no more than 32 bits
261 = conditional_t<numeric_limits<size_t>::digits <= 32, size_t, uint32_t>;
262
263 static constexpr unsigned bits_per_word = numeric_limits<word>::digits;
264
265 // The bitset does not own p
266 bitset(void* p, size_type num_blocks)
267 : _M_words(static_cast<word*>(p)), _M_size(num_blocks),
268 _M_next_word(0)
269 {
270 const size_type last_word = num_blocks / bits_per_word;
271 __builtin_memset(_M_words, 0, last_word * sizeof(*_M_words));
272 // Set bits beyond _M_size, so they are not treated as free blocks:
273 if (const size_type extra_bits = num_blocks % bits_per_word)
274 _M_words[last_word] = word(-1) << extra_bits;
275 __glibcxx_assert( empty() );
276 __glibcxx_assert( free() == num_blocks );
277 }
278
279 bitset() = default;
280 ~bitset() = default;
281
282 // Number of blocks
283 size_type size() const noexcept { return _M_size; }
284
285 // Number of free blocks (unset bits)
286 size_type free() const noexcept
287 {
288 size_type n = 0;
289 for (size_type i = _M_next_word; i < nwords(); ++i)
290 n += (bits_per_word - std::__popcount(_M_words[i]));
291 return n;
292 }
293
294 // True if there are no free blocks (all bits are set)
295 bool full() const noexcept
296 {
297 if (_M_next_word >= nwords())
298 return true;
299 // For a bitset with size() > (max_blocks_per_chunk() - 64) we will
300 // have nwords() == (max_word_index() + 1) and so _M_next_word will
301 // never be equal to nwords().
302 // In that case, check if the last word is full:
303 if (_M_next_word == max_word_index())
304 return _M_words[_M_next_word] == word(-1);
305 return false;
306 }
307
308 // True if size() != 0 and all blocks are free (no bits are set).
309 bool empty() const noexcept
310 {
311 if (nwords() == 0)
312 return false;
313 if (_M_next_word != 0)
314 return false;
315 for (size_type i = 0; i < nwords() - 1; ++i)
316 if (_M_words[i] != 0)
317 return false;
318 word last = _M_words[nwords() - 1];
319 if (const size_type extra_bits = size() % bits_per_word)
320 last <<= (bits_per_word - extra_bits);
321 return last == 0;
322 }
323
324 void reset() noexcept
325 {
326 _M_words = nullptr;
327 _M_size = _M_next_word = 0;
328 }
329
330 bool operator[](size_type n) const noexcept
331 {
332 __glibcxx_assert( n < _M_size );
333 const size_type wd = n / bits_per_word;
334 const word bit = word(1) << (n % bits_per_word);
335 return _M_words[wd] & bit;
336 }
337
338 size_type get_first_unset() noexcept
339 {
340 const size_type wd = _M_next_word;
341 if (wd < nwords())
852a971c 342 {
343 const size_type n = std::__countr_one(_M_words[wd]);
344 if (n < bits_per_word)
345 {
346 const word bit = word(1) << n;
347 _M_words[wd] |= bit;
348 update_next_word();
349 return (wd * bits_per_word) + n;
350 }
351 }
352 return size_type(-1);
353 }
354
355 void set(size_type n) noexcept
356 {
357 __glibcxx_assert( n < _M_size );
358 const size_type wd = n / bits_per_word;
359 const word bit = word(1) << (n % bits_per_word);
360 _M_words[wd] |= bit;
361 if (wd == _M_next_word)
362 update_next_word();
363 }
364
365 void clear(size_type n) noexcept
366 {
367 __glibcxx_assert( n < _M_size );
368 const size_type wd = n / bits_per_word;
369 const word bit = word(1) << (n % bits_per_word);
370 _M_words[wd] &= ~bit;
371 if (wd < _M_next_word)
372 _M_next_word = wd;
373 }
374
375 // Update _M_next_word to refer to the next word with an unset bit.
376 // The size of the _M_next_word bit-field means it cannot represent
377 // the maximum possible nwords() value. To avoid wraparound to zero
378 // this function saturates _M_next_word at max_word_index().
379 void update_next_word() noexcept
380 {
381 size_type next = _M_next_word;
382 while (_M_words[next] == word(-1) && ++next < nwords())
383 { }
384 _M_next_word = std::min(next, max_word_index());
385 }
386
387 void swap(bitset& b) noexcept
388 {
389 std::swap(_M_words, b._M_words);
390 size_type tmp = _M_size;
391 _M_size = b._M_size;
392 b._M_size = tmp;
393 tmp = _M_next_word;
394 _M_next_word = b._M_next_word;
395 b._M_next_word = tmp;
396 }
397
398 size_type nwords() const noexcept
399 { return (_M_size + bits_per_word - 1) / bits_per_word; }
400
401 // Maximum value that can be stored in bitset::_M_size member (approx 500k)
402 static constexpr size_type max_blocks_per_chunk() noexcept
403 { return (size_type(1) << _S_size_digits) - 1; }
404
405 // Maximum value that can be stored in bitset::_M_next_word member (8191).
406 static constexpr size_type max_word_index() noexcept
407 { return (max_blocks_per_chunk() + bits_per_word - 1) / bits_per_word; }
408
409 word* data() const noexcept { return _M_words; }
410
411 private:
412 static constexpr unsigned _S_size_digits
413 = (numeric_limits<size_type>::digits
414 + std::__log2p1(bits_per_word) - 1) / 2;
415
416 word* _M_words = nullptr;
417 // Number of blocks represented by the bitset:
418 size_type _M_size : _S_size_digits;
419 // Index of the first word with unset bits:
420 size_type _M_next_word : numeric_limits<size_type>::digits - _S_size_digits;
421 };
422
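// Illustrative example (not compiled here) of the index arithmetic used by
// bitset above, with hypothetical values: for block n == 70 and
// bits_per_word == 64, the word index is 70 / 64 == 1 and the bit is
// word(1) << (70 % 64) == 1 << 6, so set(70) performs _M_words[1] |= (1 << 6).
// get_first_unset() scans from _M_next_word, uses __countr_one to find the
// lowest zero bit n in that word, marks it, and returns (wd * 64) + n.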
423 // A "chunk" belonging to a pool.
424 // A chunk contains many blocks of the same size.
425 // Derived from bitset to reuse its tail-padding.
426 struct chunk : bitset
427 {
428 chunk() = default;
429
430 // p points to the start of a chunk of size bytes in length.
431 // The chunk has space for n blocks, followed by a bitset of size n
432 // that begins at address words.
433 // This object does not own p or words, the caller will free it.
434 chunk(void* p, uint32_t bytes, void* words, size_t n)
435 : bitset(words, n),
436 _M_bytes(bytes),
437 _M_p(static_cast<std::byte*>(p))
438 { __glibcxx_assert(bytes <= chunk::max_bytes_per_chunk()); }
439
440 chunk(chunk&& c) noexcept
441 : bitset(std::move(c)), _M_bytes(c._M_bytes), _M_p(c._M_p)
442 {
443 c._M_bytes = 0;
444 c._M_p = nullptr;
445 c.reset();
446 }
447
448 chunk& operator=(chunk&& c) noexcept
449 {
450 swap(c);
451 return *this;
452 }
453
454 // Allocated size of chunk:
455 uint32_t _M_bytes = 0;
456 // Start of allocated chunk:
457 std::byte* _M_p = nullptr;
458
459 // True if there are no free blocks in this chunk
460 using bitset::full;
461 // Number of blocks in this chunk
462 using bitset::size;
463
464 static constexpr uint32_t max_bytes_per_chunk() noexcept
465 { return numeric_limits<decltype(_M_bytes)>::max(); }
466
467 // Determine if block with address p and size block_size
468 // is contained within this chunk.
469 bool owns(void* p, size_t block_size)
470 {
471 std::less_equal<uintptr_t> less_equal;
472 return less_equal(reinterpret_cast<uintptr_t>(_M_p),
473 reinterpret_cast<uintptr_t>(p))
474 && less_equal(reinterpret_cast<uintptr_t>(p) + block_size,
475 reinterpret_cast<uintptr_t>(bitset::data()));
476 }
477
478 // Allocate next available block of block_size bytes from this chunk.
479 void* reserve(size_t block_size) noexcept
480 {
481 const size_type n = get_first_unset();
482 if (n == size_type(-1))
483 return nullptr;
484 return _M_p + (n * block_size);
485 }
486
487 // Deallocate a single block of block_size bytes
488 void release(void* vp, size_t block_size)
489 {
490 __glibcxx_assert( owns(vp, block_size) );
491 const size_t offset = static_cast<std::byte*>(vp) - _M_p;
492 // Pointer is correctly aligned for a block in this chunk:
493 __glibcxx_assert( (offset % block_size) == 0 );
494 // Block has been allocated:
495 __glibcxx_assert( (*this)[offset / block_size] == true );
496 bitset::clear(offset / block_size);
497 }
498
499 // Deallocate a single block if it belongs to this chunk.
500 bool try_release(void* p, size_t block_size)
501 {
502 if (!owns(p, block_size))
503 return false;
504 release(p, block_size);
505 return true;
506 }
507
508 void swap(chunk& c) noexcept
509 {
510 std::swap(_M_bytes, c._M_bytes);
511 std::swap(_M_p, c._M_p);
512 bitset::swap(c);
513 }
514
515 bool operator<(const chunk& c) const noexcept
516 { return std::less<const void*>{}(_M_p, c._M_p); }
517
518 friend void swap(chunk& l, chunk& r) { l.swap(r); }
519
520 friend bool operator<(const void* p, const chunk& c) noexcept
521 { return std::less<const void*>{}(p, c._M_p); }
522 };
523
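// Illustrative layout (not compiled here) of one chunk as created by
// _Pool::replenish below, using hypothetical values block_size == 64 and
// blocks == 128: with bits_per_word == 64 the bitset needs words == 2, so
// bytes == 128 * 64 + 2 * 8 == 8208, the blocks occupy [p, p + 8192) and
// the bitset words start at p + 8192. The chunk object itself lives in the
// pool's vector and does not own this memory.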
524 // For 64-bit pointers this is the size of three pointers i.e. 24 bytes.
525 // For 32-bit and 20-bit pointers it's four pointers (16 bytes).
526 // For 16-bit pointers it's five pointers (10 bytes).
527 // TODO pad 64-bit to 4*sizeof(void*) to avoid splitting across cache lines?
528 static_assert(sizeof(chunk)
529 == sizeof(bitset::size_type) + sizeof(uint32_t) + 2 * sizeof(void*));
530
531 // An oversized allocation that doesn't fit in a pool.
532 struct big_block
533 {
534 // Alignment must be a power-of-two so we only need to use enough bits
535 // to store the power, not the actual value:
536 static constexpr unsigned _S_alignbits
537 = std::__log2p1((unsigned)numeric_limits<size_t>::digits - 1);
538 // Use the remaining bits to store the size:
539 static constexpr unsigned _S_sizebits
540 = numeric_limits<size_t>::digits - _S_alignbits;
541 // The maximum value that can be stored in _S_size
542 static constexpr size_t all_ones = size_t(-1) >> _S_alignbits;
543 // The minimum size of a big block (smaller sizes will be rounded up).
544 static constexpr size_t min = 1u << _S_alignbits;
545
546 big_block(size_t bytes, size_t alignment)
547 : _M_size(alloc_size(bytes) >> _S_alignbits),
548 _M_align_exp(std::__log2p1(alignment) - 1u)
549 { }
550
551 void* pointer = nullptr;
552 size_t _M_size : numeric_limits<size_t>::digits - _S_alignbits;
553 size_t _M_align_exp : _S_alignbits;
554
555 size_t size() const noexcept
556 {
557 // If all bits are set in _M_size it means the maximum possible size:
558 if (__builtin_expect(_M_size == (size_t(-1) >> _S_alignbits), false))
559 return (size_t)-1;
560 else
561 return _M_size << _S_alignbits;
562 }
563
564 size_t align() const noexcept { return size_t(1) << _M_align_exp; }
565
566 // Calculate size to be allocated instead of requested number of bytes.
567 // The requested value will be rounded up to a multiple of big_block::min,
568 // so the low _S_alignbits bits are all zero and don't need to be stored.
569 static constexpr size_t alloc_size(size_t bytes) noexcept
570 {
571 const size_t s = bytes + min - 1u;
572 if (__builtin_expect(s < bytes, false))
573 return size_t(-1); // addition wrapped past zero, return max value
574 else
575 return s & ~(min - 1u);
576 }
577
578 friend bool operator<(void* p, const big_block& b) noexcept
579 { return less<void*>{}(p, b.pointer); }
580
581 friend bool operator<(const big_block& b, void* p) noexcept
582 { return less<void*>{}(b.pointer, p); }
583 };
584
585 static_assert(sizeof(big_block) == (2 * sizeof(void*)));
586
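// Worked example (illustrative, not compiled here) of the big_block encoding
// on a 64-bit target: _S_alignbits == __log2p1(63) == 6, so min == 64.
// For big_block(1000, 32): alloc_size(1000) rounds up to 1024, _M_size
// stores 1024 >> 6 == 16 and size() recovers 16 << 6 == 1024, while
// _M_align_exp stores __log2p1(32) - 1 == 5 and align() returns 1 << 5 == 32.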
587 } // namespace
588
589 // A pool that serves blocks of a particular size.
590 // Each pool manages a number of chunks.
591 // When a pool is full it is replenished by allocating another chunk.
592 struct __pool_resource::_Pool
593 {
594 // Smallest supported block size
595 static constexpr unsigned _S_min_block
596 = std::max(sizeof(void*), alignof(bitset::word));
597
598 _Pool(size_t __block_size, size_t __blocks_per_chunk)
599 : _M_chunks(),
600 _M_block_sz(__block_size),
601 _M_blocks_per_chunk(__blocks_per_chunk)
602 { }
603
604 // Must call release(r) before destruction!
605 ~_Pool() { __glibcxx_assert(_M_chunks.empty()); }
606
607 _Pool(_Pool&&) noexcept = default;
608 _Pool& operator=(_Pool&&) noexcept = default;
609
610 // Size of blocks in this pool
611 size_t block_size() const noexcept
612 { return _M_block_sz; }
613
614 // Allocate a block if the pool is not full, otherwise return null.
615 void* try_allocate() noexcept
616 {
617 const size_t blocksz = block_size();
618 if (!_M_chunks.empty())
619 {
620 auto& last = _M_chunks.back();
621 if (void* p = last.reserve(blocksz))
622 return p;
623 // TODO last is full, so move another chunk to the back instead?
624 for (auto it = _M_chunks.begin(); it != &last; ++it)
625 if (void* p = it->reserve(blocksz))
626 return p;
627 }
628 return nullptr;
629 }
630
631 // Allocate a block from the pool, replenishing from upstream if needed.
632 void* allocate(memory_resource* r, const pool_options& opts)
633 {
634 if (void* p = try_allocate())
635 return p;
636 replenish(r, opts);
637 return _M_chunks.back().reserve(block_size());
638 }
639
640 // Return a block to the pool.
641 bool deallocate(memory_resource*, void* p)
642 {
643 const size_t blocksz = block_size();
644 if (__builtin_expect(!_M_chunks.empty(), true))
645 {
646 auto& last = _M_chunks.back();
647 if (last.try_release(p, blocksz))
648 return true;
649 auto it = std::upper_bound(_M_chunks.begin(), &last, p);
650 if (it != _M_chunks.begin())
651 {
652 it--;
653 if (it->try_release(p, blocksz))
654 // If chunk is empty could return to upstream, but we don't
655 // currently do that. Pools only increase in size.
656 return true;
657 }
658 }
659 return false;
660 }
661
662 void replenish(memory_resource* __r, const pool_options& __opts)
663 {
664 using word = chunk::word;
665 const size_t __blocks = _M_blocks_per_chunk;
666 const auto __bits = chunk::bits_per_word;
667 const size_t __words = (__blocks + __bits - 1) / __bits;
668 const size_t __block_size = block_size();
669 size_t __bytes = __blocks * __block_size + __words * sizeof(word);
670 size_t __alignment = std::__ceil2(__block_size);
671 void* __p = __r->allocate(__bytes, __alignment);
672 __try
673 {
674 size_t __n = __blocks * __block_size;
675 void* __pwords = static_cast<char*>(__p) + __n;
676 _M_chunks.insert(chunk(__p, __bytes, __pwords, __blocks), __r);
677 }
678 __catch (...)
679 {
680 __r->deallocate(__p, __bytes, __alignment);
681 }
682 if (_M_blocks_per_chunk < __opts.max_blocks_per_chunk)
683 {
684 const size_t max_blocks
685 = (chunk::max_bytes_per_chunk() - sizeof(word))
686 / (__block_size + 0.125);
687 _M_blocks_per_chunk = std::min({
688 max_blocks,
689 __opts.max_blocks_per_chunk,
690 (size_t)_M_blocks_per_chunk * 2
691 });
692 }
693 }
694
695 void release(memory_resource* __r)
696 {
697 const size_t __alignment = std::__ceil2(block_size());
698 for (auto& __c : _M_chunks)
699 if (__c._M_p)
700 __r->deallocate(__c._M_p, __c._M_bytes, __alignment);
701 _M_chunks.clear(__r);
702 }
703
704 // A "resourceless vector" instead of pmr::vector, to save space.
705 // All resize operations need to be passed a memory resource, which
706 // obviously needs to be the same one every time.
707 // Chunks are kept sorted by address of their first block, except for
708 // the most recently-allocated Chunk which is at the end of the vector.
709 struct vector
710 {
711 using value_type = chunk;
712 using size_type = unsigned;
713 using iterator = value_type*;
714
715 // A vector owns its data pointer but not memory held by its elements.
716 chunk* data = nullptr;
717 size_type size = 0;
718 size_type capacity = 0;
719
720 vector() = default;
721
722 vector(size_type __n, memory_resource* __r)
723 : data(polymorphic_allocator<value_type>(__r).allocate(__n)),
724 capacity(__n)
725 { }
726
727 // Must call clear(r) before destruction!
728 ~vector() { __glibcxx_assert(data == nullptr); }
729
730 vector(vector&& __rval) noexcept
731 : data(__rval.data), size(__rval.size), capacity(__rval.capacity)
732 {
733 __rval.data = nullptr;
734 __rval.capacity = __rval.size = 0;
735 }
736
737 vector& operator=(vector&& __rval) noexcept
738 {
739 __glibcxx_assert(data == nullptr);
740 data = __rval.data;
741 size = __rval.size;
742 capacity = __rval.capacity;
743 __rval.data = nullptr;
744 __rval.capacity = __rval.size = 0;
745 return *this;
746 }
747
748 // void resize(size_type __n, memory_resource* __r);
749 // void reserve(size_type __n, memory_resource* __r);
750
751 void clear(memory_resource* __r)
752 {
753 if (!data)
754 return;
755 // Chunks must be individually freed before clearing the vector.
756 std::destroy(begin(), end());
757 polymorphic_allocator<value_type>(__r).deallocate(data, capacity);
758 data = nullptr;
759 capacity = size = 0;
760 }
761
762 // Sort existing elements then insert new one at the end.
763 iterator insert(chunk&& c, memory_resource* r)
764 {
765 if (size < capacity)
766 {
767 if (size > 1)
768 {
769 auto mid = end() - 1;
770 std::rotate(std::lower_bound(begin(), mid, *mid), mid, end());
771 }
772 }
773 else if (size > 0)
774 {
775 polymorphic_allocator<value_type> __alloc(r);
776 auto __mid = std::lower_bound(begin(), end() - 1, back());
777 auto __p = __alloc.allocate(capacity * 1.5);
778 // move [begin,__mid) to new storage
779 auto __p2 = std::move(begin(), __mid, __p);
780 // move end-1 to new storage
781 *__p2 = std::move(back());
782 // move [__mid,end-1) to new storage
783 std::move(__mid, end() - 1, ++__p2);
784 std::destroy(begin(), end());
785 __alloc.deallocate(data, capacity);
786 data = __p;
787 capacity *= 1.5;
788 }
789 else
790 {
791 polymorphic_allocator<value_type> __alloc(r);
792 data = __alloc.allocate(capacity = 8);
793 }
794 auto back = ::new (data + size) chunk(std::move(c));
795 __glibcxx_assert(std::is_sorted(begin(), back));
796 ++size;
797 return back;
798 }
799
800 iterator begin() const { return data; }
801 iterator end() const { return data + size; }
802
803 bool empty() const noexcept { return size == 0; }
804
805 value_type& back() { return data[size - 1]; }
806 };
807
808 vector _M_chunks;
809 unsigned _M_block_sz; // size of blocks allocated from this pool
810 unsigned _M_blocks_per_chunk; // number of blocks to allocate next
811 };
812
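// Illustrative example (not compiled here; restore_sorted_order is a
// hypothetical helper) of the lower_bound + rotate idiom used by
// _Pool::vector::insert above to move the most recently appended element
// into its sorted position:
//
//   #include <algorithm>
//   #include <vector>
//
//   void restore_sorted_order(std::vector<int>& v)
//   {
//     auto mid = v.end() - 1;  // the newest element
//     std::rotate(std::lower_bound(v.begin(), mid, *mid), mid, v.end());
//   }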
813 // An oversized allocation that doesn't fit in a pool.
814 struct __pool_resource::_BigBlock : big_block
815 {
816 using big_block::big_block;
817 };
818
819 namespace {
820
821 constexpr size_t pool_sizes[] = {
822 8, 16, 24,
823 32, 48,
824 64, 80, 96, 112,
825 128, 192,
826 256, 320, 384, 448,
827 512, 768,
828#if __SIZE_WIDTH__ > 16
829 1024, 1536,
830 2048, 3072,
831#if __SIZE_WIDTH__ > 20
832 1<<12, 1<<13, 1<<14,
833 1<<15, 1<<16, 1<<17,
834 1<<20, 1<<21, 1<<22 // 4MB should be enough for anybody
835#endif
836#endif
837 };
838
839 pool_options
840 munge_options(pool_options opts)
841 {
842 // The values in the returned struct may differ from those supplied
843 // to the pool resource constructor in that values of zero will be
844 // replaced with implementation-defined defaults, and sizes may be
845 // rounded to unspecified granularity.
846
847 // max_blocks_per_chunk sets the absolute maximum for the pool resource.
848 // Each pool might have a smaller maximum, because pools for very large
849 // objects might impose a smaller limit.
850 if (opts.max_blocks_per_chunk == 0)
851 {
852 // Pick a default that depends on the number of bits in size_t.
853 opts.max_blocks_per_chunk = __SIZE_WIDTH__ << 8;
854 }
855 else
856 {
857 // TODO round to preferred granularity ?
858 }
859
860 if (opts.max_blocks_per_chunk > chunk::max_blocks_per_chunk())
861 {
862 opts.max_blocks_per_chunk = chunk::max_blocks_per_chunk();
863 }
864
865 // largest_required_pool_block specifies the largest block size that will
866 // be allocated from a pool. Larger allocations will come directly from
867 // the upstream resource and so will not be pooled.
868 if (opts.largest_required_pool_block == 0)
869 {
870 // Pick a sensible default that depends on the number of bits in size_t
871 // (pools with larger block sizes must be explicitly requested by
872 // using a non-zero value for largest_required_pool_block).
873 opts.largest_required_pool_block = __SIZE_WIDTH__ << 6;
874 }
875 else
876 {
877 // Round to preferred granularity
878 static_assert(std::__ispow2(pool_sizes[0]));
879 constexpr size_t mask = pool_sizes[0] - 1;
880 opts.largest_required_pool_block += mask;
881 opts.largest_required_pool_block &= ~mask;
882 }
883
884 if (opts.largest_required_pool_block < big_block::min)
885 {
886 opts.largest_required_pool_block = big_block::min;
887 }
888 else if (opts.largest_required_pool_block > std::end(pool_sizes)[-1])
889 {
890 // Setting _M_opts to the largest pool allows users to query it:
891 opts.largest_required_pool_block = std::end(pool_sizes)[-1];
892 }
893 return opts;
894 }
895
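// Worked example (illustrative, not compiled here) of munge_options on a
// 64-bit target: a default pool_options{} becomes
// max_blocks_per_chunk == 64 << 8 == 16384 and
// largest_required_pool_block == 64 << 6 == 4096. A user-supplied
// largest_required_pool_block is rounded up to a multiple of
// pool_sizes[0] == 8, e.g. 1001 becomes 1008.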
896 inline int
897 pool_index(size_t block_size, int npools)
898 {
899 auto p = std::lower_bound(pool_sizes, pool_sizes + npools, block_size);
900 int n = p - pool_sizes;
901 if (n != npools)
902 return n;
903 return -1;
904 }
905
906 inline int
907 select_num_pools(const pool_options& opts)
908 {
909 auto p = std::lower_bound(std::begin(pool_sizes), std::end(pool_sizes),
910 opts.largest_required_pool_block);
911 const int n = p - std::begin(pool_sizes);
912 if (p == std::end(pool_sizes))
913 return n;
914 return n + 1;
915 }
916
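// Illustrative example (not compiled here): pool_index(24, npools) returns 2
// because pool_sizes[2] == 24, and pool_index(100, npools) returns 8 because
// the first pool size not less than 100 is pool_sizes[8] == 112. If the
// lower_bound search reaches npools the function returns -1 and the caller
// falls back to the unpooled path.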
917#ifdef _GLIBCXX_HAS_GTHREADS
918 using shared_lock = std::shared_lock<shared_mutex>;
919 using exclusive_lock = lock_guard<shared_mutex>;
920#endif
921
922 } // namespace
923
924 __pool_resource::
925 __pool_resource(const pool_options& opts, memory_resource* upstream)
926 : _M_opts(munge_options(opts)), _M_unpooled(upstream),
927 _M_npools(select_num_pools(_M_opts))
928 { }
929
930 __pool_resource::~__pool_resource() { release(); }
931
932 void
933 __pool_resource::release() noexcept
934 {
935 memory_resource* res = resource();
936 // deallocate oversize allocations
937 for (auto& b : _M_unpooled)
938 res->deallocate(b.pointer, b.size(), b.align());
939 pmr::vector<_BigBlock>{res}.swap(_M_unpooled);
940 }
941
942 void*
943 __pool_resource::allocate(size_t bytes, size_t alignment)
944 {
945 auto& b = _M_unpooled.emplace_back(bytes, alignment);
946 __try {
947 // N.B. need to allocate b.size(), which might be larger than bytes.
948 void* p = resource()->allocate(b.size(), alignment);
949 b.pointer = p;
950 if (_M_unpooled.size() > 1)
951 {
952 const auto mid = _M_unpooled.end() - 1;
953 // move to right position in vector
954 std::rotate(std::lower_bound(_M_unpooled.begin(), mid, p),
955 mid, _M_unpooled.end());
956 }
957 return p;
958 } __catch(...) {
959 _M_unpooled.pop_back();
960 __throw_exception_again;
961 }
962 }
963
964 void
965 __pool_resource::deallocate(void* p, size_t bytes [[maybe_unused]],
966 size_t alignment [[maybe_unused]])
967 {
968 const auto it
969 = std::lower_bound(_M_unpooled.begin(), _M_unpooled.end(), p);
970 __glibcxx_assert(it != _M_unpooled.end() && it->pointer == p);
971 if (it != _M_unpooled.end() && it->pointer == p) // [[likely]]
972 {
973 const auto b = *it;
974 __glibcxx_assert(b.size() == b.alloc_size(bytes));
975 __glibcxx_assert(b.align() == alignment);
976 _M_unpooled.erase(it);
977 // N.B. need to deallocate b.size(), which might be larger than bytes.
978 resource()->deallocate(p, b.size(), b.align());
979 }
980 }
981
982 // Create array of pools, allocated from upstream resource.
983 auto
984 __pool_resource::_M_alloc_pools()
985 -> _Pool*
986 {
987 polymorphic_allocator<_Pool> alloc{resource()};
988 _Pool* p = alloc.allocate(_M_npools);
989 for (int i = 0; i < _M_npools; ++i)
990 {
991 // For last pool use largest_required_pool_block
992 const size_t block_size = (i+1 == _M_npools)
993 ? _M_opts.largest_required_pool_block
994 : pool_sizes[i];
995
996 // Decide on initial number of blocks per chunk.
997 // Always have at least 16 blocks per chunk:
998 const size_t min_blocks_per_chunk = 16;
999 // But for smaller blocks, use a larger initial size:
1000 size_t blocks_per_chunk
1001 = std::max(1024 / block_size, min_blocks_per_chunk);
1002 // But don't exceed the requested max_blocks_per_chunk:
1003 blocks_per_chunk
1004 = std::min(blocks_per_chunk, _M_opts.max_blocks_per_chunk);
1005 // Allow space for bitset to track which blocks are used/unused:
1006 blocks_per_chunk *= 1 - 1.0 / (__CHAR_BIT__ * block_size);
1007 // Construct a _Pool for the given block size and initial chunk size:
1008 alloc.construct(p + i, block_size, blocks_per_chunk);
1009 }
1010 return p;
1011 }
1012
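// Worked example (illustrative, not compiled here) of the computation above
// for block_size == 8 with default options: max(1024 / 8, 16) == 128 blocks,
// and the bitset adjustment multiplies by 1 - 1/(8 * 8), giving 126 blocks.
// The first chunk of the 8-byte pool is then 126 * 8 + 2 * 8 == 1024 bytes
// (126 blocks plus two 64-bit bitset words).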
1013#ifdef _GLIBCXX_HAS_GTHREADS
1014 // synchronized_pool_resource members.
1015
1016 /* Notes on implementation and thread safety:
1017 *
1018 * Each synchronized_pool_resource manages a linked list of N+1 _TPools
1019 * objects, where N is the number of threads using the pool resource.
1020 * Each _TPools object has its own set of pools, with their own chunks.
1021 * The first element of the list, _M_tpools[0], can be used by any thread.
1022 * The rest of the list contains a _TPools object for each thread,
1023 * accessed via the thread-specific key _M_key (and referred to for
1024 * exposition as _M_tpools[_M_key]).
1025 * The first element, _M_tpools[0], contains "orphaned chunks" which were
1026 * allocated by a thread which has since exited, and so there is no
1027 * _M_tpools[_M_key] for that thread.
1028 * A thread can access its own thread-specific set of pools via _M_key
1029 * while holding a shared lock on _M_mx. Accessing _M_impl._M_unpooled
1030 * or _M_tpools[0] or any other thread's _M_tpools[_M_key] requires an
1031 * exclusive lock.
1032 * The upstream_resource() pointer can be obtained without a lock, but
1033 * any dereference of that pointer requires an exclusive lock.
1034 * The _M_impl._M_opts and _M_impl._M_npools members are immutable,
1035 * and can safely be accessed concurrently.
1036 */
1037
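// Illustrative example (not compiled here; the names pool, worker, t1 and t2
// are hypothetical): several threads can share one resource object, and each
// thread gets its own _TPools entry on its first allocation.
//
//   std::pmr::synchronized_pool_resource pool;
//   auto worker = [&pool] {
//     std::pmr::vector<int> v(&pool);  // allocations use this thread's pools
//     v.resize(1000);
//   };
//   std::thread t1(worker), t2(worker);
//   t1.join(); t2.join();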
1038 extern "C" {
1039 static void destroy_TPools(void*);
1040 }
1041
1042 struct synchronized_pool_resource::_TPools
1043 {
1044 // Exclusive lock must be held in the thread where this constructor runs.
1045 explicit
1046 _TPools(synchronized_pool_resource& owner, exclusive_lock&)
1047 : owner(owner), pools(owner._M_impl._M_alloc_pools())
1048 {
1049 // __builtin_printf("%p constructing\n", this);
1050 __glibcxx_assert(pools);
1051 }
1052
1053 // Exclusive lock must be held in the thread where this destructor runs.
1054 ~_TPools()
1055 {
1056 __glibcxx_assert(pools);
1057 if (pools)
1058 {
1059 memory_resource* r = owner.upstream_resource();
1060 for (int i = 0; i < owner._M_impl._M_npools; ++i)
1061 pools[i].release(r);
1062 std::destroy_n(pools, owner._M_impl._M_npools);
1063 polymorphic_allocator<__pool_resource::_Pool> a(r);
1064 a.deallocate(pools, owner._M_impl._M_npools);
1065 }
1066 if (prev)
1067 prev->next = next;
1068 if (next)
1069 next->prev = prev;
1070 }
1071
1072 // Exclusive lock must be held in the thread where this function runs.
1073 void move_nonempty_chunks()
1074 {
1075 __glibcxx_assert(pools);
1076 if (!pools)
1077 return;
1078 memory_resource* r = owner.upstream_resource();
1079 // move all non-empty chunks to the shared _TPools
1080 for (int i = 0; i < owner._M_impl._M_npools; ++i)
1081 for (auto& c : pools[i]._M_chunks)
1082 if (!c.empty())
1083 owner._M_tpools->pools[i]._M_chunks.insert(std::move(c), r);
1084 }
1085
1086 synchronized_pool_resource& owner;
1087 __pool_resource::_Pool* pools = nullptr;
1088 _TPools* prev = nullptr;
1089 _TPools* next = nullptr;
1090
1091 static void destroy(_TPools* p)
1092 {
1093 exclusive_lock l(p->owner._M_mx);
1094 // __glibcxx_assert(p != p->owner._M_tpools);
1095 p->move_nonempty_chunks();
1096 polymorphic_allocator<_TPools> a(p->owner.upstream_resource());
1097 p->~_TPools();
1098 a.deallocate(p, 1);
1099 }
1100 };
1101
1102 // Called when a thread exits
1103 extern "C" {
1104 static void destroy_TPools(void* p)
1105 {
1106 using _TPools = synchronized_pool_resource::_TPools;
1107 _TPools::destroy(static_cast<_TPools*>(p));
1108 }
1109 }
1110
1111 // Constructor
1112 synchronized_pool_resource::
1113 synchronized_pool_resource(const pool_options& opts,
1114 memory_resource* upstream)
1115 : _M_impl(opts, upstream)
1116 {
1117 if (int err = __gthread_key_create(&_M_key, destroy_TPools))
1118 __throw_system_error(err);
1119 exclusive_lock l(_M_mx);
1120 _M_tpools = _M_alloc_shared_tpools(l);
1121 }
1122
1123 // Destructor
1124 synchronized_pool_resource::~synchronized_pool_resource()
1125 {
1126 release();
1127 __gthread_key_delete(_M_key); // does not run destroy_TPools
1128 }
1129
1130 void
1131 synchronized_pool_resource::release()
1132 {
1133 exclusive_lock l(_M_mx);
1134 if (_M_tpools)
1135 {
1136 __gthread_key_delete(_M_key); // does not run destroy_TPools
1137 __gthread_key_create(&_M_key, destroy_TPools);
1138 polymorphic_allocator<_TPools> a(upstream_resource());
1139 // destroy+deallocate each _TPools
1140 do
1141 {
1142 _TPools* p = _M_tpools;
1143 _M_tpools = _M_tpools->next;
1144 p->~_TPools();
1145 a.deallocate(p, 1);
1146 }
1147 while (_M_tpools);
1148 }
1149 // release unpooled memory
1150 _M_impl.release();
1151 }
1152
1153 // Caller must hold shared or exclusive lock to ensure the pointer
1154 // isn't invalidated before it can be used.
1155 auto
1156 synchronized_pool_resource::_M_thread_specific_pools() noexcept
1157 {
1158 __pool_resource::_Pool* pools = nullptr;
1159 if (auto tp = static_cast<_TPools*>(__gthread_getspecific(_M_key)))
1160 {
1161 pools = tp->pools;
1162 __glibcxx_assert(tp->pools);
1163 }
1164 return pools;
1165 }
1166
1167 // Override for memory_resource::do_allocate
1168 void*
1169 synchronized_pool_resource::
1170 do_allocate(size_t bytes, size_t alignment)
1171 {
1172 const auto block_size = std::max(bytes, alignment);
1173 if (block_size <= _M_impl._M_opts.largest_required_pool_block)
1174 {
1175 const ptrdiff_t index = pool_index(block_size, _M_impl._M_npools);
1176 memory_resource* const r = upstream_resource();
1177 const pool_options opts = _M_impl._M_opts;
1178 {
1179 // Try to allocate from the thread-specific pool
1180 shared_lock l(_M_mx);
1181 if (auto pools = _M_thread_specific_pools()) // [[likely]]
1182 {
1183 // Need exclusive lock to replenish so use try_allocate:
1184 if (void* p = pools[index].try_allocate())
1185 return p;
1186 // Need to take exclusive lock and replenish pool.
1187 }
1188 // Need to allocate or replenish thread-specific pools using
1189 // upstream resource, so need to hold exclusive lock.
1190 }
1191 // N.B. Another thread could call release() now that the lock is not held.
1192 exclusive_lock excl(_M_mx);
1193 if (!_M_tpools) // [[unlikely]]
1194 _M_tpools = _M_alloc_shared_tpools(excl);
1195 auto pools = _M_thread_specific_pools();
1196 if (!pools)
1197 pools = _M_alloc_tpools(excl)->pools;
1198 return pools[index].allocate(r, opts);
1199 }
1200 exclusive_lock l(_M_mx);
1201 return _M_impl.allocate(bytes, alignment); // unpooled allocation
1202 }
1203
1204 // Override for memory_resource::do_deallocate
1205 void
1206 synchronized_pool_resource::
1207 do_deallocate(void* p, size_t bytes, size_t alignment)
1208 {
1209 size_t block_size = std::max(bytes, alignment);
1210 if (block_size <= _M_impl._M_opts.largest_required_pool_block)
1211 {
1212 const ptrdiff_t index = pool_index(block_size, _M_impl._M_npools);
1213 __glibcxx_assert(index != -1);
1214 {
1215 shared_lock l(_M_mx);
1216 auto pools = _M_thread_specific_pools();
1217 if (pools)
1218 {
1219 // No need to lock here, no other thread is accessing this pool.
1220 if (pools[index].deallocate(upstream_resource(), p))
1221 return;
1222 }
1223 // Block might have come from a different thread's pool,
1224 // take exclusive lock and check every pool.
1225 }
1226 // TODO store {p, bytes, alignment} somewhere and defer returning
1227 // the block to the correct thread-specific pool until we next
1228 // take the exclusive lock.
1229 exclusive_lock excl(_M_mx);
1230 for (_TPools* t = _M_tpools; t != nullptr; t = t->next)
1231 {
1232 if (t->pools) // [[likely]]
1233 {
1234 if (t->pools[index].deallocate(upstream_resource(), p))
1235 return;
1236 }
1237 }
1238 }
1239 exclusive_lock l(_M_mx);
1240 _M_impl.deallocate(p, bytes, alignment);
1241 }
1242
1243 // Allocate a thread-specific _TPools object and add it to the linked list.
1244 auto
1245 synchronized_pool_resource::_M_alloc_tpools(exclusive_lock& l)
1246 -> _TPools*
1247 {
1248 __glibcxx_assert(_M_tpools != nullptr);
1249 // dump_list(_M_tpools);
1250 polymorphic_allocator<_TPools> a(upstream_resource());
1251 _TPools* p = a.allocate(1);
1252 bool constructed = false;
1253 __try
1254 {
1255 a.construct(p, *this, l);
1256 constructed = true;
1257 // __glibcxx_assert(__gthread_getspecific(_M_key) == nullptr);
1258 if (int err = __gthread_setspecific(_M_key, p))
1259 __throw_system_error(err);
1260 }
1261 __catch(...)
1262 {
1263 if (constructed)
1264 a.destroy(p);
1265 a.deallocate(p, 1);
1266 __throw_exception_again;
1267 }
1268 p->prev = _M_tpools;
1269 p->next = _M_tpools->next;
1270 _M_tpools->next = p;
1271 if (p->next)
1272 p->next->prev = p;
1273 return p;
1274 }
1275
1276 // Allocate the shared _TPools object, _M_tpools[0]
1277 auto
1278 synchronized_pool_resource::_M_alloc_shared_tpools(exclusive_lock& l)
1279 -> _TPools*
1280 {
1281 __glibcxx_assert(_M_tpools == nullptr);
1282 polymorphic_allocator<_TPools> a(upstream_resource());
1283 _TPools* p = a.allocate(1);
1284 __try
1285 {
1286 a.construct(p, *this, l);
1287 }
1288 __catch(...)
1289 {
1290 a.deallocate(p, 1);
1291 __throw_exception_again;
1292 }
1293 // __glibcxx_assert(p->next == nullptr);
1294 // __glibcxx_assert(p->prev == nullptr);
1295 return p;
1296 }
1297#endif // _GLIBCXX_HAS_GTHREADS
1298
1299 // unsynchronized_pool_resource member functions
1300
1301 // Constructor
1302 unsynchronized_pool_resource::
1303 unsynchronized_pool_resource(const pool_options& opts,
1304 memory_resource* upstream)
1305 : _M_impl(opts, upstream), _M_pools(_M_impl._M_alloc_pools())
1306 { }
1307
1308 // Destructor
1309 unsynchronized_pool_resource::~unsynchronized_pool_resource()
1310 { release(); }
1311
1312 // Return all memory to upstream resource.
1313 void
1314 unsynchronized_pool_resource::release()
1315 {
1316 // release pooled memory
1317 if (_M_pools)
1318 {
1319 memory_resource* res = upstream_resource();
1320 polymorphic_allocator<_Pool> alloc{res};
1321 for (int i = 0; i < _M_impl._M_npools; ++i)
1322 {
1323 _M_pools[i].release(res);
1324 alloc.destroy(_M_pools + i);
1325 }
1326 alloc.deallocate(_M_pools, _M_impl._M_npools);
1327 _M_pools = nullptr;
1328 }
1329
1330 // release unpooled memory
1331 _M_impl.release();
1332 }
1333
1334 // Find the right pool for a block of size block_size.
1335 auto
1336 unsynchronized_pool_resource::_M_find_pool(size_t block_size) noexcept
1337 {
1338 __pool_resource::_Pool* pool = nullptr;
1339 if (_M_pools) // [[likely]]
1340 {
1341 int index = pool_index(block_size, _M_impl._M_npools);
1342 if (index != -1)
1343 pool = _M_pools + index;
1344 }
1345 return pool;
1346 }
1347
1348 // Override for memory_resource::do_allocate
1349 void*
1350 unsynchronized_pool_resource::do_allocate(size_t bytes, size_t alignment)
1351 {
1352 const auto block_size = std::max(bytes, alignment);
1353 if (block_size <= _M_impl._M_opts.largest_required_pool_block)
1354 {
1355 // Recreate pools if release() has been called:
1356 if (__builtin_expect(_M_pools == nullptr, false))
1357 _M_pools = _M_impl._M_alloc_pools();
1358 if (auto pool = _M_find_pool(block_size))
1359 return pool->allocate(upstream_resource(), _M_impl._M_opts);
1360 }
1361 return _M_impl.allocate(bytes, alignment);
1362 }
1363
1364 // Override for memory_resource::do_deallocate
1365 void
1366 unsynchronized_pool_resource::
1367 do_deallocate(void* p, size_t bytes, size_t alignment)
1368 {
1369 size_t block_size = std::max(bytes, alignment);
1370 if (block_size <= _M_impl._M_opts.largest_required_pool_block)
1371 {
1372 if (auto pool = _M_find_pool(block_size))
1373 {
1374 pool->deallocate(upstream_resource(), p);
1375 return;
1376 }
1377 }
1378 _M_impl.deallocate(p, bytes, alignment);
1379 }
1380
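// Illustrative example (not compiled here; the names opts, pool and v are
// hypothetical) of configuring an unsynchronized_pool_resource. Requests
// larger than largest_required_pool_block bypass the pools and go directly
// to the upstream resource via __pool_resource::allocate.
//
//   std::pmr::pool_options opts;
//   opts.max_blocks_per_chunk = 256;
//   opts.largest_required_pool_block = 1 << 10;
//   std::pmr::unsynchronized_pool_resource pool(opts,
//                                               std::pmr::new_delete_resource());
//   std::pmr::vector<char> v(&pool);
//   v.resize(100);     // served from the 112-byte pool
//   v.resize(100000);  // larger than 1024 bytes: served by the upstream resource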
1381} // namespace pmr
1382_GLIBCXX_END_NAMESPACE_VERSION
1383} // namespace std