// versions will not use this symbol.
monotonic_buffer_resource::~monotonic_buffer_resource() { release(); }
- namespace {
-
+namespace
+{
// aligned_size<N> stores the size and alignment of a memory allocation.
// The size must be a multiple of N, leaving the low log2(N) bits free
// to store the base-2 logarithm of the alignment.
return (n + alignment - 1) & ~(alignment - 1);
}
- } // namespace
+} // namespace
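// [Illustrative sketch, not part of this patch.]  One way a packed
// size/alignment word like the aligned_size<N> described above can work,
// assuming N == 8: a size that is a multiple of 8 has its low three bits
// clear, so those bits can hold log2(alignment) for alignments up to 128.
// All names below are hypothetical.
struct packed_size_example
{
  static size_t
  encode(size_t size, size_t align) // size % 8 == 0, align a power of two
  {
    size_t log2align = 0;
    while ((size_t(1) << log2align) < align)
      ++log2align;
    return size | log2align;   // low 3 bits hold log2(align)
  }

  static size_t size_of(size_t v)  { return v & ~size_t(7); }
  static size_t align_of(size_t v) { return size_t(1) << (v & 7); }
};
// e.g. encode(64, 16) == 68, size_of(68) == 64, align_of(68) == 16.
// The expression above, (n + alignment - 1) & ~(alignment - 1), is the usual
// round-up idiom that keeps the stored size a multiple of the alignment:
// n == 20, alignment == 8 gives (20 + 7) & ~7 == 24.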
// Memory allocated by the upstream resource is managed in a linked list
// of _Chunk objects. A _Chunk object recording the size and alignment of
// Helper types for synchronized_pool_resource & unsynchronized_pool_resource
- namespace {
-
+namespace
+{
// Simple bitset with runtime size.
// Tracks which blocks in a pool chunk are used/unused.
struct bitset
static_assert(sizeof(big_block) == (2 * sizeof(void*)));
- } // namespace
+} // namespace
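// [Illustrative sketch, not the bitset type defined above.]  The general
// shape of a runtime-sized bitset: a view over a word array supplied by the
// chunk, where bit i records whether block i is currently in use.
struct tiny_bitset_example
{
  static constexpr size_t bits_per_word = sizeof(size_t) * 8;

  size_t* words;  // storage owned by the chunk
  size_t  nbits;  // number of blocks tracked

  bool test(size_t i) const
  { return (words[i / bits_per_word] >> (i % bits_per_word)) & 1; }

  void set(size_t i)
  { words[i / bits_per_word] |= size_t(1) << (i % bits_per_word); }

  void clear(size_t i)
  { words[i / bits_per_word] &= ~(size_t(1) << (i % bits_per_word)); }
};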
// A pool that serves blocks of a particular size.
// Each pool manages a number of chunks.
using big_block::big_block;
};
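// [Illustrative sketch, not the actual chunk/pool classes.]  A minimal model
// of the structure described above: each chunk is a single upstream
// allocation carved into equal-sized blocks, and a pool keeps a list of
// chunks for its one block size.  Member names here are hypothetical.
struct chunk_model
{
  void*        blocks;     // start of this chunk's block array
  size_t       n_blocks;   // how many blocks the chunk holds
  size_t*      used_bits;  // bitset marking which blocks are handed out
  chunk_model* next;       // a pool's chunks form an intrusive list
};

struct pool_model
{
  size_t       block_size; // every block served by this pool has this size
  chunk_model* chunks;     // searched for a free block on allocation
};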
- namespace {
+namespace
+{
+ // N.B. it is important that we don't skip any power-of-two sizes if there
+ // is a non-power-of-two size between them, e.g. we must not have pool
+ // sizes of 24 and 40 without also having a pool size of 32. Otherwise an
+ // allocation of 32 bytes with alignment 16 would choose the 40-byte pool,
+ // which cannot guarantee 16-byte alignment. It would be OK (but suboptimal)
+ // to have no pool of size 32 if we have pool sizes of 16 and 64 and no
+ // non-power-of-two sizes between those, because the example of (32, 16)
+ // would then choose the 64-byte pool, which is correctly aligned.
constexpr size_t pool_sizes[] = {
8, 16, 24,
using exclusive_lock = lock_guard<shared_mutex>;
#endif
- } // namespace
+} // namespace
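// [Illustrative sketch, not the actual selection logic.]  Why the rule in
// the N.B. comment above matters: blocks in a pool are packed at a stride
// equal to the pool's block size, so a pool of size S can only guarantee
// alignments that divide S.  If the sizes jumped from 24 to 40, a request
// for 32 bytes with 16-byte alignment would pick the 40-byte pool, and
// 40 % 16 != 0, so not every 40-byte block can be 16-byte aligned.
inline size_t
pick_pool_size_example(size_t bytes, size_t alignment)
{
  // Round up as choose_block_size does (alignment assumed a power of two).
  const size_t block = (bytes + alignment - 1) & ~(alignment - 1);
  for (size_t s : pool_sizes)
    if (s >= block)
      return s;  // smallest pool size that can hold the block
  return 0;      // larger than every pool: served directly by the upstream
}
// With 32 present in pool_sizes, pick_pool_size_example(32, 16) == 32,
// and 32 is a multiple of 16, so every block in that pool can be aligned.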
__pool_resource::
__pool_resource(const pool_options& opts, memory_resource* upstream)
return p;
}
+ // Determine the appropriate allocation size, rounding up to a multiple
+ // of the alignment if needed.
+ static inline size_t
+ choose_block_size(size_t bytes, size_t alignment)
+ {
+ if (bytes == 0) [[unlikely]]
+ return alignment;
+
+ // Use bit_ceil in case alignment is invalid (i.e. not a power of two).
+ size_t mask = std::__bit_ceil(alignment) - 1;
+ // Round up to a multiple of alignment.
+ size_t block_size = (bytes + mask) & ~mask;
+
+ if (block_size >= bytes) [[likely]]
+ return block_size;
+
+ // Wrapped around to zero, bytes must have been impossibly large.
+ return numeric_limits<size_t>::max();
+ }
+
+
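// [Illustrative note, not part of this patch.]  Example values for the
// choose_block_size function added above: choose_block_size(10, 8) == 16,
// choose_block_size(24, 16) == 32, choose_block_size(0, 64) == 64, and for
// bytes so large that the rounding wraps past zero it returns
// numeric_limits<size_t>::max().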
#ifdef _GLIBCXX_HAS_GTHREADS
// synchronized_pool_resource members.
/* Notes on implementation and thread safety:
*
- * Each synchronized_pool_resource manages an linked list of N+1 _TPools
+ * Each synchronized_pool_resource manages a linked list of N+1 _TPools
* objects, where N is the number of threads using the pool resource.
* Each _TPools object has its own set of pools, with their own chunks.
* The first element of the list, _M_tpools[0], can be used by any thread.
return pools;
}
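// [Illustrative note, not from the library source.]  Shape of the structure
// described in the notes above, for N threads using the resource:
//
//   _M_tpools[0]  -- shared element, usable by any thread
//     -> _TPools for thread 1 -> _TPools for thread 2 -> ... (N elements)
//
// Each element owns its own pools and chunks, so in the common case a
// thread allocates from its own element without touching data used by any
// other thread.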
- static inline size_t
- choose_block_size(size_t bytes, size_t alignment)
- {
- if (bytes == 0) [[unlikely]]
- return alignment;
-
- // Use bit_ceil in case alignment is invalid (i.e. not a power of two).
- size_t mask = std::__bit_ceil(alignment) - 1;
- // Round up to a multiple of alignment.
- size_t block_size = (bytes + mask) & ~mask;
-
- if (block_size >= bytes) [[likely]]
- return block_size;
-
- // Wrapped around to zero, bytes must have been impossibly large.
- return numeric_limits<size_t>::max();
- }
-
// Override for memory_resource::do_allocate
void*
synchronized_pool_resource::