1 // Allocators -*- C++ -*-
3 // Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
5 // Free Software Foundation, Inc.
7 // This file is part of the GNU ISO C++ Library. This library is free
8 // software; you can redistribute it and/or modify it under the
9 // terms of the GNU General Public License as published by the
10 // Free Software Foundation; either version 3, or (at your option)
13 // This library is distributed in the hope that it will be useful,
14 // but WITHOUT ANY WARRANTY; without even the implied warranty of
15 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 // GNU General Public License for more details.
18 // Under Section 7 of GPL version 3, you are granted additional
19 // permissions described in the GCC Runtime Library Exception, version
20 // 3.1, as published by the Free Software Foundation.
22 // You should have received a copy of the GNU General Public License and
23 // a copy of the GCC Runtime Library Exception along with this program;
24 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
25 // <http://www.gnu.org/licenses/>.
28 * Copyright (c) 1996-1997
29 * Silicon Graphics Computer Systems, Inc.
31 * Permission to use, copy, modify, distribute and sell this software
32 * and its documentation for any purpose is hereby granted without fee,
33 * provided that the above copyright notice appear in all copies and
34 * that both that copyright notice and this permission notice appear
35 * in supporting documentation. Silicon Graphics makes no
36 * representations about the suitability of this software for any
37 * purpose. It is provided "as is" without express or implied warranty.
40 /** @file ext/pool_allocator.h
41 * This file is a GNU extension to the Standard C++ Library.
44 #ifndef _POOL_ALLOCATOR_H
45 #define _POOL_ALLOCATOR_H 1
#include <bits/c++config.h>
#include <cstdlib>
#include <new>
#include <bits/functexcept.h>
#include <ext/atomicity.h>
#include <ext/concurrence.h>
#include <bits/move.h>
55 namespace __gnu_cxx
_GLIBCXX_VISIBILITY(default)
57 _GLIBCXX_BEGIN_NAMESPACE_VERSION
63 * @brief Base class for __pool_alloc.
65 * Uses various allocators to fulfill underlying requests (and makes as
66 * few requests as possible when in default high-speed pool mode).
68 * Important implementation properties:
69 * 0. If globally mandated, then allocate objects from new
70 * 1. If the clients request an object of size > _S_max_bytes, the resulting
71 * object will be obtained directly from new
72 * 2. In all other cases, we allocate an object of size exactly
73 * _S_round_up(requested_size). Thus the client has enough size
74 * information that we can return the object to the proper free list
75 * without permanently losing part of the object.
77 class __pool_alloc_base
81 enum { _S_align
= 8 };
82 enum { _S_max_bytes
= 128 };
83 enum { _S_free_list_size
= (size_t)_S_max_bytes
/ (size_t)_S_align
};
87 union _Obj
* _M_free_list_link
;
88 char _M_client_data
[1]; // The client sees this.
91 static _Obj
* volatile _S_free_list
[_S_free_list_size
];
93 // Chunk allocation state.
94 static char* _S_start_free
;
95 static char* _S_end_free
;
96 static size_t _S_heap_size
;
99 _M_round_up(size_t __bytes
)
100 { return ((__bytes
+ (size_t)_S_align
- 1) & ~((size_t)_S_align
- 1)); }
102 _GLIBCXX_CONST _Obj
* volatile*
103 _M_get_free_list(size_t __bytes
) throw ();
106 _M_get_mutex() throw ();
108 // Returns an object of size __n, and optionally adds to size __n
111 _M_refill(size_t __n
);
113 // Allocates a chunk for nobjs of size size. nobjs may be reduced
114 // if it is inconvenient to allocate the requested number.
116 _M_allocate_chunk(size_t __n
, int& __nobjs
);
121 * @brief Allocator using a memory pool with a single lock.
122 * @ingroup allocators
124 template<typename _Tp
>
125 class __pool_alloc
: private __pool_alloc_base
128 static _Atomic_word _S_force_new
;
131 typedef size_t size_type
;
132 typedef ptrdiff_t difference_type
;
133 typedef _Tp
* pointer
;
134 typedef const _Tp
* const_pointer
;
135 typedef _Tp
& reference
;
136 typedef const _Tp
& const_reference
;
137 typedef _Tp value_type
;
139 template<typename _Tp1
>
141 { typedef __pool_alloc
<_Tp1
> other
; };
143 __pool_alloc() _GLIBCXX_USE_NOEXCEPT
{ }
145 __pool_alloc(const __pool_alloc
&) _GLIBCXX_USE_NOEXCEPT
{ }
147 template<typename _Tp1
>
148 __pool_alloc(const __pool_alloc
<_Tp1
>&) _GLIBCXX_USE_NOEXCEPT
{ }
150 ~__pool_alloc() _GLIBCXX_USE_NOEXCEPT
{ }
153 address(reference __x
) const _GLIBCXX_NOEXCEPT
154 { return std::__addressof(__x
); }
157 address(const_reference __x
) const _GLIBCXX_NOEXCEPT
158 { return std::__addressof(__x
); }
161 max_size() const _GLIBCXX_USE_NOEXCEPT
162 { return size_t(-1) / sizeof(_Tp
); }
164 #if __cplusplus >= 201103L
165 template<typename _Up
, typename
... _Args
>
167 construct(_Up
* __p
, _Args
&&... __args
)
168 { ::new((void *)__p
) _Up(std::forward
<_Args
>(__args
)...); }
170 template<typename _Up
>
172 destroy(_Up
* __p
) { __p
->~_Up(); }
174 // _GLIBCXX_RESOLVE_LIB_DEFECTS
175 // 402. wrong new expression in [some_] allocator::construct
177 construct(pointer __p
, const _Tp
& __val
)
178 { ::new((void *)__p
) _Tp(__val
); }
181 destroy(pointer __p
) { __p
->~_Tp(); }
185 allocate(size_type __n
, const void* = 0);
188 deallocate(pointer __p
, size_type __n
);
191 template<typename _Tp
>
193 operator==(const __pool_alloc
<_Tp
>&, const __pool_alloc
<_Tp
>&)
196 template<typename _Tp
>
198 operator!=(const __pool_alloc
<_Tp
>&, const __pool_alloc
<_Tp
>&)
201 template<typename _Tp
>
203 __pool_alloc
<_Tp
>::_S_force_new
;
205 template<typename _Tp
>
207 __pool_alloc
<_Tp
>::allocate(size_type __n
, const void*)
210 if (__builtin_expect(__n
!= 0, true))
212 if (__n
> this->max_size())
213 std::__throw_bad_alloc();
215 // If there is a race through here, assume answer from getenv
216 // will resolve in same direction. Inspired by techniques
217 // to efficiently support threading found in basic_string.h.
218 if (_S_force_new
== 0)
220 if (std::getenv("GLIBCXX_FORCE_NEW"))
221 __atomic_add_dispatch(&_S_force_new
, 1);
223 __atomic_add_dispatch(&_S_force_new
, -1);
226 const size_t __bytes
= __n
* sizeof(_Tp
);
227 if (__bytes
> size_t(_S_max_bytes
) || _S_force_new
> 0)
228 __ret
= static_cast<_Tp
*>(::operator new(__bytes
));
231 _Obj
* volatile* __free_list
= _M_get_free_list(__bytes
);
233 __scoped_lock
sentry(_M_get_mutex());
234 _Obj
* __restrict__ __result
= *__free_list
;
235 if (__builtin_expect(__result
== 0, 0))
236 __ret
= static_cast<_Tp
*>(_M_refill(_M_round_up(__bytes
)));
239 *__free_list
= __result
->_M_free_list_link
;
240 __ret
= reinterpret_cast<_Tp
*>(__result
);
243 std::__throw_bad_alloc();
249 template<typename _Tp
>
251 __pool_alloc
<_Tp
>::deallocate(pointer __p
, size_type __n
)
253 if (__builtin_expect(__n
!= 0 && __p
!= 0, true))
255 const size_t __bytes
= __n
* sizeof(_Tp
);
256 if (__bytes
> static_cast<size_t>(_S_max_bytes
) || _S_force_new
> 0)
257 ::operator delete(__p
);
260 _Obj
* volatile* __free_list
= _M_get_free_list(__bytes
);
261 _Obj
* __q
= reinterpret_cast<_Obj
*>(__p
);
263 __scoped_lock
sentry(_M_get_mutex());
264 __q
->_M_free_list_link
= *__free_list
;
270 _GLIBCXX_END_NAMESPACE_VERSION