1 // Allocators -*- C++ -*-
3 // Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 // Free Software Foundation, Inc.
6 // This file is part of the GNU ISO C++ Library. This library is free
7 // software; you can redistribute it and/or modify it under the
8 // terms of the GNU General Public License as published by the
9 // Free Software Foundation; either version 3, or (at your option)
12 // This library is distributed in the hope that it will be useful,
13 // but WITHOUT ANY WARRANTY; without even the implied warranty of
14 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 // GNU General Public License for more details.
17 // Under Section 7 of GPL version 3, you are granted additional
18 // permissions described in the GCC Runtime Library Exception, version
19 // 3.1, as published by the Free Software Foundation.
21 // You should have received a copy of the GNU General Public License and
22 // a copy of the GCC Runtime Library Exception along with this program;
23 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
24 // <http://www.gnu.org/licenses/>.
27 * Copyright (c) 1996-1997
28 * Silicon Graphics Computer Systems, Inc.
30 * Permission to use, copy, modify, distribute and sell this software
31 * and its documentation for any purpose is hereby granted without fee,
32 * provided that the above copyright notice appear in all copies and
33 * that both that copyright notice and this permission notice appear
34 * in supporting documentation. Silicon Graphics makes no
35 * representations about the suitability of this software for any
36 * purpose. It is provided "as is" without express or implied warranty.
39 /** @file ext/pool_allocator.h
40 * This file is a GNU extension to the Standard C++ Library.
43 #ifndef _POOL_ALLOCATOR_H
44 #define _POOL_ALLOCATOR_H 1
#include <bits/c++config.h>
#include <cstdlib>
#include <new>
#include <bits/functexcept.h>
#include <ext/atomicity.h>
#include <ext/concurrence.h>
#include <bits/move.h>
54 _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx
)
60 * @brief Base class for __pool_alloc.
62 * Uses various allocators to fulfill underlying requests (and makes as
63 * few requests as possible when in default high-speed pool mode).
65 * Important implementation properties:
66 * 0. If globally mandated, then allocate objects from new
67 * 1. If the clients request an object of size > _S_max_bytes, the resulting
68 * object will be obtained directly from new
69 * 2. In all other cases, we allocate an object of size exactly
70 * _S_round_up(requested_size). Thus the client has enough size
71 * information that we can return the object to the proper free list
72 * without permanently losing part of the object.
74 class __pool_alloc_base
78 enum { _S_align
= 8 };
79 enum { _S_max_bytes
= 128 };
80 enum { _S_free_list_size
= (size_t)_S_max_bytes
/ (size_t)_S_align
};
84 union _Obj
* _M_free_list_link
;
85 char _M_client_data
[1]; // The client sees this.
88 static _Obj
* volatile _S_free_list
[_S_free_list_size
];
90 // Chunk allocation state.
91 static char* _S_start_free
;
92 static char* _S_end_free
;
93 static size_t _S_heap_size
;
96 _M_round_up(size_t __bytes
)
97 { return ((__bytes
+ (size_t)_S_align
- 1) & ~((size_t)_S_align
- 1)); }
100 _M_get_free_list(size_t __bytes
);
105 // Returns an object of size __n, and optionally adds to size __n
108 _M_refill(size_t __n
);
110 // Allocates a chunk for nobjs of size size. nobjs may be reduced
111 // if it is inconvenient to allocate the requested number.
113 _M_allocate_chunk(size_t __n
, int& __nobjs
);
118 * @brief Allocator using a memory pool with a single lock.
119 * @ingroup allocators
121 template<typename _Tp
>
122 class __pool_alloc
: private __pool_alloc_base
125 static _Atomic_word _S_force_new
;
128 typedef size_t size_type
;
129 typedef ptrdiff_t difference_type
;
130 typedef _Tp
* pointer
;
131 typedef const _Tp
* const_pointer
;
132 typedef _Tp
& reference
;
133 typedef const _Tp
& const_reference
;
134 typedef _Tp value_type
;
136 template<typename _Tp1
>
138 { typedef __pool_alloc
<_Tp1
> other
; };
140 __pool_alloc() throw() { }
142 __pool_alloc(const __pool_alloc
&) throw() { }
144 template<typename _Tp1
>
145 __pool_alloc(const __pool_alloc
<_Tp1
>&) throw() { }
147 ~__pool_alloc() throw() { }
150 address(reference __x
) const { return &__x
; }
153 address(const_reference __x
) const { return &__x
; }
156 max_size() const throw()
157 { return size_t(-1) / sizeof(_Tp
); }
159 // _GLIBCXX_RESOLVE_LIB_DEFECTS
160 // 402. wrong new expression in [some_] allocator::construct
162 construct(pointer __p
, const _Tp
& __val
)
163 { ::new((void *)__p
) _Tp(__val
); }
165 #ifdef __GXX_EXPERIMENTAL_CXX0X__
166 template<typename
... _Args
>
168 construct(pointer __p
, _Args
&&... __args
)
169 { ::new((void *)__p
) _Tp(std::forward
<_Args
>(__args
)...); }
173 destroy(pointer __p
) { __p
->~_Tp(); }
176 allocate(size_type __n
, const void* = 0);
179 deallocate(pointer __p
, size_type __n
);
182 template<typename _Tp
>
184 operator==(const __pool_alloc
<_Tp
>&, const __pool_alloc
<_Tp
>&)
187 template<typename _Tp
>
189 operator!=(const __pool_alloc
<_Tp
>&, const __pool_alloc
<_Tp
>&)
192 template<typename _Tp
>
194 __pool_alloc
<_Tp
>::_S_force_new
;
196 template<typename _Tp
>
198 __pool_alloc
<_Tp
>::allocate(size_type __n
, const void*)
201 if (__builtin_expect(__n
!= 0, true))
203 if (__builtin_expect(__n
> this->max_size(), false))
204 std::__throw_bad_alloc();
206 // If there is a race through here, assume answer from getenv
207 // will resolve in same direction. Inspired by techniques
208 // to efficiently support threading found in basic_string.h.
209 if (_S_force_new
== 0)
211 if (std::getenv("GLIBCXX_FORCE_NEW"))
212 __atomic_add_dispatch(&_S_force_new
, 1);
214 __atomic_add_dispatch(&_S_force_new
, -1);
217 const size_t __bytes
= __n
* sizeof(_Tp
);
218 if (__bytes
> size_t(_S_max_bytes
) || _S_force_new
> 0)
219 __ret
= static_cast<_Tp
*>(::operator new(__bytes
));
222 _Obj
* volatile* __free_list
= _M_get_free_list(__bytes
);
224 __scoped_lock
sentry(_M_get_mutex());
225 _Obj
* __restrict__ __result
= *__free_list
;
226 if (__builtin_expect(__result
== 0, 0))
227 __ret
= static_cast<_Tp
*>(_M_refill(_M_round_up(__bytes
)));
230 *__free_list
= __result
->_M_free_list_link
;
231 __ret
= reinterpret_cast<_Tp
*>(__result
);
233 if (__builtin_expect(__ret
== 0, 0))
234 std::__throw_bad_alloc();
240 template<typename _Tp
>
242 __pool_alloc
<_Tp
>::deallocate(pointer __p
, size_type __n
)
244 if (__builtin_expect(__n
!= 0 && __p
!= 0, true))
246 const size_t __bytes
= __n
* sizeof(_Tp
);
247 if (__bytes
> static_cast<size_t>(_S_max_bytes
) || _S_force_new
> 0)
248 ::operator delete(__p
);
251 _Obj
* volatile* __free_list
= _M_get_free_list(__bytes
);
252 _Obj
* __q
= reinterpret_cast<_Obj
*>(__p
);
254 __scoped_lock
sentry(_M_get_mutex());
255 __q
->_M_free_list_link
= *__free_list
;
261 _GLIBCXX_END_NAMESPACE