// Scraped from the git.ipfire.org mirror of thirdparty/gcc.git:
// libstdc++-v3/include/bits/stl_alloc.h
1 // Allocators -*- C++ -*-
2
3 // Copyright (C) 2001 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 2, or (at your option)
9 // any later version.
10
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
15
16 // You should have received a copy of the GNU General Public License along
17 // with this library; see the file COPYING. If not, write to the Free
18 // Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
19 // USA.
20
21 // As a special exception, you may use this file as part of a free software
22 // library without restriction. Specifically, if other files instantiate
23 // templates or use macros or inline functions from this file, or you compile
24 // this file and link it with other files to produce an executable, this
25 // file does not by itself cause the resulting executable to be covered by
26 // the GNU General Public License. This exception does not however
27 // invalidate any other reasons why the executable file might be covered by
28 // the GNU General Public License.
29
30 /*
31 * Copyright (c) 1996-1997
32 * Silicon Graphics Computer Systems, Inc.
33 *
34 * Permission to use, copy, modify, distribute and sell this software
35 * and its documentation for any purpose is hereby granted without fee,
36 * provided that the above copyright notice appear in all copies and
37 * that both that copyright notice and this permission notice appear
38 * in supporting documentation. Silicon Graphics makes no
39 * representations about the suitability of this software for any
40 * purpose. It is provided "as is" without express or implied warranty.
41 */
42
43 /** @file stl_alloc.h
44 * This is an internal header file, included by other library headers.
45 * You should not attempt to use it directly.
46 */
47
48 #ifndef __GLIBCPP_INTERNAL_ALLOC_H
49 #define __GLIBCPP_INTERNAL_ALLOC_H
50
51 /**
52 * @defgroup Allocators Memory Allocators
53 * @maint
54 * stl_alloc.h implements some node allocators. These are NOT the same as
55 * allocators in the C++ standard, nor in the original H-P STL. They do not
56 * encapsulate different pointer types; we assume that there is only one
57 * pointer type. The C++ standard allocators are intended to allocate
58 * individual objects, not pools or arenas.
59 *
60 * In this file allocators are of two different styles: "standard" and
61 * "SGI" (quotes included). "Standard" allocators conform to 20.4. "SGI"
62 * allocators differ in AT LEAST the following ways (add to this list as you
63 * discover them):
64 *
65  *  - "Standard" allocate() takes two parameters (n_count,hint=0) but "SGI"
66  *    allocate() takes one parameter (n_size).
67 * - Likewise, "standard" deallocate()'s argument is a count, but in "SGI"
68 * is a byte size.
69 * - max_size(), construct(), and destroy() are missing in "SGI" allocators.
70 * - reallocate(p,oldsz,newsz) is added in "SGI", and behaves as
71 * if p=realloc(p,newsz).
72 *
73 * "SGI" allocators may be wrapped in __allocator to convert the interface
74 * into a "standard" one.
75 * @endmaint
76 *
77 * The canonical description of these classes is in docs/html/ext/howto.html
78 * or online at http://gcc.gnu.org/onlinedocs/libstdc++/ext/howto.html#3
79 */
80
81 #include <cstddef>
82 #include <cstdlib>
83 #include <cstring>
84 #include <cassert>
85 #include <bits/functexcept.h> // For __throw_bad_alloc
86 #include <bits/stl_threads.h>
87
88 namespace std
89 {
90 /**
91 * @maint
92 * A new-based allocator, as required by the standard. Allocation and
93 * deallocation forward to global new and delete. "SGI" style, minus
94 * reallocate().
95 * @endmaint
96 * (See @link Allocators allocators info @endlink for more.)
97 */
class __new_alloc
{
public:
  // Forward the request to the global operator new.
  static void*
  allocate(size_t __n)
  { return ::operator new(__n); }

  // Forward to the global operator delete; the size argument is ignored.
  static void
  deallocate(void* __p, size_t)
  { ::operator delete(__p); }
};
109
110
111 /**
112 * @maint
113 * A malloc-based allocator. Typically slower than the
114 * __default_alloc_template (below). Typically thread-safe and more
115 * storage efficient. The template argument is unused and is only present
116 * to permit multiple instantiations (but see __default_alloc_template
117 * for caveats). "SGI" style, plus __set_malloc_handler for OOM conditions.
118 * @endmaint
119 * (See @link Allocators allocators info @endlink for more.)
120 */
121 template <int __inst>
122 class __malloc_alloc_template
123 {
124 private:
125 static void* _S_oom_malloc(size_t);
126 static void* _S_oom_realloc(void*, size_t);
127 static void (* __malloc_alloc_oom_handler)();
128
129 public:
130 static void*
131 allocate(size_t __n)
132 {
133 void* __result = malloc(__n);
134 if (0 == __result) __result = _S_oom_malloc(__n);
135 return __result;
136 }
137
138 static void
139 deallocate(void* __p, size_t /* __n */)
140 { free(__p); }
141
142 static void*
143 reallocate(void* __p, size_t /* old_sz */, size_t __new_sz)
144 {
145 void* __result = realloc(__p, __new_sz);
146 if (0 == __result) __result = _S_oom_realloc(__p, __new_sz);
147 return __result;
148 }
149
150 static void (* __set_malloc_handler(void (*__f)()))()
151 {
152 void (* __old)() = __malloc_alloc_oom_handler;
153 __malloc_alloc_oom_handler = __f;
154 return(__old);
155 }
156 };
157
158 // malloc_alloc out-of-memory handling
159 template <int __inst>
160 void (* __malloc_alloc_template<__inst>::__malloc_alloc_oom_handler)() = 0;
161
162 template <int __inst>
163 void*
164 __malloc_alloc_template<__inst>::_S_oom_malloc(size_t __n)
165 {
166 void (* __my_malloc_handler)();
167 void* __result;
168
169 for (;;)
170 {
171 __my_malloc_handler = __malloc_alloc_oom_handler;
172 if (0 == __my_malloc_handler)
173 std::__throw_bad_alloc();
174 (*__my_malloc_handler)();
175 __result = malloc(__n);
176 if (__result)
177 return(__result);
178 }
179 }
180
181 template <int __inst>
182 void*
183 __malloc_alloc_template<__inst>::_S_oom_realloc(void* __p, size_t __n)
184 {
185 void (* __my_malloc_handler)();
186 void* __result;
187
188 for (;;)
189 {
190 __my_malloc_handler = __malloc_alloc_oom_handler;
191 if (0 == __my_malloc_handler)
192 std::__throw_bad_alloc();
193 (*__my_malloc_handler)();
194 __result = realloc(__p, __n);
195 if (__result)
196 return(__result);
197 }
198 }
199
200
201 // Determines the underlying allocator choice for the node allocator.
202 #ifdef __USE_MALLOC
203 typedef __malloc_alloc_template<0> __mem_interface;
204 #else
205 typedef __new_alloc __mem_interface;
206 #endif
207
208
209 /**
210 * @maint
211 * This is used primarily (only?) in _Alloc_traits and other places to
212 * help provide the _Alloc_type typedef.
213 *
214 * This is neither "standard"-conforming nor "SGI". The _Alloc parameter
215 * must be "SGI" style.
216 * @endmaint
217 * (See @link Allocators allocators info @endlink for more.)
218 */
219 template<class _Tp, class _Alloc>
220 class __simple_alloc
221 {
222 public:
223 static _Tp* allocate(size_t __n)
224 { return 0 == __n ? 0 : (_Tp*) _Alloc::allocate(__n * sizeof (_Tp)); }
225
226 static _Tp* allocate()
227 { return (_Tp*) _Alloc::allocate(sizeof (_Tp)); }
228
229 static void deallocate(_Tp* __p, size_t __n)
230 { if (0 != __n) _Alloc::deallocate(__p, __n * sizeof (_Tp)); }
231
232 static void deallocate(_Tp* __p)
233 { _Alloc::deallocate(__p, sizeof (_Tp)); }
234 };
235
236
237 /**
238 * @maint
239 * An adaptor for an underlying allocator (_Alloc) to check the size
240 * arguments for debugging. Errors are reported using assert; these
241 * checks can be disabled via NDEBUG, but the space penalty is still
242 * paid, therefore it is far better to just use the underlying allocator
243  *  by itself when no checking is desired.
244 *
245 * "There is some evidence that this can confuse Purify." - SGI comment
246 *
247 * This adaptor is "SGI" style. The _Alloc parameter must also be "SGI".
248 * @endmaint
249 * (See @link Allocators allocators info @endlink for more.)
250 */
template <class _Alloc>
class __debug_alloc
{
private:
  // Bytes prepended to every block in order to record the requested size.
  // Must be large enough to preserve alignment.
  enum { _S_extra = 8 };

public:
  static void*
  allocate(size_t __n)
  {
    char* __block = (char*) _Alloc::allocate(__n + (int) _S_extra);
    *(size_t*) __block = __n;             // stash the size for later checks
    return __block + (int) _S_extra;
  }

  static void
  deallocate(void* __p, size_t __n)
  {
    char* __block = (char*) __p - (int) _S_extra;
    assert(*(size_t*) __block == __n);    // catch mismatched sizes
    _Alloc::deallocate(__block, __n + (int) _S_extra);
  }

  static void*
  reallocate(void* __p, size_t __old_sz, size_t __new_sz)
  {
    char* __block = (char*) __p - (int) _S_extra;
    assert(*(size_t*) __block == __old_sz);
    char* __grown = (char*)
      _Alloc::reallocate(__block, __old_sz + (int) _S_extra,
                         __new_sz + (int) _S_extra);
    *(size_t*) __grown = __new_sz;        // record the new size
    return __grown + (int) _S_extra;
  }
};
285
286
287 #ifdef __USE_MALLOC
288
289 typedef __mem_interface __alloc;
290 typedef __mem_interface __single_client_alloc;
291
292 #else
293
294
295 /**
296 * @maint
297 * Default node allocator. "SGI" style. Uses __mem_interface for its
298 * underlying requests (and makes as few requests as possible).
299 * **** Currently __mem_interface is always __new_alloc, never __malloc*.
300 *
301 * Important implementation properties:
302 * 1. If the clients request an object of size > _MAX_BYTES, the resulting
303 * object will be obtained directly from the underlying __mem_interface.
304 * 2. In all other cases, we allocate an object of size exactly
305 * _S_round_up(requested_size). Thus the client has enough size
306 * information that we can return the object to the proper free list
307 * without permanently losing part of the object.
308 *
309 * The first template parameter specifies whether more than one thread may
310 * use this allocator. It is safe to allocate an object from one instance
311 * of a default_alloc and deallocate it with another one. This effectively
312 * transfers its ownership to the second one. This may have undesirable
313 * effects on reference locality.
314 *
315 * The second parameter is unused and serves only to allow the creation of
316 * multiple default_alloc instances. Note that containers built on different
317 * allocator instances have different types, limiting the utility of this
318 * approach. If you do not wish to share the free lists with the main
319 * default_alloc instance, instantiate this with a non-zero __inst.
320 *
321 * @endmaint
322 * (See @link Allocators allocators info @endlink for more.)
323 */
324 template <bool __threads, int __inst>
325 class __default_alloc_template
326 {
327
328 private:
329 enum {_ALIGN = 8};
330 enum {_MAX_BYTES = 128};
331 enum {_NFREELISTS = _MAX_BYTES / _ALIGN};
332
333 static size_t
334 _S_round_up(size_t __bytes)
335 { return (((__bytes) + (size_t) _ALIGN-1) & ~((size_t) _ALIGN - 1)); }
336
337 union _Obj {
338 union _Obj* _M_free_list_link;
339 char _M_client_data[1]; // The client sees this.
340 };
341
342 static _Obj* volatile _S_free_list[];
343 static size_t _S_freelist_index(size_t __bytes)
344 { return (((__bytes) + (size_t)_ALIGN-1)/(size_t)_ALIGN - 1); }
345
346 // Returns an object of size __n, and optionally adds to size __n free list.
347 static void* _S_refill(size_t __n);
348 // Allocates a chunk for nobjs of size size. nobjs may be reduced
349 // if it is inconvenient to allocate the requested number.
350 static char* _S_chunk_alloc(size_t __size, int& __nobjs);
351
352 // Chunk allocation state.
353 static char* _S_start_free;
354 static char* _S_end_free;
355 static size_t _S_heap_size;
356
357 static _STL_mutex_lock _S_node_allocator_lock;
358
359 // It would be nice to use _STL_auto_lock here. But we need a test whether
360 // threads are in use.
361 class _Lock {
362 public:
363 _Lock() { if (__threads) _S_node_allocator_lock._M_acquire_lock(); }
364 ~_Lock() { if (__threads) _S_node_allocator_lock._M_release_lock(); }
365 } __attribute__ ((__unused__));
366 friend class _Lock;
367
368 public:
369
370 // __n must be > 0
371 static void* allocate(size_t __n)
372 {
373 void* __ret = 0;
374
375 if (__n > (size_t) _MAX_BYTES)
376 __ret = __mem_interface::allocate(__n);
377 else
378 {
379 _Obj* volatile* __my_free_list = _S_free_list + _S_freelist_index(__n);
380 // Acquire the lock here with a constructor call. This ensures that
381 // it is released in exit or during stack unwinding.
382 _Lock __lock_instance;
383 _Obj* __restrict__ __result = *__my_free_list;
384 if (__result == 0)
385 __ret = _S_refill(_S_round_up(__n));
386 else
387 {
388 *__my_free_list = __result -> _M_free_list_link;
389 __ret = __result;
390 }
391 }
392
393 return __ret;
394 };
395
396 // __p may not be 0
397 static void deallocate(void* __p, size_t __n)
398 {
399 if (__n > (size_t) _MAX_BYTES)
400 __mem_interface::deallocate(__p, __n);
401 else
402 {
403 _Obj* volatile* __my_free_list
404 = _S_free_list + _S_freelist_index(__n);
405 _Obj* __q = (_Obj*)__p;
406
407 // Acquire the lock here with a constructor call. This ensures that
408 // it is released in exit or during stack unwinding.
409 _Lock __lock_instance;
410 __q -> _M_free_list_link = *__my_free_list;
411 *__my_free_list = __q;
412 }
413 }
414
415 static void* reallocate(void* __p, size_t __old_sz, size_t __new_sz);
416 };
417
418
419 template <bool __threads, int __inst>
420 inline bool operator==(const __default_alloc_template<__threads, __inst>&,
421 const __default_alloc_template<__threads, __inst>&)
422 {
423 return true;
424 }
425
426 template <bool __threads, int __inst>
427 inline bool operator!=(const __default_alloc_template<__threads, __inst>&,
428 const __default_alloc_template<__threads, __inst>&)
429 {
430 return false;
431 }
432
433
434 // We allocate memory in large chunks in order to avoid fragmenting the
435 // malloc heap (or whatever __mem_interface is using) too much. We assume
436 // that __size is properly aligned. We hold the allocation lock.
437 template <bool __threads, int __inst>
438 char*
439 __default_alloc_template<__threads, __inst>::_S_chunk_alloc(size_t __size,
440 int& __nobjs)
441 {
442 char* __result;
443 size_t __total_bytes = __size * __nobjs;
444 size_t __bytes_left = _S_end_free - _S_start_free;
445
446 if (__bytes_left >= __total_bytes)
447 {
448 __result = _S_start_free;
449 _S_start_free += __total_bytes;
450 return(__result);
451 }
452 else if (__bytes_left >= __size)
453 {
454 __nobjs = (int)(__bytes_left/__size);
455 __total_bytes = __size * __nobjs;
456 __result = _S_start_free;
457 _S_start_free += __total_bytes;
458 return(__result);
459 }
460 else
461 {
462 size_t __bytes_to_get =
463 2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
464 // Try to make use of the left-over piece.
465 if (__bytes_left > 0)
466 {
467 _Obj* volatile* __my_free_list =
468 _S_free_list + _S_freelist_index(__bytes_left);
469
470 ((_Obj*)_S_start_free) -> _M_free_list_link = *__my_free_list;
471 *__my_free_list = (_Obj*)_S_start_free;
472 }
473 _S_start_free = (char*) __mem_interface::allocate(__bytes_to_get);
474 if (0 == _S_start_free)
475 {
476 size_t __i;
477 _Obj* volatile* __my_free_list;
478 _Obj* __p;
479 // Try to make do with what we have. That can't hurt. We
480 // do not try smaller requests, since that tends to result
481 // in disaster on multi-process machines.
482 __i = __size;
483 for (; __i <= (size_t) _MAX_BYTES; __i += (size_t) _ALIGN)
484 {
485 __my_free_list = _S_free_list + _S_freelist_index(__i);
486 __p = *__my_free_list;
487 if (0 != __p)
488 {
489 *__my_free_list = __p -> _M_free_list_link;
490 _S_start_free = (char*)__p;
491 _S_end_free = _S_start_free + __i;
492 return(_S_chunk_alloc(__size, __nobjs));
493 // Any leftover piece will eventually make it to the
494 // right free list.
495 }
496 }
497 _S_end_free = 0; // In case of exception.
498 _S_start_free = (char*)__mem_interface::allocate(__bytes_to_get);
499 // This should either throw an exception or remedy the situation.
500 // Thus we assume it succeeded.
501 }
502 _S_heap_size += __bytes_to_get;
503 _S_end_free = _S_start_free + __bytes_to_get;
504 return(_S_chunk_alloc(__size, __nobjs));
505 }
506 }
507
508
509 // Returns an object of size __n, and optionally adds to "size __n"'s free list.
510 // We assume that __n is properly aligned. We hold the allocation lock.
511 template <bool __threads, int __inst>
512 void*
513 __default_alloc_template<__threads, __inst>::_S_refill(size_t __n)
514 {
515 int __nobjs = 20;
516 char* __chunk = _S_chunk_alloc(__n, __nobjs);
517 _Obj* volatile* __my_free_list;
518 _Obj* __result;
519 _Obj* __current_obj;
520 _Obj* __next_obj;
521 int __i;
522
523 if (1 == __nobjs) return(__chunk);
524 __my_free_list = _S_free_list + _S_freelist_index(__n);
525
526 /* Build free list in chunk */
527 __result = (_Obj*)__chunk;
528 *__my_free_list = __next_obj = (_Obj*)(__chunk + __n);
529 for (__i = 1; ; __i++) {
530 __current_obj = __next_obj;
531 __next_obj = (_Obj*)((char*)__next_obj + __n);
532 if (__nobjs - 1 == __i) {
533 __current_obj -> _M_free_list_link = 0;
534 break;
535 } else {
536 __current_obj -> _M_free_list_link = __next_obj;
537 }
538 }
539 return(__result);
540 }
541
542
543 template <bool threads, int inst>
544 void*
545 __default_alloc_template<threads, inst>::reallocate(void* __p,
546 size_t __old_sz,
547 size_t __new_sz)
548 {
549 void* __result;
550 size_t __copy_sz;
551
552 if (__old_sz > (size_t) _MAX_BYTES && __new_sz > (size_t) _MAX_BYTES) {
553 return(realloc(__p, __new_sz));
554 }
555 if (_S_round_up(__old_sz) == _S_round_up(__new_sz)) return(__p);
556 __result = allocate(__new_sz);
557 __copy_sz = __new_sz > __old_sz? __old_sz : __new_sz;
558 memcpy(__result, __p, __copy_sz);
559 deallocate(__p, __old_sz);
560 return(__result);
561 }
562
563 template <bool __threads, int __inst>
564 _STL_mutex_lock
565 __default_alloc_template<__threads, __inst>::_S_node_allocator_lock
566 __STL_MUTEX_INITIALIZER;
567
568 template <bool __threads, int __inst>
569 char* __default_alloc_template<__threads, __inst>::_S_start_free = 0;
570
571 template <bool __threads, int __inst>
572 char* __default_alloc_template<__threads, __inst>::_S_end_free = 0;
573
574 template <bool __threads, int __inst>
575 size_t __default_alloc_template<__threads, __inst>::_S_heap_size = 0;
576
577 template <bool __threads, int __inst>
578 typename __default_alloc_template<__threads, __inst>::_Obj* volatile
579 __default_alloc_template<__threads, __inst> ::_S_free_list[
580 __default_alloc_template<__threads, __inst>::_NFREELISTS ];
581
582
583 typedef __default_alloc_template<true, 0> __alloc;
584 typedef __default_alloc_template<false, 0> __single_client_alloc;
585
586
587 #endif /* ! __USE_MALLOC */
588
589
590 /**
591 * This is a "standard" allocator, as per [20.4]. The private _Alloc is
592 * "SGI" style. (See comments at the top of stl_alloc.h.)
593 *
594 * The underlying allocator behaves as follows.
595 * - if __USE_MALLOC then
596 * - thread safety depends on malloc and is entirely out of our hands
597 * - __malloc_alloc_template is used for memory requests
598 * - else (the default)
599 * - __default_alloc_template is used via two typedefs
600 * - "__single_client_alloc" typedef does no locking for threads
601 * - "__alloc" typedef is threadsafe via the locks
602 * - __new_alloc is used for memory requests
603 *
604 * (See @link Allocators allocators info @endlink for more.)
605 */
606 template <class _Tp>
607 class allocator
608 {
609 typedef __alloc _Alloc; // The underlying allocator.
610 public:
611 typedef size_t size_type;
612 typedef ptrdiff_t difference_type;
613 typedef _Tp* pointer;
614 typedef const _Tp* const_pointer;
615 typedef _Tp& reference;
616 typedef const _Tp& const_reference;
617 typedef _Tp value_type;
618
619 template <class _Tp1> struct rebind {
620 typedef allocator<_Tp1> other;
621 };
622
623 allocator() throw() {}
624 allocator(const allocator&) throw() {}
625 template <class _Tp1> allocator(const allocator<_Tp1>&) throw() {}
626 ~allocator() throw() {}
627
628 pointer address(reference __x) const { return &__x; }
629 const_pointer address(const_reference __x) const { return &__x; }
630
631 // __n is permitted to be 0. The C++ standard says nothing about what
632 // the return value is when __n == 0.
633 _Tp* allocate(size_type __n, const void* = 0) {
634 return __n != 0 ? static_cast<_Tp*>(_Alloc::allocate(__n * sizeof(_Tp)))
635 : 0;
636 }
637
638 // __p is not permitted to be a null pointer.
639 void deallocate(pointer __p, size_type __n)
640 { _Alloc::deallocate(__p, __n * sizeof(_Tp)); }
641
642 size_type max_size() const throw()
643 { return size_t(-1) / sizeof(_Tp); }
644
645 void construct(pointer __p, const _Tp& __val) { new(__p) _Tp(__val); }
646 void destroy(pointer __p) { __p->~_Tp(); }
647 };
648
649 template<>
650 class allocator<void> {
651 public:
652 typedef size_t size_type;
653 typedef ptrdiff_t difference_type;
654 typedef void* pointer;
655 typedef const void* const_pointer;
656 typedef void value_type;
657
658 template <class _Tp1> struct rebind {
659 typedef allocator<_Tp1> other;
660 };
661 };
662
663
664 template <class _T1, class _T2>
665 inline bool operator==(const allocator<_T1>&, const allocator<_T2>&)
666 {
667 return true;
668 }
669
670 template <class _T1, class _T2>
671 inline bool operator!=(const allocator<_T1>&, const allocator<_T2>&)
672 {
673 return false;
674 }
675
676
677 /**
678 * @maint
679 * Allocator adaptor to turn an "SGI" style allocator (e.g., __alloc,
680 * __malloc_alloc_template) into a "standard" conforming allocator. Note
681 * that this adaptor does *not* assume that all objects of the underlying
682 * alloc class are identical, nor does it assume that all of the underlying
683 * alloc's member functions are static member functions. Note, also, that
684 * __allocator<_Tp, __alloc> is essentially the same thing as allocator<_Tp>.
685 * @endmaint
686 * (See @link Allocators allocators info @endlink for more.)
687 */
template <class _Tp, class _Alloc>
struct __allocator
{
  // The wrapped "SGI" style allocator; may carry per-instance state.
  _Alloc __underlying_alloc;

  typedef size_t     size_type;
  typedef ptrdiff_t  difference_type;
  typedef _Tp*       pointer;
  typedef const _Tp* const_pointer;
  typedef _Tp&       reference;
  typedef const _Tp& const_reference;
  typedef _Tp        value_type;

  template <class _Tp1>
    struct rebind
    { typedef __allocator<_Tp1, _Alloc> other; };

  __allocator() throw() { }
  __allocator(const __allocator& __a) throw()
  : __underlying_alloc(__a.__underlying_alloc) { }
  template <class _Tp1>
    __allocator(const __allocator<_Tp1, _Alloc>& __a) throw()
    : __underlying_alloc(__a.__underlying_alloc) { }
  ~__allocator() throw() { }

  pointer
  address(reference __x) const { return &__x; }

  const_pointer
  address(const_reference __x) const { return &__x; }

  // NB: __n is permitted to be 0.
  _Tp*
  allocate(size_type __n, const void* = 0)
  {
    if (__n == 0)
      return 0;
    return static_cast<_Tp*>(__underlying_alloc.allocate(__n * sizeof(_Tp)));
  }

  // __p is not permitted to be a null pointer.
  void
  deallocate(pointer __p, size_type __n)
  { __underlying_alloc.deallocate(__p, __n * sizeof(_Tp)); }

  size_type
  max_size() const throw()
  { return size_t(-1) / sizeof(_Tp); }

  void
  construct(pointer __p, const _Tp& __val)
  { new(__p) _Tp(__val); }

  void
  destroy(pointer __p)
  { __p->~_Tp(); }
};
733
734 template <class _Alloc>
735 class __allocator<void, _Alloc> {
736 typedef size_t size_type;
737 typedef ptrdiff_t difference_type;
738 typedef void* pointer;
739 typedef const void* const_pointer;
740 typedef void value_type;
741
742 template <class _Tp1> struct rebind {
743 typedef __allocator<_Tp1, _Alloc> other;
744 };
745 };
746
747 template <class _Tp, class _Alloc>
748 inline bool operator==(const __allocator<_Tp, _Alloc>& __a1,
749 const __allocator<_Tp, _Alloc>& __a2)
750 {
751 return __a1.__underlying_alloc == __a2.__underlying_alloc;
752 }
753
754 template <class _Tp, class _Alloc>
755 inline bool operator!=(const __allocator<_Tp, _Alloc>& __a1,
756 const __allocator<_Tp, _Alloc>& __a2)
757 {
758 return __a1.__underlying_alloc != __a2.__underlying_alloc;
759 }
760
761
762 //@{
763 /** Comparison operators for all of the predefined SGI-style allocators.
764 * This ensures that __allocator<malloc_alloc> (for example) will work
765 * correctly. As required, all allocators compare equal.
766 */
767 template <int inst>
768 inline bool operator==(const __malloc_alloc_template<inst>&,
769 const __malloc_alloc_template<inst>&)
770 {
771 return true;
772 }
773
774 template <int __inst>
775 inline bool operator!=(const __malloc_alloc_template<__inst>&,
776 const __malloc_alloc_template<__inst>&)
777 {
778 return false;
779 }
780
781 template <class _Alloc>
782 inline bool operator==(const __debug_alloc<_Alloc>&,
783 const __debug_alloc<_Alloc>&) {
784 return true;
785 }
786
787 template <class _Alloc>
788 inline bool operator!=(const __debug_alloc<_Alloc>&,
789 const __debug_alloc<_Alloc>&) {
790 return false;
791 }
792 //@}
793
794
795 /**
796 * @maint
797 * Another allocator adaptor: _Alloc_traits. This serves two purposes.
798 * First, make it possible to write containers that can use either "SGI"
799 * style allocators or "standard" allocators. Second, provide a mechanism
800 * so that containers can query whether or not the allocator has distinct
801 * instances. If not, the container can avoid wasting a word of memory to
802 * store an empty object. For examples of use, see stl_vector.h, etc, or
803 * any of the other classes derived from this one.
804 *
805 * This adaptor uses partial specialization. The general case of
806 * _Alloc_traits<_Tp, _Alloc> assumes that _Alloc is a
807 * standard-conforming allocator, possibly with non-equal instances and
808 * non-static members. (It still behaves correctly even if _Alloc has
809 * static member and if all instances are equal. Refinements affect
810 * performance, not correctness.)
811 *
812 * There are always two members: allocator_type, which is a standard-
813 * conforming allocator type for allocating objects of type _Tp, and
814 * _S_instanceless, a static const member of type bool. If
815 * _S_instanceless is true, this means that there is no difference
816 * between any two instances of type allocator_type. Furthermore, if
817 * _S_instanceless is true, then _Alloc_traits has one additional
818 * member: _Alloc_type. This type encapsulates allocation and
819 * deallocation of objects of type _Tp through a static interface; it
820 * has two member functions, whose signatures are
821 *
822 * - static _Tp* allocate(size_t)
823 * - static void deallocate(_Tp*, size_t)
824 *
825 * The size_t parameters are "standard" style (see top of stl_alloc.h) in
826 * that they take counts, not sizes.
827 *
828 * @endmaint
829 * (See @link Allocators allocators info @endlink for more.)
830 */
831 //@{
832 // The fully general version.
833 template <class _Tp, class _Allocator>
834 struct _Alloc_traits
835 {
836 static const bool _S_instanceless = false;
837 typedef typename _Allocator::template rebind<_Tp>::other allocator_type;
838 };
839
840 template <class _Tp, class _Allocator>
841 const bool _Alloc_traits<_Tp, _Allocator>::_S_instanceless;
842
843 /// The version for the default allocator.
844 template <class _Tp, class _Tp1>
845 struct _Alloc_traits<_Tp, allocator<_Tp1> >
846 {
847 static const bool _S_instanceless = true;
848 typedef __simple_alloc<_Tp, __alloc> _Alloc_type;
849 typedef allocator<_Tp> allocator_type;
850 };
851 //@}
852
853 //@{
854 /// Versions for the predefined "SGI" style allocators.
855 template <class _Tp, int __inst>
856 struct _Alloc_traits<_Tp, __malloc_alloc_template<__inst> >
857 {
858 static const bool _S_instanceless = true;
859 typedef __simple_alloc<_Tp, __malloc_alloc_template<__inst> > _Alloc_type;
860 typedef __allocator<_Tp, __malloc_alloc_template<__inst> > allocator_type;
861 };
862
863 #ifndef __USE_MALLOC
864 template <class _Tp, bool __threads, int __inst>
865 struct _Alloc_traits<_Tp, __default_alloc_template<__threads, __inst> >
866 {
867 static const bool _S_instanceless = true;
868 typedef __simple_alloc<_Tp, __default_alloc_template<__threads, __inst> >
869 _Alloc_type;
870 typedef __allocator<_Tp, __default_alloc_template<__threads, __inst> >
871 allocator_type;
872 };
873 #endif
874
875 template <class _Tp, class _Alloc>
876 struct _Alloc_traits<_Tp, __debug_alloc<_Alloc> >
877 {
878 static const bool _S_instanceless = true;
879 typedef __simple_alloc<_Tp, __debug_alloc<_Alloc> > _Alloc_type;
880 typedef __allocator<_Tp, __debug_alloc<_Alloc> > allocator_type;
881 };
882 //@}
883
884 //@{
885 /// Versions for the __allocator adaptor used with the predefined "SGI" style allocators.
886 template <class _Tp, class _Tp1, int __inst>
887 struct _Alloc_traits<_Tp,
888 __allocator<_Tp1, __malloc_alloc_template<__inst> > >
889 {
890 static const bool _S_instanceless = true;
891 typedef __simple_alloc<_Tp, __malloc_alloc_template<__inst> > _Alloc_type;
892 typedef __allocator<_Tp, __malloc_alloc_template<__inst> > allocator_type;
893 };
894
895 #ifndef __USE_MALLOC
896 template <class _Tp, class _Tp1, bool __thr, int __inst>
897 struct _Alloc_traits<_Tp,
898 __allocator<_Tp1,
899 __default_alloc_template<__thr, __inst> > >
900 {
901 static const bool _S_instanceless = true;
902 typedef __simple_alloc<_Tp, __default_alloc_template<__thr,__inst> >
903 _Alloc_type;
904 typedef __allocator<_Tp, __default_alloc_template<__thr,__inst> >
905 allocator_type;
906 };
907 #endif
908
909 template <class _Tp, class _Tp1, class _Alloc>
910 struct _Alloc_traits<_Tp, __allocator<_Tp1, __debug_alloc<_Alloc> > >
911 {
912 static const bool _S_instanceless = true;
913 typedef __simple_alloc<_Tp, __debug_alloc<_Alloc> > _Alloc_type;
914 typedef __allocator<_Tp, __debug_alloc<_Alloc> > allocator_type;
915 };
916 //@}
917
918 } // namespace std
919
920 #endif /* __GLIBCPP_INTERNAL_ALLOC_H */
921
922 // Local Variables:
923 // mode:C++
924 // End: