1 // Bitmap Allocator. -*- C++ -*-
2
3 // Copyright (C) 2004 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 2, or (at your option)
9 // any later version.
10
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
15
16 // You should have received a copy of the GNU General Public License along
17 // with this library; see the file COPYING. If not, write to the Free
18 // Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
19 // USA.
20
21 // As a special exception, you may use this file as part of a free software
22 // library without restriction. Specifically, if other files instantiate
23 // templates or use macros or inline functions from this file, or you compile
24 // this file and link it with other files to produce an executable, this
25 // file does not by itself cause the resulting executable to be covered by
26 // the GNU General Public License. This exception does not however
27 // invalidate any other reasons why the executable file might be covered by
28 // the GNU General Public License.
29
30 /** @file ext/bitmap_allocator.h
31 * This file is a GNU extension to the Standard C++ Library.
32 * You should only include this header if you are using GCC 3 or later.
33 */
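
// Illustrative usage (not part of the original header): bitmap_allocator is
// designed as a drop-in replacement for std::allocator, so a container can
// simply name it as its allocator type. A minimal sketch:
//
//   #include <ext/bitmap_allocator.h>
//   #include <list>
//
//   int main()
//   {
//     std::list<int, __gnu_cxx::bitmap_allocator<int> > __l;
//     for (int __i = 0; __i < 1000; ++__i)
//       __l.push_back(__i);   // single-object allocations come from the bitmap pool
//   }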
34
35 #ifndef _BITMAP_ALLOCATOR_H
36 #define _BITMAP_ALLOCATOR_H 1
37
38 // For std::size_t, and ptrdiff_t.
39 #include <cstddef>
40
41 // For std::pair.
42 #include <utility>
43
44 // For greater_equal, and less_equal.
45 #include <functional>
46
47 // For operator new.
48 #include <new>
49
50 // For __gthread_mutex_t, __gthread_mutex_lock and __gthread_mutex_unlock.
51 #include <bits/gthr.h>
52
53 // Define this to enable error checking within the allocator
54 // itself (to debug the allocator itself).
55 //#define _BALLOC_SANITY_CHECK
56
57 #if defined _BALLOC_SANITY_CHECK
58 #include <cassert>
59 #define _BALLOC_ASSERT(_EXPR) assert(_EXPR)
60 #else
61 #define _BALLOC_ASSERT(_EXPR)
62 #endif
63
64
65 namespace __gnu_cxx
66 {
67 #if defined __GTHREADS
68 namespace
69 {
70 // If true, then the application being compiled will be using
71 // threads, so use mutexes as a synchronization primitive, else do
72 // not use any synchronization primitives.
73 bool const __threads_enabled = __gthread_active_p();
74 }
75 #endif
76
77 #if defined __GTHREADS
78 // _Mutex is an OO-Wrapper for __gthread_mutex_t. It does not allow
79 // you to copy or assign an already initialized mutex. This is used
80 // merely as a convenience for the locking classes.
81 class _Mutex
82 {
83 __gthread_mutex_t _M_mut;
84
85 // Prevent Copying and assignment.
86 _Mutex(_Mutex const&);
87 _Mutex& operator=(_Mutex const&);
88
89 public:
90 _Mutex()
91 {
92 if (__threads_enabled)
93 {
94 #if !defined __GTHREAD_MUTEX_INIT
95 __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mut);
96 #else
97 __gthread_mutex_t __mtemp = __GTHREAD_MUTEX_INIT;
98 _M_mut = __mtemp;
99 #endif
100 }
101 }
102
103 ~_Mutex()
104 {
105 // Gthreads does not define a Mutex Destruction Function.
106 }
107
108 __gthread_mutex_t*
109 _M_get() { return &_M_mut; }
110 };
111
112 // _Lock is a simple manual locking class which allows you to
113 // manually lock and unlock a mutex associated with the lock. There
114 // is no automatic locking or unlocking happening without the
115 // programmer's explicit instructions. This class unlocks the mutex
116 // ONLY if it has previously been locked. However, this check does
117 // not apply for locking, and wayward use may cause deadlocks.
118 class _Lock
119 {
120 _Mutex* _M_pmt;
121 bool _M_locked;
122
123 // Prevent Copying and assignment.
124 _Lock(_Lock const&);
125 _Lock& operator=(_Lock const&);
126
127 public:
128 _Lock(_Mutex* __mptr)
129 : _M_pmt(__mptr), _M_locked(false)
130 { }
131
132 void
133 _M_lock()
134 {
135 if (__threads_enabled)
136 {
137 _M_locked = true;
138 __gthread_mutex_lock(_M_pmt->_M_get());
139 }
140 }
141
142 void
143 _M_unlock()
144 {
145 if (__threads_enabled)
146 {
147 if (__builtin_expect(_M_locked, true))
148 {
149 __gthread_mutex_unlock(_M_pmt->_M_get());
150 _M_locked = false;
151 }
152 }
153 }
154
155 ~_Lock() { }
156 };
157
158 // _Auto_Lock locks the associated mutex on construction, and
159 // unlocks it on destruction. There are no checks performed, and
160 // this class follows the RAII principle.
161 class _Auto_Lock
162 {
163 _Mutex* _M_pmt;
164 // Prevent Copying and assignment.
165 _Auto_Lock(_Auto_Lock const&);
166 _Auto_Lock& operator=(_Auto_Lock const&);
167
168 void
169 _M_lock()
170 {
171 if (__threads_enabled)
172 __gthread_mutex_lock(_M_pmt->_M_get());
173 }
174
175 void
176 _M_unlock()
177 {
178 if (__threads_enabled)
179 __gthread_mutex_unlock(_M_pmt->_M_get());
180 }
181
182 public:
183 _Auto_Lock(_Mutex* __mptr) : _M_pmt(__mptr)
184 { this->_M_lock(); }
185
186 ~_Auto_Lock() { this->_M_unlock(); }
187 };
188 #endif
189
190 namespace balloc
191 {
192 // __mini_vector<> is to be used only for built-in types or
193 // PODs. It is a stripped-down version of the full-fledged
194 // std::vector<>. Notable differences are:
195 //
196 // 1. Not all accessor functions are present.
197 // 2. Used ONLY for PODs.
198 // 3. No Allocator template argument. Uses ::operator new() to get
199 // memory, and ::operator delete() to free it.
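// Not part of the original header: an illustrative sketch of the small
// subset of std::vector<> behaviour that __mini_vector<> provides.
//
//   __gnu_cxx::balloc::__mini_vector<int> __v;
//   __v.push_back(1);
//   __v.push_back(2);
//   __v.erase(__v.begin());          // shifts the remaining elements left
//   // now __v.size() == 1 and __v[0] == 2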
200 template<typename _Tp>
201 class __mini_vector
202 {
203 __mini_vector(const __mini_vector&);
204 __mini_vector& operator=(const __mini_vector&);
205
206 public:
207 typedef _Tp value_type;
208 typedef _Tp* pointer;
209 typedef _Tp& reference;
210 typedef const _Tp& const_reference;
211 typedef std::size_t size_type;
212 typedef std::ptrdiff_t difference_type;
213 typedef pointer iterator;
214
215 private:
216 pointer _M_start;
217 pointer _M_finish;
218 pointer _M_end_of_storage;
219
220 size_type
221 _M_space_left() const throw()
222 { return _M_end_of_storage - _M_finish; }
223
224 pointer
225 allocate(size_type __n)
226 { return static_cast<pointer>(::operator new(__n * sizeof(_Tp))); }
227
228 void
229 deallocate(pointer __p, size_type)
230 { ::operator delete(__p); }
231
232 public:
233 // Members used: size(), push_back(), pop_back(),
234 // insert(iterator, const_reference), erase(iterator),
235 // begin(), end(), back(), operator[].
236
237 __mini_vector() : _M_start(0), _M_finish(0),
238 _M_end_of_storage(0)
239 { }
240
241 ~__mini_vector()
242 {
243 if (this->_M_start)
244 {
245 this->deallocate(this->_M_start, this->_M_end_of_storage
246 - this->_M_start);
247 }
248 }
249
250 size_type
251 size() const throw()
252 { return _M_finish - _M_start; }
253
254 iterator
255 begin() const throw()
256 { return this->_M_start; }
257
258 iterator
259 end() const throw()
260 { return this->_M_finish; }
261
262 reference
263 back() const throw()
264 { return *(this->end() - 1); }
265
266 reference
267 operator[](const size_type __pos) const throw()
268 { return this->_M_start[__pos]; }
269
270 void
271 insert(iterator __pos, const_reference __x);
272
273 void
274 push_back(const_reference __x)
275 {
276 if (this->_M_space_left())
277 {
278 *this->end() = __x;
279 ++this->_M_finish;
280 }
281 else
282 this->insert(this->end(), __x);
283 }
284
285 void
286 pop_back() throw()
287 { --this->_M_finish; }
288
289 void
290 erase(iterator __pos) throw();
291
292 void
293 clear() throw()
294 { this->_M_finish = this->_M_start; }
295 };
296
297 // Out of line function definitions.
298 template<typename _Tp>
299 void __mini_vector<_Tp>::
300 insert(iterator __pos, const_reference __x)
301 {
302 if (this->_M_space_left())
303 {
304 size_type __to_move = this->_M_finish - __pos;
305 iterator __dest = this->end();
306 iterator __src = this->end() - 1;
307
308 ++this->_M_finish;
309 while (__to_move)
310 {
311 *__dest = *__src;
312 --__dest; --__src; --__to_move;
313 }
314 *__pos = __x;
315 }
316 else
317 {
318 size_type __new_size = this->size() ? this->size() * 2 : 1;
319 iterator __new_start = this->allocate(__new_size);
320 iterator __first = this->begin();
321 iterator __start = __new_start;
322 while (__first != __pos)
323 {
324 *__start = *__first;
325 ++__start; ++__first;
326 }
327 *__start = __x;
328 ++__start;
329 while (__first != this->end())
330 {
331 *__start = *__first;
332 ++__start; ++__first;
333 }
334 if (this->_M_start)
335 this->deallocate(this->_M_start, this->size());
336
337 this->_M_start = __new_start;
338 this->_M_finish = __start;
339 this->_M_end_of_storage = this->_M_start + __new_size;
340 }
341 }
342
343 template<typename _Tp>
344 void __mini_vector<_Tp>::
345 erase(iterator __pos) throw()
346 {
347 while (__pos + 1 != this->end())
348 {
349 *__pos = __pos[1];
350 ++__pos;
351 }
352 --this->_M_finish;
353 }
354
355
356 template<typename _Tp>
357 struct __mv_iter_traits
358 {
359 typedef typename _Tp::value_type value_type;
360 typedef typename _Tp::difference_type difference_type;
361 };
362
363 template<typename _Tp>
364 struct __mv_iter_traits<_Tp*>
365 {
366 typedef _Tp value_type;
367 typedef std::ptrdiff_t difference_type;
368 };
369
370 enum
371 {
372 bits_per_byte = 8,
373 bits_per_block = sizeof(unsigned int) * bits_per_byte
374 };
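// Worked example (assuming the common case of a 4-byte unsigned int):
//
//   bits_per_block == sizeof(unsigned int) * bits_per_byte == 4 * 8 == 32
//
// i.e. a single bitmap word records the free/allocated state of 32 blocks.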
375
376 template<typename _ForwardIterator, typename _Tp, typename _Compare>
377 _ForwardIterator
378 __lower_bound(_ForwardIterator __first, _ForwardIterator __last,
379 const _Tp& __val, _Compare __comp)
380 {
381 typedef typename __mv_iter_traits<_ForwardIterator>::value_type
382 _ValueType;
383 typedef typename __mv_iter_traits<_ForwardIterator>::difference_type
384 _DistanceType;
385
386 _DistanceType __len = __last - __first;
387 _DistanceType __half;
388 _ForwardIterator __middle;
389
390 while (__len > 0)
391 {
392 __half = __len >> 1;
393 __middle = __first;
394 __middle += __half;
395 if (__comp(*__middle, __val))
396 {
397 __first = __middle;
398 ++__first;
399 __len = __len - __half - 1;
400 }
401 else
402 __len = __half;
403 }
404 return __first;
405 }
406
407 template<typename _InputIterator, typename _Predicate>
408 inline _InputIterator
409 __find_if(_InputIterator __first, _InputIterator __last, _Predicate __p)
410 {
411 while (__first != __last && !__p(*__first))
412 ++__first;
413 return __first;
414 }
415
416 template<typename _AddrPair>
417 inline size_t
418 __num_blocks(_AddrPair __ap)
419 { return (__ap.second - __ap.first) + 1; }
420
421 template<typename _AddrPair>
422 inline size_t
423 __num_bitmaps(_AddrPair __ap)
424 { return __num_blocks(__ap) / bits_per_block; }
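// Illustrative arithmetic (assuming bits_per_block == 32): for a block
// pair __bp that spans 64 blocks, i.e. __bp.second == __bp.first + 63,
//
//   __num_blocks(__bp)  == 63 + 1  == 64
//   __num_bitmaps(__bp) == 64 / 32 == 2
//
// so two bitmap words precede the data area described by __bp.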
425
426 // _Tp should be a pointer type.
427 template<typename _Tp>
428 class _Inclusive_between
429 : public std::unary_function<typename std::pair<_Tp, _Tp>, bool>
430 {
431 typedef _Tp pointer;
432 pointer _M_ptr_value;
433 typedef typename std::pair<_Tp, _Tp> _Block_pair;
434
435 public:
436 _Inclusive_between(pointer __ptr) : _M_ptr_value(__ptr)
437 { }
438
439 bool
440 operator()(_Block_pair __bp) const throw()
441 {
442 if (std::less_equal<pointer>()(_M_ptr_value, __bp.second)
443 && std::greater_equal<pointer>()(_M_ptr_value, __bp.first))
444 return true;
445 else
446 return false;
447 }
448 };
449
450 // Used to pass a Functor to functions by reference.
451 template<typename _Functor>
452 class _Functor_Ref
453 : public std::unary_function<typename _Functor::argument_type,
454 typename _Functor::result_type>
455 {
456 _Functor& _M_fref;
457
458 public:
459 typedef typename _Functor::argument_type argument_type;
460 typedef typename _Functor::result_type result_type;
461
462 _Functor_Ref(_Functor& __fref) : _M_fref(__fref)
463 { }
464
465 result_type
466 operator()(argument_type __arg)
467 { return _M_fref(__arg); }
468 };
469
470 // _Tp should be a pointer type, and _Alloc is the Allocator for
471 // the vector.
472 template<typename _Tp>
473 class _Ffit_finder
474 : public std::unary_function<typename std::pair<_Tp, _Tp>, bool>
475 {
476 typedef typename std::pair<_Tp, _Tp> _Block_pair;
477 typedef typename balloc::__mini_vector<_Block_pair> _BPVector;
478 typedef typename _BPVector::difference_type _Counter_type;
479
480 unsigned int* _M_pbitmap;
481 unsigned int _M_data_offset;
482
483 public:
484 _Ffit_finder() : _M_pbitmap(0), _M_data_offset(0)
485 { }
486
487 bool
488 operator()(_Block_pair __bp) throw()
489 {
490 // Set the __rover to the last unsigned integer, which is the
491 // bitmap for the first free block. Thus, the bitmaps are in exact
492 // reverse order of the actual memory layout. So, we count down
493 // the bitmaps, which is the same as moving up the memory.
494
495 // If the used count stored at the start of the Bit Map headers
496 // is equal to the number of Objects that the current Block can
497 // store, then there is definitely no space for another single
498 // object, so just return false.
499 _Counter_type __diff =
500 __gnu_cxx::balloc::__num_bitmaps(__bp);
501
502 if (*reinterpret_cast<unsigned int*>
503 (reinterpret_cast<char*>(__bp.first) - (sizeof(unsigned int) *
504 (__diff+1)))
505 == __gnu_cxx::balloc::__num_blocks(__bp))
506 return false;
507
508 unsigned int* __rover = reinterpret_cast<unsigned int*>(__bp.first) - 1;
509
510 for (_Counter_type __i = 0; __i < __diff; ++__i)
511 {
512 _M_data_offset = __i;
513 if (*__rover)
514 {
515 _M_pbitmap = __rover;
516 return true;
517 }
518 --__rover;
519 }
520 return false;
521 }
522
523
524 unsigned int*
525 _M_get() const throw()
526 { return _M_pbitmap; }
527
528 unsigned int
529 _M_offset() const throw()
530 { return _M_data_offset * bits_per_block; }
531 };
532
533
534
535 // _Tp should be a pointer type.
536 template<typename _Tp>
537 class _Bitmap_counter
538 {
539 typedef typename balloc::__mini_vector<typename std::pair<_Tp, _Tp> >
540 _BPVector;
541 typedef typename _BPVector::size_type _Index_type;
542 typedef _Tp pointer;
543
544 _BPVector& _M_vbp;
545 unsigned int* _M_curr_bmap;
546 unsigned int* _M_last_bmap_in_block;
547 _Index_type _M_curr_index;
548
549 public:
550 // Use the 2nd parameter with care. Make sure that such an
551 // entry exists in the vector before passing that particular
552 // index to this ctor.
553 _Bitmap_counter(_BPVector& Rvbp, int __index = -1) : _M_vbp(Rvbp)
554 { this->_M_reset(__index); }
555
556 void
557 _M_reset(int __index = -1) throw()
558 {
559 if (__index == -1)
560 {
561 _M_curr_bmap = 0;
562 _M_curr_index = static_cast<_Index_type>(-1);
563 return;
564 }
565
566 _M_curr_index = __index;
567 _M_curr_bmap = reinterpret_cast<unsigned int*>
568 (_M_vbp[_M_curr_index].first) - 1;
569
570 _BALLOC_ASSERT(__index <= (int)_M_vbp.size() - 1);
571
572 _M_last_bmap_in_block = _M_curr_bmap
573 - ((_M_vbp[_M_curr_index].second
574 - _M_vbp[_M_curr_index].first + 1)
575 / bits_per_block - 1);
576 }
577
578 // Dangerous Function! Use with extreme care. Pass to this
579 // function ONLY those values that are known to be correct,
580 // otherwise this will mess up big time.
581 void
582 _M_set_internal_bitmap(unsigned int* __new_internal_marker) throw()
583 { _M_curr_bmap = __new_internal_marker; }
584
585 bool
586 _M_finished() const throw()
587 { return(_M_curr_bmap == 0); }
588
589 _Bitmap_counter&
590 operator++() throw()
591 {
592 if (_M_curr_bmap == _M_last_bmap_in_block)
593 {
594 if (++_M_curr_index == _M_vbp.size())
595 _M_curr_bmap = 0;
596 else
597 this->_M_reset(_M_curr_index);
598 }
599 else
600 --_M_curr_bmap;
601 return *this;
602 }
603
604 unsigned int*
605 _M_get() const throw()
606 { return _M_curr_bmap; }
607
608 pointer
609 _M_base() const throw()
610 { return _M_vbp[_M_curr_index].first; }
611
612 unsigned int
613 _M_offset() const throw()
614 {
615 return bits_per_block
616 * ((reinterpret_cast<unsigned int*>(this->_M_base())
617 - _M_curr_bmap) - 1);
618 }
619
620 unsigned int
621 _M_where() const throw()
622 { return _M_curr_index; }
623 };
624
625 inline void
626 __bit_allocate(unsigned int* __pbmap, unsigned int __pos) throw()
627 {
628 unsigned int __mask = 1 << __pos;
629 __mask = ~__mask;
630 *__pbmap &= __mask;
631 }
632
633 inline void
634 __bit_free(unsigned int* __pbmap, unsigned int __pos) throw()
635 {
636 unsigned int __mask = 1 << __pos;
637 *__pbmap |= __mask;
638 }
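// Worked example (illustrative only, assuming a 32-bit unsigned int):
// starting from *__pbmap == 0xFFFFFFFF (all blocks free),
//
//   __bit_allocate(__pbmap, 0);   // *__pbmap == 0xFFFFFFFE, bit 0 cleared
//   __bit_free(__pbmap, 0);       // *__pbmap == 0xFFFFFFFF, bit 0 set again
//
// A set bit therefore means "free", a cleared bit means "allocated".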
639 } // namespace balloc
640
641 // Generic Version of the bsf instruction.
642 inline unsigned int
643 _Bit_scan_forward(register unsigned int __num)
644 { return static_cast<unsigned int>(__builtin_ctz(__num)); }
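// For example, _Bit_scan_forward(0x8) returns 3, since bit 3 is the lowest
// set (i.e. free) bit. Note that __builtin_ctz has undefined behaviour for
// a zero argument, so the callers below make sure the bitmap word is
// non-zero before scanning it.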
645
646 class free_list
647 {
648 typedef unsigned int* value_type;
649 typedef balloc::__mini_vector<value_type> vector_type;
650 typedef vector_type::iterator iterator;
651
652 struct _LT_pointer_compare
653 {
654 bool
655 operator()(const unsigned int* __pui, const unsigned int __cui) const throw()
656 { return *__pui < __cui; }
657 };
658
659 #if defined __GTHREADS
660 static _Mutex _S_bfl_mutex;
661 #endif
662 static vector_type _S_free_list;
663
664 void
665 _M_validate(unsigned int* __addr) throw()
666 {
667 const unsigned int __max_size = 64;
668 if (_S_free_list.size() >= __max_size)
669 {
670 // Ok, the threshold value has been reached. We determine
671 // which block to remove from the list of free blocks.
672 if (*__addr >= *_S_free_list.back())
673 {
674 // Ok, the new block is greater than or equal to the
675 // last block in the list of free blocks. We just free
676 // the new block.
677 operator delete(static_cast<void*>(__addr));
678 return;
679 }
680 else
681 {
682 // Deallocate the last block in the list of free lists,
683 // and insert the new one in its correct position.
684 operator delete(static_cast<void*>(_S_free_list.back()));
685 _S_free_list.pop_back();
686 }
687 }
688
689 // Just add the block to the list of free lists unconditionally.
690 iterator __temp = __gnu_cxx::balloc::__lower_bound
691 (_S_free_list.begin(), _S_free_list.end(),
692 *__addr, _LT_pointer_compare());
693
694 // We may insert the new free list before __temp.
695 _S_free_list.insert(__temp, __addr);
696 }
697
698 bool
699 _M_should_i_give(unsigned int __block_size,
700 unsigned int __required_size) throw()
701 {
702 const unsigned int __max_wastage_percentage = 36;
703 if (__block_size >= __required_size &&
704 (((__block_size - __required_size) * 100 / __block_size)
705 < __max_wastage_percentage))
706 return true;
707 else
708 return false;
709 }
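// Illustrative arithmetic: for __block_size == 128 and __required_size == 100,
// the wastage is (128 - 100) * 100 / 128 == 21 percent, which is below the
// 36 percent threshold, so _M_should_i_give returns true and the cached
// block is considered an acceptable match for the request.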
710
711 public:
712 inline void
713 _M_insert(unsigned int* __addr) throw()
714 {
715 #if defined __GTHREADS
716 _Auto_Lock __bfl_lock(&_S_bfl_mutex);
717 #endif
718 // Call _M_validate to decide what should be done with
719 // this particular free list.
720 this->_M_validate(reinterpret_cast<unsigned int*>
721 (reinterpret_cast<char*>(__addr)
722 - sizeof(unsigned int)));
723 }
724
725 unsigned int*
726 _M_get(unsigned int __sz) throw(std::bad_alloc);
727
728 // This function just clears the internal Free List, and gives back
729 // all the memory to the OS.
730 void
731 _M_clear();
732 };
733
734
735 // Forward declare the class.
736 template<typename _Tp>
737 class bitmap_allocator;
738
739 // Specialize for void:
740 template<>
741 class bitmap_allocator<void>
742 {
743 public:
744 typedef void* pointer;
745 typedef const void* const_pointer;
746
747 // Reference-to-void members are impossible.
748 typedef void value_type;
749 template<typename _Tp1>
750 struct rebind
751 {
752 typedef bitmap_allocator<_Tp1> other;
753 };
754 };
755
756 template<typename _Tp>
757 class bitmap_allocator : private free_list
758 {
759 public:
760 typedef std::size_t size_type;
761 typedef std::ptrdiff_t difference_type;
762 typedef _Tp* pointer;
763 typedef const _Tp* const_pointer;
764 typedef _Tp& reference;
765 typedef const _Tp& const_reference;
766 typedef _Tp value_type;
767 template<typename _Tp1>
768 struct rebind
769 {
770 typedef bitmap_allocator<_Tp1> other;
771 };
772
773 private:
774 template<unsigned int _BSize, unsigned int _AlignSize>
775 struct aligned_size
776 {
777 enum
778 {
779 modulus = _BSize % _AlignSize,
780 value = _BSize + (modulus ? _AlignSize - (modulus) : 0)
781 };
782 };
783
784 struct _Alloc_block
785 {
786 char __M_unused[aligned_size<sizeof(value_type), 8>::value];
787 };
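// Worked example: for a value_type of size 1 (e.g. char),
//
//   aligned_size<1, 8>::modulus == 1 % 8 == 1
//   aligned_size<1, 8>::value   == 1 + (8 - 1) == 8
//
// so each _Alloc_block occupies 8 bytes; a value_type whose size is already
// a multiple of 8 (modulus == 0) gets no padding.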
788
789
790 typedef typename std::pair<_Alloc_block*, _Alloc_block*> _Block_pair;
791
792 typedef typename
793 balloc::__mini_vector<_Block_pair> _BPVector;
794
795 #if defined _BALLOC_SANITY_CHECK
796 // Complexity: O(lg(N)), where N is the number of blocks of size
797 // sizeof(value_type).
798 void
799 _S_check_for_free_blocks() throw()
800 {
801 typedef typename
802 __gnu_cxx::balloc::_Ffit_finder<_Alloc_block*> _FFF;
803 _FFF __fff;
804 typedef typename _BPVector::iterator _BPiter;
805 _BPiter __bpi =
806 __gnu_cxx::balloc::__find_if
807 (_S_mem_blocks.begin(), _S_mem_blocks.end(),
808 __gnu_cxx::balloc::_Functor_Ref<_FFF>(__fff));
809
810 _BALLOC_ASSERT(__bpi == _S_mem_blocks.end());
811 }
812 #endif
813
814 // Complexity: O(1), but internally depends upon the complexity
815 // of the function free_list::_M_get. The
816 // part where the bitmap headers are written is of worst-case
817 // complexity O(X), where X is the number of blocks of size
818 // sizeof(value_type) within the newly acquired block, and this
819 // is a tight bound.
820 void
821 _S_refill_pool() throw(std::bad_alloc)
822 {
823 #if defined _BALLOC_SANITY_CHECK
824 _S_check_for_free_blocks();
825 #endif
826
827 const unsigned int __num_bitmaps = _S_block_size / balloc::bits_per_block;
828 const unsigned int __size_to_allocate = sizeof(unsigned int)
829 + _S_block_size * sizeof(_Alloc_block)
830 + __num_bitmaps * sizeof(unsigned int);
831
832 unsigned int* __temp =
833 reinterpret_cast<unsigned int*>(this->_M_get(__size_to_allocate));
834 *__temp = 0;
835 // ++__temp;
836 __temp = reinterpret_cast<unsigned int*>
837 (reinterpret_cast<char*>(__temp) + sizeof(unsigned int));
838
839 // The Header information goes at the Beginning of the Block.
840 _Block_pair __bp =
841 std::make_pair(reinterpret_cast<_Alloc_block*>
842 (__temp + __num_bitmaps),
843 reinterpret_cast<_Alloc_block*>
844 (__temp + __num_bitmaps)
845 + _S_block_size - 1);
846
847 // Fill the Vector with this information.
848 _S_mem_blocks.push_back(__bp);
849
850 unsigned int __bit_mask = 0; // 0 Indicates all Allocated.
851 __bit_mask = ~__bit_mask; // 1 Indicates all Free.
852
853 for (unsigned int __i = 0; __i < __num_bitmaps; ++__i)
854 __temp[__i] = __bit_mask;
855
856 _S_block_size *= 2;
857 }
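// Layout of a freshly refilled pool segment, as an illustrative sketch
// (with N == _S_block_size and __num_bitmaps == N / bits_per_block):
//
//   [ use count : 1 unsigned int, initially 0                           ]
//   [ bitmaps   : __num_bitmaps unsigned ints, all bits set (all free)  ]
//   [ blocks    : N _Alloc_blocks -- __bp.first points at the first one ]
//
// The bitmaps are read from the word just below __bp.first downwards,
// which is why _Ffit_finder and _Bitmap_counter walk them in reverse
// order relative to the blocks they describe.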
858
859
860 static _BPVector _S_mem_blocks;
861 static unsigned int _S_block_size;
862 static __gnu_cxx::balloc::
863 _Bitmap_counter<_Alloc_block*> _S_last_request;
864 static typename _BPVector::size_type _S_last_dealloc_index;
865 #if defined __GTHREADS
866 static _Mutex _S_mut;
867 #endif
868
869 public:
870
871 // Complexity: Worst case complexity is O(N), but that is hardly
872 // ever hit. If and when this particular case is encountered,
873 // the next few requests are guaranteed to have a worst case
874 // complexity of O(1)! That's why this function performs very
875 // well on average. You can consider this function to have what
876 // is commonly referred to as amortized constant time
877 // complexity.
878 pointer
879 _M_allocate_single_object() throw(std::bad_alloc)
880 {
881 #if defined __GTHREADS
882 _Auto_Lock __bit_lock(&_S_mut);
883 #endif
884
885 // The algorithm is something like this: The _S_last_request
886 // variable points to the last accessed bitmap. Starting there,
887 // we try to find a free block in the
888 // current bitmap, or in succeeding bitmaps until the last bitmap
889 // is reached. If no free block turns up, we resort to the First
890 // Fit method.
891
892 // WARNING: Do not re-order the condition in the while
893 // statement below, because it relies on C++'s short-circuit
894 // evaluation. The return from _S_last_request._M_get() will
895 // NOT be dereferenceable if _S_last_request._M_finished()
896 // returns true. This would inevitably lead to a NULL pointer
897 // dereference if tinkered with.
898 while (_S_last_request._M_finished() == false
899 && (*(_S_last_request._M_get()) == 0))
900 {
901 _S_last_request.operator++();
902 }
903
904 if (__builtin_expect(_S_last_request._M_finished() == true, false))
905 {
906 // Fall Back to First Fit algorithm.
907 typedef typename
908 __gnu_cxx::balloc::_Ffit_finder<_Alloc_block*> _FFF;
909 _FFF __fff;
910 typedef typename _BPVector::iterator _BPiter;
911 _BPiter __bpi =
912 __gnu_cxx::balloc::__find_if
913 (_S_mem_blocks.begin(), _S_mem_blocks.end(),
914 __gnu_cxx::balloc::_Functor_Ref<_FFF>(__fff));
915
916 if (__bpi != _S_mem_blocks.end())
917 {
918 // Search was successful. Ok, now mark the first bit from
919 // the right as 0, meaning Allocated. This bit is obtained
920 // by calling _M_get() on __fff.
921 unsigned int __nz_bit = _Bit_scan_forward(*__fff._M_get());
922 balloc::__bit_allocate(__fff._M_get(), __nz_bit);
923
924 _S_last_request._M_reset(__bpi - _S_mem_blocks.begin());
925
926 // Now, get the address of the bit we marked as allocated.
927 pointer __ret = reinterpret_cast<pointer>
928 (__bpi->first + __fff._M_offset() + __nz_bit);
929 unsigned int* __puse_count = reinterpret_cast<unsigned int*>
930 (reinterpret_cast<char*>
931 (__bpi->first) - (sizeof(unsigned int) *
932 (__gnu_cxx::balloc::__num_bitmaps(*__bpi)+1)));
933
934 ++(*__puse_count);
935 return __ret;
936 }
937 else
938 {
939 // Search was unsuccessful. We add more memory to the
940 // pool by calling _S_refill_pool().
941 _S_refill_pool();
942
943 // Reset the _S_last_request structure to the first
944 // free block's bitmap.
945 _S_last_request._M_reset(_S_mem_blocks.size() - 1);
946
947 // Now, mark that bit as allocated.
948 }
949 }
950
951 // _S_last_request holds a pointer to a valid bitmap that
952 // points to a free block in memory.
953 unsigned int __nz_bit = _Bit_scan_forward(*_S_last_request._M_get());
954 balloc::__bit_allocate(_S_last_request._M_get(), __nz_bit);
955
956 pointer __ret = reinterpret_cast<pointer>
957 (_S_last_request._M_base() + _S_last_request._M_offset() + __nz_bit);
958
959 unsigned int* __puse_count = reinterpret_cast<unsigned int*>
960 (reinterpret_cast<char*>
961 (_S_mem_blocks[_S_last_request._M_where()].first)
962 - (sizeof(unsigned int) *
963 (__gnu_cxx::balloc::
964 __num_bitmaps(_S_mem_blocks[_S_last_request._M_where()])+1)));
965
966 ++(*__puse_count);
967 return __ret;
968 }
969
970 // Complexity: O(lg(N)), but the worst case is hit quite often!
971 // I need to do something about this. I'll be able to work on
972 // it only when I have some solid figures from a few real apps.
973 void
974 _M_deallocate_single_object(pointer __p) throw()
975 {
976 #if defined __GTHREADS
977 _Auto_Lock __bit_lock(&_S_mut);
978 #endif
979 _Alloc_block* __real_p = reinterpret_cast<_Alloc_block*>(__p);
980
981 typedef typename _BPVector::iterator _Iterator;
982 typedef typename _BPVector::difference_type _Difference_type;
983
984 _Difference_type __diff;
985 int __displacement;
986
987 _BALLOC_ASSERT(_S_last_dealloc_index >= 0);
988
989
990 if (__gnu_cxx::balloc::_Inclusive_between<_Alloc_block*>
991 (__real_p)
992 (_S_mem_blocks[_S_last_dealloc_index]))
993 {
994 _BALLOC_ASSERT(_S_last_dealloc_index <= _S_mem_blocks.size() - 1);
995
996 // Initial Assumption was correct!
997 __diff = _S_last_dealloc_index;
998 __displacement = __real_p - _S_mem_blocks[__diff].first;
999 }
1000 else
1001 {
1002 _Iterator _iter =
1003 __gnu_cxx::balloc::__find_if(_S_mem_blocks.begin(),
1004 _S_mem_blocks.end(),
1005 __gnu_cxx::balloc::
1006 _Inclusive_between<_Alloc_block*>(__real_p));
1007 _BALLOC_ASSERT(_iter != _S_mem_blocks.end());
1008
1009 __diff = _iter - _S_mem_blocks.begin();
1010 __displacement = __real_p - _S_mem_blocks[__diff].first;
1011 _S_last_dealloc_index = __diff;
1012 }
1013
1014 // Get the position of the iterator that has been found.
1015 const unsigned int __rotate = __displacement % balloc::bits_per_block;
1016 unsigned int* __bitmapC =
1017 reinterpret_cast<unsigned int*>(_S_mem_blocks[__diff].first) - 1;
1018 __bitmapC -= (__displacement / balloc::bits_per_block);
1019
1020 balloc::__bit_free(__bitmapC, __rotate);
1021 unsigned int* __puse_count = reinterpret_cast<unsigned int*>
1022 (reinterpret_cast<char*>
1023 (_S_mem_blocks[__diff].first)
1024 - (sizeof(unsigned int) *
1025 (__gnu_cxx::balloc::__num_bitmaps(_S_mem_blocks[__diff])+1)));
1026
1027 _BALLOC_ASSERT(*__puse_count != 0);
1028
1029 --(*__puse_count);
1030
1031 if (__builtin_expect(*__puse_count == 0, false))
1032 {
1033 _S_block_size /= 2;
1034
1035 // We can safely remove this block.
1036 // _Block_pair __bp = _S_mem_blocks[__diff];
1037 this->_M_insert(__puse_count);
1038 _S_mem_blocks.erase(_S_mem_blocks.begin() + __diff);
1039
1040 // Reset the _S_last_request variable to reflect the
1041 // erased block. We do this to protect future requests
1042 // after the last block has been removed from a particular
1043 // memory chunk, which in turn has been returned to the
1044 // free list and erased from the vector, so the size of
1045 // the vector is reduced by 1.
1046 if ((_Difference_type)_S_last_request._M_where() >= __diff--)
1047 _S_last_request._M_reset(__diff);
1048
1049 // The index into the vector of the region of memory
1050 // that might hold the next address passed to
1051 // deallocate() may have been invalidated by the above
1052 // erase() call on the vector, so we
1053 // try to restore this invariant too.
1054 if (_S_last_dealloc_index >= _S_mem_blocks.size())
1055 {
1056 _S_last_dealloc_index =(__diff != -1 ? __diff : 0);
1057 _BALLOC_ASSERT(_S_last_dealloc_index >= 0);
1058 }
1059 }
1060 }
1061
1062 public:
1063 bitmap_allocator() throw()
1064 { }
1065
1066 bitmap_allocator(const bitmap_allocator&)
1067 { }
1068
1069 template<typename _Tp1>
1070 bitmap_allocator(const bitmap_allocator<_Tp1>&) throw()
1071 { }
1072
1073 ~bitmap_allocator() throw()
1074 { }
1075
1076 // Complexity: O(1), but internally the complexity depends upon the
1077 // complexity of the function(s) _M_allocate_single_object and
1078 // operator new.
1079 pointer
1080 allocate(size_type __n)
1081 {
1082 if (__builtin_expect(__n == 1, true))
1083 return this->_M_allocate_single_object();
1084 else
1085 {
1086 const size_type __b = __n * sizeof(value_type);
1087 return reinterpret_cast<pointer>(::operator new(__b));
1088 }
1089 }
1090
1091 pointer
1092 allocate(size_type __n, typename bitmap_allocator<void>::const_pointer)
1093 { return allocate(__n); }
1094
1095 void
1096 deallocate(pointer __p, size_type __n) throw()
1097 {
1098 if (__builtin_expect(__n == 1, true))
1099 this->_M_deallocate_single_object(__p);
1100 else
1101 ::operator delete(__p);
1102 }
1103
1104 pointer
1105 address(reference __r) const
1106 { return &__r; }
1107
1108 const_pointer
1109 address(const_reference __r) const
1110 { return &__r; }
1111
1112 size_type
1113 max_size() const throw()
1114 { return (size_type()-1)/sizeof(value_type); }
1115
1116 void
1117 construct(pointer __p, const_reference __data)
1118 { ::new(__p) value_type(__data); }
1119
1120 void
1121 destroy(pointer __p)
1122 { __p->~value_type(); }
1123 };
1124
1125 template<typename _Tp1, typename _Tp2>
1126 bool
1127 operator==(const bitmap_allocator<_Tp1>&,
1128 const bitmap_allocator<_Tp2>&) throw()
1129 { return true; }
1130
1131 template<typename _Tp1, typename _Tp2>
1132 bool
1133 operator!=(const bitmap_allocator<_Tp1>&,
1134 const bitmap_allocator<_Tp2>&) throw()
1135 { return false; }
1136
1137 // Static member definitions.
1138 template<typename _Tp>
1139 typename bitmap_allocator<_Tp>::_BPVector
1140 bitmap_allocator<_Tp>::_S_mem_blocks;
1141
1142 template<typename _Tp>
1143 unsigned int bitmap_allocator<_Tp>::_S_block_size = balloc::bits_per_block;
1144
1145 template<typename _Tp>
1146 typename __gnu_cxx::bitmap_allocator<_Tp>::_BPVector::size_type
1147 bitmap_allocator<_Tp>::_S_last_dealloc_index = 0;
1148
1149 template<typename _Tp>
1150 __gnu_cxx::balloc::_Bitmap_counter
1151 <typename bitmap_allocator<_Tp>::_Alloc_block*>
1152 bitmap_allocator<_Tp>::_S_last_request(_S_mem_blocks);
1153
1154 #if defined __GTHREADS
1155 template<typename _Tp>
1156 __gnu_cxx::_Mutex
1157 bitmap_allocator<_Tp>::_S_mut;
1158 #endif
1159
1160
1161 }
1162
1163 #endif
1164
1165 // LocalWords: namespace GTHREADS bool const gthread endif Mutex mutex