// libstdc++-v3/src/c++98/mt_allocator.cc
// Allocator details.

// Copyright (C) 2004, 2005, 2006, 2009, 2010, 2012
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.

//
// ISO C++ 14882:
//

#include <bits/c++config.h>
#include <ext/concurrence.h>
#include <ext/mt_allocator.h>
#include <cstring>

namespace
{
#ifdef __GTHREADS
  struct __freelist
  {
    typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
    _Thread_record* _M_thread_freelist;
    _Thread_record* _M_thread_freelist_array;
    size_t _M_max_threads;
    __gthread_key_t _M_key;
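
    // NB: the destructor below runs during static destruction at program
    // exit.  Resetting _M_thread_freelist after the array has been deleted
    // keeps any later call to _M_get_thread_id from following a pointer
    // into the freed array (cf. PR 52604).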
    ~__freelist()
    {
      if (_M_thread_freelist_array)
        {
          __gthread_key_delete(_M_key);
          ::operator delete(static_cast<void*>(_M_thread_freelist_array));
          _M_thread_freelist = 0;
        }
    }
  };

  __freelist&
  get_freelist()
  {
    static __freelist freelist;
    return freelist;
  }

  __gnu_cxx::__mutex&
  get_freelist_mutex()
  {
    static __gnu_cxx::__mutex freelist_mutex;
    return freelist_mutex;
  }
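
  // NB: both accessors above use function-local statics so that the
  // freelist and its mutex are constructed on first use, independent of
  // static initialization order across translation units.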

  static void
  _M_destroy_thread_key(void* __id)
  {
    // Return this thread id record to the front of thread_freelist.
    __freelist& freelist = get_freelist();
    {
      __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());
      size_t _M_id = reinterpret_cast<size_t>(__id);

      typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
      _Thread_record* __tr = &freelist._M_thread_freelist_array[_M_id - 1];
      __tr->_M_next = freelist._M_thread_freelist;
      freelist._M_thread_freelist = __tr;
    }
  }
#endif
} // anonymous namespace

namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  void
  __pool<false>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            while (__bin._M_address)
              {
                _Block_address* __tmp = __bin._M_address->_M_next;
                ::operator delete(__bin._M_address->_M_initial);
                __bin._M_address = __tmp;
              }
            ::operator delete(__bin._M_first);
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<false>::_M_reclaim_block(char* __p, size_t __bytes) throw ()
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];

    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

    // Single threaded application - return to global pool.
    __block->_M_next = __bin._M_first[0];
    __bin._M_first[0] = __block;
  }

  char*
  __pool<false>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = (__options._M_min_bin << __which)
                              + __options._M_align;
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Get a new block dynamically, set it up for use.
    void* __v = ::operator new(__options._M_chunk_size);
    _Block_address* __address = static_cast<_Block_address*>(__v);
    __address->_M_initial = __v;
    __address->_M_next = __bin._M_address;
    __bin._M_address = __address;

    char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    __bin._M_first[__thread_id] = __block;
    while (--__block_count > 0)
      {
        __c += __bin_size;
        __block->_M_next = reinterpret_cast<_Block_record*>(__c);
        __block = __block->_M_next;
      }
    __block->_M_next = 0;

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }
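
  // Layout sketch for a chunk set up by _M_reserve_block above: one
  // _Block_address bookkeeping header followed by __block_count slots of
  // __bin_size bytes each, where the pointer handed back to the caller
  // skips the first _M_align bytes of its slot:
  //
  //   [_Block_address][align|data........][align|data........]...
  //
  // As a rough worked example (assuming the default tuning: _M_chunk_size
  // close to 4096, _M_min_bin == 8, _M_align == 8, on a 64-bit target),
  // bin 0 has __bin_size == 16, so a single chunk yields roughly
  // (4096 - sizeof(_Block_address)) / 16, i.e. about 250 blocks.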

  void
  __pool<false>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(), which
    // in turn calls this method, so if it's false, it's false forever
    // and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }
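
    // For example, assuming the default tuning (_M_min_bin == 8,
    // _M_max_bytes == 128), the loop above fills _M_binmap so that
    // requests of 0-8 bytes map to bin 0, 9-16 to bin 1, 17-32 to bin 2,
    // 33-64 to bin 3 and 65-128 to bin 4.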

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);
    for (size_t __n = 0; __n < _M_bin_size; ++__n)
      {
        _Bin_record& __bin = _M_bin[__n];
        __v = ::operator new(sizeof(_Block_record*));
        __bin._M_first = static_cast<_Block_record**>(__v);
        __bin._M_first[0] = 0;
        __bin._M_address = 0;
      }
    _M_init = true;
  }


#ifdef __GTHREADS
  void
  __pool<true>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        if (__gthread_active_p())
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
                ::operator delete(__bin._M_free);
                ::operator delete(__bin._M_used);
                ::operator delete(__bin._M_mutex);
              }
          }
        else
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
              }
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<true>::_M_reclaim_block(char* __p, size_t __bytes) throw ()
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Bin_record& __bin = _M_bin[__which];

    // Know __p not null, assume valid block.
    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    if (__gthread_active_p())
      {
        // Calculate the number of records to remove from our freelist:
        // in order to avoid too much contention we wait until the
        // number of records is "high enough".
        const size_t __thread_id = _M_get_thread_id();
        const _Tune& __options = _M_get_options();
        const size_t __limit = (100 * (_M_bin_size - __which)
                                * __options._M_freelist_headroom);

        size_t __remove = __bin._M_free[__thread_id];
        __remove *= __options._M_freelist_headroom;

        // NB: We assume that reads of _Atomic_words are atomic.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        const size_t __net_used = __bin._M_used[__thread_id] - __reclaimed;

        // NB: For performance's sake we don't resync every time, in
        // order to spare atomic ops.  Note that if __reclaimed has
        // increased by, say, 1024 since the last sync, then the other
        // threads have executed the atomic add in the else branch
        // below at least that many times (at least, because
        // _M_reserve_block may have decreased the counter), so one
        // more cannot hurt.
        if (__reclaimed > 1024)
          {
            __bin._M_used[__thread_id] -= __reclaimed;
            __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);
          }

        if (__remove >= __net_used)
          __remove -= __net_used;
        else
          __remove = 0;
        if (__remove > __limit && __remove > __bin._M_free[__thread_id])
          {
            _Block_record* __first = __bin._M_first[__thread_id];
            _Block_record* __tmp = __first;
            __remove /= __options._M_freelist_headroom;
            const size_t __removed = __remove;
            while (--__remove > 0)
              __tmp = __tmp->_M_next;
            __bin._M_first[__thread_id] = __tmp->_M_next;
            __bin._M_free[__thread_id] -= __removed;

            __gthread_mutex_lock(__bin._M_mutex);
            __tmp->_M_next = __bin._M_first[0];
            __bin._M_first[0] = __first;
            __bin._M_free[0] += __removed;
            __gthread_mutex_unlock(__bin._M_mutex);
          }

        // Return this block to our list and update counters and
        // owner id as needed.
        if (__block->_M_thread_id == __thread_id)
          --__bin._M_used[__thread_id];
        else
          __atomic_add(&__reclaimed_base[__block->_M_thread_id], 1);

        __block->_M_next = __bin._M_first[__thread_id];
        __bin._M_first[__thread_id] = __block;

        ++__bin._M_free[__thread_id];
      }
    else
      {
        // Not using threads, so single threaded application - return
        // to global pool.
        __block->_M_next = __bin._M_first[0];
        __bin._M_first[0] = __block;
      }
  }
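
  // A rough illustration of the headroom heuristic above, assuming the
  // default _M_freelist_headroom of 10 and five bins: a thread freeing
  // into bin 0 only starts handing blocks back to the global list once
  // its private free list exceeds roughly one tenth of its blocks still
  // in use plus 100 * _M_bin_size entries, and it then returns the excess
  // in a single locked splice rather than block by block.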

  char*
  __pool<true>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = ((__options._M_min_bin << __which)
                               + __options._M_align);
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Are we using threads?
    // - Yes: check whether there are free blocks on the global list.
    //   If so, grab up to __block_count blocks under one lock and
    //   change their ownership.  If the global list is empty, allocate
    //   a new chunk and add its blocks directly to our own freelist
    //   (with us as owner).
    // - No: all operations are made directly on global pool 0, so
    //   there is no need to lock or change ownership, but we still
    //   check for free blocks on the global list (adding new ones if
    //   necessary) and take the first one.
    _Bin_record& __bin = _M_bin[__which];
    _Block_record* __block = 0;
    if (__gthread_active_p())
      {
        // Resync the _M_used counters.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        __bin._M_used[__thread_id] -= __reclaimed;
        __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);

        __gthread_mutex_lock(__bin._M_mutex);
        if (__bin._M_first[0] == 0)
          {
            void* __v = ::operator new(__options._M_chunk_size);
            _Block_address* __address = static_cast<_Block_address*>(__v);
            __address->_M_initial = __v;
            __address->_M_next = __bin._M_address;
            __bin._M_address = __address;
            __gthread_mutex_unlock(__bin._M_mutex);

            // No need to hold the lock when we are adding a whole
            // chunk to our own list.
            char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
            __block = reinterpret_cast<_Block_record*>(__c);
            __bin._M_free[__thread_id] = __block_count;
            __bin._M_first[__thread_id] = __block;
            while (--__block_count > 0)
              {
                __c += __bin_size;
                __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                __block = __block->_M_next;
              }
            __block->_M_next = 0;
          }
        else
          {
            // Is the number of required blocks greater than or equal
            // to the number that can be provided by the global free
            // list?
            __bin._M_first[__thread_id] = __bin._M_first[0];
            if (__block_count >= __bin._M_free[0])
              {
                __bin._M_free[__thread_id] = __bin._M_free[0];
                __bin._M_free[0] = 0;
                __bin._M_first[0] = 0;
              }
            else
              {
                __bin._M_free[__thread_id] = __block_count;
                __bin._M_free[0] -= __block_count;
                __block = __bin._M_first[0];
                while (--__block_count > 0)
                  __block = __block->_M_next;
                __bin._M_first[0] = __block->_M_next;
                __block->_M_next = 0;
              }
            __gthread_mutex_unlock(__bin._M_mutex);
          }
      }
    else
      {
        void* __v = ::operator new(__options._M_chunk_size);
        _Block_address* __address = static_cast<_Block_address*>(__v);
        __address->_M_initial = __v;
        __address->_M_next = __bin._M_address;
        __bin._M_address = __address;

        char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
        __block = reinterpret_cast<_Block_record*>(__c);
        __bin._M_first[0] = __block;
        while (--__block_count > 0)
          {
            __c += __bin_size;
            __block->_M_next = reinterpret_cast<_Block_record*>(__c);
            __block = __block->_M_next;
          }
        __block->_M_next = 0;
      }

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    if (__gthread_active_p())
      {
        __block->_M_thread_id = __thread_id;
        --__bin._M_free[__thread_id];
        ++__bin._M_used[__thread_id];
      }

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }

  void
  __pool<true>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        {
          __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = 0;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = 0;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);

            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);
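            // NB: the _M_used allocation above is deliberately oversized:
            // the first __max_threads size_t slots are the per-thread
            // "in use" counters, and the trailing _Atomic_word array holds
            // the per-thread "reclaimed by another thread" counters that
            // _M_reclaim_block and _M_reserve_block reach via
            // reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads).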

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = 0;
            __bin._M_address = 0;
          }
      }
    _M_init = true;
  }

  size_t
  __pool<true>::_M_get_thread_id()
  {
    // If we have thread support and it's active, we check the thread
    // key value and return its id, or, if it's not yet set, we take
    // the first record from _M_thread_freelist, set the key and
    // return its id.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        void* v = __gthread_getspecific(freelist._M_key);
        size_t _M_id = (size_t)v;
        if (_M_id == 0)
          {
            {
              __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());
              if (freelist._M_thread_freelist)
                {
                  _M_id = freelist._M_thread_freelist->_M_id;
                  freelist._M_thread_freelist
                    = freelist._M_thread_freelist->_M_next;
                }
            }

            __gthread_setspecific(freelist._M_key, (void*)_M_id);
          }
        return _M_id >= _M_options._M_max_threads ? 0 : _M_id;
      }

    // Otherwise (no thread support or inactive) all requests are
    // served from the global pool 0.
    return 0;
  }
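
  // Ids handed out above are recycled: when a thread exits, the key
  // destructor (::_M_destroy_thread_key in the anonymous namespace)
  // pushes its _Thread_record back onto the shared freelist, so the same
  // id can later be issued to a new thread.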

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_destroy_thread_key(void*) throw () { }

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_initialize(__destroy_handler)
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        {
          __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = 0;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = 0;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads +
                                 sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = 0;
            __bin._M_address = 0;
          }
      }
    _M_init = true;
  }
#endif

  // Instantiations.
  template class __mt_alloc<char>;
  template class __mt_alloc<wchar_t>;
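
  // A minimal usage sketch (not part of this translation unit):
  // __mt_alloc meets the allocator requirements, so it can back a
  // standard container directly, e.g.
  //
  //   #include <vector>
  //   #include <ext/mt_allocator.h>
  //
  //   std::vector<int, __gnu_cxx::__mt_alloc<int> > v;
  //   v.push_back(42);   // storage comes from the per-thread pool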

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace