// Copyright (C) 2004-2013 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
//
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.
#include <bits/c++config.h>
#include <ext/concurrence.h>
#include <ext/mt_allocator.h>
#include <cstring> // for std::memset
namespace
{
#ifdef __GTHREADS
  struct __freelist
  {
    typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
    _Thread_record*	_M_thread_freelist;
    _Thread_record*	_M_thread_freelist_array;
    size_t		_M_max_threads;
    __gthread_key_t	_M_key;

    ~__freelist()
    {
      if (_M_thread_freelist_array)
        {
          __gthread_key_delete(_M_key);
          ::operator delete(static_cast<void*>(_M_thread_freelist_array));
          _M_thread_freelist = 0;
        }
    }
  };

  __freelist&
  get_freelist()
  {
    static __freelist freelist;
    return freelist;
  }

  __gnu_cxx::__mutex&
  get_freelist_mutex()
  {
    static __gnu_cxx::__mutex freelist_mutex;
    return freelist_mutex;
  }

  static void
  _M_destroy_thread_key(void* __id)
  {
    // Return this thread id record to the front of thread_freelist.
    __freelist& freelist = get_freelist();
    __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());
    size_t _M_id = reinterpret_cast<size_t>(__id);

    typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
    _Thread_record* __tr = &freelist._M_thread_freelist_array[_M_id - 1];
    __tr->_M_next = freelist._M_thread_freelist;
    freelist._M_thread_freelist = __tr;
  }
#endif
} // anonymous namespace
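
// Thread ids are 1-based because the global pool reserves id 0: the
// record for id N lives at _M_thread_freelist_array[N - 1].  The key
// destructor above runs at thread exit and pushes the record back on
// the freelist so the id can be reused by a later thread.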
namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
  void
  __pool<false>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            while (__bin._M_address)
              {
                _Block_address* __tmp = __bin._M_address->_M_next;
                ::operator delete(__bin._M_address->_M_initial);
                __bin._M_address = __tmp;
              }
            ::operator delete(__bin._M_first);
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }
  void
  __pool<false>::_M_reclaim_block(char* __p, size_t __bytes) throw ()
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];

    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

    // Single threaded application - return to global pool.
    __block->_M_next = __bin._M_first[0];
    __bin._M_first[0] = __block;
  }
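
  // A reclaimed pointer sits _M_align bytes past the start of its
  // block, so subtracting _M_get_align() recovers the _Block_record
  // header that links the block back onto the freelist.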
  char*
  __pool<false>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = (__options._M_min_bin << __which)
                              + __options._M_align;
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Get a new block dynamically, set it up for use.
    void* __v = ::operator new(__options._M_chunk_size);
    _Block_address* __address = static_cast<_Block_address*>(__v);
    __address->_M_initial = __v;
    __address->_M_next = __bin._M_address;
    __bin._M_address = __address;

    char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    __bin._M_first[__thread_id] = __block;
    while (--__block_count > 0)
      {
        // Advance to the next carved block and link it onto the list.
        __c += __bin_size;
        __block->_M_next = reinterpret_cast<_Block_record*>(__c);
        __block = __block->_M_next;
      }
    __block->_M_next = 0;

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }
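
  // Worked example, assuming the documented _Tune defaults
  // (_M_chunk_size == 4096, _M_min_bin == 8, _M_align == 8): bin 2
  // serves blocks of (8 << 2) + 8 == 40 bytes, so one chunk is carved
  // into (4096 - sizeof(_Block_address)) / 40, roughly a hundred,
  // block records, all threaded onto the lone freelist _M_first[0].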
  void
  __pool<false>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(), which
    // in turn calls this method, so if it's false, it's false forever
    // and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Set up the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);
    for (size_t __n = 0; __n < _M_bin_size; ++__n)
      {
        _Bin_record& __bin = _M_bin[__n];
        __v = ::operator new(sizeof(_Block_record*));
        __bin._M_first = static_cast<_Block_record**>(__v);
        __bin._M_first[0] = 0;
        __bin._M_address = 0;
      }
    _M_init = true;
  }
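
  // Binmap example, assuming the documented defaults (_M_min_bin == 8,
  // _M_max_bytes == 128): five bins of 8, 16, 32, 64 and 128 bytes are
  // created, and _M_binmap maps request sizes 0-8 to bin 0, 9-16 to
  // bin 1, 17-32 to bin 2, and so on, so allocate() finds its bin with
  // a single array lookup.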
#ifdef __GTHREADS
  void
  __pool<true>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        if (__gthread_active_p())
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
                ::operator delete(__bin._M_free);
                ::operator delete(__bin._M_used);
                ::operator delete(__bin._M_mutex);
              }
          }
        else
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
              }
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }
  void
  __pool<true>::_M_reclaim_block(char* __p, size_t __bytes) throw ()
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Bin_record& __bin = _M_bin[__which];

    // Know __p not null, assume valid block.
    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    if (__gthread_active_p())
      {
        // Calculate the number of records to remove from our freelist:
        // in order to avoid too much contention we wait until the
        // number of records is "high enough".
        const size_t __thread_id = _M_get_thread_id();
        const _Tune& __options = _M_get_options();
        const size_t __limit = (100 * (_M_bin_size - __which)
                                * __options._M_freelist_headroom);

        size_t __remove = __bin._M_free[__thread_id];
        __remove *= __options._M_freelist_headroom;

        // NB: We assume that reads of _Atomic_words are atomic.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        const size_t __net_used = __bin._M_used[__thread_id] - __reclaimed;

        // NB: For performance sake we don't resync every time, in order
        // to spare atomic ops.  Note that if __reclaimed increased by,
        // say, 1024, since the last sync, it means that the other
        // threads executed the atomic in the else below at least the
        // same number of times (at least, because _M_reserve_block may
        // have decreased the counter), therefore one more cannot hurt.
        if (__reclaimed > 1024)
          {
            __bin._M_used[__thread_id] -= __reclaimed;
            __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);
          }

        if (__remove >= __net_used)
          __remove -= __net_used;
        else
          __remove = 0;
        if (__remove > __limit && __remove > __bin._M_free[__thread_id])
          {
            _Block_record* __first = __bin._M_first[__thread_id];
            _Block_record* __tmp = __first;
            __remove /= __options._M_freelist_headroom;
            const size_t __removed = __remove;
            while (--__remove > 0)
              __tmp = __tmp->_M_next;
            __bin._M_first[__thread_id] = __tmp->_M_next;
            __bin._M_free[__thread_id] -= __removed;

            __gthread_mutex_lock(__bin._M_mutex);
            __tmp->_M_next = __bin._M_first[0];
            __bin._M_first[0] = __first;
            __bin._M_free[0] += __removed;
            __gthread_mutex_unlock(__bin._M_mutex);
          }

        // Return this block to our list and update counters and
        // owner id as needed.
        if (__block->_M_thread_id == __thread_id)
          --__bin._M_used[__thread_id];
        else
          __atomic_add(&__reclaimed_base[__block->_M_thread_id], 1);

        __block->_M_next = __bin._M_first[__thread_id];
        __bin._M_first[__thread_id] = __block;

        ++__bin._M_free[__thread_id];
      }
    else
      {
        // Not using threads, so single threaded application - return
        // to global pool.
        __block->_M_next = __bin._M_first[0];
        __bin._M_first[0] = __block;
      }
  }
  char*
  __pool<true>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = ((__options._M_min_bin << __which)
                               + __options._M_align);
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Are we using threads?
    // - Yes, check if there are free blocks on the global
    //   list. If so, grab up to __block_count blocks in one
    //   lock and change ownership. If the global list is
    //   empty, we allocate a new chunk and add those blocks
    //   directly to our own freelist (with us as owner).
    // - No, all operations are made directly to global pool 0;
    //   no need to lock or change ownership, but check for free
    //   blocks on the global list (and if not, add new ones) and
    //   get the first one.
    _Bin_record& __bin = _M_bin[__which];
    _Block_record* __block = 0;
    if (__gthread_active_p())
      {
        // Resync the _M_used counters.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        __bin._M_used[__thread_id] -= __reclaimed;
        __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);

        __gthread_mutex_lock(__bin._M_mutex);
        if (__bin._M_first[0] == 0)
          {
            void* __v = ::operator new(__options._M_chunk_size);
            _Block_address* __address = static_cast<_Block_address*>(__v);
            __address->_M_initial = __v;
            __address->_M_next = __bin._M_address;
            __bin._M_address = __address;
            __gthread_mutex_unlock(__bin._M_mutex);

            // No need to hold the lock when we are adding a whole
            // chunk to our own list.
            char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
            __block = reinterpret_cast<_Block_record*>(__c);
            __bin._M_free[__thread_id] = __block_count;
            __bin._M_first[__thread_id] = __block;
            while (--__block_count > 0)
              {
                __c += __bin_size;
                __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                __block = __block->_M_next;
              }
            __block->_M_next = 0;
          }
        else
          {
            // Is the number of required blocks greater than or equal
            // to the number that can be provided by the global free
            // list?
            __bin._M_first[__thread_id] = __bin._M_first[0];
            if (__block_count >= __bin._M_free[0])
              {
                __bin._M_free[__thread_id] = __bin._M_free[0];
                __bin._M_free[0] = 0;
                __bin._M_first[0] = 0;
              }
            else
              {
                __bin._M_free[__thread_id] = __block_count;
                __bin._M_free[0] -= __block_count;
                __block = __bin._M_first[0];
                while (--__block_count > 0)
                  __block = __block->_M_next;
                __bin._M_first[0] = __block->_M_next;
                __block->_M_next = 0;
              }
            __gthread_mutex_unlock(__bin._M_mutex);
          }
      }
    else
      {
        void* __v = ::operator new(__options._M_chunk_size);
        _Block_address* __address = static_cast<_Block_address*>(__v);
        __address->_M_initial = __v;
        __address->_M_next = __bin._M_address;
        __bin._M_address = __address;

        char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
        __block = reinterpret_cast<_Block_record*>(__c);
        __bin._M_first[0] = __block;
        while (--__block_count > 0)
          {
            __c += __bin_size;
            __block->_M_next = reinterpret_cast<_Block_record*>(__c);
            __block = __block->_M_next;
          }
        __block->_M_next = 0;
      }

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    if (__gthread_active_p())
      {
        __block->_M_thread_id = __thread_id;
        --__bin._M_free[__thread_id];
        ++__bin._M_used[__thread_id];
      }

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }
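
  // On the threaded path a reserve either carves a fresh chunk (the
  // bin mutex is released early because the new blocks are visible
  // only to this thread) or moves up to __block_count blocks from
  // global list 0 while holding the mutex; either way the returned
  // block is stamped with __thread_id so a later reclaim credits the
  // right owner.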
  void
  __pool<true>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Set up the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p(), create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        {
          __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0.
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = 0;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = 0;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);

            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = 0;
            __bin._M_address = 0;
          }
      }
    _M_init = true;
  }
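
  // Layout note: each _Bin_record::_M_used is one allocation holding
  // __max_threads size_t counters followed by __max_threads
  // _Atomic_word "reclaimed" counters; _M_reclaim_block and
  // _M_reserve_block reach the latter via _M_used + __max_threads.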
  size_t
  __pool<true>::_M_get_thread_id()
  {
    // If we have thread support and it's active, check the thread key
    // value and return its id; if it's not set, take the first record
    // from _M_thread_freelist, set the key and return its id.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        void* v = __gthread_getspecific(freelist._M_key);
        size_t _M_id = (size_t)v;
        if (_M_id == 0)
          {
            {
              __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());
              if (freelist._M_thread_freelist)
                {
                  _M_id = freelist._M_thread_freelist->_M_id;
                  freelist._M_thread_freelist
                    = freelist._M_thread_freelist->_M_next;
                }
            }

            __gthread_setspecific(freelist._M_key, (void*)_M_id);
          }
        return _M_id >= _M_options._M_max_threads ? 0 : _M_id;
      }

    // Otherwise (no thread support or inactive) all requests are
    // served from the global pool 0.
    return 0;
  }
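
  // Note on _M_get_thread_id(): ids are handed out once per thread via
  // the gthread key; a thread that finds the freelist exhausted keeps
  // id 0 and is served from the shared global pool instead.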
  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_destroy_thread_key(void*) throw () { }
  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_initialize(__destroy_handler)
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Set up the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p(), create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        {
          __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0.
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = 0;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = 0;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = 0;
            __bin._M_address = 0;
          }
      }
    _M_init = true;
  }
#endif // __GTHREADS

  // Instantiations.
  template class __mt_alloc<char>;
  template class __mt_alloc<wchar_t>;

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace
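
// Example usage (an illustrative sketch; the allocator is selected as
// a container's allocator template argument):
//
//   #include <ext/mt_allocator.h>
//   #include <vector>
//
//   int main()
//   {
//     std::vector<int, __gnu_cxx::__mt_alloc<int> > __v;
//     __v.push_back(17);  // small request served from a pool bin
//   }                     // deallocations return blocks to the pool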