// MT-optimized allocator -*- C++ -*-

// Copyright (C) 2004, 2005, 2006, 2009, 2010, 2012
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.
#include <bits/c++config.h>
#include <ext/concurrence.h>
#include <ext/mt_allocator.h>
#include <cstring>
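
// Example usage (illustrative only, not part of this translation unit):
// __mt_alloc is a drop-in allocator for the standard containers, e.g.
//
//   #include <vector>
//   #include <ext/mt_allocator.h>
//
//   std::vector<int, __gnu_cxx::__mt_alloc<int> > __v;
//   __v.push_back(1);  // served from the per-thread pool, not raw ::new
//
// Setting the environment variable GLIBCXX_FORCE_NEW bypasses the pool
// and forwards every request to ::operator new.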
namespace
{
#ifdef __GTHREADS
  struct __freelist
  {
    typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
    _Thread_record*   _M_thread_freelist;
    _Thread_record*   _M_thread_freelist_array;
    size_t            _M_max_threads;
    __gthread_key_t   _M_key;
    ~__freelist()
    {
      if (_M_thread_freelist_array)
        {
          __gthread_key_delete(_M_key);
          ::operator delete(static_cast<void*>(_M_thread_freelist_array));
          _M_thread_freelist = 0;
        }
    }
  };

  __freelist&
  get_freelist()
  {
    static __freelist freelist;
    return freelist;
  }

  __gnu_cxx::__mutex&
  get_freelist_mutex()
  {
    static __gnu_cxx::__mutex freelist_mutex;
    return freelist_mutex;
  }

  static void
  _M_destroy_thread_key(void* __id)
  {
    // Return this thread id record to the front of thread_freelist.
    __freelist& freelist = get_freelist();
    {
      __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());
      size_t _M_id = reinterpret_cast<size_t>(__id);

      typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
      _Thread_record* __tr = &freelist._M_thread_freelist_array[_M_id - 1];
      __tr->_M_next = freelist._M_thread_freelist;
      freelist._M_thread_freelist = __tr;
    }
  }
#endif
} // anonymous namespace

namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  void
  __pool<false>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            while (__bin._M_address)
              {
                _Block_address* __tmp = __bin._M_address->_M_next;
                ::operator delete(__bin._M_address->_M_initial);
                __bin._M_address = __tmp;
              }
            ::operator delete(__bin._M_first);
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }
  void
  __pool<false>::_M_reclaim_block(char* __p, size_t __bytes) throw ()
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];

    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

    // Single threaded application - return to global pool.
    __block->_M_next = __bin._M_first[0];
    __bin._M_first[0] = __block;
  }
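
  // The address arithmetic above relies on the block layout produced
  // by _M_reserve_block: a user pointer __p always sits _M_align bytes
  // past the _Block_record that heads its block, i.e.
  //
  //   block:  [_Block_record ... padding][user data ...]
  //            ^__c                      ^__p == __c + _M_get_align()
  //
  // so stepping back by _M_get_align() recovers the record to relink.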
  char*
  __pool<false>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = (__options._M_min_bin << __which)
                              + __options._M_align;
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Get a new block dynamically, set it up for use.
    void* __v = ::operator new(__options._M_chunk_size);
    _Block_address* __address = static_cast<_Block_address*>(__v);
    __address->_M_initial = __v;
    __address->_M_next = __bin._M_address;
    __bin._M_address = __address;

    char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    __bin._M_first[__thread_id] = __block;
    while (--__block_count > 0)
      {
        __c += __bin_size;
        __block->_M_next = reinterpret_cast<_Block_record*>(__c);
        __block = __block->_M_next;
      }
    __block->_M_next = 0;

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }
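
  // Each chunk allocated above is carved as
  //
  //   [_Block_address][block 0][block 1]...[block N-1][slack]
  //
  // with every block __bin_size = (_M_min_bin << __which) + _M_align
  // bytes, hence N = (_M_chunk_size - sizeof(_Block_address)) / __bin_size
  // blocks per chunk, threaded together into the bin's freelist.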
  void
  __pool<false>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(), which
    // in turn calls this method, so if it's false, it's false forever
    // and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);
    for (size_t __n = 0; __n < _M_bin_size; ++__n)
      {
        _Bin_record& __bin = _M_bin[__n];
        __v = ::operator new(sizeof(_Block_record*));
        __bin._M_first = static_cast<_Block_record**>(__v);
        __bin._M_first[0] = 0;
        __bin._M_address = 0;
      }
    _M_init = true;
  }
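
  // With the default _Tune values (_M_min_bin = 8, _M_max_bytes = 128;
  // see <ext/mt_allocator.h>) the map built above is
  //
  //   _M_binmap[0..8]   == 0   (bin of 8-byte blocks)
  //   _M_binmap[9..16]  == 1   (16-byte blocks)
  //   _M_binmap[17..32] == 2   ... up to _M_binmap[65..128] == 4
  //
  // so allocate() finds the right bin with a single table lookup.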
#ifdef __GTHREADS
  void
  __pool<true>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        if (__gthread_active_p())
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
                ::operator delete(__bin._M_free);
                ::operator delete(__bin._M_used);
                ::operator delete(__bin._M_mutex);
              }
          }
        else
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
              }
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }
  void
  __pool<true>::_M_reclaim_block(char* __p, size_t __bytes) throw ()
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Bin_record& __bin = _M_bin[__which];

    // Know __p not null, assume valid block.
    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    if (__gthread_active_p())
      {
        // Calculate the number of records to remove from our freelist:
        // in order to avoid too much contention we wait until the
        // number of records is "high enough".
        const size_t __thread_id = _M_get_thread_id();
        const _Tune& __options = _M_get_options();
        const size_t __limit = (100 * (_M_bin_size - __which)
                                * __options._M_freelist_headroom);

        size_t __remove = __bin._M_free[__thread_id];
        __remove *= __options._M_freelist_headroom;

        // NB: We assume that reads of _Atomic_words are atomic.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        const size_t __net_used = __bin._M_used[__thread_id] - __reclaimed;

        // NB: For performance sake we don't resync every time, in order
        // to spare atomic ops.  Note that if __reclaimed increased by,
        // say, 1024, since the last sync, it means that the other
        // threads executed the atomic in the else below at least the
        // same number of times (at least, because _M_reserve_block may
        // have decreased the counter), therefore one more cannot hurt.
        if (__reclaimed > 1024)
          {
            __bin._M_used[__thread_id] -= __reclaimed;
            __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);
          }

        if (__remove >= __net_used)
          __remove -= __net_used;
        else
          __remove = 0;
        if (__remove > __limit && __remove > __bin._M_free[__thread_id])
          {
            _Block_record* __first = __bin._M_first[__thread_id];
            _Block_record* __tmp = __first;
            __remove /= __options._M_freelist_headroom;
            const size_t __removed = __remove;
            while (--__remove > 0)
              __tmp = __tmp->_M_next;
            __bin._M_first[__thread_id] = __tmp->_M_next;
            __bin._M_free[__thread_id] -= __removed;

            __gthread_mutex_lock(__bin._M_mutex);
            __tmp->_M_next = __bin._M_first[0];
            __bin._M_first[0] = __first;
            __bin._M_free[0] += __removed;
            __gthread_mutex_unlock(__bin._M_mutex);
          }

        // Return this block to our list and update counters and
        // owner id as needed.
        if (__block->_M_thread_id == __thread_id)
          --__bin._M_used[__thread_id];
        else
          __atomic_add(&__reclaimed_base[__block->_M_thread_id], 1);

        __block->_M_next = __bin._M_first[__thread_id];
        __bin._M_first[__thread_id] = __block;

        ++__bin._M_free[__thread_id];
      }
    else
      {
        // Not using threads, so single threaded application - return
        // to global pool.
        __block->_M_next = __bin._M_first[0];
        __bin._M_first[0] = __block;
      }
  }
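
  // A worked example of the headroom test above, assuming the default
  // _M_freelist_headroom of 10: a thread holding 300 free blocks with
  // __net_used = 20 computes __remove = 300 * 10 - 20 = 2980; only if
  // that exceeds both __limit and its own free count does it move
  // __remove / 10 = 298 blocks back to global list 0 under the bin mutex.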
  char*
  __pool<true>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = ((__options._M_min_bin << __which)
                               + __options._M_align);
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Are we using threads?
    // - Yes, check if there are free blocks on the global
    //   list. If so, grab up to __block_count blocks in one
    //   lock and change ownership. If the global list is
    //   empty, we allocate a new chunk and add those blocks
    //   directly to our own freelist (with us as owner).
    // - No, all operations are made directly to global pool 0
    //   no need to lock or change ownership but check for free
    //   blocks on global list (and if not add new ones) and
    //   get the first one.
    _Bin_record& __bin = _M_bin[__which];
    _Block_record* __block = 0;
    if (__gthread_active_p())
      {
        // Resync the _M_used counters.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        __bin._M_used[__thread_id] -= __reclaimed;
        __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);

        __gthread_mutex_lock(__bin._M_mutex);
        if (__bin._M_first[0] == 0)
          {
            void* __v = ::operator new(__options._M_chunk_size);
            _Block_address* __address = static_cast<_Block_address*>(__v);
            __address->_M_initial = __v;
            __address->_M_next = __bin._M_address;
            __bin._M_address = __address;
            __gthread_mutex_unlock(__bin._M_mutex);

            // No need to hold the lock when we are adding a whole
            // chunk to our own list.
            char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
            __block = reinterpret_cast<_Block_record*>(__c);
            __bin._M_free[__thread_id] = __block_count;
            __bin._M_first[__thread_id] = __block;
            while (--__block_count > 0)
              {
                __c += __bin_size;
                __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                __block = __block->_M_next;
              }
            __block->_M_next = 0;
          }
        else
          {
            // Is the number of required blocks greater than or equal
            // to the number that can be provided by the global free
            // list?
            __bin._M_first[__thread_id] = __bin._M_first[0];
            if (__block_count >= __bin._M_free[0])
              {
                __bin._M_free[__thread_id] = __bin._M_free[0];
                __bin._M_free[0] = 0;
                __bin._M_first[0] = 0;
              }
            else
              {
                __bin._M_free[__thread_id] = __block_count;
                __bin._M_free[0] -= __block_count;
                __block = __bin._M_first[0];
                while (--__block_count > 0)
                  __block = __block->_M_next;
                __bin._M_first[0] = __block->_M_next;
                __block->_M_next = 0;
              }
            __gthread_mutex_unlock(__bin._M_mutex);
          }
      }
    else
      {
        void* __v = ::operator new(__options._M_chunk_size);
        _Block_address* __address = static_cast<_Block_address*>(__v);
        __address->_M_initial = __v;
        __address->_M_next = __bin._M_address;
        __bin._M_address = __address;

        char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
        __block = reinterpret_cast<_Block_record*>(__c);
        __bin._M_first[0] = __block;
        while (--__block_count > 0)
          {
            __c += __bin_size;
            __block->_M_next = reinterpret_cast<_Block_record*>(__c);
            __block = __block->_M_next;
          }
        __block->_M_next = 0;
      }

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    if (__gthread_active_p())
      {
        __block->_M_thread_id = __thread_id;
        --__bin._M_free[__thread_id];
        ++__bin._M_used[__thread_id];
      }

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }
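
  // Per-bin bookkeeping maintained above: _M_free[__tid] counts blocks
  // parked on thread __tid's freelist, _M_used[__tid] counts blocks it
  // has handed out, and the _Atomic_word array laid out directly after
  // _M_used (reads assumed atomic) absorbs frees performed by other
  // threads until the owner resyncs.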
  void
  __pool<true>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        {
          __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = 0;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = 0;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = 0;
            __bin._M_address = 0;
          }
      }
    _M_init = true;
  }
  size_t
  __pool<true>::_M_get_thread_id()
  {
    // If we have thread support and it's active we check the thread
    // key value and return its id or if it's not set we take the
    // first record from _M_thread_freelist and sets the key and
    // returns its id.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        void* v = __gthread_getspecific(freelist._M_key);
        size_t _M_id = (size_t)v;
        if (_M_id == 0)
          {
            {
              __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());
              if (freelist._M_thread_freelist)
                {
                  _M_id = freelist._M_thread_freelist->_M_id;
                  freelist._M_thread_freelist
                    = freelist._M_thread_freelist->_M_next;
                }
            }

            __gthread_setspecific(freelist._M_key, (void*)_M_id);
          }
        return _M_id >= _M_options._M_max_threads ? 0 : _M_id;
      }

    // Otherwise (no thread support or inactive) all requests are
    // served from the global pool 0.
    return 0;
  }
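
  // Note: a thread that finds the freelist exhausted keeps _M_id == 0
  // and is served from global pool 0, as is any id at or above
  // _M_max_threads (the clamp in the return above).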
  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_destroy_thread_key(void*) throw () { }
  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_initialize(__destroy_handler)
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        {
          __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = 0;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = 0;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = 0;
            __bin._M_address = 0;
          }
      }
    _M_init = true;
  }
#endif
  // Instantiations.
  template class __mt_alloc<char>;
  template class __mt_alloc<wchar_t>;

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace