// Allocator details.

// Copyright (C) 2004, 2005, 2006, 2009 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

//
// ISO C++ 14882:
//

#include <bits/c++config.h>
#include <ext/concurrence.h>
#include <ext/mt_allocator.h>
#include <cstring>

namespace
{
#ifdef __GTHREADS
  struct __freelist
  {
    typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
    _Thread_record*     _M_thread_freelist;
    _Thread_record*     _M_thread_freelist_array;
    size_t              _M_max_threads;
    __gthread_key_t     _M_key;

    ~__freelist()
    {
      if (_M_thread_freelist_array)
        {
          __gthread_key_delete(_M_key);
          ::operator delete(static_cast<void*>(_M_thread_freelist_array));
        }
    }
  };

  __freelist&
  get_freelist()
  {
    static __freelist freelist;
    return freelist;
  }

  __gnu_cxx::__mutex&
  get_freelist_mutex()
  {
    static __gnu_cxx::__mutex freelist_mutex;
    return freelist_mutex;
  }
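
  // Note: the function-local statics above give construct-on-first-use
  // semantics, so the freelist and its mutex are created the first time
  // they are needed, independently of static initialization order.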

  static void
  _M_destroy_thread_key(void* __id)
  {
    // Return this thread id record to the front of thread_freelist.
    __freelist& freelist = get_freelist();
    {
      __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());
      size_t _M_id = reinterpret_cast<size_t>(__id);

      typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
      _Thread_record* __tr = &freelist._M_thread_freelist_array[_M_id - 1];
      __tr->_M_next = freelist._M_thread_freelist;
      freelist._M_thread_freelist = __tr;
    }
  }
#endif
} // anonymous namespace

_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)

  void
  __pool<false>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            while (__bin._M_address)
              {
                _Block_address* __tmp = __bin._M_address->_M_next;
                ::operator delete(__bin._M_address->_M_initial);
                __bin._M_address = __tmp;
              }
            ::operator delete(__bin._M_first);
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<false>::_M_reclaim_block(char* __p, size_t __bytes)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];

    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

    // Single threaded application - return to global pool.
    __block->_M_next = __bin._M_first[0];
    __bin._M_first[0] = __block;
  }

  char*
  __pool<false>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = (__options._M_min_bin << __which)
                              + __options._M_align;
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;
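
    // Illustration, assuming the default _Tune values (8-byte alignment,
    // 8-byte minimum bin, chunk size just under 4 kB): bin 0 has
    // __bin_size == 16, so one chunk yields roughly 250 blocks, while
    // larger bins yield proportionally fewer blocks per chunk.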

    // Get a new block dynamically, set it up for use.
    void* __v = ::operator new(__options._M_chunk_size);
    _Block_address* __address = static_cast<_Block_address*>(__v);
    __address->_M_initial = __v;
    __address->_M_next = __bin._M_address;
    __bin._M_address = __address;

    char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    __bin._M_first[__thread_id] = __block;
    while (--__block_count > 0)
      {
        __c += __bin_size;
        __block->_M_next = reinterpret_cast<_Block_record*>(__c);
        __block = __block->_M_next;
      }
    __block->_M_next = NULL;

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }

  void
  __pool<false>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(), which
    // in turn calls this method, so if it's false, it's false forever
    // and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Set up the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }
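
    // Illustration, assuming the default _M_min_bin of 8 and _M_max_bytes
    // of 128: requests of 0-8 bytes map to bin 0, 9-16 bytes to bin 1,
    // 17-32 to bin 2, 33-64 to bin 3 and 65-128 to bin 4, so _M_bin_size
    // ends up as 5.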

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);
    for (size_t __n = 0; __n < _M_bin_size; ++__n)
      {
        _Bin_record& __bin = _M_bin[__n];
        __v = ::operator new(sizeof(_Block_record*));
        __bin._M_first = static_cast<_Block_record**>(__v);
        __bin._M_first[0] = NULL;
        __bin._M_address = NULL;
      }
    _M_init = true;
  }


#ifdef __GTHREADS
  void
  __pool<true>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        if (__gthread_active_p())
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
                ::operator delete(__bin._M_free);
                ::operator delete(__bin._M_used);
                ::operator delete(__bin._M_mutex);
              }
          }
        else
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
              }
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<true>::_M_reclaim_block(char* __p, size_t __bytes)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Bin_record& __bin = _M_bin[__which];

    // __p is known to be non-null; assume it points to a valid block.
    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    if (__gthread_active_p())
      {
        // Calculate the number of records to remove from our freelist:
        // in order to avoid too much contention we wait until the
        // number of records is "high enough".
        const size_t __thread_id = _M_get_thread_id();
        const _Tune& __options = _M_get_options();
        const size_t __limit = (100 * (_M_bin_size - __which)
                                * __options._M_freelist_headroom);

        size_t __remove = __bin._M_free[__thread_id];
        __remove *= __options._M_freelist_headroom;

        // NB: We assume that reads of _Atomic_words are atomic.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        const size_t __net_used = __bin._M_used[__thread_id] - __reclaimed;
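        // (The per-thread "reclaimed" counters are laid out directly
        // after the __max_threads size_t entries of _M_used; both arrays
        // are allocated together in _M_initialize, which is what makes
        // the pointer arithmetic above valid.)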

        // NB: For performance's sake we don't resync every time, in
        // order to spare atomic ops.  Note that if __reclaimed increased
        // by, say, 1024, since the last sync, it means that the other
        // threads executed the atomic in the else below at least the
        // same number of times (at least, because _M_reserve_block may
        // have decreased the counter), therefore one more cannot hurt.
        if (__reclaimed > 1024)
          {
            __bin._M_used[__thread_id] -= __reclaimed;
            __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);
          }

        if (__remove >= __net_used)
          __remove -= __net_used;
        else
          __remove = 0;
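
        // Illustration, assuming the default freelist headroom of 10%:
        // a thread keeps roughly __net_used / 10 blocks on its freelist
        // and only hands the surplus back to the global list (below)
        // once that surplus exceeds 100 * (_M_bin_size - __which)
        // blocks, so bins for smaller sizes tolerate a larger surplus.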
        if (__remove > __limit && __remove > __bin._M_free[__thread_id])
          {
            _Block_record* __first = __bin._M_first[__thread_id];
            _Block_record* __tmp = __first;
            __remove /= __options._M_freelist_headroom;
            const size_t __removed = __remove;
            while (--__remove > 0)
              __tmp = __tmp->_M_next;
            __bin._M_first[__thread_id] = __tmp->_M_next;
            __bin._M_free[__thread_id] -= __removed;

            __gthread_mutex_lock(__bin._M_mutex);
            __tmp->_M_next = __bin._M_first[0];
            __bin._M_first[0] = __first;
            __bin._M_free[0] += __removed;
            __gthread_mutex_unlock(__bin._M_mutex);
          }

        // Return this block to our list and update counters and
        // owner id as needed.
        if (__block->_M_thread_id == __thread_id)
          --__bin._M_used[__thread_id];
        else
          __atomic_add(&__reclaimed_base[__block->_M_thread_id], 1);

        __block->_M_next = __bin._M_first[__thread_id];
        __bin._M_first[__thread_id] = __block;

        ++__bin._M_free[__thread_id];
      }
    else
      {
        // Not using threads, so single threaded application - return
        // to global pool.
        __block->_M_next = __bin._M_first[0];
        __bin._M_first[0] = __block;
      }
  }

  char*
  __pool<true>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = ((__options._M_min_bin << __which)
                               + __options._M_align);
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Are we using threads?
    // - Yes, check if there are free blocks on the global
    //   list.  If so, grab up to __block_count blocks in one
    //   lock and change ownership.  If the global list is
    //   empty, we allocate a new chunk and add those blocks
    //   directly to our own freelist (with us as owner).
    // - No, all operations are made directly to global pool 0:
    //   no need to lock or change ownership, but check for free
    //   blocks on the global list (and if there are none, add new
    //   ones) and get the first one.
    _Bin_record& __bin = _M_bin[__which];
    _Block_record* __block = NULL;
    if (__gthread_active_p())
      {
        // Resync the _M_used counters.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        __bin._M_used[__thread_id] -= __reclaimed;
        __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);

        __gthread_mutex_lock(__bin._M_mutex);
        if (__bin._M_first[0] == NULL)
          {
            void* __v = ::operator new(__options._M_chunk_size);
            _Block_address* __address = static_cast<_Block_address*>(__v);
            __address->_M_initial = __v;
            __address->_M_next = __bin._M_address;
            __bin._M_address = __address;
            __gthread_mutex_unlock(__bin._M_mutex);

            // No need to hold the lock when we are adding a whole
            // chunk to our own list.
            char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
            __block = reinterpret_cast<_Block_record*>(__c);
            __bin._M_free[__thread_id] = __block_count;
            __bin._M_first[__thread_id] = __block;
            while (--__block_count > 0)
              {
                __c += __bin_size;
                __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                __block = __block->_M_next;
              }
            __block->_M_next = NULL;
          }
        else
          {
            // Is the number of required blocks greater than or equal
            // to the number that can be provided by the global free
            // list?
            __bin._M_first[__thread_id] = __bin._M_first[0];
            if (__block_count >= __bin._M_free[0])
              {
                __bin._M_free[__thread_id] = __bin._M_free[0];
                __bin._M_free[0] = 0;
                __bin._M_first[0] = NULL;
              }
            else
              {
                __bin._M_free[__thread_id] = __block_count;
                __bin._M_free[0] -= __block_count;
                __block = __bin._M_first[0];
                while (--__block_count > 0)
                  __block = __block->_M_next;
                __bin._M_first[0] = __block->_M_next;
                __block->_M_next = NULL;
              }
            __gthread_mutex_unlock(__bin._M_mutex);
          }
      }
    else
      {
        void* __v = ::operator new(__options._M_chunk_size);
        _Block_address* __address = static_cast<_Block_address*>(__v);
        __address->_M_initial = __v;
        __address->_M_next = __bin._M_address;
        __bin._M_address = __address;

        char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
        __block = reinterpret_cast<_Block_record*>(__c);
        __bin._M_first[0] = __block;
        while (--__block_count > 0)
          {
            __c += __bin_size;
            __block->_M_next = reinterpret_cast<_Block_record*>(__c);
            __block = __block->_M_next;
          }
        __block->_M_next = NULL;
      }

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    if (__gthread_active_p())
      {
        __block->_M_thread_id = __thread_id;
        --__bin._M_free[__thread_id];
        ++__bin._M_used[__thread_id];
      }

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }

  void
  __pool<true>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Set up the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids.  Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        {
          __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = NULL;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
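              // (When the freelist array grows, each record keeps its
              // index: a record with _M_id == 7 still lives at slot 6 of
              // the new array, and the tail of the old free list is
              // linked to the slot holding the first newly created id,
              // so the new ids follow at the end of the list.)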
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = NULL;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = NULL;
            __bin._M_address = NULL;
          }
      }
    _M_init = true;
  }

  size_t
  __pool<true>::_M_get_thread_id()
  {
    // If we have thread support and it's active, we check the thread
    // key value and return its id; if it's not set, we take the first
    // record from _M_thread_freelist, set the key, and return its id.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        void* v = __gthread_getspecific(freelist._M_key);
        size_t _M_id = (size_t)v;
        if (_M_id == 0)
          {
            {
              __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());
              if (freelist._M_thread_freelist)
                {
                  _M_id = freelist._M_thread_freelist->_M_id;
                  freelist._M_thread_freelist
                    = freelist._M_thread_freelist->_M_next;
                }
            }

            __gthread_setspecific(freelist._M_key, (void*)_M_id);
          }
        return _M_id >= _M_options._M_max_threads ? 0 : _M_id;
      }

    // Otherwise (no thread support or inactive) all requests are
    // served from the global pool 0.
    return 0;
  }

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_destroy_thread_key(void*) { }

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_initialize(__destroy_handler)
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Set up the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids.  Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        {
          __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = NULL;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = NULL;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = NULL;
            __bin._M_address = NULL;
          }
      }
    _M_init = true;
  }
#endif

  // Instantiations.
  template class __mt_alloc<char>;
  template class __mt_alloc<wchar_t>;

_GLIBCXX_END_NAMESPACE
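
// A minimal usage sketch (illustrative): __mt_alloc is a
// standard-conforming allocator declared in <ext/mt_allocator.h>, so it
// can be plugged into any container, e.g.
//
//   #include <vector>
//   #include <ext/mt_allocator.h>
//
//   int main()
//   {
//     // The container picks up the pooled allocation behaviour
//     // transparently; tuning (chunk size, headroom, ...) is
//     // controlled by the _Tune structure used throughout this file.
//     std::vector<int, __gnu_cxx::__mt_alloc<int> > v(100, 7);
//     return v.size() == 100 ? 0 : 1;
//   }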