// Allocator details.

// Copyright (C) 2004-2019 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

//
// ISO C++ 14882:
//

#include <bits/c++config.h>
#include <ext/concurrence.h>
#include <ext/mt_allocator.h>
#include <cstring>

// The include file is needed for uintptr_t. If this file does not compile,
// check to make sure the target has <stdint.h> and that it provides
// uintptr_t.
#include <stdint.h>

namespace
{
#ifdef __GTHREADS
  struct __freelist
  {
    typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
    _Thread_record* _M_thread_freelist;
    _Thread_record* _M_thread_freelist_array;
    size_t _M_max_threads;
    __gthread_key_t _M_key;

    ~__freelist()
    {
      if (_M_thread_freelist_array)
        {
          __gthread_key_delete(_M_key);
          ::operator delete(static_cast<void*>(_M_thread_freelist_array));
          _M_thread_freelist = 0;
        }
    }
  };

  __freelist&
  get_freelist()
  {
    static __freelist freelist;
    return freelist;
  }

  __gnu_cxx::__mutex&
  get_freelist_mutex()
  {
    static __gnu_cxx::__mutex freelist_mutex;
    return freelist_mutex;
  }

  static void
  _M_destroy_thread_key(void* __id)
  {
    // Return this thread id record to the front of thread_freelist.
    __freelist& freelist = get_freelist();
    {
      __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());
      uintptr_t _M_id = reinterpret_cast<uintptr_t>(__id);

      typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
      _Thread_record* __tr = &freelist._M_thread_freelist_array[_M_id - 1];
      __tr->_M_next = freelist._M_thread_freelist;
      freelist._M_thread_freelist = __tr;
    }
  }
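
  // NB: The thread key above does not store a pointer to a record: it
  // stores the 1-based thread id itself, cast to void*.  _M_get_thread_id
  // and _M_destroy_thread_key convert back via uintptr_t, so for id 3
  // the key holds (void*)3 and the record is
  // _M_thread_freelist_array[3 - 1].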
#endif
} // anonymous namespace

namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  void
  __pool<false>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            while (__bin._M_address)
              {
                _Block_address* __tmp = __bin._M_address->_M_next;
                ::operator delete(__bin._M_address->_M_initial);
                __bin._M_address = __tmp;
              }
            ::operator delete(__bin._M_first);
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<false>::_M_reclaim_block(char* __p, size_t __bytes) throw ()
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];

    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

    // Single threaded application - return to global pool.
    __block->_M_next = __bin._M_first[0];
    __bin._M_first[0] = __block;
  }

  char*
  __pool<false>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = (__options._M_min_bin << __which)
                              + __options._M_align;
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;
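
    // Chunk layout (a sketch inferred from the code below, not a
    // separate contract):
    //   [_Block_address][block 0][block 1]...[block N-1]
    // where each block is __bin_size bytes and the pointer handed back
    // to the caller starts _M_align bytes into a block (see the NB at
    // the end of this function).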

    // Get a new block dynamically, set it up for use.
    void* __v = ::operator new(__options._M_chunk_size);
    _Block_address* __address = static_cast<_Block_address*>(__v);
    __address->_M_initial = __v;
    __address->_M_next = __bin._M_address;
    __bin._M_address = __address;

    char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    __bin._M_first[__thread_id] = __block;
    while (--__block_count > 0)
      {
        __c += __bin_size;
        __block->_M_next = reinterpret_cast<_Block_record*>(__c);
        __block = __block->_M_next;
      }
    __block->_M_next = 0;

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }

  void
  __pool<false>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(), which
    // in turn calls this method, so if it's false, it's false forever
    // and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }
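
    // Worked example (assuming the default _Tune values, _M_min_bin == 8
    // and _M_max_bytes == 128): the loops above leave _M_bin_size == 5
    // and fill _M_binmap so that
    //   _M_binmap[0..8]    == 0  (8-byte blocks)
    //   _M_binmap[9..16]   == 1  (16-byte blocks)
    //   _M_binmap[17..32]  == 2  (32-byte blocks)
    //   _M_binmap[33..64]  == 3  (64-byte blocks)
    //   _M_binmap[65..128] == 4  (128-byte blocks)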

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);
    for (size_t __n = 0; __n < _M_bin_size; ++__n)
      {
        _Bin_record& __bin = _M_bin[__n];
        __v = ::operator new(sizeof(_Block_record*));
        __bin._M_first = static_cast<_Block_record**>(__v);
        __bin._M_first[0] = 0;
        __bin._M_address = 0;
      }
    _M_init = true;
  }


#ifdef __GTHREADS
  void
  __pool<true>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        if (__gthread_active_p())
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
                ::operator delete(__bin._M_free);
                ::operator delete(__bin._M_used);
                ::operator delete(__bin._M_mutex);
              }
          }
        else
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
              }
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<true>::_M_reclaim_block(char* __p, size_t __bytes) throw ()
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Bin_record& __bin = _M_bin[__which];

    // Know __p not null, assume valid block.
    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

    if (__gthread_active_p())
      {
        // Calculate the number of records to remove from our freelist:
        // in order to avoid too much contention we wait until the
        // number of records is "high enough".
        const size_t __thread_id = _M_get_thread_id();
        const _Tune& __options = _M_get_options();
        const size_t __limit = (100 * (_M_bin_size - __which)
                                * __options._M_freelist_headroom);
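
        // Illustrative numbers only: with the default
        // _M_freelist_headroom of 10 and five bins, a deallocation
        // into bin 0 gives __limit == 100 * 5 * 10 == 5000, so the
        // transfer to the global freelist below cannot trigger until
        // this thread's freelist holds several hundred blocks beyond
        // what the thread is measured to be using.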

        size_t __remove = __bin._M_free[__thread_id];
        __remove *= __options._M_freelist_headroom;

        // NB: We assume that reads of _Atomic_words are atomic.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        const size_t __net_used = __bin._M_used[__thread_id] - __reclaimed;

        // NB: For performance sake we don't resync every time, in order
        // to spare atomic ops.  Note that if __reclaimed increased by,
        // say, 1024, since the last sync, it means that the other
        // threads executed the atomic in the else below at least the
        // same number of times (at least, because _M_reserve_block may
        // have decreased the counter), therefore one more cannot hurt.
        if (__reclaimed > 1024)
          {
            __bin._M_used[__thread_id] -= __reclaimed;
            __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);
          }

        if (__remove >= __net_used)
          __remove -= __net_used;
        else
          __remove = 0;
        if (__remove > __limit && __remove > __bin._M_free[__thread_id])
          {
            _Block_record* __first = __bin._M_first[__thread_id];
            _Block_record* __tmp = __first;
            __remove /= __options._M_freelist_headroom;
            const size_t __removed = __remove;
            while (--__remove > 0)
              __tmp = __tmp->_M_next;
            __bin._M_first[__thread_id] = __tmp->_M_next;
            __bin._M_free[__thread_id] -= __removed;

            __gthread_mutex_lock(__bin._M_mutex);
            __tmp->_M_next = __bin._M_first[0];
            __bin._M_first[0] = __first;
            __bin._M_free[0] += __removed;
            __gthread_mutex_unlock(__bin._M_mutex);
          }

        // Return this block to our list and update counters and
        // owner id as needed.
        if (__block->_M_thread_id == __thread_id)
          --__bin._M_used[__thread_id];
        else
          __atomic_add(&__reclaimed_base[__block->_M_thread_id], 1);

        __block->_M_next = __bin._M_first[__thread_id];
        __bin._M_first[__thread_id] = __block;

        ++__bin._M_free[__thread_id];
      }
    else
      {
        // Not using threads, so single threaded application - return
        // to global pool.
        __block->_M_next = __bin._M_first[0];
        __bin._M_first[0] = __block;
      }
  }

  char*
  __pool<true>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = ((__options._M_min_bin << __which)
                               + __options._M_align);
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Are we using threads?
    // - Yes, check if there are free blocks on the global
    //   list.  If so, grab up to __block_count blocks in one
    //   lock and change ownership.  If the global list is
    //   empty, we allocate a new chunk and add those blocks
    //   directly to our own freelist (with us as owner).
    // - No, all operations are made directly to global pool 0
    //   no need to lock or change ownership but check for free
    //   blocks on global list (and if not add new ones) and
    //   get the first one.
    _Bin_record& __bin = _M_bin[__which];
    _Block_record* __block = 0;
    if (__gthread_active_p())
      {
        // Resync the _M_used counters.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        __bin._M_used[__thread_id] -= __reclaimed;
        __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);

        __gthread_mutex_lock(__bin._M_mutex);
        if (__bin._M_first[0] == 0)
          {
            void* __v = ::operator new(__options._M_chunk_size);
            _Block_address* __address = static_cast<_Block_address*>(__v);
            __address->_M_initial = __v;
            __address->_M_next = __bin._M_address;
            __bin._M_address = __address;
            __gthread_mutex_unlock(__bin._M_mutex);

            // No need to hold the lock when we are adding a whole
            // chunk to our own list.
            char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
            __block = reinterpret_cast<_Block_record*>(__c);
            __bin._M_free[__thread_id] = __block_count;
            __bin._M_first[__thread_id] = __block;
            while (--__block_count > 0)
              {
                __c += __bin_size;
                __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                __block = __block->_M_next;
              }
            __block->_M_next = 0;
          }
        else
          {
            // Is the number of required blocks greater than or equal
            // to the number that can be provided by the global free
            // list?
            __bin._M_first[__thread_id] = __bin._M_first[0];
            if (__block_count >= __bin._M_free[0])
              {
                __bin._M_free[__thread_id] = __bin._M_free[0];
                __bin._M_free[0] = 0;
                __bin._M_first[0] = 0;
              }
            else
              {
                __bin._M_free[__thread_id] = __block_count;
                __bin._M_free[0] -= __block_count;
                __block = __bin._M_first[0];
                while (--__block_count > 0)
                  __block = __block->_M_next;
                __bin._M_first[0] = __block->_M_next;
                __block->_M_next = 0;
              }
            __gthread_mutex_unlock(__bin._M_mutex);
          }
      }
    else
      {
        void* __v = ::operator new(__options._M_chunk_size);
        _Block_address* __address = static_cast<_Block_address*>(__v);
        __address->_M_initial = __v;
        __address->_M_next = __bin._M_address;
        __bin._M_address = __address;

        char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
        __block = reinterpret_cast<_Block_record*>(__c);
        __bin._M_first[0] = __block;
        while (--__block_count > 0)
          {
            __c += __bin_size;
            __block->_M_next = reinterpret_cast<_Block_record*>(__c);
            __block = __block->_M_next;
          }
        __block->_M_next = 0;
      }

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    if (__gthread_active_p())
      {
        __block->_M_thread_id = __thread_id;
        --__bin._M_free[__thread_id];
        ++__bin._M_used[__thread_id];
      }

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }

  void
  __pool<true>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        {
          __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = 0;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = 0;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);

            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);
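
            // Layout note (descriptive of the allocation just above):
            // _M_used points at a single chunk holding two parallel
            // per-thread arrays, the size_t use counts immediately
            // followed by the _Atomic_word "reclaimed" counters, which
            // _M_reclaim_block and _M_reserve_block locate as
            // reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads).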

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = 0;
            __bin._M_address = 0;
          }
      }
    _M_init = true;
  }

  size_t
  __pool<true>::_M_get_thread_id()
  {
    // If we have thread support and it's active, check the thread key
    // value and return its id; if it's not yet set, take the first
    // record from _M_thread_freelist, set the key, and return the new
    // id.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        void* v = __gthread_getspecific(freelist._M_key);
        uintptr_t _M_id = (uintptr_t)v;
        if (_M_id == 0)
          {
            {
              __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());
              if (freelist._M_thread_freelist)
                {
                  _M_id = freelist._M_thread_freelist->_M_id;
                  freelist._M_thread_freelist
                    = freelist._M_thread_freelist->_M_next;
                }
            }

            __gthread_setspecific(freelist._M_key, (void*)_M_id);
          }
        return _M_id >= _M_options._M_max_threads ? 0 : _M_id;
      }

    // Otherwise (no thread support or inactive) all requests are
    // served from the global pool 0.
    return 0;
  }

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_destroy_thread_key(void*) throw () { }

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_initialize(__destroy_handler)
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        {
          __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = 0;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = 0;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads +
                                 sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = 0;
            __bin._M_address = 0;
          }
      }
    _M_init = true;
  }
#endif

  // Instantiations.
  template class __mt_alloc<char>;
  template class __mt_alloc<wchar_t>;
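
  // A minimal usage sketch (illustrative only; __mt_alloc is a
  // standard-conforming allocator, so any container can use it):
  //
  //   #include <vector>
  //   #include <ext/mt_allocator.h>
  //
  //   int main()
  //   {
  //     std::vector<int, __gnu_cxx::__mt_alloc<int> > __v;
  //     __v.push_back(1);  // the first allocation initializes the pool
  //   }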

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace