#include <bits/c++config.h>
#include <bits/concurrence.h>
#include <ext/mt_allocator.h>
+#include <cstring>
namespace
{
// number of records is "high enough".
const size_t __thread_id = _M_get_thread_id();
const _Tune& __options = _M_get_options();
- const unsigned long __limit = 100 * (_M_bin_size - __which)
- * __options._M_freelist_headroom;
+ const size_t __limit = (100 * (_M_bin_size - __which)
+ * __options._M_freelist_headroom);
- unsigned long __remove = __bin._M_free[__thread_id];
+ size_t __remove = __bin._M_free[__thread_id];
__remove *= __options._M_freelist_headroom;
- if (__remove >= __bin._M_used[__thread_id])
- __remove -= __bin._M_used[__thread_id];
+
+ // NB: We assume that reads of _Atomic_words are atomic.
+ const size_t __max_threads = __options._M_max_threads + 1;
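+ // The "reclaimed" counters are carved out of the same chunk as
+ // _M_used (see the allocation in _M_initialize below):
+ // __max_threads size_t slots first, then __max_threads
+ // _Atomic_words.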
+ _Atomic_word* const __reclaimed_base =
+ reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
+ const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
+ const size_t __used = __bin._M_used[__thread_id] - __reclaimed;
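+ // Invariant: only the owning thread writes its plain _M_used
+ // slot; other threads only touch the matching _Atomic_word, so
+ // the true number of blocks in use is the difference computed
+ // here.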
+
+ // NB: For performance's sake we don't resync every time, in
+ // order to spare atomic ops. Note that if __reclaimed has
+ // increased by, say, 1024 since the last sync, then the other
+ // threads must have executed the atomic add in the else branch
+ // below at least that many times (at least, because
+ // _M_reserve_block may have decreased the counter), so letting
+ // it grow a bit more before resyncing cannot hurt.
+ if (__reclaimed > 1024)
+ {
+ __bin._M_used[__thread_id] -= __reclaimed;
+ __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);
+ }
+
+ if (__remove >= __used)
+ __remove -= __used;
else
__remove = 0;
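+ // Worked example (assuming the default _M_freelist_headroom of
+ // 10): with __used == 100 and 200 free blocks, __remove == 200
+ // * 10 - 100 == 1900; 1900 > 200, so if 1900 also exceeds
+ // __limit, the division below returns 190 blocks to the global
+ // pool, leaving 10 free against 100 in use.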
if (__remove > __limit && __remove > __bin._M_free[__thread_id])
_Block_record* __first = __bin._M_first[__thread_id];
_Block_record* __tmp = __first;
__remove /= __options._M_freelist_headroom;
- const unsigned long __removed = __remove;
+ const size_t __removed = __remove;
while (--__remove > 0)
__tmp = __tmp->_M_next;
__bin._M_first[__thread_id] = __tmp->_M_next;
// Return this block to our list and update counters and
// owner id as needed.
- --__bin._M_used[__block->_M_thread_id];
-
+ if (__block->_M_thread_id == __thread_id)
+ --__bin._M_used[__thread_id];
+ else
+ __atomic_add(&__reclaimed_base[__block->_M_thread_id], 1);
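+ // Cross-thread deallocation: atomically bump the owner's
+ // reclaimed counter rather than writing the owner's non-atomic
+ // _M_used slot, which would race with the owner.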
+
__block->_M_next = __bin._M_first[__thread_id];
__bin._M_first[__thread_id] = __block;
_Block_record* __block = NULL;
if (__gthread_active_p())
{
+ // Resync the _M_used counters.
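+ // Doing it unconditionally should be cheap here: this is the
+ // slow path, and the global mutex is acquired just below.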
+ const size_t __max_threads = __options._M_max_threads + 1;
+ _Atomic_word* const __reclaimed_base =
+ reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
+ const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
+ __bin._M_used[__thread_id] -= __reclaimed;
+ __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);
+
__gthread_mutex_lock(__bin._M_mutex);
if (__bin._M_first[0] == NULL)
{
{
_Bin_record& __bin = _M_bin[__n];
__v = ::operator new(sizeof(_Block_record*) * __max_threads);
+ std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
__bin._M_first = static_cast<_Block_record**>(__v);
__bin._M_address = NULL;
__v = ::operator new(sizeof(size_t) * __max_threads);
+ std::memset(__v, 0, sizeof(size_t) * __max_threads);
__bin._M_free = static_cast<size_t*>(__v);
-
- __v = ::operator new(sizeof(size_t) * __max_threads);
+
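+ // One allocation serves both arrays: the _Atomic_word reclaimed
+ // counters follow the size_t _M_used counters, so
+ // _M_reclaim_block can reach both through the _M_used pointer.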
+ __v = ::operator new(sizeof(size_t) * __max_threads
+ + sizeof(_Atomic_word) * __max_threads);
+ std::memset(__v, 0, (sizeof(size_t) * __max_threads
+ + sizeof(_Atomic_word) * __max_threads));
__bin._M_used = static_cast<size_t*>(__v);
__v = ::operator new(sizeof(__gthread_mutex_t));
#else
{ __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
- for (size_t __threadn = 0; __threadn < __max_threads; ++__threadn)
- {
- __bin._M_first[__threadn] = NULL;
- __bin._M_free[__threadn] = 0;
- __bin._M_used[__threadn] = 0;
- }
}
}
else
{
_Bin_record& __bin = _M_bin[__n];
__v = ::operator new(sizeof(_Block_record*) * __max_threads);
+ std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
__bin._M_first = static_cast<_Block_record**>(__v);
__bin._M_address = NULL;
__v = ::operator new(sizeof(size_t) * __max_threads);
+ std::memset(__v, 0, sizeof(size_t) * __max_threads);
__bin._M_free = static_cast<size_t*>(__v);
- __v = ::operator new(sizeof(size_t) * __max_threads);
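+ // Same combined _M_used/_Atomic_word allocation as in the
+ // initialization path above.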
+ __v = ::operator new(sizeof(size_t) * __max_threads
+ + sizeof(_Atomic_word) * __max_threads);
+ std::memset(__v, 0, (sizeof(size_t) * __max_threads
+ + sizeof(_Atomic_word) * __max_threads));
__bin._M_used = static_cast<size_t*>(__v);
-
+
__v = ::operator new(sizeof(__gthread_mutex_t));
__bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);
#else
{ __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
- for (size_t __threadn = 0; __threadn < __max_threads; ++__threadn)
- {
- __bin._M_first[__threadn] = NULL;
- __bin._M_free[__threadn] = 0;
- __bin._M_used[__threadn] = 0;
- }
}
}
else