const size_t size = mi_segment_size(segment);
const size_t csize = _mi_commit_mask_committed_size(&segment->commit_mask, size);
- _mi_abandoned_await_readers(); // wait until safe to free
+ _mi_abandoned_await_readers(tld->abandoned); // wait until safe to free
_mi_arena_free(segment, mi_segment_size(segment), csize, segment->memid, tld->stats);
}
// Use the bottom 20-bits (on 64-bit) of the aligned segment pointers
// to put in a tag that increments on update to avoid the A-B-A problem.
#define MI_TAGGED_MASK MI_SEGMENT_MASK
-typedef uintptr_t mi_tagged_segment_t;
static mi_segment_t* mi_tagged_segment_ptr(mi_tagged_segment_t ts) {
  return (mi_segment_t*)(ts & ~MI_TAGGED_MASK);
}

static mi_tagged_segment_t mi_tagged_segment(mi_segment_t* segment, mi_tagged_segment_t ts) {
  mi_assert_internal(((uintptr_t)segment & MI_TAGGED_MASK) == 0);
  uintptr_t tag = ((ts & MI_TAGGED_MASK) + 1) & MI_TAGGED_MASK;
  return ((uintptr_t)segment | tag);
}
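// Example: if a thread loads the head as (S | tag=3) and stalls while another
// thread pops S, re-abandons it, and pushes it back, the head becomes
// (S | tag=4); the stalled thread's CAS against the old (S | tag=3) value then
// fails instead of silently linking through a recycled pointer -- this is how
// the incrementing tag avoids the A-B-A problem.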
-// This is a list of visited abandoned pages that were full at the time.
-// this list migrates to `abandoned` when that becomes NULL. The use of
-// this list reduces contention and the rate at which segments are visited.
-static mi_decl_cache_align _Atomic(mi_segment_t*) abandoned_visited; // = NULL
-
-// The abandoned page list (tagged as it supports pop)
-static mi_decl_cache_align _Atomic(mi_tagged_segment_t) abandoned; // = NULL
-
-// Maintain these for debug purposes (these counts may be a bit off)
-static mi_decl_cache_align _Atomic(size_t) abandoned_count;
-static mi_decl_cache_align _Atomic(size_t) abandoned_visited_count;
-
-// We also maintain a count of current readers of the abandoned list
-// in order to prevent resetting/decommitting segment memory if it might
-// still be read.
-static mi_decl_cache_align _Atomic(size_t) abandoned_readers; // = 0
+mi_abandoned_pool_t _mi_abandoned_default;
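// Note: `mi_abandoned_pool_t` bundles what used to be the file-local globals
// above; judging from the accesses below it looks roughly like:
//
//   typedef struct mi_abandoned_pool_s {
//     mi_decl_cache_align _Atomic(mi_segment_t*)        abandoned_visited;
//     mi_decl_cache_align _Atomic(mi_tagged_segment_t)  abandoned;
//     mi_decl_cache_align _Atomic(size_t)               abandoned_count;
//     mi_decl_cache_align _Atomic(size_t)               abandoned_visited_count;
//     mi_decl_cache_align _Atomic(size_t)               abandoned_readers;
//   } mi_abandoned_pool_t;
//
// Every former global access below now goes through the `pool` parameter
// (usually `tld->abandoned`), with `_mi_abandoned_default` as the default instance.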
// Push on the visited list
-static void mi_abandoned_visited_push(mi_segment_t* segment) {
+static void mi_abandoned_visited_push(mi_abandoned_pool_t *pool, mi_segment_t* segment) {
mi_assert_internal(segment->thread_id == 0);
mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_segment_t,&segment->abandoned_next) == NULL);
mi_assert_internal(segment->next == NULL);
mi_assert_internal(segment->used > 0);
- mi_segment_t* anext = mi_atomic_load_ptr_relaxed(mi_segment_t, &abandoned_visited);
+ mi_segment_t* anext = mi_atomic_load_ptr_relaxed(mi_segment_t, &pool->abandoned_visited);
do {
mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, anext);
- } while (!mi_atomic_cas_ptr_weak_release(mi_segment_t, &abandoned_visited, &anext, segment));
- mi_atomic_increment_relaxed(&abandoned_visited_count);
+ } while (!mi_atomic_cas_ptr_weak_release(mi_segment_t, &pool->abandoned_visited, &anext, segment));
+ mi_atomic_increment_relaxed(&pool->abandoned_visited_count);
}
// Move the visited list to the abandoned list.
-static bool mi_abandoned_visited_revisit(void)
+static bool mi_abandoned_visited_revisit(mi_abandoned_pool_t *pool)
{
// quick check if the visited list is empty
- if (mi_atomic_load_ptr_relaxed(mi_segment_t, &abandoned_visited) == NULL) return false;
+ if (mi_atomic_load_ptr_relaxed(mi_segment_t, &pool->abandoned_visited) == NULL) return false;
// grab the whole visited list
- mi_segment_t* first = mi_atomic_exchange_ptr_acq_rel(mi_segment_t, &abandoned_visited, NULL);
+ mi_segment_t* first = mi_atomic_exchange_ptr_acq_rel(mi_segment_t, &pool->abandoned_visited, NULL);
if (first == NULL) return false;
// first try to swap directly if the abandoned list happens to be NULL
mi_tagged_segment_t afirst;
- mi_tagged_segment_t ts = mi_atomic_load_relaxed(&abandoned);
+ mi_tagged_segment_t ts = mi_atomic_load_relaxed(&pool->abandoned);
if (mi_tagged_segment_ptr(ts)==NULL) {
- size_t count = mi_atomic_load_relaxed(&abandoned_visited_count);
+ size_t count = mi_atomic_load_relaxed(&pool->abandoned_visited_count);
afirst = mi_tagged_segment(first, ts);
- if (mi_atomic_cas_strong_acq_rel(&abandoned, &ts, afirst)) {
- mi_atomic_add_relaxed(&abandoned_count, count);
- mi_atomic_sub_relaxed(&abandoned_visited_count, count);
+ if (mi_atomic_cas_strong_acq_rel(&pool->abandoned, &ts, afirst)) {
+ mi_atomic_add_relaxed(&pool->abandoned_count, count);
+ mi_atomic_sub_relaxed(&pool->abandoned_visited_count, count);
return true;
}
}
  // find the last element of the visited list: O(n)
  mi_segment_t* last = first;
  mi_segment_t* next;
  while ((next = mi_atomic_load_ptr_relaxed(mi_segment_t, &last->abandoned_next)) != NULL) {
    last = next;
  }

  // and atomically prepend to the abandoned list
  // (no need to increase the readers as we don't access the abandoned segments)
- mi_tagged_segment_t anext = mi_atomic_load_relaxed(&abandoned);
+ mi_tagged_segment_t anext = mi_atomic_load_relaxed(&pool->abandoned);
size_t count;
do {
- count = mi_atomic_load_relaxed(&abandoned_visited_count);
+ count = mi_atomic_load_relaxed(&pool->abandoned_visited_count);
mi_atomic_store_ptr_release(mi_segment_t, &last->abandoned_next, mi_tagged_segment_ptr(anext));
afirst = mi_tagged_segment(first, anext);
- } while (!mi_atomic_cas_weak_release(&abandoned, &anext, afirst));
- mi_atomic_add_relaxed(&abandoned_count, count);
- mi_atomic_sub_relaxed(&abandoned_visited_count, count);
+ } while (!mi_atomic_cas_weak_release(&pool->abandoned, &anext, afirst));
+ mi_atomic_add_relaxed(&pool->abandoned_count, count);
+ mi_atomic_sub_relaxed(&pool->abandoned_visited_count, count);
return true;
}
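// In short: revisit first tries one strong CAS to install the grabbed visited
// list when the abandoned list happens to be empty; otherwise it walks to the
// tail (`last`) and prepends the whole chain with a weak CAS loop, moving the
// (debug-only, approximate) counts over afterwards.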
// Push on the abandoned list.
-static void mi_abandoned_push(mi_segment_t* segment) {
+static void mi_abandoned_push(mi_abandoned_pool_t* pool, mi_segment_t* segment) {
mi_assert_internal(segment->thread_id == 0);
mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_segment_t, &segment->abandoned_next) == NULL);
mi_assert_internal(segment->next == NULL);
mi_assert_internal(segment->used > 0);
mi_tagged_segment_t next;
- mi_tagged_segment_t ts = mi_atomic_load_relaxed(&abandoned);
+ mi_tagged_segment_t ts = mi_atomic_load_relaxed(&pool->abandoned);
do {
mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, mi_tagged_segment_ptr(ts));
next = mi_tagged_segment(segment, ts);
- } while (!mi_atomic_cas_weak_release(&abandoned, &ts, next));
- mi_atomic_increment_relaxed(&abandoned_count);
+ } while (!mi_atomic_cas_weak_release(&pool->abandoned, &ts, next));
+ mi_atomic_increment_relaxed(&pool->abandoned_count);
}
// Wait until there are no more pending reads on segments that used to be in the abandoned list
// called for example from `arena.c` before decommitting
-void _mi_abandoned_await_readers(void) {
+void _mi_abandoned_await_readers(mi_abandoned_pool_t* pool) {
size_t n;
do {
- n = mi_atomic_load_acquire(&abandoned_readers);
+ n = mi_atomic_load_acquire(&pool->abandoned_readers);
if (n != 0) mi_atomic_yield();
} while (n != 0);
}
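// For example, the segment free path above waits here before handing memory
// back via `_mi_arena_free`, so a segment is never decommitted while a
// concurrent `mi_abandoned_pop` may still be reading its `abandoned_next` field.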
// Pop from the abandoned list
-static mi_segment_t* mi_abandoned_pop(void) {
+static mi_segment_t* mi_abandoned_pop(mi_abandoned_pool_t* pool) {
mi_segment_t* segment;
// Check efficiently if it is empty (or if the visited list needs to be moved)
- mi_tagged_segment_t ts = mi_atomic_load_relaxed(&abandoned);
+ mi_tagged_segment_t ts = mi_atomic_load_relaxed(&pool->abandoned);
segment = mi_tagged_segment_ptr(ts);
if mi_likely(segment == NULL) {
- if mi_likely(!mi_abandoned_visited_revisit()) { // try to swap in the visited list on NULL
+ if mi_likely(!mi_abandoned_visited_revisit(pool)) { // try to swap in the visited list on NULL
return NULL;
}
}
  // Do a pop. We use a reader count to prevent a segment from being
  // decommitted while a read is still pending, and a tagged pointer to
  // prevent A-B-A link corruption.
  // (this is called from `region.c:_mi_mem_free` for example)
- mi_atomic_increment_relaxed(&abandoned_readers); // ensure no segment gets decommitted
+ mi_atomic_increment_relaxed(&pool->abandoned_readers); // ensure no segment gets decommitted
mi_tagged_segment_t next = 0;
- ts = mi_atomic_load_acquire(&abandoned);
+ ts = mi_atomic_load_acquire(&pool->abandoned);
do {
segment = mi_tagged_segment_ptr(ts);
if (segment != NULL) {
mi_segment_t* anext = mi_atomic_load_ptr_relaxed(mi_segment_t, &segment->abandoned_next);
next = mi_tagged_segment(anext, ts); // note: reads the segment's `abandoned_next` field so should not be decommitted
}
- } while (segment != NULL && !mi_atomic_cas_weak_acq_rel(&abandoned, &ts, next));
- mi_atomic_decrement_relaxed(&abandoned_readers); // release reader lock
+ } while (segment != NULL && !mi_atomic_cas_weak_acq_rel(&pool->abandoned, &ts, next));
+ mi_atomic_decrement_relaxed(&pool->abandoned_readers); // release reader lock
if (segment != NULL) {
mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL);
- mi_atomic_decrement_relaxed(&abandoned_count);
+ mi_atomic_decrement_relaxed(&pool->abandoned_count);
}
return segment;
}
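// Note the ordering in the pop: `abandoned_readers` is incremented before the
// candidate's `abandoned_next` is read and decremented only after the CAS loop
// finishes, so `_mi_abandoned_await_readers` cannot return while such a read is
// still in flight; the popped segment's `abandoned_next` is then cleared so it
// is fully unlinked.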
segment->thread_id = 0;
mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL);
segment->abandoned_visits = 1; // from 0 to 1 to signify it is abandoned
- mi_abandoned_push(segment);
+ mi_abandoned_push(tld->abandoned, segment);
}
void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) {
void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld) {
mi_segment_t* segment;
- while ((segment = mi_abandoned_pop()) != NULL) {
+ while ((segment = mi_abandoned_pop(tld->abandoned)) != NULL) {
mi_segment_reclaim(segment, heap, 0, NULL, tld);
}
}
*reclaimed = false;
mi_segment_t* segment;
long max_tries = mi_option_get_clamp(mi_option_max_segment_reclaim, 8, 1024); // limit the work to bound allocation times
- while ((max_tries-- > 0) && ((segment = mi_abandoned_pop()) != NULL)) {
+ while ((max_tries-- > 0) && ((segment = mi_abandoned_pop(tld->abandoned)) != NULL)) {
segment->abandoned_visits++;
// todo: an arena exclusive heap will potentially visit many abandoned unsuitable segments
// and push them into the visited list and use many tries. Perhaps we can skip non-suitable ones in a better way?
else {
    // otherwise, push on the visited list so it does not get looked at again too soon
mi_segment_try_purge(segment, true /* force? */, tld->stats); // force purge if needed as we may not visit soon again
- mi_abandoned_visited_push(segment);
+ mi_abandoned_visited_push(tld->abandoned, segment);
}
}
return NULL;
void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld)
{
mi_segment_t* segment;
+ mi_abandoned_pool_t* pool = tld->abandoned;
int max_tries = (force ? 16*1024 : 1024); // limit latency
if (force) {
- mi_abandoned_visited_revisit();
+ mi_abandoned_visited_revisit(pool);
}
- while ((max_tries-- > 0) && ((segment = mi_abandoned_pop()) != NULL)) {
+ while ((max_tries-- > 0) && ((segment = mi_abandoned_pop(pool)) != NULL)) {
mi_segment_check_free(segment,0,0,tld); // try to free up pages (due to concurrent frees)
if (segment->used == 0) {
// free the segment (by forced reclaim) to make it available to other threads.
// otherwise, purge if needed and push on the visited list
// note: forced purge can be expensive if many threads are destroyed/created as in mstress.
mi_segment_try_purge(segment, force, tld->stats);
- mi_abandoned_visited_push(segment);
+ mi_abandoned_visited_push(pool, segment);
}
}
}