// note: this will free retired pages as well.
bool freed = _PyMem_mi_page_maybe_free(page, pq, collect >= MI_FORCE);
if (!freed && collect == MI_ABANDON) {
- _mi_page_abandon(page, pq);
+ // _PyMem_mi_page_maybe_free may have moved the page to a different
+ // page queue, so we need to re-fetch the correct queue.
+ uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : _mi_bin(page->xblock_size));
+ _mi_page_abandon(page, &heap->pages[bin]);
}
}
else if (collect == MI_ABANDON) {
// update counts now
page->used -= count;
+
+ if (page->used == 0) {
+ // The page may have had a QSBR goal set from a previous point when it
+ // was all-free. That goal is no longer valid because the page was
+ // allocated from and then freed again by other threads.
+ _PyMem_mi_page_clear_qsbr(page);
+ }
}
void _mi_page_free_collect(mi_page_t* page, bool force) {
// and the local free list
if (page->local_free != NULL) {
- // any previous QSBR goals are no longer valid because we reused the page
- _PyMem_mi_page_clear_qsbr(page);
-
if mi_likely(page->free == NULL) {
// usual case
page->free = page->local_free;
_mi_stat_decrease(&tld->stats->pages_abandoned, 1);
#ifdef Py_GIL_DISABLED
page->qsbr_goal = 0;
+ mi_assert_internal(page->qsbr_node.next == NULL);
#endif
segment->abandoned--;
slice = mi_segment_page_clear(page, tld); // re-assign slice due to coalesce!
// if everything free by now, free the page
#ifdef Py_GIL_DISABLED
page->qsbr_goal = 0;
+ mi_assert_internal(page->qsbr_node.next == NULL);
#endif
slice = mi_segment_page_clear(page, tld); // set slice again due to coalesceing
}
}
return false;
}
+
+// Recover the owning _PyThreadStateImpl from a mimalloc heap. The heap's
+// thread-local data (heap->tld) is embedded in the thread state's
+// `mimalloc.tld` field, so container_of yields the enclosing structure.
+// Unlike PyThreadState_GET(), this resolves the page's *owner* thread
+// state, which matters when handling a page that belongs to a different
+// thread (e.g. during a stop-the-world pause — see the caller below).
+static _PyThreadStateImpl *
+tstate_from_heap(mi_heap_t *heap)
+{
+    return _Py_CONTAINER_OF(heap->tld, _PyThreadStateImpl, mimalloc.tld);
+}
#endif
static bool
#ifdef Py_GIL_DISABLED
assert(mi_page_all_free(page));
if (page->use_qsbr) {
- _PyThreadStateImpl *tstate = (_PyThreadStateImpl *)PyThreadState_GET();
- if (page->qsbr_goal != 0 && _Py_qbsr_goal_reached(tstate->qsbr, page->qsbr_goal)) {
+ struct _qsbr_thread_state *qsbr = ((_PyThreadStateImpl *)PyThreadState_GET())->qsbr;
+ if (page->qsbr_goal != 0 && _Py_qbsr_goal_reached(qsbr, page->qsbr_goal)) {
_PyMem_mi_page_clear_qsbr(page);
_mi_page_free(page, pq, force);
return true;
}
+ // gh-145615: since we are not freeing this page yet, we want to
+ // make it available for allocations. Note that the QSBR goal and
+ // linked list node remain set even if the page is later used for
+ // an allocation. We only detect and clear the QSBR goal when the
+ // page becomes empty again (used == 0).
+ if (mi_page_is_in_full(page)) {
+ _mi_page_unfull(page);
+ }
+
_PyMem_mi_page_clear_qsbr(page);
page->retire_expire = 0;
- if (should_advance_qsbr_for_page(tstate->qsbr, page)) {
- page->qsbr_goal = _Py_qsbr_advance(tstate->qsbr->shared);
+ if (should_advance_qsbr_for_page(qsbr, page)) {
+ page->qsbr_goal = _Py_qsbr_advance(qsbr->shared);
}
else {
- page->qsbr_goal = _Py_qsbr_shared_next(tstate->qsbr->shared);
+ page->qsbr_goal = _Py_qsbr_shared_next(qsbr->shared);
}
+ // We may be freeing a page belonging to a different thread during a
+ // stop-the-world event. Find the _PyThreadStateImpl for the page.
+ _PyThreadStateImpl *tstate = tstate_from_heap(mi_page_heap(page));
llist_insert_tail(&tstate->mimalloc.page_list, &page->qsbr_node);
return false;
}
if (page->qsbr_goal != 0) {
if (mi_page_all_free(page)) {
assert(page->qsbr_node.next == NULL);
- _PyThreadStateImpl *tstate = (_PyThreadStateImpl *)PyThreadState_GET();
+ _PyThreadStateImpl *tstate = tstate_from_heap(mi_page_heap(page));
+ assert(tstate == (_PyThreadStateImpl *)_PyThreadState_GET());
page->retire_expire = 0;
llist_insert_tail(&tstate->mimalloc.page_list, &page->qsbr_node);
}