assert_python_ok("-c", code_inside_function)
+ @unittest.skipUnless(Py_GIL_DISABLED, "requires free-threaded GC")
+ @unittest.skipIf(_testinternalcapi is None, "requires _testinternalcapi")
+ def test_tuple_untrack_counts(self):
+ # Check that the free-threaded GC includes untracked tuples in the
+ # "long_lived_total" count. Without this, the GC runs too frequently
+ # and hurts performance. See GH-142531 for an example.
+ gc.collect()
+ count = _testinternalcapi.get_long_lived_total()
+ n = 20_000
+ tuples = [(x,) for x in range(n)]
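+ # Tuples start out GC-tracked; the collection below untracks tuples
+ # whose items are all untrackable (like these ints).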
+ gc.collect()
+ new_count = _testinternalcapi.get_long_lived_total()
+ self.assertFalse(gc.is_tracked(tuples[0]))
+ # Use n // 2 just in case some other objects were collected.
+ self.assertGreater(new_count - count, n // 2)
+
+
class IncrementalGCTests(unittest.TestCase):
@unittest.skipIf(_testinternalcapi is None, "requires _testinternalcapi")
@requires_gil_enabled("Free threading does not support incremental GC")
}
return PyLong_FromVoidPtr(bc);
}
+
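+// Return the interpreter's GC "long_lived_total" count, for use in tests.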
+static PyObject *
+get_long_lived_total(PyObject *self, PyObject *Py_UNUSED(ignored))
+{
+ return PyLong_FromInt64(PyInterpreterState_Get()->gc.long_lived_total);
+}
+
#endif
static PyObject *
{"py_thread_id", get_py_thread_id, METH_NOARGS},
{"get_tlbc", get_tlbc, METH_O, NULL},
{"get_tlbc_id", get_tlbc_id, METH_O, NULL},
+ {"get_long_lived_total", get_long_lived_total, METH_NOARGS},
#endif
#ifdef _Py_TIER2
{"uop_symbols_test", _Py_uop_symbols_test, METH_NOARGS},
return op;
}
+// Like op_from_block(), but also returns untracked and frozen objects.
+static PyObject *
+op_from_block_all_gc(void *block, void *arg)
+{
+ struct visitor_args *a = arg;
+ if (block == NULL) {
+ return NULL;
+ }
+ PyObject *op = (PyObject *)((char*)block + a->offset);
+ assert(_PyObject_IS_GC(op));
+ return op;
+}
+
static int
gc_visit_heaps_lock_held(PyInterpreterState *interp, mi_block_visit_fun *visitor,
struct visitor_args *arg)
scan_heap_visitor(const mi_heap_t *heap, const mi_heap_area_t *area,
void *block, size_t block_size, void *args)
{
- PyObject *op = op_from_block(block, args, false);
+ PyObject *op = op_from_block_all_gc(block, args);
if (op == NULL) {
return true;
}
-
struct collection_state *state = (struct collection_state *)args;
+ // The cost of a free-threaded GC collection is proportional to the total
+ // number of objects in the mimalloc GC heap, so untracked and frozen
+ // objects need to be counted as well. This is especially important when
+ // many tuples have been untracked.
+ state->long_lived_total++;
+ if (!_PyObject_GC_IS_TRACKED(op) || gc_is_frozen(op)) {
+ return true;
+ }
+
if (gc_is_unreachable(op)) {
// Disable deferred refcounting for unreachable objects so that they
// are collected immediately after finalization.
else {
worklist_push(&state->unreachable, op);
}
+ // It is possible this object will be resurrected, but for now we
+ // assume it will be deallocated.
+ state->long_lived_total--;
return true;
}
// object is reachable, restore `ob_tid`; we're done with these objects
gc_restore_tid(op);
gc_clear_alive(op);
- state->long_lived_total++;
return true;
}
_PyObject_ASSERT(op, Py_REFCNT(op) > 1);
worklist_remove(&iter);
merge_refcount(op, -1); // remove worklist reference
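+ // This object will not be deallocated after all, so count it as
+ // long-lived again (balancing the decrement in scan_heap_visitor).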
+ state->long_lived_total++;
}
}
}
}
}
- // Record the number of live GC objects
- interp->gc.long_lived_total = state->long_lived_total;
-
// Find weakref callbacks we will honor (but do not call them).
find_weakref_callbacks(state);
_PyEval_StartTheWorld(interp);
if (err == 0) {
clear_weakrefs(state);
}
+ // Record the number of live GC objects
+ interp->gc.long_lived_total = state->long_lived_total;
_PyEval_StartTheWorld(interp);
+
if (err < 0) {
cleanup_worklist(&state->unreachable);
cleanup_worklist(&state->legacy_finalizers);