void
_Py_MergeZeroLocalRefcount(PyObject *op)
{
    assert(op->ob_ref_local == 0);

-    _Py_atomic_store_uintptr_relaxed(&op->ob_tid, 0);
    Py_ssize_t shared = _Py_atomic_load_ssize_acquire(&op->ob_ref_shared);
    if (shared == 0) {
        // Fast-path: shared refcount is zero (including flags)
        _Py_Dealloc(op);
        return;
    }
+    // gh-121794: This must be before the store to `ob_ref_shared` (gh-119999),
+    // but should be outside the fast-path to maintain the invariant that
+    // a zero `ob_tid` implies a merged refcount.
+    _Py_atomic_store_uintptr_relaxed(&op->ob_tid, 0);
+
    // Slow-path: atomically set the flags (low two bits) to _Py_REF_MERGED.
    Py_ssize_t new_shared;
    do {
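The invariant being established: on free-threaded builds, `ob_tid` holds the
owning thread's id until the object's refcount is merged, after which it must
be zero. A minimal sketch of how the invariant reads in code, using the flag
layout of `ob_ref_shared` (its low two bits hold flags); the helper name is
hypothetical, not CPython API:

static inline int
refcount_is_merged(PyObject *op)
{
    // Hypothetical helper for illustration only. The low two bits of
    // ob_ref_shared are flag bits; _Py_REF_MERGED marks a refcount whose
    // local and shared parts have been merged.
    Py_ssize_t shared = _Py_atomic_load_ssize_relaxed(&op->ob_ref_shared);
    return (shared & _Py_REF_SHARED_FLAG_MASK) == _Py_REF_MERGED;
}

The next two hunks keep the trashcan machinery consistent with this invariant.
The first is in _PyTrash_thread_deposit_object, which pushes a dead object onto
the per-thread delete_later list; since the fast-path above no longer zeroes
`ob_tid`, the old assertion that it is already zero has to go: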
    _PyObject_ASSERT(op, !_PyObject_GC_IS_TRACKED(op));
    _PyObject_ASSERT(op, Py_REFCNT(op) == 0);
#ifdef Py_GIL_DISABLED
-    _PyObject_ASSERT(op, op->ob_tid == 0);
    op->ob_tid = (uintptr_t)tstate->delete_later;
#else
    _PyGCHead_SET_PREV(_Py_AS_GC(op), (PyGC_Head*)tstate->delete_later);
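The companion hunk is in _PyTrash_thread_destroy_chain, which pops objects back
off the delete_later list and deallocates them. While an object sits on the
list, `ob_tid` doubles as the list's link field, so popping zeroes it; the
added store marks the shared refcount as merged at the same time, keeping a
zero `ob_tid` consistent with the invariant above: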
#ifdef Py_GIL_DISABLED
        tstate->delete_later = (PyObject*) op->ob_tid;
        op->ob_tid = 0;
+        _Py_atomic_store_ssize_relaxed(&op->ob_ref_shared, _Py_REF_MERGED);
#else
        tstate->delete_later = (PyObject*) _PyGCHead_PREV(_Py_AS_GC(op));
#endif
    }
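The remaining hunks are in Python/gc_free_threading.c, the free-threaded cycle
collector. They add a debug-only heap validator for the invariant, a helper
that rolls back the collector's bookkeeping on out-of-memory, and inlined error
paths that use it.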
#ifdef GC_DEBUG
+static bool
+validate_refcounts(const mi_heap_t *heap, const mi_heap_area_t *area,
+                   void *block, size_t block_size, void *args)
+{
+    PyObject *op = op_from_block(block, args, false);
+    if (op == NULL) {
+        return true;
+    }
+
+    _PyObject_ASSERT_WITH_MSG(op, !gc_is_unreachable(op),
+                              "object should not be marked as unreachable yet");
+
+    if (_Py_REF_IS_MERGED(op->ob_ref_shared)) {
+        _PyObject_ASSERT_WITH_MSG(op, op->ob_tid == 0,
+                                  "merged objects should have ob_tid == 0");
+    }
+    else if (!_Py_IsImmortal(op)) {
+        _PyObject_ASSERT_WITH_MSG(op, op->ob_tid != 0,
+                                  "unmerged objects should have ob_tid != 0");
+    }
+
+    return true;
+}
+
static bool
validate_gc_objects(const mi_heap_t *heap, const mi_heap_area_t *area,
                    void *block, size_t block_size, void *args)
{
    PyObject *op = op_from_block(block, args, false);
    if (op == NULL) {
        return true;
    }
    _PyObject_ASSERT(op, gc_is_unreachable(op));
    _PyObject_ASSERT_WITH_MSG(op, gc_get_refs(op) >= 0,
                              "refcount is too small");
    return true;
}
+static bool
+restore_refs(const mi_heap_t *heap, const mi_heap_area_t *area,
+             void *block, size_t block_size, void *args)
+{
+    PyObject *op = op_from_block(block, args, false);
+    if (op == NULL) {
+        return true;
+    }
+    gc_restore_tid(op);
+    gc_clear_unreachable(op);
+    return true;
+}
+
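restore_refs is the roll-back path: gc_restore_tid puts a valid `ob_tid` back
(zero for merged refcounts, the owning thread's id otherwise) and
gc_clear_unreachable drops the unreachable flag. As a rough sketch of the
latter, assuming the flag lives in the per-object ob_gc_bits field used on
free-threaded builds (a simplified rendering, not copied from the source):

static inline void
gc_clear_unreachable(PyObject *op)
{
    // _PyGC_BITS_UNREACHABLE is one of the GC flag bits kept in ob_gc_bits
    // on free-threaded builds; clearing it marks the object reachable again.
    op->ob_gc_bits &= ~_PyGC_BITS_UNREACHABLE;
}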
/* Return true if object has a pre-PEP 442 finalization method. */
static int
has_legacy_finalizer(PyObject *op)

static int
deduce_unreachable_heap(PyInterpreterState *interp,
                        struct collection_state *state)
{
+
+#ifdef GC_DEBUG
+    // Check that no object is marked as unreachable yet, and that `ob_tid`
+    // is zero if and only if the refcount is merged (immortal objects
+    // excepted).
+    gc_visit_heaps(interp, &validate_refcounts, &state->base);
+#endif
+
    // Identify objects that are directly reachable from outside the GC heap
    // by computing the difference between the refcount and the number of
    // incoming references.
    gc_visit_heaps(interp, &update_refs, &state->base);

#ifdef GC_DEBUG
    // Check that all objects are marked as unreachable and that the computed
    // reference count difference (stored in `ob_tid`) is non-negative.
    gc_visit_heaps(interp, &validate_gc_objects, &state->base);
#endif

    // Transitively mark reachable objects by clearing the
    // _PyGC_BITS_UNREACHABLE flag.
    if (gc_visit_heaps(interp, &mark_heap_visitor, &state->base) < 0) {
+        // On out-of-memory, restore the refcounts and bail out.
+        gc_visit_heaps(interp, &restore_refs, &state->base);
        return -1;
    }
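If marking runs out of memory partway, update_refs has already replaced every
`ob_tid` with a refcount delta and set unreachable bits, so the collector
cannot simply return: restore_refs walks the heap once more to undo both
before bailing. The final hunks are in gc_collect_internal, which previously
funneled all failures through a shared error label: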
    int err = deduce_unreachable_heap(interp, state);
    if (err < 0) {
        _PyEval_StartTheWorld(interp);
-        goto error;
+        PyErr_NoMemory();
+        return;
    }
    // Print debugging information.

    _PyEval_StartTheWorld(interp);

    if (err < 0) {
-        goto error;
+        cleanup_worklist(&state->unreachable);
+        cleanup_worklist(&state->legacy_finalizers);
+        cleanup_worklist(&state->wrcb_to_call);
+        cleanup_worklist(&state->objs_to_decref);
+        PyErr_NoMemory();
+        return;
    }
    // Call tp_clear on objects in the unreachable set. This will cause
    // the reference cycles to be broken.
    delete_garbage(state);

    // Append objects with legacy finalizers to the "gc.garbage" list.
    handle_legacy_finalizers(state);
-    return;
-
-error:
-    cleanup_worklist(&state->unreachable);
-    cleanup_worklist(&state->legacy_finalizers);
-    cleanup_worklist(&state->wrcb_to_call);
-    cleanup_worklist(&state->objs_to_decref);
-    PyErr_NoMemory();
-    PyErr_FormatUnraisable("Out of memory during garbage collection");
}
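With the out-of-memory paths inlined, nothing reports an unraisable error
inside gc_collect_internal anymore; the MemoryError is left pending for the
caller. A sketch of the caller-side handling this relies on, simplified from
gc_collect_main (the exact shape in the source may differ):

    gc_collect_internal(interp, &state, generation);

    if (PyErr_Occurred()) {
        // Cycle collection runs in contexts that cannot propagate an
        // exception, so report it as unraisable and continue.
        PyErr_FormatUnraisable("Exception ignored in garbage collection");
    }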
/* This is the main function. Read this to understand how the
 * collection process works. */