/* Bit flags for ob_gc_bits (in Py_GIL_DISABLED builds) */
#ifdef Py_GIL_DISABLED
-# define _PyGC_BITS_TRACKED (1)
-# define _PyGC_BITS_FINALIZED (2)
+# define _PyGC_BITS_TRACKED (1) // Tracked by the GC
+# define _PyGC_BITS_FINALIZED (2) // tp_finalize was called
# define _PyGC_BITS_UNREACHABLE (4)
# define _PyGC_BITS_FROZEN (8)
# define _PyGC_BITS_SHARED (16)
# define _PyGC_BITS_SHARED_INLINE (32)
+# define _PyGC_BITS_DEFERRED (64) // Use deferred reference counting
#endif
/* True if the object is currently tracked by the GC. */
op = NULL; \
} while (0)
+// Mark an object as supporting deferred reference counting. This is a no-op
+// in the default (with GIL) build. Objects that use deferred reference
+// counting should be tracked by the GC so that they are eventually collected.
+extern void _PyObject_SetDeferredRefcount(PyObject *op);
+
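+// Check whether an object uses deferred reference counting. Always returns
+// 0 in the default (with GIL) build.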
+static inline int
+_PyObject_HasDeferredRefcount(PyObject *op)
+{
+#ifdef Py_GIL_DISABLED
+ return (op->ob_gc_bits & _PyGC_BITS_DEFERRED) != 0;
+#else
+ return 0;
+#endif
+}
+
#if !defined(Py_GIL_DISABLED)
static inline void
_Py_DECREF_SPECIALIZED(PyObject *op, const destructor destruct)
SetExtra(f.__code__, FREE_INDEX, ctypes.c_voidp(100))
del f
+ gc_collect() # For free-threaded build
self.assertEqual(LAST_FREED, 100)
def test_get_set(self):
del f
tt.start()
tt.join()
+ gc_collect() # For free-threaded build
self.assertEqual(LAST_FREED, 500)
descr = (PyDescrObject *)PyType_GenericAlloc(descrtype, 0);
if (descr != NULL) {
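+ // Descriptors are typically long-lived and shared between threads, so use
+ // deferred reference counting to avoid contention on their refcount fields.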
+ _PyObject_SetDeferredRefcount((PyObject *)descr);
descr->d_type = (PyTypeObject*)Py_XNewRef(type);
descr->d_name = PyUnicode_InternFromString(name);
if (descr->d_name == NULL) {
op->func_typeparams = NULL;
op->vectorcall = _PyFunction_Vectorcall;
op->func_version = 0;
+ // NOTE: functions created via FrameConstructor do not use deferred
+ // reference counting because they are typically not part of cycles
+ // nor accessed by multiple threads.
_PyObject_GC_TRACK(op);
handle_func_event(PyFunction_EVENT_CREATE, op, NULL);
return op;
op->func_typeparams = NULL;
op->vectorcall = _PyFunction_Vectorcall;
op->func_version = 0;
+ if ((code_obj->co_flags & CO_NESTED) == 0) {
+ // Use deferred reference counting for top-level functions, but not
+ // nested functions because they are more likely to capture variables,
+ // which makes prompt deallocation more important.
+ _PyObject_SetDeferredRefcount((PyObject *)op);
+ }
_PyObject_GC_TRACK(op);
handle_func_event(PyFunction_EVENT_CREATE, op, NULL);
return (PyObject *)op;
m->md_weaklist = NULL;
m->md_name = NULL;
m->md_dict = PyDict_New();
- if (m->md_dict != NULL) {
- return m;
+ if (m->md_dict == NULL) {
+ Py_DECREF(m);
+ return NULL;
}
- Py_DECREF(m);
- return NULL;
+ return m;
+}
+
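+// Enable deferred reference counting on the module and its dict, then start
+// tracking both with the GC.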
+static void
+track_module(PyModuleObject *m)
+{
+ _PyObject_SetDeferredRefcount(m->md_dict);
+ PyObject_GC_Track(m->md_dict);
+
+ _PyObject_SetDeferredRefcount((PyObject *)m);
+ PyObject_GC_Track(m);
}
static PyObject *
new_module(PyTypeObject *mt, PyObject *args, PyObject *kws)
{
- PyObject *m = (PyObject *)new_module_notrack(mt);
+ PyModuleObject *m = new_module_notrack(mt);
if (m != NULL) {
- PyObject_GC_Track(m);
+ track_module(m);
}
- return m;
+ return (PyObject *)m;
}
PyObject *
return NULL;
if (module_init_dict(m, m->md_dict, name, NULL) != 0)
goto fail;
- PyObject_GC_Track(m);
+ track_module(m);
return (PyObject *)m;
fail:
module___init___impl(PyModuleObject *self, PyObject *name, PyObject *doc)
/*[clinic end generated code: output=e7e721c26ce7aad7 input=57f9e177401e5e1e]*/
{
- PyObject *dict = self->md_dict;
- if (dict == NULL) {
- dict = PyDict_New();
- if (dict == NULL)
- return -1;
- self->md_dict = dict;
- }
- if (module_init_dict(self, dict, name, doc) < 0)
- return -1;
- return 0;
+ return module_init_dict(self, self->md_dict, name, doc);
}
static void
_Py_SetImmortalUntracked(op);
}
+void
+_PyObject_SetDeferredRefcount(PyObject *op)
+{
+#ifdef Py_GIL_DISABLED
+ assert(PyType_IS_GC(Py_TYPE(op)));
+ assert(_Py_IsOwnedByCurrentThread(op));
+ assert(op->ob_ref_shared == 0);
+ op->ob_gc_bits |= _PyGC_BITS_DEFERRED;
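+ // The extra local reference and the queued shared refcount ensure that the
+ // object is only deallocated by the GC, which merges the refcounts.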
+ op->ob_ref_local += 1;
+ op->ob_ref_shared = _Py_REF_QUEUED;
+#endif
+}
+
void
_Py_ResurrectReference(PyObject *op)
{
et->ht_module = NULL;
et->_ht_tpname = NULL;
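+ // Heap types are frequently accessed by many threads; deferred reference
+ // counting avoids contention on their reference count fields.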
+ _PyObject_SetDeferredRefcount((PyObject *)et);
+
return type;
}
op->ob_tid -= 1;
}
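+// Undo deferred reference counting for this object: clear the bit and remove
+// the deferred reference from the shared refcount.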
+static void
+disable_deferred_refcounting(PyObject *op)
+{
+ if (_PyObject_HasDeferredRefcount(op)) {
+ op->ob_gc_bits &= ~_PyGC_BITS_DEFERRED;
+ op->ob_ref_shared -= (1 << _Py_REF_SHARED_SHIFT);
+ }
+}
+
static Py_ssize_t
merge_refcount(PyObject *op, Py_ssize_t extra)
{
}
Py_ssize_t refcount = Py_REFCNT(op);
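+ // Exclude the extra reference added for deferred reference counting from
+ // the GC's refcount accounting.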
+ refcount -= _PyObject_HasDeferredRefcount(op);
_PyObject_ASSERT(op, refcount >= 0);
- if (refcount > 0) {
+ if (refcount > 0 && !_PyObject_HasDeferredRefcount(op)) {
// Untrack tuples and dicts as necessary in this pass, but not objects
// with zero refcount, which we will want to collect.
if (PyTuple_CheckExact(op)) {
return true;
}
+ _PyObject_ASSERT_WITH_MSG(op, gc_get_refs(op) >= 0,
+ "refcount is too small");
+
if (gc_is_unreachable(op) && gc_get_refs(op) != 0) {
// Object is reachable but currently marked as unreachable.
// Mark it as reachable and traverse its pointers to find
struct collection_state *state = (struct collection_state *)args;
if (gc_is_unreachable(op)) {
+ // Disable deferred refcounting for unreachable objects so that they
+ // are collected immediately after finalization.
+ disable_deferred_refcounting(op);
+
// Merge and add one to the refcount to prevent deallocation while we
// are holding on to it in a worklist.
merge_refcount(op, 1);