    // Don't leave a dangling pointer to the old frame when creating generators
    // and coroutines:
    dest->previous = NULL;
+
+#ifdef Py_GIL_DISABLED
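+    // The free-threaded GC walks the entire frame stack of every thread,
+    // so the entries above stacktop must be cleared rather than left
+    // holding stale values.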
+    PyCodeObject *co = (PyCodeObject *)dest->f_executable;
+    for (int i = stacktop; i < co->co_nlocalsplus + co->co_stacksize; i++) {
+        dest->localsplus[i] = PyStackRef_NULL;
+    }
+#endif
}
/* Consumes reference to func and locals.
    for (int i = null_locals_from; i < code->co_nlocalsplus; i++) {
        frame->localsplus[i] = PyStackRef_NULL;
    }
+
+#ifdef Py_GIL_DISABLED
+    // In the free-threaded build, the GC walks every thread's entire
+    // frame stack. Since stacktop is not always in sync with the real
+    // stack pointer, the whole stack region has to be traversed, so zero
+    // it here to make sure the GC never sees invalid stack values.
+    for (int i = code->co_nlocalsplus; i < code->co_nlocalsplus + code->co_stacksize; i++) {
+        frame->localsplus[i] = PyStackRef_NULL;
+    }
+#endif
}
/* Gets the pointer to the locals array
    frame->instr_ptr = _PyCode_CODE(code);
    frame->owner = FRAME_OWNED_BY_THREAD;
    frame->return_offset = 0;
+
+#ifdef Py_GIL_DISABLED
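+    // This frame has no locals (asserted below), so zero the entire
+    // evaluation stack for the free-threaded GC's stack walk.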
+    assert(code->co_nlocalsplus == 0);
+    for (int i = 0; i < code->co_stacksize; i++) {
+        frame->localsplus[i] = PyStackRef_NULL;
+    }
+#endif
    return frame;
}
static void
disable_deferred_refcounting(PyObject *op)
{
-    if (_PyObject_HasDeferredRefcount(op)) {
-        op->ob_gc_bits &= ~_PyGC_BITS_DEFERRED;
-        op->ob_ref_shared -= _Py_REF_SHARED(_Py_REF_DEFERRED, 0);
-
-        if (PyType_Check(op)) {
-            // Disable thread-local refcounting for heap types
-            PyTypeObject *type = (PyTypeObject *)op;
-            if (PyType_HasFeature(type, Py_TPFLAGS_HEAPTYPE)) {
-                _PyType_ReleaseId((PyHeapTypeObject *)op);
+    if (!_PyObject_HasDeferredRefcount(op)) {
+        return;
+    }
+
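+    // Clear the deferred bit and remove the _Py_REF_DEFERRED bias that
+    // was added to the shared reference count when deferred refcounting
+    // was enabled.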
+    op->ob_gc_bits &= ~_PyGC_BITS_DEFERRED;
+    op->ob_ref_shared -= _Py_REF_SHARED(_Py_REF_DEFERRED, 0);
+
+    if (PyType_Check(op)) {
+        // Disable thread-local refcounting for heap types
+        PyTypeObject *type = (PyTypeObject *)op;
+        if (PyType_HasFeature(type, Py_TPFLAGS_HEAPTYPE)) {
+            _PyType_ReleaseId((PyHeapTypeObject *)op);
+        }
+    }
+    else if (PyGen_CheckExact(op) || PyCoro_CheckExact(op) || PyAsyncGen_CheckExact(op)) {
+        // Ensure any non-refcounted (deferred) pointers in the suspended
+        // frame's locals are converted to strong references, so that the
+        // locals are not freed while the generator/coroutine still refers
+        // to them.
+        PyGenObject *gen = (PyGenObject *)op;
+        struct _PyInterpreterFrame *frame = &gen->gi_iframe;
+        assert(frame->stackpointer != NULL);
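+        // A suspended generator/coroutine saves its stack pointer in the
+        // frame, so we can walk exactly the live locals and stack entries.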
+        for (_PyStackRef *ref = frame->localsplus; ref < frame->stackpointer; ref++) {
+            if (!PyStackRef_IsNull(*ref) && PyStackRef_IsDeferred(*ref)) {
+                // Convert a deferred reference to a strong reference.
+                *ref = PyStackRef_FromPyObjectSteal(PyStackRef_AsPyObjectSteal(*ref));
            }
        }
    }
    return err;
}
+static inline void
+gc_visit_stackref(_PyStackRef stackref)
+{
+    // Note: we MUST check that the reference is deferred before anything
+    // else. Non-deferred references may already be dead, so inspecting
+    // them further could read invalid memory.
+    if (PyStackRef_IsDeferred(stackref) && !PyStackRef_IsNull(stackref)) {
+        PyObject *obj = PyStackRef_AsPyObjectBorrow(stackref);
+        if (_PyObject_GC_IS_TRACKED(obj)) {
+            gc_add_refs(obj, 1);
+        }
+    }
+}
+
+// Add 1 to the gc_refs for every deferred reference on each thread's stack.
+static void
+gc_visit_thread_stacks(PyInterpreterState *interp)
+{
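+    // Hold the runtime's HEAD_LOCK so the list of thread states cannot
+    // change while we iterate over it.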
+    HEAD_LOCK(&_PyRuntime);
+    for (PyThreadState *p = interp->threads.head; p != NULL; p = p->next) {
+        _PyInterpreterFrame *f = p->current_frame;
+        while (f != NULL) {
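+            // Only frames that are running a code object have a
+            // localsplus array of stack references to scan.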
+            if (f->f_executable != NULL && PyCode_Check(f->f_executable)) {
+                PyCodeObject *co = (PyCodeObject *)f->f_executable;
+                int max_stack = co->co_nlocalsplus + co->co_stacksize;
+                for (int i = 0; i < max_stack; i++) {
+                    gc_visit_stackref(f->localsplus[i]);
+                }
+            }
+            f = f->previous;
+        }
+    }
+    HEAD_UNLOCK(&_PyRuntime);
+}
+
static void
merge_queued_objects(_PyThreadStateImpl *tstate, struct collection_state *state)
{
    gc_visit_heaps(interp, &validate_gc_objects, &state->base);
#endif
+    // Visit the thread stacks to account for any deferred references.
+    gc_visit_thread_stacks(interp);
+
    // Transitively mark reachable objects by clearing the
    // _PyGC_BITS_UNREACHABLE flag.
    if (gc_visit_heaps(interp, &mark_heap_visitor, &state->base) < 0) {
    return 0;
}
+int
+_PyGC_VisitFrameStack(_PyInterpreterFrame *frame, visitproc visit, void *arg)
+{
+    _PyStackRef *ref = _PyFrame_GetLocalsArray(frame);
+    /* locals and stack */
+    for (; ref < frame->stackpointer; ref++) {
+        // This is a bit tricky! We want to ignore deferred references when
+        // computing the incoming references, but otherwise treat them like
+        // regular references.
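+        // A deferred reference never contributed to its target's refcount,
+        // so the decref passes must not subtract it.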
+        if (PyStackRef_IsDeferred(*ref) &&
+            (visit == visit_decref || visit == visit_decref_unreachable)) {
+            continue;
+        }
+        Py_VISIT(PyStackRef_AsPyObjectBorrow(*ref));
+    }
+    return 0;
+}
+
// Handle objects that may have resurrected after a call to 'finalize_garbage'.
static int
handle_resurrected_objects(struct collection_state *state)