_testcapi = None
from test import support
-from test.support import threading_helper
+from test.support import threading_helper, Py_GIL_DISABLED
from test.support.script_helper import assert_python_ok
assert_python_ok("-c", code)
@support.cpython_only
+ @unittest.skipIf(Py_GIL_DISABLED, "test requires precise GC scheduling")
def test_sneaky_frame_object(self):
def trace(frame, event, arg):
/*[clinic end generated code: output=354012e67b16398f input=a392794a08251751]*/
{
GCState *gcstate = get_gc_state();
+
+#ifdef Py_GIL_DISABLED
+ _PyThreadStateImpl *tstate = (_PyThreadStateImpl *)_PyThreadState_GET();
+ struct _gc_thread_state *gc = &tstate->gc;
+
+ // Flush the local allocation count to the global count
+ _Py_atomic_add_int(&gcstate->generations[0].count, (int)gc->alloc_count);
+ gc->alloc_count = 0;
+#endif
+
return Py_BuildValue("(iii)",
gcstate->generations[0].count,
gcstate->generations[1].count,
# define GC_DEBUG
#endif
+// Each thread buffers the count of allocated objects in a thread-local
+// variable up to +/- this amount to reduce the overhead of updating
+// the global count.
+#define LOCAL_ALLOC_COUNT_THRESHOLD 512
+
// Automatically choose the generation that needs collecting.
#define GENERATION_AUTO (-1)
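For illustration only (not part of the patch): the buffering that LOCAL_ALLOC_COUNT_THRESHOLD controls follows a familiar pattern; each thread accumulates a signed delta in a thread-local variable and folds it into the shared counter only when the delta reaches the threshold, or when an exact value is needed (as the gc.get_count() hunk above does). Below is a minimal standalone sketch of that pattern using C11 atomics and made-up names (FLUSH_THRESHOLD, shared_count, local_delta, note_allocation, note_deallocation, read_exact_count), not CPython's internal types.

#include <stdatomic.h>
#include <stdio.h>

#define FLUSH_THRESHOLD 512            /* stands in for LOCAL_ALLOC_COUNT_THRESHOLD */

static atomic_int shared_count;        /* stands in for gcstate->generations[0].count */
static _Thread_local int local_delta;  /* stands in for gc->alloc_count */

/* Count one allocation; publish the buffered delta once it reaches the threshold. */
static void
note_allocation(void)
{
    if (++local_delta >= FLUSH_THRESHOLD) {
        atomic_fetch_add(&shared_count, local_delta);
        local_delta = 0;
    }
}

/* Count one deallocation; publish once the buffered delta reaches -threshold. */
static void
note_deallocation(void)
{
    if (--local_delta <= -FLUSH_THRESHOLD) {
        atomic_fetch_add(&shared_count, local_delta);
        local_delta = 0;
    }
}

/* Fold this thread's pending delta in before reading, mirroring the flush
 * that the gc.get_count() hunk above performs. */
static int
read_exact_count(void)
{
    atomic_fetch_add(&shared_count, local_delta);
    local_delta = 0;
    return atomic_load(&shared_count);
}

int
main(void)
{
    for (int i = 0; i < 600; i++) {
        note_allocation();
    }
    for (int i = 0; i < 30; i++) {
        note_deallocation();
    }
    /* Only the first 512 allocations have been published so far. */
    printf("shared=%d local=%d\n", atomic_load(&shared_count), local_delta);
    printf("exact=%d\n", read_exact_count());
    return 0;
}

Built with any C11 compiler, this prints shared=512 local=58 and then exact=570: the shared counter lags until a flush, which is also why the test hunk above skips a test that depends on precise GC scheduling.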
gcstate->generations[1].threshold == 0);
}
+static void
+record_allocation(PyThreadState *tstate)
+{
+ struct _gc_thread_state *gc = &((_PyThreadStateImpl *)tstate)->gc;
+
+ // We buffer the allocation count to avoid the overhead of atomic
+ // operations for every allocation.
+ gc->alloc_count++;
+ if (gc->alloc_count >= LOCAL_ALLOC_COUNT_THRESHOLD) {
+ // TODO: Use Py_ssize_t for the generation count.
+ GCState *gcstate = &tstate->interp->gc;
+ _Py_atomic_add_int(&gcstate->generations[0].count, (int)gc->alloc_count);
+ gc->alloc_count = 0;
+
+ if (gc_should_collect(gcstate) &&
+ !_Py_atomic_load_int_relaxed(&gcstate->collecting))
+ {
+ _Py_ScheduleGC(tstate->interp);
+ }
+ }
+}
+
+static void
+record_deallocation(PyThreadState *tstate)
+{
+ struct _gc_thread_state *gc = &((_PyThreadStateImpl *)tstate)->gc;
+
+ gc->alloc_count--;
+ if (gc->alloc_count <= -LOCAL_ALLOC_COUNT_THRESHOLD) {
+ GCState *gcstate = &tstate->interp->gc;
+ _Py_atomic_add_int(&gcstate->generations[0].count, (int)gc->alloc_count);
+ gc->alloc_count = 0;
+ }
+}
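Design note (inferred from the hunks that follow, not stated in the patch itself): record_allocation() and record_deallocation() take over the generation-0 bookkeeping previously done inline in _PyObject_GC_Link() and in the allocation and deallocation paths shown below. Because each call flushes only at +/-LOCAL_ALLOC_COUNT_THRESHOLD, the global count can lag the true number of outstanding GC objects by at most LOCAL_ALLOC_COUNT_THRESHOLD - 1 per thread, and gc_should_collect() is consulted only on a flush, so collections are scheduled at a granularity of roughly LOCAL_ALLOC_COUNT_THRESHOLD allocations rather than after every allocation.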
+
static void
gc_collect_internal(PyInterpreterState *interp, struct collection_state *state)
{
}
}
+ // Record the number of live GC objects
+ interp->gc.long_lived_total = state->long_lived_total;
+
// Clear weakrefs and enqueue callbacks (but do not call them).
clear_weakrefs(state);
_PyEval_StartTheWorld(interp);
m = state.collected;
n = state.uncollectable;
- gcstate->long_lived_total = state.long_lived_total;
if (gcstate->debug & _PyGC_DEBUG_STATS) {
double d = _PyTime_AsSecondsDouble(_PyTime_GetPerfCounter() - t1);
void
_PyObject_GC_Link(PyObject *op)
{
- PyThreadState *tstate = _PyThreadState_GET();
- GCState *gcstate = &tstate->interp->gc;
- gcstate->generations[0].count++;
-
- if (gc_should_collect(gcstate) &&
- !_Py_atomic_load_int_relaxed(&gcstate->collecting))
- {
- _Py_ScheduleGC(tstate->interp);
- }
+ record_allocation(_PyThreadState_GET());
}
void
((PyObject **)mem)[1] = NULL;
}
PyObject *op = (PyObject *)(mem + presize);
- _PyObject_GC_Link(op);
+ record_allocation(tstate);
return op;
}
PyErr_SetRaisedException(exc);
#endif
}
- GCState *gcstate = get_gc_state();
- if (gcstate->generations[0].count > 0) {
- gcstate->generations[0].count--;
- }
+
+ record_deallocation(_PyThreadState_GET());
+
PyObject_Free(((char *)op)-presize);
}