static void
process_interp_queue(struct _Py_mem_interp_free_queue *queue,
                     struct _qsbr_thread_state *qsbr)
+{
+    assert(PyMutex_IsLocked(&queue->mutex));
+    process_queue(&queue->head, qsbr, false);
+
+    int more_work = !llist_empty(&queue->head);
+    _Py_atomic_store_int_relaxed(&queue->has_work, more_work);
+}
+
+static void
+maybe_process_interp_queue(struct _Py_mem_interp_free_queue *queue,
+                           struct _qsbr_thread_state *qsbr)
{
    if (!_Py_atomic_load_int_relaxed(&queue->has_work)) {
        return;
    }

    // Try to acquire the lock, but don't block if it's already held.
    if (_PyMutex_LockTimed(&queue->mutex, 0, 0) == PY_LOCK_ACQUIRED) {
-        process_queue(&queue->head, qsbr, false);
-
-        int more_work = !llist_empty(&queue->head);
-        _Py_atomic_store_int_relaxed(&queue->has_work, more_work);
-
+        process_interp_queue(queue, qsbr);
        PyMutex_Unlock(&queue->mutex);
    }
}
    process_queue(&tstate_impl->mem_free_queue, tstate_impl->qsbr, true);

    // Process shared interpreter work
-    process_interp_queue(&interp->mem_free_queue, tstate_impl->qsbr);
+    maybe_process_interp_queue(&interp->mem_free_queue, tstate_impl->qsbr);
}
void
        return;
    }

-    // Merge the thread's work queue into the interpreter's work queue.
    PyMutex_Lock(&interp->mem_free_queue.mutex);
+
+    // Merge the thread's work queue into the interpreter's work queue.
    llist_concat(&interp->mem_free_queue.head, queue);
-    _Py_atomic_store_int_relaxed(&interp->mem_free_queue.has_work, 1);
+
+    // Process the merged queue now (see gh-130794).
+    _PyThreadStateImpl *this_tstate = (_PyThreadStateImpl *)_PyThreadState_GET();
+    process_interp_queue(&interp->mem_free_queue, this_tstate->qsbr);
+
    PyMutex_Unlock(&interp->mem_free_queue.mutex);

    assert(llist_empty(queue));  // the thread's queue is now empty
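
For context, here is a minimal, self-contained sketch of the locking split the diff arrives at, written with plain pthreads and an integer counter in place of CPython's PyMutex, QSBR state, and llist machinery; all names (work_queue, process_queue_locked, maybe_process_queue, abandon_to_shared_queue) are illustrative, not CPython APIs. It shows the same shape: the processing function assumes its caller already holds the mutex, the "maybe" wrapper only try-locks so it never blocks, and the abandon path merges and then processes under the same lock acquisition, mirroring the gh-130794 fix.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct work_queue {
    pthread_mutex_t mutex;
    atomic_int has_work;
    int pending;              /* stand-in for the linked list of queued frees */
};

/* Caller must already hold q->mutex (mirrors process_interp_queue). */
static void
process_queue_locked(struct work_queue *q)
{
    q->pending = 0;           /* "free" everything that was queued */
    atomic_store_explicit(&q->has_work, q->pending != 0, memory_order_relaxed);
}

/* Opportunistic entry point (mirrors maybe_process_interp_queue):
   return immediately if nothing is queued, and never block on the lock. */
static void
maybe_process_queue(struct work_queue *q)
{
    if (!atomic_load_explicit(&q->has_work, memory_order_relaxed)) {
        return;
    }
    if (pthread_mutex_trylock(&q->mutex) == 0) {
        process_queue_locked(q);
        pthread_mutex_unlock(&q->mutex);
    }
}

/* Abandon path (mirrors _PyMem_AbandonDelayed): take the lock unconditionally,
   merge the thread's work, then process right away so nothing is stranded. */
static void
abandon_to_shared_queue(struct work_queue *q, int thread_pending)
{
    pthread_mutex_lock(&q->mutex);
    q->pending += thread_pending;
    process_queue_locked(q);
    pthread_mutex_unlock(&q->mutex);
}

int
main(void)
{
    struct work_queue q = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

    q.pending = 3;
    atomic_store_explicit(&q.has_work, 1, memory_order_relaxed);
    maybe_process_queue(&q);            /* drains the queue; lock was free */

    abandon_to_shared_queue(&q, 5);     /* merge, then drain under the lock */
    printf("pending after processing: %d\n", q.pending);
    return 0;
}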