#include "Python.h"
#include "pycore_ceval.h"         // _PyEval_SignalReceived()
#include "pycore_initconfig.h"    // _PyStatus_OK()
#include "pycore_interp.h"        // _Py_RunGC()
#include "pycore_pyerrors.h"      // _PyErr_GetRaisedException()
#include "pycore_pylifecycle.h"   // _PyErr_Print()
#include "pycore_pymem.h"         // _PyMem_IsPtrFreed()
#include "pycore_pystats.h"       // _Py_PrintSpecializationStats()

/*
   Notes about the implementation:

   - The GIL is just a boolean variable (locked) whose access is protected
     by a mutex (gil_mutex), and whose changes are signalled by a condition
     variable (gil_cond). gil_mutex is taken for short periods of time,
     and therefore mostly uncontended.

   - In the GIL-holding thread, the main loop (PyEval_EvalFrameEx) must be
     able to release the GIL on demand by another thread. A volatile boolean
     variable (gil_drop_request) is used for that purpose, which is checked
     at every turn of the eval loop. That variable is set after a wait of
     `interval` microseconds on `gil_cond` has timed out.

      [Actually, another volatile boolean variable (eval_breaker) is used
       which ORs several conditions into one. Volatile booleans are
       sufficient as inter-thread signalling means since Python is run
       on cache-coherent architectures only.]

   - A thread wanting to take the GIL will first let pass a given amount of
     time (`interval` microseconds) before setting gil_drop_request. This
     encourages a defined switching period, but doesn't enforce it since
     opcodes can take an arbitrary time to execute.

     The `interval` value is available for the user to read and modify
     using the Python API `sys.{get,set}switchinterval()`.

   - When a thread releases the GIL and gil_drop_request is set, that thread
     ensures that another GIL-awaiting thread gets scheduled.
     It does so by waiting on a condition variable (switch_cond) until
     the value of last_holder is changed to something other than its
     own thread state pointer, indicating that another thread was able to
     take the GIL.

     This is meant to prohibit the latency-adverse behaviour on multi-core
     machines where one thread would speculatively release the GIL, but still
     run and end up being the first to re-acquire it, making the "timeslices"
     much longer than expected.
     (Note: this mechanism is enabled with FORCE_SWITCHING above)
*/
51
52// GH-89279: Force inlining by using a macro.
53#if defined(_MSC_VER) && SIZEOF_INT == 4
54#define _Py_atomic_load_relaxed_int32(ATOMIC_VAL) (assert(sizeof((ATOMIC_VAL)->_value) == 4), *((volatile int*)&((ATOMIC_VAL)->_value)))
55#else
56#define _Py_atomic_load_relaxed_int32(ATOMIC_VAL) _Py_atomic_load_relaxed(ATOMIC_VAL)
57#endif
58

// Atomically copy the bits indicated by mask between two values.
static inline void
copy_eval_breaker_bits(uintptr_t *from, uintptr_t *to, uintptr_t mask)
{
    uintptr_t from_bits = _Py_atomic_load_uintptr_relaxed(from) & mask;
    uintptr_t old_value = _Py_atomic_load_uintptr_relaxed(to);
    uintptr_t to_bits = old_value & mask;
    if (from_bits == to_bits) {
        return;
    }

    uintptr_t new_value;
    do {
        new_value = (old_value & ~mask) | from_bits;
    } while (!_Py_atomic_compare_exchange_uintptr(to, &old_value, new_value));
}
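
/* Worked example (illustrative only): with mask == 0x0C, *from == 0x0A and
 * *to == 0x05, from_bits is 0x08, so *to becomes
 * (0x05 & ~0x0C) | 0x08 == 0x09.  Bits inside the mask are copied from
 * `from`, bits outside it are preserved, and the compare-exchange loop
 * retries if another thread modifies *to concurrently. */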

// When attaching a thread, set the global instrumentation version and
// _PY_CALLS_TO_DO_BIT from the current state of the interpreter.
static inline void
update_eval_breaker_for_thread(PyInterpreterState *interp, PyThreadState *tstate)
{
#ifdef Py_GIL_DISABLED
    // Free-threaded builds eagerly update the eval_breaker on *all* threads as
    // needed, so this function doesn't apply.
    return;
#endif

    int32_t npending = _Py_atomic_load_int32_relaxed(
        &interp->ceval.pending.npending);
    if (npending) {
        _Py_set_eval_breaker_bit(tstate, _PY_CALLS_TO_DO_BIT);
    }
    else if (_Py_IsMainThread()) {
        npending = _Py_atomic_load_int32_relaxed(
            &_PyRuntime.ceval.pending_mainthread.npending);
        if (npending) {
            _Py_set_eval_breaker_bit(tstate, _PY_CALLS_TO_DO_BIT);
        }
    }

    // _PY_CALLS_TO_DO_BIT was derived from other state above, so the only bits
    // we copy from our interpreter's state are the instrumentation version.
    copy_eval_breaker_bits(&interp->ceval.instrumentation_version,
                           &tstate->eval_breaker,
                           ~_PY_EVAL_EVENTS_MASK);
}

/*
 * Implementation of the Global Interpreter Lock (GIL).
 */

#include <stdlib.h>
#include <errno.h>

#include "condvar.h"

#define MUTEX_INIT(mut) \
    if (PyMUTEX_INIT(&(mut))) { \
        Py_FatalError("PyMUTEX_INIT(" #mut ") failed"); };
#define MUTEX_FINI(mut) \
    if (PyMUTEX_FINI(&(mut))) { \
        Py_FatalError("PyMUTEX_FINI(" #mut ") failed"); };
#define MUTEX_LOCK(mut) \
    if (PyMUTEX_LOCK(&(mut))) { \
        Py_FatalError("PyMUTEX_LOCK(" #mut ") failed"); };
#define MUTEX_UNLOCK(mut) \
    if (PyMUTEX_UNLOCK(&(mut))) { \
        Py_FatalError("PyMUTEX_UNLOCK(" #mut ") failed"); };

#define COND_INIT(cond) \
    if (PyCOND_INIT(&(cond))) { \
        Py_FatalError("PyCOND_INIT(" #cond ") failed"); };
#define COND_FINI(cond) \
    if (PyCOND_FINI(&(cond))) { \
        Py_FatalError("PyCOND_FINI(" #cond ") failed"); };
#define COND_SIGNAL(cond) \
    if (PyCOND_SIGNAL(&(cond))) { \
        Py_FatalError("PyCOND_SIGNAL(" #cond ") failed"); };
#define COND_WAIT(cond, mut) \
    if (PyCOND_WAIT(&(cond), &(mut))) { \
        Py_FatalError("PyCOND_WAIT(" #cond ") failed"); };
#define COND_TIMED_WAIT(cond, mut, microseconds, timeout_result) \
    { \
        int r = PyCOND_TIMEDWAIT(&(cond), &(mut), (microseconds)); \
        if (r < 0) \
            Py_FatalError("PyCOND_TIMEDWAIT(" #cond ") failed"); \
        if (r) /* 1 == timeout, 2 == impl. can't say, so assume timeout */ \
            timeout_result = 1; \
        else \
            timeout_result = 0; \
    }

#define DEFAULT_INTERVAL 5000
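
/* Usage sketch (mirrors the wait loop in take_gil() below): wait up to
 * `interval` microseconds for the GIL holder to release it, then inspect
 * the timeout flag:
 *
 *     int timed_out = 0;
 *     COND_TIMED_WAIT(gil->cond, gil->mutex, interval, timed_out);
 *     if (timed_out) {
 *         ...ask the holder to drop the GIL...
 *     }
 */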

static void _gil_initialize(struct _gil_runtime_state *gil)
{
    gil->locked = -1;
    gil->interval = DEFAULT_INTERVAL;
}

static int gil_created(struct _gil_runtime_state *gil)
{
    if (gil == NULL) {
        return 0;
    }
    return (_Py_atomic_load_int_acquire(&gil->locked) >= 0);
}

static void create_gil(struct _gil_runtime_state *gil)
{
    MUTEX_INIT(gil->mutex);
#ifdef FORCE_SWITCHING
    MUTEX_INIT(gil->switch_mutex);
#endif
    COND_INIT(gil->cond);
#ifdef FORCE_SWITCHING
    COND_INIT(gil->switch_cond);
#endif
    _Py_atomic_store_ptr_relaxed(&gil->last_holder, 0);
    _Py_ANNOTATE_RWLOCK_CREATE(&gil->locked);
    _Py_atomic_store_int_release(&gil->locked, 0);
}

static void destroy_gil(struct _gil_runtime_state *gil)
{
    /* some pthread-like implementations tie the mutex to the cond
     * and must have the cond destroyed first.
     */
    COND_FINI(gil->cond);
    MUTEX_FINI(gil->mutex);
#ifdef FORCE_SWITCHING
    COND_FINI(gil->switch_cond);
    MUTEX_FINI(gil->switch_mutex);
#endif
    _Py_atomic_store_int_release(&gil->locked, -1);
    _Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked);
}

#ifdef HAVE_FORK
static void recreate_gil(struct _gil_runtime_state *gil)
{
    _Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked);
    /* XXX should we destroy the old OS resources here? */
    create_gil(gil);
}
#endif

static void
drop_gil(PyInterpreterState *interp, PyThreadState *tstate)
{
    struct _ceval_state *ceval = &interp->ceval;
    /* If tstate is NULL, the caller is indicating that we're releasing
       the GIL for the last time in this thread.  This is particularly
       relevant when the current thread state is finalizing or its
       interpreter is finalizing (either may be in an inconsistent
       state).  In that case the current thread will definitely
       never try to acquire the GIL again. */
    // XXX It may be more correct to check tstate->_status.finalizing.
    // XXX assert(tstate == NULL || !tstate->_status.cleared);

    struct _gil_runtime_state *gil = ceval->gil;
#ifdef Py_GIL_DISABLED
    if (!gil->enabled) {
        return;
    }
#endif
    if (!_Py_atomic_load_int_relaxed(&gil->locked)) {
        Py_FatalError("drop_gil: GIL is not locked");
    }

    /* tstate is allowed to be NULL (early interpreter init) */
    if (tstate != NULL) {
        /* Sub-interpreter support: threads might have been switched
           under our feet using PyThreadState_Swap(). Fix the GIL last
           holder variable so that our heuristics work. */
        _Py_atomic_store_ptr_relaxed(&gil->last_holder, tstate);
    }

    MUTEX_LOCK(gil->mutex);
    _Py_ANNOTATE_RWLOCK_RELEASED(&gil->locked, /*is_write=*/1);
    _Py_atomic_store_int_relaxed(&gil->locked, 0);
    COND_SIGNAL(gil->cond);
    MUTEX_UNLOCK(gil->mutex);

#ifdef FORCE_SWITCHING
    /* We check tstate first in case we might be releasing the GIL for
       the last time in this thread.  In that case there's a possible
       race with tstate->interp getting deleted after gil->mutex is
       unlocked and before the following code runs, leading to a crash.
       We can use (tstate == NULL) to indicate the thread is done with
       the GIL, and that's the only time we might delete the
       interpreter, so checking tstate first prevents the crash.
       See https://github.com/python/cpython/issues/104341. */
    if (tstate != NULL &&
        _Py_eval_breaker_bit_is_set(tstate, _PY_GIL_DROP_REQUEST_BIT)) {
        MUTEX_LOCK(gil->switch_mutex);
        /* Not switched yet => wait */
        if (((PyThreadState*)_Py_atomic_load_ptr_relaxed(&gil->last_holder)) == tstate)
        {
            assert(_PyThreadState_CheckConsistency(tstate));
            _Py_unset_eval_breaker_bit(tstate, _PY_GIL_DROP_REQUEST_BIT);
            /* NOTE: if COND_WAIT does not atomically start waiting when
               releasing the mutex, another thread can run through, take
               the GIL and drop it again, and reset the condition
               before we even had a chance to wait for it. */
            COND_WAIT(gil->switch_cond, gil->switch_mutex);
        }
        MUTEX_UNLOCK(gil->switch_mutex);
    }
#endif
}

/* Take the GIL.

   The function saves errno at entry and restores its value at exit.

   tstate must be non-NULL. */
static void
take_gil(PyThreadState *tstate)
{
    int err = errno;

    assert(tstate != NULL);
    /* We shouldn't be using a thread state that isn't viable any more. */
    // XXX It may be more correct to check tstate->_status.finalizing.
    // XXX assert(!tstate->_status.cleared);

    if (_PyThreadState_MustExit(tstate)) {
        /* bpo-39877: If Py_Finalize() has been called and tstate is not the
           thread which called Py_Finalize(), exit the thread immediately.

           This code path can be reached by a daemon thread after Py_Finalize()
           completes.  In this case, tstate is a dangling pointer: it points
           to freed PyThreadState memory. */
        PyThread_exit_thread();
    }

    assert(_PyThreadState_CheckConsistency(tstate));
    PyInterpreterState *interp = tstate->interp;
    struct _gil_runtime_state *gil = interp->ceval.gil;
#ifdef Py_GIL_DISABLED
    if (!gil->enabled) {
        return;
    }
#endif

    /* Check that _PyEval_InitThreads() was called to create the lock */
    assert(gil_created(gil));

    MUTEX_LOCK(gil->mutex);

    int drop_requested = 0;
    while (_Py_atomic_load_int_relaxed(&gil->locked)) {
        unsigned long saved_switchnum = gil->switch_number;

        unsigned long interval = (gil->interval >= 1 ? gil->interval : 1);
        int timed_out = 0;
        COND_TIMED_WAIT(gil->cond, gil->mutex, interval, timed_out);

        /* If we timed out and no switch occurred in the meantime, it is time
           to ask the GIL-holding thread to drop it. */
        if (timed_out &&
            _Py_atomic_load_int_relaxed(&gil->locked) &&
            gil->switch_number == saved_switchnum)
        {
            PyThreadState *holder_tstate =
                (PyThreadState*)_Py_atomic_load_ptr_relaxed(&gil->last_holder);
            if (_PyThreadState_MustExit(tstate)) {
                MUTEX_UNLOCK(gil->mutex);
                // gh-96387: If this loop requested a GIL drop in a previous
                // iteration, reset the request.  Otherwise, drop_gil() can
                // block forever waiting for the thread which exited.  Drop
                // requests made by other threads are also reset: those
                // threads may have to make a drop request again (iterate one
                // more time).
                if (drop_requested) {
                    _Py_unset_eval_breaker_bit(holder_tstate, _PY_GIL_DROP_REQUEST_BIT);
                }
                PyThread_exit_thread();
            }
            assert(_PyThreadState_CheckConsistency(tstate));

            _Py_set_eval_breaker_bit(holder_tstate, _PY_GIL_DROP_REQUEST_BIT);
            drop_requested = 1;
        }
    }

#ifdef FORCE_SWITCHING
    /* This mutex must be taken before modifying gil->last_holder:
       see drop_gil(). */
    MUTEX_LOCK(gil->switch_mutex);
#endif
    /* We now hold the GIL */
    _Py_atomic_store_int_relaxed(&gil->locked, 1);
    _Py_ANNOTATE_RWLOCK_ACQUIRED(&gil->locked, /*is_write=*/1);

    if (tstate != (PyThreadState*)_Py_atomic_load_ptr_relaxed(&gil->last_holder)) {
        _Py_atomic_store_ptr_relaxed(&gil->last_holder, tstate);
        ++gil->switch_number;
    }

#ifdef FORCE_SWITCHING
    COND_SIGNAL(gil->switch_cond);
    MUTEX_UNLOCK(gil->switch_mutex);
#endif

    if (_PyThreadState_MustExit(tstate)) {
        /* bpo-36475: If Py_Finalize() has been called and tstate is not
           the thread which called Py_Finalize(), exit the thread
           immediately.

           This code path can be reached by a daemon thread which was waiting
           in take_gil() while the main thread called
           wait_for_thread_shutdown() from Py_Finalize(). */
        MUTEX_UNLOCK(gil->mutex);
        /* Passing NULL to drop_gil() indicates that this thread is about to
           terminate and will never hold the GIL again. */
        drop_gil(interp, NULL);
        PyThread_exit_thread();
    }
    assert(_PyThreadState_CheckConsistency(tstate));

    _Py_unset_eval_breaker_bit(tstate, _PY_GIL_DROP_REQUEST_BIT);
    update_eval_breaker_for_thread(interp, tstate);

    MUTEX_UNLOCK(gil->mutex);

    errno = err;
}

void _PyEval_SetSwitchInterval(unsigned long microseconds)
{
    PyInterpreterState *interp = _PyInterpreterState_GET();
    struct _gil_runtime_state *gil = interp->ceval.gil;
    assert(gil != NULL);
    gil->interval = microseconds;
}

unsigned long _PyEval_GetSwitchInterval(void)
{
    PyInterpreterState *interp = _PyInterpreterState_GET();
    struct _gil_runtime_state *gil = interp->ceval.gil;
    assert(gil != NULL);
    return gil->interval;
}
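
/* Usage sketch (hypothetical embedding code): shorten the switching period
 * to ~1 ms before starting many CPU-bound threads.  The Python-level
 * equivalent is sys.setswitchinterval(0.001), which takes seconds rather
 * than microseconds:
 *
 *     _PyEval_SetSwitchInterval(1000);
 *     assert(_PyEval_GetSwitchInterval() == 1000);
 */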

int
_PyEval_ThreadsInitialized(void)
{
    /* XXX This is only needed for an assert in PyGILState_Ensure(),
     * which currently does not work with subinterpreters.
     * Thus we only use the main interpreter. */
    PyInterpreterState *interp = _PyInterpreterState_Main();
    if (interp == NULL) {
        return 0;
    }
    struct _gil_runtime_state *gil = interp->ceval.gil;
    return gil_created(gil);
}

// Function removed in the Python 3.13 API but kept in the stable ABI.
PyAPI_FUNC(int)
PyEval_ThreadsInitialized(void)
{
    return _PyEval_ThreadsInitialized();
}

#ifndef NDEBUG
static inline int
current_thread_holds_gil(struct _gil_runtime_state *gil, PyThreadState *tstate)
{
    if (((PyThreadState*)_Py_atomic_load_ptr_relaxed(&gil->last_holder)) != tstate) {
        return 0;
    }
    return _Py_atomic_load_int_relaxed(&gil->locked);
}
#endif

static void
init_shared_gil(PyInterpreterState *interp, struct _gil_runtime_state *gil)
{
    assert(gil_created(gil));
    interp->ceval.gil = gil;
    interp->ceval.own_gil = 0;
}

static void
init_own_gil(PyInterpreterState *interp, struct _gil_runtime_state *gil)
{
    assert(!gil_created(gil));
#ifdef Py_GIL_DISABLED
    gil->enabled = _PyInterpreterState_GetConfig(interp)->enable_gil == _PyConfig_GIL_ENABLE;
#endif
    create_gil(gil);
    assert(gil_created(gil));
    interp->ceval.gil = gil;
    interp->ceval.own_gil = 1;
}

void
_PyEval_InitGIL(PyThreadState *tstate, int own_gil)
{
    assert(tstate->interp->ceval.gil == NULL);
    if (!own_gil) {
        /* The interpreter will share the main interpreter's GIL instead. */
        PyInterpreterState *main_interp = _PyInterpreterState_Main();
        assert(tstate->interp != main_interp);
        struct _gil_runtime_state *gil = main_interp->ceval.gil;
        init_shared_gil(tstate->interp, gil);
        assert(!current_thread_holds_gil(gil, tstate));
    }
    else {
        PyThread_init_thread();
        init_own_gil(tstate->interp, &tstate->interp->_gil);
    }

    // Lock the GIL and mark the current thread as attached.
    _PyThreadState_Attach(tstate);
}

void
_PyEval_FiniGIL(PyInterpreterState *interp)
{
    struct _gil_runtime_state *gil = interp->ceval.gil;
    if (gil == NULL) {
        /* It was already finalized (or hasn't been initialized yet). */
        assert(!interp->ceval.own_gil);
        return;
    }
    else if (!interp->ceval.own_gil) {
#ifdef Py_DEBUG
        PyInterpreterState *main_interp = _PyInterpreterState_Main();
        assert(main_interp != NULL && interp != main_interp);
        assert(interp->ceval.gil == main_interp->ceval.gil);
#endif
        interp->ceval.gil = NULL;
        return;
    }

    if (!gil_created(gil)) {
        /* First Py_InitializeFromConfig() call: the GIL doesn't exist
           yet: do nothing. */
        return;
    }

    destroy_gil(gil);
    assert(!gil_created(gil));
    interp->ceval.gil = NULL;
}

void
PyEval_InitThreads(void)
{
    /* Do nothing: kept for backward compatibility */
}

void
_PyEval_Fini(void)
{
#ifdef Py_STATS
    _Py_PrintSpecializationStats(1);
#endif
}

// Function removed in the Python 3.13 API but kept in the stable ABI.
PyAPI_FUNC(void)
PyEval_AcquireLock(void)
{
    PyThreadState *tstate = _PyThreadState_GET();
    _Py_EnsureTstateNotNULL(tstate);

    take_gil(tstate);
}

// Function removed in the Python 3.13 API but kept in the stable ABI.
PyAPI_FUNC(void)
PyEval_ReleaseLock(void)
{
    PyThreadState *tstate = _PyThreadState_GET();
    /* This function must succeed when the current thread state is NULL.
       We therefore avoid PyThreadState_Get() which dumps a fatal error
       in debug mode. */
    drop_gil(tstate->interp, tstate);
}

void
_PyEval_AcquireLock(PyThreadState *tstate)
{
    _Py_EnsureTstateNotNULL(tstate);
    take_gil(tstate);
}

void
_PyEval_ReleaseLock(PyInterpreterState *interp, PyThreadState *tstate)
{
    /* If tstate is NULL then we do not expect the current thread
       to acquire the GIL ever again. */
    assert(tstate == NULL || tstate->interp == interp);
    drop_gil(interp, tstate);
}

void
PyEval_AcquireThread(PyThreadState *tstate)
{
    _Py_EnsureTstateNotNULL(tstate);
    _PyThreadState_Attach(tstate);
}

void
PyEval_ReleaseThread(PyThreadState *tstate)
{
    assert(_PyThreadState_CheckConsistency(tstate));
    _PyThreadState_Detach(tstate);
}

#ifdef HAVE_FORK
/* This function is called from PyOS_AfterFork_Child to re-initialize the
   GIL and pending calls lock. */
PyStatus
_PyEval_ReInitThreads(PyThreadState *tstate)
{
    assert(tstate->interp == _PyInterpreterState_Main());

    struct _gil_runtime_state *gil = tstate->interp->ceval.gil;
    if (!gil_created(gil)) {
        return _PyStatus_OK();
    }
    recreate_gil(gil);

    take_gil(tstate);

    struct _pending_calls *pending = &tstate->interp->ceval.pending;
    _PyMutex_at_fork_reinit(&pending->mutex);

    return _PyStatus_OK();
}
#endif

PyThreadState *
PyEval_SaveThread(void)
{
    PyThreadState *tstate = _PyThreadState_GET();
    _PyThreadState_Detach(tstate);
    return tstate;
}

void
PyEval_RestoreThread(PyThreadState *tstate)
{
#ifdef MS_WINDOWS
    int err = GetLastError();
#endif

    _Py_EnsureTstateNotNULL(tstate);
    _PyThreadState_Attach(tstate);

#ifdef MS_WINDOWS
    SetLastError(err);
#endif
}
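
/* Usage sketch: this pair is what the Py_BEGIN_ALLOW_THREADS /
 * Py_END_ALLOW_THREADS macros expand to; do_blocking_io() is a placeholder
 * for work that doesn't touch Python objects:
 *
 *     PyThreadState *save = PyEval_SaveThread();
 *     do_blocking_io();               // other Python threads may run here
 *     PyEval_RestoreThread(save);
 */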

void
_PyEval_SignalReceived(void)
{
    _Py_set_eval_breaker_bit(_PyRuntime.main_tstate, _PY_SIGNALS_PENDING_BIT);
}


#ifndef Py_GIL_DISABLED
static void
signal_active_thread(PyInterpreterState *interp, uintptr_t bit)
{
    struct _gil_runtime_state *gil = interp->ceval.gil;

    // If a thread from the targeted interpreter is holding the GIL, signal
    // that thread.  Otherwise, the next thread to run from the targeted
    // interpreter will have its bit set as part of taking the GIL.
    MUTEX_LOCK(gil->mutex);
    if (_Py_atomic_load_int_relaxed(&gil->locked)) {
        PyThreadState *holder = (PyThreadState*)_Py_atomic_load_ptr_relaxed(&gil->last_holder);
        if (holder->interp == interp) {
            _Py_set_eval_breaker_bit(holder, bit);
        }
    }
    MUTEX_UNLOCK(gil->mutex);
}
#endif

/* Mechanism whereby asynchronously executing callbacks (e.g. UNIX
   signal handlers or Mac I/O completion routines) can schedule calls
   to a function to be called synchronously.
   The synchronous function is called with one void* argument.
   It should return 0 for success or -1 for failure -- failure should
   be accompanied by an exception.

   If registration succeeds, the registering function returns 0; if it
   fails (e.g. due to too many pending calls) it returns -1 (without
   setting an exception condition).

   Note that because registration may occur from within signal handlers,
   or other asynchronous events, calling malloc() is unsafe!

   Any thread can schedule pending calls, but only the main thread
   will execute them.
   There is no facility to schedule calls to a particular thread, but
   that should be easy to change, should that ever be required.  In
   that case, the static variables here should go into the python
   threadstate.
*/
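
/* Usage sketch (hypothetical callback): any thread may schedule work that
 * will later run in the main thread while it holds the GIL:
 *
 *     static int notify_main(void *arg)
 *     {
 *         PySys_WriteStdout("tick\n");
 *         return 0;                   // -1 plus an exception on failure
 *     }
 *
 *     if (Py_AddPendingCall(notify_main, NULL) < 0) {
 *         // the queue was full; the caller may retry later
 *     }
 */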

/* Push one item onto the queue while holding the lock. */
static int
_push_pending_call(struct _pending_calls *pending,
                   _Py_pending_call_func func, void *arg, int flags)
{
    if (pending->npending == pending->max) {
        return _Py_ADD_PENDING_FULL;
    }
    assert(pending->npending < pending->max);

    int i = pending->next;
    assert(pending->calls[i].func == NULL);

    pending->calls[i].func = func;
    pending->calls[i].arg = arg;
    pending->calls[i].flags = flags;

    assert(pending->npending < PENDINGCALLSARRAYSIZE);
    _Py_atomic_add_int32(&pending->npending, 1);

    pending->next = (i + 1) % PENDINGCALLSARRAYSIZE;
    assert(pending->next != pending->first
           || pending->npending == pending->max);

    return _Py_ADD_PENDING_SUCCESS;
}

static int
_next_pending_call(struct _pending_calls *pending,
                   int (**func)(void *), void **arg, int *flags)
{
    int i = pending->first;
    if (pending->npending == 0) {
        /* Queue empty */
        assert(i == pending->next);
        assert(pending->calls[i].func == NULL);
        return -1;
    }
    *func = pending->calls[i].func;
    *arg = pending->calls[i].arg;
    *flags = pending->calls[i].flags;
    return i;
}

/* Pop one item off the queue while holding the lock. */
static void
_pop_pending_call(struct _pending_calls *pending,
                  int (**func)(void *), void **arg, int *flags)
{
    int i = _next_pending_call(pending, func, arg, flags);
    if (i >= 0) {
        pending->calls[i] = (struct _pending_call){0};
        pending->first = (i + 1) % PENDINGCALLSARRAYSIZE;
        assert(pending->npending > 0);
        _Py_atomic_add_int32(&pending->npending, -1);
    }
}
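
/* The queue is a fixed-size ring buffer: pushes advance `next` and pops
 * advance `first`, both modulo PENDINGCALLSARRAYSIZE, while `npending` is
 * updated atomically so it can be read without the lock.  For example,
 * after pushing A, B, C and popping once, slot 0 is zeroed and:
 *
 *     first == 1 (B is next to pop), next == 3, npending == 2
 */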

/* This implementation is thread-safe.  It allows calls to be scheduled
   from any thread, and even from an executing callback.
 */

_Py_add_pending_call_result
_PyEval_AddPendingCall(PyInterpreterState *interp,
                       _Py_pending_call_func func, void *arg, int flags)
{
    struct _pending_calls *pending = &interp->ceval.pending;
    int main_only = (flags & _Py_PENDING_MAINTHREADONLY) != 0;
    if (main_only) {
        /* The main thread only exists in the main interpreter. */
        assert(_Py_IsMainInterpreter(interp));
        pending = &_PyRuntime.ceval.pending_mainthread;
    }

    PyMutex_Lock(&pending->mutex);
    _Py_add_pending_call_result result =
        _push_pending_call(pending, func, arg, flags);
    PyMutex_Unlock(&pending->mutex);

    if (main_only) {
        _Py_set_eval_breaker_bit(_PyRuntime.main_tstate, _PY_CALLS_TO_DO_BIT);
    }
    else {
#ifdef Py_GIL_DISABLED
        _Py_set_eval_breaker_bit_all(interp, _PY_CALLS_TO_DO_BIT);
#else
        signal_active_thread(interp, _PY_CALLS_TO_DO_BIT);
#endif
    }

    return result;
}
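
/* Usage sketch (hypothetical caller): _Py_PENDING_RAWFREE asks the
 * machinery to PyMem_RawFree() the argument after the callback runs (see
 * _make_pending_calls() below), so heap-allocated arguments aren't leaked:
 *
 *     int *value = PyMem_RawMalloc(sizeof(int));
 *     *value = 42;
 *     _Py_add_pending_call_result r = _PyEval_AddPendingCall(
 *         interp, run_later, value, _Py_PENDING_RAWFREE);
 */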

int
Py_AddPendingCall(_Py_pending_call_func func, void *arg)
{
    /* Legacy users of this API will continue to target the main thread
       (of the main interpreter). */
    PyInterpreterState *interp = _PyInterpreterState_Main();
    _Py_add_pending_call_result r =
        _PyEval_AddPendingCall(interp, func, arg, _Py_PENDING_MAINTHREADONLY);
    if (r == _Py_ADD_PENDING_FULL) {
        return -1;
    }
    else {
        assert(r == _Py_ADD_PENDING_SUCCESS);
        return 0;
    }
}

static int
handle_signals(PyThreadState *tstate)
{
    assert(_PyThreadState_CheckConsistency(tstate));
    _Py_unset_eval_breaker_bit(tstate, _PY_SIGNALS_PENDING_BIT);
    if (!_Py_ThreadCanHandleSignals(tstate->interp)) {
        return 0;
    }
    if (_PyErr_CheckSignalsTstate(tstate) < 0) {
        /* On failure, re-schedule a call to handle_signals(). */
        _Py_set_eval_breaker_bit(tstate, _PY_SIGNALS_PENDING_BIT);
        return -1;
    }
    return 0;
}

static int
_make_pending_calls(struct _pending_calls *pending, int32_t *p_npending)
{
    int res = 0;
    int32_t npending = -1;

    assert(sizeof(pending->max) <= sizeof(size_t)
           && ((size_t)pending->max) <= Py_ARRAY_LENGTH(pending->calls));
    int32_t maxloop = pending->maxloop;
    if (maxloop == 0) {
        maxloop = pending->max;
    }
    assert(maxloop > 0 && maxloop <= pending->max);

    /* perform a bounded number of calls, in case of recursion */
    for (int i=0; i<maxloop; i++) {
        _Py_pending_call_func func = NULL;
        void *arg = NULL;
        int flags = 0;

        /* pop one item off the queue while holding the lock */
        PyMutex_Lock(&pending->mutex);
        _pop_pending_call(pending, &func, &arg, &flags);
        npending = pending->npending;
        PyMutex_Unlock(&pending->mutex);

        /* Check if there are any more pending calls. */
        if (func == NULL) {
            assert(npending == 0);
            break;
        }

        /* having released the lock, perform the callback */
        res = func(arg);
        if ((flags & _Py_PENDING_RAWFREE) && arg != NULL) {
            PyMem_RawFree(arg);
        }
        if (res != 0) {
            res = -1;
            goto finally;
        }
    }

finally:
    *p_npending = npending;
    return res;
}

static void
signal_pending_calls(PyThreadState *tstate, PyInterpreterState *interp)
{
#ifdef Py_GIL_DISABLED
    _Py_set_eval_breaker_bit_all(interp, _PY_CALLS_TO_DO_BIT);
#else
    _Py_set_eval_breaker_bit(tstate, _PY_CALLS_TO_DO_BIT);
#endif
}

static void
unsignal_pending_calls(PyThreadState *tstate, PyInterpreterState *interp)
{
#ifdef Py_GIL_DISABLED
    _Py_unset_eval_breaker_bit_all(interp, _PY_CALLS_TO_DO_BIT);
#else
    _Py_unset_eval_breaker_bit(tstate, _PY_CALLS_TO_DO_BIT);
#endif
}

static int
make_pending_calls(PyThreadState *tstate)
{
    PyInterpreterState *interp = tstate->interp;
    struct _pending_calls *pending = &interp->ceval.pending;
    struct _pending_calls *pending_main = &_PyRuntime.ceval.pending_mainthread;

    /* Only one thread (per interpreter) may run the pending calls
       at once.  In the same way, we don't do recursive pending calls. */
    PyMutex_Lock(&pending->mutex);
    if (pending->handling_thread != NULL) {
        /* A pending call was added after another thread was already
           handling the pending calls (and had already "unsignaled").
           Once that thread is done, it may have taken care of all the
           pending calls, or there might be some still waiting.
           To avoid all threads constantly stopping on the eval breaker,
           we clear the bit for this thread and make sure it is set
           for the thread currently handling the pending call. */
        _Py_set_eval_breaker_bit(pending->handling_thread, _PY_CALLS_TO_DO_BIT);
        _Py_unset_eval_breaker_bit(tstate, _PY_CALLS_TO_DO_BIT);
        PyMutex_Unlock(&pending->mutex);
        return 0;
    }
    pending->handling_thread = tstate;
    PyMutex_Unlock(&pending->mutex);

    /* unsignal before starting to call callbacks, so that any callback
       added in-between re-signals */
    unsignal_pending_calls(tstate, interp);

    int32_t npending;
    if (_make_pending_calls(pending, &npending) != 0) {
        pending->handling_thread = NULL;
        /* There might not be more calls to make, but we play it safe. */
        signal_pending_calls(tstate, interp);
        return -1;
    }
    if (npending > 0) {
        /* We hit pending->maxloop. */
        signal_pending_calls(tstate, interp);
    }

    if (_Py_IsMainThread() && _Py_IsMainInterpreter(interp)) {
        if (_make_pending_calls(pending_main, &npending) != 0) {
            pending->handling_thread = NULL;
            /* There might not be more calls to make, but we play it safe. */
            signal_pending_calls(tstate, interp);
            return -1;
        }
        if (npending > 0) {
            /* We hit pending_main->maxloop. */
            signal_pending_calls(tstate, interp);
        }
    }

    pending->handling_thread = NULL;
    return 0;
}

void
_Py_set_eval_breaker_bit_all(PyInterpreterState *interp, uintptr_t bit)
{
    _PyRuntimeState *runtime = &_PyRuntime;

    HEAD_LOCK(runtime);
    for (PyThreadState *tstate = interp->threads.head; tstate != NULL; tstate = tstate->next) {
        _Py_set_eval_breaker_bit(tstate, bit);
    }
    HEAD_UNLOCK(runtime);
}

void
_Py_unset_eval_breaker_bit_all(PyInterpreterState *interp, uintptr_t bit)
{
    _PyRuntimeState *runtime = &_PyRuntime;

    HEAD_LOCK(runtime);
    for (PyThreadState *tstate = interp->threads.head; tstate != NULL; tstate = tstate->next) {
        _Py_unset_eval_breaker_bit(tstate, bit);
    }
    HEAD_UNLOCK(runtime);
}

void
_Py_FinishPendingCalls(PyThreadState *tstate)
{
    assert(PyGILState_Check());
    assert(_PyThreadState_CheckConsistency(tstate));

    if (make_pending_calls(tstate) < 0) {
        PyObject *exc = _PyErr_GetRaisedException(tstate);
        PyErr_BadInternalCall();
        _PyErr_ChainExceptions1(exc);
        _PyErr_Print(tstate);
    }
}

int
_PyEval_MakePendingCalls(PyThreadState *tstate)
{
    int res;

    if (_Py_IsMainThread() && _Py_IsMainInterpreter(tstate->interp)) {
        /* Python signal handler doesn't really queue a callback:
           it only signals that a signal was received,
           see _PyEval_SignalReceived(). */
        res = handle_signals(tstate);
        if (res != 0) {
            return res;
        }
    }

    res = make_pending_calls(tstate);
    if (res != 0) {
        return res;
    }

    return 0;
}
a4a9f2e8
MS
991/* Py_MakePendingCalls() is a simple wrapper for the sake
992 of backward-compatibility. */
993int
994Py_MakePendingCalls(void)
995{
996 assert(PyGILState_Check());
997
998 PyThreadState *tstate = _PyThreadState_GET();
f63d3787 999 assert(_PyThreadState_CheckConsistency(tstate));
a4a9f2e8 1000
757b402e
ES
1001 /* Only execute pending calls on the main thread. */
1002 if (!_Py_IsMainThread() || !_Py_IsMainInterpreter(tstate->interp)) {
1003 return 0;
a4a9f2e8 1004 }
757b402e 1005 return _PyEval_MakePendingCalls(tstate);
a4a9f2e8
MS
1006}
1007

void
_PyEval_InitState(PyInterpreterState *interp)
{
    _gil_initialize(&interp->_gil);
}


/* Do periodic things, like check for signals and async I/O.
 * We need to do this reasonably frequently, but not too frequently.
 * All loops should include a check of the eval breaker.
 * We also check on return from any builtin function.
 *
 * ## More Details ##
 *
 * The eval loop (this function) normally executes the instructions
 * of a code object sequentially.  However, the runtime supports a
 * number of out-of-band execution scenarios that may pause that
 * sequential execution long enough to do that out-of-band work
 * in the current thread using the current PyThreadState.
 *
 * The scenarios include:
 *
 *  - cyclic garbage collection
 *  - GIL drop requests
 *  - "async" exceptions
 *  - "pending calls" (some only in the main thread)
 *  - signal handling (only in the main thread)
 *
 * When the need for one of the above is detected, the eval loop
 * pauses long enough to handle the detected case.  Then, if doing
 * so didn't trigger an exception, the eval loop resumes executing
 * the sequential instructions.
 *
 * To make this work, the eval loop periodically checks if any
 * of the above needs to happen.  The individual checks can be
 * expensive if computed each time, so a while back we switched
 * to using pre-computed, per-interpreter variables for the checks,
 * and later consolidated that to a single "eval breaker" variable
 * (now a PyInterpreterState field).
 *
 * For the longest time, the eval breaker check would happen
 * frequently, every 5 or so times through the loop, regardless
 * of what instruction ran last or what would run next.  Then, in
 * early 2021 (gh-18334, commit 4958f5d), we switched to checking
 * the eval breaker less frequently, by hard-coding the check to
 * specific places in the eval loop (e.g. certain instructions).
 * The intent then was to check after returning from calls
 * and on the back edges of loops.
 *
 * In addition to being more efficient, that approach keeps
 * the eval loop from running arbitrary code between instructions
 * that don't handle that well.  (See gh-74174.)
 *
 * Currently, the eval breaker check happens on back edges in
 * the control flow graph, which pretty much applies to all loops,
 * and most calls.
 * (See bytecodes.c for exact information.)
 *
 * One consequence of this approach is that it might not be obvious
 * how to force any specific thread to pick up the eval breaker,
 * or for any specific thread to not pick it up.  Mostly this
 * involves judicious uses of locks and careful ordering of code,
 * while avoiding code that might trigger the eval breaker
 * until so desired.
 */
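
/* Sketch of the fast path as the eval loop sees it (the real check lives in
 * bytecodes.c; the bit names match those handled in _Py_HandlePending()):
 *
 *     uintptr_t bits = _Py_atomic_load_uintptr_relaxed(&tstate->eval_breaker);
 *     if ((bits & _PY_EVAL_EVENTS_MASK) != 0) {
 *         if (_Py_HandlePending(tstate) != 0) {
 *             goto error;
 *         }
 *     }
 */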

int
_Py_HandlePending(PyThreadState *tstate)
{
    uintptr_t breaker = _Py_atomic_load_uintptr_relaxed(&tstate->eval_breaker);

    /* Stop-the-world */
    if ((breaker & _PY_EVAL_PLEASE_STOP_BIT) != 0) {
        _Py_unset_eval_breaker_bit(tstate, _PY_EVAL_PLEASE_STOP_BIT);
        _PyThreadState_Suspend(tstate);

        /* The attach blocks until the stop-the-world event is complete. */
        _PyThreadState_Attach(tstate);
    }

    /* Pending signals */
    if ((breaker & _PY_SIGNALS_PENDING_BIT) != 0) {
        if (handle_signals(tstate) != 0) {
            return -1;
        }
    }

    /* Pending calls */
    if ((breaker & _PY_CALLS_TO_DO_BIT) != 0) {
        if (make_pending_calls(tstate) != 0) {
            return -1;
        }
    }

#ifdef Py_GIL_DISABLED
    /* Objects with refcounts to merge */
    if ((breaker & _PY_EVAL_EXPLICIT_MERGE_BIT) != 0) {
        _Py_unset_eval_breaker_bit(tstate, _PY_EVAL_EXPLICIT_MERGE_BIT);
        _Py_brc_merge_refcounts(tstate);
    }
#endif

    /* GC scheduled to run */
    if ((breaker & _PY_GC_SCHEDULED_BIT) != 0) {
        _Py_unset_eval_breaker_bit(tstate, _PY_GC_SCHEDULED_BIT);
        _Py_RunGC(tstate);
    }

    /* GIL drop request */
    if ((breaker & _PY_GIL_DROP_REQUEST_BIT) != 0) {
        /* Give another thread a chance */
        _PyThreadState_Detach(tstate);

        /* Other threads may run now */

        _PyThreadState_Attach(tstate);
    }

    /* Check for asynchronous exception. */
    if ((breaker & _PY_ASYNC_EXCEPTION_BIT) != 0) {
        _Py_unset_eval_breaker_bit(tstate, _PY_ASYNC_EXCEPTION_BIT);
        PyObject *exc = _Py_atomic_exchange_ptr(&tstate->async_exc, NULL);
        if (exc != NULL) {
            _PyErr_SetNone(tstate, exc);
            Py_DECREF(exc);
            return -1;
        }
    }
    return 0;
}