#include "Python.h"
#include "pycore_ceval.h"         // _PyEval_SignalReceived()
#include "pycore_gc.h"            // _Py_RunGC()
#include "pycore_initconfig.h"    // _PyStatus_OK()
#include "pycore_optimizer.h"     // _Py_Executors_InvalidateCold()
#include "pycore_pyerrors.h"      // _PyErr_GetRaisedException()
#include "pycore_pylifecycle.h"   // _PyErr_Print()
#include "pycore_pystats.h"       // _Py_PrintSpecializationStats()
#include "pycore_runtime.h"       // _PyRuntime

/*
   Notes about the implementation:

   - The GIL is just a boolean variable (locked) whose access is protected
     by a mutex (gil_mutex), and whose changes are signalled by a condition
     variable (gil_cond). gil_mutex is taken for short periods of time,
     and is therefore mostly uncontended.

   - In the GIL-holding thread, the main loop (PyEval_EvalFrameEx) must be
     able to release the GIL on demand by another thread. A volatile boolean
     variable (gil_drop_request) is used for that purpose, and is checked
     at every turn of the eval loop. That variable is set after a wait of
     `interval` microseconds on `gil_cond` has timed out.

      [Actually, another volatile boolean variable (eval_breaker) is used,
       which ORs several conditions into one. Volatile booleans are
       sufficient as an inter-thread signalling means, since Python is only
       run on cache-coherent architectures.]

   - A thread wanting to take the GIL will first let a given amount of time
     pass (`interval` microseconds) before setting gil_drop_request. This
     encourages a defined switching period, but doesn't enforce it since
     opcodes can take an arbitrary time to execute.

     The `interval` value is available for the user to read and modify
     using the Python API `sys.{get,set}switchinterval()`.

   - When a thread releases the GIL and gil_drop_request is set, that thread
     ensures that another GIL-awaiting thread gets scheduled.
     It does so by waiting on a condition variable (switch_cond) until
     the value of last_holder is changed to something other than its
     own thread state pointer, indicating that another thread was able to
     take the GIL.

     This is meant to prohibit the latency-adverse behaviour on multi-core
     machines where one thread would speculatively release the GIL, but still
     run and end up being the first to re-acquire it, making the "timeslices"
     much longer than expected.
     (Note: this mechanism is only compiled in when FORCE_SWITCHING is
     defined.)
*/

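/* Illustrative sketch (not part of the implementation): from Python code,
   the switch interval discussed above can be inspected and tuned like so:

       import sys
       print(sys.getswitchinterval())   # 0.005 seconds by default
       sys.setswitchinterval(0.001)     # request more frequent switching

   Note that the Python-level API works in float seconds, while the
   internal `interval` field stores microseconds (default 5000). */
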
// Atomically copy the bits indicated by mask between two values.
static inline void
copy_eval_breaker_bits(uintptr_t *from, uintptr_t *to, uintptr_t mask)
{
    uintptr_t from_bits = _Py_atomic_load_uintptr_relaxed(from) & mask;
    uintptr_t old_value = _Py_atomic_load_uintptr_relaxed(to);
    uintptr_t to_bits = old_value & mask;
    if (from_bits == to_bits) {
        return;
    }

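    /* The compare-exchange below can fail if another thread concurrently
       updates bits outside `mask`; on failure it reloads `old_value`, so
       the loop recomputes `new_value` and retries until the store lands
       without clobbering those concurrent updates. */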
    uintptr_t new_value;
    do {
        new_value = (old_value & ~mask) | from_bits;
    } while (!_Py_atomic_compare_exchange_uintptr(to, &old_value, new_value));
}

// When attaching a thread, set the global instrumentation version and
// _PY_CALLS_TO_DO_BIT from the current state of the interpreter.
static inline void
update_eval_breaker_for_thread(PyInterpreterState *interp, PyThreadState *tstate)
{
#ifdef Py_GIL_DISABLED
    // Free-threaded builds eagerly update the eval_breaker on *all* threads as
    // needed, so this function doesn't apply.
    return;
#endif

    int32_t npending = _Py_atomic_load_int32_relaxed(
        &interp->ceval.pending.npending);
    if (npending) {
        _Py_set_eval_breaker_bit(tstate, _PY_CALLS_TO_DO_BIT);
    }
    else if (_Py_IsMainThread()) {
        npending = _Py_atomic_load_int32_relaxed(
            &_PyRuntime.ceval.pending_mainthread.npending);
        if (npending) {
            _Py_set_eval_breaker_bit(tstate, _PY_CALLS_TO_DO_BIT);
        }
    }

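    /* Note: tstate->eval_breaker packs the per-thread event flags into its
       low bits (_PY_EVAL_EVENTS_MASK) and the instrumentation version into
       the remaining high bits; the copy below therefore masks with
       ~_PY_EVAL_EVENTS_MASK so that only the version bits are touched. */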
    // _PY_CALLS_TO_DO_BIT was derived from other state above, so the only
    // bits we copy from our interpreter's state are the instrumentation
    // version bits.
    copy_eval_breaker_bits(&interp->ceval.instrumentation_version,
                           &tstate->eval_breaker,
                           ~_PY_EVAL_EVENTS_MASK);
}

/*
 * Implementation of the Global Interpreter Lock (GIL).
 */

#include <stdlib.h>
#include <errno.h>

#include "condvar.h"

#define MUTEX_INIT(mut) \
    if (PyMUTEX_INIT(&(mut))) { \
        Py_FatalError("PyMUTEX_INIT(" #mut ") failed"); };
#define MUTEX_FINI(mut) \
    if (PyMUTEX_FINI(&(mut))) { \
        Py_FatalError("PyMUTEX_FINI(" #mut ") failed"); };
#define MUTEX_LOCK(mut) \
    if (PyMUTEX_LOCK(&(mut))) { \
        Py_FatalError("PyMUTEX_LOCK(" #mut ") failed"); };
#define MUTEX_UNLOCK(mut) \
    if (PyMUTEX_UNLOCK(&(mut))) { \
        Py_FatalError("PyMUTEX_UNLOCK(" #mut ") failed"); };

#define COND_INIT(cond) \
    if (PyCOND_INIT(&(cond))) { \
        Py_FatalError("PyCOND_INIT(" #cond ") failed"); };
#define COND_FINI(cond) \
    if (PyCOND_FINI(&(cond))) { \
        Py_FatalError("PyCOND_FINI(" #cond ") failed"); };
#define COND_SIGNAL(cond) \
    if (PyCOND_SIGNAL(&(cond))) { \
        Py_FatalError("PyCOND_SIGNAL(" #cond ") failed"); };
#define COND_WAIT(cond, mut) \
    if (PyCOND_WAIT(&(cond), &(mut))) { \
        Py_FatalError("PyCOND_WAIT(" #cond ") failed"); };
#define COND_TIMED_WAIT(cond, mut, microseconds, timeout_result) \
    { \
        int r = PyCOND_TIMEDWAIT(&(cond), &(mut), (microseconds)); \
        if (r < 0) \
            Py_FatalError("PyCOND_TIMEDWAIT(" #cond ") failed"); \
        if (r) /* 1 == timeout, 2 == impl. can't say, so assume timeout */ \
            timeout_result = 1; \
        else \
            timeout_result = 0; \
    } \


#define DEFAULT_INTERVAL 5000

static void _gil_initialize(struct _gil_runtime_state *gil)
{
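    /* -1 marks the GIL as not yet created; gil_created() below reports it
       as such until create_gil() stores 0. The interval is a count of
       microseconds (DEFAULT_INTERVAL is 5 ms). */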
    gil->locked = -1;
    gil->interval = DEFAULT_INTERVAL;
}

static int gil_created(struct _gil_runtime_state *gil)
{
    if (gil == NULL) {
        return 0;
    }
    return (_Py_atomic_load_int_acquire(&gil->locked) >= 0);
}

static void create_gil(struct _gil_runtime_state *gil)
{
    MUTEX_INIT(gil->mutex);
#ifdef FORCE_SWITCHING
    MUTEX_INIT(gil->switch_mutex);
#endif
    COND_INIT(gil->cond);
#ifdef FORCE_SWITCHING
    COND_INIT(gil->switch_cond);
#endif
    _Py_atomic_store_ptr_relaxed(&gil->last_holder, 0);
    _Py_ANNOTATE_RWLOCK_CREATE(&gil->locked);
    _Py_atomic_store_int_release(&gil->locked, 0);
}

static void destroy_gil(struct _gil_runtime_state *gil)
{
    /* some pthread-like implementations tie the mutex to the cond
     * and must have the cond destroyed first.
     */
    COND_FINI(gil->cond);
    MUTEX_FINI(gil->mutex);
#ifdef FORCE_SWITCHING
    COND_FINI(gil->switch_cond);
    MUTEX_FINI(gil->switch_mutex);
#endif
    _Py_atomic_store_int_release(&gil->locked, -1);
    _Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked);
}

#ifdef HAVE_FORK
static void recreate_gil(struct _gil_runtime_state *gil)
{
    _Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked);
    /* XXX should we destroy the old OS resources here? */
    create_gil(gil);
}
#endif

static inline void
drop_gil_impl(PyThreadState *tstate, struct _gil_runtime_state *gil)
{
    MUTEX_LOCK(gil->mutex);
    _Py_ANNOTATE_RWLOCK_RELEASED(&gil->locked, /*is_write=*/1);
    _Py_atomic_store_int_relaxed(&gil->locked, 0);
    if (tstate != NULL) {
        tstate->holds_gil = 0;
    }
    COND_SIGNAL(gil->cond);
    MUTEX_UNLOCK(gil->mutex);
}

static void
drop_gil(PyInterpreterState *interp, PyThreadState *tstate, int final_release)
{
    struct _ceval_state *ceval = &interp->ceval;
    /* If final_release is true, the caller is indicating that we're releasing
       the GIL for the last time in this thread. This is particularly
       relevant when the current thread state is finalizing or its
       interpreter is finalizing (either may be in an inconsistent
       state). In that case the current thread will definitely
       never try to acquire the GIL again. */
    // XXX It may be more correct to check tstate->_status.finalizing.
    // XXX assert(final_release || !tstate->_status.cleared);

    assert(final_release || tstate != NULL);
    struct _gil_runtime_state *gil = ceval->gil;
#ifdef Py_GIL_DISABLED
    // Check if we have the GIL before dropping it. tstate will be NULL if
    // take_gil() detected that this thread has been destroyed, in which case
    // we know we have the GIL.
    if (tstate != NULL && !tstate->holds_gil) {
        return;
    }
#endif
    if (!_Py_atomic_load_int_relaxed(&gil->locked)) {
        Py_FatalError("drop_gil: GIL is not locked");
    }

    if (!final_release) {
        /* Sub-interpreter support: threads might have been switched
           under our feet using PyThreadState_Swap(). Fix the GIL last
           holder variable so that our heuristics work. */
        _Py_atomic_store_ptr_relaxed(&gil->last_holder, tstate);
    }

    drop_gil_impl(tstate, gil);

#ifdef FORCE_SWITCHING
    /* We might be releasing the GIL for the last time in this thread. In that
       case there's a possible race with tstate->interp getting deleted after
       gil->mutex is unlocked and before the following code runs, leading to a
       crash. We can use final_release to indicate the thread is done with the
       GIL, and that's the only time we might delete the interpreter. See
       https://github.com/python/cpython/issues/104341. */
    if (!final_release &&
        _Py_eval_breaker_bit_is_set(tstate, _PY_GIL_DROP_REQUEST_BIT)) {
        MUTEX_LOCK(gil->switch_mutex);
        /* Not switched yet => wait */
        if (((PyThreadState*)_Py_atomic_load_ptr_relaxed(&gil->last_holder)) == tstate)
        {
            assert(_PyThreadState_CheckConsistency(tstate));
            _Py_unset_eval_breaker_bit(tstate, _PY_GIL_DROP_REQUEST_BIT);
            /* NOTE: if COND_WAIT does not atomically start waiting when
               releasing the mutex, another thread can run through, take
               the GIL and drop it again, and reset the condition
               before we even had a chance to wait for it. */
            COND_WAIT(gil->switch_cond, gil->switch_mutex);
        }
        MUTEX_UNLOCK(gil->switch_mutex);
    }
#endif
}


/* Take the GIL.

   The function saves errno at entry and restores its value at exit.
   It may hang rather than return if the interpreter has been finalized.

   tstate must be non-NULL. */
static void
take_gil(PyThreadState *tstate)
{
    int err = errno;

    assert(tstate != NULL);
    /* We shouldn't be using a thread state that isn't viable any more. */
    // XXX It may be more correct to check tstate->_status.finalizing.
    // XXX assert(!tstate->_status.cleared);

    if (_PyThreadState_MustExit(tstate)) {
        /* bpo-39877: If Py_Finalize() has been called and tstate is not the
           thread which called Py_Finalize(), this thread cannot continue.

           This code path can be reached by a daemon thread after Py_Finalize()
           completes.

           This used to call a *thread_exit API, but that was not safe as it
           lacks stack unwinding and local variable destruction important to
           C++. gh-87135: The best that can be done is to hang the thread as
           the public APIs calling this have no error reporting mechanism (!).
           */
        _PyThreadState_HangThread(tstate);
    }

    assert(_PyThreadState_CheckConsistency(tstate));
    PyInterpreterState *interp = tstate->interp;
    struct _gil_runtime_state *gil = interp->ceval.gil;
#ifdef Py_GIL_DISABLED
    if (!_Py_atomic_load_int_relaxed(&gil->enabled)) {
        return;
    }
#endif

    /* Check that _PyEval_InitThreads() was called to create the lock */
    assert(gil_created(gil));

    MUTEX_LOCK(gil->mutex);

    int drop_requested = 0;
    while (_Py_atomic_load_int_relaxed(&gil->locked)) {
        unsigned long saved_switchnum = gil->switch_number;

        unsigned long interval = _Py_atomic_load_ulong_relaxed(&gil->interval);
        if (interval < 1) {
            interval = 1;
        }
        int timed_out = 0;
        COND_TIMED_WAIT(gil->cond, gil->mutex, interval, timed_out);

        /* If we timed out and no switch occurred in the meantime, it is time
           to ask the GIL-holding thread to drop it. */
        if (timed_out &&
            _Py_atomic_load_int_relaxed(&gil->locked) &&
            gil->switch_number == saved_switchnum)
        {
            PyThreadState *holder_tstate =
                (PyThreadState*)_Py_atomic_load_ptr_relaxed(&gil->last_holder);
            if (_PyThreadState_MustExit(tstate)) {
                MUTEX_UNLOCK(gil->mutex);
                // gh-96387: If this loop set a drop request in a previous
                // iteration, reset the request. Otherwise, drop_gil() can
                // block forever waiting for the thread which exited. Drop
                // requests made by other threads are also reset: those
                // threads may have to issue a drop request again (iterating
                // one more time).
                if (drop_requested) {
                    _Py_unset_eval_breaker_bit(holder_tstate, _PY_GIL_DROP_REQUEST_BIT);
                }
                // gh-87135: hang the thread as *thread_exit() is not a safe
                // API. It lacks stack unwind and local variable destruction.
                _PyThreadState_HangThread(tstate);
            }
            assert(_PyThreadState_CheckConsistency(tstate));

            _Py_set_eval_breaker_bit(holder_tstate, _PY_GIL_DROP_REQUEST_BIT);
            drop_requested = 1;
        }
    }

#ifdef Py_GIL_DISABLED
    if (!_Py_atomic_load_int_relaxed(&gil->enabled)) {
        // Another thread disabled the GIL between our check above and
        // now. Don't take the GIL, signal any other waiting threads, and
        // return.
        COND_SIGNAL(gil->cond);
        MUTEX_UNLOCK(gil->mutex);
        return;
    }
#endif

#ifdef FORCE_SWITCHING
    /* This mutex must be taken before modifying gil->last_holder:
       see drop_gil(). */
    MUTEX_LOCK(gil->switch_mutex);
#endif
    /* We now hold the GIL */
    _Py_atomic_store_int_relaxed(&gil->locked, 1);
    _Py_ANNOTATE_RWLOCK_ACQUIRED(&gil->locked, /*is_write=*/1);

    if (tstate != (PyThreadState*)_Py_atomic_load_ptr_relaxed(&gil->last_holder)) {
        _Py_atomic_store_ptr_relaxed(&gil->last_holder, tstate);
        ++gil->switch_number;
    }

#ifdef FORCE_SWITCHING
    COND_SIGNAL(gil->switch_cond);
    MUTEX_UNLOCK(gil->switch_mutex);
#endif

    if (_PyThreadState_MustExit(tstate)) {
        /* bpo-36475: If Py_Finalize() has been called and tstate is not
           the thread which called Py_Finalize(), gh-87135: hang the
           thread.

           This code path can be reached by a daemon thread which was waiting
           in take_gil() while the main thread called
           wait_for_thread_shutdown() from Py_Finalize(). */
        MUTEX_UNLOCK(gil->mutex);
        /* tstate could be a dangling pointer, so don't pass it to
           drop_gil(). */
        drop_gil(interp, NULL, 1);
        _PyThreadState_HangThread(tstate);
    }
    assert(_PyThreadState_CheckConsistency(tstate));

    tstate->holds_gil = 1;
    _Py_unset_eval_breaker_bit(tstate, _PY_GIL_DROP_REQUEST_BIT);
    update_eval_breaker_for_thread(interp, tstate);

    MUTEX_UNLOCK(gil->mutex);

    errno = err;
    return;
}

void _PyEval_SetSwitchInterval(unsigned long microseconds)
{
    PyInterpreterState *interp = _PyInterpreterState_GET();
    struct _gil_runtime_state *gil = interp->ceval.gil;
    assert(gil != NULL);
    _Py_atomic_store_ulong_relaxed(&gil->interval, microseconds);
}

unsigned long _PyEval_GetSwitchInterval(void)
{
    PyInterpreterState *interp = _PyInterpreterState_GET();
    struct _gil_runtime_state *gil = interp->ceval.gil;
    assert(gil != NULL);
    return _Py_atomic_load_ulong_relaxed(&gil->interval);
}
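
/* These two functions back sys.{get,set}switchinterval(); the conversion
   between the float seconds exposed to Python and the unsigned microsecond
   count stored in gil->interval happens in the sys module. */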


int
_PyEval_ThreadsInitialized(void)
{
    /* XXX This is only needed for an assert in PyGILState_Ensure(),
     * which currently does not work with subinterpreters.
     * Thus we only use the main interpreter. */
    PyInterpreterState *interp = _PyInterpreterState_Main();
    if (interp == NULL) {
        return 0;
    }
    struct _gil_runtime_state *gil = interp->ceval.gil;
    return gil_created(gil);
}

// Function removed in the Python 3.13 API but kept in the stable ABI.
PyAPI_FUNC(int)
PyEval_ThreadsInitialized(void)
{
    return _PyEval_ThreadsInitialized();
}

#ifndef NDEBUG
static inline int
current_thread_holds_gil(struct _gil_runtime_state *gil, PyThreadState *tstate)
{
    int holds_gil = tstate->holds_gil;

    // holds_gil is the source of truth; check that last_holder and gil->locked
    // are consistent with it.
    int locked = _Py_atomic_load_int_relaxed(&gil->locked);
    int is_last_holder =
        ((PyThreadState*)_Py_atomic_load_ptr_relaxed(&gil->last_holder)) == tstate;
    assert(!holds_gil || locked);
    assert(!holds_gil || is_last_holder);

    return holds_gil;
}
#endif

static void
init_shared_gil(PyInterpreterState *interp, struct _gil_runtime_state *gil)
{
    assert(gil_created(gil));
    interp->ceval.gil = gil;
    interp->ceval.own_gil = 0;
}

static void
init_own_gil(PyInterpreterState *interp, struct _gil_runtime_state *gil)
{
    assert(!gil_created(gil));
#ifdef Py_GIL_DISABLED
    const PyConfig *config = _PyInterpreterState_GetConfig(interp);
    gil->enabled = config->enable_gil == _PyConfig_GIL_ENABLE ? INT_MAX : 0;
#endif
    create_gil(gil);
    assert(gil_created(gil));
    interp->ceval.gil = gil;
    interp->ceval.own_gil = 1;
}

void
_PyEval_InitGIL(PyThreadState *tstate, int own_gil)
{
    assert(tstate->interp->ceval.gil == NULL);
    if (!own_gil) {
        /* The interpreter will share the main interpreter's GIL instead. */
        PyInterpreterState *main_interp = _PyInterpreterState_Main();
        assert(tstate->interp != main_interp);
        struct _gil_runtime_state *gil = main_interp->ceval.gil;
        init_shared_gil(tstate->interp, gil);
        assert(!current_thread_holds_gil(gil, tstate));
    }
    else {
        PyThread_init_thread();
        init_own_gil(tstate->interp, &tstate->interp->_gil);
    }

    // Lock the GIL and mark the current thread as attached.
    _PyThreadState_Attach(tstate);
}

void
_PyEval_FiniGIL(PyInterpreterState *interp)
{
    struct _gil_runtime_state *gil = interp->ceval.gil;
    if (gil == NULL) {
        /* It was already finalized (or hasn't been initialized yet). */
        assert(!interp->ceval.own_gil);
        return;
    }
    else if (!interp->ceval.own_gil) {
#ifdef Py_DEBUG
        PyInterpreterState *main_interp = _PyInterpreterState_Main();
        assert(main_interp != NULL && interp != main_interp);
        assert(interp->ceval.gil == main_interp->ceval.gil);
#endif
        interp->ceval.gil = NULL;
        return;
    }

    if (!gil_created(gil)) {
        /* First Py_InitializeFromConfig() call: the GIL doesn't exist
           yet: do nothing. */
        return;
    }

    destroy_gil(gil);
    assert(!gil_created(gil));
    interp->ceval.gil = NULL;
}

void
PyEval_InitThreads(void)
{
    /* Do nothing: kept for backward compatibility */
}

void
_PyEval_Fini(void)
{
#ifdef Py_STATS
    _Py_PrintSpecializationStats(1);
#endif
}

// Function removed in the Python 3.13 API but kept in the stable ABI.
PyAPI_FUNC(void)
PyEval_AcquireLock(void)
{
    PyThreadState *tstate = _PyThreadState_GET();
    _Py_EnsureTstateNotNULL(tstate);

    take_gil(tstate);
}

// Function removed in the Python 3.13 API but kept in the stable ABI.
PyAPI_FUNC(void)
PyEval_ReleaseLock(void)
{
    PyThreadState *tstate = _PyThreadState_GET();
    /* This function must succeed when the current thread state is NULL.
       We therefore avoid PyThreadState_Get() which dumps a fatal error
       in debug mode. */
    drop_gil(tstate->interp, tstate, 0);
}

void
_PyEval_AcquireLock(PyThreadState *tstate)
{
    _Py_EnsureTstateNotNULL(tstate);
    take_gil(tstate);
}

void
_PyEval_ReleaseLock(PyInterpreterState *interp,
                    PyThreadState *tstate,
                    int final_release)
{
    assert(tstate != NULL);
    assert(tstate->interp == interp);
    drop_gil(interp, tstate, final_release);
}

void
PyEval_AcquireThread(PyThreadState *tstate)
{
    _Py_EnsureTstateNotNULL(tstate);
    _PyThreadState_Attach(tstate);
}

void
PyEval_ReleaseThread(PyThreadState *tstate)
{
    assert(_PyThreadState_CheckConsistency(tstate));
    _PyThreadState_Detach(tstate);
}
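
/* For threads not created by Python, the higher-level PyGILState_Ensure() /
   PyGILState_Release() API (see pystate.c) is the usual way to obtain a
   thread state and the GIL before calling into the interpreter; the
   functions above are among the lower-level pieces it builds on. */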

#ifdef HAVE_FORK
/* This function is called from PyOS_AfterFork_Child to re-initialize the
   GIL and pending calls lock. */
PyStatus
_PyEval_ReInitThreads(PyThreadState *tstate)
{
    assert(tstate->interp == _PyInterpreterState_Main());

    struct _gil_runtime_state *gil = tstate->interp->ceval.gil;
    if (!gil_created(gil)) {
        return _PyStatus_OK();
    }
    recreate_gil(gil);

    take_gil(tstate);

    struct _pending_calls *pending = &tstate->interp->ceval.pending;
    _PyMutex_at_fork_reinit(&pending->mutex);

    return _PyStatus_OK();
}
#endif

PyThreadState *
PyEval_SaveThread(void)
{
    PyThreadState *tstate = _PyThreadState_GET();
    _PyThreadState_Detach(tstate);
    return tstate;
}

void
PyEval_RestoreThread(PyThreadState *tstate)
{
#ifdef MS_WINDOWS
    int err = GetLastError();
#endif

    _Py_EnsureTstateNotNULL(tstate);
    _PyThreadState_Attach(tstate);

#ifdef MS_WINDOWS
    SetLastError(err);
#endif
}
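
/* Typical extension-module usage of this pair, for reference: the
   Py_BEGIN_ALLOW_THREADS / Py_END_ALLOW_THREADS macros expand to calls to
   the two functions above:

       Py_BEGIN_ALLOW_THREADS    // PyThreadState *_save = PyEval_SaveThread();
       do_blocking_io();         // runs without holding the GIL
       Py_END_ALLOW_THREADS      // PyEval_RestoreThread(_save);

   (do_blocking_io() is a placeholder for any code that does not touch
   Python objects.) */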


void
_PyEval_SignalReceived(void)
{
    _Py_set_eval_breaker_bit(_PyRuntime.main_tstate, _PY_SIGNALS_PENDING_BIT);
}


#ifndef Py_GIL_DISABLED
static void
signal_active_thread(PyInterpreterState *interp, uintptr_t bit)
{
    struct _gil_runtime_state *gil = interp->ceval.gil;

    // If a thread from the targeted interpreter is holding the GIL, signal
    // that thread. Otherwise, the next thread to run from the targeted
    // interpreter will have its bit set as part of taking the GIL.
    MUTEX_LOCK(gil->mutex);
    if (_Py_atomic_load_int_relaxed(&gil->locked)) {
        PyThreadState *holder = (PyThreadState*)_Py_atomic_load_ptr_relaxed(&gil->last_holder);
        if (holder->interp == interp) {
            _Py_set_eval_breaker_bit(holder, bit);
        }
    }
    MUTEX_UNLOCK(gil->mutex);
}
#endif


/* Mechanism whereby asynchronously executing callbacks (e.g. UNIX
   signal handlers or Mac I/O completion routines) can schedule calls
   to a function to be called synchronously.
   The synchronous function is called with one void* argument.
   It should return 0 for success or -1 for failure -- failure should
   be accompanied by an exception.

   If registration succeeds, the registration function returns 0; if it
   fails (e.g. due to too many pending calls) it returns -1 (without
   setting an exception condition).

   Note that because registration may occur from within signal handlers,
   or other asynchronous events, calling malloc() is unsafe!

   Any thread can schedule pending calls, but only the main thread
   will execute them.
   There is no facility to schedule calls to a particular thread, but
   that should be easy to change, should that ever be required. In
   that case, the static variables here should go into the Python
   threadstate.
*/

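/* Illustrative sketch (not part of this file) of scheduling work from an
   asynchronous context such as a C signal handler or a non-Python thread:

       static int
       notify_main_thread(void *arg)
       {
           // Runs later in the main thread, with the GIL held.
           // Return 0 on success, -1 with an exception set on failure.
           return PyErr_WarnEx(PyExc_RuntimeWarning, (const char *)arg, 1);
       }

       // Safe to call without the GIL; note that no allocation happens
       // here, per the warning about malloc() above.
       if (Py_AddPendingCall(notify_main_thread, (void *)"event received") < 0) {
           // The queue was full; the caller may retry later.
       }
*/
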
/* Push one item onto the queue while holding the lock. */
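/* The queue is a fixed-size ring buffer: `first` indexes the oldest entry,
   `next` the slot for the upcoming push, and the atomic `npending` counts
   entries, so the full and empty states can be told apart without wasting
   a slot. */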
static int
_push_pending_call(struct _pending_calls *pending,
                   _Py_pending_call_func func, void *arg, int flags)
{
    if (pending->npending == pending->max) {
        return _Py_ADD_PENDING_FULL;
    }
    assert(pending->npending < pending->max);

    int i = pending->next;
    assert(pending->calls[i].func == NULL);

    pending->calls[i].func = func;
    pending->calls[i].arg = arg;
    pending->calls[i].flags = flags;

    assert(pending->npending < PENDINGCALLSARRAYSIZE);
    _Py_atomic_add_int32(&pending->npending, 1);

    pending->next = (i + 1) % PENDINGCALLSARRAYSIZE;
    assert(pending->next != pending->first
           || pending->npending == pending->max);

    return _Py_ADD_PENDING_SUCCESS;
}

static int
_next_pending_call(struct _pending_calls *pending,
                   int (**func)(void *), void **arg, int *flags)
{
    int i = pending->first;
    if (pending->npending == 0) {
        /* Queue empty */
        assert(i == pending->next);
        assert(pending->calls[i].func == NULL);
        return -1;
    }
    *func = pending->calls[i].func;
    *arg = pending->calls[i].arg;
    *flags = pending->calls[i].flags;
    return i;
}

/* Pop one item off the queue while holding the lock. */
static void
_pop_pending_call(struct _pending_calls *pending,
                  int (**func)(void *), void **arg, int *flags)
{
    int i = _next_pending_call(pending, func, arg, flags);
    if (i >= 0) {
        pending->calls[i] = (struct _pending_call){0};
        pending->first = (i + 1) % PENDINGCALLSARRAYSIZE;
        assert(pending->npending > 0);
        _Py_atomic_add_int32(&pending->npending, -1);
    }
}

/* This implementation is thread-safe. It allows calls to be scheduled
   from any thread, and even from an executing callback.
 */

_Py_add_pending_call_result
_PyEval_AddPendingCall(PyInterpreterState *interp,
                       _Py_pending_call_func func, void *arg, int flags)
{
    struct _pending_calls *pending = &interp->ceval.pending;
    int main_only = (flags & _Py_PENDING_MAINTHREADONLY) != 0;
    if (main_only) {
        /* The main thread only exists in the main interpreter. */
        assert(_Py_IsMainInterpreter(interp));
        pending = &_PyRuntime.ceval.pending_mainthread;
    }

    PyMutex_Lock(&pending->mutex);
    _Py_add_pending_call_result result =
        _push_pending_call(pending, func, arg, flags);
    PyMutex_Unlock(&pending->mutex);

    if (main_only) {
        _Py_set_eval_breaker_bit(_PyRuntime.main_tstate, _PY_CALLS_TO_DO_BIT);
    }
    else {
#ifdef Py_GIL_DISABLED
        _Py_set_eval_breaker_bit_all(interp, _PY_CALLS_TO_DO_BIT);
#else
        signal_active_thread(interp, _PY_CALLS_TO_DO_BIT);
#endif
    }

    return result;
}

int
Py_AddPendingCall(_Py_pending_call_func func, void *arg)
{
    /* Legacy users of this API will continue to target the main thread
       (of the main interpreter). */
    PyInterpreterState *interp = _PyInterpreterState_Main();
    _Py_add_pending_call_result r =
        _PyEval_AddPendingCall(interp, func, arg, _Py_PENDING_MAINTHREADONLY);
    if (r == _Py_ADD_PENDING_FULL) {
        return -1;
    }
    else {
        assert(r == _Py_ADD_PENDING_SUCCESS);
        return 0;
    }
}

static int
handle_signals(PyThreadState *tstate)
{
    assert(_PyThreadState_CheckConsistency(tstate));
    _Py_unset_eval_breaker_bit(tstate, _PY_SIGNALS_PENDING_BIT);
    if (!_Py_ThreadCanHandleSignals(tstate->interp)) {
        return 0;
    }
    if (_PyErr_CheckSignalsTstate(tstate) < 0) {
        /* On failure, re-schedule a call to handle_signals(). */
        _Py_set_eval_breaker_bit(tstate, _PY_SIGNALS_PENDING_BIT);
        return -1;
    }
    return 0;
}

static int
_make_pending_calls(struct _pending_calls *pending, int32_t *p_npending)
{
    int res = 0;
    int32_t npending = -1;

    assert(sizeof(pending->max) <= sizeof(size_t)
           && ((size_t)pending->max) <= Py_ARRAY_LENGTH(pending->calls));
    int32_t maxloop = pending->maxloop;
    if (maxloop == 0) {
        maxloop = pending->max;
    }
    assert(maxloop > 0 && maxloop <= pending->max);

    /* perform a bounded number of calls, in case of recursion */
    for (int i=0; i<maxloop; i++) {
        _Py_pending_call_func func = NULL;
        void *arg = NULL;
        int flags = 0;

        /* pop one item off the queue while holding the lock */
        PyMutex_Lock(&pending->mutex);
        _pop_pending_call(pending, &func, &arg, &flags);
        npending = pending->npending;
        PyMutex_Unlock(&pending->mutex);

        /* Check if there are any more pending calls. */
        if (func == NULL) {
            assert(npending == 0);
            break;
        }

        /* having released the lock, perform the callback */
        res = func(arg);
        if ((flags & _Py_PENDING_RAWFREE) && arg != NULL) {
            PyMem_RawFree(arg);
        }
        if (res != 0) {
            res = -1;
            goto finally;
        }
    }

finally:
    *p_npending = npending;
    return res;
}

static void
signal_pending_calls(PyThreadState *tstate, PyInterpreterState *interp)
{
#ifdef Py_GIL_DISABLED
    _Py_set_eval_breaker_bit_all(interp, _PY_CALLS_TO_DO_BIT);
#else
    _Py_set_eval_breaker_bit(tstate, _PY_CALLS_TO_DO_BIT);
#endif
}

static void
unsignal_pending_calls(PyThreadState *tstate, PyInterpreterState *interp)
{
#ifdef Py_GIL_DISABLED
    _Py_unset_eval_breaker_bit_all(interp, _PY_CALLS_TO_DO_BIT);
#else
    _Py_unset_eval_breaker_bit(tstate, _PY_CALLS_TO_DO_BIT);
#endif
}

static void
clear_pending_handling_thread(struct _pending_calls *pending)
{
#ifdef Py_GIL_DISABLED
    PyMutex_Lock(&pending->mutex);
    pending->handling_thread = NULL;
    PyMutex_Unlock(&pending->mutex);
#else
    pending->handling_thread = NULL;
#endif
}

static int
make_pending_calls(PyThreadState *tstate)
{
    PyInterpreterState *interp = tstate->interp;
    struct _pending_calls *pending = &interp->ceval.pending;
    struct _pending_calls *pending_main = &_PyRuntime.ceval.pending_mainthread;

    /* Only one thread (per interpreter) may run the pending calls
       at once. In the same way, we don't do recursive pending calls. */
    PyMutex_Lock(&pending->mutex);
    if (pending->handling_thread != NULL) {
        /* A pending call was added after another thread was already
           handling the pending calls (and had already "unsignaled").
           Once that thread is done, it may have taken care of all the
           pending calls, or there might be some still waiting.
           To avoid all threads constantly stopping on the eval breaker,
           we clear the bit for this thread and make sure it is set
           for the thread currently handling the pending call. */
        _Py_set_eval_breaker_bit(pending->handling_thread, _PY_CALLS_TO_DO_BIT);
        _Py_unset_eval_breaker_bit(tstate, _PY_CALLS_TO_DO_BIT);
        PyMutex_Unlock(&pending->mutex);
        return 0;
    }
    pending->handling_thread = tstate;
    PyMutex_Unlock(&pending->mutex);

    /* unsignal before starting to call callbacks, so that any callback
       added in-between re-signals */
    unsignal_pending_calls(tstate, interp);

    int32_t npending;
    if (_make_pending_calls(pending, &npending) != 0) {
        clear_pending_handling_thread(pending);
        /* There might not be more calls to make, but we play it safe. */
        signal_pending_calls(tstate, interp);
        return -1;
    }
    if (npending > 0) {
        /* We hit pending->maxloop. */
        signal_pending_calls(tstate, interp);
    }

    if (_Py_IsMainThread() && _Py_IsMainInterpreter(interp)) {
        if (_make_pending_calls(pending_main, &npending) != 0) {
            clear_pending_handling_thread(pending);
            /* There might not be more calls to make, but we play it safe. */
            signal_pending_calls(tstate, interp);
            return -1;
        }
        if (npending > 0) {
            /* We hit pending_main->maxloop. */
            signal_pending_calls(tstate, interp);
        }
    }

    clear_pending_handling_thread(pending);
    return 0;
}


void
_Py_set_eval_breaker_bit_all(PyInterpreterState *interp, uintptr_t bit)
{
    _Py_FOR_EACH_TSTATE_BEGIN(interp, tstate) {
        _Py_set_eval_breaker_bit(tstate, bit);
    }
    _Py_FOR_EACH_TSTATE_END(interp);
}

void
_Py_unset_eval_breaker_bit_all(PyInterpreterState *interp, uintptr_t bit)
{
    _Py_FOR_EACH_TSTATE_BEGIN(interp, tstate) {
        _Py_unset_eval_breaker_bit(tstate, bit);
    }
    _Py_FOR_EACH_TSTATE_END(interp);
}

void
_Py_FinishPendingCalls(PyThreadState *tstate)
{
    _Py_AssertHoldsTstate();
    assert(_PyThreadState_CheckConsistency(tstate));

    struct _pending_calls *pending = &tstate->interp->ceval.pending;
    struct _pending_calls *pending_main =
        _Py_IsMainThread() && _Py_IsMainInterpreter(tstate->interp)
        ? &_PyRuntime.ceval.pending_mainthread
        : NULL;
    /* make_pending_calls() may return early without making all pending
       calls, so we keep trying until we're actually done. */
    int32_t npending;
#ifndef NDEBUG
    int32_t npending_prev = INT32_MAX;
#endif
    do {
        if (make_pending_calls(tstate) < 0) {
            PyObject *exc = _PyErr_GetRaisedException(tstate);
            PyErr_BadInternalCall();
            _PyErr_ChainExceptions1(exc);
            _PyErr_Print(tstate);
        }

        npending = _Py_atomic_load_int32_relaxed(&pending->npending);
        if (pending_main != NULL) {
            npending += _Py_atomic_load_int32_relaxed(&pending_main->npending);
        }
#ifndef NDEBUG
        assert(npending_prev > npending);
        npending_prev = npending;
#endif
    } while (npending > 0);
}

int
_PyEval_MakePendingCalls(PyThreadState *tstate)
{
    int res;

    if (_Py_IsMainThread() && _Py_IsMainInterpreter(tstate->interp)) {
        /* The Python signal handler doesn't really queue a callback:
           it only signals that a signal was received,
           see _PyEval_SignalReceived(). */
        res = handle_signals(tstate);
        if (res != 0) {
            return res;
        }
    }

    res = make_pending_calls(tstate);
    if (res != 0) {
        return res;
    }

    return 0;
}

/* Py_MakePendingCalls() is a simple wrapper for the sake
   of backward-compatibility. */
int
Py_MakePendingCalls(void)
{
    _Py_AssertHoldsTstate();

    PyThreadState *tstate = _PyThreadState_GET();
    assert(_PyThreadState_CheckConsistency(tstate));

    /* Only execute pending calls on the main thread. */
    if (!_Py_IsMainThread() || !_Py_IsMainInterpreter(tstate->interp)) {
        return 0;
    }
    return _PyEval_MakePendingCalls(tstate);
}

void
_PyEval_InitState(PyInterpreterState *interp)
{
    _gil_initialize(&interp->_gil);
}

#ifdef Py_GIL_DISABLED
int
_PyEval_EnableGILTransient(PyThreadState *tstate)
{
    const PyConfig *config = _PyInterpreterState_GetConfig(tstate->interp);
    if (config->enable_gil != _PyConfig_GIL_DEFAULT) {
        return 0;
    }
    struct _gil_runtime_state *gil = tstate->interp->ceval.gil;

    int enabled = _Py_atomic_load_int_relaxed(&gil->enabled);
    if (enabled == INT_MAX) {
        // The GIL is already enabled permanently.
        return 0;
    }
    if (enabled == INT_MAX - 1) {
        Py_FatalError("Too many transient requests to enable the GIL");
    }
    if (enabled > 0) {
        // If enabled is nonzero, we know we hold the GIL. This means that no
        // other threads are attached, and nobody else can be concurrently
        // mutating it.
        _Py_atomic_store_int_relaxed(&gil->enabled, enabled + 1);
        return 0;
    }

    // Enabling the GIL changes what it means to be an "attached" thread. To
    // safely make this transition, we:
    // 1. Detach the current thread.
    // 2. Stop the world to detach (and suspend) all other threads.
    // 3. Enable the GIL, if nobody else did between our check above and when
    //    our stop-the-world begins.
    // 4. Start the world.
    // 5. Attach the current thread. Other threads may attach and hold the GIL
    //    before this thread, which is harmless.
    _PyThreadState_Detach(tstate);

    // This could be an interpreter-local stop-the-world in situations where we
    // know that this interpreter's GIL is not shared, and that it won't become
    // shared before the stop-the-world begins. For now, we always stop all
    // interpreters for simplicity.
    _PyEval_StopTheWorldAll(&_PyRuntime);

    enabled = _Py_atomic_load_int_relaxed(&gil->enabled);
    int this_thread_enabled = enabled == 0;
    _Py_atomic_store_int_relaxed(&gil->enabled, enabled + 1);

    _PyEval_StartTheWorldAll(&_PyRuntime);
    _PyThreadState_Attach(tstate);

    return this_thread_enabled;
}

int
_PyEval_EnableGILPermanent(PyThreadState *tstate)
{
    const PyConfig *config = _PyInterpreterState_GetConfig(tstate->interp);
    if (config->enable_gil != _PyConfig_GIL_DEFAULT) {
        return 0;
    }

    struct _gil_runtime_state *gil = tstate->interp->ceval.gil;
    assert(current_thread_holds_gil(gil, tstate));

    int enabled = _Py_atomic_load_int_relaxed(&gil->enabled);
    if (enabled == INT_MAX) {
        return 0;
    }

    _Py_atomic_store_int_relaxed(&gil->enabled, INT_MAX);
    return 1;
}

int
_PyEval_DisableGIL(PyThreadState *tstate)
{
    const PyConfig *config = _PyInterpreterState_GetConfig(tstate->interp);
    if (config->enable_gil != _PyConfig_GIL_DEFAULT) {
        return 0;
    }

    struct _gil_runtime_state *gil = tstate->interp->ceval.gil;
    assert(current_thread_holds_gil(gil, tstate));

    int enabled = _Py_atomic_load_int_relaxed(&gil->enabled);
    if (enabled == INT_MAX) {
        return 0;
    }

    assert(enabled >= 1);
    enabled--;

    // Disabling the GIL is much simpler than enabling it, since we know we are
    // the only attached thread. Other threads may start free-threading as soon
    // as this store is complete, if it sets gil->enabled to 0.
    _Py_atomic_store_int_relaxed(&gil->enabled, enabled);

    if (enabled == 0) {
        // We're attached, so we know the GIL will remain disabled until at
        // least the next time we detach, which must be after this function
        // returns.
        //
        // Drop the GIL, which will wake up any threads waiting in take_gil()
        // and let them resume execution without the GIL.
        drop_gil_impl(tstate, gil);

        // If another thread asked us to drop the GIL, they should be
        // free-threading by now. Remove any such request so we have a clean
        // slate if/when the GIL is enabled again.
        _Py_unset_eval_breaker_bit(tstate, _PY_GIL_DROP_REQUEST_BIT);
        return 1;
    }
    return 0;
}
#endif

#if defined(Py_REMOTE_DEBUG) && defined(Py_SUPPORTS_REMOTE_DEBUG)
// Note that this function is inline to avoid creating a PLT entry
// that would be an easy target for a ROP gadget.
static inline int run_remote_debugger_source(PyObject *source)
{
    const char *str = PyBytes_AsString(source);
    if (!str) {
        return -1;
    }

    PyObject *ns = PyDict_New();
    if (!ns) {
        return -1;
    }

    PyObject *res = PyRun_String(str, Py_file_input, ns, ns);
    Py_DECREF(ns);
    if (!res) {
        return -1;
    }
    Py_DECREF(res);
    return 0;
}

// Note that this function is inline to avoid creating a PLT entry
// that would be an easy target for a ROP gadget.
static inline void run_remote_debugger_script(PyObject *path)
{
    if (0 != PySys_Audit("cpython.remote_debugger_script", "O", path)) {
        PyErr_FormatUnraisable(
            "Audit hook failed for remote debugger script %U", path);
        return;
    }

    // Open the debugger script with the open code hook, and reopen the
    // resulting file object to get a C FILE* object.
    PyObject* fileobj = PyFile_OpenCodeObject(path);
    if (!fileobj) {
        PyErr_FormatUnraisable("Can't open debugger script %U", path);
        return;
    }

    PyObject* source = PyObject_CallMethodNoArgs(fileobj, &_Py_ID(read));
    if (!source) {
        PyErr_FormatUnraisable("Error reading debugger script %U", path);
    }

    PyObject* res = PyObject_CallMethodNoArgs(fileobj, &_Py_ID(close));
    if (!res) {
        PyErr_FormatUnraisable("Error closing debugger script %U", path);
    } else {
        Py_DECREF(res);
    }
    Py_DECREF(fileobj);

    if (source) {
        if (0 != run_remote_debugger_source(source)) {
            PyErr_FormatUnraisable("Error executing debugger script %U", path);
        }
        Py_DECREF(source);
    }
}

int _PyRunRemoteDebugger(PyThreadState *tstate)
{
    const PyConfig *config = _PyInterpreterState_GetConfig(tstate->interp);
    if (config->remote_debug == 1
        && tstate->remote_debugger_support.debugger_pending_call == 1)
    {
        tstate->remote_debugger_support.debugger_pending_call = 0;

        // Immediately make a copy in case of a race with another debugger
        // process that's trying to write to the buffer. At least this way
        // we'll be internally consistent: what we audit is what we run.
        const size_t pathsz
            = sizeof(tstate->remote_debugger_support.debugger_script_path);

        char *path = PyMem_Malloc(pathsz);
        if (path) {
            // And don't assume the debugger correctly null terminated it.
            memcpy(
                path,
                tstate->remote_debugger_support.debugger_script_path,
                pathsz);
            path[pathsz - 1] = '\0';
            if (*path) {
                PyObject *path_obj = PyUnicode_DecodeFSDefault(path);
                if (path_obj == NULL) {
                    PyErr_FormatUnraisable("Can't decode debugger script");
                }
                else {
                    run_remote_debugger_script(path_obj);
                    Py_DECREF(path_obj);
                }
            }
            PyMem_Free(path);
        }
    }
    return 0;
}

#endif

/* Do periodic things, like check for signals and async I/O.
 * We need to do this reasonably frequently, but not too frequently.
 * All loops should include a check of the eval breaker.
 * We also check on return from any builtin function.
 *
 * ## More Details ##
 *
 * The eval loop (this function) normally executes the instructions
 * of a code object sequentially. However, the runtime supports a
 * number of out-of-band execution scenarios that may pause that
 * sequential execution long enough to do that out-of-band work
 * in the current thread using the current PyThreadState.
 *
 * The scenarios include:
 *
 *  - cyclic garbage collection
 *  - GIL drop requests
 *  - "async" exceptions
 *  - "pending calls"  (some only in the main thread)
 *  - signal handling (only in the main thread)
 *
 * When the need for one of the above is detected, the eval loop
 * pauses long enough to handle the detected case. Then, if doing
 * so didn't trigger an exception, the eval loop resumes executing
 * the sequential instructions.
 *
 * To make this work, the eval loop periodically checks if any
 * of the above needs to happen. The individual checks can be
 * expensive if computed each time, so a while back we switched
 * to using pre-computed, per-interpreter variables for the checks,
 * and later consolidated that to a single "eval breaker" variable
 * (now a PyInterpreterState field).
 *
 * For the longest time, the eval breaker check would happen
 * frequently, every 5 or so times through the loop, regardless
 * of what instruction ran last or what would run next. Then, in
 * early 2021 (gh-18334, commit 4958f5d), we switched to checking
 * the eval breaker less frequently, by hard-coding the check to
 * specific places in the eval loop (e.g. certain instructions).
 * The intent then was to check after returning from calls
 * and on the back edges of loops.
 *
 * In addition to being more efficient, that approach keeps
 * the eval loop from running arbitrary code between instructions
 * that don't handle that well. (See gh-74174.)
 *
 * Currently, the eval breaker check happens on back edges in
 * the control flow graph, which pretty much applies to all loops,
 * and most calls.
 * (See bytecodes.c for exact information.)
 *
 * One consequence of this approach is that it might not be obvious
 * how to force any specific thread to pick up the eval breaker,
 * or for any specific thread to not pick it up. Mostly this
 * involves judicious uses of locks and careful ordering of code,
 * while avoiding code that might trigger the eval breaker
 * until so desired.
 */
int
_Py_HandlePending(PyThreadState *tstate)
{
    uintptr_t breaker = _Py_atomic_load_uintptr_relaxed(&tstate->eval_breaker);

    /* Stop-the-world */
    if ((breaker & _PY_EVAL_PLEASE_STOP_BIT) != 0) {
        _Py_unset_eval_breaker_bit(tstate, _PY_EVAL_PLEASE_STOP_BIT);
        _PyThreadState_Suspend(tstate);

        /* The attach blocks until the stop-the-world event is complete. */
        _PyThreadState_Attach(tstate);
    }

    /* Pending signals */
    if ((breaker & _PY_SIGNALS_PENDING_BIT) != 0) {
        if (handle_signals(tstate) != 0) {
            return -1;
        }
    }

    /* Pending calls */
    if ((breaker & _PY_CALLS_TO_DO_BIT) != 0) {
        if (make_pending_calls(tstate) != 0) {
            return -1;
        }
    }

#ifdef Py_GIL_DISABLED
    /* Objects with refcounts to merge */
    if ((breaker & _PY_EVAL_EXPLICIT_MERGE_BIT) != 0) {
        _Py_unset_eval_breaker_bit(tstate, _PY_EVAL_EXPLICIT_MERGE_BIT);
        _Py_brc_merge_refcounts(tstate);
    }
    /* Process deferred memory frees held by QSBR */
    if (_Py_qsbr_should_process(((_PyThreadStateImpl *)tstate)->qsbr)) {
        _PyMem_ProcessDelayed(tstate);
    }
#endif

    /* GC scheduled to run */
    if ((breaker & _PY_GC_SCHEDULED_BIT) != 0) {
        _Py_unset_eval_breaker_bit(tstate, _PY_GC_SCHEDULED_BIT);
        _Py_RunGC(tstate);
    }

    if ((breaker & _PY_EVAL_JIT_INVALIDATE_COLD_BIT) != 0) {
        _Py_unset_eval_breaker_bit(tstate, _PY_EVAL_JIT_INVALIDATE_COLD_BIT);
        _Py_Executors_InvalidateCold(tstate->interp);
        tstate->interp->trace_run_counter = JIT_CLEANUP_THRESHOLD;
    }

    /* GIL drop request */
    if ((breaker & _PY_GIL_DROP_REQUEST_BIT) != 0) {
        /* Give another thread a chance */
        _PyThreadState_Detach(tstate);

        /* Other threads may run now */

        _PyThreadState_Attach(tstate);
    }

    /* Check for asynchronous exception. */
    if ((breaker & _PY_ASYNC_EXCEPTION_BIT) != 0) {
        _Py_unset_eval_breaker_bit(tstate, _PY_ASYNC_EXCEPTION_BIT);
        PyObject *exc = _Py_atomic_exchange_ptr(&tstate->async_exc, NULL);
        if (exc != NULL) {
            _PyErr_SetNone(tstate, exc);
            Py_DECREF(exc);
            return -1;
        }
    }

#if defined(Py_REMOTE_DEBUG) && defined(Py_SUPPORTS_REMOTE_DEBUG)
    _PyRunRemoteDebugger(tstate);
#endif

    return 0;
}