gh-109693: Update _gil_runtime_state.last_holder to use pyatomic.h (#110605)
author    Donghee Na <donghee.na@python.org>
          Fri, 13 Oct 2023 01:07:27 +0000 (10:07 +0900)
committer GitHub <noreply@github.com>
          Fri, 13 Oct 2023 01:07:27 +0000 (10:07 +0900)
Include/internal/pycore_gil.h
Python/ceval_gil.c
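
The commit replaces the legacy pycore_atomic.h accessors for last_holder with the standardized pyatomic.h pointer functions. A minimal sketch of the before/after usage pattern, assuming a CPython core build (Py_BUILD_CORE) where both header families are available during the transition; the field and function names below are illustrative only, not taken from the commit:

/* Sketch only: contrasts the two atomic styles on made-up holder fields. */
static _Py_atomic_address legacy_holder;   /* old style: address-sized integer atomic */
static PyThreadState *plain_holder;        /* new style: plain pointer field */

static void
record_holder_sketch(PyThreadState *tstate)
{
    /* Legacy pycore_atomic.h: cast to uintptr_t on store, cast back on load. */
    _Py_atomic_store_relaxed(&legacy_holder, (uintptr_t)tstate);
    PyThreadState *a = (PyThreadState *)_Py_atomic_load_relaxed(&legacy_holder);

    /* pyatomic.h: the _ptr_ variants work on the pointer field directly. */
    _Py_atomic_store_ptr_relaxed(&plain_holder, tstate);
    PyThreadState *b = (PyThreadState *)_Py_atomic_load_ptr_relaxed(&plain_holder);

    (void)a;
    (void)b;
}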

Include/internal/pycore_gil.h
index a1a15070eef461d78d844e34d2f486be3ebcd735..daf1e73e7827e8ef5aff8a0d733107532c1bdd8c 100644 (file)
@@ -8,7 +8,7 @@ extern "C" {
 #  error "this header requires Py_BUILD_CORE define"
 #endif
 
-#include "pycore_atomic.h"        // _Py_atomic_address
+#include "pycore_atomic.h"        // _Py_atomic_int
 #include "pycore_condvar.h"       // PyCOND_T
 
 #ifndef Py_HAVE_CONDVAR
@@ -25,7 +25,7 @@ struct _gil_runtime_state {
     unsigned long interval;
     /* Last PyThreadState holding / having held the GIL. This helps us
        know whether anyone else was scheduled after we dropped the GIL. */
-    _Py_atomic_address last_holder;
+    PyThreadState* last_holder;
     /* Whether the GIL is already taken (-1 if uninitialized). This is
        atomic because it can be read without any lock taken in ceval.c. */
     _Py_atomic_int locked;
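
The struct comment above is why last_holder must stay atomic even as a plain pointer: it is read outside gil->mutex to decide whether another thread was scheduled after a drop. A simplified illustration of that check, assuming the internal headers shown in this diff; the real logic in Python/ceval_gil.c also involves switch_number and the switch condition variable:

/* Simplified illustration only, not the real function. */
static int
another_thread_took_gil_sketch(struct _gil_runtime_state *gil,
                               PyThreadState *tstate)
{
    PyThreadState *holder =
        (PyThreadState *)_Py_atomic_load_ptr_relaxed(&gil->last_holder);
    /* Whoever takes the GIL records itself as last_holder, so if the
       pointer no longer matches us, someone else has been scheduled. */
    return holder != tstate;
}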
Python/ceval_gil.c
index 78f0217e5fe8213ca1d2f80066fc94856d71c2b0..bbb1e784dfa04e07a2bba03bec00c848a9ae0a81 100644 (file)
@@ -189,7 +189,7 @@ static void create_gil(struct _gil_runtime_state *gil)
 #ifdef FORCE_SWITCHING
     COND_INIT(gil->switch_cond);
 #endif
-    _Py_atomic_store_relaxed(&gil->last_holder, 0);
+    _Py_atomic_store_ptr_relaxed(&gil->last_holder, 0);
     _Py_ANNOTATE_RWLOCK_CREATE(&gil->locked);
     _Py_atomic_store_explicit(&gil->locked, 0, _Py_memory_order_release);
 }
@@ -233,7 +233,7 @@ drop_gil(PyInterpreterState *interp, PyThreadState *tstate)
     // XXX assert(tstate == NULL || !tstate->_status.cleared);
 
     struct _gil_runtime_state *gil = ceval->gil;
     if (!_Py_atomic_load_relaxed(&gil->locked)) {
         Py_FatalError("drop_gil: GIL is not locked");
     }
 
@@ -242,7 +242,7 @@ drop_gil(PyInterpreterState *interp, PyThreadState *tstate)
         /* Sub-interpreter support: threads might have been switched
            under our feet using PyThreadState_Swap(). Fix the GIL last
            holder variable so that our heuristics work. */
-        _Py_atomic_store_relaxed(&gil->last_holder, (uintptr_t)tstate);
+        _Py_atomic_store_ptr_relaxed(&gil->last_holder, tstate);
     }
 
     MUTEX_LOCK(gil->mutex);
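
The store above exists because PyThreadState_Swap() can change the current thread state between taking and dropping the GIL, so drop_gil() re-records which thread state is actually giving the lock up before the switching heuristics read last_holder. A condensed sketch of the drop path, assuming the gil->cond condition variable and the MUTEX_*/COND_* wrappers defined in ceval_gil.c; annotations and the FORCE_SWITCHING handshake are omitted:

/* Condensed sketch of drop_gil(); not the full function. */
static void
drop_gil_sketch(struct _gil_runtime_state *gil, PyThreadState *tstate)
{
    if (tstate != NULL) {
        /* Re-record the dropping thread so last_holder stays accurate. */
        _Py_atomic_store_ptr_relaxed(&gil->last_holder, tstate);
    }
    MUTEX_LOCK(gil->mutex);
    _Py_atomic_store_explicit(&gil->locked, 0, _Py_memory_order_release);
    COND_SIGNAL(gil->cond);      /* wake one waiter blocked in take_gil() */
    MUTEX_UNLOCK(gil->mutex);
}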
@@ -263,7 +263,7 @@ drop_gil(PyInterpreterState *interp, PyThreadState *tstate)
     if (tstate != NULL && _Py_eval_breaker_bit_is_set(interp, _PY_GIL_DROP_REQUEST_BIT)) {
         MUTEX_LOCK(gil->switch_mutex);
         /* Not switched yet => wait */
-        if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) == tstate)
+        if (((PyThreadState*)_Py_atomic_load_ptr_relaxed(&gil->last_holder)) == tstate)
         {
             assert(_PyThreadState_CheckConsistency(tstate));
             RESET_GIL_DROP_REQUEST(tstate->interp);
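
In the full function the branch above then blocks on gil->switch_cond so the dropping thread does not immediately re-acquire the GIL before the requesting thread gets scheduled. Roughly, as a sketch rather than the exact code:

/* Sketch of the forced-switch wait that follows the reset above.
   COND_WAIT releases switch_mutex while blocking, which lets take_gil()
   update last_holder and signal switch_cond. */
COND_WAIT(gil->switch_cond, gil->switch_mutex);
MUTEX_UNLOCK(gil->switch_mutex);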
@@ -361,8 +361,8 @@ _ready:
     _Py_atomic_store_relaxed(&gil->locked, 1);
     _Py_ANNOTATE_RWLOCK_ACQUIRED(&gil->locked, /*is_write=*/1);
 
-    if (tstate != (PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) {
-        _Py_atomic_store_relaxed(&gil->last_holder, (uintptr_t)tstate);
+    if (tstate != (PyThreadState*)_Py_atomic_load_ptr_relaxed(&gil->last_holder)) {
+        _Py_atomic_store_ptr_relaxed(&gil->last_holder, tstate);
         ++gil->switch_number;
     }
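
The switch_number bump above is what take_gil()'s waiters use to detect progress: each waiter snapshots the counter before sleeping, and if it is unchanged after the timed wait the current holder is asked to release the GIL. A simplified sketch of that waiting loop, assuming the COND_TIMED_WAIT wrapper and the SET_GIL_DROP_REQUEST helper from ceval_gil.c; tstate is the thread trying to take the GIL:

/* Simplified sketch of the waiting side of take_gil(); not the full function. */
while (_Py_atomic_load_relaxed(&gil->locked)) {
    unsigned long saved_switchnum = gil->switch_number;
    int timed_out = 0;
    COND_TIMED_WAIT(gil->cond, gil->mutex, gil->interval, timed_out);
    /* Timed out with no switch in between: ask the holder to drop the GIL. */
    if (timed_out &&
        _Py_atomic_load_relaxed(&gil->locked) &&
        gil->switch_number == saved_switchnum)
    {
        SET_GIL_DROP_REQUEST(tstate->interp);
    }
}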
 
@@ -434,7 +434,7 @@ PyEval_ThreadsInitialized(void)
 static inline int
 current_thread_holds_gil(struct _gil_runtime_state *gil, PyThreadState *tstate)
 {
-    if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) != tstate) {
+    if (((PyThreadState*)_Py_atomic_load_ptr_relaxed(&gil->last_holder)) != tstate) {
         return 0;
     }
     return _Py_atomic_load_relaxed(&gil->locked);
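
Both loads matter: after drop_gil(), last_holder still points at the previous holder, so the pointer comparison alone would report a stale positive, and the locked flag is what separates "last held" from "currently holds". A small illustration, with the state transitions spelled out as comments; the helper name is hypothetical:

/* Illustration only: why both fields are checked. */
static void
holder_vs_locked_sketch(struct _gil_runtime_state *gil, PyThreadState *tstate)
{
    /* While tstate runs bytecode:  last_holder == tstate, locked == 1 -> holds GIL. */
    /* After drop_gil(tstate):      last_holder == tstate, locked == 0 -> does not.  */
    /* After another thread takes:  last_holder != tstate, locked == 1 -> does not.  */
    int holds =
        ((PyThreadState *)_Py_atomic_load_ptr_relaxed(&gil->last_holder) == tstate)
        && _Py_atomic_load_relaxed(&gil->locked);
    (void)holds;
}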