gh-109693: Update _gil_runtime_state.locked to use pyatomic.h (gh-110836)
author    Donghee Na <donghee.na@python.org>
          Mon, 16 Oct 2023 22:32:50 +0000 (07:32 +0900)
committer GitHub <noreply@github.com>
          Mon, 16 Oct 2023 22:32:50 +0000 (07:32 +0900)
Include/cpython/pyatomic.h
Include/cpython/pyatomic_gcc.h
Include/cpython/pyatomic_msc.h
Include/cpython/pyatomic_std.h
Include/internal/pycore_gil.h
Modules/_testcapi/pyatomic.c
Python/ceval_gil.c
Python/thread_pthread.h

diff --git a/Include/cpython/pyatomic.h b/Include/cpython/pyatomic.h
index 7a783058c173aa2e609943575eeafd11edb14de6..5314a70436bfc39f06f6c82abd335a282272e12f 100644
@@ -463,6 +463,12 @@ _Py_atomic_load_ptr_acquire(const void *obj);
 static inline void
 _Py_atomic_store_ptr_release(void *obj, void *value);
 
+static inline void
+_Py_atomic_store_int_release(int *obj, int value);
+
+static inline int
+_Py_atomic_load_int_acquire(const int *obj);
+
 
 // --- _Py_atomic_fence ------------------------------------------------------
 
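For orientation: the new pair mirrors the existing pointer variants above — a release store publishes a value so that an acquire load in another thread observes it together with everything written before the store. A minimal standalone sketch of that pairing, using plain C11 <stdatomic.h> and pthreads rather than the CPython-internal helpers (payload, ready, writer and reader are illustrative names, not part of this commit):

    /* build: cc -std=c11 -pthread sketch.c */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static int payload;            /* ordinary data published by the writer */
    static _Atomic int ready = 0;  /* flag accessed with release/acquire */

    static void *writer(void *arg)
    {
        (void)arg;
        payload = 42;                                            /* plain store */
        atomic_store_explicit(&ready, 1, memory_order_release);  /* publish */
        return NULL;
    }

    static void *reader(void *arg)
    {
        (void)arg;
        while (!atomic_load_explicit(&ready, memory_order_acquire)) {
            /* spin until the writer publishes the flag */
        }
        /* The acquire load pairs with the release store, so payload == 42. */
        printf("payload = %d\n", payload);
        return NULL;
    }

    int main(void)
    {
        pthread_t w, r;
        pthread_create(&w, NULL, writer, NULL);
        pthread_create(&r, NULL, reader, NULL);
        pthread_join(w, NULL);
        pthread_join(r, NULL);
        return 0;
    }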
diff --git a/Include/cpython/pyatomic_gcc.h b/Include/cpython/pyatomic_gcc.h
index f1a38c7b52871a149dad0180cf2f862e6a78358e..70f2b7e1b5706a9fed28d572386328af8425b902 100644
@@ -487,6 +487,14 @@ static inline void
 _Py_atomic_store_ptr_release(void *obj, void *value)
 { __atomic_store_n((void **)obj, value, __ATOMIC_RELEASE); }
 
+static inline void
+_Py_atomic_store_int_release(int *obj, int value)
+{ __atomic_store_n(obj, value, __ATOMIC_RELEASE); }
+
+static inline int
+_Py_atomic_load_int_acquire(const int *obj)
+{ return __atomic_load_n(obj, __ATOMIC_ACQUIRE); }
+
 
 // --- _Py_atomic_fence ------------------------------------------------------
 
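As an aside, the GCC/Clang backend maps the new helpers one-to-one onto the __atomic builtins, so the two one-liners above are equivalent in shape to the following standalone sketch (publish, observe and flag are illustrative names, not CPython code):

    /* build: cc -O2 -c sketch.c  (GCC or Clang) */
    static int flag;

    /* same shape as _Py_atomic_store_int_release() */
    void publish(int value)
    {
        __atomic_store_n(&flag, value, __ATOMIC_RELEASE);
    }

    /* same shape as _Py_atomic_load_int_acquire() */
    int observe(void)
    {
        return __atomic_load_n(&flag, __ATOMIC_ACQUIRE);
    }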
diff --git a/Include/cpython/pyatomic_msc.h b/Include/cpython/pyatomic_msc.h
index 287ed43b5714cd8e45e9a29aa8b904d0975d891c..601a0cf65afc1c52b4255b637d76d8d22590f7f6 100644
@@ -912,6 +912,32 @@ _Py_atomic_store_ptr_release(void *obj, void *value)
 #endif
 }
 
+static inline void
+_Py_atomic_store_int_release(int *obj, int value)
+{
+#if defined(_M_X64) || defined(_M_IX86)
+    *(int volatile *)obj = value;
+#elif defined(_M_ARM64)
+    _Py_atomic_ASSERT_ARG_TYPE(unsigned __int32);
+    __stlr32((unsigned __int32 volatile *)obj, (unsigned __int32)value);
+#else
+#  error "no implementation of _Py_atomic_store_int_release"
+#endif
+}
+
+static inline int
+_Py_atomic_load_int_acquire(const int *obj)
+{
+#if defined(_M_X64) || defined(_M_IX86)
+    return *(int volatile *)obj;
+#elif defined(_M_ARM64)
+    _Py_atomic_ASSERT_ARG_TYPE(unsigned __int32);
+    return (int)__ldar32((unsigned __int32 volatile *)obj);
+#else
+#  error "no implementation of _Py_atomic_load_int_acquire"
+#endif
+}
+
 
 // --- _Py_atomic_fence ------------------------------------------------------
 
diff --git a/Include/cpython/pyatomic_std.h b/Include/cpython/pyatomic_std.h
index bf74a90887c63461af7c03f63e9b804ad85916a0..a05bfaec47e89d535113cd2a59bf1bbe9e8e6fd5 100644
@@ -854,6 +854,23 @@ _Py_atomic_store_ptr_release(void *obj, void *value)
                           memory_order_release);
 }
 
+static inline void
+_Py_atomic_store_int_release(int *obj, int value)
+{
+    _Py_USING_STD;
+    atomic_store_explicit((_Atomic(int)*)obj, value,
+                          memory_order_release);
+}
+
+static inline int
+_Py_atomic_load_int_acquire(const int *obj)
+{
+    _Py_USING_STD;
+    return atomic_load_explicit((const _Atomic(int)*)obj,
+                                memory_order_acquire);
+}
+
+
 
 // --- _Py_atomic_fence ------------------------------------------------------
 
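One detail worth noting here, as an observation rather than commit text: the struct field stays a plain int and the C11 backend casts it to _Atomic(int) at each access, which assumes int and _Atomic(int) share size and representation on the supported platforms. A small illustrative sketch of that access pattern (gil_like, set_locked_release and get_locked_acquire are invented names):

    #include <stdatomic.h>

    struct gil_like {
        int locked;   /* plain int in the struct, as in _gil_runtime_state */
    };

    /* Atomicity is supplied at the access site by casting to _Atomic(int). */
    static inline void
    set_locked_release(struct gil_like *g, int value)
    {
        atomic_store_explicit((_Atomic(int) *)&g->locked, value,
                              memory_order_release);
    }

    static inline int
    get_locked_acquire(const struct gil_like *g)
    {
        return atomic_load_explicit((const _Atomic(int) *)&g->locked,
                                    memory_order_acquire);
    }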
diff --git a/Include/internal/pycore_gil.h b/Include/internal/pycore_gil.h
index daf1e73e7827e8ef5aff8a0d733107532c1bdd8c..19b0d23a68568a29f0ca4ef23e203d7e7a4a9a3c 100644
@@ -8,7 +8,6 @@ extern "C" {
 #  error "this header requires Py_BUILD_CORE define"
 #endif
 
-#include "pycore_atomic.h"        // _Py_atomic_int
 #include "pycore_condvar.h"       // PyCOND_T
 
 #ifndef Py_HAVE_CONDVAR
@@ -28,7 +27,7 @@ struct _gil_runtime_state {
     PyThreadState* last_holder;
     /* Whether the GIL is already taken (-1 if uninitialized). This is
        atomic because it can be read without any lock taken in ceval.c. */
-    _Py_atomic_int locked;
+    int locked;
     /* Number of GIL switches since the beginning. */
     unsigned long switch_number;
     /* This condition variable allows one or several threads to wait
diff --git a/Modules/_testcapi/pyatomic.c b/Modules/_testcapi/pyatomic.c
index 5aedf6877057072e8a6acadf36286b7d623b57d3..4f72844535ebd645d8a7692f9c6fdcf67637c472 100644
@@ -140,6 +140,21 @@ test_atomic_release_acquire(PyObject *self, PyObject *obj) {
     Py_RETURN_NONE;
 }
 
+static PyObject *
+test_atomic_load_store_int_release_acquire(PyObject *self, PyObject *obj) { \
+    int x = 0;
+    int y = 1;
+    int z = 2;
+    assert(_Py_atomic_load_int_acquire(&x) == 0);
+    _Py_atomic_store_int_release(&x, y);
+    assert(x == y);
+    assert(_Py_atomic_load_int_acquire(&x) == y);
+    _Py_atomic_store_int_release(&x, z);
+    assert(x == z);
+    assert(_Py_atomic_load_int_acquire(&x) == z);
+    Py_RETURN_NONE;
+}
+
 // NOTE: all tests should start with "test_atomic_" to be included
 // in test_pyatomic.py
 
@@ -162,6 +177,7 @@ static PyMethodDef test_methods[] = {
     FOR_BITWISE_TYPES(BIND_TEST_AND_OR)
     {"test_atomic_fences", test_atomic_fences, METH_NOARGS},
     {"test_atomic_release_acquire", test_atomic_release_acquire, METH_NOARGS},
+    {"test_atomic_load_store_int_release_acquire", test_atomic_load_store_int_release_acquire, METH_NOARGS},
     {NULL, NULL} /* sentinel */
 };
 
diff --git a/Python/ceval_gil.c b/Python/ceval_gil.c
index bbb1e784dfa04e07a2bba03bec00c848a9ae0a81..97ef39e80aab255ee1f392bb7c5181103cfce3b9 100644
@@ -1,6 +1,6 @@
 
 #include "Python.h"
-#include "pycore_atomic.h"        // _Py_atomic_int
+#include "pycore_atomic.h"        // _Py_ANNOTATE_RWLOCK_CREATE
 #include "pycore_ceval.h"         // _PyEval_SignalReceived()
 #include "pycore_initconfig.h"    // _PyStatus_OK()
 #include "pycore_interp.h"        // _Py_RunGC()
@@ -120,9 +120,6 @@ UNSIGNAL_PENDING_CALLS(PyInterpreterState *interp)
 #include <stdlib.h>
 #include <errno.h>
 
-#include "pycore_atomic.h"
-
-
 #include "condvar.h"
 
 #define MUTEX_INIT(mut) \
@@ -166,8 +163,7 @@ UNSIGNAL_PENDING_CALLS(PyInterpreterState *interp)
 
 static void _gil_initialize(struct _gil_runtime_state *gil)
 {
-    _Py_atomic_int uninitialized = {-1};
-    gil->locked = uninitialized;
+    gil->locked = -1;
     gil->interval = DEFAULT_INTERVAL;
 }
 
@@ -176,7 +172,7 @@ static int gil_created(struct _gil_runtime_state *gil)
     if (gil == NULL) {
         return 0;
     }
-    return (_Py_atomic_load_explicit(&gil->locked, _Py_memory_order_acquire) >= 0);
+    return (_Py_atomic_load_int_acquire(&gil->locked) >= 0);
 }
 
 static void create_gil(struct _gil_runtime_state *gil)
@@ -191,7 +187,7 @@ static void create_gil(struct _gil_runtime_state *gil)
 #endif
     _Py_atomic_store_ptr_relaxed(&gil->last_holder, 0);
     _Py_ANNOTATE_RWLOCK_CREATE(&gil->locked);
-    _Py_atomic_store_explicit(&gil->locked, 0, _Py_memory_order_release);
+    _Py_atomic_store_int_release(&gil->locked, 0);
 }
 
 static void destroy_gil(struct _gil_runtime_state *gil)
@@ -205,8 +201,7 @@ static void destroy_gil(struct _gil_runtime_state *gil)
     COND_FINI(gil->switch_cond);
     MUTEX_FINI(gil->switch_mutex);
 #endif
-    _Py_atomic_store_explicit(&gil->locked, -1,
-                              _Py_memory_order_release);
+    _Py_atomic_store_int_release(&gil->locked, -1);
     _Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked);
 }
 
@@ -247,7 +242,7 @@ drop_gil(PyInterpreterState *interp, PyThreadState *tstate)
 
     MUTEX_LOCK(gil->mutex);
     _Py_ANNOTATE_RWLOCK_RELEASED(&gil->locked, /*is_write=*/1);
-    _Py_atomic_store_relaxed(&gil->locked, 0);
+    _Py_atomic_store_int_relaxed(&gil->locked, 0);
     COND_SIGNAL(gil->cond);
     MUTEX_UNLOCK(gil->mutex);
 
@@ -313,12 +308,12 @@ take_gil(PyThreadState *tstate)
 
     MUTEX_LOCK(gil->mutex);
 
-    if (!_Py_atomic_load_relaxed(&gil->locked)) {
+    if (!_Py_atomic_load_int_relaxed(&gil->locked)) {
         goto _ready;
     }
 
     int drop_requested = 0;
-    while (_Py_atomic_load_relaxed(&gil->locked)) {
+    while (_Py_atomic_load_int_relaxed(&gil->locked)) {
         unsigned long saved_switchnum = gil->switch_number;
 
         unsigned long interval = (gil->interval >= 1 ? gil->interval : 1);
@@ -328,7 +323,7 @@ take_gil(PyThreadState *tstate)
         /* If we timed out and no switch occurred in the meantime, it is time
            to ask the GIL-holding thread to drop it. */
         if (timed_out &&
-            _Py_atomic_load_relaxed(&gil->locked) &&
+            _Py_atomic_load_int_relaxed(&gil->locked) &&
             gil->switch_number == saved_switchnum)
         {
             if (_PyThreadState_MustExit(tstate)) {
@@ -358,7 +353,7 @@ _ready:
     MUTEX_LOCK(gil->switch_mutex);
 #endif
     /* We now hold the GIL */
-    _Py_atomic_store_relaxed(&gil->locked, 1);
+    _Py_atomic_store_int_relaxed(&gil->locked, 1);
     _Py_ANNOTATE_RWLOCK_ACQUIRED(&gil->locked, /*is_write=*/1);
 
     if (tstate != (PyThreadState*)_Py_atomic_load_ptr_relaxed(&gil->last_holder)) {
@@ -437,7 +432,7 @@ current_thread_holds_gil(struct _gil_runtime_state *gil, PyThreadState *tstate)
     if (((PyThreadState*)_Py_atomic_load_ptr_relaxed(&gil->last_holder)) != tstate) {
         return 0;
     }
-    return _Py_atomic_load_relaxed(&gil->locked);
+    return _Py_atomic_load_int_relaxed(&gil->locked);
 }
 
 static void
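A side note on the pattern visible in these ceval_gil.c hunks (an observation, not text from the commit): on the take/drop paths gil->locked is read and written while gil->mutex is held, so relaxed ordering is enough there and the mutex supplies the ordering, while gil_created(), create_gil() and destroy_gil() run outside that protocol and therefore use the acquire/release pair. A toy, self-contained sketch of the mutex-plus-relaxed-flag shape in C11/pthreads (toy_gil, toy_take and toy_drop are invented names, not CPython API):

    #include <pthread.h>
    #include <stdatomic.h>

    typedef struct {
        pthread_mutex_t mutex;   /* initialization omitted for brevity */
        pthread_cond_t cond;
        _Atomic int locked;      /* 1 while held, 0 while free */
    } toy_gil;

    static void toy_take(toy_gil *g)
    {
        pthread_mutex_lock(&g->mutex);
        /* Relaxed is enough: the mutex orders the flag against other data. */
        while (atomic_load_explicit(&g->locked, memory_order_relaxed)) {
            pthread_cond_wait(&g->cond, &g->mutex);
        }
        atomic_store_explicit(&g->locked, 1, memory_order_relaxed);
        pthread_mutex_unlock(&g->mutex);
    }

    static void toy_drop(toy_gil *g)
    {
        pthread_mutex_lock(&g->mutex);
        atomic_store_explicit(&g->locked, 0, memory_order_relaxed);
        pthread_cond_signal(&g->cond);
        pthread_mutex_unlock(&g->mutex);
    }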
diff --git a/Python/thread_pthread.h b/Python/thread_pthread.h
index 76a1f7763f23b9fb61402613794f5f71a926b7e5..7a6aef7ad18a5ab936651621545f5abf3bc65f25 100644
@@ -1,5 +1,6 @@
 #include "pycore_interp.h"        // _PyInterpreterState.threads.stacksize
 #include "pycore_pythread.h"      // _POSIX_SEMAPHORES
+#include "pycore_atomic.h"        // _Py_ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX
 
 /* Posix threads interface */