gh-109693: Remove pycore_atomic_funcs.h (#109694)
author Sam Gross <colesbury@gmail.com>
Thu, 21 Sep 2023 20:57:20 +0000 (16:57 -0400)
committer GitHub <noreply@github.com>
Thu, 21 Sep 2023 20:57:20 +0000 (22:57 +0200)
_PyUnicode_FromId() now uses pyatomic.h functions instead.

Include/internal/pycore_atomic_funcs.h [deleted file]
Makefile.pre.in
Modules/_testinternalcapi.c
Objects/unicodeobject.c
PCbuild/pythoncore.vcxproj
PCbuild/pythoncore.vcxproj.filters

diff --git a/Include/internal/pycore_atomic_funcs.h b/Include/internal/pycore_atomic_funcs.h
deleted file mode 100644 (file)
index a708789..0000000
--- a/Include/internal/pycore_atomic_funcs.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/* Atomic functions: similar to pycore_atomic.h, but don't need
-   to declare variables as atomic.
-
-   Py_ssize_t type:
-
-   * value = _Py_atomic_size_get(&var)
-   * _Py_atomic_size_set(&var, value)
-
-   Use sequentially-consistent ordering (__ATOMIC_SEQ_CST memory order):
-   enforce total ordering with all other atomic functions.
-*/
-#ifndef Py_ATOMIC_FUNC_H
-#define Py_ATOMIC_FUNC_H
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifndef Py_BUILD_CORE
-#  error "this header requires Py_BUILD_CORE define"
-#endif
-
-#if defined(_MSC_VER)
-#  include <intrin.h>             // _InterlockedExchange()
-#endif
-
-
-// Use builtin atomic operations in GCC >= 4.7 and clang
-#ifdef HAVE_BUILTIN_ATOMIC
-
-static inline Py_ssize_t _Py_atomic_size_get(Py_ssize_t *var)
-{
-    return __atomic_load_n(var, __ATOMIC_SEQ_CST);
-}
-
-static inline void _Py_atomic_size_set(Py_ssize_t *var, Py_ssize_t value)
-{
-    __atomic_store_n(var, value, __ATOMIC_SEQ_CST);
-}
-
-#elif defined(_MSC_VER)
-
-static inline Py_ssize_t _Py_atomic_size_get(Py_ssize_t *var)
-{
-#if SIZEOF_VOID_P == 8
-    Py_BUILD_ASSERT(sizeof(__int64) == sizeof(*var));
-    volatile __int64 *volatile_var = (volatile __int64 *)var;
-    __int64 old;
-    do {
-        old = *volatile_var;
-    } while(_InterlockedCompareExchange64(volatile_var, old, old) != old);
-#else
-    Py_BUILD_ASSERT(sizeof(long) == sizeof(*var));
-    volatile long *volatile_var = (volatile long *)var;
-    long old;
-    do {
-        old = *volatile_var;
-    } while(_InterlockedCompareExchange(volatile_var, old, old) != old);
-#endif
-    return old;
-}
-
-static inline void _Py_atomic_size_set(Py_ssize_t *var, Py_ssize_t value)
-{
-#if SIZEOF_VOID_P == 8
-    Py_BUILD_ASSERT(sizeof(__int64) == sizeof(*var));
-    volatile __int64 *volatile_var = (volatile __int64 *)var;
-    _InterlockedExchange64(volatile_var, value);
-#else
-    Py_BUILD_ASSERT(sizeof(long) == sizeof(*var));
-    volatile long *volatile_var = (volatile long *)var;
-    _InterlockedExchange(volatile_var, value);
-#endif
-}
-
-#else
-// Fallback implementation using volatile
-
-static inline Py_ssize_t _Py_atomic_size_get(Py_ssize_t *var)
-{
-    volatile Py_ssize_t *volatile_var = (volatile Py_ssize_t *)var;
-    return *volatile_var;
-}
-
-static inline void _Py_atomic_size_set(Py_ssize_t *var, Py_ssize_t value)
-{
-    volatile Py_ssize_t *volatile_var = (volatile Py_ssize_t *)var;
-    *volatile_var = value;
-}
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-#endif  /* Py_ATOMIC_FUNC_H */
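
The helpers removed above boil down to a sequentially-consistent load and store of a Py_ssize_t; in the tree they are replaced by the generic _Py_atomic_load_ssize() and _Py_atomic_store_ssize() from pyatomic.h, as the unicodeobject.c hunk further down shows. As a standalone illustration only (not CPython code, and unlike the deleted header, C11 <stdatomic.h> requires the variable itself to be declared atomic), the same semantics look like this:

/* Standalone illustration only: mirrors the seq_cst load/store semantics
   of the removed _Py_atomic_size_get()/_Py_atomic_size_set(), using C11
   atomics instead of the GCC builtins, MSVC intrinsics, or the volatile
   fallback shown above. */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

static atomic_ptrdiff_t var;        /* stands in for a Py_ssize_t variable */

static ptrdiff_t size_get(atomic_ptrdiff_t *v)
{
    /* sequentially-consistent load: totally ordered with other seq_cst ops */
    return atomic_load_explicit(v, memory_order_seq_cst);
}

static void size_set(atomic_ptrdiff_t *v, ptrdiff_t value)
{
    /* sequentially-consistent store */
    atomic_store_explicit(v, value, memory_order_seq_cst);
}

int main(void)
{
    size_set(&var, 2);
    printf("%td\n", size_get(&var));    /* prints 2 */
    return 0;
}
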
diff --git a/Makefile.pre.in b/Makefile.pre.in
index b6d00d5dc45007d8c5f84bc93e02c6f99ddba5aa..d123fa3e6f4a478e13e3f5ffb8be46905f930d30 100644 (file)
--- a/Makefile.pre.in
+++ b/Makefile.pre.in
@@ -1743,7 +1743,6 @@ PYTHON_HEADERS= \
                $(srcdir)/Include/internal/pycore_ast_state.h \
                $(srcdir)/Include/internal/pycore_atexit.h \
                $(srcdir)/Include/internal/pycore_atomic.h \
-               $(srcdir)/Include/internal/pycore_atomic_funcs.h \
                $(srcdir)/Include/internal/pycore_bitutils.h \
                $(srcdir)/Include/internal/pycore_bytes_methods.h \
                $(srcdir)/Include/internal/pycore_bytesobject.h \
diff --git a/Modules/_testinternalcapi.c b/Modules/_testinternalcapi.c
index 934e3637a9164d737e3e40da862e6f9990b55e4c..f97b609f7ad2efa8e08bfa7b3eaaa14f74e682ee 100644 (file)
--- a/Modules/_testinternalcapi.c
+++ b/Modules/_testinternalcapi.c
@@ -10,7 +10,6 @@
 #undef NDEBUG
 
 #include "Python.h"
-#include "pycore_atomic_funcs.h"  // _Py_atomic_int_get()
 #include "pycore_bitutils.h"      // _Py_bswap32()
 #include "pycore_bytesobject.h"   // _PyBytes_Find()
 #include "pycore_ceval.h"         // _PyEval_AddPendingCall()
@@ -349,17 +348,6 @@ test_reset_path_config(PyObject *Py_UNUSED(self), PyObject *Py_UNUSED(arg))
 }
 
 
-static PyObject*
-test_atomic_funcs(PyObject *self, PyObject *Py_UNUSED(args))
-{
-    // Test _Py_atomic_size_get() and _Py_atomic_size_set()
-    Py_ssize_t var = 1;
-    _Py_atomic_size_set(&var, 2);
-    assert(_Py_atomic_size_get(&var) == 2);
-    Py_RETURN_NONE;
-}
-
-
 static int
 check_edit_cost(const char *a, const char *b, Py_ssize_t expected)
 {
@@ -1488,7 +1476,6 @@ static PyMethodDef module_functions[] = {
     {"get_config", test_get_config, METH_NOARGS},
     {"set_config", test_set_config, METH_O},
     {"reset_path_config", test_reset_path_config, METH_NOARGS},
-    {"test_atomic_funcs", test_atomic_funcs, METH_NOARGS},
     {"test_edit_cost", test_edit_cost, METH_NOARGS},
     {"test_bytes_find", test_bytes_find, METH_NOARGS},
     {"normalize_path", normalize_path, METH_O, NULL},
diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c
index 4b87bf8e37aa94cdba4dd20052240f0800e18759..aca28e4842d6450542e0f009f671c4192ae12c42 100644 (file)
--- a/Objects/unicodeobject.c
+++ b/Objects/unicodeobject.c
@@ -40,7 +40,6 @@ OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 
 #include "Python.h"
 #include "pycore_abstract.h"      // _PyIndex_Check()
-#include "pycore_atomic_funcs.h"  // _Py_atomic_size_get()
 #include "pycore_bytes_methods.h" // _Py_bytes_lower()
 #include "pycore_bytesobject.h"   // _PyBytes_Repeat()
 #include "pycore_ceval.h"         // _PyEval_GetBuiltin()
@@ -1906,19 +1905,19 @@ _PyUnicode_FromId(_Py_Identifier *id)
     PyInterpreterState *interp = _PyInterpreterState_GET();
     struct _Py_unicode_ids *ids = &interp->unicode.ids;
 
-    Py_ssize_t index = _Py_atomic_size_get(&id->index);
+    Py_ssize_t index = _Py_atomic_load_ssize(&id->index);
     if (index < 0) {
         struct _Py_unicode_runtime_ids *rt_ids = &interp->runtime->unicode_state.ids;
 
         PyThread_acquire_lock(rt_ids->lock, WAIT_LOCK);
         // Check again to detect concurrent access. Another thread can have
         // initialized the index while this thread waited for the lock.
-        index = _Py_atomic_size_get(&id->index);
+        index = _Py_atomic_load_ssize(&id->index);
         if (index < 0) {
             assert(rt_ids->next_index < PY_SSIZE_T_MAX);
             index = rt_ids->next_index;
             rt_ids->next_index++;
-            _Py_atomic_size_set(&id->index, index);
+            _Py_atomic_store_ssize(&id->index, index);
         }
         PyThread_release_lock(rt_ids->lock);
     }
diff --git a/PCbuild/pythoncore.vcxproj b/PCbuild/pythoncore.vcxproj
index 190eaa16daa8af49272494365f566061f53d39cc..1ec106777db56d77d501e3ecd1a226c3bdc50a06 100644 (file)
--- a/PCbuild/pythoncore.vcxproj
+++ b/PCbuild/pythoncore.vcxproj
     <ClInclude Include="..\Include\internal\pycore_ast_state.h" />
     <ClInclude Include="..\Include\internal\pycore_atexit.h" />
     <ClInclude Include="..\Include\internal\pycore_atomic.h" />
-    <ClInclude Include="..\Include\internal\pycore_atomic_funcs.h" />
     <ClInclude Include="..\Include\internal\pycore_bitutils.h" />
     <ClInclude Include="..\Include\internal\pycore_bytes_methods.h" />
     <ClInclude Include="..\Include\internal\pycore_bytesobject.h" />
diff --git a/PCbuild/pythoncore.vcxproj.filters b/PCbuild/pythoncore.vcxproj.filters
index f4fddfdd11f4c129669b20596d6a1d365a087ebb..f381120c9b035ac0098ac6b5bddca05586890b14 100644 (file)
--- a/PCbuild/pythoncore.vcxproj.filters
+++ b/PCbuild/pythoncore.vcxproj.filters
     <ClInclude Include="..\Include\internal\pycore_atomic.h">
       <Filter>Include\internal</Filter>
     </ClInclude>
-    <ClInclude Include="..\Include\internal\pycore_atomic_funcs.h">
-      <Filter>Include</Filter>
-    </ClInclude>
     <ClInclude Include="..\Include\internal\pycore_bitutils.h">
       <Filter>Include\internal</Filter>
     </ClInclude>