Remove the internal pycore_atomic_funcs.h header file; _PyUnicode_FromId() now uses pyatomic.h functions instead.
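For reference, a minimal sketch of how the removed helpers map onto the pyatomic.h load/store functions used in this change. This is an illustration only: the variable name cached_index is hypothetical, and pyatomic.h is assumed to be reachable through Python.h, since the unicodeobject.c hunk below adds no new include.

/* Sketch only: one-to-one mapping from the removed helpers to pyatomic.h.
 * Both the old and the new functions use sequentially consistent ordering. */
#include "Python.h"              // assumed to provide _Py_atomic_load_ssize() etc.

static Py_ssize_t cached_index = -1;   // hypothetical shared variable

static void
set_index(Py_ssize_t value)
{
    // Old: _Py_atomic_size_set(&cached_index, value);
    _Py_atomic_store_ssize(&cached_index, value);
}

static Py_ssize_t
get_index(void)
{
    // Old: return _Py_atomic_size_get(&cached_index);
    return _Py_atomic_load_ssize(&cached_index);
}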
+++ /dev/null
-/* Atomic functions: similar to pycore_atomic.h, but don't need
- to declare variables as atomic.
-
- Py_ssize_t type:
-
- * value = _Py_atomic_size_get(&var)
- * _Py_atomic_size_set(&var, value)
-
- Use sequentially-consistent ordering (__ATOMIC_SEQ_CST memory order):
- enforce total ordering with all other atomic functions.
-*/
-#ifndef Py_ATOMIC_FUNC_H
-#define Py_ATOMIC_FUNC_H
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifndef Py_BUILD_CORE
-# error "this header requires Py_BUILD_CORE define"
-#endif
-
-#if defined(_MSC_VER)
-# include <intrin.h> // _InterlockedExchange()
-#endif
-
-
-// Use builtin atomic operations in GCC >= 4.7 and clang
-#ifdef HAVE_BUILTIN_ATOMIC
-
-static inline Py_ssize_t _Py_atomic_size_get(Py_ssize_t *var)
-{
- return __atomic_load_n(var, __ATOMIC_SEQ_CST);
-}
-
-static inline void _Py_atomic_size_set(Py_ssize_t *var, Py_ssize_t value)
-{
- __atomic_store_n(var, value, __ATOMIC_SEQ_CST);
-}
-
-#elif defined(_MSC_VER)
-
-static inline Py_ssize_t _Py_atomic_size_get(Py_ssize_t *var)
-{
-#if SIZEOF_VOID_P == 8
- Py_BUILD_ASSERT(sizeof(__int64) == sizeof(*var));
- volatile __int64 *volatile_var = (volatile __int64 *)var;
- __int64 old;
- do {
- old = *volatile_var;
- } while(_InterlockedCompareExchange64(volatile_var, old, old) != old);
-#else
- Py_BUILD_ASSERT(sizeof(long) == sizeof(*var));
- volatile long *volatile_var = (volatile long *)var;
- long old;
- do {
- old = *volatile_var;
- } while(_InterlockedCompareExchange(volatile_var, old, old) != old);
-#endif
- return old;
-}
-
-static inline void _Py_atomic_size_set(Py_ssize_t *var, Py_ssize_t value)
-{
-#if SIZEOF_VOID_P == 8
- Py_BUILD_ASSERT(sizeof(__int64) == sizeof(*var));
- volatile __int64 *volatile_var = (volatile __int64 *)var;
- _InterlockedExchange64(volatile_var, value);
-#else
- Py_BUILD_ASSERT(sizeof(long) == sizeof(*var));
- volatile long *volatile_var = (volatile long *)var;
- _InterlockedExchange(volatile_var, value);
-#endif
-}
-
-#else
-// Fallback implementation using volatile
-
-static inline Py_ssize_t _Py_atomic_size_get(Py_ssize_t *var)
-{
- volatile Py_ssize_t *volatile_var = (volatile Py_ssize_t *)var;
- return *volatile_var;
-}
-
-static inline void _Py_atomic_size_set(Py_ssize_t *var, Py_ssize_t value)
-{
- volatile Py_ssize_t *volatile_var = (volatile Py_ssize_t *)var;
- *volatile_var = value;
-}
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-#endif /* Py_ATOMIC_FUNC_H */
$(srcdir)/Include/internal/pycore_ast_state.h \
$(srcdir)/Include/internal/pycore_atexit.h \
$(srcdir)/Include/internal/pycore_atomic.h \
- $(srcdir)/Include/internal/pycore_atomic_funcs.h \
$(srcdir)/Include/internal/pycore_bitutils.h \
$(srcdir)/Include/internal/pycore_bytes_methods.h \
$(srcdir)/Include/internal/pycore_bytesobject.h \
#undef NDEBUG
#include "Python.h"
-#include "pycore_atomic_funcs.h" // _Py_atomic_int_get()
#include "pycore_bitutils.h" // _Py_bswap32()
#include "pycore_bytesobject.h" // _PyBytes_Find()
#include "pycore_ceval.h" // _PyEval_AddPendingCall()
}
-static PyObject*
-test_atomic_funcs(PyObject *self, PyObject *Py_UNUSED(args))
-{
- // Test _Py_atomic_size_get() and _Py_atomic_size_set()
- Py_ssize_t var = 1;
- _Py_atomic_size_set(&var, 2);
- assert(_Py_atomic_size_get(&var) == 2);
- Py_RETURN_NONE;
-}
-
-
static int
check_edit_cost(const char *a, const char *b, Py_ssize_t expected)
{
{"get_config", test_get_config, METH_NOARGS},
{"set_config", test_set_config, METH_O},
{"reset_path_config", test_reset_path_config, METH_NOARGS},
- {"test_atomic_funcs", test_atomic_funcs, METH_NOARGS},
{"test_edit_cost", test_edit_cost, METH_NOARGS},
{"test_bytes_find", test_bytes_find, METH_NOARGS},
{"normalize_path", normalize_path, METH_O, NULL},
#include "Python.h"
#include "pycore_abstract.h" // _PyIndex_Check()
-#include "pycore_atomic_funcs.h" // _Py_atomic_size_get()
#include "pycore_bytes_methods.h" // _Py_bytes_lower()
#include "pycore_bytesobject.h" // _PyBytes_Repeat()
#include "pycore_ceval.h" // _PyEval_GetBuiltin()
PyInterpreterState *interp = _PyInterpreterState_GET();
struct _Py_unicode_ids *ids = &interp->unicode.ids;
- Py_ssize_t index = _Py_atomic_size_get(&id->index);
+ Py_ssize_t index = _Py_atomic_load_ssize(&id->index);
if (index < 0) {
struct _Py_unicode_runtime_ids *rt_ids = &interp->runtime->unicode_state.ids;
PyThread_acquire_lock(rt_ids->lock, WAIT_LOCK);
// Check again to detect concurrent access. Another thread can have
// initialized the index while this thread waited for the lock.
- index = _Py_atomic_size_get(&id->index);
+ index = _Py_atomic_load_ssize(&id->index);
if (index < 0) {
assert(rt_ids->next_index < PY_SSIZE_T_MAX);
index = rt_ids->next_index;
rt_ids->next_index++;
- _Py_atomic_size_set(&id->index, index);
+ _Py_atomic_store_ssize(&id->index, index);
}
PyThread_release_lock(rt_ids->lock);
}
<ClInclude Include="..\Include\internal\pycore_ast_state.h" />
<ClInclude Include="..\Include\internal\pycore_atexit.h" />
<ClInclude Include="..\Include\internal\pycore_atomic.h" />
- <ClInclude Include="..\Include\internal\pycore_atomic_funcs.h" />
<ClInclude Include="..\Include\internal\pycore_bitutils.h" />
<ClInclude Include="..\Include\internal\pycore_bytes_methods.h" />
<ClInclude Include="..\Include\internal\pycore_bytesobject.h" />
<ClInclude Include="..\Include\internal\pycore_atomic.h">
<Filter>Include\internal</Filter>
</ClInclude>
- <ClInclude Include="..\Include\internal\pycore_atomic_funcs.h">
- <Filter>Include</Filter>
- </ClInclude>
<ClInclude Include="..\Include\internal\pycore_bitutils.h">
<Filter>Include\internal</Filter>
</ClInclude>