Add `FT_MUTEX_LOCK`/`FT_MUTEX_UNLOCK`, which call `PyMutex_Lock` and `PyMutex_Unlock` in the free-threaded build, and are no-ops otherwise.
_Py_atomic_load_ullong_relaxed(&value)
#define FT_ATOMIC_ADD_SSIZE(value, new_value) \
(void)_Py_atomic_add_ssize(&value, new_value)
+#define FT_MUTEX_LOCK(lock) PyMutex_Lock(lock)
+#define FT_MUTEX_UNLOCK(lock) PyMutex_Unlock(lock)
#else
#define FT_ATOMIC_LOAD_PTR(value) value
#define FT_ATOMIC_LOAD_ULLONG_RELAXED(value) value
#define FT_ATOMIC_STORE_ULLONG_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_ADD_SSIZE(value, new_value) (void)(value += new_value)
+#define FT_MUTEX_LOCK(lock) do {} while (0)
+#define FT_MUTEX_UNLOCK(lock) do {} while (0)
#endif
#ifdef Py_GIL_DISABLED
static PyMutex malloc_closure_lock;
-# define MALLOC_CLOSURE_LOCK() PyMutex_Lock(&malloc_closure_lock)
-# define MALLOC_CLOSURE_UNLOCK() PyMutex_Unlock(&malloc_closure_lock)
-#else
-# define MALLOC_CLOSURE_LOCK() ((void)0)
-# define MALLOC_CLOSURE_UNLOCK() ((void)0)
#endif
typedef union _tagITEM {
}
#endif
#endif
- MALLOC_CLOSURE_LOCK();
+ FT_MUTEX_LOCK(&malloc_closure_lock);
ITEM *item = (ITEM *)p;
item->next = free_list;
free_list = item;
- MALLOC_CLOSURE_UNLOCK();
+ FT_MUTEX_UNLOCK(&malloc_closure_lock);
}
/* return one item from the free list, allocating more if needed */
}
#endif
#endif
- MALLOC_CLOSURE_LOCK();
+ FT_MUTEX_LOCK(&malloc_closure_lock);
ITEM *item;
if (!free_list) {
more_core();
}
if (!free_list) {
- MALLOC_CLOSURE_UNLOCK();
+ FT_MUTEX_UNLOCK(&malloc_closure_lock);
return NULL;
}
item = free_list;
#else
*codeloc = (void *)item;
#endif
- MALLOC_CLOSURE_UNLOCK();
+ FT_MUTEX_UNLOCK(&malloc_closure_lock);
return (void *)item;
}
co->co_framesize = nlocalsplus + con->stacksize + FRAME_SPECIALS_SIZE;
co->co_ncellvars = ncellvars;
co->co_nfreevars = nfreevars;
-#ifdef Py_GIL_DISABLED
- PyMutex_Lock(&interp->func_state.mutex);
-#endif
+ FT_MUTEX_LOCK(&interp->func_state.mutex);
co->co_version = interp->func_state.next_version;
if (interp->func_state.next_version != 0) {
interp->func_state.next_version++;
}
-#ifdef Py_GIL_DISABLED
- PyMutex_Unlock(&interp->func_state.mutex);
-#endif
+ FT_MUTEX_UNLOCK(&interp->func_state.mutex);
co->_co_monitoring = NULL;
co->_co_instrumentation_version = 0;
/* not set */
#ifdef Py_GIL_DISABLED
PyInterpreterState *interp = _PyInterpreterState_GET();
struct _py_code_state *state = &interp->code_state;
- PyMutex_Lock(&state->mutex);
+ FT_MUTEX_LOCK(&state->mutex);
#endif
if (intern_strings(con->names) < 0) {
goto error;
if (intern_strings(con->localsplusnames) < 0) {
goto error;
}
-#ifdef Py_GIL_DISABLED
- PyMutex_Unlock(&state->mutex);
-#endif
+ FT_MUTEX_UNLOCK(&state->mutex);
return 0;
error:
-#ifdef Py_GIL_DISABLED
- PyMutex_Unlock(&state->mutex);
-#endif
+ FT_MUTEX_UNLOCK(&state->mutex);
return -1;
}
# define _PyUnicode_CHECK(op) PyUnicode_Check(op)
#endif
-#ifdef Py_GIL_DISABLED
-# define LOCK_INTERNED(interp) PyMutex_Lock(&_Py_INTERP_CACHED_OBJECT(interp, interned_mutex))
-# define UNLOCK_INTERNED(interp) PyMutex_Unlock(&_Py_INTERP_CACHED_OBJECT(interp, interned_mutex))
-#else
-# define LOCK_INTERNED(interp)
-# define UNLOCK_INTERNED(interp)
-#endif
-
static inline char* _PyUnicode_UTF8(PyObject *op)
{
return FT_ATOMIC_LOAD_PTR_ACQUIRE(_PyCompactUnicodeObject_CAST(op)->utf8);
/* Do a setdefault on the per-interpreter cache. */
PyObject *interned = get_interned_dict(interp);
assert(interned != NULL);
-
- LOCK_INTERNED(interp);
+#ifdef Py_GIL_DISABLED
+# define INTERN_MUTEX &_Py_INTERP_CACHED_OBJECT(interp, interned_mutex)
+#endif
+ FT_MUTEX_LOCK(INTERN_MUTEX);
PyObject *t;
{
int res = PyDict_SetDefaultRef(interned, s, s, &t);
if (res < 0) {
PyErr_Clear();
- UNLOCK_INTERNED(interp);
+ FT_MUTEX_UNLOCK(INTERN_MUTEX);
return s;
}
else if (res == 1) {
PyUnicode_CHECK_INTERNED(t) == SSTATE_INTERNED_MORTAL) {
immortalize_interned(t);
}
- UNLOCK_INTERNED(interp);
+ FT_MUTEX_UNLOCK(INTERN_MUTEX);
return t;
}
else {
immortalize_interned(s);
}
- UNLOCK_INTERNED(interp);
+ FT_MUTEX_UNLOCK(INTERN_MUTEX);
return s;
}
#include "pycore_pyerrors.h" // PyExc_IncompleteInputError
#include "pycore_runtime.h" // _PyRuntime
#include "pycore_unicodeobject.h" // _PyUnicode_InternImmortal
+#include "pycore_pyatomic_ft_wrappers.h"
#include <errcode.h>
#include "lexer/lexer.h"
#define NSTATISTICS _PYPEGEN_NSTATISTICS
#define memo_statistics _PyRuntime.parser.memo_statistics
-#ifdef Py_GIL_DISABLED
-#define MUTEX_LOCK() PyMutex_Lock(&_PyRuntime.parser.mutex)
-#define MUTEX_UNLOCK() PyMutex_Unlock(&_PyRuntime.parser.mutex)
-#else
-#define MUTEX_LOCK()
-#define MUTEX_UNLOCK()
-#endif
-
void
_PyPegen_clear_memo_statistics(void)
{
- MUTEX_LOCK();
+ FT_MUTEX_LOCK(&_PyRuntime.parser.mutex);
for (int i = 0; i < NSTATISTICS; i++) {
memo_statistics[i] = 0;
}
- MUTEX_UNLOCK();
+ FT_MUTEX_UNLOCK(&_PyRuntime.parser.mutex);
}
PyObject *
return NULL;
}
- MUTEX_LOCK();
+ FT_MUTEX_LOCK(&_PyRuntime.parser.mutex);
for (int i = 0; i < NSTATISTICS; i++) {
PyObject *value = PyLong_FromLong(memo_statistics[i]);
if (value == NULL) {
- MUTEX_UNLOCK();
+ FT_MUTEX_UNLOCK(&_PyRuntime.parser.mutex);
Py_DECREF(ret);
return NULL;
}
// PyList_SetItem borrows a reference to value.
if (PyList_SetItem(ret, i, value) < 0) {
- MUTEX_UNLOCK();
+ FT_MUTEX_UNLOCK(&_PyRuntime.parser.mutex);
Py_DECREF(ret);
return NULL;
}
}
- MUTEX_UNLOCK();
+ FT_MUTEX_UNLOCK(&_PyRuntime.parser.mutex);
return ret;
}
#endif
if (count <= 0) {
count = 1;
}
- MUTEX_LOCK();
+ FT_MUTEX_LOCK(&_PyRuntime.parser.mutex);
memo_statistics[type] += count;
- MUTEX_UNLOCK();
+ FT_MUTEX_UNLOCK(&_PyRuntime.parser.mutex);
}
#endif
p->mark = m->mark;
static void
clear_pending_handling_thread(struct _pending_calls *pending)
{
-#ifdef Py_GIL_DISABLED
- PyMutex_Lock(&pending->mutex);
- pending->handling_thread = NULL;
- PyMutex_Unlock(&pending->mutex);
-#else
+ FT_MUTEX_LOCK(&pending->mutex);
pending->handling_thread = NULL;
-#endif
+ FT_MUTEX_UNLOCK(&pending->mutex);
}
static int
#include "pycore_runtime.h" // _Py_ID()
#include "pycore_ucnhash.h" // _PyUnicode_Name_CAPI
#include "pycore_unicodeobject.h" // _PyUnicode_InternMortal()
-
+#include "pycore_pyatomic_ft_wrappers.h"
static const char *codecs_builtin_error_handlers[] = {
"strict", "ignore", "replace",
PyErr_SetString(PyExc_TypeError, "argument must be callable");
goto onError;
}
-#ifdef Py_GIL_DISABLED
- PyMutex_Lock(&interp->codecs.search_path_mutex);
-#endif
+ FT_MUTEX_LOCK(&interp->codecs.search_path_mutex);
int ret = PyList_Append(interp->codecs.search_path, search_function);
-#ifdef Py_GIL_DISABLED
- PyMutex_Unlock(&interp->codecs.search_path_mutex);
-#endif
+ FT_MUTEX_UNLOCK(&interp->codecs.search_path_mutex);
+
return ret;
onError:
PyObject *codec_search_path = interp->codecs.search_path;
assert(PyList_CheckExact(codec_search_path));
for (Py_ssize_t i = 0; i < PyList_GET_SIZE(codec_search_path); i++) {
-#ifdef Py_GIL_DISABLED
- PyMutex_Lock(&interp->codecs.search_path_mutex);
-#endif
+ FT_MUTEX_LOCK(&interp->codecs.search_path_mutex);
PyObject *item = PyList_GetItemRef(codec_search_path, i);
int ret = 1;
if (item == search_function) {
// while we hold search_path_mutex.
ret = PyList_SetSlice(codec_search_path, i, i+1, NULL);
}
-#ifdef Py_GIL_DISABLED
- PyMutex_Unlock(&interp->codecs.search_path_mutex);
-#endif
+ FT_MUTEX_UNLOCK(&interp->codecs.search_path_mutex);
Py_DECREF(item);
if (ret != 1) {
assert(interp->codecs.search_cache != NULL);
#define _PyLegacyEventHandler_CAST(op) ((_PyLegacyEventHandler *)(op))
-#ifdef Py_GIL_DISABLED
-#define LOCK_SETUP() PyMutex_Lock(&_PyRuntime.ceval.sys_trace_profile_mutex);
-#define UNLOCK_SETUP() PyMutex_Unlock(&_PyRuntime.ceval.sys_trace_profile_mutex);
-#else
-#define LOCK_SETUP()
-#define UNLOCK_SETUP()
-#endif
/* The Py_tracefunc function expects the following arguments:
* obj: the trace object (PyObject *)
* frame: the current frame (PyFrameObject *)
// needs to be decref'd outside of the lock
PyObject *old_profileobj;
- LOCK_SETUP();
+ FT_MUTEX_LOCK(&_PyRuntime.ceval.sys_trace_profile_mutex);
Py_ssize_t profiling_threads = setup_profile(tstate, func, arg, &old_profileobj);
- UNLOCK_SETUP();
+ FT_MUTEX_UNLOCK(&_PyRuntime.ceval.sys_trace_profile_mutex);
Py_XDECREF(old_profileobj);
uint32_t events = 0;
}
// needs to be decref'd outside of the lock
PyObject *old_traceobj;
- LOCK_SETUP();
+ FT_MUTEX_LOCK(&_PyRuntime.ceval.sys_trace_profile_mutex);
assert(tstate->interp->sys_tracing_threads >= 0);
Py_ssize_t tracing_threads = setup_tracing(tstate, func, arg, &old_traceobj);
- UNLOCK_SETUP();
+ FT_MUTEX_UNLOCK(&_PyRuntime.ceval.sys_trace_profile_mutex);
Py_XDECREF(old_traceobj);
if (tracing_threads < 0) {
return -1;
"PyThreadState_Clear: warning: thread still has a generator\n");
}
-#ifdef Py_GIL_DISABLED
- PyMutex_Lock(&_PyRuntime.ceval.sys_trace_profile_mutex);
-#endif
+ FT_MUTEX_LOCK(&_PyRuntime.ceval.sys_trace_profile_mutex);
if (tstate->c_profilefunc != NULL) {
tstate->interp->sys_profiling_threads--;
tstate->c_tracefunc = NULL;
}
-#ifdef Py_GIL_DISABLED
- PyMutex_Unlock(&_PyRuntime.ceval.sys_trace_profile_mutex);
-#endif
+ FT_MUTEX_UNLOCK(&_PyRuntime.ceval.sys_trace_profile_mutex);
Py_CLEAR(tstate->c_profileobj);
Py_CLEAR(tstate->c_traceobj);