#include "pycore_list.h" // struct _Py_list_state
#include "pycore_object_state.h" // struct _py_object_state
#include "pycore_obmalloc.h" // struct _obmalloc_state
+#include "pycore_tstate.h" // _PyThreadStateImpl
#include "pycore_tuple.h" // struct _Py_tuple_state
#include "pycore_typeobject.h" // struct types_state
#include "pycore_unicodeobject.h" // struct _Py_unicode_state
struct _Py_interp_cached_objects cached_objects;
struct _Py_interp_static_objects static_objects;
- /* the initial PyInterpreterState.threads.head */
- PyThreadState _initial_thread;
+ /* the initial PyInterpreterState.threads.head */
+ _PyThreadStateImpl _initial_thread;
Py_ssize_t _interactive_src_count;
};
--- /dev/null
+#ifndef Py_INTERNAL_TSTATE_H
+#define Py_INTERNAL_TSTATE_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+# error "this header requires Py_BUILD_CORE define"
+#endif
+
+
+// Every PyThreadState is actually allocated as a _PyThreadStateImpl. The
+// PyThreadState fields are exposed as part of the C API, although most fields
+// are intended to be private. The _PyThreadStateImpl fields are not exposed.
+typedef struct _PyThreadStateImpl {
+ // semi-public fields are in PyThreadState.
+ PyThreadState base;
+
+ // TODO: add private fields here
+} _PyThreadStateImpl;
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_TSTATE_H */
return res;
}
-static PyThreadState *
+static _PyThreadStateImpl *
alloc_threadstate(void)
{
- return PyMem_RawCalloc(1, sizeof(PyThreadState));
+ return PyMem_RawCalloc(1, sizeof(_PyThreadStateImpl));
}
static void
-free_threadstate(PyThreadState *tstate)
+free_threadstate(_PyThreadStateImpl *tstate)
{
// The initial thread state of the interpreter is allocated
// as part of the interpreter state so should not be freed.
- if (tstate == &tstate->interp->_initial_thread) {
+ if (tstate == &tstate->base.interp->_initial_thread) {
// Restore to _PyThreadState_INIT.
- tstate = &tstate->interp->_initial_thread;
memcpy(tstate,
&initial._main_interpreter._initial_thread,
sizeof(*tstate));
*/
static void
-init_threadstate(PyThreadState *tstate,
+init_threadstate(_PyThreadStateImpl *_tstate,
PyInterpreterState *interp, uint64_t id, int whence)
{
+ PyThreadState *tstate = (PyThreadState *)_tstate;
if (tstate->_status.initialized) {
Py_FatalError("thread state already initialized");
}
static PyThreadState *
new_threadstate(PyInterpreterState *interp, int whence)
{
- PyThreadState *tstate;
+ _PyThreadStateImpl *tstate;
_PyRuntimeState *runtime = interp->runtime;
// We don't need to allocate a thread state for the main interpreter
// (the common case), but doing it later for the other case revealed a
// reentrancy problem (deadlock). So for now we always allocate before
// taking the interpreters lock. See GH-96071.
- PyThreadState *new_tstate = alloc_threadstate();
+ _PyThreadStateImpl *new_tstate = alloc_threadstate();
int used_newtstate;
if (new_tstate == NULL) {
return NULL;
}
init_threadstate(tstate, interp, id, whence);
- add_threadstate(interp, tstate, old_head);
+ add_threadstate(interp, (PyThreadState *)tstate, old_head);
HEAD_UNLOCK(runtime);
if (!used_newtstate) {
// Must be called with lock unlocked to avoid re-entrancy deadlock.
PyMem_RawFree(new_tstate);
}
- return tstate;
+ return (PyThreadState *)tstate;
}
PyThreadState *
while ((tstate = interp->threads.head) != NULL) {
tstate_verify_not_active(tstate);
tstate_delete_common(tstate);
- free_threadstate(tstate);
+ free_threadstate((_PyThreadStateImpl *)tstate);
}
}
_Py_EnsureTstateNotNULL(tstate);
tstate_verify_not_active(tstate);
tstate_delete_common(tstate);
- free_threadstate(tstate);
+ free_threadstate((_PyThreadStateImpl *)tstate);
}
tstate_delete_common(tstate);
current_fast_clear(tstate->interp->runtime);
_PyEval_ReleaseLock(tstate->interp, NULL);
- free_threadstate(tstate);
+ free_threadstate((_PyThreadStateImpl *)tstate);
}
void
for (p = list; p; p = next) {
next = p->next;
PyThreadState_Clear(p);
- free_threadstate(p);
+ free_threadstate((_PyThreadStateImpl *)p);
}
}