* Moves most structs in pycore_ header files into pycore_structs.h and pycore_runtime_structs.h
* Removes many cross-header dependencies
extern "C" {
#endif
-/* Total tool ids available */
-#define _PY_MONITORING_TOOL_IDS 8
-/* Count of all local monitoring events */
-#define _PY_MONITORING_LOCAL_EVENTS 11
-/* Count of all "real" monitoring events (not derived from other events) */
-#define _PY_MONITORING_UNGROUPED_EVENTS 16
-/* Count of all monitoring events */
-#define _PY_MONITORING_EVENTS 19
-
-/* Tables of which tools are active for each monitored event. */
-typedef struct _Py_LocalMonitors {
- uint8_t tools[_PY_MONITORING_LOCAL_EVENTS];
-} _Py_LocalMonitors;
-
-typedef struct _Py_GlobalMonitors {
- uint8_t tools[_PY_MONITORING_UNGROUPED_EVENTS];
-} _Py_GlobalMonitors;
-
-
typedef struct {
PyObject *_co_code;
PyObject *_co_varnames;
PyObject *_co_freevars;
} _PyCoCached;
-/* Ancillary data structure used for instrumentation.
- Line instrumentation creates this with sufficient
- space for one entry per code unit. The total size
- of the data will be `bytes_per_entry * Py_SIZE(code)` */
-typedef struct {
- uint8_t bytes_per_entry;
- uint8_t data[1];
-} _PyCoLineInstrumentationData;
-
-
typedef struct {
int size;
int capacity;
struct _PyExecutorObject *executors[1];
} _PyExecutorArray;
-/* Main data structure used for instrumentation.
- * This is allocated when needed for instrumentation
- */
-typedef struct {
- /* Monitoring specific to this code object */
- _Py_LocalMonitors local_monitors;
- /* Monitoring that is active on this code object */
- _Py_LocalMonitors active_monitors;
- /* The tools that are to be notified for events for the matching code unit */
- uint8_t *tools;
- /* The version of tools when they instrument the code */
- uintptr_t tool_versions[_PY_MONITORING_TOOL_IDS];
- /* Information to support line events */
- _PyCoLineInstrumentationData *lines;
- /* The tools that are to be notified for line events for the matching code unit */
- uint8_t *line_tools;
- /* Information to support instruction events */
- /* The underlying instructions, which can themselves be instrumented */
- uint8_t *per_instruction_opcodes;
- /* The tools that are to be notified for instruction events for the matching code unit */
- uint8_t *per_instruction_tools;
-} _PyCoMonitoringData;
#ifdef Py_GIL_DISABLED
_PyExecutorArray *co_executors; /* executors from optimizer */ \
_PyCoCached *_co_cached; /* cached co_* attributes */ \
uintptr_t _co_instrumentation_version; /* current instrumentation version */ \
- _PyCoMonitoringData *_co_monitoring; /* Monitoring data */ \
+ struct _PyCoMonitoringData *_co_monitoring; /* Monitoring data */ \
Py_ssize_t _co_unique_id; /* ID used for per-thread refcounting */ \
int _co_firsttraceable; /* index of first traceable instruction */ \
/* Scratch space for extra data relating to the code object. \
PyAPI_FUNC(int) _PyObject_SetManagedDict(PyObject *obj, PyObject *new_dict);
PyAPI_FUNC(void) PyObject_ClearManagedDict(PyObject *obj);
-#define TYPE_MAX_WATCHERS 8
typedef int(*PyType_WatchCallback)(PyTypeObject *);
PyAPI_FUNC(int) PyType_AddWatcher(PyType_WatchCallback callback);
#endif
-//###############
-// runtime atexit
-
-typedef void (*atexit_callbackfunc)(void);
-
-struct _atexit_runtime_state {
- PyMutex mutex;
-#define NEXITFUNCS 32
- atexit_callbackfunc callbacks[NEXITFUNCS];
- int ncallbacks;
-};
-
-
-//###################
-// interpreter atexit
-
-typedef void (*atexit_datacallbackfunc)(void *);
-
-typedef struct atexit_callback {
- atexit_datacallbackfunc func;
- void *data;
- struct atexit_callback *next;
-} atexit_callback;
-
-struct atexit_state {
-#ifdef Py_GIL_DISABLED
- PyMutex ll_callbacks_lock;
-#endif
- atexit_callback *ll_callbacks;
-
- // XXX The rest of the state could be moved to the atexit module state
- // and a low-level callback added for it during module exec.
- // For the moment we leave it here.
-
- // List containing tuples with callback information.
- // e.g. [(func, args, kwargs), ...]
- PyObject *callbacks;
-};
#ifdef Py_GIL_DISABLED
# define _PyAtExit_LockCallbacks(state) PyMutex_Lock(&state->ll_callbacks_lock);
#endif
#include <assert.h>
-#include <stdbool.h>
-#include <stdint.h>
-
-
-typedef struct {
- uint16_t value_and_backoff;
-} _Py_BackoffCounter;
-
+#include "pycore_structs.h" // _Py_BackoffCounter
/* 16-bit countdown counters using exponential backoff.
#include "pycore_gil.h" // struct _gil_runtime_state
-typedef int (*_Py_pending_call_func)(void *);
-
-struct _pending_call {
- _Py_pending_call_func func;
- void *arg;
- int flags;
-};
-
-#define PENDINGCALLSARRAYSIZE 300
-
#define MAXPENDINGCALLS PENDINGCALLSARRAYSIZE
/* For interpreter-level pending calls, we want to avoid spending too
much time on pending calls in any one thread, so we apply a limit. */
pending calls for the main thread. */
#define MAXPENDINGCALLSLOOP_MAIN 0
-struct _pending_calls {
- PyThreadState *handling_thread;
- PyMutex mutex;
- /* Request for running pending calls. */
- int32_t npending;
- /* The maximum allowed number of pending calls.
- If the queue fills up to this point then _PyEval_AddPendingCall()
- will return _Py_ADD_PENDING_FULL. */
- int32_t max;
- /* We don't want a flood of pending calls to interrupt any one thread
- for too long, so we keep a limit on the number handled per pass.
- A value of 0 means there is no limit (other than the maximum
- size of the list of pending calls). */
- int32_t maxloop;
- struct _pending_call calls[PENDINGCALLSARRAYSIZE];
- int first;
- int next;
-};
-
-
-typedef enum {
- PERF_STATUS_FAILED = -1, // Perf trampoline is in an invalid state
- PERF_STATUS_NO_INIT = 0, // Perf trampoline is not initialized
- PERF_STATUS_OK = 1, // Perf trampoline is ready to be executed
-} perf_status_t;
-
-#ifdef PY_HAVE_PERF_TRAMPOLINE
-struct code_arena_st;
-
-struct trampoline_api_st {
- void* (*init_state)(void);
- void (*write_state)(void* state, const void *code_addr,
- unsigned int code_size, PyCodeObject* code);
- int (*free_state)(void* state);
- void *state;
- Py_ssize_t code_padding;
-};
-#endif
-
-
-struct _ceval_runtime_state {
- struct {
-#ifdef PY_HAVE_PERF_TRAMPOLINE
- perf_status_t status;
- int perf_trampoline_type;
- Py_ssize_t extra_code_index;
- struct code_arena_st *code_arena;
- struct trampoline_api_st trampoline_api;
- FILE *map_file;
- Py_ssize_t persist_after_fork;
-#else
- int _not_used;
-#endif
- } perf;
- /* Pending calls to be made only on the main thread. */
- // The signal machinery falls back on this
- // so it must be especially stable and efficient.
- // For example, we use a preallocated array
- // for the list of pending calls.
- struct _pending_calls pending_mainthread;
- PyMutex sys_trace_profile_mutex;
-};
-
#ifdef PY_HAVE_PERF_TRAMPOLINE
# define _PyEval_RUNTIME_PERF_INIT \
#endif
-struct _ceval_state {
- /* This variable holds the global instrumentation version. When a thread is
- running, this value is overlaid onto PyThreadState.eval_breaker so that
- changes in the instrumentation version will trigger the eval breaker. */
- uintptr_t instrumentation_version;
- int recursion_limit;
- struct _gil_runtime_state *gil;
- int own_gil;
- struct _pending_calls pending;
-};
-
-
#ifdef __cplusplus
}
#endif
# error "this header requires Py_BUILD_CORE define"
#endif
+#include "pycore_structs.h" // _Py_CODEUNIT
#include "pycore_stackref.h" // _PyStackRef
#include "pycore_lock.h" // PyMutex
#include "pycore_backoff.h" // _Py_BackoffCounter
#include "pycore_tstate.h" // _PyThreadStateImpl
-/* Each instruction in a code object is a fixed-width value,
- * currently 2 bytes: 1-byte opcode + 1-byte oparg. The EXTENDED_ARG
- * opcode allows for larger values but the current limit is 3 uses
- * of EXTENDED_ARG (see Python/compile.c), for a maximum
- * 32-bit value. This aligns with the note in Python/compile.c
- * (compiler_addop_i_line) indicating that the max oparg value is
- * 2**32 - 1, rather than INT_MAX.
- */
-
-typedef union {
- uint16_t cache;
- struct {
- uint8_t code;
- uint8_t arg;
- } op;
- _Py_BackoffCounter counter; // First cache entry of specializable op
-} _Py_CODEUNIT;
-
#define _PyCode_CODE(CO) _Py_RVALUE((_Py_CODEUNIT *)(CO)->co_code_adaptive)
#define _PyCode_NBYTES(CO) (Py_SIZE(CO) * (Py_ssize_t)sizeof(_Py_CODEUNIT))
#define _PyCode_HAS_INSTRUMENTATION(CODE) \
(CODE->_co_instrumentation_version > 0)
-struct _py_code_state {
- PyMutex mutex;
- // Interned constants from code objects. Used by the free-threaded build.
- struct _Py_hashtable_t *constants;
-};
extern PyStatus _PyCode_Init(PyInterpreterState *interp);
extern void _PyCode_Fini(PyInterpreterState *interp);
-#define CODE_MAX_WATCHERS 8
/* PEP 659
* Specialization and quickening structs and helper functions
#define INLINE_CACHE_ENTRIES_CONTAINS_OP CACHE_ENTRIES(_PyContainsOpCache)
-// Borrowed references to common callables:
-struct callable_cache {
- PyObject *isinstance;
- PyObject *len;
- PyObject *list_append;
- PyObject *object__getattribute__;
-};
-
/* "Locals plus" for a code object is the set of locals + cell vars +
* free vars. This relates to variable names as well as offsets into
* the "fast locals" storage array of execution frames. The compiler
#endif
#include "pycore_lock.h" // PyMutex
+#include "pycore_runtime_structs.h" // struct codecs_state
/* Initialize codecs-related state for the given interpreter, including
registering the first codec search function. Must be called before any other
PyObject *codec_info,
const char *errors);
-// Per-interpreter state used by codecs.c.
-struct codecs_state {
- // A list of callable objects used to search for codecs.
- PyObject *search_path;
-
- // A dict mapping codec names to codecs returned from a callable in
- // search_path.
- PyObject *search_cache;
-
- // A dict mapping error handling strategies to functions to implement them.
- PyObject *error_registry;
-
-#ifdef Py_GIL_DISABLED
- // Used to safely delete a specific item from search_path.
- PyMutex search_path_mutex;
-#endif
-
- // Whether or not the rest of the state is initialized.
- int initialized;
-};
-
#ifdef __cplusplus
}
#endif
# error "this header requires Py_BUILD_CORE define"
#endif
-#include "pycore_hamt.h" // PyHamtObject
-
-#define CONTEXT_MAX_WATCHERS 8
+#include "pycore_structs.h"
extern PyTypeObject _PyContextTokenMissing_Type;
#include "pycore_pymath.h" // _PY_SHORT_FLOAT_REPR
-typedef uint32_t ULong;
-
-struct
-Bigint {
- struct Bigint *next;
- int k, maxwds, sign, wds;
- ULong x[1];
-};
-
#if defined(Py_USING_MEMORY_DEBUGGER) || _PY_SHORT_FLOAT_REPR == 0
-struct _dtoa_state {
- int _not_used;
-};
#define _dtoa_state_INIT(INTERP) \
{0}
-#else // !Py_USING_MEMORY_DEBUGGER && _PY_SHORT_FLOAT_REPR != 0
-
-/* The size of the Bigint freelist */
-#define Bigint_Kmax 7
+#else
-/* The size of the cached powers of 5 array */
-#define Bigint_Pow5size 8
-
-#ifndef PRIVATE_MEM
-#define PRIVATE_MEM 2304
-#endif
-#define Bigint_PREALLOC_SIZE \
- ((PRIVATE_MEM+sizeof(double)-1)/sizeof(double))
-
-struct _dtoa_state {
- // p5s is an array of powers of 5 of the form:
- // 5**(2**(i+2)) for 0 <= i < Bigint_Pow5size
- struct Bigint *p5s[Bigint_Pow5size];
- // XXX This should be freed during runtime fini.
- struct Bigint *freelist[Bigint_Kmax+1];
- double preallocated[Bigint_PREALLOC_SIZE];
- double *preallocated_next;
-};
#define _dtoa_state_INIT(INTERP) \
{ \
.preallocated_next = (INTERP)->dtoa.preallocated, \
}
-
-#endif // !Py_USING_MEMORY_DEBUGGER
-
+#endif
extern double _Py_dg_strtod(const char *str, char **ptr);
extern char* _Py_dg_dtoa(double d, int mode, int ndigits,
#endif
#include <locale.h> // struct lconv
+#include "pycore_runtime_structs.h" // _Py_error_handler
/* A routine to check if a file descriptor can be select()-ed. */
#define _PyIsSelectable_fd(FD) ((unsigned int)(FD) < (unsigned int)FD_SETSIZE)
#endif
-struct _fileutils_state {
- int force_ascii;
-};
-
-typedef enum {
- _Py_ERROR_UNKNOWN=0,
- _Py_ERROR_STRICT,
- _Py_ERROR_SURROGATEESCAPE,
- _Py_ERROR_REPLACE,
- _Py_ERROR_IGNORE,
- _Py_ERROR_BACKSLASHREPLACE,
- _Py_ERROR_SURROGATEPASS,
- _Py_ERROR_XMLCHARREFREPLACE,
- _Py_ERROR_OTHER
-} _Py_error_handler;
-
// Export for '_testinternalcapi' shared extension
PyAPI_FUNC(_Py_error_handler) _Py_GetErrorHandler(const char *errors);
extern void _PyFloat_FiniType(PyInterpreterState *);
-/* other API */
-
-enum _py_float_format_type {
- _py_float_format_unknown,
- _py_float_format_ieee_big_endian,
- _py_float_format_ieee_little_endian,
-};
-
-struct _Py_float_runtime_state {
- enum _py_float_format_type float_format;
- enum _py_float_format_type double_format;
-};
-
-
PyAPI_FUNC(void) _PyFloat_ExactDealloc(PyObject *op);
size_t nargsf,
PyObject *kwnames);
-#define FUNC_MAX_WATCHERS 8
#define FUNC_VERSION_UNSET 0
#define FUNC_VERSION_CLEARED 1
#define FUNC_VERSION_FIRST_VALID 2
-#define FUNC_VERSION_CACHE_SIZE (1<<12) /* Must be a power of 2 */
-
-struct _func_version_cache_item {
- PyFunctionObject *func;
- PyObject *code;
-};
-
-struct _py_func_state {
-#ifdef Py_GIL_DISABLED
- // Protects next_version
- PyMutex mutex;
-#endif
-
- uint32_t next_version;
- // Borrowed references to function and code objects whose
- // func_version % FUNC_VERSION_CACHE_SIZE
- // once was equal to the index in the table.
- // They are cleared when the function or code object is deallocated.
- struct _func_version_cache_item func_version_cache[FUNC_VERSION_CACHE_SIZE];
-};
-
extern PyFunctionObject* _PyFunction_FromConstructor(PyFrameConstructor *constr);
static inline int
# error "this header requires Py_BUILD_CORE define"
#endif
-/* GC information is stored BEFORE the object structure. */
-typedef struct {
- // Tagged pointer to next object in the list.
- // 0 means the object is not tracked
- uintptr_t _gc_next;
-
- // Tagged pointer to previous object in the list.
- // Lowest two bits are used for flags documented later.
- uintptr_t _gc_prev;
-} PyGC_Head;
-
-#define _PyGC_Head_UNUSED PyGC_Head
+#include "pycore_runtime_structs.h"
/* Get an object's GC head */
#endif
}
-
-/* GC runtime state */
-
-/* If we change this, we need to change the default value in the
- signature of gc.collect. */
-#define NUM_GENERATIONS 3
/*
NOTE: about untracking of mutable objects.
the algorithm was refined in response to issue #14775.
*/
-struct gc_generation {
- PyGC_Head head;
- int threshold; /* collection threshold */
- int count; /* count of allocations or collections of younger
- generations */
-};
-
-struct gc_collection_stats {
- /* number of collected objects */
- Py_ssize_t collected;
- /* total number of uncollectable objects (put into gc.garbage) */
- Py_ssize_t uncollectable;
-};
-
-/* Running stats per generation */
-struct gc_generation_stats {
- /* total number of collections */
- Py_ssize_t collections;
- /* total number of collected objects */
- Py_ssize_t collected;
- /* total number of uncollectable objects (put into gc.garbage) */
- Py_ssize_t uncollectable;
-};
-
-enum _GCPhase {
- GC_PHASE_MARK = 0,
- GC_PHASE_COLLECT = 1
-};
-
-struct _gc_runtime_state {
- /* List of objects that still need to be cleaned up, singly linked
- * via their gc headers' gc_prev pointers. */
- PyObject *trash_delete_later;
- /* Current call-stack depth of tp_dealloc calls. */
- int trash_delete_nesting;
-
- /* Is automatic collection enabled? */
- int enabled;
- int debug;
- /* linked lists of container objects */
- struct gc_generation young;
- struct gc_generation old[2];
- /* a permanent generation which won't be collected */
- struct gc_generation permanent_generation;
- struct gc_generation_stats generation_stats[NUM_GENERATIONS];
- /* true if we are currently running the collector */
- int collecting;
- /* list of uncollectable objects */
- PyObject *garbage;
- /* a list of callbacks to be invoked when collection is performed */
- PyObject *callbacks;
-
- Py_ssize_t heap_size;
- Py_ssize_t work_to_do;
- /* Which of the old spaces is the visited space */
- int visited_space;
- int phase;
-
-#ifdef Py_GIL_DISABLED
- /* This is the number of objects that survived the last full
- collection. It approximates the number of long lived objects
- tracked by the GC.
-
- (by "full collection", we mean a collection of the oldest
- generation). */
- Py_ssize_t long_lived_total;
- /* This is the number of objects that survived all "non-full"
- collections, and are awaiting to undergo a full collection for
- the first time. */
- Py_ssize_t long_lived_pending;
-
- /* True if gc.freeze() has been used. */
- int freeze_active;
-#endif
-};
-
-#ifdef Py_GIL_DISABLED
-struct _gc_thread_state {
- /* Thread-local allocation count. */
- Py_ssize_t alloc_count;
-};
-#endif
-
-
extern void _PyGC_InitState(struct _gc_runtime_state *);
extern Py_ssize_t _PyGC_Collect(PyThreadState *tstate, int generation, _PyGC_Reason reason);
# error "this header requires Py_BUILD_CORE define"
#endif
-#include "pycore_context.h" // _PyContextTokenMissing
-#include "pycore_gc.h" // _PyGC_Head_UNUSED
-#include "pycore_global_strings.h" // struct _Py_global_strings
-#include "pycore_hamt.h" // PyHamtNode_Bitmap
-#include "pycore_hashtable.h" // _Py_hashtable_t
-#include "pycore_typeobject.h" // pytype_slotdef
-
-
-// These would be in pycore_long.h if it weren't for an include cycle.
-#define _PY_NSMALLPOSINTS 257
-#define _PY_NSMALLNEGINTS 5
-
// Only immutable objects should be considered runtime-global.
// All others must be per-interpreter.
#define _Py_SINGLETON(NAME) \
_Py_GLOBAL_OBJECT(singletons.NAME)
-struct _Py_cached_objects {
- // XXX We could statically allocate the hashtable.
- _Py_hashtable_t *interned_strings;
-};
-
-struct _Py_static_objects {
- struct {
- /* Small integers are preallocated in this array so that they
- * can be shared.
- * The integers that are preallocated are those in the range
- * -_PY_NSMALLNEGINTS (inclusive) to _PY_NSMALLPOSINTS (exclusive).
- */
- PyLongObject small_ints[_PY_NSMALLNEGINTS + _PY_NSMALLPOSINTS];
-
- PyBytesObject bytes_empty;
- struct {
- PyBytesObject ob;
- char eos;
- } bytes_characters[256];
-
- struct _Py_global_strings strings;
-
- _PyGC_Head_UNUSED _tuple_empty_gc_not_used;
- PyTupleObject tuple_empty;
-
- _PyGC_Head_UNUSED _hamt_bitmap_node_empty_gc_not_used;
- PyHamtNode_Bitmap hamt_bitmap_node_empty;
- _PyContextTokenMissing context_token_missing;
- } singletons;
-};
#define _Py_INTERP_CACHED_OBJECT(interp, NAME) \
(interp)->cached_objects.NAME
-struct _Py_interp_cached_objects {
-#ifdef Py_GIL_DISABLED
- PyMutex interned_mutex;
-#endif
- PyObject *interned_strings;
-
- /* object.__reduce__ */
- PyObject *objreduce;
- PyObject *type_slots_pname;
- pytype_slotdef *type_slots_ptrs[MAX_EQUIV];
-
- /* TypeVar and related types */
- PyTypeObject *generic_type;
- PyTypeObject *typevar_type;
- PyTypeObject *typevartuple_type;
- PyTypeObject *paramspec_type;
- PyTypeObject *paramspecargs_type;
- PyTypeObject *paramspeckwargs_type;
- PyTypeObject *constevaluator_type;
-};
#define _Py_INTERP_STATIC_OBJECT(interp, NAME) \
(interp)->static_objects.NAME
#define _Py_INTERP_SINGLETON(interp, NAME) \
_Py_INTERP_STATIC_OBJECT(interp, singletons.NAME)
-struct _Py_interp_static_objects {
- struct {
- int _not_used;
- // hamt_empty is here instead of global because of its weakreflist.
- _PyGC_Head_UNUSED _hamt_empty_gc_not_used;
- PyHamtObject hamt_empty;
- PyBaseExceptionObject last_resort_memory_error;
- } singletons;
-};
-
#ifdef __cplusplus
}
# error "this header requires Py_BUILD_CORE define"
#endif
+#include "pycore_global_objects.h" // struct _Py_SINGLETON
+
// The data structure & init here are inspired by Tools/build/deepfreeze.py.
// All field names generated by ASCII_STR() have a common prefix,
# error "this header requires Py_BUILD_CORE define"
#endif
+#include "pycore_structs.h" // PyHamtNode
/*
HAMT tree is shaped by hashes of keys. Every group of 5 bits of a hash denotes
#define PyHamt_Check(o) Py_IS_TYPE((o), &_PyHamt_Type)
-/* Abstract tree node. */
-typedef struct {
- PyObject_HEAD
-} PyHamtNode;
-
-
-/* An HAMT immutable mapping collection. */
-typedef struct {
- PyObject_HEAD
- PyHamtNode *h_root;
- PyObject *h_weakreflist;
- Py_ssize_t h_count;
-} PyHamtObject;
-
-
-typedef struct {
- PyObject_VAR_HEAD
- uint32_t b_bitmap;
- PyObject *b_array[1];
-} PyHamtNode_Bitmap;
-
-
/* A struct to hold the state of depth-first traverse of the tree.
HAMT is an immutable collection. Iterators will hold a strong reference
#endif
#include "pycore_lock.h" // PyMutex
+#include "pycore_runtime_structs.h" // _import_state
#include "pycore_hashtable.h" // _Py_hashtable_t
extern int _PyImport_IsInitialized(PyInterpreterState *);
PyObject *modules
);
-
-struct _import_runtime_state {
- /* The builtin modules (defined in config.c). */
- struct _inittab *inittab;
- /* The most recent value assigned to a PyModuleDef.m_base.m_index.
- This is incremented each time PyModuleDef_Init() is called,
- which is just about every time an extension module is imported.
- See PyInterpreterState.modules_by_index for more info. */
- Py_ssize_t last_module_index;
- struct {
- /* A lock to guard the cache. */
- PyMutex mutex;
- /* The actual cache of (filename, name, PyModuleDef) for modules.
- Only legacy (single-phase init) extension modules are added
- and only if they support multiple initialization (m_size >= 0)
- or are imported in the main interpreter.
- This is initialized lazily in fix_up_extension() in import.c.
- Modules are added there and looked up in _imp.find_extension(). */
- _Py_hashtable_t *hashtable;
- } extensions;
- /* Package context -- the full module name for package imports */
- const char * pkgcontext;
-};
-
-struct _import_state {
- /* cached sys.modules dictionary */
- PyObject *modules;
- /* This is the list of module objects for all legacy (single-phase init)
- extension modules ever loaded in this process (i.e. imported
- in this interpreter or in any other). Py_None stands in for
- modules that haven't actually been imported in this interpreter.
-
- A module's index (PyModuleDef.m_base.m_index) is used to look up
- the corresponding module object for this interpreter, if any.
- (See PyState_FindModule().) When any extension module
- is initialized during import, its moduledef gets initialized by
- PyModuleDef_Init(), and the first time that happens for each
- PyModuleDef, its index gets set to the current value of
- a global counter (see _PyRuntimeState.imports.last_module_index).
- The entry for that index in this interpreter remains unset until
- the module is actually imported here. (Py_None is used as
- a placeholder.) Note that multi-phase init modules always get
- an index for which there will never be a module set.
-
- This is initialized lazily in PyState_AddModule(), which is also
- where modules get added. */
- PyObject *modules_by_index;
- /* importlib module._bootstrap */
- PyObject *importlib;
- /* override for config->use_frozen_modules (for tests)
- (-1: "off", 1: "on", 0: no override) */
- int override_frozen_modules;
- int override_multi_interp_extensions_check;
-#ifdef HAVE_DLOPEN
- int dlopenflags;
-#endif
- PyObject *import_func;
- /* The global import lock. */
- _PyRecursiveMutex lock;
- /* diagnostic info in PyImport_ImportModuleLevelObject() */
- struct {
- int import_level;
- PyTime_t accumulated;
- int header;
- } find_and_load;
-};
-
#ifdef HAVE_DLOPEN
# include <dlfcn.h> // RTLD_NOW, RTLD_LAZY
# if HAVE_DECL_RTLD_NOW
#ifdef Py_GIL_DISABLED
+#include "pycore_interp_structs.h"
+
// This contains code for allocating unique indices in an array. It is used by
// the free-threaded build to assign each thread a globally unique index into
// each code object's thread-local bytecode array.
-// A min-heap of indices
-typedef struct _PyIndexHeap {
- int32_t *values;
-
- // Number of items stored in values
- Py_ssize_t size;
-
- // Maximum number of items that can be stored in values
- Py_ssize_t capacity;
-} _PyIndexHeap;
-
-// An unbounded pool of indices. Indices are allocated starting from 0. They
-// may be released back to the pool once they are no longer in use.
-typedef struct _PyIndexPool {
- PyMutex mutex;
-
- // Min heap of indices available for allocation
- _PyIndexHeap free_indices;
-
- // Next index to allocate if no free indices are available
- int32_t next_index;
-} _PyIndexPool;
// Allocate the smallest available index. Returns -1 on error.
extern int32_t _PyIndexPool_AllocIndex(_PyIndexPool *indices);
# error "this header requires Py_BUILD_CORE define"
#endif
-#include "pycore_frame.h" // _PyInterpreterFrame
+#include "pycore_structs.h" // _Py_CODEUNIT
#ifdef __cplusplus
extern "C" {
#endif
-#define PY_MONITORING_TOOL_IDS 8
-
typedef uint32_t _PyMonitoringEventSet;
/* Tool IDs */
extern int
_Py_call_instrumentation(PyThreadState *tstate, int event,
- _PyInterpreterFrame *frame, _Py_CODEUNIT *instr);
+ struct _PyInterpreterFrame *frame, _Py_CODEUNIT *instr);
extern int
-_Py_call_instrumentation_line(PyThreadState *tstate, _PyInterpreterFrame* frame,
+_Py_call_instrumentation_line(PyThreadState *tstate, struct _PyInterpreterFrame* frame,
_Py_CODEUNIT *instr, _Py_CODEUNIT *prev);
extern int
_Py_call_instrumentation_instruction(
- PyThreadState *tstate, _PyInterpreterFrame* frame, _Py_CODEUNIT *instr);
+ PyThreadState *tstate, struct _PyInterpreterFrame* frame, _Py_CODEUNIT *instr);
_Py_CODEUNIT *
_Py_call_instrumentation_jump(
_Py_CODEUNIT *instr, PyThreadState *tstate, int event,
- _PyInterpreterFrame *frame, _Py_CODEUNIT *src, _Py_CODEUNIT *dest);
+ struct _PyInterpreterFrame *frame, _Py_CODEUNIT *src, _Py_CODEUNIT *dest);
extern int
_Py_call_instrumentation_arg(PyThreadState *tstate, int event,
- _PyInterpreterFrame *frame, _Py_CODEUNIT *instr, PyObject *arg);
+ struct _PyInterpreterFrame *frame, _Py_CODEUNIT *instr, PyObject *arg);
extern int
_Py_call_instrumentation_2args(PyThreadState *tstate, int event,
- _PyInterpreterFrame *frame, _Py_CODEUNIT *instr, PyObject *arg0, PyObject *arg1);
+ struct _PyInterpreterFrame *frame, _Py_CODEUNIT *instr, PyObject *arg0, PyObject *arg1);
extern void
_Py_call_instrumentation_exc2(PyThreadState *tstate, int event,
- _PyInterpreterFrame *frame, _Py_CODEUNIT *instr, PyObject *arg0, PyObject *arg1);
+ struct _PyInterpreterFrame *frame, _Py_CODEUNIT *instr, PyObject *arg0, PyObject *arg1);
extern int
_Py_Instrumentation_GetLine(PyCodeObject *code, int index);
extern PyObject _PyInstrumentation_MISSING;
extern PyObject _PyInstrumentation_DISABLE;
+
+/* Total tool ids available */
+#define PY_MONITORING_TOOL_IDS 8
+/* Count of all local monitoring events */
+#define _PY_MONITORING_LOCAL_EVENTS 11
+/* Count of all "real" monitoring events (not derived from other events) */
+#define _PY_MONITORING_UNGROUPED_EVENTS 16
+/* Count of all monitoring events */
+#define _PY_MONITORING_EVENTS 19
+
+/* Tables of which tools are active for each monitored event. */
+typedef struct _Py_LocalMonitors {
+ uint8_t tools[_PY_MONITORING_LOCAL_EVENTS];
+} _Py_LocalMonitors;
+
+typedef struct _Py_GlobalMonitors {
+ uint8_t tools[_PY_MONITORING_UNGROUPED_EVENTS];
+} _Py_GlobalMonitors;
+
+/* Ancillary data structure used for instrumentation.
+ Line instrumentation creates this with sufficient
+ space for one entry per code unit. The total size
+ of the data will be `bytes_per_entry * Py_SIZE(code)` */
+typedef struct {
+ uint8_t bytes_per_entry;
+ uint8_t data[1];
+} _PyCoLineInstrumentationData;
+
+
+/* Main data structure used for instrumentation.
+ * This is allocated when needed for instrumentation
+ */
+typedef struct _PyCoMonitoringData {
+ /* Monitoring specific to this code object */
+ _Py_LocalMonitors local_monitors;
+ /* Monitoring that is active on this code object */
+ _Py_LocalMonitors active_monitors;
+ /* The tools that are to be notified for events for the matching code unit */
+ uint8_t *tools;
+ /* The version of tools when they instrument the code */
+ uintptr_t tool_versions[PY_MONITORING_TOOL_IDS];
+ /* Information to support line events */
+ _PyCoLineInstrumentationData *lines;
+ /* The tools that are to be notified for line events for the matching code unit */
+ uint8_t *line_tools;
+ /* Information to support instruction events */
+ /* The underlying instructions, which can themselves be instrumented */
+ uint8_t *per_instruction_opcodes;
+ /* The tools that are to be notified for instruction events for the matching code unit */
+ uint8_t *per_instruction_tools;
+} _PyCoMonitoringData;
+
+
#ifdef __cplusplus
}
#endif
#include <stdbool.h> // bool
-#include "pycore_ast_state.h" // struct ast_state
-#include "pycore_atexit.h" // struct atexit_state
+#include "pycore_runtime_structs.h"
#include "pycore_ceval_state.h" // struct _ceval_state
#include "pycore_code.h" // struct callable_cache
#include "pycore_codecs.h" // struct codecs_state
#include "pycore_warnings.h" // struct _warnings_runtime_state
-struct _Py_long_state {
- int max_str_digits;
-};
-
-// Support for stop-the-world events. This exists in both the PyRuntime struct
-// for global pauses and in each PyInterpreterState for per-interpreter pauses.
-struct _stoptheworld_state {
- PyMutex mutex; // Serializes stop-the-world attempts.
-
- // NOTE: The below fields are protected by HEAD_LOCK(runtime), not by the
- // above mutex.
- bool requested; // Set when a pause is requested.
- bool world_stopped; // Set when the world is stopped.
- bool is_global; // Set when contained in PyRuntime struct.
-
- PyEvent stop_event; // Set when thread_countdown reaches zero.
- Py_ssize_t thread_countdown; // Number of threads that must pause.
-
- PyThreadState *requester; // Thread that requested the pause (may be NULL).
-};
-
-#ifdef Py_GIL_DISABLED
-// This should be prime but otherwise the choice is arbitrary. A larger value
-// increases concurrency at the expense of memory.
-# define NUM_WEAKREF_LIST_LOCKS 127
-#endif
-
-/* cross-interpreter data registry */
-
-/* Tracks some rare events per-interpreter, used by the optimizer to turn on/off
- specific optimizations. */
-typedef struct _rare_events {
- /* Setting an object's class, obj.__class__ = ... */
- uint8_t set_class;
- /* Setting the bases of a class, cls.__bases__ = ... */
- uint8_t set_bases;
- /* Setting the PEP 523 frame eval function, _PyInterpreterState_SetFrameEvalFunc() */
- uint8_t set_eval_frame_func;
- /* Modifying the builtins, __builtins__.__dict__[var] = ... */
- uint8_t builtin_dict;
- /* Modifying a function, e.g. func.__defaults__ = ..., etc. */
- uint8_t func_modification;
-} _rare_events;
/* interpreter state */
-/* PyInterpreterState holds the global state for one of the runtime's
- interpreters. Typically the initial (main) interpreter is the only one.
-
- The PyInterpreterState typedef is in Include/pytypedefs.h.
- */
-struct _is {
-
- /* This struct contains the eval_breaker,
- * which is by far the hottest field in this struct
- * and should be placed at the beginning. */
- struct _ceval_state ceval;
-
- PyInterpreterState *next;
-
- int64_t id;
- Py_ssize_t id_refcount;
- int requires_idref;
-
#define _PyInterpreterState_WHENCE_NOTSET -1
#define _PyInterpreterState_WHENCE_UNKNOWN 0
#define _PyInterpreterState_WHENCE_RUNTIME 1
#define _PyInterpreterState_WHENCE_XI 4
#define _PyInterpreterState_WHENCE_STDLIB 5
#define _PyInterpreterState_WHENCE_MAX 5
- long _whence;
-
- /* Has been initialized to a safe state.
-
- In order to be effective, this must be set to 0 during or right
- after allocation. */
- int _initialized;
- /* Has been fully initialized via pylifecycle.c. */
- int _ready;
- int finalizing;
-
- uintptr_t last_restart_version;
- struct pythreads {
- uint64_t next_unique_id;
- /* The linked list of threads, newest first. */
- PyThreadState *head;
- _PyThreadStateImpl *preallocated;
- /* The thread currently executing in the __main__ module, if any. */
- PyThreadState *main;
- /* Used in Modules/_threadmodule.c. */
- Py_ssize_t count;
- /* Support for runtime thread stack size tuning.
- A value of 0 means using the platform's default stack size
- or the size specified by the THREAD_STACK_SIZE macro. */
- /* Used in Python/thread.c. */
- size_t stacksize;
- } threads;
-
- /* Reference to the _PyRuntime global variable. This field exists
- to not have to pass runtime in addition to tstate to a function.
- Get runtime from tstate: tstate->interp->runtime. */
- struct pyruntimestate *runtime;
-
- /* Set by Py_EndInterpreter().
-
- Use _PyInterpreterState_GetFinalizing()
- and _PyInterpreterState_SetFinalizing()
- to access it, don't access it directly. */
- PyThreadState* _finalizing;
- /* The ID of the OS thread in which we are finalizing. */
- unsigned long _finalizing_id;
-
- struct _gc_runtime_state gc;
-
- /* The following fields are here to avoid allocation during init.
- The data is exposed through PyInterpreterState pointer fields.
- These fields should not be accessed directly outside of init.
-
- All other PyInterpreterState pointer fields are populated when
- needed and default to NULL.
-
- For now there are some exceptions to that rule, which require
- allocation during init. These will be addressed on a case-by-case
- basis. Also see _PyRuntimeState regarding the various mutex fields.
- */
-
- // Dictionary of the sys module
- PyObject *sysdict;
-
- // Dictionary of the builtins module
- PyObject *builtins;
-
- struct _import_state imports;
-
- /* The per-interpreter GIL, which might not be used. */
- struct _gil_runtime_state _gil;
-
- /* ---------- IMPORTANT ---------------------------
- The fields above this line are declared as early as
- possible to facilitate out-of-process observability
- tools. */
-
- struct codecs_state codecs;
-
- PyConfig config;
- unsigned long feature_flags;
-
- PyObject *dict; /* Stores per-interpreter state */
-
- PyObject *sysdict_copy;
- PyObject *builtins_copy;
- // Initialized to _PyEval_EvalFrameDefault().
- _PyFrameEvalFunction eval_frame;
-
- PyFunction_WatchCallback func_watchers[FUNC_MAX_WATCHERS];
- // One bit is set for each non-NULL entry in func_watchers
- uint8_t active_func_watchers;
-
- Py_ssize_t co_extra_user_count;
- freefunc co_extra_freefuncs[MAX_CO_EXTRA_USERS];
-
- /* cross-interpreter data and utils */
- _PyXI_state_t xi;
-
-#ifdef HAVE_FORK
- PyObject *before_forkers;
- PyObject *after_forkers_parent;
- PyObject *after_forkers_child;
-#endif
-
- struct _warnings_runtime_state warnings;
- struct atexit_state atexit;
- struct _stoptheworld_state stoptheworld;
- struct _qsbr_shared qsbr;
-
-#if defined(Py_GIL_DISABLED)
- struct _mimalloc_interp_state mimalloc;
- struct _brc_state brc; // biased reference counting state
- struct _Py_unique_id_pool unique_ids; // object ids for per-thread refcounts
- PyMutex weakref_locks[NUM_WEAKREF_LIST_LOCKS];
- _PyIndexPool tlbc_indices;
-#endif
- // Per-interpreter list of tasks, any lingering tasks from thread
- // states gets added here and removed from the corresponding
- // thread state's list.
- struct llist_node asyncio_tasks_head;
- // `asyncio_tasks_lock` is used when tasks are moved
- // from thread's list to interpreter's list.
- PyMutex asyncio_tasks_lock;
-
- // Per-interpreter state for the obmalloc allocator. For the main
- // interpreter and for all interpreters that don't have their
- // own obmalloc state, this points to the static structure in
- // obmalloc.c obmalloc_state_main. For other interpreters, it is
- // heap allocated by _PyMem_init_obmalloc() and freed when the
- // interpreter structure is freed. In the case of a heap allocated
- // obmalloc state, it is not safe to hold on to or use memory after
- // the interpreter is freed. The obmalloc state corresponding to
- // that allocated memory is gone. See free_obmalloc_arenas() for
- // more comments.
- struct _obmalloc_state *obmalloc;
-
- PyObject *audit_hooks;
- PyType_WatchCallback type_watchers[TYPE_MAX_WATCHERS];
- PyCode_WatchCallback code_watchers[CODE_MAX_WATCHERS];
- PyContext_WatchCallback context_watchers[CONTEXT_MAX_WATCHERS];
- // One bit is set for each non-NULL entry in code_watchers
- uint8_t active_code_watchers;
- uint8_t active_context_watchers;
-
- struct _py_object_state object_state;
- struct _Py_unicode_state unicode;
- struct _Py_long_state long_state;
- struct _dtoa_state dtoa;
- struct _py_func_state func_state;
- struct _py_code_state code_state;
-
- struct _Py_dict_state dict_state;
- struct _Py_exc_state exc_state;
- struct _Py_mem_interp_free_queue mem_free_queue;
-
- struct ast_state ast;
- struct types_state types;
- struct callable_cache callable_cache;
- bool jit;
- _PyExecutorObject *executor_list_head;
- size_t trace_run_counter;
- _rare_events rare_events;
- PyDict_WatchCallback builtins_dict_watcher;
-
- _Py_GlobalMonitors monitors;
- bool sys_profile_initialized;
- bool sys_trace_initialized;
- Py_ssize_t sys_profiling_threads; /* Count of threads with c_profilefunc set */
- Py_ssize_t sys_tracing_threads; /* Count of threads with c_tracefunc set */
- PyObject *monitoring_callables[PY_MONITORING_TOOL_IDS][_PY_MONITORING_EVENTS];
- PyObject *monitoring_tool_names[PY_MONITORING_TOOL_IDS];
- uintptr_t monitoring_tool_versions[PY_MONITORING_TOOL_IDS];
-
- struct _Py_interp_cached_objects cached_objects;
- struct _Py_interp_static_objects static_objects;
-
- Py_ssize_t _interactive_src_count;
-
- /* the initial PyInterpreterState.threads.head */
- _PyThreadStateImpl _initial_thread;
- // _initial_thread should be the last field of PyInterpreterState.
- // See https://github.com/python/cpython/issues/127117.
-
-#if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG)
- uint64_t next_stackref;
- _Py_hashtable_t *open_stackrefs_table;
-# ifdef Py_STACKREF_CLOSE_DEBUG
- _Py_hashtable_t *closed_stackrefs_table;
-# endif
-#endif
-};
/* other API */
--- /dev/null
+#ifndef Py_INTERNAL_INTERP_STRUCTS_H
+#define Py_INTERNAL_INTERP_STRUCTS_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "pycore_structs.h"
+#include "pycore_pymath.h" // _PY_SHORT_FLOAT_REPR
+#include "pycore_llist.h"
+#include "pycore_ast_state.h" // struct ast_state
+
+
+/* This file contains the struct definitions for interpreter state
+ * and other necessary structs */
+
+#define CODE_MAX_WATCHERS 8
+#define CONTEXT_MAX_WATCHERS 8
+#define FUNC_MAX_WATCHERS 8
+#define TYPE_MAX_WATCHERS 8
+
+
+#ifdef Py_GIL_DISABLED
+// This should be prime but otherwise the choice is arbitrary. A larger value
+// increases concurrency at the expense of memory.
+# define NUM_WEAKREF_LIST_LOCKS 127
+#endif
+
+typedef int (*_Py_pending_call_func)(void *);
+
+struct _pending_call {
+ _Py_pending_call_func func;
+ void *arg;
+ int flags;
+};
+
+#define PENDINGCALLSARRAYSIZE 300
+
+struct _pending_calls {
+ PyThreadState *handling_thread;
+ PyMutex mutex;
+ /* Request for running pending calls. */
+ int32_t npending;
+ /* The maximum allowed number of pending calls.
+ If the queue fills up to this point then _PyEval_AddPendingCall()
+ will return _Py_ADD_PENDING_FULL. */
+ int32_t max;
+ /* We don't want a flood of pending calls to interrupt any one thread
+ for too long, so we keep a limit on the number handled per pass.
+ A value of 0 means there is no limit (other than the maximum
+ size of the list of pending calls). */
+ int32_t maxloop;
+ struct _pending_call calls[PENDINGCALLSARRAYSIZE];
+ int first;
+ int next;
+};
+
+typedef enum {
+ PERF_STATUS_FAILED = -1, // Perf trampoline is in an invalid state
+ PERF_STATUS_NO_INIT = 0, // Perf trampoline is not initialized
+ PERF_STATUS_OK = 1, // Perf trampoline is ready to be executed
+} perf_status_t;
+
+#ifdef PY_HAVE_PERF_TRAMPOLINE
+struct code_arena_st;
+
+struct trampoline_api_st {
+ void* (*init_state)(void);
+ void (*write_state)(void* state, const void *code_addr,
+ unsigned int code_size, PyCodeObject* code);
+ int (*free_state)(void* state);
+ void *state;
+ Py_ssize_t code_padding;
+};
+#endif
+
+
+struct _ceval_runtime_state {
+ struct {
+#ifdef PY_HAVE_PERF_TRAMPOLINE
+ perf_status_t status;
+ int perf_trampoline_type;
+ Py_ssize_t extra_code_index;
+ struct code_arena_st *code_arena;
+ struct trampoline_api_st trampoline_api;
+ FILE *map_file;
+ Py_ssize_t persist_after_fork;
+#else
+ int _not_used;
+#endif
+ } perf;
+ /* Pending calls to be made only on the main thread. */
+ // The signal machinery falls back on this
+ // so it must be especially stable and efficient.
+ // For example, we use a preallocated array
+ // for the list of pending calls.
+ struct _pending_calls pending_mainthread;
+ PyMutex sys_trace_profile_mutex;
+};
+
+
+struct _ceval_state {
+ /* This variable holds the global instrumentation version. When a thread is
+ running, this value is overlaid onto PyThreadState.eval_breaker so that
+ changes in the instrumentation version will trigger the eval breaker. */
+ uintptr_t instrumentation_version;
+ int recursion_limit;
+ struct _gil_runtime_state *gil;
+ int own_gil;
+ struct _pending_calls pending;
+};
+
+
+//###############
+// runtime atexit
+
+typedef void (*atexit_callbackfunc)(void);
+
+struct _atexit_runtime_state {
+ PyMutex mutex;
+#define NEXITFUNCS 32
+ atexit_callbackfunc callbacks[NEXITFUNCS];
+ int ncallbacks;
+};
+
+
+//###################
+// interpreter atexit
+
+typedef void (*atexit_datacallbackfunc)(void *);
+
+typedef struct atexit_callback {
+ atexit_datacallbackfunc func;
+ void *data;
+ struct atexit_callback *next;
+} atexit_callback;
+
+struct atexit_state {
+#ifdef Py_GIL_DISABLED
+ PyMutex ll_callbacks_lock;
+#endif
+ atexit_callback *ll_callbacks;
+
+ // XXX The rest of the state could be moved to the atexit module state
+ // and a low-level callback added for it during module exec.
+ // For the moment we leave it here.
+
+ // List containing tuples with callback information.
+ // e.g. [(func, args, kwargs), ...]
+ PyObject *callbacks;
+};
+
+
+/****** Garbage collector **********/
+
+/* GC information is stored BEFORE the object structure. */
+typedef struct {
+ // Tagged pointer to next object in the list.
+ // 0 means the object is not tracked
+ uintptr_t _gc_next;
+
+ // Tagged pointer to previous object in the list.
+ // Lowest two bits are used for flags documented later.
+ uintptr_t _gc_prev;
+} PyGC_Head;
+
+#define _PyGC_Head_UNUSED PyGC_Head
+
+struct gc_generation {
+ PyGC_Head head;
+ int threshold; /* collection threshold */
+ int count; /* count of allocations or collections of younger
+ generations */
+};
+
+struct gc_collection_stats {
+ /* number of collected objects */
+ Py_ssize_t collected;
+ /* total number of uncollectable objects (put into gc.garbage) */
+ Py_ssize_t uncollectable;
+};
+
+/* Running stats per generation */
+struct gc_generation_stats {
+ /* total number of collections */
+ Py_ssize_t collections;
+ /* total number of collected objects */
+ Py_ssize_t collected;
+ /* total number of uncollectable objects (put into gc.garbage) */
+ Py_ssize_t uncollectable;
+};
+
+enum _GCPhase {
+ GC_PHASE_MARK = 0,
+ GC_PHASE_COLLECT = 1
+};
+
+/* If we change this, we need to change the default value in the
+ signature of gc.collect. */
+#define NUM_GENERATIONS 3
+
+struct _gc_runtime_state {
+ /* List of objects that still need to be cleaned up, singly linked
+ * via their gc headers' gc_prev pointers. */
+ PyObject *trash_delete_later;
+ /* Current call-stack depth of tp_dealloc calls. */
+ int trash_delete_nesting;
+
+ /* Is automatic collection enabled? */
+ int enabled;
+ int debug;
+ /* linked lists of container objects */
+ struct gc_generation young;
+ struct gc_generation old[2];
+ /* a permanent generation which won't be collected */
+ struct gc_generation permanent_generation;
+ struct gc_generation_stats generation_stats[NUM_GENERATIONS];
+ /* true if we are currently running the collector */
+ int collecting;
+ /* list of uncollectable objects */
+ PyObject *garbage;
+ /* a list of callbacks to be invoked when collection is performed */
+ PyObject *callbacks;
+
+ Py_ssize_t heap_size;
+ Py_ssize_t work_to_do;
+ /* Which of the old spaces is the visited space */
+ int visited_space;
+ int phase;
+
+#ifdef Py_GIL_DISABLED
+ /* This is the number of objects that survived the last full
+ collection. It approximates the number of long lived objects
+ tracked by the GC.
+
+ (by "full collection", we mean a collection of the oldest
+ generation). */
+ Py_ssize_t long_lived_total;
+ /* This is the number of objects that survived all "non-full"
+ collections, and are awaiting to undergo a full collection for
+ the first time. */
+ Py_ssize_t long_lived_pending;
+
+ /* True if gc.freeze() has been used. */
+ int freeze_active;
+#endif
+};
+
+#ifdef Py_GIL_DISABLED
+struct _gc_thread_state {
+ /* Thread-local allocation count. */
+ Py_ssize_t alloc_count;
+};
+#endif
+
+#include "pycore_gil.h"
+
+/****** Thread state **************/
+#include "pytypedefs.h"
+#include "pystate.h"
+#include "pycore_tstate.h"
+
+
+/**** Import ********/
+
+struct _import_runtime_state {
+ /* The builtin modules (defined in config.c). */
+ struct _inittab *inittab;
+ /* The most recent value assigned to a PyModuleDef.m_base.m_index.
+ This is incremented each time PyModuleDef_Init() is called,
+ which is just about every time an extension module is imported.
+ See PyInterpreterState.modules_by_index for more info. */
+ Py_ssize_t last_module_index;
+ struct {
+ /* A lock to guard the cache. */
+ PyMutex mutex;
+ /* The actual cache of (filename, name, PyModuleDef) for modules.
+ Only legacy (single-phase init) extension modules are added
+ and only if they support multiple initialization (m_size >= 0)
+ or are imported in the main interpreter.
+ This is initialized lazily in fix_up_extension() in import.c.
+ Modules are added there and looked up in _imp.find_extension(). */
+ struct _Py_hashtable_t *hashtable;
+ } extensions;
+ /* Package context -- the full module name for package imports */
+ const char * pkgcontext;
+};
+
+struct _import_state {
+ /* cached sys.modules dictionary */
+ PyObject *modules;
+ /* This is the list of module objects for all legacy (single-phase init)
+ extension modules ever loaded in this process (i.e. imported
+ in this interpreter or in any other). Py_None stands in for
+ modules that haven't actually been imported in this interpreter.
+
+ A module's index (PyModuleDef.m_base.m_index) is used to look up
+ the corresponding module object for this interpreter, if any.
+ (See PyState_FindModule().) When any extension module
+ is initialized during import, its moduledef gets initialized by
+ PyModuleDef_Init(), and the first time that happens for each
+ PyModuleDef, its index gets set to the current value of
+ a global counter (see _PyRuntimeState.imports.last_module_index).
+ The entry for that index in this interpreter remains unset until
+ the module is actually imported here. (Py_None is used as
+ a placeholder.) Note that multi-phase init modules always get
+ an index for which there will never be a module set.
+
+ This is initialized lazily in PyState_AddModule(), which is also
+ where modules get added. */
+ PyObject *modules_by_index;
+ /* importlib module._bootstrap */
+ PyObject *importlib;
+ /* override for config->use_frozen_modules (for tests)
+ (-1: "off", 1: "on", 0: no override) */
+ int override_frozen_modules;
+ int override_multi_interp_extensions_check;
+#ifdef HAVE_DLOPEN
+ int dlopenflags;
+#endif
+ PyObject *import_func;
+ /* The global import lock. */
+ _PyRecursiveMutex lock;
+ /* diagnostic info in PyImport_ImportModuleLevelObject() */
+ struct {
+ int import_level;
+ PyTime_t accumulated;
+ int header;
+ } find_and_load;
+};
+
+
+
+/********** Interpreter state **************/
+
+#include "pycore_object_state.h"
+#include "pycore_crossinterp.h"
+
+
+struct _Py_long_state {
+ int max_str_digits;
+};
+
+struct codecs_state {
+ // A list of callable objects used to search for codecs.
+ PyObject *search_path;
+
+ // A dict mapping codec names to codecs returned from a callable in
+ // search_path.
+ PyObject *search_cache;
+
+ // A dict mapping error handling strategies to functions to implement them.
+ PyObject *error_registry;
+
+#ifdef Py_GIL_DISABLED
+ // Used to safely delete a specific item from search_path.
+ PyMutex search_path_mutex;
+#endif
+
+ // Whether or not the rest of the state is initialized.
+ int initialized;
+};
+
+// Support for stop-the-world events. This exists in both the PyRuntime struct
+// for global pauses and in each PyInterpreterState for per-interpreter pauses.
+struct _stoptheworld_state {
+ PyMutex mutex; // Serializes stop-the-world attempts.
+
+ // NOTE: The below fields are protected by HEAD_LOCK(runtime), not by the
+ // above mutex.
+ bool requested; // Set when a pause is requested.
+ bool world_stopped; // Set when the world is stopped.
+ bool is_global; // Set when contained in PyRuntime struct.
+
+ PyEvent stop_event; // Set when thread_countdown reaches zero.
+ Py_ssize_t thread_countdown; // Number of threads that must pause.
+
+ PyThreadState *requester; // Thread that requested the pause (may be NULL).
+};
+
+/* Tracks some rare events per-interpreter, used by the optimizer to turn on/off
+ specific optimizations. */
+typedef struct _rare_events {
+ /* Setting an object's class, obj.__class__ = ... */
+ uint8_t set_class;
+ /* Setting the bases of a class, cls.__bases__ = ... */
+ uint8_t set_bases;
+ /* Setting the PEP 523 frame eval function, _PyInterpreterState_SetFrameEvalFunc() */
+ uint8_t set_eval_frame_func;
+ /* Modifying the builtins, __builtins__.__dict__[var] = ... */
+ uint8_t builtin_dict;
+ /* Modifying a function, e.g. func.__defaults__ = ..., etc. */
+ uint8_t func_modification;
+} _rare_events;
+
+struct
+Bigint {
+ struct Bigint *next;
+ int k, maxwds, sign, wds;
+ uint32_t x[1];
+};
+
+#if defined(Py_USING_MEMORY_DEBUGGER) || _PY_SHORT_FLOAT_REPR == 0
+
+struct _dtoa_state {
+ int _not_used;
+};
+
+#else // !Py_USING_MEMORY_DEBUGGER && _PY_SHORT_FLOAT_REPR != 0
+
+/* The size of the Bigint freelist */
+#define Bigint_Kmax 7
+
+/* The size of the cached powers of 5 array */
+#define Bigint_Pow5size 8
+
+#ifndef PRIVATE_MEM
+#define PRIVATE_MEM 2304
+#endif
+#define Bigint_PREALLOC_SIZE \
+ ((PRIVATE_MEM+sizeof(double)-1)/sizeof(double))
+
+struct _dtoa_state {
+ // p5s is an array of powers of 5 of the form:
+ // 5**(2**(i+2)) for 0 <= i < Bigint_Pow5size
+ struct Bigint *p5s[Bigint_Pow5size];
+ // XXX This should be freed during runtime fini.
+ struct Bigint *freelist[Bigint_Kmax+1];
+ double preallocated[Bigint_PREALLOC_SIZE];
+ double *preallocated_next;
+};
+
+#endif // !Py_USING_MEMORY_DEBUGGER
+
+struct _py_code_state {
+ PyMutex mutex;
+ // Interned constants from code objects. Used by the free-threaded build.
+ struct _Py_hashtable_t *constants;
+};
+
+#define FUNC_VERSION_CACHE_SIZE (1<<12) /* Must be a power of 2 */
+
+struct _func_version_cache_item {
+ PyFunctionObject *func;
+ PyObject *code;
+};
+
+struct _py_func_state {
+#ifdef Py_GIL_DISABLED
+ // Protects next_version
+ PyMutex mutex;
+#endif
+
+ uint32_t next_version;
+ // Borrowed references to function and code objects whose
+ // func_version % FUNC_VERSION_CACHE_SIZE
+ // once was equal to the index in the table.
+ // They are cleared when the function or code object is deallocated.
+ struct _func_version_cache_item func_version_cache[FUNC_VERSION_CACHE_SIZE];
+};
+
+#include "pycore_dict_state.h"
+#include "pycore_exceptions.h"
+
+
+/****** type state *********/
+
+/* For now we hard-code this to a value for which we are confident
+ all the static builtin types will fit (for all builds). */
+#define _Py_MAX_MANAGED_STATIC_BUILTIN_TYPES 200
+#define _Py_MAX_MANAGED_STATIC_EXT_TYPES 10
+#define _Py_MAX_MANAGED_STATIC_TYPES \
+ (_Py_MAX_MANAGED_STATIC_BUILTIN_TYPES + _Py_MAX_MANAGED_STATIC_EXT_TYPES)
+
+struct _types_runtime_state {
+ /* Used to set PyTypeObject.tp_version_tag for core static types. */
+ // bpo-42745: next_version_tag remains shared by all interpreters
+ // because of static types.
+ unsigned int next_version_tag;
+
+ struct {
+ struct {
+ PyTypeObject *type;
+ int64_t interp_count;
+ } types[_Py_MAX_MANAGED_STATIC_TYPES];
+ } managed_static;
+};
+
+
+// Type attribute lookup cache: speed up attribute and method lookups,
+// see _PyType_Lookup().
+struct type_cache_entry {
+ unsigned int version; // initialized from type->tp_version_tag
+#ifdef Py_GIL_DISABLED
+ _PySeqLock sequence;
+#endif
+ PyObject *name; // reference to exactly a str or None
+ PyObject *value; // borrowed reference or NULL
+};
+
+#define MCACHE_SIZE_EXP 12
+
+struct type_cache {
+ struct type_cache_entry hashtable[1 << MCACHE_SIZE_EXP];
+};
+
+typedef struct {
+ PyTypeObject *type;
+ int isbuiltin;
+ int readying;
+ int ready;
+ // XXX tp_dict can probably be statically allocated,
+ // instead of dynamically and stored on the interpreter.
+ PyObject *tp_dict;
+ PyObject *tp_subclasses;
+ /* We never clean up weakrefs for static builtin types since
+ they will effectively never get triggered. However, there
+ are also some diagnostic uses for the list of weakrefs,
+ so we still keep it. */
+ PyObject *tp_weaklist;
+} managed_static_type_state;
+
+#define TYPE_VERSION_CACHE_SIZE (1<<12) /* Must be a power of 2 */
+
+struct types_state {
+ /* Used to set PyTypeObject.tp_version_tag.
+ It starts at _Py_MAX_GLOBAL_TYPE_VERSION_TAG + 1,
+ where all those lower numbers are used for core static types. */
+ unsigned int next_version_tag;
+
+ struct type_cache type_cache;
+
+ /* Every static builtin type is initialized for each interpreter
+ during its own initialization, including for the main interpreter
+ during global runtime initialization. This is done by calling
+ _PyStaticType_InitBuiltin().
+
+ The first time a static builtin type is initialized, all the
+ normal PyType_Ready() stuff happens. The only difference from
+ normal is that there are three PyTypeObject fields holding
+ objects which are stored here (on PyInterpreterState) rather
+ than in the corresponding PyTypeObject fields. Those are:
+ tp_dict (cls.__dict__), tp_subclasses (cls.__subclasses__),
+ and tp_weaklist.
+
+ When a subinterpreter is initialized, each static builtin type
+ is still initialized, but only the interpreter-specific portion,
+ namely those three objects.
+
+ Those objects are stored in the PyInterpreterState.types.builtins
+ array, at the index corresponding to each specific static builtin
+ type. That index (a size_t value) is stored in the tp_subclasses
+ field. For static builtin types, we re-purposed the now-unused
+ tp_subclasses to avoid adding another field to PyTypeObject.
+ In all other cases tp_subclasses holds a dict like before.
+ (The field was previously defined as PyObject*, but is now void*
+ to reflect its dual use.)
+
+ The index for each static builtin type isn't statically assigned.
+ Instead it is calculated the first time a type is initialized
+ (by the main interpreter). The index matches the order in which
+ the type was initialized relative to the others. The actual
+    value comes from the current value of builtins.num_initialized,
+    as each type is initialized for the main interpreter.
+
+    builtins.num_initialized is incremented once for each static
+    builtin type.  Once initialization is over for a subinterpreter,
+    the value will be the same as for all other interpreters. */
+ struct {
+ size_t num_initialized;
+ managed_static_type_state initialized[_Py_MAX_MANAGED_STATIC_BUILTIN_TYPES];
+ } builtins;
+ /* We apply a similar strategy for managed extension modules. */
+ struct {
+ size_t num_initialized;
+ size_t next_index;
+ managed_static_type_state initialized[_Py_MAX_MANAGED_STATIC_EXT_TYPES];
+ } for_extensions;
+ PyMutex mutex;
+
+ // Borrowed references to type objects whose
+ // tp_version_tag % TYPE_VERSION_CACHE_SIZE
+ // once was equal to the index in the table.
+ // They are cleared when the type object is deallocated.
+ PyTypeObject *type_version_cache[TYPE_VERSION_CACHE_SIZE];
+};
+
+struct _warnings_runtime_state {
+ /* Both 'filters' and 'onceregistry' can be set in warnings.py;
+ get_warnings_attr() will reset these variables accordingly. */
+ PyObject *filters; /* List */
+ PyObject *once_registry; /* Dict */
+ PyObject *default_action; /* String */
+ _PyRecursiveMutex lock;
+ long filters_version;
+};
+
+struct _Py_mem_interp_free_queue {
+ int has_work; // true if the queue is not empty
+ PyMutex mutex; // protects the queue
+ struct llist_node head; // queue of _mem_work_chunk items
+};
+
+
+/****** Unicode state *********/
+
+typedef enum {
+ _Py_ERROR_UNKNOWN=0,
+ _Py_ERROR_STRICT,
+ _Py_ERROR_SURROGATEESCAPE,
+ _Py_ERROR_REPLACE,
+ _Py_ERROR_IGNORE,
+ _Py_ERROR_BACKSLASHREPLACE,
+ _Py_ERROR_SURROGATEPASS,
+ _Py_ERROR_XMLCHARREFREPLACE,
+ _Py_ERROR_OTHER
+} _Py_error_handler;
+
+struct _Py_unicode_runtime_ids {
+ PyMutex mutex;
+ // next_index value must be preserved when Py_Initialize()/Py_Finalize()
+ // is called multiple times: see _PyUnicode_FromId() implementation.
+ Py_ssize_t next_index;
+};
+
+struct _Py_unicode_runtime_state {
+ struct _Py_unicode_runtime_ids ids;
+};
+
+/* fs_codec.encoding is initialized to NULL.
+ Later, it is set to a non-NULL string by _PyUnicode_InitEncodings(). */
+struct _Py_unicode_fs_codec {
+ char *encoding; // Filesystem encoding (encoded to UTF-8)
+ int utf8; // encoding=="utf-8"?
+ char *errors; // Filesystem errors (encoded to UTF-8)
+ _Py_error_handler error_handler;
+};
+
+struct _Py_unicode_ids {
+ Py_ssize_t size;
+ PyObject **array;
+};
+
+#include "pycore_ucnhash.h"
+
+struct _Py_unicode_state {
+ struct _Py_unicode_fs_codec fs_codec;
+
+ _PyUnicode_Name_CAPI *ucnhash_capi;
+
+ // Unicode identifiers (_Py_Identifier): see _PyUnicode_FromId()
+ struct _Py_unicode_ids ids;
+};
+
+// Borrowed references to common callables:
+struct callable_cache {
+ PyObject *isinstance;
+ PyObject *len;
+ PyObject *list_append;
+ PyObject *object__getattribute__;
+};
+
+#include "pycore_obmalloc.h"
+
+/* Length of array of slotdef pointers used to store slots with the
+ same __name__. There should be at most MAX_EQUIV-1 slotdef entries with
+ the same __name__, for any __name__. Since that's a static property, it is
+ appropriate to declare fixed-size arrays for this. */
+#define MAX_EQUIV 10
+
+typedef struct wrapperbase pytype_slotdef;
+
+
+struct _Py_interp_cached_objects {
+#ifdef Py_GIL_DISABLED
+ PyMutex interned_mutex;
+#endif
+ PyObject *interned_strings;
+
+ /* object.__reduce__ */
+ PyObject *objreduce;
+ PyObject *type_slots_pname;
+ pytype_slotdef *type_slots_ptrs[MAX_EQUIV];
+
+ /* TypeVar and related types */
+ PyTypeObject *generic_type;
+ PyTypeObject *typevar_type;
+ PyTypeObject *typevartuple_type;
+ PyTypeObject *paramspec_type;
+ PyTypeObject *paramspecargs_type;
+ PyTypeObject *paramspeckwargs_type;
+ PyTypeObject *constevaluator_type;
+};
+
+struct _Py_interp_static_objects {
+ struct {
+ int _not_used;
+ // hamt_empty is here instead of global because of its weakreflist.
+ _PyGC_Head_UNUSED _hamt_empty_gc_not_used;
+ PyHamtObject hamt_empty;
+ PyBaseExceptionObject last_resort_memory_error;
+ } singletons;
+};
+
+#include "pycore_instruments.h"
+
+
+#ifdef Py_GIL_DISABLED
+
+// A min-heap of indices
+typedef struct _PyIndexHeap {
+ int32_t *values;
+
+ // Number of items stored in values
+ Py_ssize_t size;
+
+ // Maximum number of items that can be stored in values
+ Py_ssize_t capacity;
+} _PyIndexHeap;
+
+// An unbounded pool of indices. Indices are allocated starting from 0. They
+// may be released back to the pool once they are no longer in use.
+typedef struct _PyIndexPool {
+ PyMutex mutex;
+
+ // Min heap of indices available for allocation
+ _PyIndexHeap free_indices;
+
+ // Next index to allocate if no free indices are available
+ int32_t next_index;
+} _PyIndexPool;
+
+typedef union _Py_unique_id_entry {
+ // Points to the next free type id, when part of the freelist
+ union _Py_unique_id_entry *next;
+
+ // Stores the object when the id is assigned
+ PyObject *obj;
+} _Py_unique_id_entry;
+
+struct _Py_unique_id_pool {
+ PyMutex mutex;
+
+ // combined table of object with allocated unique ids and unallocated ids.
+ _Py_unique_id_entry *table;
+
+ // Next entry to allocate inside 'table' or NULL
+ _Py_unique_id_entry *freelist;
+
+ // size of 'table'
+ Py_ssize_t size;
+};
+
+#endif
+
+
+/* PyInterpreterState holds the global state for one of the runtime's
+ interpreters. Typically the initial (main) interpreter is the only one.
+
+ The PyInterpreterState typedef is in Include/pytypedefs.h.
+ */
+struct _is {
+
+ /* This struct contains the eval_breaker,
+ * which is by far the hottest field in this struct
+ * and should be placed at the beginning. */
+ struct _ceval_state ceval;
+
+ PyInterpreterState *next;
+
+ int64_t id;
+ Py_ssize_t id_refcount;
+ int requires_idref;
+
+ long _whence;
+
+ /* Has been initialized to a safe state.
+
+ In order to be effective, this must be set to 0 during or right
+ after allocation. */
+ int _initialized;
+ /* Has been fully initialized via pylifecycle.c. */
+ int _ready;
+ int finalizing;
+
+ uintptr_t last_restart_version;
+ struct pythreads {
+ uint64_t next_unique_id;
+ /* The linked list of threads, newest first. */
+ PyThreadState *head;
+ _PyThreadStateImpl *preallocated;
+ /* The thread currently executing in the __main__ module, if any. */
+ PyThreadState *main;
+ /* Used in Modules/_threadmodule.c. */
+ Py_ssize_t count;
+ /* Support for runtime thread stack size tuning.
+ A value of 0 means using the platform's default stack size
+ or the size specified by the THREAD_STACK_SIZE macro. */
+ /* Used in Python/thread.c. */
+ size_t stacksize;
+ } threads;
+
+ /* Reference to the _PyRuntime global variable. This field exists
+ to not have to pass runtime in addition to tstate to a function.
+ Get runtime from tstate: tstate->interp->runtime. */
+ struct pyruntimestate *runtime;
+
+ /* Set by Py_EndInterpreter().
+
+ Use _PyInterpreterState_GetFinalizing()
+ and _PyInterpreterState_SetFinalizing()
+ to access it, don't access it directly. */
+ PyThreadState* _finalizing;
+ /* The ID of the OS thread in which we are finalizing. */
+ unsigned long _finalizing_id;
+
+ struct _gc_runtime_state gc;
+
+ /* The following fields are here to avoid allocation during init.
+ The data is exposed through PyInterpreterState pointer fields.
+ These fields should not be accessed directly outside of init.
+
+ All other PyInterpreterState pointer fields are populated when
+ needed and default to NULL.
+
+ For now there are some exceptions to that rule, which require
+ allocation during init. These will be addressed on a case-by-case
+ basis. Also see _PyRuntimeState regarding the various mutex fields.
+ */
+
+ // Dictionary of the sys module
+ PyObject *sysdict;
+
+ // Dictionary of the builtins module
+ PyObject *builtins;
+
+ struct _import_state imports;
+
+ /* The per-interpreter GIL, which might not be used. */
+ struct _gil_runtime_state _gil;
+
+ /* ---------- IMPORTANT ---------------------------
+ The fields above this line are declared as early as
+ possible to facilitate out-of-process observability
+ tools. */
+
+ struct codecs_state codecs;
+
+ PyConfig config;
+ unsigned long feature_flags;
+
+ PyObject *dict; /* Stores per-interpreter state */
+
+ PyObject *sysdict_copy;
+ PyObject *builtins_copy;
+ // Initialized to _PyEval_EvalFrameDefault().
+ _PyFrameEvalFunction eval_frame;
+
+ PyFunction_WatchCallback func_watchers[FUNC_MAX_WATCHERS];
+ // One bit is set for each non-NULL entry in func_watchers
+ uint8_t active_func_watchers;
+
+ Py_ssize_t co_extra_user_count;
+ freefunc co_extra_freefuncs[MAX_CO_EXTRA_USERS];
+
+ /* cross-interpreter data and utils */
+ _PyXI_state_t xi;
+
+#ifdef HAVE_FORK
+ PyObject *before_forkers;
+ PyObject *after_forkers_parent;
+ PyObject *after_forkers_child;
+#endif
+
+ struct _warnings_runtime_state warnings;
+ struct atexit_state atexit;
+ struct _stoptheworld_state stoptheworld;
+ struct _qsbr_shared qsbr;
+
+#if defined(Py_GIL_DISABLED)
+ struct _mimalloc_interp_state mimalloc;
+ struct _brc_state brc; // biased reference counting state
+ struct _Py_unique_id_pool unique_ids; // object ids for per-thread refcounts
+ PyMutex weakref_locks[NUM_WEAKREF_LIST_LOCKS];
+ _PyIndexPool tlbc_indices;
+#endif
+ // Per-interpreter list of tasks, any lingering tasks from thread
+ // states gets added here and removed from the corresponding
+ // thread state's list.
+ struct llist_node asyncio_tasks_head;
+ // `asyncio_tasks_lock` is used when tasks are moved
+ // from thread's list to interpreter's list.
+ PyMutex asyncio_tasks_lock;
+
+ // Per-interpreter state for the obmalloc allocator. For the main
+ // interpreter and for all interpreters that don't have their
+ // own obmalloc state, this points to the static structure in
+ // obmalloc.c obmalloc_state_main. For other interpreters, it is
+ // heap allocated by _PyMem_init_obmalloc() and freed when the
+ // interpreter structure is freed. In the case of a heap allocated
+ // obmalloc state, it is not safe to hold on to or use memory after
+ // the interpreter is freed. The obmalloc state corresponding to
+ // that allocated memory is gone. See free_obmalloc_arenas() for
+ // more comments.
+ struct _obmalloc_state *obmalloc;
+
+ PyObject *audit_hooks;
+ PyType_WatchCallback type_watchers[TYPE_MAX_WATCHERS];
+ PyCode_WatchCallback code_watchers[CODE_MAX_WATCHERS];
+ PyContext_WatchCallback context_watchers[CONTEXT_MAX_WATCHERS];
+ // One bit is set for each non-NULL entry in code_watchers
+ uint8_t active_code_watchers;
+ uint8_t active_context_watchers;
+
+ struct _py_object_state object_state;
+ struct _Py_unicode_state unicode;
+ struct _Py_long_state long_state;
+ struct _dtoa_state dtoa;
+ struct _py_func_state func_state;
+ struct _py_code_state code_state;
+
+ struct _Py_dict_state dict_state;
+ struct _Py_exc_state exc_state;
+ struct _Py_mem_interp_free_queue mem_free_queue;
+
+ struct ast_state ast;
+ struct types_state types;
+ struct callable_cache callable_cache;
+ bool jit;
+ struct _PyExecutorObject *executor_list_head;
+ size_t trace_run_counter;
+ _rare_events rare_events;
+ PyDict_WatchCallback builtins_dict_watcher;
+
+ _Py_GlobalMonitors monitors;
+ bool sys_profile_initialized;
+ bool sys_trace_initialized;
+ Py_ssize_t sys_profiling_threads; /* Count of threads with c_profilefunc set */
+ Py_ssize_t sys_tracing_threads; /* Count of threads with c_tracefunc set */
+ PyObject *monitoring_callables[PY_MONITORING_TOOL_IDS][_PY_MONITORING_EVENTS];
+ PyObject *monitoring_tool_names[PY_MONITORING_TOOL_IDS];
+ uintptr_t monitoring_tool_versions[PY_MONITORING_TOOL_IDS];
+
+ struct _Py_interp_cached_objects cached_objects;
+ struct _Py_interp_static_objects static_objects;
+
+ Py_ssize_t _interactive_src_count;
+
+ /* the initial PyInterpreterState.threads.head */
+ _PyThreadStateImpl _initial_thread;
+ // _initial_thread should be the last field of PyInterpreterState.
+ // See https://github.com/python/cpython/issues/127117.
+
+#if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG)
+ uint64_t next_stackref;
+ _Py_hashtable_t *open_stackrefs_table;
+# ifdef Py_STACKREF_CLOSE_DEBUG
+ _Py_hashtable_t *closed_stackrefs_table;
+# endif
+#endif
+};
+
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* Py_INTERNAL_INTERP_STRUCTS_H */
#endif
#include "pycore_bytesobject.h" // _PyBytesWriter
-#include "pycore_global_objects.h"// _PY_NSMALLNEGINTS
+#include "pycore_runtime_structs.h"// _PY_NSMALLNEGINTS
+#include "pycore_global_objects.h"// _PY_SINGLETON
#include "pycore_runtime.h" // _PyRuntime
/*
#include "pycore_global_strings.h" // _Py_DECLARE_STR()
#include "pycore_pyarena.h" // PyArena
-
-#ifdef Py_DEBUG
-#define _PYPEGEN_NSTATISTICS 2000
-#endif
-
-struct _parser_runtime_state {
-#ifdef Py_DEBUG
- long memo_statistics[_PYPEGEN_NSTATISTICS];
-#ifdef Py_GIL_DISABLED
- PyMutex mutex;
-#endif
-#else
- int _not_used;
-#endif
- struct _expr dummy_name;
-};
-
_Py_DECLARE_STR(empty, "")
#if defined(Py_DEBUG) && defined(Py_GIL_DISABLED)
#define _parser_runtime_state_INIT \
#endif
-struct pyhash_runtime_state {
- struct {
-#ifndef MS_WINDOWS
- int fd;
- dev_t st_dev;
- ino_t st_ino;
-#else
- // This is a placeholder so the struct isn't empty on Windows.
- int _not_used;
-#endif
- } urandom_cache;
-};
-
#ifndef MS_WINDOWS
# define _py_urandom_cache_INIT \
{ \
// wcsdup() using PyMem_RawMalloc()
extern wchar_t* _PyMem_RawWcsdup(const wchar_t *str);
-typedef struct {
- /* We tag each block with an API ID in order to tag API violations */
- char api_id;
- PyMemAllocatorEx alloc;
-} debug_alloc_api_t;
-
-struct _pymem_allocators {
- PyMutex mutex;
- struct {
- PyMemAllocatorEx raw;
- PyMemAllocatorEx mem;
- PyMemAllocatorEx obj;
- } standard;
- struct {
- debug_alloc_api_t raw;
- debug_alloc_api_t mem;
- debug_alloc_api_t obj;
- } debug;
- int is_debug_enabled;
- PyObjectArenaAllocator obj_arena;
-};
-
-struct _Py_mem_interp_free_queue {
- int has_work; // true if the queue is not empty
- PyMutex mutex; // protects the queue
- struct llist_node head; // queue of _mem_work_chunk items
-};
-
/* Special bytes broadcast into debug memory blocks at appropriate times.
Strings of these are unlikely to be valid addresses, floats, ints or
7-bit ASCII.
# error "this header requires Py_BUILD_CORE define"
#endif
-#include "pycore_runtime.h" // _PyRuntime
-#include "pycore_tstate.h" // _PyThreadStateImpl
+#include "pycore_runtime_structs.h" // _PyRuntime
+#include "pycore_runtime.h" // _PyRuntimeState_GetFinalizing
+#include "pycore_tstate.h" // _PyThreadStateImpl
+#include "pycore_interp.h" // _PyInterpreterState_GetConfig
// Values for PyThreadState.state. A thread must be in the "attached" state
// before calling most Python APIs. If the GIL is enabled, then "attached"
# error "this header requires Py_BUILD_CORE define"
#endif
-#include "dynamic_annotations.h" // _Py_ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX
-#include "pycore_llist.h" // struct llist_node
+#include "dynamic_annotations.h" // _Py_ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX
+#include "pycore_llist.h" // struct llist_node
// Get _POSIX_THREADS and _POSIX_SEMAPHORES macros if available
#if (defined(HAVE_UNISTD_H) && !defined(_POSIX_THREADS) \
# error "this header requires Py_BUILD_CORE define"
#endif
-#include "pycore_atexit.h" // struct _atexit_runtime_state
-#include "pycore_audit.h" // _Py_AuditHookEntry
-#include "pycore_ceval_state.h" // struct _ceval_runtime_state
-#include "pycore_crossinterp.h" // _PyXI_global_state_t
-#include "pycore_debug_offsets.h" // _Py_DebugOffsets
-#include "pycore_faulthandler.h" // struct _faulthandler_runtime_state
-#include "pycore_floatobject.h" // struct _Py_float_runtime_state
-#include "pycore_import.h" // struct _import_runtime_state
-#include "pycore_interp.h" // PyInterpreterState
-#include "pycore_object_state.h" // struct _py_object_runtime_state
-#include "pycore_parser.h" // struct _parser_runtime_state
-#include "pycore_pyhash.h" // struct pyhash_runtime_state
-#include "pycore_pymem.h" // struct _pymem_allocators
-#include "pycore_pythread.h" // struct _pythread_runtime_state
-#include "pycore_signal.h" // struct _signals_runtime_state
-#include "pycore_time.h" // struct _PyTime_runtime_state
-#include "pycore_tracemalloc.h" // struct _tracemalloc_runtime_state
-#include "pycore_typeobject.h" // struct _types_runtime_state
-#include "pycore_unicodeobject.h" // struct _Py_unicode_runtime_state
+#include "pycore_runtime_structs.h"
-
-/* Full Python runtime state */
-
-/* _PyRuntimeState holds the global state for the CPython runtime.
- That data is exported by the internal API as a global variable
- (_PyRuntime, defined near the top of pylifecycle.c).
- */
-typedef struct pyruntimestate {
- /* This field must be first to facilitate locating it by out of process
- * debuggers. Out of process debuggers will use the offsets contained in this
- * field to be able to locate other fields in several interpreter structures
- * in a way that doesn't require them to know the exact layout of those
- * structures.
- *
- * IMPORTANT:
- * This struct is **NOT** backwards compatible between minor version of the
- * interpreter and the members, order of members and size can change between
- * minor versions. This struct is only guaranteed to be stable between patch
- * versions for a given minor version of the interpreter.
- */
- _Py_DebugOffsets debug_offsets;
-
- /* Has been initialized to a safe state.
-
- In order to be effective, this must be set to 0 during or right
- after allocation. */
- int _initialized;
-
- /* Is running Py_PreInitialize()? */
- int preinitializing;
-
- /* Is Python preinitialized? Set to 1 by Py_PreInitialize() */
- int preinitialized;
-
- /* Is Python core initialized? Set to 1 by _Py_InitializeCore() */
- int core_initialized;
-
- /* Is Python fully initialized? Set to 1 by Py_Initialize() */
- int initialized;
-
- /* Set by Py_FinalizeEx(). Only reset to NULL if Py_Initialize()
- is called again.
-
- Use _PyRuntimeState_GetFinalizing() and _PyRuntimeState_SetFinalizing()
- to access it, don't access it directly. */
- PyThreadState *_finalizing;
- /* The ID of the OS thread in which we are finalizing. */
- unsigned long _finalizing_id;
-
- struct pyinterpreters {
- PyMutex mutex;
- /* The linked list of interpreters, newest first. */
- PyInterpreterState *head;
- /* The runtime's initial interpreter, which has a special role
- in the operation of the runtime. It is also often the only
- interpreter. */
- PyInterpreterState *main;
- /* next_id is an auto-numbered sequence of small
- integers. It gets initialized in _PyInterpreterState_Enable(),
- which is called in Py_Initialize(), and used in
- PyInterpreterState_New(). A negative interpreter ID
- indicates an error occurred. The main interpreter will
- always have an ID of 0. Overflow results in a RuntimeError.
- If that becomes a problem later then we can adjust, e.g. by
- using a Python int. */
- int64_t next_id;
- } interpreters;
-
- /* Platform-specific identifier and PyThreadState, respectively, for the
- main thread in the main interpreter. */
- unsigned long main_thread;
- PyThreadState *main_tstate;
-
- /* ---------- IMPORTANT ---------------------------
- The fields above this line are declared as early as
- possible to facilitate out-of-process observability
- tools. */
-
- /* cross-interpreter data and utils */
- _PyXI_global_state_t xi;
-
- struct _pymem_allocators allocators;
- struct _obmalloc_global_state obmalloc;
- struct pyhash_runtime_state pyhash_state;
- struct _pythread_runtime_state threads;
- struct _signals_runtime_state signals;
-
- /* Used for the thread state bound to the current thread. */
- Py_tss_t autoTSSkey;
-
- /* Used instead of PyThreadState.trash when there is not current tstate. */
- Py_tss_t trashTSSkey;
-
- PyWideStringList orig_argv;
-
- struct _parser_runtime_state parser;
-
- struct _atexit_runtime_state atexit;
-
- struct _import_runtime_state imports;
- struct _ceval_runtime_state ceval;
- struct _gilstate_runtime_state {
- /* bpo-26558: Flag to disable PyGILState_Check().
- If set to non-zero, PyGILState_Check() always return 1. */
- int check_enabled;
- /* The single PyInterpreterState used by this process'
- GILState implementation
- */
- /* TODO: Given interp_main, it may be possible to kill this ref */
- PyInterpreterState *autoInterpreterState;
- } gilstate;
- struct _getargs_runtime_state {
- struct _PyArg_Parser *static_parsers;
- } getargs;
- struct _fileutils_state fileutils;
- struct _faulthandler_runtime_state faulthandler;
- struct _tracemalloc_runtime_state tracemalloc;
- struct _reftracer_runtime_state ref_tracer;
-
- // The rwmutex is used to prevent overlapping global and per-interpreter
- // stop-the-world events. Global stop-the-world events lock the mutex
- // exclusively (as a "writer"), while per-interpreter stop-the-world events
- // lock it non-exclusively (as "readers").
- _PyRWMutex stoptheworld_mutex;
- struct _stoptheworld_state stoptheworld;
-
- PyPreConfig preconfig;
-
- // Audit values must be preserved when Py_Initialize()/Py_Finalize()
- // is called multiple times.
- Py_OpenCodeHookFunction open_code_hook;
- void *open_code_userdata;
- struct {
- PyMutex mutex;
- _Py_AuditHookEntry *head;
- } audit_hooks;
-
- struct _py_object_runtime_state object_state;
- struct _Py_float_runtime_state float_state;
- struct _Py_unicode_runtime_state unicode_state;
- struct _types_runtime_state types;
- struct _Py_time_runtime_state time;
-
-#if defined(__EMSCRIPTEN__) && defined(PY_CALL_TRAMPOLINE)
- // Used in "Python/emscripten_trampoline.c" to choose between type
- // reflection trampoline and EM_JS trampoline.
- int (*emscripten_count_args_function)(PyCFunctionWithKeywords func);
-#endif
-
- /* All the objects that are shared by the runtime's interpreters. */
- struct _Py_cached_objects cached_objects;
- struct _Py_static_objects static_objects;
-
- /* The following fields are here to avoid allocation during init.
- The data is exposed through _PyRuntimeState pointer fields.
- These fields should not be accessed directly outside of init.
-
- All other _PyRuntimeState pointer fields are populated when
- needed and default to NULL.
-
- For now there are some exceptions to that rule, which require
- allocation during init. These will be addressed on a case-by-case
- basis. Most notably, we don't pre-allocated the several mutex
- (PyThread_type_lock) fields, because on Windows we only ever get
- a pointer type.
- */
-
- /* _PyRuntimeState.interpreters.main */
- PyInterpreterState _main_interpreter;
- // _main_interpreter should be the last field of _PyRuntimeState.
- // See https://github.com/python/cpython/issues/127117.
-} _PyRuntimeState;
-
-
-/* other API */
+/* API */
// Export _PyRuntime for shared extensions which use it in static inline
// functions for best performance, like _Py_IsMainThread() or _Py_ID().
# error "this header requires Py_BUILD_CORE define"
#endif
+#include "pycore_structs.h"
#include "pycore_ceval_state.h" // _PyEval_RUNTIME_PERF_INIT
#include "pycore_debug_offsets.h" // _Py_DebugOffsets_INIT()
#include "pycore_faulthandler.h" // _faulthandler_runtime_state_INIT
#include "pycore_floatobject.h" // _py_float_format_unknown
#include "pycore_function.h"
+#include "pycore_hamt.h" // _PyHamt_BitmapNode_Type
#include "pycore_object.h" // _PyObject_HEAD_INIT
#include "pycore_obmalloc_init.h" // _obmalloc_global_state_INIT
#include "pycore_parser.h" // _parser_runtime_state_INIT
--- /dev/null
+#ifndef Py_INTERNAL_RUNTIME_STRUCTS_H
+#define Py_INTERNAL_RUNTIME_STRUCTS_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* This file contains the struct definitions for the runtime, interpreter
+ * and thread states, plus all smaller structs contained therein */
+
+#include "pycore_structs.h"
+#include "pycore_interp_structs.h"
+
+/************ Runtime state ************/
+
+/* Debug-hook wrapper for one allocator domain (raw/mem/obj): the
+   underlying allocator plus the API ID byte stamped into each block
+   so API-domain violations can be detected. */
+typedef struct {
+    /* We tag each block with an API ID in order to tag API violations */
+    char api_id;
+    /* The wrapped (underlying) allocator. */
+    PyMemAllocatorEx alloc;
+} debug_alloc_api_t;
+
+/* Runtime-wide memory allocator state: the three standard allocator
+   domains, their debug-hook wrappers, and the object arena allocator. */
+struct _pymem_allocators {
+    PyMutex mutex;
+    /* The active allocators for each API domain. */
+    struct {
+        PyMemAllocatorEx raw;
+        PyMemAllocatorEx mem;
+        PyMemAllocatorEx obj;
+    } standard;
+    /* The debug-hook wrappers for each domain (see debug_alloc_api_t). */
+    struct {
+        debug_alloc_api_t raw;
+        debug_alloc_api_t mem;
+        debug_alloc_api_t obj;
+    } debug;
+    /* Non-zero if the debug hooks are installed. */
+    int is_debug_enabled;
+    PyObjectArenaAllocator obj_arena;
+};
+
+/* Classification of the platform's C float/double byte layout
+   (IEEE 754 big/little endian, or unknown). */
+enum _py_float_format_type {
+    _py_float_format_unknown,
+    _py_float_format_ieee_big_endian,
+    _py_float_format_ieee_little_endian,
+};
+
+/* Runtime-wide float state: the detected representations of the
+   platform's C "float" and "double" types. */
+struct _Py_float_runtime_state {
+    enum _py_float_format_type float_format;
+    enum _py_float_format_type double_format;
+};
+
+/* State used when initializing Python's hash secret.  On non-Windows
+   platforms urandom_cache keeps an open fd plus its device/inode --
+   presumably so replacement of the underlying file can be detected;
+   confirm against the bootstrap hash code. */
+struct pyhash_runtime_state {
+    struct {
+#ifndef MS_WINDOWS
+        int fd;
+        dev_t st_dev;
+        ino_t st_ino;
+#else
+        // This is a placeholder so the struct isn't empty on Windows.
+        int _not_used;
+#endif
+    } urandom_cache;
+};
+
+#include "pycore_tracemalloc.h"
+
+/* State for the fileutils helpers.  NOTE(review): force_ascii appears
+   to control forcing an ASCII filesystem encoding -- confirm against
+   Python/fileutils.c. */
+struct _fileutils_state {
+    int force_ascii;
+};
+
+#include "pycore_debug_offsets.h"
+#include "pycore_signal.h"
+#include "pycore_faulthandler.h"
+#include "pycore_pythread.h"
+#include "pycore_ast.h"
+
+#ifdef Py_DEBUG
+/* Number of slots in the parser's memoization-statistics table. */
+#define _PYPEGEN_NSTATISTICS 2000
+#endif
+
+/* Runtime state of the PEG parser. */
+struct _parser_runtime_state {
+#ifdef Py_DEBUG
+    /* Memoization statistics (debug builds only). */
+    long memo_statistics[_PYPEGEN_NSTATISTICS];
+#ifdef Py_GIL_DISABLED
+    /* Guards memo_statistics in free-threaded builds. */
+    PyMutex mutex;
+#endif
+#else
+    int _not_used;
+#endif
+    /* Dummy AST expression (name) node used by the parser. */
+    struct _expr dummy_name;
+};
+
+/* A fraction (numerator/denominator) of PyTime_t values. */
+typedef struct {
+    PyTime_t numer;
+    PyTime_t denom;
+} _PyTimeFraction;
+
+/* Runtime state for the time helpers.  On Windows and macOS a base
+   fraction is stored (presumably to scale native clock ticks -- see
+   _PyTime_Init()); elsewhere the struct is effectively empty. */
+struct _Py_time_runtime_state {
+#if defined(MS_WINDOWS) || defined(__APPLE__)
+    _PyTimeFraction base;
+#else
+    // Placeholder so the struct is never empty.
+    char _unused;
+#endif
+};
+
+
+/* Heap-allocated objects cached for the whole runtime (shared by the
+   runtime's interpreters). */
+struct _Py_cached_objects {
+    // XXX We could statically allocate the hashtable.
+    /* Table of strings interned runtime-wide. */
+    _Py_hashtable_t *interned_strings;
+};
+
+// These would be in pycore_long.h if it weren't for an include cycle.
+#define _PY_NSMALLPOSINTS 257
+#define _PY_NSMALLNEGINTS 5
+
+#include "pycore_global_strings.h"
+
+/* Statically-allocated singleton objects owned by the runtime and
+   shared by all interpreters. */
+struct _Py_static_objects {
+    struct {
+        /* Small integers are preallocated in this array so that they
+         * can be shared.
+         * The integers that are preallocated are those in the range
+         * -_PY_NSMALLNEGINTS (inclusive) to _PY_NSMALLPOSINTS (exclusive).
+         */
+        PyLongObject small_ints[_PY_NSMALLNEGINTS + _PY_NSMALLPOSINTS];
+
+        /* The empty bytes object, plus one singleton per byte value. */
+        PyBytesObject bytes_empty;
+        struct {
+            PyBytesObject ob;
+            char eos;  // trailing NUL terminator for ob
+        } bytes_characters[256];
+
+        struct _Py_global_strings strings;
+
+        _PyGC_Head_UNUSED _tuple_empty_gc_not_used;
+        /* The empty tuple singleton. */
+        PyTupleObject tuple_empty;
+
+        _PyGC_Head_UNUSED _hamt_bitmap_node_empty_gc_not_used;
+        /* The empty HAMT bitmap node singleton. */
+        PyHamtNode_Bitmap hamt_bitmap_node_empty;
+        _PyContextTokenMissing context_token_missing;
+    } singletons;
+};
+
+/* Full Python runtime state */
+
+/* _PyRuntimeState holds the global state for the CPython runtime.
+ That data is exported by the internal API as a global variable
+ (_PyRuntime, defined near the top of pylifecycle.c).
+ */
+typedef struct pyruntimestate {
+    /* This field must be first to facilitate locating it by out of process
+     * debuggers. Out of process debuggers will use the offsets contained in this
+     * field to be able to locate other fields in several interpreter structures
+     * in a way that doesn't require them to know the exact layout of those
+     * structures.
+     *
+     * IMPORTANT:
+     * This struct is **NOT** backwards compatible between minor versions of
+     * the interpreter; the members, order of members and size can change
+     * between minor versions. This struct is only guaranteed to be stable
+     * between patch versions for a given minor version of the interpreter.
+     */
+    _Py_DebugOffsets debug_offsets;
+
+    /* Has been initialized to a safe state.
+
+       In order to be effective, this must be set to 0 during or right
+       after allocation. */
+    int _initialized;
+
+    /* Is running Py_PreInitialize()? */
+    int preinitializing;
+
+    /* Is Python preinitialized? Set to 1 by Py_PreInitialize() */
+    int preinitialized;
+
+    /* Is Python core initialized? Set to 1 by _Py_InitializeCore() */
+    int core_initialized;
+
+    /* Is Python fully initialized? Set to 1 by Py_Initialize() */
+    int initialized;
+
+    /* Set by Py_FinalizeEx(). Only reset to NULL if Py_Initialize()
+       is called again.
+
+       Use _PyRuntimeState_GetFinalizing() and _PyRuntimeState_SetFinalizing()
+       to access it, don't access it directly. */
+    PyThreadState *_finalizing;
+    /* The ID of the OS thread in which we are finalizing. */
+    unsigned long _finalizing_id;
+
+    struct pyinterpreters {
+        PyMutex mutex;
+        /* The linked list of interpreters, newest first. */
+        PyInterpreterState *head;
+        /* The runtime's initial interpreter, which has a special role
+           in the operation of the runtime. It is also often the only
+           interpreter. */
+        PyInterpreterState *main;
+        /* next_id is an auto-numbered sequence of small
+           integers. It gets initialized in _PyInterpreterState_Enable(),
+           which is called in Py_Initialize(), and used in
+           PyInterpreterState_New(). A negative interpreter ID
+           indicates an error occurred. The main interpreter will
+           always have an ID of 0. Overflow results in a RuntimeError.
+           If that becomes a problem later then we can adjust, e.g. by
+           using a Python int. */
+        int64_t next_id;
+    } interpreters;
+
+    /* Platform-specific identifier and PyThreadState, respectively, for the
+       main thread in the main interpreter. */
+    unsigned long main_thread;
+    PyThreadState *main_tstate;
+
+    /* ---------- IMPORTANT ---------------------------
+       The fields above this line are declared as early as
+       possible to facilitate out-of-process observability
+       tools. */
+
+    /* cross-interpreter data and utils */
+    _PyXI_global_state_t xi;
+
+    struct _pymem_allocators allocators;
+    struct _obmalloc_global_state obmalloc;
+    struct pyhash_runtime_state pyhash_state;
+    struct _pythread_runtime_state threads;
+    struct _signals_runtime_state signals;
+
+    /* Used for the thread state bound to the current thread. */
+    Py_tss_t autoTSSkey;
+
+    /* Used instead of PyThreadState.trash when there is not current tstate. */
+    Py_tss_t trashTSSkey;
+
+    /* The original command line arguments (wide strings). */
+    PyWideStringList orig_argv;
+
+    struct _parser_runtime_state parser;
+
+    struct _atexit_runtime_state atexit;
+
+    struct _import_runtime_state imports;
+    struct _ceval_runtime_state ceval;
+    struct _gilstate_runtime_state {
+        /* bpo-26558: Flag to disable PyGILState_Check().
+           If set to non-zero, PyGILState_Check() always returns 1. */
+        int check_enabled;
+        /* The single PyInterpreterState used by this process'
+           GILState implementation
+        */
+        /* TODO: Given interp_main, it may be possible to kill this ref */
+        PyInterpreterState *autoInterpreterState;
+    } gilstate;
+    struct _getargs_runtime_state {
+        struct _PyArg_Parser *static_parsers;
+    } getargs;
+    struct _fileutils_state fileutils;
+    struct _faulthandler_runtime_state faulthandler;
+    struct _tracemalloc_runtime_state tracemalloc;
+    struct _reftracer_runtime_state ref_tracer;
+
+    // The rwmutex is used to prevent overlapping global and per-interpreter
+    // stop-the-world events. Global stop-the-world events lock the mutex
+    // exclusively (as a "writer"), while per-interpreter stop-the-world events
+    // lock it non-exclusively (as "readers").
+    _PyRWMutex stoptheworld_mutex;
+    struct _stoptheworld_state stoptheworld;
+
+    PyPreConfig preconfig;
+
+    // Audit values must be preserved when Py_Initialize()/Py_Finalize()
+    // is called multiple times.
+    Py_OpenCodeHookFunction open_code_hook;
+    void *open_code_userdata;
+    struct {
+        PyMutex mutex;
+        struct _Py_AuditHookEntry *head;
+    } audit_hooks;
+
+    struct _py_object_runtime_state object_state;
+    struct _Py_float_runtime_state float_state;
+    struct _Py_unicode_runtime_state unicode_state;
+    struct _types_runtime_state types;
+    struct _Py_time_runtime_state time;
+
+#if defined(__EMSCRIPTEN__) && defined(PY_CALL_TRAMPOLINE)
+    // Used in "Python/emscripten_trampoline.c" to choose between type
+    // reflection trampoline and EM_JS trampoline.
+    int (*emscripten_count_args_function)(PyCFunctionWithKeywords func);
+#endif
+
+    /* All the objects that are shared by the runtime's interpreters. */
+    struct _Py_cached_objects cached_objects;
+    struct _Py_static_objects static_objects;
+
+    /* The following fields are here to avoid allocation during init.
+       The data is exposed through _PyRuntimeState pointer fields.
+       These fields should not be accessed directly outside of init.
+
+       All other _PyRuntimeState pointer fields are populated when
+       needed and default to NULL.
+
+       For now there are some exceptions to that rule, which require
+       allocation during init. These will be addressed on a case-by-case
+       basis. Most notably, we don't pre-allocate the several mutex
+       (PyThread_type_lock) fields, because on Windows we only ever get
+       a pointer type.
+    */
+
+    /* _PyRuntimeState.interpreters.main */
+    PyInterpreterState _main_interpreter;
+    // _main_interpreter should be the last field of _PyRuntimeState.
+    // See https://github.com/python/cpython/issues/127117.
+} _PyRuntimeState;
+
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* Py_INTERNAL_RUNTIME_STRUCTS_H */
extern "C" {
#endif
-// Define this to get precise tracking of stackrefs.
-// #define Py_STACKREF_DEBUG 1
-
// Define this to get precise tracking of closed stackrefs.
// This will use unbounded memory, as it can only grow.
// Use this to track double closes in short-lived programs
#if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG)
-typedef union _PyStackRef {
- uint64_t index;
-} _PyStackRef;
-
#define Py_TAG_BITS 0
PyAPI_FUNC(PyObject *) _Py_stackref_get_object(_PyStackRef ref);
#else
-typedef union _PyStackRef {
- uintptr_t bits;
-} _PyStackRef;
-
#ifdef Py_GIL_DISABLED
--- /dev/null
+#ifndef Py_INTERNAL_STRUCTS_H
+#define Py_INTERNAL_STRUCTS_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* This file contains various key structs that are widely used
+ * and do not depend on other headers. */
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+
+/* A 16-bit counter that packs a value and a backoff amount (per the
+   field name); used as the first cache entry of specializable
+   instructions -- see _Py_CODEUNIT below. */
+typedef struct {
+    uint16_t value_and_backoff;
+} _Py_BackoffCounter;
+
+/* Each instruction in a code object is a fixed-width value,
+ * currently 2 bytes: 1-byte opcode + 1-byte oparg. The EXTENDED_ARG
+ * opcode allows for larger values but the current limit is 3 uses
+ * of EXTENDED_ARG (see Python/compile.c), for a maximum
+ * 32-bit value. This aligns with the note in Python/compile.c
+ * (compiler_addop_i_line) indicating that the max oparg value is
+ * 2**32 - 1, rather than INT_MAX.
+ */
+typedef union {
+    uint16_t cache;  // Raw 16-bit view, used for inline cache entries
+    struct {
+        uint8_t code;  // opcode
+        uint8_t arg;   // oparg
+    } op;
+    _Py_BackoffCounter counter; // First cache entry of specializable op
+} _Py_CODEUNIT;
+
+
+/* Abstract tree node. */
+/* Base for HAMT tree nodes; carries only the object header. */
+typedef struct {
+    PyObject_HEAD
+} PyHamtNode;
+
+
+/* An HAMT immutable mapping collection. */
+typedef struct {
+    PyObject_HEAD
+    PyHamtNode *h_root;       // root node of the trie
+    PyObject *h_weakreflist;  // weak reference list head (or NULL)
+    Py_ssize_t h_count;       // number of items in the mapping
+} PyHamtObject;
+
+/* A HAMT bitmap node: b_bitmap marks which slots are occupied and
+   b_array holds the entries inline (presumably key/value pairs or
+   child nodes -- see Python/hamt.c). */
+typedef struct {
+    PyObject_VAR_HEAD
+    uint32_t b_bitmap;
+    PyObject *b_array[1];
+} PyHamtNode_Bitmap;
+
+#include "pycore_context.h"
+
+// Define this to get precise tracking of stackrefs.
+// #define Py_STACKREF_DEBUG 1
+
+/* A reference held on the evaluation stack.  In normal builds it holds
+   pointer-sized bits; in stackref-debug builds (with the GIL enabled)
+   it is instead an index into the interpreter's stackref tracking
+   tables (see PyInterpreterState.open_stackrefs_table). */
+typedef union _PyStackRef {
+#if !defined(Py_GIL_DISABLED) && defined(Py_STACKREF_DEBUG)
+    uint64_t index;
+#else
+    uintptr_t bits;
+#endif
+} _PyStackRef;
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* Py_INTERNAL_STRUCTS_H */
# error "this header requires Py_BUILD_CORE define"
#endif
+#include "pycore_runtime_structs.h"
#ifdef __clang__
struct timeval;
// --- _PyTimeFraction -------------------------------------------------------
-typedef struct {
- PyTime_t numer;
- PyTime_t denom;
-} _PyTimeFraction;
-
// Set a fraction.
// Return 0 on success.
// Return -1 if the fraction is invalid.
extern double _PyTimeFraction_Resolution(
const _PyTimeFraction *frac);
-
-// --- _Py_time_runtime_state ------------------------------------------------
-
-struct _Py_time_runtime_state {
-#if defined(MS_WINDOWS) || defined(__APPLE__)
- _PyTimeFraction base;
-#else
- char _unused;
-#endif
-};
-
extern PyStatus _PyTime_Init(struct _Py_time_runtime_state *state);
#ifdef __cplusplus
#include "pycore_moduleobject.h" // PyModuleObject
#include "pycore_lock.h" // PyMutex
+#include "pycore_runtime_structs.h" // type state
/* state */
#define _Py_TYPE_BASE_VERSION_TAG (2<<16)
#define _Py_MAX_GLOBAL_TYPE_VERSION_TAG (_Py_TYPE_BASE_VERSION_TAG - 1)
-/* For now we hard-code this to a value for which we are confident
- all the static builtin types will fit (for all builds). */
-#define _Py_MAX_MANAGED_STATIC_BUILTIN_TYPES 200
-#define _Py_MAX_MANAGED_STATIC_EXT_TYPES 10
-#define _Py_MAX_MANAGED_STATIC_TYPES \
- (_Py_MAX_MANAGED_STATIC_BUILTIN_TYPES + _Py_MAX_MANAGED_STATIC_EXT_TYPES)
-
-struct _types_runtime_state {
- /* Used to set PyTypeObject.tp_version_tag for core static types. */
- // bpo-42745: next_version_tag remains shared by all interpreters
- // because of static types.
- unsigned int next_version_tag;
-
- struct {
- struct {
- PyTypeObject *type;
- int64_t interp_count;
- } types[_Py_MAX_MANAGED_STATIC_TYPES];
- } managed_static;
-};
-
-
-// Type attribute lookup cache: speed up attribute and method lookups,
-// see _PyType_Lookup().
-struct type_cache_entry {
- unsigned int version; // initialized from type->tp_version_tag
-#ifdef Py_GIL_DISABLED
- _PySeqLock sequence;
-#endif
- PyObject *name; // reference to exactly a str or None
- PyObject *value; // borrowed reference or NULL
-};
-
-#define MCACHE_SIZE_EXP 12
-
-struct type_cache {
- struct type_cache_entry hashtable[1 << MCACHE_SIZE_EXP];
-};
-
-typedef struct {
- PyTypeObject *type;
- int isbuiltin;
- int readying;
- int ready;
- // XXX tp_dict can probably be statically allocated,
- // instead of dynamically and stored on the interpreter.
- PyObject *tp_dict;
- PyObject *tp_subclasses;
- /* We never clean up weakrefs for static builtin types since
- they will effectively never get triggered. However, there
- are also some diagnostic uses for the list of weakrefs,
- so we still keep it. */
- PyObject *tp_weaklist;
-} managed_static_type_state;
-
-#define TYPE_VERSION_CACHE_SIZE (1<<12) /* Must be a power of 2 */
-
-struct types_state {
- /* Used to set PyTypeObject.tp_version_tag.
- It starts at _Py_MAX_GLOBAL_TYPE_VERSION_TAG + 1,
- where all those lower numbers are used for core static types. */
- unsigned int next_version_tag;
-
- struct type_cache type_cache;
-
- /* Every static builtin type is initialized for each interpreter
- during its own initialization, including for the main interpreter
- during global runtime initialization. This is done by calling
- _PyStaticType_InitBuiltin().
-
- The first time a static builtin type is initialized, all the
- normal PyType_Ready() stuff happens. The only difference from
- normal is that there are three PyTypeObject fields holding
- objects which are stored here (on PyInterpreterState) rather
- than in the corresponding PyTypeObject fields. Those are:
- tp_dict (cls.__dict__), tp_subclasses (cls.__subclasses__),
- and tp_weaklist.
-
- When a subinterpreter is initialized, each static builtin type
- is still initialized, but only the interpreter-specific portion,
- namely those three objects.
-
- Those objects are stored in the PyInterpreterState.types.builtins
- array, at the index corresponding to each specific static builtin
- type. That index (a size_t value) is stored in the tp_subclasses
- field. For static builtin types, we re-purposed the now-unused
- tp_subclasses to avoid adding another field to PyTypeObject.
- In all other cases tp_subclasses holds a dict like before.
- (The field was previously defined as PyObject*, but is now void*
- to reflect its dual use.)
-
- The index for each static builtin type isn't statically assigned.
- Instead it is calculated the first time a type is initialized
- (by the main interpreter). The index matches the order in which
- the type was initialized relative to the others. The actual
- value comes from the current value of num_builtins_initialized,
- as each type is initialized for the main interpreter.
-
- num_builtins_initialized is incremented once for each static
- builtin type. Once initialization is over for a subinterpreter,
- the value will be the same as for all other interpreters. */
- struct {
- size_t num_initialized;
- managed_static_type_state initialized[_Py_MAX_MANAGED_STATIC_BUILTIN_TYPES];
- } builtins;
- /* We apply a similar strategy for managed extension modules. */
- struct {
- size_t num_initialized;
- size_t next_index;
- managed_static_type_state initialized[_Py_MAX_MANAGED_STATIC_EXT_TYPES];
- } for_extensions;
- PyMutex mutex;
-
- // Borrowed references to type objects whose
- // tp_version_tag % TYPE_VERSION_CACHE_SIZE
- // once was equal to the index in the table.
- // They are cleared when the type object is deallocated.
- PyTypeObject *type_version_cache[TYPE_VERSION_CACHE_SIZE];
-};
-
/* runtime lifecycle */
extern void _PyTypes_Fini(PyInterpreterState *);
extern void _PyTypes_AfterFork(void);
-/* other API */
-
-/* Length of array of slotdef pointers used to store slots with the
- same __name__. There should be at most MAX_EQUIV-1 slotdef entries with
- the same __name__, for any __name__. Since that's a static property, it is
- appropriate to declare fixed-size arrays for this. */
-#define MAX_EQUIV 10
-
-typedef struct wrapperbase pytype_slotdef;
-
-
static inline PyObject **
_PyStaticType_GET_WEAKREFS_LISTPTR(managed_static_type_state *state)
{
/* --- Other API ---------------------------------------------------------- */
-struct _Py_unicode_runtime_ids {
- PyMutex mutex;
- // next_index value must be preserved when Py_Initialize()/Py_Finalize()
- // is called multiple times: see _PyUnicode_FromId() implementation.
- Py_ssize_t next_index;
-};
-
-struct _Py_unicode_runtime_state {
- struct _Py_unicode_runtime_ids ids;
-};
-
-/* fs_codec.encoding is initialized to NULL.
- Later, it is set to a non-NULL string by _PyUnicode_InitEncodings(). */
-struct _Py_unicode_fs_codec {
- char *encoding; // Filesystem encoding (encoded to UTF-8)
- int utf8; // encoding=="utf-8"?
- char *errors; // Filesystem errors (encoded to UTF-8)
- _Py_error_handler error_handler;
-};
-
-struct _Py_unicode_ids {
- Py_ssize_t size;
- PyObject **array;
-};
-
-struct _Py_unicode_state {
- struct _Py_unicode_fs_codec fs_codec;
-
- _PyUnicode_Name_CAPI *ucnhash_capi;
-
- // Unicode identifiers (_Py_Identifier): see _PyUnicode_FromId()
- struct _Py_unicode_ids ids;
-};
-
extern void _PyUnicode_ClearInterned(PyInterpreterState *interp);
// Like PyUnicode_AsUTF8(), but check for embedded null characters.
// Each entry implicitly represents a unique id based on its offset in the
// table. Non-allocated entries form a free-list via the 'next' pointer.
// Allocated entries store the corresponding PyObject.
-typedef union _Py_unique_id_entry {
- // Points to the next free type id, when part of the freelist
- union _Py_unique_id_entry *next;
-
- // Stores the object when the id is assigned
- PyObject *obj;
-} _Py_unique_id_entry;
-
-struct _Py_unique_id_pool {
- PyMutex mutex;
-
- // combined table of object with allocated unique ids and unallocated ids.
- _Py_unique_id_entry *table;
-
- // Next entry to allocate inside 'table' or NULL
- _Py_unique_id_entry *freelist;
-
- // size of 'table'
- Py_ssize_t size;
-};
#define _Py_INVALID_UNIQUE_ID 0
# error "this header requires Py_BUILD_CORE define"
#endif
-struct _warnings_runtime_state {
- /* Both 'filters' and 'onceregistry' can be set in warnings.py;
- get_warnings_attr() will reset these variables accordingly. */
- PyObject *filters; /* List */
- PyObject *once_registry; /* Dict */
- PyObject *default_action; /* String */
- _PyRecursiveMutex lock;
- long filters_version;
-};
-
extern int _PyWarnings_InitState(PyInterpreterState *interp);
extern PyObject* _PyWarnings_Init(void);
$(srcdir)/Include/internal/pycore_instruments.h \
$(srcdir)/Include/internal/pycore_instruction_sequence.h \
$(srcdir)/Include/internal/pycore_interp.h \
+ $(srcdir)/Include/internal/pycore_interp_structs.h \
$(srcdir)/Include/internal/pycore_intrinsics.h \
$(srcdir)/Include/internal/pycore_jit.h \
$(srcdir)/Include/internal/pycore_list.h \
$(srcdir)/Include/internal/pycore_runtime.h \
$(srcdir)/Include/internal/pycore_runtime_init.h \
$(srcdir)/Include/internal/pycore_runtime_init_generated.h \
+ $(srcdir)/Include/internal/pycore_runtime_structs.h \
$(srcdir)/Include/internal/pycore_semaphore.h \
$(srcdir)/Include/internal/pycore_setobject.h \
$(srcdir)/Include/internal/pycore_signal.h \
$(srcdir)/Include/internal/pycore_sliceobject.h \
$(srcdir)/Include/internal/pycore_strhex.h \
+ $(srcdir)/Include/internal/pycore_structs.h \
$(srcdir)/Include/internal/pycore_structseq.h \
$(srcdir)/Include/internal/pycore_symtable.h \
$(srcdir)/Include/internal/pycore_sysmodule.h \
#include "Python.h"
#include "pycore_codecs.h" // _PyCodec_Lookup()
+#include "pycore_unicodeobject.h" // _PyUnicode_EncodeCharmap
#ifdef MS_WINDOWS
#include <windows.h>
#include "pycore_long.h" // _PyLong_GetZero()
#include "pycore_structseq.h" // _PyStructSequence_NewType()
#include "pycore_sysmodule.h" // _PySys_GetOptionalAttrString()
+#include "pycore_fileutils.h" // _Py_set_inheritable
#ifdef __hpux
#define STRICT_SYSV_CURSES
#include "pycore_long.h" // _PyLong_IsNegative()
#include "pycore_sysmodule.h" // _PySys_GetOptionalAttrString()
+#include "pycore_unicodeobject.h" // _PyUnicode_AsUTF8String
#ifdef MS_WINDOWS
# include <windows.h>
#define _PY_INTERPRETER
#include "Python.h"
+#include "pycore_genobject.h"
+
#include "pycore_call.h" // _PyObject_CallNoArgs()
#include "pycore_ceval.h" // _PyEval_EvalFrame()
#include "pycore_frame.h" // _PyInterpreterFrame
#include "pycore_modsupport.h" // _PyArg_NoKwnames()
#include "pycore_object.h" // _PyObject_GC_TRACK(), _PyDebugAllocatorStats()
#include "pycore_tuple.h" // _PyTuple_FromArray()
+#include "pycore_typeobject.h" // _Py_TYPE_VERSION_LIST
#include "pycore_setobject.h" // _PySet_NextEntry()
#include <stddef.h>
#include "pycore_initconfig.h" // _PyStatus_EXCEPTION()
#include "pycore_instruction_sequence.h" // _PyInstructionSequence_Type
#include "pycore_hashtable.h" // _Py_hashtable_new()
+#include "pycore_hamt.h" // _PyHamtItems_Type
#include "pycore_memoryobject.h" // _PyManagedBuffer_Type
#include "pycore_namespace.h" // _PyNamespace_Type
#include "pycore_object.h" // PyAPI_DATA() _Py_SwappedOp definition
#include "pycore_object.h" // _PyObject_GC_TRACK(), _Py_FatalRefcountError()
#include "pycore_pathconfig.h" // _Py_DumpPathConfig()
#include "pycore_pyerrors.h" // _PyUnicodeTranslateError_Create()
+#include "pycore_pyhash.h" // _Py_HashSecret_t
#include "pycore_pylifecycle.h" // _Py_SetFileSystemEncoding()
#include "pycore_pystate.h" // _PyInterpreterState_GET()
#include "pycore_ucnhash.h" // _PyUnicode_Name_CAPI
#include <Python.h>
#include "pycore_ast.h" // _PyAST_Validate(),
#include "pycore_pystate.h" // _PyThreadState_GET()
+#include "pycore_parser.h" // _PYPEGEN_NSTATISTICS
#include "pycore_pyerrors.h" // PyExc_IncompleteInputError
+#include "pycore_runtime.h" // _PyRuntime
+#include "pycore_unicodeobject.h" // _PyUnicode_InternImmortal
#include <errcode.h>
#include "lexer/lexer.h"
#include "Python.h"
#include "pycore_fileutils.h" // _Py_fstat_noraise()
#include "pycore_initconfig.h"
+#include "pycore_pyhash.h" // _Py_HashSecret_t
#include "pycore_pylifecycle.h" // _PyOS_URandomNonblock()
#include "pycore_runtime.h" // _PyRuntime
#include "pycore_pystate.h" // _PyInterpreterState_GET()
#include <stdlib.h> // exit()
+
/* if _PY_SHORT_FLOAT_REPR == 0, then don't even try to compile
the following code */
#if _PY_SHORT_FLOAT_REPR == 1
#endif
-// ULong is defined in pycore_dtoa.h.
+typedef uint32_t ULong;
typedef int32_t Long;
typedef uint64_t ULLong;
#include "Python.h"
#include "pycore_fileutils.h" // _Py_GetLocaleconvNumeric()
#include "pycore_long.h" // _PyLong_FormatWriter()
+#include "pycore_unicodeobject.h" // PyUnicode_MAX_CHAR_VALUE()
#include <locale.h>
/* Raises an exception about an unknown presentation type for this
#include "pycore_pyerrors.h" // _PyErr_GetRaisedException()
#include "pycore_pylifecycle.h" // _Py_PreInitializeFromConfig()
#include "pycore_pymem.h" // _PyMem_DefaultRawMalloc()
+#include "pycore_pyhash.h" // _Py_HashSecret
#include "pycore_pystate.h" // _PyThreadState_GET()
#include "pycore_pystats.h" // _Py_StatsOn()
#include "pycore_sysmodule.h" // _PySys_SetIntMaxStrDigits()
#include "pycore_pyatomic_ft_wrappers.h" // FT_ATOMIC_STORE_UINTPTR_RELEASE
#include "pycore_pyerrors.h"
#include "pycore_pystate.h" // _PyInterpreterState_GET()
+#include "pycore_runtime_structs.h" // _PyCoMonitoringData
/* Uncomment this to dump debugging output when assertions fail */
// #define INSTRUMENT_DEBUG 1
/* PyInterpreterConfig API */
#include "Python.h"
+#include "pycore_interp.h" // Py_RTFLAGS
#include "pycore_pylifecycle.h"
#include <stdbool.h>
#include "pycore_frame.h"
#include "pycore_function.h"
#include "pycore_global_objects.h"
+#include "pycore_genobject.h" // _PyAsyncGenValueWrapperNew
#include "pycore_compile.h" // _PyCompile_GetUnaryIntrinsicName, etc
#include "pycore_intrinsics.h" // INTRINSIC_PRINT
#include "pycore_pyerrors.h" // _PyErr_SetString()
#include "pycore_runtime.h" // _Py_ID()
#include "pycore_sysmodule.h" // _PySys_GetRequiredAttr()
+#include "pycore_tuple.h" // _PyTuple_FromArray()
#include "pycore_typevarobject.h" // _Py_make_typevar()
+#include "pycore_unicodeobject.h" // _PyUnicode_FromASCII
/******** Unary functions ********/
/* Python interpreter top-level routines, including init/exit */
#include "Python.h"
-
+#include "pycore_runtime_structs.h"
+#include "pycore_unicodeobject.h"
#include "pycore_audit.h" // _PySys_ClearAuditHooks()
#include "pycore_call.h" // _PyObject_CallMethod()
#include "pycore_ceval.h" // _PyEval_FiniGIL()
#include "pycore_freelist.h" // _PyObject_ClearFreeLists()
#include "pycore_floatobject.h" // _PyFloat_InitTypes()
#include "pycore_global_objects_fini_generated.h" // "_PyStaticObjects_CheckRefcnt()
+#include "pycore_hamt.h" // _PyHamt_Type
#include "pycore_import.h" // _PyImport_BootstrapImp()
#include "pycore_initconfig.h" // _PyStatus_OK()
#include "pycore_list.h" // _PyList_Fini()
/* Thread and interpreter state structures and their interfaces */
#include "Python.h"
+#include "pycore_runtime_structs.h"
+
#include "pycore_abstract.h" // _PyIndex_Check()
#include "pycore_audit.h" // _Py_AuditHookEntry
#include "pycore_ceval.h"
#include "pycore_pylifecycle.h" // _PyAST_Fini()
#include "pycore_pymem.h" // _PyMem_DebugEnabled()
#include "pycore_pystate.h"
+#include "pycore_runtime.h" // _PyRuntime
#include "pycore_runtime_init.h" // _PyRuntimeState_INIT
#include "pycore_stackref.h" // Py_STACKREF_DEBUG
#include "pycore_time.h" // _PyTime_Init()
#include "pycore_frame.h"
#include "pycore_pyerrors.h" // export _Py_UTF8_Edit_Cost()
#include "pycore_runtime.h" // _Py_ID()
+#include "pycore_unicodeobject.h" // _PyUnicode_Equal()
#define MAX_CANDIDATE_ITEMS 750
#define MAX_STRING_SIZE 40
# First get some info from the declarations.
nsmallposints = None
nsmallnegints = None
- with open(os.path.join(INTERNAL, 'pycore_global_objects.h')) as infile:
+ with open(os.path.join(INTERNAL, 'pycore_runtime_structs.h')) as infile:
for line in infile:
if line.startswith('#define _PY_NSMALLPOSINTS'):
nsmallposints = int(line.split()[-1])