--- /dev/null
+#ifndef Py_INTERNAL_CEVAL_STATE_H
+#define Py_INTERNAL_CEVAL_STATE_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+# error "this header requires Py_BUILD_CORE define"
+#endif
+
+
+#include "pycore_atomic.h" /* _Py_atomic_int */
+#include "pycore_gil.h" // struct _gil_runtime_state
+
+
+typedef enum {
+ PERF_STATUS_FAILED = -1, // Perf trampoline is in an invalid state
+ PERF_STATUS_NO_INIT = 0, // Perf trampoline is not initialized
+ PERF_STATUS_OK = 1, // Perf trampoline is ready to be executed
+} perf_status_t;
+
+
+#ifdef PY_HAVE_PERF_TRAMPOLINE
+struct code_arena_st;
+
+struct trampoline_api_st {
+ void* (*init_state)(void);
+ void (*write_state)(void* state, const void *code_addr,
+ unsigned int code_size, PyCodeObject* code);
+ int (*free_state)(void* state);
+ void *state;
+};
+#endif
+
+struct _ceval_runtime_state {
+ struct {
+#ifdef PY_HAVE_PERF_TRAMPOLINE
+ perf_status_t status;
+ Py_ssize_t extra_code_index;
+ struct code_arena_st *code_arena;
+ struct trampoline_api_st trampoline_api;
+ FILE *map_file;
+#else
+ int _not_used;
+#endif
+ } perf;
+ /* Request for checking signals. It is shared by all interpreters (see
+ bpo-40513). Any thread of any interpreter can receive a signal, but only
+ the main thread of the main interpreter can handle signals: see
+ _Py_ThreadCanHandleSignals(). */
+ _Py_atomic_int signals_pending;
+ struct _gil_runtime_state gil;
+};
+
+#ifdef PY_HAVE_PERF_TRAMPOLINE
+# define _PyEval_RUNTIME_PERF_INIT \
+ { \
+ .status = PERF_STATUS_NO_INIT, \
+ .extra_code_index = -1, \
+ }
+#else
+# define _PyEval_RUNTIME_PERF_INIT {0}
+#endif
+
+
+struct _pending_calls {
+ int busy;
+ PyThread_type_lock lock;
+ /* Request for running pending calls. */
+ _Py_atomic_int calls_to_do;
+ /* Request for looking at the `async_exc` field of the current
+ thread state.
+ Guarded by the GIL. */
+ int async_exc;
+#define NPENDINGCALLS 32
+ struct {
+ int (*func)(void *);
+ void *arg;
+ } calls[NPENDINGCALLS];
+ int first;
+ int last;
+};
+
+struct _ceval_state {
+ int recursion_limit;
+ /* This single variable consolidates all requests to break out of
+ the fast path in the eval loop. */
+ _Py_atomic_int eval_breaker;
+ /* Request for dropping the GIL */
+ _Py_atomic_int gil_drop_request;
+ /* The GC is ready to be executed */
+ _Py_atomic_int gc_scheduled;
+ struct _pending_calls pending;
+};
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_CEVAL_STATE_H */
#include "pycore_atomic.h" // _Py_atomic_address
#include "pycore_ast_state.h" // struct ast_state
+#include "pycore_ceval_state.h" // struct _ceval_state
#include "pycore_code.h" // struct callable_cache
#include "pycore_context.h" // struct _Py_context_state
#include "pycore_dict_state.h" // struct _Py_dict_state
#include "pycore_warnings.h" // struct _warnings_runtime_state
-struct _pending_calls {
- int busy;
- PyThread_type_lock lock;
- /* Request for running pending calls. */
- _Py_atomic_int calls_to_do;
- /* Request for looking at the `async_exc` field of the current
- thread state.
- Guarded by the GIL. */
- int async_exc;
-#define NPENDINGCALLS 32
- struct {
- int (*func)(void *);
- void *arg;
- } calls[NPENDINGCALLS];
- int first;
- int last;
-};
-
-struct _ceval_state {
- int recursion_limit;
- /* This single variable consolidates all requests to break out of
- the fast path in the eval loop. */
- _Py_atomic_int eval_breaker;
- /* Request for dropping the GIL */
- _Py_atomic_int gil_drop_request;
- /* The GC is ready to be executed */
- _Py_atomic_int gc_scheduled;
- struct _pending_calls pending;
-};
-
-
// atexit state
typedef struct {
PyObject *func;
#endif
#include "pycore_atomic.h" /* _Py_atomic_address */
+#include "pycore_ceval_state.h" // struct _ceval_runtime_state
#include "pycore_dict_state.h" // struct _Py_dict_runtime_state
#include "pycore_dtoa.h" // struct _dtoa_runtime_state
#include "pycore_floatobject.h" // struct _Py_float_runtime_state
#include "pycore_function.h" // struct _func_runtime_state
-#include "pycore_gil.h" // struct _gil_runtime_state
#include "pycore_global_objects.h" // struct _Py_global_objects
#include "pycore_import.h" // struct _import_runtime_state
#include "pycore_interp.h" // PyInterpreterState
/* ceval state */
-struct _ceval_runtime_state {
- /* Request for checking signals. It is shared by all interpreters (see
- bpo-40513). Any thread of any interpreter can receive a signal, but only
- the main thread of the main interpreter can handle signals: see
- _Py_ThreadCanHandleSignals(). */
- _Py_atomic_int signals_pending;
- struct _gil_runtime_state gil;
-};
-
/* GIL state */
struct _gilstate_runtime_state {
.header = 1, \
}, \
}, \
+ .ceval = { \
+ .perf = _PyEval_RUNTIME_PERF_INIT, \
+ }, \
.gilstate = { \
.check_enabled = 1, \
/* A TSS key must be initialized with Py_tss_NEEDS_INIT \
$(srcdir)/Include/internal/pycore_bytesobject.h \
$(srcdir)/Include/internal/pycore_call.h \
$(srcdir)/Include/internal/pycore_ceval.h \
+ $(srcdir)/Include/internal/pycore_ceval_state.h \
$(srcdir)/Include/internal/pycore_code.h \
$(srcdir)/Include/internal/pycore_compile.h \
$(srcdir)/Include/internal/pycore_condvar.h \
<ClInclude Include="..\Include\internal\pycore_bytesobject.h" />
<ClInclude Include="..\Include\internal\pycore_call.h" />
<ClInclude Include="..\Include\internal\pycore_ceval.h" />
+ <ClInclude Include="..\Include\internal\pycore_ceval_state.h" />
<ClInclude Include="..\Include\internal\pycore_code.h" />
<ClInclude Include="..\Include\internal\pycore_compile.h" />
<ClInclude Include="..\Include\internal\pycore_condvar.h" />
<ClInclude Include="..\Include\internal\pycore_ceval.h">
<Filter>Include\internal</Filter>
</ClInclude>
+ <ClInclude Include="..\Include\internal\pycore_ceval_state.h">
+ <Filter>Include\internal</Filter>
+ </ClInclude>
<ClInclude Include="..\Include\internal\pycore_code.h">
<Filter>Include\internal</Filter>
</ClInclude>
#include "pycore_frame.h"
#include "pycore_interp.h"
-typedef enum {
- PERF_STATUS_FAILED = -1, // Perf trampoline is in an invalid state
- PERF_STATUS_NO_INIT = 0, // Perf trampoline is not initialized
- PERF_STATUS_OK = 1, // Perf trampoline is ready to be executed
-} perf_status_t;
#ifdef PY_HAVE_PERF_TRAMPOLINE
};
typedef struct code_arena_st code_arena_t;
-
-struct trampoline_api_st {
- void* (*init_state)(void);
- void (*write_state)(void* state, const void *code_addr,
- unsigned int code_size, PyCodeObject* code);
- int (*free_state)(void* state);
- void *state;
-};
-
typedef struct trampoline_api_st trampoline_api_t;
-
-static perf_status_t perf_status = PERF_STATUS_NO_INIT;
-static Py_ssize_t extra_code_index = -1;
-static code_arena_t *code_arena;
-static trampoline_api_t trampoline_api;
-
-static FILE *perf_map_file;
+#define perf_status _PyRuntime.ceval.perf.status
+#define extra_code_index _PyRuntime.ceval.perf.extra_code_index
+#define perf_code_arena _PyRuntime.ceval.perf.code_arena
+#define trampoline_api _PyRuntime.ceval.perf.trampoline_api
+#define perf_map_file _PyRuntime.ceval.perf.map_file
static void *
perf_map_get_file(void)
new_arena->size = mem_size;
new_arena->size_left = mem_size;
new_arena->code_size = code_size;
- new_arena->prev = code_arena;
- code_arena = new_arena;
+ new_arena->prev = perf_code_arena;
+ perf_code_arena = new_arena;
return 0;
}
static void
free_code_arenas(void)
{
- code_arena_t *cur = code_arena;
+ code_arena_t *cur = perf_code_arena;
code_arena_t *prev;
- code_arena = NULL; // invalid static pointer
+    perf_code_arena = NULL; // invalidate the global (runtime-state) pointer
while (cur) {
munmap(cur->start_addr, cur->size);
prev = cur->prev;
static inline py_trampoline
compile_trampoline(void)
{
- if ((code_arena == NULL) ||
- (code_arena->size_left <= code_arena->code_size)) {
+ if ((perf_code_arena == NULL) ||
+ (perf_code_arena->size_left <= perf_code_arena->code_size)) {
if (new_code_arena() < 0) {
return NULL;
}
}
- assert(code_arena->size_left <= code_arena->size);
- return code_arena_new_code(code_arena);
+ assert(perf_code_arena->size_left <= perf_code_arena->size);
+ return code_arena_new_code(perf_code_arena);
}
static PyObject *
goto default_eval;
}
trampoline_api.write_state(trampoline_api.state, new_trampoline,
- code_arena->code_size, co);
+ perf_code_arena->code_size, co);
_PyCode_SetExtra((PyObject *)co, extra_code_index,
(void *)new_trampoline);
f = new_trampoline;
##################################
## global non-objects to fix in core code
-##-----------------------
-## effectively-const but initialized lazily
-
-## others
-Python/perf_trampoline.c - perf_map_file -
-
##-----------------------
## state
-## other
Objects/object.c - _Py_RefTotal -
-Python/perf_trampoline.c - perf_status -
-Python/perf_trampoline.c - extra_code_index -
-Python/perf_trampoline.c - code_arena -
-Python/perf_trampoline.c - trampoline_api -
Python/thread_pthread_stubs.h - py_tls_entries -
Modules/itertoolsmodule.c - ziplongest_type -
##-----------------------
-## other
-
## state
+
Modules/faulthandler.c - fatal_error -
Modules/faulthandler.c - thread -
Modules/faulthandler.c - user_signals -