_Py_Specialize_BinaryOp(lhs, rhs, next_instr, oparg, LOCALS_ARRAY);
stack_pointer = _PyFrame_GetStackPointer(frame);
#if _Py_TIER2
- tstate->interp->jit_state.prev_state.specialize_counter++;
+ ((_PyThreadStateImpl *)tstate)->jit_state.prev_state.specialize_counter++;
#endif
DISPATCH_SAME_OPARG();
}
_Py_Specialize_Call(callable, next_instr, oparg + !PyStackRef_IsNull(self_or_null));
stack_pointer = _PyFrame_GetStackPointer(frame);
#if _Py_TIER2
- tstate->interp->jit_state.prev_state.specialize_counter++;
+ ((_PyThreadStateImpl *)tstate)->jit_state.prev_state.specialize_counter++;
#endif
DISPATCH_SAME_OPARG();
}
_Py_Specialize_CallKw(callable, next_instr, oparg + !PyStackRef_IsNull(self_or_null));
stack_pointer = _PyFrame_GetStackPointer(frame);
#if _Py_TIER2
- tstate->interp->jit_state.prev_state.specialize_counter++;
+ ((_PyThreadStateImpl *)tstate)->jit_state.prev_state.specialize_counter++;
#endif
DISPATCH_SAME_OPARG();
}
_Py_Specialize_CompareOp(left, right, next_instr, oparg);
stack_pointer = _PyFrame_GetStackPointer(frame);
#if _Py_TIER2
- tstate->interp->jit_state.prev_state.specialize_counter++;
+ ((_PyThreadStateImpl *)tstate)->jit_state.prev_state.specialize_counter++;
#endif
DISPATCH_SAME_OPARG();
}
_Py_Specialize_ContainsOp(right, next_instr);
stack_pointer = _PyFrame_GetStackPointer(frame);
#if _Py_TIER2
- tstate->interp->jit_state.prev_state.specialize_counter++;
+ ((_PyThreadStateImpl *)tstate)->jit_state.prev_state.specialize_counter++;
#endif
DISPATCH_SAME_OPARG();
}
_Py_Specialize_ForIter(iter, null_or_index, next_instr, oparg);
stack_pointer = _PyFrame_GetStackPointer(frame);
#if _Py_TIER2
- tstate->interp->jit_state.prev_state.specialize_counter++;
+ ((_PyThreadStateImpl *)tstate)->jit_state.prev_state.specialize_counter++;
#endif
DISPATCH_SAME_OPARG();
}
FT_ATOMIC_STORE_UINT8_RELAXED(this_instr->op.code, desired);
next_instr = this_instr;
#if _Py_TIER2
- tstate->interp->jit_state.prev_state.specialize_counter++;
+ ((_PyThreadStateImpl *)tstate)->jit_state.prev_state.specialize_counter++;
#endif
DISPATCH_SAME_OPARG();
}
if (!IS_JIT_TRACING() && backoff_counter_triggers(counter) &&
this_instr->op.code == JUMP_BACKWARD_JIT &&
next_instr->op.code != ENTER_EXECUTOR) {
- if (tstate->interp->jit_state.code_buffer == NULL) {
+ _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
+ if (_tstate->jit_state.code_buffer == NULL) {
_PyFrame_SetStackPointer(frame, stack_pointer);
- tstate->interp->jit_state.code_buffer = (_PyUOpInstruction *)_PyObject_VirtualAlloc(UOP_BUFFER_SIZE);
+ _tstate->jit_state.code_buffer = (_PyUOpInstruction *)_PyObject_VirtualAlloc(UOP_BUFFER_SIZE);
stack_pointer = _PyFrame_GetStackPointer(frame);
- if (tstate->interp->jit_state.code_buffer == NULL) {
+ if (_tstate->jit_state.code_buffer == NULL) {
DISPATCH();
}
}
_Py_Specialize_LoadAttr(owner, next_instr, name);
stack_pointer = _PyFrame_GetStackPointer(frame);
#if _Py_TIER2
- tstate->interp->jit_state.prev_state.specialize_counter++;
+ ((_PyThreadStateImpl *)tstate)->jit_state.prev_state.specialize_counter++;
#endif
DISPATCH_SAME_OPARG();
}
_Py_Specialize_LoadGlobal(GLOBALS(), BUILTINS(), next_instr, name);
stack_pointer = _PyFrame_GetStackPointer(frame);
#if _Py_TIER2
- tstate->interp->jit_state.prev_state.specialize_counter++;
+ ((_PyThreadStateImpl *)tstate)->jit_state.prev_state.specialize_counter++;
#endif
DISPATCH_SAME_OPARG();
}
_Py_Specialize_LoadSuperAttr(global_super_st, class_st, next_instr, load_method);
stack_pointer = _PyFrame_GetStackPointer(frame);
#if _Py_TIER2
- tstate->interp->jit_state.prev_state.specialize_counter++;
+ ((_PyThreadStateImpl *)tstate)->jit_state.prev_state.specialize_counter++;
#endif
DISPATCH_SAME_OPARG();
}
_Py_Specialize_Send(receiver, next_instr);
stack_pointer = _PyFrame_GetStackPointer(frame);
#if _Py_TIER2
- tstate->interp->jit_state.prev_state.specialize_counter++;
+ ((_PyThreadStateImpl *)tstate)->jit_state.prev_state.specialize_counter++;
#endif
DISPATCH_SAME_OPARG();
}
_Py_Specialize_StoreAttr(owner, next_instr, name);
stack_pointer = _PyFrame_GetStackPointer(frame);
#if _Py_TIER2
- tstate->interp->jit_state.prev_state.specialize_counter++;
+ ((_PyThreadStateImpl *)tstate)->jit_state.prev_state.specialize_counter++;
#endif
DISPATCH_SAME_OPARG();
}
_Py_Specialize_StoreSubscr(container, sub, next_instr);
stack_pointer = _PyFrame_GetStackPointer(frame);
#if _Py_TIER2
- tstate->interp->jit_state.prev_state.specialize_counter++;
+ ((_PyThreadStateImpl *)tstate)->jit_state.prev_state.specialize_counter++;
#endif
DISPATCH_SAME_OPARG();
}
_Py_Specialize_ToBool(value, next_instr);
stack_pointer = _PyFrame_GetStackPointer(frame);
#if _Py_TIER2
- tstate->interp->jit_state.prev_state.specialize_counter++;
+ ((_PyThreadStateImpl *)tstate)->jit_state.prev_state.specialize_counter++;
#endif
DISPATCH_SAME_OPARG();
}
_Py_Specialize_UnpackSequence(seq, next_instr, oparg);
stack_pointer = _PyFrame_GetStackPointer(frame);
#if _Py_TIER2
- tstate->interp->jit_state.prev_state.specialize_counter++;
+ ((_PyThreadStateImpl *)tstate)->jit_state.prev_state.specialize_counter++;
#endif
DISPATCH_SAME_OPARG();
}
}
DISPATCH_GOTO_NON_TRACING();
}
- if ((tstate->interp->jit_state.prev_state.instr->op.code == CALL_LIST_APPEND &&
+ _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
+ if ((_tstate->jit_state.prev_state.instr->op.code == CALL_LIST_APPEND &&
opcode == POP_TOP) ||
- (tstate->interp->jit_state.prev_state.instr->op.code == BINARY_OP_INPLACE_ADD_UNICODE &&
+ (_tstate->jit_state.prev_state.instr->op.code == BINARY_OP_INPLACE_ADD_UNICODE &&
opcode == STORE_FAST)) {
- tstate->interp->jit_state.prev_state.instr_is_super = true;
+ _tstate->jit_state.prev_state.instr_is_super = true;
}
else {
- tstate->interp->jit_state.prev_state.instr = next_instr;
+ _tstate->jit_state.prev_state.instr = next_instr;
}
- tstate->interp->jit_state.prev_state.specialize_counter = 0;
+ _tstate->jit_state.prev_state.specialize_counter = 0;
PyCodeObject *prev_code = (PyCodeObject *)Py_NewRef(PyStackRef_AsPyObjectBorrow(frame->f_executable));
- if (tstate->interp->jit_state.prev_state.instr_code != prev_code) {
+ if (_tstate->jit_state.prev_state.instr_code != prev_code) {
_PyFrame_SetStackPointer(frame, stack_pointer);
- Py_SETREF(tstate->interp->jit_state.prev_state.instr_code, prev_code);
+ Py_SETREF(_tstate->jit_state.prev_state.instr_code, prev_code);
stack_pointer = _PyFrame_GetStackPointer(frame);
}
- tstate->interp->jit_state.prev_state.instr_frame = frame;
- tstate->interp->jit_state.prev_state.instr_oparg = oparg;
- tstate->interp->jit_state.prev_state.instr_stacklevel = PyStackRef_IsNone(frame->f_executable) ? 2 : STACK_LEVEL();
+ _tstate->jit_state.prev_state.instr_frame = frame;
+ _tstate->jit_state.prev_state.instr_oparg = oparg;
+ _tstate->jit_state.prev_state.instr_stacklevel = PyStackRef_IsNone(frame->f_executable) ? 2 : STACK_LEVEL();
DISPATCH_GOTO_NON_TRACING();
#else
Py_FatalError("JIT label executed in non-jit build.");
_PyOptimizer_Optimize(
_PyInterpreterFrame *frame, PyThreadState *tstate)
{
+ _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
+ int chain_depth = _tstate->jit_state.initial_state.chain_depth;
PyInterpreterState *interp = _PyInterpreterState_GET();
- int chain_depth = tstate->interp->jit_state.initial_state.chain_depth;
assert(interp->jit);
assert(!interp->compiling);
- assert(tstate->interp->jit_state.initial_state.stack_depth >= 0);
+ assert(_tstate->jit_state.initial_state.stack_depth >= 0);
#ifndef Py_GIL_DISABLED
// Trace got stomped on by another thread.
- if (tstate->interp->jit_state.initial_state.func == NULL) {
+ if (_tstate->jit_state.initial_state.func == NULL) {
return 0;
}
interp->compiling = true;
// this is true, since a deopt won't infinitely re-enter the executor:
chain_depth %= MAX_CHAIN_DEPTH;
bool progress_needed = chain_depth == 0;
- PyCodeObject *code = (PyCodeObject *)tstate->interp->jit_state.initial_state.code;
- _Py_CODEUNIT *start = tstate->interp->jit_state.initial_state.start_instr;
+ PyCodeObject *code = (PyCodeObject *)_tstate->jit_state.initial_state.code;
+ _Py_CODEUNIT *start = _tstate->jit_state.initial_state.start_instr;
if (progress_needed && !has_space_for_executor(code, start)) {
interp->compiling = false;
return 0;
}
// One of our dependencies while tracing was invalidated. Not worth compiling.
- if (!tstate->interp->jit_state.prev_state.dependencies_still_valid) {
+ if (!_tstate->jit_state.prev_state.dependencies_still_valid) {
interp->compiling = false;
return 0;
}
else {
executor->vm_data.code = NULL;
}
- _PyExitData *exit = tstate->interp->jit_state.initial_state.exit;
+ _PyExitData *exit = _tstate->jit_state.initial_state.exit;
if (exit != NULL) {
exit->executor = executor;
}
lltrace = *python_lltrace - '0'; // TODO: Parse an int and all that
}
#endif
-
- PyCodeObject *old_code = tstate->interp->jit_state.prev_state.instr_code;
- // Something else finalized the trace. This can happen in multi-threaded scenarios as our trace
- // addition from bytecode execution to here is not atomic.
- // Though in GIL builds, the GIL protects the rest.
- if (old_code == NULL) {
- return 0;
- }
- bool progress_needed = (tstate->interp->jit_state.initial_state.chain_depth % MAX_CHAIN_DEPTH) == 0;
- _PyBloomFilter *dependencies = &tstate->interp->jit_state.prev_state.dependencies;
+ _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
+ PyCodeObject *old_code = _tstate->jit_state.prev_state.instr_code;
+ bool progress_needed = (_tstate->jit_state.initial_state.chain_depth % MAX_CHAIN_DEPTH) == 0;
+ _PyBloomFilter *dependencies = &_tstate->jit_state.prev_state.dependencies;
_Py_BloomFilter_Add(dependencies, old_code);
- int trace_length = tstate->interp->jit_state.prev_state.code_curr_size;
- _PyUOpInstruction *trace = tstate->interp->jit_state.code_buffer;
- int max_length = tstate->interp->jit_state.prev_state.code_max_size;
+ int trace_length = _tstate->jit_state.prev_state.code_curr_size;
+ _PyUOpInstruction *trace = _tstate->jit_state.code_buffer;
+ int max_length = _tstate->jit_state.prev_state.code_max_size;
int is_sys_tracing = (tstate->c_tracefunc != NULL) || (tstate->c_profilefunc != NULL);
if (is_sys_tracing) {
goto full;
}
- _Py_CODEUNIT *this_instr = tstate->interp->jit_state.prev_state.instr;
+ _Py_CODEUNIT *this_instr = _tstate->jit_state.prev_state.instr;
_Py_CODEUNIT *target_instr = this_instr;
uint32_t target = 0;
// Rewind EXTENDED_ARG so that we see the whole thing.
// We must point to the first EXTENDED_ARG when deopting.
- int oparg = tstate->interp->jit_state.prev_state.instr_oparg;
+ int oparg = _tstate->jit_state.prev_state.instr_oparg;
int opcode = this_instr->op.code;
// Failed specialization many times. Deopt!
- if (tstate->interp->jit_state.prev_state.specialize_counter >= MAX_SPECIALIZATION_TRIES) {
+ if (_tstate->jit_state.prev_state.specialize_counter >= MAX_SPECIALIZATION_TRIES) {
opcode = _PyOpcode_Deopt[opcode];
}
int rewind_oparg = oparg;
target--;
}
- int old_stack_level = tstate->interp->jit_state.prev_state.instr_stacklevel;
+ int old_stack_level = _tstate->jit_state.prev_state.instr_stacklevel;
// Strange control-flow
bool has_dynamic_jump_taken = OPCODE_HAS_UNPREDICTABLE_JUMP(opcode) &&
/* Special case the first instruction,
* so that we can guarantee forward progress */
- if (progress_needed && tstate->interp->jit_state.prev_state.code_curr_size <= 3) {
+ if (progress_needed && _tstate->jit_state.prev_state.code_curr_size <= 3) {
if (OPCODE_HAS_EXIT(opcode) || OPCODE_HAS_DEOPT(opcode)) {
opcode = _PyOpcode_Deopt[opcode];
}
#endif
// Skip over super instructions.
- if (tstate->interp->jit_state.prev_state.instr_is_super) {
- tstate->interp->jit_state.prev_state.instr_is_super = false;
+ if (_tstate->jit_state.prev_state.instr_is_super) {
+ _tstate->jit_state.prev_state.instr_is_super = false;
return 1;
}
goto full;
}
- if (!tstate->interp->jit_state.prev_state.dependencies_still_valid) {
+ if (!_tstate->jit_state.prev_state.dependencies_still_valid) {
goto done;
}
// This happens when a recursive call happens that we can't trace. Such as Python -> C -> Python calls
// If we haven't guarded the IP, then it's untraceable.
- if (frame != tstate->interp->jit_state.prev_state.instr_frame && !needs_guard_ip) {
+ if (frame != _tstate->jit_state.prev_state.instr_frame && !needs_guard_ip) {
DPRINTF(2, "Unsupported: unguardable jump taken\n");
goto unsupported;
}
_Py_FALLTHROUGH;
case JUMP_BACKWARD_NO_INTERRUPT:
{
- if ((next_instr != tstate->interp->jit_state.initial_state.close_loop_instr) &&
- (next_instr != tstate->interp->jit_state.initial_state.start_instr) &&
- tstate->interp->jit_state.prev_state.code_curr_size > 5 &&
+ if ((next_instr != _tstate->jit_state.initial_state.close_loop_instr) &&
+ (next_instr != _tstate->jit_state.initial_state.start_instr) &&
+ _tstate->jit_state.prev_state.code_curr_size > 5 &&
// These are coroutines, and we want to unroll those usually.
opcode != JUMP_BACKWARD_NO_INTERRUPT) {
// We encountered a JUMP_BACKWARD but not to the top of our own loop.
ADD_TO_TRACE(_EXIT_TRACE, 0, 0, target);
trace[trace_length-1].operand1 = true; // is_control_flow
DPRINTF(2, "JUMP_BACKWARD not to top ends trace %p %p %p\n", next_instr,
- tstate->interp->jit_state.initial_state.close_loop_instr, tstate->interp->jit_state.initial_state.start_instr);
+ _tstate->jit_state.initial_state.close_loop_instr, _tstate->jit_state.initial_state.start_instr);
goto done;
}
break;
}
}
// Loop back to the start
- int is_first_instr = tstate->interp->jit_state.initial_state.close_loop_instr == next_instr ||
- tstate->interp->jit_state.initial_state.start_instr == next_instr;
- if (is_first_instr && tstate->interp->jit_state.prev_state.code_curr_size > 5) {
+ int is_first_instr = _tstate->jit_state.initial_state.close_loop_instr == next_instr ||
+ _tstate->jit_state.initial_state.start_instr == next_instr;
+ if (is_first_instr && _tstate->jit_state.prev_state.code_curr_size > 5) {
if (needs_guard_ip) {
ADD_TO_TRACE(_SET_IP, 0, (uintptr_t)next_instr, 0);
}
goto done;
}
DPRINTF(2, "Trace continuing\n");
- tstate->interp->jit_state.prev_state.code_curr_size = trace_length;
- tstate->interp->jit_state.prev_state.code_max_size = max_length;
+ _tstate->jit_state.prev_state.code_curr_size = trace_length;
+ _tstate->jit_state.prev_state.code_max_size = max_length;
return 1;
done:
DPRINTF(2, "Trace done\n");
- tstate->interp->jit_state.prev_state.code_curr_size = trace_length;
- tstate->interp->jit_state.prev_state.code_max_size = max_length;
+ _tstate->jit_state.prev_state.code_curr_size = trace_length;
+ _tstate->jit_state.prev_state.code_max_size = max_length;
return 0;
full:
DPRINTF(2, "Trace full\n");
- if (!is_terminator(&tstate->interp->jit_state.code_buffer[trace_length-1])) {
+ if (!is_terminator(&_tstate->jit_state.code_buffer[trace_length-1])) {
// Undo the last few instructions.
- trace_length = tstate->interp->jit_state.prev_state.code_curr_size;
- max_length = tstate->interp->jit_state.prev_state.code_max_size;
+ trace_length = _tstate->jit_state.prev_state.code_curr_size;
+ max_length = _tstate->jit_state.prev_state.code_max_size;
// We previously reversed one.
max_length += 1;
ADD_TO_TRACE(_EXIT_TRACE, 0, 0, target);
trace[trace_length-1].operand1 = true; // is_control_flow
}
- tstate->interp->jit_state.prev_state.code_curr_size = trace_length;
- tstate->interp->jit_state.prev_state.code_max_size = max_length;
+ _tstate->jit_state.prev_state.code_curr_size = trace_length;
+ _tstate->jit_state.prev_state.code_max_size = max_length;
return 0;
}
_Py_CODEUNIT *start_instr, _Py_CODEUNIT *close_loop_instr, int curr_stackdepth, int chain_depth,
_PyExitData *exit, int oparg)
{
+ _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
// A recursive trace.
// Don't trace into the inner call because it will stomp on the previous trace, causing endless retraces.
- if (tstate->interp->jit_state.prev_state.code_curr_size > 2) {
+ if (_tstate->jit_state.prev_state.code_curr_size > 2) {
return 0;
}
if (oparg > 0xFFFF) {
return 0;
}
+ if (_tstate->jit_state.code_buffer == NULL) {
+ _tstate->jit_state.code_buffer = (_PyUOpInstruction *)_PyObject_VirtualAlloc(UOP_BUFFER_SIZE);
+ if (_tstate->jit_state.code_buffer == NULL) {
+ // Don't error, just go to next instruction.
+ return 0;
+ }
+ }
PyCodeObject *code = _PyFrame_GetCode(frame);
#ifdef Py_DEBUG
chain_depth);
#endif
- add_to_trace(tstate->interp->jit_state.code_buffer, 0, _START_EXECUTOR, 0, (uintptr_t)start_instr, INSTR_IP(start_instr, code));
- add_to_trace(tstate->interp->jit_state.code_buffer, 1, _MAKE_WARM, 0, 0, 0);
- tstate->interp->jit_state.prev_state.code_curr_size = 2;
-
- tstate->interp->jit_state.prev_state.code_max_size = UOP_MAX_TRACE_LENGTH;
- tstate->interp->jit_state.initial_state.start_instr = start_instr;
- tstate->interp->jit_state.initial_state.close_loop_instr = close_loop_instr;
- tstate->interp->jit_state.initial_state.code = (PyCodeObject *)Py_NewRef(code);
- tstate->interp->jit_state.initial_state.func = (PyFunctionObject *)Py_XNewRef(PyStackRef_AsPyObjectBorrow(frame->f_funcobj));
- tstate->interp->jit_state.initial_state.exit = exit;
- tstate->interp->jit_state.initial_state.stack_depth = curr_stackdepth;
- tstate->interp->jit_state.initial_state.chain_depth = chain_depth;
- tstate->interp->jit_state.prev_state.instr_frame = frame;
- tstate->interp->jit_state.prev_state.dependencies_still_valid = true;
- tstate->interp->jit_state.prev_state.specialize_counter = 0;
- tstate->interp->jit_state.prev_state.instr_code = (PyCodeObject *)Py_NewRef(_PyFrame_GetCode(frame));
- tstate->interp->jit_state.prev_state.instr = curr_instr;
- tstate->interp->jit_state.prev_state.instr_frame = frame;
- tstate->interp->jit_state.prev_state.instr_oparg = oparg;
- tstate->interp->jit_state.prev_state.instr_stacklevel = curr_stackdepth;
- tstate->interp->jit_state.prev_state.instr_is_super = false;
+ add_to_trace(_tstate->jit_state.code_buffer, 0, _START_EXECUTOR, 0, (uintptr_t)start_instr, INSTR_IP(start_instr, code));
+ add_to_trace(_tstate->jit_state.code_buffer, 1, _MAKE_WARM, 0, 0, 0);
+ _tstate->jit_state.prev_state.code_curr_size = 2;
+
+ _tstate->jit_state.prev_state.code_max_size = UOP_MAX_TRACE_LENGTH;
+ _tstate->jit_state.initial_state.start_instr = start_instr;
+ _tstate->jit_state.initial_state.close_loop_instr = close_loop_instr;
+ _tstate->jit_state.initial_state.code = (PyCodeObject *)Py_NewRef(code);
+ _tstate->jit_state.initial_state.func = (PyFunctionObject *)Py_XNewRef(PyStackRef_AsPyObjectBorrow(frame->f_funcobj));
+ _tstate->jit_state.initial_state.exit = exit;
+ _tstate->jit_state.initial_state.stack_depth = curr_stackdepth;
+ _tstate->jit_state.initial_state.chain_depth = chain_depth;
+ _tstate->jit_state.prev_state.instr_frame = frame;
+ _tstate->jit_state.prev_state.dependencies_still_valid = true;
+ _tstate->jit_state.prev_state.specialize_counter = 0;
+ _tstate->jit_state.prev_state.instr_code = (PyCodeObject *)Py_NewRef(_PyFrame_GetCode(frame));
+ _tstate->jit_state.prev_state.instr = curr_instr;
+ _tstate->jit_state.prev_state.instr_frame = frame;
+ _tstate->jit_state.prev_state.instr_oparg = oparg;
+ _tstate->jit_state.prev_state.instr_stacklevel = curr_stackdepth;
+ _tstate->jit_state.prev_state.instr_is_super = false;
assert(curr_instr->op.code == JUMP_BACKWARD_JIT || (exit != NULL));
- tstate->interp->jit_state.initial_state.jump_backward_instr = curr_instr;
+ _tstate->jit_state.initial_state.jump_backward_instr = curr_instr;
assert(curr_instr->op.code == JUMP_BACKWARD_JIT || (exit != NULL));
- _Py_BloomFilter_Init(&tstate->interp->jit_state.prev_state.dependencies);
+ _Py_BloomFilter_Init(&_tstate->jit_state.prev_state.dependencies);
return 1;
}
void
_PyJit_FinalizeTracing(PyThreadState *tstate)
{
+ // jit_state has moved off tstate->interp onto the thread-state struct,
+ // so downcast to _PyThreadStateImpl once and reset the tracer through it.
- Py_CLEAR(tstate->interp->jit_state.initial_state.code);
- Py_CLEAR(tstate->interp->jit_state.initial_state.func);
- Py_CLEAR(tstate->interp->jit_state.prev_state.instr_code);
- tstate->interp->jit_state.prev_state.code_curr_size = 2;
- tstate->interp->jit_state.prev_state.code_max_size = UOP_MAX_TRACE_LENGTH - 1;
+ _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
+ // Drop the strong references taken when tracing started (code, func,
+ // and the last-specialized instruction's code object).
+ Py_CLEAR(_tstate->jit_state.initial_state.code);
+ Py_CLEAR(_tstate->jit_state.initial_state.func);
+ Py_CLEAR(_tstate->jit_state.prev_state.instr_code);
+ // Reset the uop buffer bookkeeping; size 2 presumably accounts for the
+ // _START_EXECUTOR/_MAKE_WARM preamble written at trace start — confirm
+ // against the trace-initialization function.
+ _tstate->jit_state.prev_state.code_curr_size = 2;
+ _tstate->jit_state.prev_state.code_max_size = UOP_MAX_TRACE_LENGTH - 1;
}
_PyExecutorObject **exec_ptr,
bool progress_needed)
{
+ _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
// Note: the executor has a slightly different set of dependencies than the tracer.
// For example: the tracer depends on function and code objects.
// The executor may only depend on the code object.
// It is the optimizer's responsibility to add the dependencies it requires on its own.
_PyBloomFilter new_dependencies;
_Py_BloomFilter_Init(&new_dependencies);
- _Py_BloomFilter_Add(&new_dependencies, tstate->interp->jit_state.initial_state.code);
- PyInterpreterState *interp = _PyInterpreterState_GET();
- _PyUOpInstruction *buffer = interp->jit_state.code_buffer;
+ _Py_BloomFilter_Add(&new_dependencies, _tstate->jit_state.initial_state.code);
+ _PyUOpInstruction *buffer = _tstate->jit_state.code_buffer;
OPT_STAT_INC(attempts);
char *env_var = Py_GETENV("PYTHON_UOPS_OPTIMIZE");
bool is_noopt = true;
if (env_var == NULL || *env_var == '\0' || *env_var > '0') {
is_noopt = false;
}
- int curr_stackentries = tstate->interp->jit_state.initial_state.stack_depth;
- int length = interp->jit_state.prev_state.code_curr_size;
+ int curr_stackentries = _tstate->jit_state.initial_state.stack_depth;
+ int length = _tstate->jit_state.prev_state.code_curr_size;
// Trace too short, don't bother.
if (length <= 5) {
return 0;
assert(length < UOP_MAX_TRACE_LENGTH);
OPT_STAT_INC(traces_created);
if (!is_noopt) {
- length = _Py_uop_analyze_and_optimize(tstate->interp->jit_state.initial_state.func, buffer,
+ length = _Py_uop_analyze_and_optimize(_tstate->jit_state.initial_state.func, buffer,
length,
curr_stackentries, &new_dependencies);
if (length <= 0) {
length = prepare_for_execution(buffer, length);
assert(length <= UOP_MAX_TRACE_LENGTH);
_PyExecutorObject *executor = make_executor_from_uops(
- buffer, length, &new_dependencies, tstate->interp->jit_state.initial_state.chain_depth);
+ buffer, length, &new_dependencies, _tstate->jit_state.initial_state.chain_depth);
if (executor == NULL) {
return -1;
}
_PyBloomFilter obj_filter;
_Py_BloomFilter_Init(&obj_filter);
_Py_BloomFilter_Add(&obj_filter, obj);
-
- if (bloom_filter_may_contain(&tstate->interp->jit_state.prev_state.dependencies, &obj_filter))
+ _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
+ if (bloom_filter_may_contain(&_tstate->jit_state.prev_state.dependencies, &obj_filter))
{
- tstate->interp->jit_state.prev_state.dependencies_still_valid = false;
+ _tstate->jit_state.prev_state.dependencies_still_valid = false;
}
}
/* Invalidate all executors */