typedef struct _PyExecutorObject {
PyObject_VAR_HEAD
const _PyUOpInstruction *trace;
- _Py_CODEUNIT *expected_entrypoint;
_PyVMData vm_data; /* Used by the VM, but opaque to the optimizer */
uint32_t exit_count;
uint32_t code_size;
assert(exit != NULL);
bool is_dynamic = exit->is_dynamic;
_Py_CODEUNIT *target = is_dynamic ? frame->instr_ptr : (_PyFrame_GetBytecode(frame) + exit->target);
- GOTO_TIER_ONE(target, 0);
- // _Py_BackoffCounter temperature = exit->temperature;
- // if (target->op.code == ENTER_EXECUTOR) {
- // PyCodeObject *code = _PyFrame_GetCode(frame);
- // _PyExecutorObject *executor = code->co_executors->executors[target->op.arg];
- // if (is_dynamic && executor->expected_entrypoint != target) {
- // GOTO_TIER_ONE(target, 0);
- // }
- // Py_INCREF(executor);
- // assert(tstate->jit_exit == exit);
- // exit->executor = executor;
- // TIER2_TO_TIER2(exit->executor);
- // }
- // else {
- // if (!backoff_counter_triggers(temperature)) {
- // exit->temperature = advance_backoff_counter(temperature);
- // GOTO_TIER_ONE(target, 0);
- // }
- // if (is_dynamic) {
- // GOTO_TIER_ONE(target, 0);
- // }
- // _PyExecutorObject *previous_executor = _PyExecutor_FromExit(exit);
- // assert(tstate->current_executor == (PyObject *)previous_executor);
- // int chain_depth = 0;
- // _PyJIT_InitializeTracing(tstate, frame, target, STACK_LEVEL(), chain_depth);
- // GOTO_TIER_ONE(target, 1);
- // }
+ _Py_BackoffCounter temperature = exit->temperature;
+ if (target->op.code == ENTER_EXECUTOR) {
+ PyCodeObject *code = _PyFrame_GetCode(frame);
+ _PyExecutorObject *executor = code->co_executors->executors[target->op.arg];
+ Py_INCREF(executor);
+ assert(tstate->jit_exit == exit);
+ exit->executor = executor;
+ TIER2_TO_TIER2(exit->executor);
+ }
+ else {
+ if (!backoff_counter_triggers(temperature)) {
+ exit->temperature = advance_backoff_counter(temperature);
+ GOTO_TIER_ONE(target, 0);
+ }
+ _PyExecutorObject *previous_executor = _PyExecutor_FromExit(exit);
+ assert(tstate->current_executor == (PyObject *)previous_executor);
+ int chain_depth = 0;
+ _PyJIT_InitializeTracing(tstate, frame, target, STACK_LEVEL(), chain_depth);
+ GOTO_TIER_ONE(target, 1);
+ }
}
tier2 op(_GUARD_IP, (ip/4 --)) {
JUMP_TO_LABEL(start_frame); \
} while (0)
+#define TRACING_DISPATCH_INLINED(NEW_FRAME) \
+ RECORD_TRACE_NO_DISPATCH(); \
+ DISPATCH_INLINED(NEW_FRAME);
+
#define TRACING_DISPATCH() \
{ \
assert(frame->stackpointer == NULL); \
JUMP_TO_LABEL(error); \
} \
if (keep_tracing_bit) { \
- assert(next_instr == frame->instr_ptr); \
assert(next_instr->op.code != ENTER_EXECUTOR); \
assert(tstate->interp->jit_tracer_code_curr_size == 2); \
ENTER_TRACING(); \
assert(exit != NULL);
bool is_dynamic = exit->is_dynamic;
_Py_CODEUNIT *target = is_dynamic ? frame->instr_ptr : (_PyFrame_GetBytecode(frame) + exit->target);
- GOTO_TIER_ONE(target, 0);
+ _Py_BackoffCounter temperature = exit->temperature;
+ if (target->op.code == ENTER_EXECUTOR) {
+ PyCodeObject *code = _PyFrame_GetCode(frame);
+ _PyExecutorObject *executor = code->co_executors->executors[target->op.arg];
+ Py_INCREF(executor);
+ assert(tstate->jit_exit == exit);
+ exit->executor = executor;
+ TIER2_TO_TIER2(exit->executor);
+ }
+ else {
+ if (!backoff_counter_triggers(temperature)) {
+ exit->temperature = advance_backoff_counter(temperature);
+ GOTO_TIER_ONE(target, 0);
+ }
+ _PyExecutorObject *previous_executor = _PyExecutor_FromExit(exit);
+ assert(tstate->current_executor == (PyObject *)previous_executor);
+ int chain_depth = 0;
+ _PyJIT_InitializeTracing(tstate, frame, target, STACK_LEVEL(), chain_depth);
+ GOTO_TIER_ONE(target, 1);
+ }
break;
}
TRACING_JUMP_TO_LABEL(error);
}
frame->return_offset = 4u ;
- DISPATCH_INLINED(new_frame);
+ TRACING_DISPATCH_INLINED(new_frame);
}
STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
if (CONVERSION_FAILED(args_o)) {
}
assert( 1u == 1);
frame->return_offset = 1;
- DISPATCH_INLINED(new_frame);
+ TRACING_DISPATCH_INLINED(new_frame);
}
PyObject *callargs = PyStackRef_AsPyObjectBorrow(callargs_st);
assert(PyTuple_CheckExact(callargs));
}
assert( 4u == 1 + INLINE_CACHE_ENTRIES_CALL_KW);
frame->return_offset = 4u ;
- DISPATCH_INLINED(new_frame);
+ TRACING_DISPATCH_INLINED(new_frame);
}
STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
if (CONVERSION_FAILED(args_o)) {
TRACING_JUMP_TO_LABEL(error);
}
frame->return_offset = 4u ;
- DISPATCH_INLINED(new_frame);
+ TRACING_DISPATCH_INLINED(new_frame);
}
STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
if (CONVERSION_FAILED(args_o)) {
}
assert( 1u == 1);
frame->return_offset = 1;
- DISPATCH_INLINED(new_frame);
+ TRACING_DISPATCH_INLINED(new_frame);
}
PyObject *callargs = PyStackRef_AsPyObjectBorrow(callargs_st);
assert(PyTuple_CheckExact(callargs));
}
assert( 4u == 1 + INLINE_CACHE_ENTRIES_CALL_KW);
frame->return_offset = 4u ;
- DISPATCH_INLINED(new_frame);
+ TRACING_DISPATCH_INLINED(new_frame);
}
STACKREFS_TO_PYOBJECTS(arguments, total_args, args_o);
if (CONVERSION_FAILED(args_o)) {
assert(WITHIN_STACK_BOUNDS());
new_frame->localsplus[1] = PyStackRef_FromPyObjectNew(name);
frame->return_offset = 10u ;
- DISPATCH_INLINED(new_frame);
+ TRACING_DISPATCH_INLINED(new_frame);
}
TRACING_TARGET(LOAD_ATTR_INSTANCE_VALUE) {
frame->return_offset = (uint16_t)( 2u + oparg);
assert(gen_frame->previous == NULL);
gen_frame->previous = frame;
- DISPATCH_INLINED(gen_frame);
+ TRACING_DISPATCH_INLINED(gen_frame);
}
if (PyStackRef_IsNone(v) && PyIter_Check(receiver_o)) {
_PyFrame_SetStackPointer(frame, stack_pointer);
return 1;
}
+ if (opcode == JUMP_BACKWARD_NO_INTERRUPT) {
+ return 1;
+ }
+
if (opcode == ENTER_EXECUTOR) {
ADD_TO_TRACE(_CHECK_VALIDITY, 0, 0, target);
ADD_TO_TRACE(_SET_IP, 0, (uintptr_t)target_instr, target);
exit_op = _HANDLE_PENDING_AND_DEOPT;
}
int32_t jump_target = target;
- if (opcode == _FOR_ITER_TIER_TWO || opcode == _GUARD_IP) {
+ bool unique_target = false;
+ if (opcode == _FOR_ITER_TIER_TWO) {
+ exit_op = _DYNAMIC_EXIT;
+ }
+ else if (opcode == _GUARD_IP) {
exit_op = _DYNAMIC_EXIT;
- jump_target = current_jump_target + 1;
+ unique_target = true;
}
if (is_for_iter_test[opcode]) {
/* Target the POP_TOP immediately after the END_FOR,
int32_t next_inst = target + 1 + INLINE_CACHE_ENTRIES_FOR_ITER + extended_arg;
jump_target = next_inst + inst->oparg + 1;
}
- if (jump_target != current_jump_target || current_exit_op != exit_op) {
+ if (unique_target || jump_target != current_jump_target || current_exit_op != exit_op) {
make_exit(&buffer[next_spare], exit_op, jump_target);
current_exit_op = exit_op;
current_jump_target = jump_target;
self._replacers = {
**self._replacers,
"DISPATCH": self.dispatch,
+ "DISPATCH_INLINED": self.dispatch_inlined,
}
def dispatch(
self.emit("TRACING_DISPATCH")
return False
+ def dispatch_inlined(
+ self,
+ tkn: Token,
+ tkn_iter: TokenIterator,
+ uop: CodeSection,
+ storage: Storage,
+ inst: Instruction | None,
+ ) -> bool:
+ """Replacer for DISPATCH_INLINED() in instruction bodies.
+
+ Flushes the evaluation stack to the output and emits
+ TRACING_DISPATCH_INLINED in place of the original macro, so the
+ tracing interpreter records the event before dispatching into the
+ inlined frame. Raises analysis_error if the stack pointer is
+ currently spilled (it must be reloaded before any dispatch).
+ Always returns False, like the sibling ``dispatch`` replacer —
+ presumably meaning no further tokens were consumed (TODO confirm
+ against the replacer protocol).
+ """
+ if storage.spilled:
+ raise analysis_error("stack_pointer needs reloading before dispatch", tkn)
+ storage.stack.flush(self.out)
+ self.out.start_line()
+ self.emit("TRACING_DISPATCH_INLINED")
+ return False
def record_jump_taken(
self,
tkn: Token,