# 1 extra iteration for tracing.
for i in range(_testinternalcapi.TIER2_THRESHOLD + 2):
# Careful, doing this in the reverse order breaks tracing:
- expected = {enabled} and i >= _testinternalcapi.TIER2_THRESHOLD + 1
+ expected = {enabled} and i >= _testinternalcapi.TIER2_THRESHOLD
assert sys._jit.is_active() is expected
frame_2_jit(expected)
assert sys._jit.is_active() is expected
goto stop_tracing;
}
PyCodeObject *code = _PyFrame_GetCode(frame);
- _PyExecutorObject *executor = code->co_executors->executors[this_instr->op.arg];
+ _PyExecutorObject *executor = code->co_executors->executors[oparg & 255];
assert(executor->vm_data.index == INSTR_OFFSET() - 1);
assert(executor->vm_data.code == code);
assert(executor->vm_data.valid);
LEAVE_TRACING();
int err = stop_tracing_and_jit(tstate, frame);
ERROR_IF(err < 0);
- DISPATCH_GOTO_NON_TRACING();
+ DISPATCH();
}
// Super instructions: the instruction deopted, so the stack no longer matches
// what the optimizer expects; this must be reflected correctly in the trace.
JUMP_TO_LABEL(stop_tracing);
}
PyCodeObject *code = _PyFrame_GetCode(frame);
- _PyExecutorObject *executor = code->co_executors->executors[this_instr->op.arg];
+ _PyExecutorObject *executor = code->co_executors->executors[oparg & 255];
assert(executor->vm_data.index == INSTR_OFFSET() - 1);
assert(executor->vm_data.code == code);
assert(executor->vm_data.valid);
if (err < 0) {
JUMP_TO_LABEL(error);
}
- DISPATCH_GOTO_NON_TRACING();
+ DISPATCH();
}
_PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
if ((_tstate->jit_tracer_state.prev_state.instr->op.code == CALL_LIST_APPEND &&