From: Ken Jin <28750310+Fidget-Spinner@users.noreply.github.com>
Date: Fri, 24 Oct 2025 19:20:23 +0000 (+0100)
Subject: Fix the counters
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=ab7527c5a70398dececf80fda944e2fe6550f217;p=thirdparty%2FPython%2Fcpython.git

Fix the counters
---

diff --git a/Include/internal/pycore_backoff.h b/Include/internal/pycore_backoff.h
index 454c8dde031f..f92dfd278a1e 100644
--- a/Include/internal/pycore_backoff.h
+++ b/Include/internal/pycore_backoff.h
@@ -129,6 +129,14 @@ initial_unreachable_backoff_counter(void)
     return make_backoff_counter(0, UNREACHABLE_BACKOFF);
 }
 
+// Required to not get stuck in infinite specialization loops due to specialization failure.
+// We use 2 here as there are a few scenarios:
+// 1. Freshly specialized from unspecialized, in which case the counter will be 1.
+// 2. Re-specialized from deopt, in which case the counter will be 1.
+// 3. Deopt -> Specialize -> Deopt -> Specialize, in which case the counter will be 2.
+// We do not want the 3rd case.
+#define MAX_SPECIALIZATION_TRIES 2
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/Python/ceval_macros.h b/Python/ceval_macros.h
index 9dd7b1dea165..0d6d5174204d 100644
--- a/Python/ceval_macros.h
+++ b/Python/ceval_macros.h
@@ -135,8 +135,7 @@
 
 #if (_Py_TAIL_CALL_INTERP || USE_COMPUTED_GOTOS) && _Py_TIER2
 # define IS_JIT_TRACING() (DISPATCH_TABLE_VAR == TRACING_DISPATCH_TABLE)
-// Required to not get stuck in infinite specialization loops due to specialization failure.
-# define IS_JIT_TRACING_MAKING_PROGRESS() (IS_JIT_TRACING() && tstate->interp->jit_state.specialize_counter < 1)
+# define IS_JIT_TRACING_MAKING_PROGRESS() (IS_JIT_TRACING() && tstate->interp->jit_state.specialize_counter < MAX_SPECIALIZATION_TRIES)
 # define ENTER_TRACING() \
        DISPATCH_TABLE_VAR = TRACING_DISPATCH_TABLE;
 # define LEAVE_TRACING() \
diff --git a/Python/optimizer.c b/Python/optimizer.c
index 0c8202f60f10..9071265b5081 100644
--- a/Python/optimizer.c
+++ b/Python/optimizer.c
@@ -557,6 +557,14 @@ _PyJit_translate_single_bytecode_to_trace(
     _Py_CODEUNIT *next_instr)
 {
 
+#ifdef Py_DEBUG
+    char *python_lltrace = Py_GETENV("PYTHON_LLTRACE");
+    int lltrace = 0;
+    if (python_lltrace != NULL && *python_lltrace >= '0') {
+        lltrace = *python_lltrace - '0'; // TODO: Parse an int and all that
+    }
+#endif
+
     PyCodeObject *old_code = tstate->interp->jit_state.prev_instr_code;
     // Something else finalized the trace. This can happen in multi-threaded scenarios as our trace
     // addition from bytecode execution to here is not atomic.
@@ -576,13 +584,6 @@
         goto full;
     }
 
-#ifdef Py_DEBUG
-    char *python_lltrace = Py_GETENV("PYTHON_LLTRACE");
-    int lltrace = 0;
-    if (python_lltrace != NULL && *python_lltrace >= '0') {
-        lltrace = *python_lltrace - '0'; // TODO: Parse an int and all that
-    }
-#endif
     _Py_CODEUNIT *this_instr = tstate->interp->jit_state.prev_instr;
     _Py_CODEUNIT *target_instr = this_instr;
     uint32_t target = 0;
@@ -593,8 +594,8 @@
     // We must point to the first EXTENDED_ARG when deopting.
     int oparg = tstate->interp->jit_state.prev_instr_oparg;
     int opcode = this_instr->op.code;
-    // Failed specialization twice in a row. Deopt!
-    if (tstate->interp->jit_state.specialize_counter >= 1) {
+    // Failed specialization many times. Deopt!
+    if (tstate->interp->jit_state.specialize_counter >= MAX_SPECIALIZATION_TRIES) {
        opcode = _PyOpcode_Deopt[opcode];
     }
     int rewind_oparg = oparg;
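
Note: the following is a minimal, self-contained sketch (not CPython's real
jit_state or tracing machinery; the struct and function names below are
hypothetical) illustrating why the threshold is 2: a fresh specialization, or a
single re-specialization after a deopt, leaves the counter at 1, while a second
deopt -> specialize round trip pushes it to 2, at which point tracing should
stop re-specializing and translate the deoptimized form instead.

    #include <stdio.h>

    #define MAX_SPECIALIZATION_TRIES 2

    /* Hypothetical stand-in for tstate->interp->jit_state.specialize_counter. */
    typedef struct {
        int specialize_counter;
    } toy_jit_state;

    /* Analogue of IS_JIT_TRACING_MAKING_PROGRESS(): tracing may keep
       specializing only while the counter is below the threshold. */
    static int making_progress(const toy_jit_state *js)
    {
        return js->specialize_counter < MAX_SPECIALIZATION_TRIES;
    }

    /* Analogue of the optimizer.c check: at the threshold, translate the
       instruction in its deoptimized (unspecialized) form. */
    static int should_deopt(const toy_jit_state *js)
    {
        return js->specialize_counter >= MAX_SPECIALIZATION_TRIES;
    }

    int main(void)
    {
        toy_jit_state js = { .specialize_counter = 0 };

        js.specialize_counter++;   /* cases 1 and 2: counter is 1 */
        printf("after 1 try:   progress=%d deopt=%d\n",
               making_progress(&js), should_deopt(&js));

        js.specialize_counter++;   /* case 3: counter is 2, deopt */
        printf("after 2 tries: progress=%d deopt=%d\n",
               making_progress(&js), should_deopt(&js));
        return 0;
    }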
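
Note: the moved PYTHON_LLTRACE block still only looks at the first digit (hence
the "TODO: Parse an int and all that" comment). One possible shape for that
TODO, sketched with standard strtol rather than any CPython helper
(parse_lltrace_level is a hypothetical name, not part of this patch):

    #include <stdio.h>
    #include <stdlib.h>

    /* Parse an lltrace level from an environment-variable string.
       Returns 0 for NULL, empty, or non-numeric input, mirroring the
       "default to 0" behaviour of the block in optimizer.c. */
    static int parse_lltrace_level(const char *s)
    {
        if (s == NULL || *s == '\0') {
            return 0;
        }
        char *end = NULL;
        long level = strtol(s, &end, 10);
        if (end == s || level < 0) {
            return 0;   /* not a number, or negative: treat as no tracing */
        }
        return (int)level;
    }

    int main(void)
    {
        printf("%d\n", parse_lltrace_level(getenv("PYTHON_LLTRACE")));
        return 0;
    }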