_Py_CODEUNIT counter;
} _PyBinaryOpCache;
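+/* Inline cache for UNPACK_SEQUENCE: a single adaptive counter, stored in the CACHE code unit that follows the instruction. */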
+typedef struct {
+ _Py_CODEUNIT counter;
+} _PyUnpackSequenceCache;
+
#define INLINE_CACHE_ENTRIES_BINARY_OP \
(sizeof(_PyBinaryOpCache) / sizeof(_Py_CODEUNIT))
+#define INLINE_CACHE_ENTRIES_UNPACK_SEQUENCE \
+ (sizeof(_PyUnpackSequenceCache) / sizeof(_Py_CODEUNIT))
+
/* Maximum size of code to quicken, in code units. */
#define MAX_SIZE_TO_QUICKEN 5000
int oparg);
extern void _Py_Specialize_CompareOp(PyObject *lhs, PyObject *rhs, _Py_CODEUNIT *instr, SpecializedCacheEntry *cache);
extern void _Py_Specialize_UnpackSequence(PyObject *seq, _Py_CODEUNIT *instr,
- SpecializedCacheEntry *cache);
+ int oparg);
/* Deallocator function for static codeobjects used in deepfreeze.py */
extern void _PyStaticCode_Dealloc(PyCodeObject *co);
#define NB_INPLACE_XOR 25
static const uint8_t _PyOpcode_InlineCacheEntries[256] = {
+ [UNPACK_SEQUENCE] = 1,
[BINARY_OP] = 1,
};
# Python 3.11a5 3479 (Add PUSH_NULL opcode)
# Python 3.11a5 3480 (New CALL opcodes, second iteration)
# Python 3.11a5 3481 (Use inline CACHE instructions)
+# Python 3.11a5 3482 (Use inline caching for UNPACK_SEQUENCE)
# Python 3.12 will start with magic number 3500
# Whenever MAGIC_NUMBER is changed, the ranges in the magic_values array
# in PC/launcher.c must also be updated.
-MAGIC_NUMBER = (3481).to_bytes(2, 'little') + b'\r\n'
+MAGIC_NUMBER = (3482).to_bytes(2, 'little') + b'\r\n'
_RAW_MAGIC_NUMBER = int.from_bytes(MAGIC_NUMBER, 'little') # For import.c
_PYCACHE = '__pycache__'
name_op('STORE_NAME', 90) # Index in name list
name_op('DELETE_NAME', 91) # ""
-def_op('UNPACK_SEQUENCE', 92) # Number of tuple items
+def_op('UNPACK_SEQUENCE', 92, 1) # Number of tuple items
jrel_op('FOR_ITER', 93)
def_op('UNPACK_EX', 94)
name_op('STORE_ATTR', 95) # Index in name list
--- /dev/null
+Use inline caching for :opcode:`UNPACK_SEQUENCE`, storing the specialization counter in the bytecode itself instead of a separate cache entry.
}
STACK_GROW(oparg);
Py_DECREF(seq);
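+ /* Skip the inline cache entries that follow this instruction. */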
+ JUMPBY(INLINE_CACHE_ENTRIES_UNPACK_SEQUENCE);
DISPATCH();
}
TARGET(UNPACK_SEQUENCE_ADAPTIVE) {
assert(cframe.use_tracing == 0);
- SpecializedCacheEntry *cache = GET_CACHE();
- if (cache->adaptive.counter == 0) {
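+ /* The inline cache lives in the code unit right after this instruction, which is where next_instr already points. */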
+ _PyUnpackSequenceCache *cache = (_PyUnpackSequenceCache *)next_instr;
+ if (cache->counter == 0) {
PyObject *seq = TOP();
next_instr--;
- _Py_Specialize_UnpackSequence(seq, next_instr, cache);
+ _Py_Specialize_UnpackSequence(seq, next_instr, oparg);
DISPATCH();
}
else {
STAT_INC(UNPACK_SEQUENCE, deferred);
- cache->adaptive.counter--;
- oparg = cache->adaptive.original_oparg;
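+ /* Not yet time to specialize: count down and run the generic UNPACK_SEQUENCE. */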
+ cache->counter--;
JUMP_TO_INSTRUCTION(UNPACK_SEQUENCE);
}
}
SET_TOP(Py_NewRef(PyTuple_GET_ITEM(seq, 1)));
PUSH(Py_NewRef(PyTuple_GET_ITEM(seq, 0)));
Py_DECREF(seq);
+ JUMPBY(INLINE_CACHE_ENTRIES_UNPACK_SEQUENCE);
NOTRACE_DISPATCH();
}
TARGET(UNPACK_SEQUENCE_TUPLE) {
PyObject *seq = TOP();
- int len = GET_CACHE()->adaptive.original_oparg;
DEOPT_IF(!PyTuple_CheckExact(seq), UNPACK_SEQUENCE);
- DEOPT_IF(PyTuple_GET_SIZE(seq) != len, UNPACK_SEQUENCE);
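+ /* oparg is the expected number of items; deoptimize on any length mismatch. */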
+ DEOPT_IF(PyTuple_GET_SIZE(seq) != oparg, UNPACK_SEQUENCE);
STAT_INC(UNPACK_SEQUENCE, hit);
STACK_SHRINK(1);
PyObject **items = _PyTuple_ITEMS(seq);
- while (len--) {
- PUSH(Py_NewRef(items[len]));
+ while (oparg--) {
+ PUSH(Py_NewRef(items[oparg]));
}
Py_DECREF(seq);
+ JUMPBY(INLINE_CACHE_ENTRIES_UNPACK_SEQUENCE);
NOTRACE_DISPATCH();
}
TARGET(UNPACK_SEQUENCE_LIST) {
PyObject *seq = TOP();
- int len = GET_CACHE()->adaptive.original_oparg;
DEOPT_IF(!PyList_CheckExact(seq), UNPACK_SEQUENCE);
- DEOPT_IF(PyList_GET_SIZE(seq) != len, UNPACK_SEQUENCE);
+ DEOPT_IF(PyList_GET_SIZE(seq) != oparg, UNPACK_SEQUENCE);
STAT_INC(UNPACK_SEQUENCE, hit);
STACK_SHRINK(1);
PyObject **items = _PyList_ITEMS(seq);
- while (len--) {
- PUSH(Py_NewRef(items[len]));
+ while (oparg--) {
+ PUSH(Py_NewRef(items[oparg]));
}
Py_DECREF(seq);
+ JUMPBY(INLINE_CACHE_ENTRIES_UNPACK_SEQUENCE);
NOTRACE_DISPATCH();
}
MISS_WITH_INLINE_CACHE(BINARY_OP)
MISS_WITH_CACHE(COMPARE_OP)
MISS_WITH_CACHE(BINARY_SUBSCR)
-MISS_WITH_CACHE(UNPACK_SEQUENCE)
+MISS_WITH_INLINE_CACHE(UNPACK_SEQUENCE)
MISS_WITH_OPARG_COUNTER(STORE_SUBSCR)
LOAD_ATTR_INSTANCE_VALUE_miss:
[PRECALL] = 2, /* _PyAdaptiveEntry and _PyObjectCache/_PyCallCache */
[STORE_ATTR] = 1, // _PyAdaptiveEntry
[COMPARE_OP] = 1, /* _PyAdaptiveEntry */
- [UNPACK_SEQUENCE] = 1, // _PyAdaptiveEntry
};
Py_ssize_t _Py_QuickenedCount = 0;
#endif
void
-_Py_Specialize_UnpackSequence(PyObject *seq, _Py_CODEUNIT *instr,
- SpecializedCacheEntry *cache)
+_Py_Specialize_UnpackSequence(PyObject *seq, _Py_CODEUNIT *instr, int oparg)
{
- _PyAdaptiveEntry *adaptive = &cache->adaptive;
+ assert(_PyOpcode_InlineCacheEntries[UNPACK_SEQUENCE] ==
+ INLINE_CACHE_ENTRIES_UNPACK_SEQUENCE);
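+ /* The inline cache occupies the code unit immediately after the instruction. */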
+ _PyUnpackSequenceCache *cache = (_PyUnpackSequenceCache *)(instr + 1);
if (PyTuple_CheckExact(seq)) {
- if (PyTuple_GET_SIZE(seq) != adaptive->original_oparg) {
+ if (PyTuple_GET_SIZE(seq) != oparg) {
SPECIALIZATION_FAIL(UNPACK_SEQUENCE, SPEC_FAIL_EXPECTED_ERROR);
goto failure;
}
if (PyTuple_GET_SIZE(seq) == 2) {
- *instr = _Py_MAKECODEUNIT(UNPACK_SEQUENCE_TWO_TUPLE,
- _Py_OPARG(*instr));
+ *instr = _Py_MAKECODEUNIT(UNPACK_SEQUENCE_TWO_TUPLE, oparg);
goto success;
}
- *instr = _Py_MAKECODEUNIT(UNPACK_SEQUENCE_TUPLE, _Py_OPARG(*instr));
+ *instr = _Py_MAKECODEUNIT(UNPACK_SEQUENCE_TUPLE, oparg);
goto success;
}
if (PyList_CheckExact(seq)) {
- if (PyList_GET_SIZE(seq) != adaptive->original_oparg) {
+ if (PyList_GET_SIZE(seq) != oparg) {
SPECIALIZATION_FAIL(UNPACK_SEQUENCE, SPEC_FAIL_EXPECTED_ERROR);
goto failure;
}
- *instr = _Py_MAKECODEUNIT(UNPACK_SEQUENCE_LIST, _Py_OPARG(*instr));
+ *instr = _Py_MAKECODEUNIT(UNPACK_SEQUENCE_LIST, oparg);
goto success;
}
SPECIALIZATION_FAIL(UNPACK_SEQUENCE, unpack_sequence_fail_kind(seq));
failure:
STAT_INC(UNPACK_SEQUENCE, failure);
- cache_backoff(adaptive);
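+ /* Specialization failed: reset the counter so the adaptive instruction backs off before trying again. */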
+ cache->counter = ADAPTIVE_CACHE_BACKOFF;
return;
success:
STAT_INC(UNPACK_SEQUENCE, success);
- adaptive->counter = initial_counter_value();
+ cache->counter = initial_counter_value();
}
#ifdef Py_STATS