#define NB_INPLACE_TRUE_DIVIDE 24
#define NB_INPLACE_XOR 25
+/* Defined in Lib/opcode.py */
+#define ENABLE_SPECIALIZATION 1
#define IS_PSEUDO_OPCODE(op) (((op) >= MIN_PSEUDO_OPCODE) && ((op) <= MAX_PSEUDO_OPCODE))
hasfree = []
hasexc = []
+
+ENABLE_SPECIALIZATION = True
+
def is_pseudo(op):
return op >= MIN_PSEUDO_OPCODE and op <= MAX_PSEUDO_OPCODE
import contextlib
import functools
import getpass
+import opcode
import os
import re
import stat
"anticipate_failure", "load_package_tests", "detect_api_mismatch",
"check__all__", "skip_if_buggy_ucrt_strfptime",
"check_disallow_instantiation", "check_sanitizer", "skip_if_sanitizer",
- "requires_limited_api",
+ "requires_limited_api", "requires_specialization",
# sys
"is_jython", "is_android", "is_emscripten", "is_wasi",
"check_impl_detail", "unix_shell", "setswitchinterval",
return unittest.skipUnless(
_testcapi.LIMITED_API_AVAILABLE, 'needs Limited API support')(test)
+def requires_specialization(test):
+ return unittest.skipUnless(
+ opcode.ENABLE_SPECIALIZATION, "requires specialization")(test)
def _filter_suite(suite, pred):
"""Recursively filter test cases in a suite based on a predicate."""
import textwrap
import warnings
from test import support
-from test.support import script_helper, requires_debug_ranges
+from test.support import (script_helper, requires_debug_ranges,
+ requires_specialization)
from test.support.os_helper import FakePath
self.assertOpcodeSourcePositionIs(compiled_code, 'CALL',
line=1, end_line=3, column=0, end_column=1)
+ @requires_specialization
def test_multiline_boolean_expression(self):
snippet = """\
if (a or
import sys
import types
import unittest
-from test.support import captured_stdout, requires_debug_ranges, cpython_only
+from test.support import (captured_stdout, requires_debug_ranges,
+ requires_specialization, cpython_only)
from test.support.bytecode_helper import BytecodeTestCase
import opcode
f()
@cpython_only
+ @requires_specialization
def test_super_instructions(self):
self.code_quicken(lambda: load_test(0, 0))
got = self.get_disassembly(load_test, adaptive=True)
self.do_disassembly_compare(got, dis_load_test_quickened_code, True)
@cpython_only
+ @requires_specialization
def test_binary_specialize(self):
binary_op_quicken = """\
0 0 RESUME 0
self.do_disassembly_compare(got, binary_subscr_quicken % "BINARY_SUBSCR_DICT", True)
@cpython_only
+ @requires_specialization
def test_load_attr_specialize(self):
load_attr_quicken = """\
0 0 RESUME 0
self.do_disassembly_compare(got, load_attr_quicken, True)
@cpython_only
+ @requires_specialization
def test_call_specialize(self):
call_quicken = """\
0 RESUME 0
self.do_disassembly_compare(got, call_quicken)
@cpython_only
+ @requires_specialization
def test_loop_quicken(self):
# Loop can trigger a quicken where the loop is located
self.code_quicken(loop_test, 1)
from test import support
from test.support import import_helper
from test.support import os_helper
+from test.support import requires_specialization
import unittest
from collections import namedtuple
out, err = self.run_embedded_interpreter("test_repeated_simple_init")
self.assertEqual(out, 'Finalized\n' * INIT_LOOPS)
+ @requires_specialization
def test_specialized_static_code_gets_unspecialized_at_Py_FINALIZE(self):
# https://github.com/python/cpython/issues/92031
--- /dev/null
+Added the option to build CPython with specialization disabled by setting ``ENABLE_SPECIALIZATION = False`` in :mod:`opcode`, followed by ``make regen-all``.
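+
+For example, a minimal sketch of the workflow (assuming a Unix source build
+of CPython, where the ``make`` targets below are available)::
+
+    # In Lib/opcode.py, flip the flag before regenerating:
+    ENABLE_SPECIALIZATION = False
+
+Then run ``make regen-all`` to regenerate :file:`Include/opcode.h` and the
+interpreter sources from :mod:`opcode`, and rebuild as usual.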
};
inst(BINARY_SUBSCR, (unused/4, container, sub -- res)) {
+ #if ENABLE_SPECIALIZATION
_PyBinarySubscrCache *cache = (_PyBinarySubscrCache *)next_instr;
if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) {
assert(cframe.use_tracing == 0);
}
STAT_INC(BINARY_SUBSCR, deferred);
DECREMENT_ADAPTIVE_COUNTER(cache->counter);
+ #endif /* ENABLE_SPECIALIZATION */
res = PyObject_GetItem(container, sub);
DECREF_INPUTS();
ERROR_IF(res == NULL, error);
};
inst(STORE_SUBSCR, (counter/1, v, container, sub -- )) {
+ #if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
assert(cframe.use_tracing == 0);
next_instr--;
STAT_INC(STORE_SUBSCR, deferred);
_PyStoreSubscrCache *cache = (_PyStoreSubscrCache *)next_instr;
DECREMENT_ADAPTIVE_COUNTER(cache->counter);
+ #else
+ (void)counter; // Unused.
+ #endif /* ENABLE_SPECIALIZATION */
/* container[sub] = v */
int err = PyObject_SetItem(container, sub, v);
DECREF_INPUTS();
// stack effect: (__0 -- __array[oparg])
inst(UNPACK_SEQUENCE) {
+ #if ENABLE_SPECIALIZATION
_PyUnpackSequenceCache *cache = (_PyUnpackSequenceCache *)next_instr;
if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) {
assert(cframe.use_tracing == 0);
}
STAT_INC(UNPACK_SEQUENCE, deferred);
DECREMENT_ADAPTIVE_COUNTER(cache->counter);
+ #endif /* ENABLE_SPECIALIZATION */
PyObject *seq = POP();
PyObject **top = stack_pointer + oparg;
if (!unpack_iterable(tstate, seq, oparg, -1, top)) {
};
inst(STORE_ATTR, (counter/1, unused/3, v, owner --)) {
+ #if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
assert(cframe.use_tracing == 0);
PyObject *name = GETITEM(names, oparg);
STAT_INC(STORE_ATTR, deferred);
_PyAttrCache *cache = (_PyAttrCache *)next_instr;
DECREMENT_ADAPTIVE_COUNTER(cache->counter);
+ #else
+ (void)counter; // Unused.
+ #endif /* ENABLE_SPECIALIZATION */
PyObject *name = GETITEM(names, oparg);
int err = PyObject_SetAttr(owner, name, v);
Py_DECREF(v);
// error: LOAD_GLOBAL has irregular stack effect
inst(LOAD_GLOBAL) {
+ #if ENABLE_SPECIALIZATION
_PyLoadGlobalCache *cache = (_PyLoadGlobalCache *)next_instr;
if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) {
assert(cframe.use_tracing == 0);
}
STAT_INC(LOAD_GLOBAL, deferred);
DECREMENT_ADAPTIVE_COUNTER(cache->counter);
+ #endif /* ENABLE_SPECIALIZATION */
int push_null = oparg & 1;
PEEK(0) = NULL;
PyObject *name = GETITEM(names, oparg>>1);
// error: LOAD_ATTR has irregular stack effect
inst(LOAD_ATTR) {
+ #if ENABLE_SPECIALIZATION
_PyAttrCache *cache = (_PyAttrCache *)next_instr;
if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) {
assert(cframe.use_tracing == 0);
}
STAT_INC(LOAD_ATTR, deferred);
DECREMENT_ADAPTIVE_COUNTER(cache->counter);
+ #endif /* ENABLE_SPECIALIZATION */
PyObject *name = GETITEM(names, oparg >> 1);
PyObject *owner = TOP();
if (oparg & 1) {
};
inst(COMPARE_AND_BRANCH, (unused/2, left, right -- )) {
+ #if ENABLE_SPECIALIZATION
_PyCompareOpCache *cache = (_PyCompareOpCache *)next_instr;
if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) {
assert(cframe.use_tracing == 0);
}
STAT_INC(COMPARE_AND_BRANCH, deferred);
DECREMENT_ADAPTIVE_COUNTER(cache->counter);
+ #endif /* ENABLE_SPECIALIZATION */
assert((oparg >> 4) <= Py_GE);
PyObject *cond = PyObject_RichCompare(left, right, oparg>>4);
Py_DECREF(left);
// stack effect: ( -- __0)
inst(FOR_ITER) {
+ #if ENABLE_SPECIALIZATION
_PyForIterCache *cache = (_PyForIterCache *)next_instr;
if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) {
assert(cframe.use_tracing == 0);
}
STAT_INC(FOR_ITER, deferred);
DECREMENT_ADAPTIVE_COUNTER(cache->counter);
+ #endif /* ENABLE_SPECIALIZATION */
/* before: [iter]; after: [iter, iter()] *or* [] */
PyObject *iter = TOP();
PyObject *next = (*Py_TYPE(iter)->tp_iternext)(iter);
// stack effect: (__0, __array[oparg] -- )
inst(CALL) {
+ #if ENABLE_SPECIALIZATION
_PyCallCache *cache = (_PyCallCache *)next_instr;
if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) {
assert(cframe.use_tracing == 0);
}
STAT_INC(CALL, deferred);
DECREMENT_ADAPTIVE_COUNTER(cache->counter);
+ #endif /* ENABLE_SPECIALIZATION */
int total_args, is_meth;
is_meth = is_method(stack_pointer, oparg);
PyObject *function = PEEK(oparg + 1);
}
inst(BINARY_OP, (unused/1, lhs, rhs -- res)) {
+ #if ENABLE_SPECIALIZATION
_PyBinaryOpCache *cache = (_PyBinaryOpCache *)next_instr;
if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) {
assert(cframe.use_tracing == 0);
}
STAT_INC(BINARY_OP, deferred);
DECREMENT_ADAPTIVE_COUNTER(cache->counter);
+ #endif /* ENABLE_SPECIALIZATION */
assert(0 <= oparg);
assert((unsigned)oparg < Py_ARRAY_LENGTH(binary_ops));
assert(binary_ops[oparg]);
PyObject *sub = PEEK(1);
PyObject *container = PEEK(2);
PyObject *res;
+ #if ENABLE_SPECIALIZATION
_PyBinarySubscrCache *cache = (_PyBinarySubscrCache *)next_instr;
if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) {
assert(cframe.use_tracing == 0);
}
STAT_INC(BINARY_SUBSCR, deferred);
DECREMENT_ADAPTIVE_COUNTER(cache->counter);
+ #endif /* ENABLE_SPECIALIZATION */
res = PyObject_GetItem(container, sub);
Py_DECREF(container);
Py_DECREF(sub);
PyObject *container = PEEK(2);
PyObject *v = PEEK(3);
uint16_t counter = read_u16(&next_instr[0].cache);
+ #if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
assert(cframe.use_tracing == 0);
next_instr--;
STAT_INC(STORE_SUBSCR, deferred);
_PyStoreSubscrCache *cache = (_PyStoreSubscrCache *)next_instr;
DECREMENT_ADAPTIVE_COUNTER(cache->counter);
+ #else
+ (void)counter; // Unused.
+ #endif /* ENABLE_SPECIALIZATION */
/* container[sub] = v */
int err = PyObject_SetItem(container, sub, v);
Py_DECREF(v);
TARGET(UNPACK_SEQUENCE) {
PREDICTED(UNPACK_SEQUENCE);
+ #if ENABLE_SPECIALIZATION
_PyUnpackSequenceCache *cache = (_PyUnpackSequenceCache *)next_instr;
if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) {
assert(cframe.use_tracing == 0);
}
STAT_INC(UNPACK_SEQUENCE, deferred);
DECREMENT_ADAPTIVE_COUNTER(cache->counter);
+ #endif /* ENABLE_SPECIALIZATION */
PyObject *seq = POP();
PyObject **top = stack_pointer + oparg;
if (!unpack_iterable(tstate, seq, oparg, -1, top)) {
PyObject *owner = PEEK(1);
PyObject *v = PEEK(2);
uint16_t counter = read_u16(&next_instr[0].cache);
+ #if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
assert(cframe.use_tracing == 0);
PyObject *name = GETITEM(names, oparg);
STAT_INC(STORE_ATTR, deferred);
_PyAttrCache *cache = (_PyAttrCache *)next_instr;
DECREMENT_ADAPTIVE_COUNTER(cache->counter);
+ #else
+ (void)counter; // Unused.
+ #endif /* ENABLE_SPECIALIZATION */
PyObject *name = GETITEM(names, oparg);
int err = PyObject_SetAttr(owner, name, v);
Py_DECREF(v);
TARGET(LOAD_GLOBAL) {
PREDICTED(LOAD_GLOBAL);
+ #if ENABLE_SPECIALIZATION
_PyLoadGlobalCache *cache = (_PyLoadGlobalCache *)next_instr;
if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) {
assert(cframe.use_tracing == 0);
}
STAT_INC(LOAD_GLOBAL, deferred);
DECREMENT_ADAPTIVE_COUNTER(cache->counter);
+ #endif /* ENABLE_SPECIALIZATION */
int push_null = oparg & 1;
PEEK(0) = NULL;
PyObject *name = GETITEM(names, oparg>>1);
TARGET(LOAD_ATTR) {
PREDICTED(LOAD_ATTR);
+ #if ENABLE_SPECIALIZATION
_PyAttrCache *cache = (_PyAttrCache *)next_instr;
if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) {
assert(cframe.use_tracing == 0);
}
STAT_INC(LOAD_ATTR, deferred);
DECREMENT_ADAPTIVE_COUNTER(cache->counter);
+ #endif /* ENABLE_SPECIALIZATION */
PyObject *name = GETITEM(names, oparg >> 1);
PyObject *owner = TOP();
if (oparg & 1) {
PREDICTED(COMPARE_AND_BRANCH);
PyObject *right = PEEK(1);
PyObject *left = PEEK(2);
+ #if ENABLE_SPECIALIZATION
_PyCompareOpCache *cache = (_PyCompareOpCache *)next_instr;
if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) {
assert(cframe.use_tracing == 0);
}
STAT_INC(COMPARE_AND_BRANCH, deferred);
DECREMENT_ADAPTIVE_COUNTER(cache->counter);
+ #endif /* ENABLE_SPECIALIZATION */
assert((oparg >> 4) <= Py_GE);
PyObject *cond = PyObject_RichCompare(left, right, oparg>>4);
Py_DECREF(left);
TARGET(FOR_ITER) {
PREDICTED(FOR_ITER);
+ #if ENABLE_SPECIALIZATION
_PyForIterCache *cache = (_PyForIterCache *)next_instr;
if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) {
assert(cframe.use_tracing == 0);
}
STAT_INC(FOR_ITER, deferred);
DECREMENT_ADAPTIVE_COUNTER(cache->counter);
+ #endif /* ENABLE_SPECIALIZATION */
/* before: [iter]; after: [iter, iter()] *or* [] */
PyObject *iter = TOP();
PyObject *next = (*Py_TYPE(iter)->tp_iternext)(iter);
TARGET(CALL) {
PREDICTED(CALL);
+ #if ENABLE_SPECIALIZATION
_PyCallCache *cache = (_PyCallCache *)next_instr;
if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) {
assert(cframe.use_tracing == 0);
}
STAT_INC(CALL, deferred);
DECREMENT_ADAPTIVE_COUNTER(cache->counter);
+ #endif /* ENABLE_SPECIALIZATION */
int total_args, is_meth;
is_meth = is_method(stack_pointer, oparg);
PyObject *function = PEEK(oparg + 1);
PyObject *rhs = PEEK(1);
PyObject *lhs = PEEK(2);
PyObject *res;
+ #if ENABLE_SPECIALIZATION
_PyBinaryOpCache *cache = (_PyBinaryOpCache *)next_instr;
if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) {
assert(cframe.use_tracing == 0);
}
STAT_INC(BINARY_OP, deferred);
DECREMENT_ADAPTIVE_COUNTER(cache->counter);
+ #endif /* ENABLE_SPECIALIZATION */
assert(0 <= oparg);
assert((unsigned)oparg < Py_ARRAY_LENGTH(binary_ops));
assert(binary_ops[oparg]);
void
_PyCode_Quicken(PyCodeObject *code)
{
+ #if ENABLE_SPECIALIZATION
int opcode = 0;
_Py_CODEUNIT *instructions = _PyCode_CODE(code);
for (int i = 0; i < Py_SIZE(code); i++) {
}
}
}
+ #endif /* ENABLE_SPECIALIZATION */
}
#define SIMPLE_FUNCTION 0
void
_Py_Specialize_LoadAttr(PyObject *owner, _Py_CODEUNIT *instr, PyObject *name)
{
+ assert(ENABLE_SPECIALIZATION);
assert(_PyOpcode_Caches[LOAD_ATTR] == INLINE_CACHE_ENTRIES_LOAD_ATTR);
_PyAttrCache *cache = (_PyAttrCache *)(instr + 1);
PyTypeObject *type = Py_TYPE(owner);
void
_Py_Specialize_StoreAttr(PyObject *owner, _Py_CODEUNIT *instr, PyObject *name)
{
+ assert(ENABLE_SPECIALIZATION);
assert(_PyOpcode_Caches[STORE_ATTR] == INLINE_CACHE_ENTRIES_STORE_ATTR);
_PyAttrCache *cache = (_PyAttrCache *)(instr + 1);
PyTypeObject *type = Py_TYPE(owner);
PyObject *globals, PyObject *builtins,
_Py_CODEUNIT *instr, PyObject *name)
{
+ assert(ENABLE_SPECIALIZATION);
assert(_PyOpcode_Caches[LOAD_GLOBAL] == INLINE_CACHE_ENTRIES_LOAD_GLOBAL);
/* Use inline cache */
_PyLoadGlobalCache *cache = (_PyLoadGlobalCache *)(instr + 1);
_Py_Specialize_BinarySubscr(
PyObject *container, PyObject *sub, _Py_CODEUNIT *instr)
{
+ assert(ENABLE_SPECIALIZATION);
assert(_PyOpcode_Caches[BINARY_SUBSCR] ==
INLINE_CACHE_ENTRIES_BINARY_SUBSCR);
_PyBinarySubscrCache *cache = (_PyBinarySubscrCache *)(instr + 1);
void
_Py_Specialize_StoreSubscr(PyObject *container, PyObject *sub, _Py_CODEUNIT *instr)
{
+ assert(ENABLE_SPECIALIZATION);
_PyStoreSubscrCache *cache = (_PyStoreSubscrCache *)(instr + 1);
PyTypeObject *container_type = Py_TYPE(container);
if (container_type == &PyList_Type) {
_Py_Specialize_Call(PyObject *callable, _Py_CODEUNIT *instr, int nargs,
PyObject *kwnames)
{
+ assert(ENABLE_SPECIALIZATION);
assert(_PyOpcode_Caches[CALL] == INLINE_CACHE_ENTRIES_CALL);
_PyCallCache *cache = (_PyCallCache *)(instr + 1);
int fail;
_Py_Specialize_BinaryOp(PyObject *lhs, PyObject *rhs, _Py_CODEUNIT *instr,
int oparg, PyObject **locals)
{
+ assert(ENABLE_SPECIALIZATION);
assert(_PyOpcode_Caches[BINARY_OP] == INLINE_CACHE_ENTRIES_BINARY_OP);
_PyBinaryOpCache *cache = (_PyBinaryOpCache *)(instr + 1);
switch (oparg) {
_Py_Specialize_CompareAndBranch(PyObject *lhs, PyObject *rhs, _Py_CODEUNIT *instr,
int oparg)
{
+ assert(ENABLE_SPECIALIZATION);
assert(_PyOpcode_Caches[COMPARE_AND_BRANCH] == INLINE_CACHE_ENTRIES_COMPARE_OP);
_PyCompareOpCache *cache = (_PyCompareOpCache *)(instr + 1);
#ifndef NDEBUG
void
_Py_Specialize_UnpackSequence(PyObject *seq, _Py_CODEUNIT *instr, int oparg)
{
+ assert(ENABLE_SPECIALIZATION);
assert(_PyOpcode_Caches[UNPACK_SEQUENCE] ==
INLINE_CACHE_ENTRIES_UNPACK_SEQUENCE);
_PyUnpackSequenceCache *cache = (_PyUnpackSequenceCache *)(instr + 1);
void
_Py_Specialize_ForIter(PyObject *iter, _Py_CODEUNIT *instr, int oparg)
{
+ assert(ENABLE_SPECIALIZATION);
assert(_PyOpcode_Caches[FOR_ITER] == INLINE_CACHE_ENTRIES_FOR_ITER);
_PyForIterCache *cache = (_PyForIterCache *)(instr + 1);
PyTypeObject *tp = Py_TYPE(iter);
is_pseudo = opcode['is_pseudo']
_pseudo_ops = opcode['_pseudo_ops']
+ ENABLE_SPECIALIZATION = opcode["ENABLE_SPECIALIZATION"]
HAVE_ARGUMENT = opcode["HAVE_ARGUMENT"]
MIN_PSEUDO_OPCODE = opcode["MIN_PSEUDO_OPCODE"]
MAX_PSEUDO_OPCODE = opcode["MAX_PSEUDO_OPCODE"]
for i, (op, _) in enumerate(opcode["_nb_ops"]):
fobj.write(DEFINE.format(op, i))
+ fobj.write("\n")
+ fobj.write("/* Defined in Lib/opcode.py */\n")
+ fobj.write(f"#define ENABLE_SPECIALIZATION {int(ENABLE_SPECIALIZATION)}")
+
iobj.write("\n")
iobj.write("#ifdef Py_DEBUG\n")
iobj.write(f"static const char *const _PyOpcode_OpName[{NUM_OPCODES}] = {{\n")