extern "C" {
#endif
+#include "pycore_interp.h"
+#include "pycore_optimizer.h"
+#include "pycore_stackref.h"
+
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
def test_effect_sizes(self):
stack = Stack()
inputs = [
- x := StackItem("x", None, "", "1"),
- y := StackItem("y", None, "", "oparg"),
- z := StackItem("z", None, "", "oparg*2"),
+ x := StackItem("x", None, "1"),
+ y := StackItem("y", None, "oparg"),
+ z := StackItem("z", None, "oparg*2"),
]
outputs = [
- StackItem("x", None, "", "1"),
- StackItem("b", None, "", "oparg*4"),
- StackItem("c", None, "", "1"),
+ StackItem("x", None, "1"),
+ StackItem("b", None, "oparg*4"),
+ StackItem("c", None, "1"),
]
stack.pop(z)
stack.pop(y)
"""
self.run_cases_test(input, output)
- def test_cond_effect(self):
- input = """
- inst(OP, (aa, input if ((oparg & 1) == 1), cc -- xx, output if (oparg & 2), zz)) {
- output = SPAM(oparg, aa, cc, input);
- INPUTS_DEAD();
- xx = 0;
- zz = 0;
- }
- """
- output = """
- TARGET(OP) {
- #if Py_TAIL_CALL_INTERP
- int opcode = OP;
- (void)(opcode);
- #endif
- frame->instr_ptr = next_instr;
- next_instr += 1;
- INSTRUCTION_STATS(OP);
- _PyStackRef aa;
- _PyStackRef input = PyStackRef_NULL;
- _PyStackRef cc;
- _PyStackRef xx;
- _PyStackRef output = PyStackRef_NULL;
- _PyStackRef zz;
- cc = stack_pointer[-1];
- if ((oparg & 1) == 1) { input = stack_pointer[-1 - (((oparg & 1) == 1) ? 1 : 0)]; }
- aa = stack_pointer[-2 - (((oparg & 1) == 1) ? 1 : 0)];
- output = SPAM(oparg, aa, cc, input);
- xx = 0;
- zz = 0;
- stack_pointer[-2 - (((oparg & 1) == 1) ? 1 : 0)] = xx;
- if (oparg & 2) stack_pointer[-1 - (((oparg & 1) == 1) ? 1 : 0)] = output;
- stack_pointer[-1 - (((oparg & 1) == 1) ? 1 : 0) + ((oparg & 2) ? 1 : 0)] = zz;
- stack_pointer += -(((oparg & 1) == 1) ? 1 : 0) + ((oparg & 2) ? 1 : 0);
- assert(WITHIN_STACK_BOUNDS());
- DISPATCH();
- }
- """
- self.run_cases_test(input, output)
-
- def test_macro_cond_effect(self):
- input = """
- op(A, (left, middle, right --)) {
- USE(left, middle, right);
- INPUTS_DEAD();
- }
- op(B, (-- deep, extra if (oparg), res)) {
- deep = -1;
- res = 0;
- extra = 1;
- INPUTS_DEAD();
- }
- macro(M) = A + B;
- """
- output = """
- TARGET(M) {
- #if Py_TAIL_CALL_INTERP
- int opcode = M;
- (void)(opcode);
- #endif
- frame->instr_ptr = next_instr;
- next_instr += 1;
- INSTRUCTION_STATS(M);
- _PyStackRef left;
- _PyStackRef middle;
- _PyStackRef right;
- _PyStackRef deep;
- _PyStackRef extra = PyStackRef_NULL;
- _PyStackRef res;
- // A
- {
- right = stack_pointer[-1];
- middle = stack_pointer[-2];
- left = stack_pointer[-3];
- USE(left, middle, right);
- }
- // B
- {
- deep = -1;
- res = 0;
- extra = 1;
- }
- stack_pointer[-3] = deep;
- if (oparg) stack_pointer[-2] = extra;
- stack_pointer[-2 + ((oparg) ? 1 : 0)] = res;
- stack_pointer += -1 + ((oparg) ? 1 : 0);
- assert(WITHIN_STACK_BOUNDS());
- DISPATCH();
- }
- """
- self.run_cases_test(input, output)
-
def test_macro_push_push(self):
input = """
op(A, (-- val1)) {
#include "pycore_instruction_sequence.h" // _PyInstructionSequence_New()
#include "pycore_interpframe.h" // _PyFrame_GetFunction()
#include "pycore_object.h" // _PyObject_IsFreed()
+#include "pycore_optimizer.h" // _Py_Executor_DependsOn
#include "pycore_pathconfig.h" // _PyPathConfig_ClearGlobal()
#include "pycore_pyerrors.h" // _PyErr_ChainExceptions1()
#include "pycore_pylifecycle.h" // _PyInterpreterConfig_InitFromDict()
#include "pycore_interpframe.h" // FRAME_SPECIALS_SIZE
#include "pycore_opcode_metadata.h" // _PyOpcode_Caches
#include "pycore_opcode_utils.h" // RESUME_AT_FUNC_START
+#include "pycore_optimizer.h" // _Py_ExecutorDetach
#include "pycore_pymem.h" // _PyMem_FreeDelayed()
#include "pycore_pystate.h" // _PyInterpreterState_GET()
#include "pycore_setobject.h" // _PySet_NextEntry()
#define guard
#define override
#define specializing
-#define split
#define replicate(TIMES)
#define tier1
#define no_save_ip
ERROR_IF(PyStackRef_IsNull(*res), error);
}
- op(_PUSH_NULL_CONDITIONAL, ( -- null if (oparg & 1))) {
- null = PyStackRef_NULL;
+ op(_PUSH_NULL_CONDITIONAL, ( -- null[oparg & 1])) {
+ if (oparg & 1) {
+ null[0] = PyStackRef_NULL;
+ }
}
macro(LOAD_GLOBAL) =
}
case _PUSH_NULL_CONDITIONAL: {
- _PyStackRef null = PyStackRef_NULL;
+ _PyStackRef *null;
oparg = CURRENT_OPARG();
- null = PyStackRef_NULL;
- if (oparg & 1) stack_pointer[0] = null;
+ null = &stack_pointer[0];
+ if (oparg & 1) {
+ null[0] = PyStackRef_NULL;
+ }
stack_pointer += (oparg & 1);
assert(WITHIN_STACK_BOUNDS());
break;
_PyStackRef class_st;
_PyStackRef self_st;
_PyStackRef attr;
- _PyStackRef null = PyStackRef_NULL;
+ _PyStackRef *null;
/* Skip 1 cache entry */
// _LOAD_SUPER_ATTR
{
}
// _PUSH_NULL_CONDITIONAL
{
- null = PyStackRef_NULL;
+ null = &stack_pointer[1];
+ if (oparg & 1) {
+ null[0] = PyStackRef_NULL;
+ }
}
stack_pointer[0] = attr;
- if (oparg & 1) stack_pointer[1] = null;
stack_pointer += 1 + (oparg & 1);
assert(WITHIN_STACK_BOUNDS());
DISPATCH();
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
_PyStackRef owner;
_PyStackRef attr;
- _PyStackRef null = PyStackRef_NULL;
+ _PyStackRef *null;
/* Skip 1 cache entry */
// _CHECK_ATTR_CLASS
{
}
// _PUSH_NULL_CONDITIONAL
{
- null = PyStackRef_NULL;
+ null = &stack_pointer[0];
+ if (oparg & 1) {
+ null[0] = PyStackRef_NULL;
+ }
}
- if (oparg & 1) stack_pointer[0] = null;
stack_pointer += (oparg & 1);
assert(WITHIN_STACK_BOUNDS());
DISPATCH();
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
_PyStackRef owner;
_PyStackRef attr;
- _PyStackRef null = PyStackRef_NULL;
+ _PyStackRef *null;
/* Skip 1 cache entry */
// _CHECK_ATTR_CLASS
{
}
// _PUSH_NULL_CONDITIONAL
{
- null = PyStackRef_NULL;
+ null = &stack_pointer[0];
+ if (oparg & 1) {
+ null[0] = PyStackRef_NULL;
+ }
}
- if (oparg & 1) stack_pointer[0] = null;
stack_pointer += (oparg & 1);
assert(WITHIN_STACK_BOUNDS());
DISPATCH();
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
_PyStackRef owner;
_PyStackRef attr;
- _PyStackRef null = PyStackRef_NULL;
+ _PyStackRef *null;
/* Skip 1 cache entry */
// _GUARD_TYPE_VERSION
{
/* Skip 5 cache entries */
// _PUSH_NULL_CONDITIONAL
{
- null = PyStackRef_NULL;
+ null = &stack_pointer[0];
+ if (oparg & 1) {
+ null[0] = PyStackRef_NULL;
+ }
}
- if (oparg & 1) stack_pointer[0] = null;
stack_pointer += (oparg & 1);
assert(WITHIN_STACK_BOUNDS());
DISPATCH();
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
_PyStackRef owner;
_PyStackRef attr;
- _PyStackRef null = PyStackRef_NULL;
+ _PyStackRef *null;
/* Skip 1 cache entry */
// _LOAD_ATTR_MODULE
{
/* Skip 5 cache entries */
// _PUSH_NULL_CONDITIONAL
{
- null = PyStackRef_NULL;
+ null = &stack_pointer[0];
+ if (oparg & 1) {
+ null[0] = PyStackRef_NULL;
+ }
}
- if (oparg & 1) stack_pointer[0] = null;
stack_pointer += (oparg & 1);
assert(WITHIN_STACK_BOUNDS());
DISPATCH();
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
_PyStackRef owner;
_PyStackRef attr;
- _PyStackRef null = PyStackRef_NULL;
+ _PyStackRef *null;
/* Skip 1 cache entry */
// _GUARD_TYPE_VERSION
{
/* Skip 5 cache entries */
// _PUSH_NULL_CONDITIONAL
{
- null = PyStackRef_NULL;
+ null = &stack_pointer[0];
+ if (oparg & 1) {
+ null[0] = PyStackRef_NULL;
+ }
}
- if (oparg & 1) stack_pointer[0] = null;
stack_pointer += (oparg & 1);
assert(WITHIN_STACK_BOUNDS());
DISPATCH();
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
_PyStackRef owner;
_PyStackRef attr;
- _PyStackRef null = PyStackRef_NULL;
+ _PyStackRef *null;
/* Skip 1 cache entry */
// _GUARD_TYPE_VERSION
{
/* Skip 5 cache entries */
// _PUSH_NULL_CONDITIONAL
{
- null = PyStackRef_NULL;
+ null = &stack_pointer[0];
+ if (oparg & 1) {
+ null[0] = PyStackRef_NULL;
+ }
}
- if (oparg & 1) stack_pointer[0] = null;
stack_pointer += (oparg & 1);
assert(WITHIN_STACK_BOUNDS());
DISPATCH();
_Py_CODEUNIT* const this_instr = next_instr - 5;
(void)this_instr;
_PyStackRef *res;
- _PyStackRef null = PyStackRef_NULL;
+ _PyStackRef *null;
// _SPECIALIZE_LOAD_GLOBAL
{
uint16_t counter = read_u16(&this_instr[1].cache);
}
// _PUSH_NULL_CONDITIONAL
{
- null = PyStackRef_NULL;
+ null = &stack_pointer[1];
+ if (oparg & 1) {
+ null[0] = PyStackRef_NULL;
+ }
}
- if (oparg & 1) stack_pointer[1] = null;
stack_pointer += 1 + (oparg & 1);
assert(WITHIN_STACK_BOUNDS());
DISPATCH();
INSTRUCTION_STATS(LOAD_GLOBAL_BUILTIN);
static_assert(INLINE_CACHE_ENTRIES_LOAD_GLOBAL == 4, "incorrect cache size");
_PyStackRef res;
- _PyStackRef null = PyStackRef_NULL;
+ _PyStackRef *null;
/* Skip 1 cache entry */
// _GUARD_GLOBALS_VERSION
{
}
// _PUSH_NULL_CONDITIONAL
{
- null = PyStackRef_NULL;
+ null = &stack_pointer[1];
+ if (oparg & 1) {
+ null[0] = PyStackRef_NULL;
+ }
}
stack_pointer[0] = res;
- if (oparg & 1) stack_pointer[1] = null;
stack_pointer += 1 + (oparg & 1);
assert(WITHIN_STACK_BOUNDS());
DISPATCH();
INSTRUCTION_STATS(LOAD_GLOBAL_MODULE);
static_assert(INLINE_CACHE_ENTRIES_LOAD_GLOBAL == 4, "incorrect cache size");
_PyStackRef res;
- _PyStackRef null = PyStackRef_NULL;
+ _PyStackRef *null;
/* Skip 1 cache entry */
// _NOP
{
}
// _PUSH_NULL_CONDITIONAL
{
- null = PyStackRef_NULL;
+ null = &stack_pointer[1];
+ if (oparg & 1) {
+ null[0] = PyStackRef_NULL;
+ }
}
stack_pointer[0] = res;
- if (oparg & 1) stack_pointer[1] = null;
stack_pointer += 1 + (oparg & 1);
assert(WITHIN_STACK_BOUNDS());
DISPATCH();
_PyStackRef class_st;
_PyStackRef self_st;
_PyStackRef attr;
- _PyStackRef null = PyStackRef_NULL;
+ _PyStackRef *null;
// _SPECIALIZE_LOAD_SUPER_ATTR
{
class_st = stack_pointer[-2];
}
// _PUSH_NULL_CONDITIONAL
{
- null = PyStackRef_NULL;
+ null = &stack_pointer[1];
+ if (oparg & 1) {
+ null[0] = PyStackRef_NULL;
+ }
}
stack_pointer[0] = attr;
- if (oparg & 1) stack_pointer[1] = null;
stack_pointer += 1 + (oparg & 1);
assert(WITHIN_STACK_BOUNDS());
DISPATCH();
#include "pycore_ceval.h"
#include "pycore_critical_section.h"
#include "pycore_dict.h"
+#include "pycore_floatobject.h"
+#include "pycore_frame.h"
+#include "pycore_interpframe.h"
#include "pycore_intrinsics.h"
+#include "pycore_list.h"
#include "pycore_long.h"
#include "pycore_opcode_metadata.h"
#include "pycore_opcode_utils.h"
#include "pycore_pyerrors.h"
#include "pycore_setobject.h"
#include "pycore_sliceobject.h"
+#include "pycore_tuple.h"
+#include "pycore_unicodeobject.h"
+
#include "pycore_jit.h"
// Memory management stuff: ////////////////////////////////////////////////////
#include "pycore_interp.h"
#include "pycore_backoff.h"
#include "pycore_bitutils.h" // _Py_popcount32()
+#include "pycore_code.h" // _Py_GetBaseCodeUnit
+#include "pycore_interpframe.h"
#include "pycore_object.h" // _PyObject_GC_UNTRACK()
#include "pycore_opcode_metadata.h" // _PyOpcode_OpName[]
#include "pycore_opcode_utils.h" // MAX_REAL_OPCODE
#include "pycore_optimizer.h" // _Py_uop_analyze_and_optimize()
#include "pycore_pystate.h" // _PyInterpreterState_GET()
+#include "pycore_tuple.h" // _PyTuple_FromArraySteal
+#include "pycore_unicodeobject.h" // _PyUnicode_FromASCII
#include "pycore_uop_ids.h"
#include "pycore_jit.h"
#include <stdbool.h>
for (int pc = 0; pc < length; pc++) {
int opcode = buffer[pc].opcode;
int oparg = buffer[pc].oparg;
- if (_PyUop_Flags[opcode] & HAS_OPARG_AND_1_FLAG) {
- buffer[pc].opcode = opcode + 1 + (oparg & 1);
- assert(strncmp(_PyOpcode_uop_name[buffer[pc].opcode], _PyOpcode_uop_name[opcode], strlen(_PyOpcode_uop_name[opcode])) == 0);
- }
- else if (oparg < _PyUop_Replication[opcode]) {
+ if (oparg < _PyUop_Replication[opcode]) {
buffer[pc].opcode = opcode + oparg + 1;
assert(strncmp(_PyOpcode_uop_name[buffer[pc].opcode], _PyOpcode_uop_name[opcode], strlen(_PyOpcode_uop_name[opcode])) == 0);
}
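
With HAS_OPARG_AND_1_FLAG gone, the expansion loop above is left with only the replication path: a replicated micro-op is selected by indexing opcode + oparg + 1 into its consecutively numbered replicas. A minimal Python sketch of that indexing follows; the opcode numbers and the replication table are made up for illustration and are not part of the patch.

# Illustrative model of the C loop above (hypothetical numbering).
REPLICATION = {"_LOAD_FAST": 8}                      # assumed replicate(8)
UOP_NAMES = {100: "_LOAD_FAST"}
UOP_NAMES.update({100 + i + 1: f"_LOAD_FAST_{i}" for i in range(8)})

def specialize(opcode: int, oparg: int) -> int:
    """Pick the oparg-specific replica, mirroring `opcode + oparg + 1`."""
    name = UOP_NAMES[opcode]
    if oparg < REPLICATION.get(name, 0):
        replica = opcode + oparg + 1
        assert UOP_NAMES[replica].startswith(name)   # same invariant as the C assert
        return replica
    return opcode

assert UOP_NAMES[specialize(100, 3)] == "_LOAD_FAST_3"
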
#include "pycore_uop_metadata.h"
#include "pycore_dict.h"
#include "pycore_long.h"
+#include "pycore_interpframe.h" // _PyFrame_GetCode
#include "pycore_optimizer.h"
#include "pycore_object.h"
#include "pycore_dict.h"
}
}
- op (_PUSH_NULL_CONDITIONAL, ( -- null if (oparg & 1))) {
- int opcode = (oparg & 1) ? _PUSH_NULL : _NOP;
- REPLACE_OP(this_instr, opcode, 0, 0);
- null = sym_new_null(ctx);
+ op (_PUSH_NULL_CONDITIONAL, ( -- null[oparg & 1])) {
+ if (oparg & 1) {
+ REPLACE_OP(this_instr, _PUSH_NULL, 0, 0);
+ null[0] = sym_new_null(ctx);
+ }
+ else {
+ REPLACE_OP(this_instr, _NOP, 0, 0);
+ }
}
op(_LOAD_ATTR, (owner -- attr, self_or_null[oparg&1])) {
Py_UNREACHABLE();
}
- op(_PUSH_FRAME, (new_frame: _Py_UOpsAbstractFrame * -- unused if (0))) {
+ op(_PUSH_FRAME, (new_frame: _Py_UOpsAbstractFrame * -- )) {
SYNC_SP();
ctx->frame->stack_pointer = stack_pointer;
ctx->frame = new_frame;
}
case _PUSH_NULL_CONDITIONAL: {
- JitOptSymbol *null = NULL;
- int opcode = (oparg & 1) ? _PUSH_NULL : _NOP;
- REPLACE_OP(this_instr, opcode, 0, 0);
- null = sym_new_null(ctx);
- if (oparg & 1) stack_pointer[0] = null;
+ JitOptSymbol **null;
+ null = &stack_pointer[0];
+ if (oparg & 1) {
+ REPLACE_OP(this_instr, _PUSH_NULL, 0, 0);
+ null[0] = sym_new_null(ctx);
+ }
+ else {
+ REPLACE_OP(this_instr, _NOP, 0, 0);
+ }
stack_pointer += (oparg & 1);
assert(WITHIN_STACK_BOUNDS());
break;
#include "pycore_frame.h"
#include "pycore_long.h"
#include "pycore_optimizer.h"
+#include "pycore_stats.h"
#include "pycore_tuple.h" // _PyTuple_FromArray()
#include <stdbool.h>
#include "pycore_long.h" // _PyLong_InitTypes()
#include "pycore_object.h" // _PyDebug_PrintTotalRefs()
#include "pycore_obmalloc.h" // _PyMem_init_obmalloc()
+#include "pycore_optimizer.h" // _Py_Executors_InvalidateAll
#include "pycore_pathconfig.h" // _PyPathConfig_UpdateGlobal()
#include "pycore_pyerrors.h" // _PyErr_Occurred()
#include "pycore_pylifecycle.h" // _PyErr_Print()
pure: bool
uses_opcode: bool
tier: int | None = None
- oparg_and_1: bool = False
const_oparg: int = -1
needs_prev: bool = False
no_save_ip: bool = False
class StackItem:
name: str
type: str | None
- condition: str | None
size: str
peek: bool = False
used: bool = False
def __str__(self) -> str:
- cond = f" if ({self.condition})" if self.condition else ""
size = f"[{self.size}]" if self.size else ""
type = "" if self.type is None else f"{self.type} "
- return f"{type}{self.name}{size}{cond} {self.peek}"
+ return f"{type}{self.name}{size} {self.peek}"
def is_array(self) -> bool:
return self.size != ""
def convert_stack_item(
item: parser.StackEffect, replace_op_arg_1: str | None
) -> StackItem:
- cond = item.cond
- if replace_op_arg_1 and OPARG_AND_1.match(item.cond):
- cond = replace_op_arg_1
- return StackItem(item.name, item.type, cond, item.size)
+ return StackItem(item.name, item.type, item.size)
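
With the condition field removed, convert_stack_item passes through just name, type and size; a conditional push that used to be written `null if (oparg & 1)` is now the array effect `null[oparg & 1]`, i.e. an item whose size expression is "oparg & 1". A standalone sketch of the reduced item shape, assuming only what the diff shows (the real StackItem in Tools/cases_generator/analyzer.py also carries peek/used bookkeeping):

# Sketch only; ItemSketch is a stand-in for the trimmed-down StackItem.
from dataclasses import dataclass

@dataclass
class ItemSketch:
    name: str
    type: str | None
    size: str                     # "" means a single stack slot

    def is_array(self) -> bool:
        return self.size != ""

null = ItemSketch("null", None, "oparg & 1")   # was: null if (oparg & 1)
res = ItemSketch("res", None, "")
assert null.is_array() and not res.is_array()
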
def check_unused(stack: list[StackItem], input_names: dict[str, lexer.Token]) -> None:
"Unused items cannot be on the stack above used, non-peek items"
return False
if len(stack_inputs) == 0:
return False
- if any(s.cond for s in stack_inputs) or any(s.cond for s in instr.outputs):
- return False
return all(
(s.name == other.name and s.type == other.type and s.size == other.size)
for s, other in zip(stack_inputs, instr.outputs)
)
-OPARG_AND_1 = re.compile("\\(*oparg *& *1")
-
-
-def effect_depends_on_oparg_1(op: parser.InstDef) -> bool:
- for effect in op.inputs:
- if isinstance(effect, parser.CacheEffect):
- continue
- if not effect.cond:
- continue
- if OPARG_AND_1.match(effect.cond):
- return True
- for effect in op.outputs:
- if not effect.cond:
- continue
- if OPARG_AND_1.match(effect.cond):
- return True
- return False
-
-
def compute_properties(op: parser.CodeDef) -> Properties:
escaping_calls = find_escaping_api_calls(op)
has_free = (
body=op.block.tokens,
properties=compute_properties(op),
)
- if effect_depends_on_oparg_1(op) and "split" in op.annotations:
- result.properties.oparg_and_1 = True
- for bit in ("0", "1"):
- name_x = name + "_" + bit
- properties = compute_properties(op)
- if properties.oparg:
- # May not need oparg anymore
- properties.oparg = any(
- token.text == "oparg" for token in op.block.tokens
- )
- rep = Uop(
- name=name_x,
- context=op.context,
- annotations=op.annotations,
- stack=analyze_stack(op, bit),
- caches=analyze_caches(inputs),
- deferred_refs=analyze_deferred_refs(op),
- output_stores=find_stores_outputs(op),
- body=op.block.tokens,
- properties=properties,
- )
- rep.replicates = result
- uops[name_x] = rep
for anno in op.annotations:
if anno.startswith("replicate"):
result.replicated = int(anno[10:-1])
flags.append("HAS_PURE_FLAG")
if p.no_save_ip:
flags.append("HAS_NO_SAVE_IP_FLAG")
- if p.oparg_and_1:
- flags.append("HAS_OPARG_AND_1_FLAG")
if flags:
return " | ".join(flags)
else:
"register",
"replaced",
"pure",
- "split",
"replicate",
"tier1",
"tier2",
for var in reversed(uop.stack.inputs):
if var.used and var.name not in variables:
variables.add(var.name)
- if var.condition:
- out.emit(f"{type_name(var)}{var.name} = NULL;\n")
- else:
- out.emit(f"{type_name(var)}{var.name};\n")
+ out.emit(f"{type_name(var)}{var.name};\n")
for var in uop.stack.outputs:
if var.peek:
continue
if var.name not in variables:
variables.add(var.name)
- if var.condition:
- out.emit(f"{type_name(var)}{var.name} = NULL;\n")
- else:
- out.emit(f"{type_name(var)}{var.name};\n")
+ out.emit(f"{type_name(var)}{var.name};\n")
def decref_inputs(
class StackEffect(Node):
- name: str = field(compare=False)  # __eq__ only uses type, cond, size
+ name: str = field(compare=False)  # __eq__ only uses type and size
type: str = "" # Optional `:type`
- cond: str = "" # Optional `if (cond)`
size: str = "" # Optional `[size]`
- # Note: size cannot be combined with type or cond
+ # Note: size cannot be combined with type
def __repr__(self) -> str:
- items = [self.name, self.type, self.cond, self.size]
+ items = [self.name, self.type, self.size]
while items and items[-1] == "":
del items[-1]
return f"StackEffect({', '.join(repr(item) for item in items)})"
type_text = self.require(lx.IDENTIFIER).text.strip()
if self.expect(lx.TIMES):
type_text += " *"
- cond_text = ""
- if self.expect(lx.IF):
- self.require(lx.LPAREN)
- if not (cond := self.expression()):
- raise self.make_syntax_error("Expected condition")
- self.require(lx.RPAREN)
- cond_text = cond.text.strip()
size_text = ""
if self.expect(lx.LBRACKET):
- if type_text or cond_text:
+ if type_text:
raise self.make_syntax_error("Unexpected [")
if not (size := self.expression()):
raise self.make_syntax_error("Expected expression")
self.require(lx.RBRACKET)
size_text = size.text.strip()
- return StackEffect(tkn.text, type_text, cond_text, size_text)
+ return StackEffect(tkn.text, type_text, size_text)
return None
@contextual
def var_size(var: StackItem) -> str:
- if var.condition:
- # Special case simplifications
- if var.condition == "0":
- return "0"
- elif var.condition == "1":
- return var.get_size()
- elif var.condition == "oparg & 1" and not var.size:
- return f"({var.condition})"
- else:
- return f"(({var.condition}) ? {var.get_size()} : 0)"
- elif var.size:
+ if var.size:
return var.size
else:
return "1"
def name(self) -> str:
return self.item.name
- @property
- def condition(self) -> str | None:
- return self.item.condition
-
def is_array(self) -> bool:
return self.item.is_array()
cast = f"({var.type})" if (not indirect and var.type) else ""
bits = ".bits" if cast and self.extract_bits else ""
assign = f"{var.name} = {cast}{indirect}stack_pointer[{self.base_offset.to_c()}]{bits};"
- if var.condition:
- if var.condition == "1":
- assign = f"{assign}\n"
- elif var.condition == "0":
- return "", Local.unused(var)
- else:
- assign = f"if ({var.condition}) {{ {assign} }}\n"
- else:
- assign = f"{assign}\n"
+ assign = f"{assign}\n"
return assign, Local.from_memory(var)
def push(self, var: Local) -> None:
) -> None:
cast = f"({cast_type})" if var.type else ""
bits = ".bits" if cast and extract_bits else ""
- if var.condition == "0":
- return
- if var.condition and var.condition != "1":
- out.emit(f"if ({var.condition}) ")
out.emit(f"stack_pointer[{base_offset.to_c()}]{bits} = {cast}{var.name};\n")
def _adjust_stack_pointer(self, out: CWriter, number: str) -> None:
def close_variable(var: Local, overwrite: str) -> None:
nonlocal tmp_defined
close = "PyStackRef_CLOSE"
- if "null" in var.name or var.condition and var.condition != "1":
+ if "null" in var.name:
close = "PyStackRef_XCLOSE"
if var.size:
if var.size == "1":
close_named(close, f"{var.name}[_i]", overwrite)
out.emit("}\n")
else:
- if var.condition and var.condition == "0":
- return
close_named(close, var.name, overwrite)
self.clear_dead_inputs()
def declare_variable(var: StackItem, out: CWriter) -> None:
type, null = type_and_null(var)
space = " " if type[-1].isalnum() else ""
- if var.condition:
- out.emit(f"{type}{space}{var.name} = {null};\n")
- else:
- out.emit(f"{type}{space}{var.name};\n")
+ out.emit(f"{type}{space}{var.name};\n")
def declare_variables(inst: Instruction, out: CWriter) -> None:
required.remove(var.name)
type, null = type_and_null(var)
space = " " if type[-1].isalnum() else ""
- if var.condition:
- out.emit(f"{type}{space}{var.name} = {null};\n")
- if uop.replicates:
- # Replicas may not use all their conditional variables
- # So avoid a compiler warning with a fake use
- out.emit(f"(void){var.name};\n")
- else:
- out.emit(f"{type}{space}{var.name};\n")
+ out.emit(f"{type}{space}{var.name};\n")
def declare_variables(uop: Uop, out: CWriter) -> None:
for name, uop in analysis.uops.items():
if uop.properties.tier == 1:
continue
- if uop.properties.oparg_and_1:
- out.emit(f"/* {uop.name} is split on (oparg & 1) */\n\n")
- continue
if uop.is_super():
continue
why_not_viable = uop.why_not_viable()
#include "pycore_call.h"
#include "pycore_ceval.h"
#include "pycore_cell.h"
+#include "pycore_code.h"
#include "pycore_dict.h"
+#include "pycore_floatobject.h"
#include "pycore_emscripten_signal.h"
+#include "pycore_frame.h"
+#include "pycore_genobject.h"
+#include "pycore_interpframe.h"
#include "pycore_intrinsics.h"
#include "pycore_jit.h"
+#include "pycore_list.h"
#include "pycore_long.h"
#include "pycore_opcode_metadata.h"
#include "pycore_opcode_utils.h"
#include "pycore_sliceobject.h"
#include "pycore_descrobject.h"
#include "pycore_stackref.h"
+#include "pycore_tuple.h"
+#include "pycore_unicodeobject.h"
#include "ceval_macros.h"