local colorize, irtype
-- Lookup tables to convert some literals into names.
-local tointname = { [0] = "check", "index", "", "Z", "S", "T", }
local litname = {
["SLOAD "] = setmetatable({}, { __index = function(t, mode)
local s = ""
t[mode] = s
return s
end}),
- ["TOINT "] = tointname,
- ["TOI64 "] = tointname,
["FLOAD "] = vmdef.irfield,
["FREF "] = vmdef.irfield,
["FPMATH"] = vmdef.irfpm,
/* -- Type conversions ---------------------------------------------------- */
-static void asm_tonum(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_FPR);
- Reg left = asm_fuseload(as, ir->op1, RSET_GPR);
- emit_mrm(as, XO_CVTSI2SD, dest, left);
- if (!(as->flags & JIT_F_SPLIT_XMM))
- emit_rr(as, XO_XORPS, dest, dest); /* Avoid partial register stall. */
-}
-
static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
{
Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
/* Can't fuse since left is needed twice. */
}
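A guarded number-to-integer conversion has to look at its FP operand twice: once for the truncating convert and once for the round-trip compare that rejects non-integral inputs, which is why asm_tointg cannot fuse a memory operand. A minimal C sketch of the check it encodes (the function name and the explicit range test are illustrative, not from the source):

#include <stdint.h>

/* Sketch only: succeeds iff n is an integral value representable as int32_t. */
static int tointg_sketch(double n, int32_t *out)
{
  if (!(n >= -2147483648.0 && n <= 2147483647.0))
    return 0;                  /* Out of range or NaN: the guard fails. */
  *out = (int32_t)n;           /* CVTTSD2SI: truncate towards zero. */
  return (double)*out == n;    /* CVTSI2SD + UCOMISD: reject non-integral n. */
}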
-static void asm_toint(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left = asm_fuseload(as, ir->op1, RSET_FPR);
- emit_mrm(as, XO_CVTSD2SI, dest, left);
-}
-
static void asm_tobit(ASMState *as, IRIns *ir)
{
Reg dest = ra_dest(as, ir, RSET_GPR);
ra_left(as, tmp, ir->op1);
}
-static void asm_toi64(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- IRRef lref = ir->op1;
- lua_assert(LJ_64); /* NYI: 32 bit register pairs. */
- if (ir->op2 == IRTOINT_TRUNCI64) {
- Reg left = asm_fuseload(as, lref, RSET_FPR);
- emit_mrm(as, XO_CVTTSD2SI, dest|REX_64, left);
- } else if (ir->op2 == IRTOINT_ZEXT64) {
- /* Nothing to do. This assumes 32 bit regs are already zero-extended. */
- ra_left(as, dest, lref); /* But may need to move regs. */
- } else {
- Reg left = asm_fuseload(as, lref, RSET_GPR);
- emit_mrm(as, XO_MOVSXd, dest|REX_64, left);
- lua_assert(ir->op2 == IRTOINT_SEXT64);
- }
-}
-
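The ZEXT64 case above emits no code because of an x86-64 guarantee: every instruction that writes a 32-bit register clears the upper half of the full 64-bit register, so a previously computed 32-bit value is already zero-extended; only sign-extension needs an explicit MOVSXD. A small sketch of the two widenings at the C level (illustrative only):

#include <stdint.h>

/* Sketch only: the two 32->64 bit widenings the removed code handled. */
static uint64_t widen_sketch(int32_t i, uint32_t u)
{
  uint64_t s = (uint64_t)(int64_t)i;  /* SEXT64: movsxd r64, r32 */
  uint64_t z = (uint64_t)u;           /* ZEXT64: no instruction, upper bits are already zero */
  return s + z;
}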
static void asm_conv(ASMState *as, IRIns *ir)
{
IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
emit_x87op(as, XI_FLD1);
else
emit_rma(as, XO_FLDq, XOg_FLDq, tv);
- } else if (ir->o == IR_TONUM && !ra_used(ir) &&
+ } else if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT && !ra_used(ir) &&
!irref_isk(ir->op1) && mayfuse(as, ir->op1)) {
IRIns *iri = IR(ir->op1);
emit_rmro(as, XO_FILDd, XOg_FILDd, RID_ESP, ra_spill(as, iri));
case IR_OBAR: asm_obar(as, ir); break;
/* Type conversions. */
- case IR_TONUM: asm_tonum(as, ir); break;
- case IR_TOINT:
- if (irt_isguard(ir->t))
- asm_tointg(as, ir, ra_alloc1(as, ir->op1, RSET_FPR));
- else
-      asm_toint(as, ir);
- break;
case IR_TOBIT: asm_tobit(as, ir); break;
- case IR_TOI64: asm_toi64(as, ir); break;
case IR_CONV: asm_conv(as, ir); break;
case IR_TOSTR: asm_tostr(as, ir); break;
case IR_STRTO: asm_strto(as, ir); break;
}
break;
/* Do not propagate hints across type conversions. */
- case IR_CONV: case IR_TONUM: case IR_TOINT: case IR_TOBIT:
+ case IR_CONV: case IR_TOBIT:
break;
default:
/* Propagate hints across likely 'op reg, imm' or 'op reg'. */
\
/* Type conversions. */ \
_(CONV, N , ref, lit) \
- _(TONUM, N , ref, ___) \
- _(TOINT, N , ref, lit) \
_(TOBIT, N , ref, ref) \
- _(TOI64, N , ref, lit) \
_(TOSTR, N , ref, ___) \
_(STRTO, N , ref, ___) \
\
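Each _(NAME, mode, op1mode, op2mode) row above is part of an X-macro table: the list is expanded several times with different definitions of _ to generate, among other things, the IROp opcode enum, so dropping the TONUM/TOINT/TOI64 rows removes those opcodes everywhere at once. A sketch of how such a table expands into an enum; the _SKETCH names are placeholders, not the actual macro names:

/* Sketch only: expanding an X-macro opcode table into an enum. */
#define IRDEF_SKETCH(_) \
  _(CONV,  N, ref, lit) \
  _(TOBIT, N, ref, ref) \
  _(TOSTR, N, ref, ___) \
  _(STRTO, N, ref, ___)

typedef enum {
#define IRENUM_SKETCH(name, m, m1, m2) IR_##name,
IRDEF_SKETCH(IRENUM_SKETCH)
#undef IRENUM_SKETCH
  IROP_SKETCH__MAX
} IROpSketch;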
#define IRXLOAD_READONLY 1 /* Load from read-only data. */
#define IRXLOAD_UNALIGNED 2 /* Unaligned load. */
-/* TOINT/TOI64 mode, stored in op2. Ordered by strength of the checks. */
-#define IRTOINT_CHECK 0 /* Number checked for integerness. */
-#define IRTOINT_INDEX 1 /* Checked + special backprop rules. */
-#define IRTOINT_ANY 2 /* Any FP number is ok. */
-#define IRTOINT_ZEXT64 3 /* Convert uint32_t to int64_t. */
-#define IRTOINT_SEXT64 4 /* Convert int32_t to int64_t. */
-#define IRTOINT_TRUNCI64 5 /* Truncate number to int64_t. */
-#define IRTOINT_TOBIT 6 /* Cache only: TOBIT conversion. */
-
/* CONV mode, stored in op2. */
#define IRCONV_SRCMASK 0x001f /* Source IRType. */
#define IRCONV_DSTMASK 0x03e0 /* Dest. IRType (also in ir->t). */
#define IRCONV_INDEX (2<<IRCONV_CSH) /* Check + special backprop rules. */
#define IRCONV_CHECK (3<<IRCONV_CSH) /* Number checked for integerness. */
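CONV packs what TOINT/TOI64 spread over separate opcodes into a single op2 word: the low 5 bits hold the source IRType, the next 5 bits the destination IRType (IRCONV_DSTMASK is 0x03e0, i.e. a shift of 5), and the bits at and above IRCONV_CSH select the number-to-integer check mode. A sketch of composing and decomposing such a word; the helper names and the literal shift are inferred from the masks above, not taken from the header:

/* Sketch only: packing/unpacking a CONV op2 word under the masks above. */
#define SKETCH_DSH 5  /* Implied by IRCONV_DSTMASK == 0x03e0. */
#define sketch_conv(dst, src, chk)  (((dst) << SKETCH_DSH) | (src) | (chk))
#define sketch_conv_src(op2)        ((op2) & 0x001f)                 /* IRCONV_SRCMASK */
#define sketch_conv_dst(op2)        (((op2) & 0x03e0) >> SKETCH_DSH) /* IRCONV_DSTMASK */

/* E.g. the IRCONV_NUM_INT mode tested in the fused FILD case earlier would be
** sketch_conv(IRT_NUM, IRT_INT, 0): int source, number destination, no check.
*/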
-
/* C call info for CALL* instructions. */
typedef struct CCallInfo {
ASMFunction func; /* Function pointer. */
/* -- Constant folding of conversions ------------------------------------- */
-LJFOLD(TONUM KINT)
-LJFOLDF(kfold_tonum)
-{
- return lj_ir_knum(J, cast_num(fleft->i));
-}
-
LJFOLD(TOBIT KNUM KNUM)
LJFOLDF(kfold_tobit)
{
return INTFOLD((int32_t)tv.u32.lo);
}
-LJFOLD(TOINT KNUM any)
-LJFOLDF(kfold_toint)
-{
- lua_Number n = knumleft;
- int32_t k = lj_num2int(n);
- if (irt_isguard(fins->t) && n != cast_num(k)) {
- /* We're about to create a guard which always fails, like TOINT +1.5.
- ** Some pathological loops cause this during LICM, e.g.:
- ** local x,k,t = 0,1.5,{1,[1.5]=2}
- ** for i=1,200 do x = x+ t[k]; k = k == 1 and 1.5 or 1 end
- ** assert(x == 300)
- */
- return FAILFOLD;
- }
- return INTFOLD(k);
-}
-
-LJFOLD(TOI64 KINT any)
-LJFOLDF(kfold_toi64_kint)
-{
- lua_assert(fins->op2 == IRTOINT_ZEXT64 || fins->op2 == IRTOINT_SEXT64);
- if (fins->op2 == IRTOINT_ZEXT64)
- return INT64FOLD((uint64_t)(uint32_t)fleft->i);
- else
- return INT64FOLD((uint64_t)(int32_t)fleft->i);
-}
-
-LJFOLD(TOI64 KNUM any)
-LJFOLDF(kfold_toi64_knum)
-{
- lua_assert(fins->op2 == IRTOINT_TRUNCI64);
- return INT64FOLD((uint64_t)(int64_t)knumleft);
-}
-
LJFOLD(CONV KINT IRCONV_NUM_INT)
LJFOLDF(kfold_conv_kint_num)
{
return NEXTFOLD;
}
-LJFOLD(FPMATH TONUM IRFPM_FLOOR)
-LJFOLD(FPMATH TONUM IRFPM_CEIL)
-LJFOLD(FPMATH TONUM IRFPM_TRUNC)
LJFOLD(ABS ABS KNUM)
LJFOLDF(shortcut_left)
{
return fleft->op1; /* f(g(x)) ==> x */
}
-LJFOLD(TONUM TOINT)
-LJFOLDF(shortcut_leftleft_toint)
-{
- PHIBARRIER(fleft);
- if (irt_isguard(fleft->t)) /* Only safe with a guarded TOINT. */
- return fleft->op1; /* f(g(x)) ==> x */
- return NEXTFOLD;
-}
-
-LJFOLD(TOINT TONUM any)
-LJFOLD(TOBIT TONUM KNUM) /* The inverse must NOT be shortcut! */
-LJFOLDF(shortcut_leftleft_across_phi)
-{
- /* Fold even across PHI to avoid expensive int->num->int conversions. */
- return fleft->op1; /* f(g(x)) ==> x */
-}
-
-LJFOLD(TOI64 TONUM any)
-LJFOLDF(shortcut_leftleft_toint64)
-{
- /* Fold even across PHI to avoid expensive int->num->int64 conversions. */
- fins->op1 = fleft->op1; /* (int64_t)(double)(int)x ==> (int64_t)x */
- fins->op2 = IRTOINT_SEXT64;
- return RETRYFOLD;
-}
-
/* -- FP algebraic simplifications ---------------------------------------- */
/* FP arithmetic is tricky -- there's not much to simplify.
return lj_opt_narrow_convert(J);
}
-/* Relaxed CSE rule for TOINT allows commoning with stronger checks, too. */
-LJFOLD(TOINT any any)
-LJFOLDF(cse_toint)
-{
- if (LJ_LIKELY(J->flags & JIT_F_OPT_CSE)) {
- IRRef ref, op1 = fins->op1;
- uint8_t guard = irt_isguard(fins->t);
- for (ref = J->chain[IR_TOINT]; ref > op1; ref = IR(ref)->prev)
- if (IR(ref)->op1 == op1 && irt_isguard(IR(ref)->t) >= guard)
- return ref;
- }
- return EMITFOLD; /* No fallthrough to regular CSE. */
-}
-
-/* -- Strength reduction of widening -------------------------------------- */
-
-LJFOLD(TOI64 any 3) /* IRTOINT_ZEXT64 */
-LJFOLDF(simplify_zext64)
-{
-#if LJ_TARGET_X64
- /* Eliminate widening. All 32 bit ops implicitly zero-extend the result. */
- PHIBARRIER(fleft);
- return LEFTFOLD;
-#else
- UNUSED(J);
- return NEXTFOLD;
-#endif
-}
-
-LJFOLD(TOI64 any 4) /* IRTOINT_SEXT64 */
-LJFOLDF(simplify_sext64)
-{
- IRRef ref = fins->op1;
- int64_t ofs = 0;
- PHIBARRIER(fleft);
- if (fleft->o == IR_ADD && irref_isk(fleft->op2)) {
- ofs = (int64_t)IR(fleft->op2)->i;
- ref = fleft->op1;
- }
- /* Use scalar evolution analysis results to strength-reduce sign-extension. */
- if (ref == J->scev.idx) {
- IRRef lo = J->scev.dir ? J->scev.start : J->scev.stop;
- lua_assert(irt_isint(J->scev.t));
- if (lo && IR(lo)->i + ofs >= 0) {
-#if LJ_TARGET_X64
- /* Eliminate widening. All 32 bit ops do an implicit zero-extension. */
- return LEFTFOLD;
-#else
- /* Reduce to a (cheaper) zero-extension. */
- fins->op2 = IRTOINT_ZEXT64;
- return RETRYFOLD;
-#endif
- }
- }
- return NEXTFOLD;
-}
-
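The strength reduction removed above rests on a simple equivalence: if the 32-bit value is known to be non-negative (here, a loop index whose lower bound plus the folded constant offset is >= 0), its sign bit is clear, so sign-extension and zero-extension produce the same 64-bit result, and the zero-extension is then free on x64. A tiny sketch of that equivalence (illustrative only):

#include <stdint.h>
#include <assert.h>

/* Sketch only: for non-negative v, sign- and zero-extension coincide. */
static void sext_zext_sketch(int32_t v)
{
  if (v >= 0)
    assert((uint64_t)(int64_t)v == (uint64_t)(uint32_t)v);
}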
/* -- Integer algebraic simplifications ----------------------------------- */
LJFOLD(ADD any KINT)