{
int32_t slot = ir->s;
if (!ra_hasspill(slot)) {
- if (irt_isnum(ir->t) || (LJ_64 && irt_islightud(ir->t))) {
+ if (irt_is64(ir->t)) {
slot = as->evenspill;
as->evenspill += 2;
} else if (as->oddspill) {
return r;
}
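Reviewer note (not part of the patch): x86 spill slots are 4 bytes wide, so any 64 bit IR type must claim a contiguous even/odd slot pair; the evenspill/oddspill counters keep that pairing intact. A minimal sketch of the invariant, with a hypothetical helper name:

/* Sketch only: a 64 bit spill reserves two adjacent 4 byte slots,
** starting at an even index so the pair never straddles other slots. */
static int32_t alloc_spill_pair_sketch(ASMState *as)
{
  int32_t slot = as->evenspill;  /* Even slot index. */
  as->evenspill += 2;            /* Reserve both halves of the pair. */
  return slot;
}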
-/* Use 64 bit operations to handle 64 bit lightuserdata. */
-#define REX_64LU(ir, r) \
- ((r) | ((LJ_64 && irt_islightud((ir)->t)) ? REX_64 : 0))
+/* Use 64 bit operations to handle 64 bit IR types. */
+#define REX_64IR(ir, r) \
+ ((r) | ((LJ_64 && irt_is64((ir)->t)) ? REX_64 : 0))
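Usage sketch for reviewers (illustrative, not from the patch): the macro ORs a REX.W request into the register operand, and the x64 emitter turns that into a 64 bit operation:

/* Emits a 64 bit mov for NUM/I64/U64/P64 (and LIGHTUD on x64),
** a plain 32 bit mov for everything else. */
emit_rr(as, XO_MOV, REX_64IR(ir, dest), src);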
/* Generic move between two regs. */
static void ra_movrr(ASMState *as, IRIns *ir, Reg r1, Reg r2)
{
- emit_rr(as, r1 < RID_MAX_GPR ? XO_MOV : XMM_MOVRR(as), REX_64LU(ir, r1), r2);
+ emit_rr(as, r1 < RID_MAX_GPR ? XO_MOV : XMM_MOVRR(as), REX_64IR(ir, r1), r2);
}
/* Restore a register (marked as free). Rematerialize or force a spill. */
ra_modified(as, r);
RA_DBGX((as, "restore $i $r", ir, r));
emit_rmro(as, r < RID_MAX_GPR ? XO_MOV : XMM_MOVRM(as),
- REX_64LU(ir, r), RID_ESP, ofs);
+ REX_64IR(ir, r), RID_ESP, ofs);
}
return r;
}
{
RA_DBGX((as, "save $i $r", ir, r));
emit_rmro(as, r < RID_MAX_GPR ? XO_MOVto : XO_MOVSDto,
- REX_64LU(ir, r), RID_ESP, sps_scale(ir->s));
+ REX_64IR(ir, r), RID_ESP, sps_scale(ir->s));
}
#define MINCOST(r) \
lj_trace_err(as->J, LJ_TRERR_NYICOAL);
r = ra_alloc1(as, args[n], allow & RSET_GPR);
allow &= ~RID2RSET(r);
- emit_movtomro(as, REX_64LU(ir, r), RID_ESP, ofs);
+ emit_movtomro(as, REX_64IR(ir, r), RID_ESP, ofs);
}
ofs += sizeof(intptr_t);
}
/* Otherwise use g->tmptv to hold the TValue. */
if (!irref_isk(ir->op2)) {
Reg src = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, tmp));
- emit_movtomro(as, REX_64LU(irkey, src), tmp, 0);
+ emit_movtomro(as, REX_64IR(irkey, src), tmp, 0);
} else if (!irt_ispri(irkey->t)) {
emit_movmroi(as, tmp, 0, irkey->i);
}
case IRT_U8: xo = XO_MOVZXb; break;
case IRT_I16: xo = XO_MOVSXw; break;
case IRT_U16: xo = XO_MOVZXw; break;
-#if LJ_64
- case IRT_LIGHTUD:
- dest |= REX_64;
- /* fallthrough */
-#endif
default:
- lua_assert(irt_isint(ir->t) || irt_isaddr(ir->t));
+ if (LJ_64 && irt_is64(ir->t))
+ dest |= REX_64;
+ else
+ lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
xo = XO_MOV;
break;
}
/* The IRT_I16/IRT_U16 stores should never be simplified for constant
** values since mov word [mem], imm16 has a length-changing prefix.
*/
+ lua_assert(!(irref_isk(ir->op2) && irt_is64(ir->t))); /* NYI: KINT64. */
if (!irref_isk(ir->op2) || irt_isi16(ir->t) || irt_isu16(ir->t)) {
RegSet allow8 = (irt_isi8(ir->t) || irt_isu8(ir->t)) ? RSET_GPR8 : RSET_GPR;
src = ra_alloc1(as, ir->op2, allow8);
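Background for the length-changing-prefix comment above (reviewer note): the 66h operand-size prefix together with a 16 bit immediate changes the instruction length, which triggers a pre-decode stall on many Intel cores; storing from a register sidesteps the immediate:

/* Worked encoding example (illustrative):
**   mov word  [eax], 0x1234  ->  66 C7 00 34 12     (LCP stall risk)
**   mov dword [eax], 0x1234  ->  C7 00 34 12 00 00  (no LCP)
**   mov word  [eax], cx      ->  66 89 08           (prefix, but no imm16)
*/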
case IRT_LIGHTUD: lua_assert(0); /* NYI: mask 64 bit lightuserdata. */
#endif
default:
- lua_assert(irt_isint(ir->t) || irt_isaddr(ir->t));
+ if (LJ_64 && irt_is64(ir->t))
+ src |= REX_64;
+ else
+ lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
xo = XO_MOVto;
break;
}
emit_i8(as, IR(ir->op2)->i);
emit_mrm(as, XO_MOVmib, 0, RID_MRM);
} else {
- lua_assert(irt_isint(ir->t) || irt_isaddr(ir->t));
+ lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
emit_i32(as, IR(ir->op2)->i);
emit_mrm(as, XO_MOVmi, 0, RID_MRM);
}
asm_guardcc(as, cc);
if (usetest && left != RID_MRM) {
/* Use test r,r instead of cmp r,0. */
- emit_rr(as, XO_TEST, REX_64LU(ir, left), left);
+ emit_rr(as, XO_TEST, REX_64IR(ir, left), left);
if (irl+1 == ir) /* Referencing previous ins? */
as->testmcp = as->mcp; /* Set flag to drop test r,r if possible. */
} else {
Reg left = ra_alloc1(as, lref, RSET_GPR);
Reg right = asm_fuseload(as, rref, rset_exclude(RSET_GPR, left));
asm_guardcc(as, cc);
- emit_mrm(as, XO_CMP, REX_64LU(ir, left), right);
+ emit_mrm(as, XO_CMP, REX_64IR(ir, left), right);
}
}
}
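Reviewer note on the test r,r idiom above: it is one byte shorter than cmp r,0, sets ZF/SF identically for a zero test, and (per the testmcp logic) can be dropped entirely when the previous instruction already set the flags:

/* Illustrative encodings:
**   test eax, eax  ->  85 C0     (2 bytes)
**   cmp  eax, 0    ->  83 F8 00  (3 bytes, imm8 form)
*/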
lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t));
if (!irref_isk(ref)) {
Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, RID_BASE));
- emit_movtomro(as, REX_64LU(ir, src), RID_BASE, ofs);
+ emit_movtomro(as, REX_64IR(ir, src), RID_BASE, ofs);
} else if (!irt_ispri(ir->t)) {
emit_movmroi(as, RID_BASE, ofs, ir->i);
}
int32_t ofs = sps_scale(regsp_spill(rs));
ra_free(as, r);
emit_rmro(as, r < RID_MAX_GPR ? XO_MOV : XMM_MOVRM(as),
- REX_64LU(ir, r), RID_ESP, ofs);
+ REX_64IR(ir, r), RID_ESP, ofs);
checkmclim(as);
}
}
IRT_THREAD,
IRT_PROTO,
IRT_FUNC,
- IRT_9, /* Unused (map of LJ_TTRACE). */
+ IRT_P64, /* IRT_P64 never escapes the IR (map of LJ_TTRACE). */
IRT_CDATA,
IRT_TAB,
IRT_UDATA,
** a TValue after implicit or explicit conversion (TONUM). Their types
** must be contiguous and next to IRT_NUM (see the typerange macros below).
*/
- IRT_INT,
IRT_I8,
IRT_U8,
IRT_I16,
IRT_U16,
- /* There is room for 13 more types. */
+ IRT_INT,
+ IRT_U32,
+ IRT_I64,
+ IRT_U64,
+ /* There is room for 10 more types. */
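The reordering above is what keeps each range predicate below a single unsigned compare; irt_typerange in lj_ir.h has roughly this shape (quoted from memory, verify against the tree):

/* Sketch: tests first <= type <= last with one subtraction and compare. */
#define irt_typerange(t, first, last) \
  ((uint32_t)((t).irt & IRT_TYPE) - (uint32_t)(first) <= \
   (uint32_t)(last) - (uint32_t)(first))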
/* Additional flags. */
IRT_MARK = 0x20, /* Marker for misc. purposes. */
#define irt_isu8(t) (irt_type(t) == IRT_U8)
#define irt_isi16(t) (irt_type(t) == IRT_I16)
#define irt_isu16(t) (irt_type(t) == IRT_U16)
+#define irt_isu32(t) (irt_type(t) == IRT_U32)
-#define irt_isinteger(t) (irt_typerange((t), IRT_INT, IRT_U16))
+#define irt_isinteger(t) (irt_typerange((t), IRT_I8, IRT_INT))
#define irt_isgcv(t) (irt_typerange((t), IRT_STR, IRT_UDATA))
#define irt_isaddr(t) (irt_typerange((t), IRT_LIGHTUD, IRT_UDATA))
+#define IRT_IS64 \
+ ((1u<<IRT_NUM) | (1u<<IRT_I64) | (1u<<IRT_U64) | (1u<<IRT_P64) | \
+ (LJ_64 ? (1u<<IRT_LIGHTUD) : 0))
+#define irt_is64(t) ((IRT_IS64 >> irt_type(t)) & 1)
+
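A quick illustration of the new predicate (this follows directly from the macro): the mask is shifted right by the type code, so the test is branch-free:

/* For t of type IRT_NUM: ((IRT_IS64 >> IRT_NUM) & 1) == 1;
** for IRT_INT it is 0; for IRT_LIGHTUD it is LJ_64 ? 1 : 0.
** On x86 this compiles to one shr + and, with no compare chain. */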
static LJ_AINLINE IRType itype2irt(const TValue *tv)
{
if (tvisnum(tv))
#define tref_isbool(tr) (tref_typerange((tr), IRT_FALSE, IRT_TRUE))
#define tref_ispri(tr) (tref_typerange((tr), IRT_NIL, IRT_TRUE))
#define tref_istruecond(tr) (!tref_typerange((tr), IRT_NIL, IRT_FALSE))
-#define tref_isinteger(tr) (tref_typerange((tr), IRT_INT, IRT_U16))
-#define tref_isnumber(tr) (tref_typerange((tr), IRT_NUM, IRT_U16))
+#define tref_isinteger(tr) (tref_typerange((tr), IRT_I8, IRT_INT))
+#define tref_isnumber(tr) (tref_typerange((tr), IRT_NUM, IRT_INT))
#define tref_isnumber_str(tr) (tref_isnumber((tr)) || tref_isstr((tr)))
#define tref_isgcv(tr) (tref_typerange((tr), IRT_STR, IRT_UDATA))