static
Bool dis_ARM64_data_processing_immediate(/*MB_OUT*/DisResult* dres,
- UInt insn)
+ UInt insn, Bool sigill_diag)
{
# define INSN(_bMax,_bMin) SLICE_UInt(insn, (_bMax), (_bMin))
}
after_extr:
- vex_printf("ARM64 front end: data_processing_immediate\n");
+ if (sigill_diag) {
+ vex_printf("ARM64 front end: data_processing_immediate\n");
+ }
return False;
# undef INSN
}
static
Bool dis_ARM64_data_processing_register(/*MB_OUT*/DisResult* dres,
- UInt insn)
+ UInt insn, Bool sigill_diag)
{
# define INSN(_bMax,_bMin) SLICE_UInt(insn, (_bMax), (_bMin))
/* fall through */
}
- vex_printf("ARM64 front end: data_processing_register\n");
+ if (sigill_diag) {
+ vex_printf("ARM64 front end: data_processing_register\n");
+ }
return False;
# undef INSN
}
return res;
fail:
- vex_printf("gen_indexed_EA: unhandled case optS == 0x%x\n", optS);
+ if (0 /*really, sigill_diag, but that causes too much plumbing*/) {
+ vex_printf("gen_indexed_EA: unhandled case optS == 0x%x\n", optS);
+ }
return IRTemp_INVALID;
}
static
Bool dis_ARM64_load_store(/*MB_OUT*/DisResult* dres, UInt insn,
- const VexAbiInfo* abiinfo
-)
+ const VexAbiInfo* abiinfo, Bool sigill_diag)
{
# define INSN(_bMax,_bMin) SLICE_UInt(insn, (_bMax), (_bMin))
return True;
}
- vex_printf("ARM64 front end: load_store\n");
+ if (sigill_diag) {
+ vex_printf("ARM64 front end: load_store\n");
+ }
return False;
# undef INSN
}
static
Bool dis_ARM64_branch_etc(/*MB_OUT*/DisResult* dres, UInt insn,
const VexArchInfo* archinfo,
- const VexAbiInfo* abiinfo)
+ const VexAbiInfo* abiinfo, Bool sigill_diag)
{
# define INSN(_bMax,_bMin) SLICE_UInt(insn, (_bMax), (_bMin))
/* D5 0B 7B 001 Rt dc cvau, rT
*/
if ((INSN(31,0) & 0xFFFFFFE0) == 0xD50B7B20) {
+ /* JRS 2019Nov24: should we handle DC_CIVAC the same?
+ || (INSN(31,0) & 0xFFFFFFE0) == 0xD50B7E20 */
/* Exactly the same scheme as for IC IVAU, except we observe the
dMinLine size, and request an Ijk_FlushDCache instead of
Ijk_InvalICache. */
return True;
}
- vex_printf("ARM64 front end: branch_etc\n");
+ if (sigill_diag) {
+ vex_printf("ARM64 front end: branch_etc\n");
+ }
return False;
# undef INSN
}
/*MB_OUT*/DisResult* dres,
const UChar* guest_instr,
const VexArchInfo* archinfo,
- const VexAbiInfo* abiinfo
+ const VexAbiInfo* abiinfo,
+ Bool sigill_diag
)
{
// A macro to fish bits out of 'insn'.
switch (INSN(28,25)) {
case BITS4(1,0,0,0): case BITS4(1,0,0,1):
// Data processing - immediate
- ok = dis_ARM64_data_processing_immediate(dres, insn);
+ ok = dis_ARM64_data_processing_immediate(dres, insn, sigill_diag);
break;
case BITS4(1,0,1,0): case BITS4(1,0,1,1):
// Branch, exception generation and system instructions
- ok = dis_ARM64_branch_etc(dres, insn, archinfo, abiinfo);
+ ok = dis_ARM64_branch_etc(dres, insn, archinfo, abiinfo, sigill_diag);
break;
case BITS4(0,1,0,0): case BITS4(0,1,1,0):
case BITS4(1,1,0,0): case BITS4(1,1,1,0):
// Loads and stores
- ok = dis_ARM64_load_store(dres, insn, abiinfo);
+ ok = dis_ARM64_load_store(dres, insn, abiinfo, sigill_diag);
break;
case BITS4(0,1,0,1): case BITS4(1,1,0,1):
// Data processing - register
- ok = dis_ARM64_data_processing_register(dres, insn);
+ ok = dis_ARM64_data_processing_register(dres, insn, sigill_diag);
break;
case BITS4(0,1,1,1): case BITS4(1,1,1,1):
// Data processing - SIMD and floating point
/* Try to decode */
Bool ok = disInstr_ARM64_WRK( &dres,
&guest_code_IN[delta_IN],
- archinfo, abiinfo );
+ archinfo, abiinfo, sigill_diag_IN );
if (ok) {
/* All decode successes end up here. */
vassert(dres.len == 4 || dres.len == 20);
return !primopMightTrap(e->Iex.Unop.op);
case Iex_Binop:
return !primopMightTrap(e->Iex.Binop.op);
+ case Iex_Triop:
+ return !primopMightTrap(e->Iex.Triop.details->op);
case Iex_ITE:
case Iex_CCall:
case Iex_Get:
+ case Iex_Const:
return True;
default:
vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
static Bool stmt_is_guardable ( const IRStmt* st )
{
switch (st->tag) {
+ // These are easily guarded.
case Ist_IMark:
case Ist_Put:
return True;
- case Ist_Store: // definitely not
- case Ist_CAS: // definitely not
- case Ist_Exit: // We could in fact spec this, if required
+ // These are definitely not guardable, or at least it's way too much
+ // hassle to do so.
+ case Ist_CAS:
+ case Ist_LLSC:
+ case Ist_MBE:
return False;
+ // These could be guarded, with some effort, if really needed, but
+ // currently aren't guardable.
+ case Ist_Store:
+ case Ist_Exit:
+ return False;
+ // This is probably guardable, but it depends on the RHS of the
+ // assignment.
case Ist_WrTmp:
return expr_is_guardable(st->Ist.WrTmp.data);
default:
i->ARM64in.Unary.op = op;
return i;
}
+ARM64Instr* ARM64Instr_Set64 ( HReg dst, ARM64CondCode cond ) {
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+ i->tag = ARM64in_Set64;
+ i->ARM64in.Set64.dst = dst;
+ i->ARM64in.Set64.cond = cond;
+ return i;
+}
ARM64Instr* ARM64Instr_MovI ( HReg dst, HReg src ) {
ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_MovI;
vex_printf(", ");
ppHRegARM64(i->ARM64in.Unary.src);
return;
+ case ARM64in_Set64:
+ vex_printf("cset ");
+ ppHRegARM64(i->ARM64in.Set64.dst);
+ vex_printf(", %s", showARM64CondCode(i->ARM64in.Set64.cond));
+ return;
case ARM64in_MovI:
vex_printf("mov ");
ppHRegARM64(i->ARM64in.MovI.dst);
addHRegUse(u, HRmWrite, i->ARM64in.Unary.dst);
addHRegUse(u, HRmRead, i->ARM64in.Unary.src);
return;
+ case ARM64in_Set64:
+ addHRegUse(u, HRmWrite, i->ARM64in.Set64.dst);
+ return;
case ARM64in_MovI:
addHRegUse(u, HRmWrite, i->ARM64in.MovI.dst);
addHRegUse(u, HRmRead, i->ARM64in.MovI.src);
i->ARM64in.Unary.dst = lookupHRegRemap(m, i->ARM64in.Unary.dst);
i->ARM64in.Unary.src = lookupHRegRemap(m, i->ARM64in.Unary.src);
return;
+ case ARM64in_Set64:
+ i->ARM64in.Set64.dst = lookupHRegRemap(m, i->ARM64in.Set64.dst);
+ return;
case ARM64in_MovI:
i->ARM64in.MovI.dst = lookupHRegRemap(m, i->ARM64in.MovI.dst);
i->ARM64in.MovI.src = lookupHRegRemap(m, i->ARM64in.MovI.src);
}
goto bad;
}
+ case ARM64in_Set64: {
+ /* 1 00 1101 0100 11111 invert(cond) 01 11111 Rd CSET Rd, Cond */
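+ /* Worked example (illustrative, not part of the original change): with
+ cond = ARM64cc_NE (1) and dst = x5, invert(cond) is EQ (0), so the
+ word emitted below would be 0x9A9F07E5, i.e. CSINC x5, xzr, xzr, eq,
+ which disassembles as CSET x5, ne. */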
+ UInt rDst = iregEnc(i->ARM64in.Set64.dst);
+ UInt cc = (UInt)i->ARM64in.Set64.cond;
+ vassert(cc < 14);
+ *p++ = X_3_8_5_6_5_5(X100, X11010100, X11111,
+ ((cc ^ 1) << 2) | X01, X11111, rDst);
+ goto done;
+ }
case ARM64in_MovI: {
/* We generate the "preferred form", ORR Xd, XZR, Xm
101 01010 00 0 m 000000 11111 d
ARM64in_Test,
ARM64in_Shift,
ARM64in_Unary,
+ ARM64in_Set64,
ARM64in_MovI, /* int reg-reg move */
ARM64in_Imm64,
ARM64in_LdSt64,
HReg src;
ARM64UnaryOp op;
} Unary;
+ /* CSET -- Convert a condition code to a 64-bit value (0 or 1). */
+ struct {
+ HReg dst;
+ ARM64CondCode cond;
+ } Set64;
/* MOV dst, src -- reg-reg move for integer registers */
struct {
HReg dst;
extern ARM64Instr* ARM64Instr_Test ( HReg, ARM64RIL* );
extern ARM64Instr* ARM64Instr_Shift ( HReg, HReg, ARM64RI6*, ARM64ShiftOp );
extern ARM64Instr* ARM64Instr_Unary ( HReg, HReg, ARM64UnaryOp );
+extern ARM64Instr* ARM64Instr_Set64 ( HReg, ARM64CondCode );
extern ARM64Instr* ARM64Instr_MovI ( HReg, HReg );
extern ARM64Instr* ARM64Instr_Imm64 ( HReg, ULong );
extern ARM64Instr* ARM64Instr_LdSt64 ( Bool isLoad, HReg, ARM64AMode* );
return ARM64cc_NE;
}
+ /* Constant 1:Bit */
+ if (e->tag == Iex_Const) {
+ /* This is a very stupid translation. Hopefully it doesn't occur much,
+ if ever. */
+ vassert(e->Iex.Const.con->tag == Ico_U1);
+ vassert(e->Iex.Const.con->Ico.U1 == True
+ || e->Iex.Const.con->Ico.U1 == False);
+ HReg rTmp = newVRegI(env);
+ addInstr(env, ARM64Instr_Imm64(rTmp, 0));
+ ARM64RIL* one = mb_mkARM64RIL_I(1);
+ vassert(one);
+ addInstr(env, ARM64Instr_Test(rTmp, one));
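+ /* Since rTmp holds zero, the TST against 1 always sets Z, so EQ is an
+ always-true condition here and NE an always-false one. */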
+ return e->Iex.Const.con->Ico.U1 ? ARM64cc_EQ : ARM64cc_NE;
+ }
+
/* Not1(e) */
if (e->tag == Iex_Unop && e->Iex.Unop.op == Iop_Not1) {
/* Generate code for the arg, and negate the test condition */
}
}
+ /* --- And1(x,y), Or1(x,y) --- */
+ /* FIXME: We could (and probably should) do a lot better here, by using the
+ iselCondCode_C/_R scheme used in the amd64 insn selector. */
+ if (e->tag == Iex_Binop
+ && (e->Iex.Binop.op == Iop_And1 || e->Iex.Binop.op == Iop_Or1)) {
+ HReg x_as_64 = newVRegI(env);
+ ARM64CondCode cc_x = iselCondCode(env, e->Iex.Binop.arg1);
+ addInstr(env, ARM64Instr_Set64(x_as_64, cc_x));
+
+ HReg y_as_64 = newVRegI(env);
+ ARM64CondCode cc_y = iselCondCode(env, e->Iex.Binop.arg2);
+ addInstr(env, ARM64Instr_Set64(y_as_64, cc_y));
+
+ HReg tmp = newVRegI(env);
+ ARM64LogicOp lop
+ = e->Iex.Binop.op == Iop_And1 ? ARM64lo_AND : ARM64lo_OR;
+ addInstr(env, ARM64Instr_Logic(tmp, x_as_64, ARM64RIL_R(y_as_64), lop));
+
+ ARM64RIL* one = mb_mkARM64RIL_I(1);
+ vassert(one);
+ addInstr(env, ARM64Instr_Test(tmp, one));
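+ /* Each Set64 result is 0 or 1, so bit 0 of 'tmp' holds the And1/Or1
+ result, and the TST against 1 makes NE signal truth. */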
+
+ return ARM64cc_NE;
+ }
+
ppIRExpr(e);
vpanic("iselCondCode");
}
} /* if (e->tag == Iex_Triop) */
+ if (0 && e->tag == Iex_ITE) {
+ /* JRS 2019Nov24: I think this is right, and it is somewhat tested, but
+ not as much as I'd like. Hence disabled till it can be tested more. */
+ // This is pretty feeble. We'd do better to generate BSL here.
+ HReg rX = newVRegI(env);
+
+ ARM64CondCode cc = iselCondCode(env, e->Iex.ITE.cond);
+ addInstr(env, ARM64Instr_Set64(rX, cc));
+ // cond: rX = 1 !cond: rX = 0
+
+ // Mask the Set64 result. This is paranoia (should be unnecessary).
+ ARM64RIL* one = mb_mkARM64RIL_I(1);
+ vassert(one);
+ addInstr(env, ARM64Instr_Logic(rX, rX, one, ARM64lo_AND));
+ // cond: rX = 1 !cond: rX = 0
+
+ // Propagate bit 0 to all bits of the 64-bit word by subtracting 1 from
+ // it. This also inverts the sense of the value.
+ addInstr(env, ARM64Instr_Arith(rX, rX, ARM64RIA_I12(1,0),
+ /*isAdd=*/False));
+ // cond: rX = 0-(62)-0 !cond: rX = 1-(62)-1
+
+ // Duplicate rX into a vector register
+ HReg vMask = newVRegV(env);
+ addInstr(env, ARM64Instr_VQfromXX(vMask, rX, rX));
+ // cond: vMask = 0-(126)-0 !cond: vMask = 1-(126)-1
+
+ HReg vIfTrue = iselV128Expr(env, e->Iex.ITE.iftrue);
+ HReg vIfFalse = iselV128Expr(env, e->Iex.ITE.iffalse);
+
+ // Mask out iffalse value as needed
+ addInstr(env,
+ ARM64Instr_VBinV(ARM64vecb_AND, vIfFalse, vIfFalse, vMask));
+
+ // Invert the mask so we can use it for the iftrue value
+ addInstr(env, ARM64Instr_VUnaryV(ARM64vecu_NOT, vMask, vMask));
+ // cond: vMask = 1-(126)-1 !cond: vMask = 0-(126)-0
+
+ // Mask out iftrue value as needed
+ addInstr(env,
+ ARM64Instr_VBinV(ARM64vecb_AND, vIfTrue, vIfTrue, vMask));
+
+ // Merge the masked iftrue and iffalse results.
+ HReg res = newVRegV(env);
+ addInstr(env, ARM64Instr_VBinV(ARM64vecb_ORR, res, vIfTrue, vIfFalse));
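+ // Net effect: res = cond ? iftrue : iffalse, computed bitwise as
+ // (iftrue & ~mask) | (iffalse & mask), where 'mask' is the pre-inversion
+ // vMask (all zeroes when cond holds, all ones otherwise).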
+
+ return res;
+ }
+
v128_expr_bad:
ppIRExpr(e);
vpanic("iselV128Expr_wrk");