Add support for conditional loads and stores.
git-svn-id: svn://svn.valgrind.org/vex/branches/COMEM@2571
i->ARMin.Imm32.imm32 = imm32;
return i;
}
-ARMInstr* ARMInstr_LdSt32 ( Bool isLoad, HReg rD, ARMAMode1* amode ) {
+ARMInstr* ARMInstr_LdSt32 ( ARMCondCode cc,
+ Bool isLoad, HReg rD, ARMAMode1* amode ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_LdSt32;
+ i->ARMin.LdSt32.cc = cc;
i->ARMin.LdSt32.isLoad = isLoad;
i->ARMin.LdSt32.rD = rD;
i->ARMin.LdSt32.amode = amode;
+ vassert(cc != ARMcc_NV);
return i;
}
-ARMInstr* ARMInstr_LdSt16 ( Bool isLoad, Bool signedLoad,
+ARMInstr* ARMInstr_LdSt16 ( ARMCondCode cc,
+ Bool isLoad, Bool signedLoad,
HReg rD, ARMAMode2* amode ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_LdSt16;
+ i->ARMin.LdSt16.cc = cc;
i->ARMin.LdSt16.isLoad = isLoad;
i->ARMin.LdSt16.signedLoad = signedLoad;
i->ARMin.LdSt16.rD = rD;
i->ARMin.LdSt16.amode = amode;
+ vassert(cc != ARMcc_NV);
return i;
}
-ARMInstr* ARMInstr_LdSt8U ( Bool isLoad, HReg rD, ARMAMode1* amode ) {
+ARMInstr* ARMInstr_LdSt8U ( ARMCondCode cc,
+ Bool isLoad, HReg rD, ARMAMode1* amode ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_LdSt8U;
+ i->ARMin.LdSt8U.cc = cc;
i->ARMin.LdSt8U.isLoad = isLoad;
i->ARMin.LdSt8U.rD = rD;
i->ARMin.LdSt8U.amode = amode;
+ vassert(cc != ARMcc_NV);
+ return i;
+}
+ARMInstr* ARMInstr_Ld8S ( ARMCondCode cc, HReg rD, ARMAMode2* amode ) {
+ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ i->tag = ARMin_Ld8S;
+ i->ARMin.Ld8S.cc = cc;
+ i->ARMin.Ld8S.rD = rD;
+ i->ARMin.Ld8S.amode = amode;
+ vassert(cc != ARMcc_NV);
return i;
}
ARMInstr* ARMInstr_XDirect ( Addr32 dstGA, ARMAMode1* amR15T,
vassert(cond != ARMcc_AL);
return i;
}
-ARMInstr* ARMInstr_Call ( ARMCondCode cond, HWord target, Int nArgRegs ) {
+ARMInstr* ARMInstr_Call ( ARMCondCode cond, HWord target, Int nArgRegs,
+ RetLoc rloc ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_Call;
i->ARMin.Call.cond = cond;
i->ARMin.Call.target = target;
i->ARMin.Call.nArgRegs = nArgRegs;
+ i->ARMin.Call.rloc = rloc;
+ vassert(rloc != RetLocINVALID);
return i;
}
ARMInstr* ARMInstr_Mul ( ARMMulOp op ) {
return;
case ARMin_LdSt32:
if (i->ARMin.LdSt32.isLoad) {
- vex_printf("ldr ");
+ vex_printf("ldr%s ", i->ARMin.LdSt32.cc == ARMcc_AL ? " "
+ : showARMCondCode(i->ARMin.LdSt32.cc));
ppHRegARM(i->ARMin.LdSt32.rD);
vex_printf(", ");
ppARMAMode1(i->ARMin.LdSt32.amode);
} else {
- vex_printf("str ");
+ vex_printf("str%s ", i->ARMin.LdSt32.cc == ARMcc_AL ? " "
+ : showARMCondCode(i->ARMin.LdSt32.cc));
ppARMAMode1(i->ARMin.LdSt32.amode);
vex_printf(", ");
ppHRegARM(i->ARMin.LdSt32.rD);
return;
case ARMin_LdSt16:
if (i->ARMin.LdSt16.isLoad) {
- vex_printf("%s", i->ARMin.LdSt16.signedLoad
- ? "ldrsh " : "ldrh " );
+ vex_printf("%s%s%s",
+ i->ARMin.LdSt16.signedLoad ? "ldrsh" : "ldrh",
+ i->ARMin.LdSt16.cc == ARMcc_AL ? " "
+ : showARMCondCode(i->ARMin.LdSt16.cc),
+ i->ARMin.LdSt16.signedLoad ? " " : " ");
ppHRegARM(i->ARMin.LdSt16.rD);
vex_printf(", ");
ppARMAMode2(i->ARMin.LdSt16.amode);
} else {
- vex_printf("strh ");
+ vex_printf("strh%s ",
+ i->ARMin.LdSt16.cc == ARMcc_AL ? " "
+ : showARMCondCode(i->ARMin.LdSt16.cc));
ppARMAMode2(i->ARMin.LdSt16.amode);
vex_printf(", ");
ppHRegARM(i->ARMin.LdSt16.rD);
return;
case ARMin_LdSt8U:
if (i->ARMin.LdSt8U.isLoad) {
- vex_printf("ldrb ");
+ vex_printf("ldrb%s ", i->ARMin.LdSt8U.cc == ARMcc_AL ? " "
+ : showARMCondCode(i->ARMin.LdSt8U.cc));
ppHRegARM(i->ARMin.LdSt8U.rD);
vex_printf(", ");
ppARMAMode1(i->ARMin.LdSt8U.amode);
} else {
- vex_printf("strb ");
+ vex_printf("strb%s ", i->ARMin.LdSt8U.cc == ARMcc_AL ? " "
+ : showARMCondCode(i->ARMin.LdSt8U.cc));
ppARMAMode1(i->ARMin.LdSt8U.amode);
vex_printf(", ");
ppHRegARM(i->ARMin.LdSt8U.rD);
}
return;
case ARMin_Ld8S:
- goto unhandled;
+ vex_printf("ldrsb%s ", i->ARMin.Ld8S.cc == ARMcc_AL ? " "
+ : showARMCondCode(i->ARMin.Ld8S.cc));
+ ppARMAMode2(i->ARMin.Ld8S.amode);
+ vex_printf(", ");
+ ppHRegARM(i->ARMin.Ld8S.rD);
+ return;
case ARMin_XDirect:
vex_printf("(xDirect) ");
vex_printf("if (%%cpsr.%s) { ",
vex_printf("call%s ",
i->ARMin.Call.cond==ARMcc_AL
? "" : showARMCondCode(i->ARMin.Call.cond));
- vex_printf("0x%lx [nArgRegs=%d]",
+ vex_printf("0x%lx [nArgRegs=%d, ",
i->ARMin.Call.target, i->ARMin.Call.nArgRegs);
+ ppRetLoc(i->ARMin.Call.rloc);
+ vex_printf("]");
return;
case ARMin_Mul:
vex_printf("%-5s ", showARMMulOp(i->ARMin.Mul.op));
"str r11,[r12+4]");
return;
default:
- unhandled:
vex_printf("ppARMInstr: unhandled case (tag %d)", (Int)i->tag);
vpanic("ppARMInstr(1)");
return;
addRegUsage_ARMAMode1(u, i->ARMin.LdSt32.amode);
if (i->ARMin.LdSt32.isLoad) {
addHRegUse(u, HRmWrite, i->ARMin.LdSt32.rD);
+ if (i->ARMin.LdSt32.cc != ARMcc_AL)
+ addHRegUse(u, HRmRead, i->ARMin.LdSt32.rD);
} else {
addHRegUse(u, HRmRead, i->ARMin.LdSt32.rD);
}
addRegUsage_ARMAMode2(u, i->ARMin.LdSt16.amode);
if (i->ARMin.LdSt16.isLoad) {
addHRegUse(u, HRmWrite, i->ARMin.LdSt16.rD);
+ if (i->ARMin.LdSt16.cc != ARMcc_AL)
+ addHRegUse(u, HRmRead, i->ARMin.LdSt16.rD);
} else {
addHRegUse(u, HRmRead, i->ARMin.LdSt16.rD);
}
addRegUsage_ARMAMode1(u, i->ARMin.LdSt8U.amode);
if (i->ARMin.LdSt8U.isLoad) {
addHRegUse(u, HRmWrite, i->ARMin.LdSt8U.rD);
+ if (i->ARMin.LdSt8U.cc != ARMcc_AL)
+ addHRegUse(u, HRmRead, i->ARMin.LdSt8U.rD);
} else {
addHRegUse(u, HRmRead, i->ARMin.LdSt8U.rD);
}
return;
case ARMin_Ld8S:
- goto unhandled;
+ addRegUsage_ARMAMode2(u, i->ARMin.Ld8S.amode);
+ addHRegUse(u, HRmWrite, i->ARMin.Ld8S.rD);
+ if (i->ARMin.Ld8S.cc != ARMcc_AL)
+ addHRegUse(u, HRmRead, i->ARMin.Ld8S.rD);
+ return;
/* XDirect/XIndir/XAssisted are also a bit subtle. They
conditionally exit the block. Hence we only need to list (1)
the registers that they read, and (2) the registers that they
addHRegUse(u, HRmWrite, hregARM_R12());
addHRegUse(u, HRmWrite, hregARM_R11());
return;
- unhandled:
default:
ppARMInstr(i);
vpanic("getRegUsage_ARMInstr");
mapRegs_ARMAMode1(m, i->ARMin.LdSt8U.amode);
return;
case ARMin_Ld8S:
- goto unhandled;
+ i->ARMin.Ld8S.rD = lookupHRegRemap(m, i->ARMin.Ld8S.rD);
+ mapRegs_ARMAMode2(m, i->ARMin.Ld8S.amode);
+ return;
case ARMin_XDirect:
mapRegs_ARMAMode1(m, i->ARMin.XDirect.amR15T);
return;
case ARMin_ProfInc:
/* hardwires r11 and r12 -- nothing to modify. */
return;
- unhandled:
default:
ppARMInstr(i);
vpanic("mapRegs_ARMInstr");
switch (rclass) {
case HRcInt32:
vassert(offsetB <= 4095);
- *i1 = ARMInstr_LdSt32( False/*!isLoad*/,
+ *i1 = ARMInstr_LdSt32( ARMcc_AL, False/*!isLoad*/,
rreg,
ARMAMode1_RI(hregARM_R8(), offsetB) );
return;
switch (rclass) {
case HRcInt32:
vassert(offsetB <= 4095);
- *i1 = ARMInstr_LdSt32( True/*isLoad*/,
+ *i1 = ARMInstr_LdSt32( ARMcc_AL, True/*isLoad*/,
rreg,
ARMAMode1_RI(hregARM_R8(), offsetB) );
return;
}
case ARMin_LdSt32:
case ARMin_LdSt8U: {
- UInt bL, bB;
- HReg rD;
- ARMAMode1* am;
+ UInt bL, bB;
+ HReg rD;
+ ARMAMode1* am;
+ ARMCondCode cc;
if (i->tag == ARMin_LdSt32) {
bB = 0;
bL = i->ARMin.LdSt32.isLoad ? 1 : 0;
am = i->ARMin.LdSt32.amode;
rD = i->ARMin.LdSt32.rD;
+ cc = i->ARMin.LdSt32.cc;
} else {
bB = 1;
bL = i->ARMin.LdSt8U.isLoad ? 1 : 0;
am = i->ARMin.LdSt8U.amode;
rD = i->ARMin.LdSt8U.rD;
+ cc = i->ARMin.LdSt8U.cc;
}
+ vassert(cc != ARMcc_NV);
if (am->tag == ARMam1_RI) {
Int simm12;
UInt instr, bP;
simm12 = am->ARMam1.RI.simm13;
}
vassert(simm12 >= 0 && simm12 <= 4095);
- instr = XXXXX___(X1110,X0101,BITS4(bP,bB,0,bL),
+ instr = XXXXX___(cc,X0101,BITS4(bP,bB,0,bL),
iregNo(am->ARMam1.RI.reg),
iregNo(rD));
instr |= simm12;
}
}
case ARMin_LdSt16: {
- HReg rD = i->ARMin.LdSt16.rD;
- UInt bS = i->ARMin.LdSt16.signedLoad ? 1 : 0;
- UInt bL = i->ARMin.LdSt16.isLoad ? 1 : 0;
- ARMAMode2* am = i->ARMin.LdSt16.amode;
+ HReg rD = i->ARMin.LdSt16.rD;
+ UInt bS = i->ARMin.LdSt16.signedLoad ? 1 : 0;
+ UInt bL = i->ARMin.LdSt16.isLoad ? 1 : 0;
+ ARMAMode2* am = i->ARMin.LdSt16.amode;
+ ARMCondCode cc = i->ARMin.LdSt16.cc;
+ vassert(cc != ARMcc_NV);
if (am->tag == ARMam2_RI) {
HReg rN = am->ARMam2.RI.reg;
Int simm8;
vassert(!(bL == 0 && bS == 1)); // "! signed store"
/**/ if (bL == 0 && bS == 0) {
// strh
- instr = XXXXXXXX(X1110,X0001, BITS4(bP,1,0,0), iregNo(rN),
+ instr = XXXXXXXX(cc,X0001, BITS4(bP,1,0,0), iregNo(rN),
iregNo(rD), imm8hi, X1011, imm8lo);
*p++ = instr;
goto done;
}
else if (bL == 1 && bS == 0) {
// ldrh
- instr = XXXXXXXX(X1110,X0001, BITS4(bP,1,0,1), iregNo(rN),
+ instr = XXXXXXXX(cc,X0001, BITS4(bP,1,0,1), iregNo(rN),
iregNo(rD), imm8hi, X1011, imm8lo);
*p++ = instr;
goto done;
}
else if (bL == 1 && bS == 1) {
- goto bad;
+ // ldrsh
+ instr = XXXXXXXX(cc,X0001, BITS4(bP,1,0,1), iregNo(rN),
+ iregNo(rD), imm8hi, X1111, imm8lo);
+ *p++ = instr;
+ goto done;
}
else vassert(0); // ill-constructed insn
} else {
goto bad;
}
}
- case ARMin_Ld8S:
- goto bad;
+ case ARMin_Ld8S: {
+ HReg rD = i->ARMin.Ld8S.rD;
+ ARMAMode2* am = i->ARMin.Ld8S.amode;
+ ARMCondCode cc = i->ARMin.Ld8S.cc;
+ vassert(cc != ARMcc_NV);
+ if (am->tag == ARMam2_RI) {
+ HReg rN = am->ARMam2.RI.reg;
+ Int simm8;
+ UInt bP, imm8hi, imm8lo, instr;
+ if (am->ARMam2.RI.simm9 < 0) {
+ bP = 0;
+ simm8 = -am->ARMam2.RI.simm9;
+ } else {
+ bP = 1;
+ simm8 = am->ARMam2.RI.simm9;
+ }
+ vassert(simm8 >= 0 && simm8 <= 255);
+ imm8hi = (simm8 >> 4) & 0xF;
+ imm8lo = simm8 & 0xF;
+ // ldrsb
+ instr = XXXXXXXX(cc,X0001, BITS4(bP,1,0,1), iregNo(rN),
+ iregNo(rD), imm8hi, X1101, imm8lo);
+ *p++ = instr;
+ goto done;
+ } else {
+ // RR case
+ goto bad;
+ }
+ }
case ARMin_XDirect: {
/* NB: what goes on here has to be very closely coordinated
*p++ = instr;
goto done;
}
+
case ARMin_Call: {
UInt instr;
/* Decide on a scratch reg used to hold to the call address.
case 4: scratchNo = 11; break;
default: vassert(0);
}
- // r"scratchNo" = &target
- p = imm32_to_iregNo( (UInt*)p,
- scratchNo, (UInt)i->ARMin.Call.target );
- // blx{cond} r"scratchNo"
- instr = XXX___XX(i->ARMin.Call.cond, X0001, X0010, /*___*/
- X0011, scratchNo);
- instr |= 0xFFF << 8; // stick in the SBOnes
- *p++ = instr;
+ /* If we don't need to do any fixup actions in the case that
+ the call doesn't happen, just do the simple thing and emit
+ straight-line code. We hope this is the common case. */
+ if (i->ARMin.Call.cond == ARMcc_AL/*call always happens*/
+ || i->ARMin.Call.rloc == RetLocNone/*no fixup action*/) {
+ // r"scratchNo" = &target
+ p = imm32_to_iregNo( (UInt*)p,
+ scratchNo, (UInt)i->ARMin.Call.target );
+ // blx{cond} r"scratchNo"
+ instr = XXX___XX(i->ARMin.Call.cond, X0001, X0010, /*___*/
+ X0011, scratchNo);
+ instr |= 0xFFF << 8; // stick in the SBOnes
+ *p++ = instr;
+ } else {
+ Int delta;
+ /* Complex case. We have to generate an if-then-else
+ diamond. */
+ // before:
+ // b{!cond} else:
+ // r"scratchNo" = &target
+ // blx{AL} r"scratchNo"
+ // preElse:
+ // b after:
+ // else:
+ // mvn r0, #0 // possibly
+ // mvn r1, #0 // possibly
+ // after:
+
+ // before:
+ UInt* pBefore = p;
+
+ // b{!cond} else: // ptmp1 points here
+ *p++ = 0; // filled in later
+
+ // r"scratchNo" = &target
+ p = imm32_to_iregNo( (UInt*)p,
+ scratchNo, (UInt)i->ARMin.Call.target );
+
+ // blx{AL} r"scratchNo"
+ instr = XXX___XX(ARMcc_AL, X0001, X0010, /*___*/
+ X0011, scratchNo);
+ instr |= 0xFFF << 8; // stick in the SBOnes
+ *p++ = instr;
+
+ // preElse:
+ UInt* pPreElse = p;
+
+ // b after:
+ *p++ = 0; // filled in later
+
+ // else:
+ delta = (UChar*)p - (UChar*)pBefore;
+ delta = (delta >> 2) - 2;
+ *pBefore
+ = XX______(1 ^ i->ARMin.Call.cond, X1010) | (delta & 0xFFFFFF);
+
+ /* Do the 'else' actions */
+ switch (i->ARMin.Call.rloc) {
+ case RetLocInt:
+ *p++ = 0xE3E00000; break; // mvn r0, #0
+ case RetLoc2Int:
+ // mvn r0, #0 ; mvn r1, #0
+ vassert(0); //ATC
+ *p++ = 0xE3E00000; *p++ = 0xE3E01000; break;
+ case RetLocNone:
+ case RetLocINVALID:
+ default:
+ vassert(0);
+ }
+
+ // after:
+ delta = (UChar*)p - (UChar*)pPreElse;
+ delta = (delta >> 2) - 2;
+ *pPreElse = XX______(ARMcc_AL, X1010) | (delta & 0xFFFFFF);
+ }
+
goto done;
}
case ARMin_Mul: {
HReg dst;
UInt imm32;
} Imm32;
- /* 32-bit load or store */
+ /* 32-bit load or store, may be conditional */
struct {
- Bool isLoad;
- HReg rD;
- ARMAMode1* amode;
+ ARMCondCode cc; /* ARMcc_NV is not allowed */
+ Bool isLoad;
+ HReg rD;
+ ARMAMode1* amode;
} LdSt32;
- /* 16-bit load or store */
+ /* 16-bit load or store, may be conditional */
struct {
- Bool isLoad;
- Bool signedLoad;
- HReg rD;
- ARMAMode2* amode;
+ ARMCondCode cc; /* ARMcc_NV is not allowed */
+ Bool isLoad;
+ Bool signedLoad;
+ HReg rD;
+ ARMAMode2* amode;
} LdSt16;
- /* 8-bit (unsigned) load or store */
+ /* 8-bit (unsigned) load or store, may be conditional */
struct {
- Bool isLoad;
- HReg rD;
- ARMAMode1* amode;
+ ARMCondCode cc; /* ARMcc_NV is not allowed */
+ Bool isLoad;
+ HReg rD;
+ ARMAMode1* amode;
} LdSt8U;
- /* 8-bit signed load */
+ /* 8-bit signed load, may be conditional */
struct {
- HReg rD;
- ARMAMode2* amode;
+ ARMCondCode cc; /* ARMcc_NV is not allowed */
+ HReg rD;
+ ARMAMode2* amode;
} Ld8S;
/* Update the guest R15T value, then exit requesting to chain
to it. May be conditional. Urr, use of Addr32 implicitly
ARMCondCode cond;
HWord target;
Int nArgRegs; /* # regs carrying args: 0 .. 4 */
+ RetLoc rloc; /* where the return value will be */
} Call;
/* (PLAIN) 32 * 32 -> 32: r0 = r2 * r3
(ZX) 32 *u 32 -> 64: r1:r0 = r2 *u r3
extern ARMInstr* ARMInstr_CmpOrTst ( Bool isCmp, HReg, ARMRI84* );
extern ARMInstr* ARMInstr_Mov ( HReg, ARMRI84* );
extern ARMInstr* ARMInstr_Imm32 ( HReg, UInt );
-extern ARMInstr* ARMInstr_LdSt32 ( Bool isLoad, HReg, ARMAMode1* );
-extern ARMInstr* ARMInstr_LdSt16 ( Bool isLoad, Bool signedLoad,
+extern ARMInstr* ARMInstr_LdSt32 ( ARMCondCode,
+ Bool isLoad, HReg, ARMAMode1* );
+extern ARMInstr* ARMInstr_LdSt16 ( ARMCondCode,
+ Bool isLoad, Bool signedLoad,
HReg, ARMAMode2* );
-extern ARMInstr* ARMInstr_LdSt8U ( Bool isLoad, HReg, ARMAMode1* );
-extern ARMInstr* ARMInstr_Ld8S ( HReg, ARMAMode2* );
+extern ARMInstr* ARMInstr_LdSt8U ( ARMCondCode,
+ Bool isLoad, HReg, ARMAMode1* );
+extern ARMInstr* ARMInstr_Ld8S ( ARMCondCode, HReg, ARMAMode2* );
extern ARMInstr* ARMInstr_XDirect ( Addr32 dstGA, ARMAMode1* amR15T,
ARMCondCode cond, Bool toFastEP );
extern ARMInstr* ARMInstr_XIndir ( HReg dstGA, ARMAMode1* amR15T,
extern ARMInstr* ARMInstr_XAssisted ( HReg dstGA, ARMAMode1* amR15T,
ARMCondCode cond, IRJumpKind jk );
extern ARMInstr* ARMInstr_CMov ( ARMCondCode, HReg dst, ARMRI84* src );
-extern ARMInstr* ARMInstr_Call ( ARMCondCode, HWord, Int nArgRegs );
+extern ARMInstr* ARMInstr_Call ( ARMCondCode, HWord, Int nArgRegs,
+ RetLoc rloc );
extern ARMInstr* ARMInstr_Mul ( ARMMulOp op );
extern ARMInstr* ARMInstr_LdrEX ( Int szB );
extern ARMInstr* ARMInstr_StrEX ( Int szB );
static
Bool doHelperCall ( ISelEnv* env,
Bool passBBP,
- IRExpr* guard, IRCallee* cee, IRExpr** args )
+ IRExpr* guard, IRCallee* cee, IRExpr** args,
+ RetLoc rloc )
{
ARMCondCode cc;
HReg argregs[ARM_N_ARGREGS];
values. But that's too much hassle. */
/* Finally, the call itself. */
- addInstr(env, ARMInstr_Call( cc, target, nextArgReg ));
+ addInstr(env, ARMInstr_Call( cc, target, nextArgReg, rloc ));
return True; /* success */
}
{
IRType ty = typeOfIRExpr(env->type_env,e);
vassert(ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8);
-// vassert(ty == Ity_I64 || ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8);
switch (e->tag) {
if (ty == Ity_I32) {
ARMAMode1* amode = iselIntExpr_AMode1 ( env, e->Iex.Load.addr );
- addInstr(env, ARMInstr_LdSt32(True/*isLoad*/, dst, amode));
+ addInstr(env, ARMInstr_LdSt32(ARMcc_AL, True/*isLoad*/, dst, amode));
return dst;
}
if (ty == Ity_I16) {
ARMAMode2* amode = iselIntExpr_AMode2 ( env, e->Iex.Load.addr );
- addInstr(env, ARMInstr_LdSt16(True/*isLoad*/, False/*!signedLoad*/,
+ addInstr(env, ARMInstr_LdSt16(ARMcc_AL,
+ True/*isLoad*/, False/*!signedLoad*/,
dst, amode));
return dst;
}
if (ty == Ity_I8) {
ARMAMode1* amode = iselIntExpr_AMode1 ( env, e->Iex.Load.addr );
- addInstr(env, ARMInstr_LdSt8U(True/*isLoad*/, dst, amode));
+ addInstr(env, ARMInstr_LdSt8U(ARMcc_AL, True/*isLoad*/, dst, amode));
return dst;
}
-
-//zz if (ty == Ity_I16) {
-//zz addInstr(env, X86Instr_LoadEX(2,False,amode,dst));
-//zz return dst;
-//zz }
-//zz if (ty == Ity_I8) {
-//zz addInstr(env, X86Instr_LoadEX(1,False,amode,dst));
-//zz return dst;
-//zz }
break;
}
HReg res = newVRegI(env);
addInstr(env, mk_iMOVds_RR(hregARM_R0(), regL));
addInstr(env, mk_iMOVds_RR(hregARM_R1(), regR));
- addInstr(env, ARMInstr_Call( ARMcc_AL, (HWord)Ptr_to_ULong(fn), 2 ));
+ addInstr(env, ARMInstr_Call( ARMcc_AL, (HWord)Ptr_to_ULong(fn),
+ 2, RetLocInt ));
addInstr(env, mk_iMOVds_RR(res, hregARM_R0()));
return res;
}
HReg arg = iselIntExpr_R(env, e->Iex.Unop.arg);
HReg res = newVRegI(env);
addInstr(env, mk_iMOVds_RR(hregARM_R0(), arg));
- addInstr(env, ARMInstr_Call( ARMcc_AL, (HWord)Ptr_to_ULong(fn), 1 ));
+ addInstr(env, ARMInstr_Call( ARMcc_AL, (HWord)Ptr_to_ULong(fn),
+ 1, RetLocInt ));
addInstr(env, mk_iMOVds_RR(res, hregARM_R0()));
return res;
}
&& e->Iex.Get.offset < 4096-4) {
HReg dst = newVRegI(env);
addInstr(env, ARMInstr_LdSt32(
- True/*isLoad*/,
+ ARMcc_AL, True/*isLoad*/,
dst,
ARMAMode1_RI(hregARM_R8(), e->Iex.Get.offset)));
return dst;
HReg dst = newVRegI(env);
vassert(ty == e->Iex.CCall.retty);
- /* be very restrictive for now. Only 32/64-bit ints allowed
- for args, and 32 bits for return type. */
+ /* be very restrictive for now. Only 32/64-bit ints allowed for
+ args, and 32 bits for return type. Don't forget to change
+ the RetLoc if more types are allowed in future. */
if (e->Iex.CCall.retty != Ity_I32)
goto irreducible;
/* Marshal args, do the call, clear stack. */
Bool ok = doHelperCall( env, False,
- NULL, e->Iex.CCall.cee, e->Iex.CCall.args );
+ NULL, e->Iex.CCall.cee, e->Iex.CCall.args,
+ RetLocInt );
if (ok) {
addInstr(env, mk_iMOVds_RR(dst, hregARM_R0()));
return dst;
rA = iselIntExpr_R(env, e->Iex.Load.addr);
tHi = newVRegI(env);
tLo = newVRegI(env);
- addInstr(env, ARMInstr_LdSt32(True/*isLoad*/, tHi, ARMAMode1_RI(rA, 4)));
- addInstr(env, ARMInstr_LdSt32(True/*isLoad*/, tLo, ARMAMode1_RI(rA, 0)));
+ addInstr(env, ARMInstr_LdSt32(ARMcc_AL, True/*isLoad*/,
+ tHi, ARMAMode1_RI(rA, 4)));
+ addInstr(env, ARMInstr_LdSt32(ARMcc_AL, True/*isLoad*/,
+ tLo, ARMAMode1_RI(rA, 0)));
*rHi = tHi;
*rLo = tLo;
return;
ARMAMode1* am4 = ARMAMode1_RI(hregARM_R8(), e->Iex.Get.offset + 4);
HReg tHi = newVRegI(env);
HReg tLo = newVRegI(env);
- addInstr(env, ARMInstr_LdSt32(True/*isLoad*/, tHi, am4));
- addInstr(env, ARMInstr_LdSt32(True/*isLoad*/, tLo, am0));
+ addInstr(env, ARMInstr_LdSt32(ARMcc_AL, True/*isLoad*/, tHi, am4));
+ addInstr(env, ARMInstr_LdSt32(ARMcc_AL, True/*isLoad*/, tLo, am0));
*rHi = tHi;
*rLo = tLo;
return;
/* Store the less significant 64 bits */
iselInt64Expr(&w1, &w0, env, e->Iex.Binop.arg2);
- addInstr(env, ARMInstr_LdSt32(False/*store*/, w0, sp_0));
- addInstr(env, ARMInstr_LdSt32(False/*store*/, w1, sp_4));
+ addInstr(env, ARMInstr_LdSt32(ARMcc_AL, False/*store*/,
+ w0, sp_0));
+ addInstr(env, ARMInstr_LdSt32(ARMcc_AL, False/*store*/,
+ w1, sp_4));
/* Store the more significant 64 bits */
iselInt64Expr(&w3, &w2, env, e->Iex.Binop.arg1);
- addInstr(env, ARMInstr_LdSt32(False/*store*/, w2, sp_8));
- addInstr(env, ARMInstr_LdSt32(False/*store*/, w3, sp_12));
+ addInstr(env, ARMInstr_LdSt32(ARMcc_AL, False/*store*/,
+ w2, sp_8));
+ addInstr(env, ARMInstr_LdSt32(ARMcc_AL, False/*store*/,
+ w3, sp_12));
/* Load result back from stack. */
addInstr(env, ARMInstr_NLdStQ(True/*load*/, res,
if (tyd == Ity_I32) {
HReg rD = iselIntExpr_R(env, stmt->Ist.Store.data);
ARMAMode1* am = iselIntExpr_AMode1(env, stmt->Ist.Store.addr);
- addInstr(env, ARMInstr_LdSt32(False/*!isLoad*/, rD, am));
+ addInstr(env, ARMInstr_LdSt32(ARMcc_AL, False/*!isLoad*/, rD, am));
return;
}
if (tyd == Ity_I16) {
HReg rD = iselIntExpr_R(env, stmt->Ist.Store.data);
ARMAMode2* am = iselIntExpr_AMode2(env, stmt->Ist.Store.addr);
- addInstr(env, ARMInstr_LdSt16(False/*!isLoad*/,
+ addInstr(env, ARMInstr_LdSt16(ARMcc_AL,
+ False/*!isLoad*/,
False/*!isSignedLoad*/, rD, am));
return;
}
if (tyd == Ity_I8) {
HReg rD = iselIntExpr_R(env, stmt->Ist.Store.data);
ARMAMode1* am = iselIntExpr_AMode1(env, stmt->Ist.Store.addr);
- addInstr(env, ARMInstr_LdSt8U(False/*!isLoad*/, rD, am));
+ addInstr(env, ARMInstr_LdSt8U(ARMcc_AL, False/*!isLoad*/, rD, am));
return;
}
if (tyd == Ity_I64) {
HReg rDhi, rDlo, rA;
iselInt64Expr(&rDhi, &rDlo, env, stmt->Ist.Store.data);
rA = iselIntExpr_R(env, stmt->Ist.Store.addr);
- addInstr(env, ARMInstr_LdSt32(False/*!load*/, rDhi,
+ addInstr(env, ARMInstr_LdSt32(ARMcc_AL, False/*!load*/, rDhi,
ARMAMode1_RI(rA,4)));
- addInstr(env, ARMInstr_LdSt32(False/*!load*/, rDlo,
+ addInstr(env, ARMInstr_LdSt32(ARMcc_AL, False/*!load*/, rDlo,
ARMAMode1_RI(rA,0)));
}
return;
break;
}
+ /* --------- CONDITIONAL STORE --------- */
+ /* conditional little-endian write to memory */
+ case Ist_StoreG: {
+ IRStoreG* sg = stmt->Ist.StoreG.details;
+ IRType tya = typeOfIRExpr(env->type_env, sg->addr);
+ IRType tyd = typeOfIRExpr(env->type_env, sg->data);
+ IREndness end = sg->end;
+
+ if (tya != Ity_I32 || end != Iend_LE)
+ goto stmt_fail;
+
+ switch (tyd) {
+ case Ity_I8:
+ case Ity_I32: {
+ HReg rD = iselIntExpr_R(env, sg->data);
+ ARMAMode1* am = iselIntExpr_AMode1(env, sg->addr);
+ ARMCondCode cc = iselCondCode(env, sg->guard);
+ addInstr(env, (tyd == Ity_I32 ? ARMInstr_LdSt32 : ARMInstr_LdSt8U)
+ (cc, False/*!isLoad*/, rD, am));
+ return;
+ }
+ case Ity_I16: {
+ HReg rD = iselIntExpr_R(env, sg->data);
+ ARMAMode2* am = iselIntExpr_AMode2(env, sg->addr);
+ ARMCondCode cc = iselCondCode(env, sg->guard);
+ addInstr(env, ARMInstr_LdSt16(cc,
+ False/*!isLoad*/,
+ False/*!isSignedLoad*/, rD, am));
+ return;
+ }
+ default:
+ break;
+ }
+ break;
+ }
+
+ /* --------- CONDITIONAL LOAD --------- */
+ /* conditional little-endian load from memory */
+ case Ist_LoadG: {
+ IRLoadG* lg = stmt->Ist.LoadG.details;
+ IRType tya = typeOfIRExpr(env->type_env, lg->addr);
+ IREndness end = lg->end;
+
+ if (tya != Ity_I32 || end != Iend_LE)
+ goto stmt_fail;
+
+ switch (lg->cvt) {
+ case ILGop_8Uto32:
+ case ILGop_Ident32: {
+ HReg rAlt = iselIntExpr_R(env, lg->alt);
+ ARMAMode1* am = iselIntExpr_AMode1(env, lg->addr);
+ HReg rD = lookupIRTemp(env, lg->dst);
+ addInstr(env, mk_iMOVds_RR(rD, rAlt));
+ ARMCondCode cc = iselCondCode(env, lg->guard);
+ addInstr(env, (lg->cvt == ILGop_Ident32 ? ARMInstr_LdSt32
+ : ARMInstr_LdSt8U)
+ (cc, True/*isLoad*/, rD, am));
+ return;
+ }
+ case ILGop_16Sto32:
+ case ILGop_16Uto32:
+ case ILGop_8Sto32: {
+ HReg rAlt = iselIntExpr_R(env, lg->alt);
+ ARMAMode2* am = iselIntExpr_AMode2(env, lg->addr);
+ HReg rD = lookupIRTemp(env, lg->dst);
+ addInstr(env, mk_iMOVds_RR(rD, rAlt));
+ ARMCondCode cc = iselCondCode(env, lg->guard);
+ if (lg->cvt == ILGop_8Sto32) {
+ addInstr(env, ARMInstr_Ld8S(cc, rD, am));
+ } else {
+ vassert(lg->cvt == ILGop_16Sto32 || lg->cvt == ILGop_16Uto32);
+ Bool sx = lg->cvt == ILGop_16Sto32;
+ addInstr(env, ARMInstr_LdSt16(cc, True/*isLoad*/, sx, rD, am));
+ }
+ return;
+ }
+ default:
+ break;
+ }
+ break;
+ }
+
/* --------- PUT --------- */
/* write guest state, fixed offset */
case Ist_Put: {
if (tyd == Ity_I32) {
HReg rD = iselIntExpr_R(env, stmt->Ist.Put.data);
ARMAMode1* am = ARMAMode1_RI(hregARM_R8(), stmt->Ist.Put.offset);
- addInstr(env, ARMInstr_LdSt32(False/*!isLoad*/, rD, am));
+ addInstr(env, ARMInstr_LdSt32(ARMcc_AL, False/*!isLoad*/, rD, am));
return;
}
if (tyd == Ity_I64) {
ARMAMode1* am4 = ARMAMode1_RI(hregARM_R8(),
stmt->Ist.Put.offset + 4);
iselInt64Expr(&rDhi, &rDlo, env, stmt->Ist.Put.data);
- addInstr(env, ARMInstr_LdSt32(False/*!isLoad*/, rDhi, am4));
- addInstr(env, ARMInstr_LdSt32(False/*!isLoad*/, rDlo, am0));
+ addInstr(env, ARMInstr_LdSt32(ARMcc_AL, False/*!isLoad*/,
+ rDhi, am4));
+ addInstr(env, ARMInstr_LdSt32(ARMcc_AL, False/*!isLoad*/,
+ rDlo, am0));
}
return;
}
break;
}
-//zz /* --------- Indexed PUT --------- */
-//zz /* write guest state, run-time offset */
-//zz case Ist_PutI: {
-//zz ARMAMode2* am2
-//zz = genGuestArrayOffset(
-//zz env, stmt->Ist.PutI.descr,
-//zz stmt->Ist.PutI.ix, stmt->Ist.PutI.bias );
-//zz
-//zz IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.PutI.data);
-//zz
-//zz if (tyd == Ity_I8) {
-//zz HReg reg = iselIntExpr_R(env, stmt->Ist.PutI.data);
-//zz addInstr(env, ARMInstr_StoreB(reg, am2));
-//zz return;
-//zz }
-//zz// CAB: Ity_I32, Ity_I16 ?
-//zz break;
-//zz }
-
/* --------- TMP --------- */
/* assign value to temporary */
case Ist_WrTmp: {
/* --------- Call to DIRTY helper --------- */
/* call complex ("dirty") helper function */
case Ist_Dirty: {
- IRType retty;
IRDirty* d = stmt->Ist.Dirty.details;
Bool passBBP = False;
passBBP = toBool(d->nFxState > 0 && d->needsBBP);
- /* Marshal args, do the call, clear stack. */
- Bool ok = doHelperCall( env, passBBP, d->guard, d->cee, d->args );
+ /* Figure out the return type, if any. */
+ IRType retty = Ity_INVALID;
+ if (d->tmp != IRTemp_INVALID)
+ retty = typeOfIRTemp(env->type_env, d->tmp);
+
+ /* Marshal args, do the call, clear stack, set the return value
+ to all-ones if this is a conditional call that returns a
+ value and the call is skipped. We need to set the ret-loc
+ correctly in order to implement the IRDirty semantics that
+ the return value is all-ones if the call doesn't happen. */
+ RetLoc rloc = RetLocINVALID;
+ switch (retty) {
+ case Ity_INVALID: /* function doesn't return anything */
+ rloc = RetLocNone; break;
+ case Ity_I64:
+ rloc = RetLoc2Int; break;
+ case Ity_I32: case Ity_I16: case Ity_I8:
+ rloc = RetLocInt; break;
+ default:
+ break;
+ }
+ if (rloc == RetLocINVALID)
+ break; /* will go to stmt_fail: */
+
+ Bool ok = doHelperCall( env, passBBP, d->guard, d->cee, d->args, rloc );
if (!ok)
break; /* will go to stmt_fail: */
/* No return value. Nothing to do. */
return;
- retty = typeOfIRTemp(env->type_env, d->tmp);
-
if (retty == Ity_I64) {
if (env->hwcaps & VEX_HWCAPS_ARM_NEON) {
HReg tmp = lookupIRTemp(env, d->tmp);
/* sanity ... */
vassert(arch_host == VexArchARM);
+ /* guard against unexpected space regressions */
+ vassert(sizeof(ARMInstr) <= 28);
+
/* hwcaps should not change from one ISEL call to another. */
arm_hwcaps = hwcaps_host; // JRS 2012 Mar 31: FIXME (RM)
}
+/*---------------------------------------------------------*/
+/*--- C-Call return-location actions ---*/
+/*---------------------------------------------------------*/
+
+void ppRetLoc ( RetLoc ska )
+{
+ switch (ska) {
+ case RetLocINVALID: vex_printf("RetLocINVALID"); return;
+ case RetLocNone: vex_printf("RetLocNone"); return;
+ case RetLocInt: vex_printf("RetLocInt"); return;
+ case RetLoc2Int: vex_printf("RetLoc2Int"); return;
+ default: vpanic("ppRetLoc");
+ }
+}
+
+
/*---------------------------------------------------------------*/
/*--- end host_generic_regs.c ---*/
/*---------------------------------------------------------------*/
extern void addHInstr ( HInstrArray*, HInstr* );
+/*---------------------------------------------------------*/
+/*--- C-Call return-location descriptions ---*/
+/*---------------------------------------------------------*/
+
/* Common to all back ends: describes where the return value of a C
   call is located.  This matters for conditional calls, because when
   the call is skipped the return location(s) must be set to all-ones,
   so the back end has to know which registers those are. */

typedef
   enum {
      RetLocINVALID, /* INVALID */
      RetLocNone,    /* no return value (a.k.a C "void") */
      RetLocInt,     /* in the primary int return reg */
      RetLoc2Int     /* in both primary and secondary int ret regs */
   }
   RetLoc;

extern void ppRetLoc ( RetLoc rloc );
+
+
/*---------------------------------------------------------*/
/*--- Reg alloc: TODO: move somewhere else ---*/
/*---------------------------------------------------------*/