return True;
}
+static IRExpr * bcd_sign_code_adjust( UInt ps, IRExpr * tmp)
+{
+ /* The Iop_BCDAdd and Iop_BCDSub will result in the corresponding Power PC
+ * instruction being issued with ps = 0. If ps = 1, the sign code, which
+ * is in the least significant four bits of the result, needs to be updated
+ * per the ISA:
+ *
+ * If PS=0, the sign code of the result is set to 0b1100.
+ * If PS=1, the sign code of the result is set to 0b1111.
+ *
+ * Note, the ps value is NOT being passed down to the instruction issue
+ * because passing a constant via triop() breaks the vbit-test test. The
+ * vbit-tester assumes it can set non-zero shadow bits for the triop()
+ * arguments. Thus they have to be expressions, not constants.
+ */
+ IRExpr *rtn;
+
+ if ( ps == 0 ) {
+ /* Instruction was issued with ps = 0, so the sign code is already
+ * correct; return the result unchanged. */
+ rtn = tmp;
+
+ } else {
+ /* Allocate the mask temp only on this path; the ps == 0 path does
+ * not use it, so allocating it unconditionally would add an unused
+ * IRTemp to the IRSB. */
+ IRTemp mask = newTemp(Ity_I64);
+
+ /* check if lower four bits are 0b1100, if so, change to 0b1111 */
+ assign( mask, unop( Iop_1Sto64,
+ binop( Iop_CmpEQ64, mkU64( 0xC ),
+ binop( Iop_And64, mkU64( 0xF ),
+ unop( Iop_V128to64, tmp ) ) ) ) );
+ rtn = binop( Iop_64HLtoV128,
+ unop( Iop_V128HIto64, tmp ),
+ binop( Iop_Or64,
+ binop( Iop_And64, mkU64( 0xF ), mkexpr( mask ) ),
+ unop( Iop_V128to64, tmp ) ) );
+ }
+
+ return rtn;
+}
/*
AltiVec BCD Arithmetic instructions.
switch (opc2) {
case 0x1: // bcdadd
DIP("bcdadd. v%d,v%d,v%d,%u\n", vRT_addr, vRA_addr, vRB_addr, ps);
- assign( dst, triop( Iop_BCDAdd, mkexpr( vA ),
- mkexpr( vB ), mkU8( ps ) ) );
+ assign( dst, bcd_sign_code_adjust( ps,
+ binop( Iop_BCDAdd,
+ mkexpr( vA ),
+ mkexpr( vB ) ) ) );
putVReg( vRT_addr, mkexpr(dst));
return True;
case 0x41: // bcdsub
DIP("bcdsub. v%d,v%d,v%d,%u\n", vRT_addr, vRA_addr, vRB_addr, ps);
- assign( dst, triop( Iop_BCDSub, mkexpr( vA ),
- mkexpr( vB ), mkU8( ps ) ) );
+ assign( dst, bcd_sign_code_adjust( ps,
+ binop( Iop_BCDSub,
+ mkexpr( vA ),
+ mkexpr( vB ) ) ) );
putVReg( vRT_addr, mkexpr(dst));
return True;
i->Pin.AvHashV128Binary.s_field = s_field;
return i;
}
-PPCInstr* PPCInstr_AvBCDV128Trinary ( PPCAvOp op, HReg dst,
- HReg src1, HReg src2, PPCRI* ps ) {
+/* Constructor for a vector BCD add/sub insn. Now binary rather than
+ * trinary: ps is no longer an operand, since the machine instruction is
+ * always emitted with ps = 0 and the IR (bcd_sign_code_adjust) patches
+ * the sign code of the result when the guest specified ps = 1. */
+PPCInstr* PPCInstr_AvBCDV128Binary ( PPCAvOp op, HReg dst,
+ HReg src1, HReg src2 ) {
PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
- i->tag = Pin_AvBCDV128Trinary;
- i->Pin.AvBCDV128Trinary.op = op;
- i->Pin.AvBCDV128Trinary.dst = dst;
- i->Pin.AvBCDV128Trinary.src1 = src1;
- i->Pin.AvBCDV128Trinary.src2 = src2;
- i->Pin.AvBCDV128Trinary.ps = ps;
+ i->tag = Pin_AvBCDV128Binary;
+ i->Pin.AvBCDV128Binary.op = op;
+ i->Pin.AvBCDV128Binary.dst = dst;
+ i->Pin.AvBCDV128Binary.src1 = src1;
+ i->Pin.AvBCDV128Binary.src2 = src2;
return i;
}
ppPPCRI(i->Pin.AvHashV128Binary.s_field);
return;
- case Pin_AvBCDV128Trinary:
- vex_printf("%s(w) ", showPPCAvOp(i->Pin.AvBCDV128Trinary.op));
- ppHRegPPC(i->Pin.AvBCDV128Trinary.dst);
+ case Pin_AvBCDV128Binary:
+ vex_printf("%s(w) ", showPPCAvOp(i->Pin.AvBCDV128Binary.op));
+ ppHRegPPC(i->Pin.AvBCDV128Binary.dst);
vex_printf(",");
- ppHRegPPC(i->Pin.AvBCDV128Trinary.src1);
+ ppHRegPPC(i->Pin.AvBCDV128Binary.src1);
vex_printf(",");
- ppHRegPPC(i->Pin.AvBCDV128Trinary.src2);
- vex_printf(",");
- ppPPCRI(i->Pin.AvBCDV128Trinary.ps);
+ ppHRegPPC(i->Pin.AvBCDV128Binary.src2);
return;
case Pin_Dfp64Unary:
addHRegUse(u, HRmRead, i->Pin.AvHashV128Binary.src);
addRegUsage_PPCRI(u, i->Pin.AvHashV128Binary.s_field);
return;
- case Pin_AvBCDV128Trinary:
- addHRegUse(u, HRmWrite, i->Pin.AvBCDV128Trinary.dst);
- addHRegUse(u, HRmRead, i->Pin.AvBCDV128Trinary.src1);
- addHRegUse(u, HRmRead, i->Pin.AvBCDV128Trinary.src2);
- addRegUsage_PPCRI(u, i->Pin.AvBCDV128Trinary.ps);
+ case Pin_AvBCDV128Binary:
+ addHRegUse(u, HRmWrite, i->Pin.AvBCDV128Binary.dst);
+ addHRegUse(u, HRmRead, i->Pin.AvBCDV128Binary.src1);
+ addHRegUse(u, HRmRead, i->Pin.AvBCDV128Binary.src2);
return;
case Pin_Dfp64Unary:
addHRegUse(u, HRmWrite, i->Pin.Dfp64Unary.dst);
mapReg(m, &i->Pin.AvHashV128Binary.dst);
mapReg(m, &i->Pin.AvHashV128Binary.src);
return;
- case Pin_AvBCDV128Trinary:
- mapReg(m, &i->Pin.AvBCDV128Trinary.dst);
- mapReg(m, &i->Pin.AvBCDV128Trinary.src1);
- mapReg(m, &i->Pin.AvBCDV128Trinary.src2);
- mapRegs_PPCRI(m, i->Pin.AvBCDV128Trinary.ps);
+ case Pin_AvBCDV128Binary:
+ mapReg(m, &i->Pin.AvBCDV128Binary.dst);
+ mapReg(m, &i->Pin.AvBCDV128Binary.src1);
+ mapReg(m, &i->Pin.AvBCDV128Binary.src2);
return;
case Pin_Dfp64Unary:
mapReg(m, &i->Pin.Dfp64Unary.dst);
p = mkFormVX( p, 4, v_dst, v_src, s_field->Pri.Imm, opc2, endness_host );
goto done;
}
- case Pin_AvBCDV128Trinary: {
- UInt v_dst = vregEnc(i->Pin.AvBCDV128Trinary.dst);
- UInt v_src1 = vregEnc(i->Pin.AvBCDV128Trinary.src1);
- UInt v_src2 = vregEnc(i->Pin.AvBCDV128Trinary.src2);
- PPCRI* ps = i->Pin.AvBCDV128Trinary.ps;
+ case Pin_AvBCDV128Binary: {
+ UInt v_dst = vregEnc(i->Pin.AvBCDV128Binary.dst);
+ UInt v_src1 = vregEnc(i->Pin.AvBCDV128Binary.src1);
+ UInt v_src2 = vregEnc(i->Pin.AvBCDV128Binary.src2);
+ UInt ps = 0; /* Issue the instruction with ps=0. The IR code will
+ * fix up the result if ps=1.
+ */
UInt opc2;
- switch (i->Pin.AvBCDV128Trinary.op) {
+ switch (i->Pin.AvBCDV128Binary.op) {
case Pav_BCDAdd: opc2 = 1; break; // bcdadd
case Pav_BCDSub: opc2 = 65; break; // bcdsub
default:
goto bad;
}
p = mkFormVXR( p, 4, v_dst, v_src1, v_src2,
- 0x1, (ps->Pri.Imm << 9) | opc2, endness_host );
+ 0x1, ps | opc2, endness_host );
goto done;
}
case Pin_AvBin32Fx4: {
Pin_AvCipherV128Unary, /* AV Vector unary Cipher */
Pin_AvCipherV128Binary, /* AV Vector binary Cipher */
Pin_AvHashV128Binary, /* AV Vector binary Hash */
- Pin_AvBCDV128Trinary, /* BCD Arithmetic */
+ Pin_AvBCDV128Binary, /* BCD Arithmetic */
Pin_Dfp64Unary, /* DFP64 unary op */
Pin_Dfp128Unary, /* DFP128 unary op */
Pin_DfpShift, /* Decimal floating point shift by immediate value */
HReg dst;
HReg src1;
HReg src2;
- PPCRI* ps;
- } AvBCDV128Trinary;
+ } AvBCDV128Binary;
struct {
PPCAvOp op;
HReg dst;
HReg srcL, HReg srcR );
extern PPCInstr* PPCInstr_AvHashV128Binary ( PPCAvOp op, HReg dst,
HReg src, PPCRI* s_field );
-extern PPCInstr* PPCInstr_AvBCDV128Trinary ( PPCAvOp op, HReg dst,
- HReg src1, HReg src2,
- PPCRI* ps );
+extern PPCInstr* PPCInstr_AvBCDV128Binary ( PPCAvOp op, HReg dst,
+ HReg src1, HReg src2 );
extern PPCInstr* PPCInstr_Dfp64Unary ( PPCFpOp op, HReg dst, HReg src );
extern PPCInstr* PPCInstr_Dfp64Binary ( PPCFpOp op, HReg dst, HReg srcL,
HReg srcR );
addInstr(env, PPCInstr_AvHashV128Binary(op, dst, arg1, s_field));
return dst;
}
- default:
- break;
- } /* switch (e->Iex.Binop.op) */
- } /* if (e->tag == Iex_Binop) */
- if (e->tag == Iex_Triop) {
- IRTriop *triop = e->Iex.Triop.details;
- switch (triop->op) {
case Iop_BCDAdd:op = Pav_BCDAdd; goto do_AvBCDV128;
case Iop_BCDSub:op = Pav_BCDSub; goto do_AvBCDV128;
do_AvBCDV128: {
- HReg arg1 = iselVecExpr(env, triop->arg1, IEndianess);
- HReg arg2 = iselVecExpr(env, triop->arg2, IEndianess);
+ HReg arg1 = iselVecExpr(env, e->Iex.Binop.arg1, IEndianess);
+ HReg arg2 = iselVecExpr(env, e->Iex.Binop.arg2, IEndianess);
HReg dst = newVRegV(env);
- PPCRI* ps = iselWordExpr_RI(env, triop->arg3, IEndianess);
- addInstr(env, PPCInstr_AvBCDV128Trinary(op, dst, arg1, arg2, ps));
+ addInstr(env, PPCInstr_AvBCDV128Binary(op, dst, arg1, arg2));
return dst;
}
+ default:
+ break;
+ } /* switch (e->Iex.Binop.op) */
+ } /* if (e->tag == Iex_Binop) */
+
+ if (e->tag == Iex_Triop) {
+ IRTriop *triop = e->Iex.Triop.details;
+ switch (triop->op) {
case Iop_Add32Fx4: fpop = Pavfp_ADDF; goto do_32Fx4_with_rm;
case Iop_Sub32Fx4: fpop = Pavfp_SUBF; goto do_32Fx4_with_rm;
case Iop_Mul32Fx4: fpop = Pavfp_MULF; goto do_32Fx4_with_rm;
case Iop_BCDAdd:
case Iop_BCDSub:
- TERNARY(Ity_V128,Ity_V128, Ity_I8, Ity_V128);
+ BINARY(Ity_V128, Ity_V128, Ity_V128);
+
case Iop_QDMull16Sx4: case Iop_QDMull32Sx2:
BINARY(Ity_I64, Ity_I64, Ity_V128);