return IRExpr_Binop(op, a1, a2);
}
+static IRExpr* triop ( IROp op, IRExpr* a1, IRExpr* a2, IRExpr* a3 )
+{
+ return IRExpr_Triop(op, a1, a2, a3);
+}
+
static IRExpr* mkexpr ( IRTemp tmp )
{
   return IRExpr_Tmp(tmp);
}

static IRExpr* /* :: Ity_I32 */ get_roundingmode ( void )
{
   return binop( Iop_And32, get_fpround(), mkU32(3) );
}
+static IRExpr* /* :: Ity_I32 */ get_FAKE_roundingmode ( void )
+{
+ return mkU32(Irrm_NEAREST);
+}
+
/* --------- Get/set FP register tag bytes. --------- */
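Note: get_FAKE_roundingmode() is a stopgap. It always answers Irrm_NEAREST, ignoring whatever rounding mode the guest's FPU control word actually selects, and every call site that settles for it is tagged XXXROUNDINGFIXME. Once rounding is carried through properly, those sites can presumably switch to the real get_roundingmode() defined above, along these lines (a sketch, not part of the patch):

   put_ST_UNCHECKED(0,
      triop( op,
             get_roundingmode(), /* honour FPCW.RC, not a faked constant */
             get_ST(0),
             loadLE(Ity_F64, mkexpr(addr)) ));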
DIP("f%s%c %s\n", op_txt, dbl?'l':'s', dis_buf);
if (dbl) {
put_ST_UNCHECKED(0,
- binop( op,
+ triop( op,
+ get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
get_ST(0),
loadLE(Ity_F64,mkexpr(addr))
));
} else {
put_ST_UNCHECKED(0,
- binop( op,
+ triop( op,
+ get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
get_ST(0),
unop(Iop_F32toF64, loadLE(Ity_F32,mkexpr(addr)))
));
DIP("f%s%c %s\n", op_txt, dbl?'l':'s', dis_buf);
if (dbl) {
put_ST_UNCHECKED(0,
- binop( op,
+ triop( op,
+ get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
loadLE(Ity_F64,mkexpr(addr)),
get_ST(0)
));
} else {
put_ST_UNCHECKED(0,
- binop( op,
+ triop( op,
+ get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
unop(Iop_F32toF64, loadLE(Ity_F32,mkexpr(addr))),
get_ST(0)
));
DIP("f%s%s st(%u), st(%u)\n", op_txt, pop_after?"p":"", st_src, st_dst );
put_ST_UNCHECKED(
st_dst,
- binop(op, get_ST(st_dst), get_ST(st_src) )
+ triop( op,
+ get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+ get_ST(st_dst),
+ get_ST(st_src) )
);
if (pop_after)
fp_pop();
DIP("f%s%s st(%u), st(%u)\n", op_txt, pop_after?"p":"", st_src, st_dst );
put_ST_UNCHECKED(
st_dst,
- binop(op, get_ST(st_src), get_ST(st_dst) )
+ triop( op,
+ get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+ get_ST(st_src),
+ get_ST(st_dst) )
);
if (pop_after)
fp_pop();
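The same pattern recurs through the rest of the FP decoder: every binary operation gains the rounding mode as a new first argument (binop becomes triop), and, below, every rounding-sensitive unary operation gains it the same way (unop becomes binop). The operand order is otherwise unchanged, and the mode is always the fake Irrm_NEAREST for now.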
case 0xF0: /* F2XM1 */
DIP("f2xm1\n");
- put_ST_UNCHECKED(0, unop(Iop_2xm1F64, get_ST(0)));
+ put_ST_UNCHECKED(0,
+ binop(Iop_2xm1F64,
+ get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+ get_ST(0)));
break;
case 0xF1: /* FYL2X */
DIP("fyl2x\n");
- put_ST_UNCHECKED(1, binop(Iop_Yl2xF64,
- get_ST(1), get_ST(0)));
+ put_ST_UNCHECKED(1,
+ triop(Iop_Yl2xF64,
+ get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+ get_ST(1),
+ get_ST(0)));
fp_pop();
break;
case 0xF2: /* FPTAN */
DIP("ftan\n");
- put_ST_UNCHECKED(0, unop(Iop_TanF64, get_ST(0)));
+ put_ST_UNCHECKED(0,
+ binop(Iop_TanF64,
+ get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+ get_ST(0)));
fp_push();
put_ST(0, IRExpr_Const(IRConst_F64(1.0)));
         clear_C2(); /* HACK */
         break;
case 0xF3: /* FPATAN */
DIP("fpatan\n");
- put_ST_UNCHECKED(1, binop(Iop_AtanF64,
- get_ST(1), get_ST(0)));
+ put_ST_UNCHECKED(1,
+ triop(Iop_AtanF64,
+ get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+ get_ST(1),
+ get_ST(0)));
fp_pop();
break;
//..
case 0xF9: /* FYL2XP1 */
DIP("fyl2xp1\n");
- put_ST_UNCHECKED(1, binop(Iop_Yl2xp1F64,
- get_ST(1), get_ST(0)));
+ put_ST_UNCHECKED(1,
+ triop(Iop_Yl2xp1F64,
+ get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+ get_ST(1),
+ get_ST(0)));
fp_pop();
break;
case 0xFA: /* FSQRT */
DIP("fsqrt\n");
- put_ST_UNCHECKED(0, unop(Iop_SqrtF64, get_ST(0)));
+ put_ST_UNCHECKED(0,
+ binop(Iop_SqrtF64,
+ get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+ get_ST(0)));
break;
case 0xFB: { /* FSINCOS */
IRTemp a1 = newTemp(Ity_F64);
assign( a1, get_ST(0) );
DIP("fsincos\n");
- put_ST_UNCHECKED(0, unop(Iop_SinF64, mkexpr(a1)));
+ put_ST_UNCHECKED(0,
+ binop(Iop_SinF64,
+ get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+ mkexpr(a1)));
fp_push();
- put_ST(0, unop(Iop_CosF64, mkexpr(a1)));
+ put_ST(0,
+ binop(Iop_CosF64,
+ get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+ mkexpr(a1)));
clear_C2(); /* HACK */
break;
}
case 0xFD: /* FSCALE */
DIP("fscale\n");
- put_ST_UNCHECKED(0, binop(Iop_ScaleF64,
- get_ST(0), get_ST(1)));
+ put_ST_UNCHECKED(0,
+ triop(Iop_ScaleF64,
+ get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+ get_ST(0),
+ get_ST(1)));
break;
case 0xFE: /* FSIN */
DIP("fsin\n");
- put_ST_UNCHECKED(0, unop(Iop_SinF64, get_ST(0)));
+ put_ST_UNCHECKED(0,
+ binop(Iop_SinF64,
+ get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+ get_ST(0)));
clear_C2(); /* HACK */
break;
case 0xFF: /* FCOS */
DIP("fcos\n");
- put_ST_UNCHECKED(0, unop(Iop_CosF64, get_ST(0)));
+ put_ST_UNCHECKED(0,
+ binop(Iop_CosF64,
+ get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+ get_ST(0)));
clear_C2(); /* HACK */
break;
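The clear_C2() /* HACK */ lines deserve a note: on real hardware FPTAN, FSIN, FCOS and FSINCOS set the C2 status flag when the operand is out of range (|x| >= 2^63) and leave ST(0) untouched, and guest code polls C2 to detect that case. Unconditionally clearing C2 asserts that the argument was always in range, which is wrong for huge inputs but adequate for ordinary code.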
do_fop_m32:
put_ST_UNCHECKED(0,
- binop(fop,
+ triop(fop,
+ get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
get_ST(0),
unop(Iop_I32toF64,
loadLE(Ity_I32, mkexpr(addr)))));
do_foprev_m32:
put_ST_UNCHECKED(0,
- binop(fop,
+ triop(fop,
+ get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
unop(Iop_I32toF64,
loadLE(Ity_I32, mkexpr(addr))),
get_ST(0)));
do_fop_m16:
put_ST_UNCHECKED(0,
- binop(fop,
+ triop(fop,
+ get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
get_ST(0),
unop(Iop_I32toF64,
                        unop(Iop_16Sto32,
                             loadLE(Ity_I16, mkexpr(addr))))));
do_foprev_m16:
put_ST_UNCHECKED(0,
- binop(fop,
+ triop(fop,
+ get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
unop(Iop_I32toF64,
unop(Iop_16Sto32,
                             loadLE(Ity_I16, mkexpr(addr)))),
                   get_ST(0)));
enum {
Afp_INVALID,
/* Binary */
-//.. Xfp_ADD, Xfp_SUB, Xfp_MUL, Xfp_DIV,
- Afp_SCALE, Afp_ATAN, Afp_YL2X, Afp_YL2XP1, //Xfp_PREM, Xfp_PREM1,
+ Afp_SCALE, Afp_ATAN, Afp_YL2X, Afp_YL2XP1,
/* Unary */
- Afp_SQRT, //Xfp_ABS, Xfp_NEG, Xfp_MOV,
+ Afp_SQRT,
Afp_SIN, Afp_COS, Afp_TAN,
Afp_ROUND, Afp_2XM1
}
Asse_RCPF, Asse_RSQRTF, Asse_SQRTF,
/* Bitwise */
Asse_AND, Asse_OR, Asse_XOR, Asse_ANDN,
-//.. /* Integer binary */
Asse_ADD8, Asse_ADD16, Asse_ADD32, Asse_ADD64,
Asse_QADD8U, Asse_QADD16U,
Asse_QADD8S, Asse_QADD16S,
return res;
}
- if (e->tag == Iex_Binop) {
+ if (e->tag == Iex_Triop) {
AMD64SseOp op = Asse_INVALID;
- switch (e->Iex.Binop.op) {
+ switch (e->Iex.Triop.op) {
case Iop_AddF64: op = Asse_ADDF; break;
case Iop_SubF64: op = Asse_SUBF; break;
         case Iop_MulF64: op = Asse_MULF; break;
         case Iop_DivF64: op = Asse_DIVF; break;
         default: break;
}
if (op != Asse_INVALID) {
HReg dst = newVRegV(env);
- HReg argL = iselDblExpr(env, e->Iex.Binop.arg1);
- HReg argR = iselDblExpr(env, e->Iex.Binop.arg2);
+ HReg argL = iselDblExpr(env, e->Iex.Triop.arg2);
+ HReg argR = iselDblExpr(env, e->Iex.Triop.arg3);
addInstr(env, mk_vMOVsd_RR(argL, dst));
+ /* XXXROUNDINGFIXME */
+ /* set roundingmode here */
addInstr(env, AMD64Instr_Sse64FLo(op, argR, dst));
return dst;
}
}
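Note that the instruction selector simply drops the rounding-mode operand (e->Iex.Triop.arg1) for now; the "set roundingmode here" placeholders mark where it would be consumed. IRRoundingMode reuses the x87/SSE two-bit encoding (00 = nearest, 01 = -inf, 10 = +inf, 11 = toward zero), so honouring it would largely be a matter of shifting the value into the right control field before the arithmetic op. A sketch, with rm standing for the (hypothetical) evaluated value of arg1:

   UInt mxcsr_rc = (rm & 3) << 13;   /* SSE: MXCSR.RC occupies bits 14:13 */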
-//.. if (e->tag == Iex_Binop) {
-//.. X86FpOp fpop = Xfp_INVALID;
-//.. switch (e->Iex.Binop.op) {
-//.. case Iop_AddF64: fpop = Xfp_ADD; break;
-//.. case Iop_SubF64: fpop = Xfp_SUB; break;
-//.. case Iop_MulF64: fpop = Xfp_MUL; break;
-//.. case Iop_DivF64: fpop = Xfp_DIV; break;
-//.. case Iop_ScaleF64: fpop = Xfp_SCALE; break;
-//.. case Iop_AtanF64: fpop = Xfp_ATAN; break;
-//.. case Iop_Yl2xF64: fpop = Xfp_YL2X; break;
-//.. case Iop_Yl2xp1F64: fpop = Xfp_YL2XP1; break;
-//.. case Iop_PRemF64: fpop = Xfp_PREM; break;
-//.. case Iop_PRem1F64: fpop = Xfp_PREM1; break;
-//.. default: break;
-//.. }
-//.. if (fpop != Xfp_INVALID) {
-//.. HReg res = newVRegF(env);
-//.. HReg srcL = iselDblExpr(env, e->Iex.Binop.arg1);
-//.. HReg srcR = iselDblExpr(env, e->Iex.Binop.arg2);
-//.. addInstr(env, X86Instr_FpBinary(fpop,srcL,srcR,res));
-//.. if (fpop != Xfp_ADD && fpop != Xfp_SUB
-//.. && fpop != Xfp_MUL && fpop != Xfp_DIV)
-//.. roundToF64(env, res);
-//.. return res;
-//.. }
-//.. }
-
if (e->tag == Iex_Binop && e->Iex.Binop.op == Iop_RoundF64toInt) {
AMD64AMode* m8_rsp = AMD64AMode_IR(-8, hregAMD64_RSP());
HReg arg = iselDblExpr(env, e->Iex.Binop.arg2);
return dst;
}
- if (e->tag == Iex_Binop
- && (e->Iex.Binop.op == Iop_ScaleF64
- || e->Iex.Binop.op == Iop_AtanF64
- || e->Iex.Binop.op == Iop_Yl2xF64
- || e->Iex.Binop.op == Iop_Yl2xp1F64)
+ if (e->tag == Iex_Triop
+ && (e->Iex.Triop.op == Iop_ScaleF64
+ || e->Iex.Triop.op == Iop_AtanF64
+ || e->Iex.Triop.op == Iop_Yl2xF64
+ || e->Iex.Triop.op == Iop_Yl2xp1F64)
) {
AMD64AMode* m8_rsp = AMD64AMode_IR(-8, hregAMD64_RSP());
- HReg arg1 = iselDblExpr(env, e->Iex.Binop.arg1);
- HReg arg2 = iselDblExpr(env, e->Iex.Binop.arg2);
+ HReg arg1 = iselDblExpr(env, e->Iex.Triop.arg2);
+ HReg arg2 = iselDblExpr(env, e->Iex.Triop.arg3);
HReg dst = newVRegV(env);
- Bool arg2first = toBool(e->Iex.Binop.op == Iop_ScaleF64);
+ Bool arg2first = toBool(e->Iex.Triop.op == Iop_ScaleF64);
addInstr(env, AMD64Instr_A87Free(2));
/* one arg -> top of x87 stack */
addInstr(env, AMD64Instr_A87PushPop(m8_rsp, True/*push*/));
/* do it */
- switch (e->Iex.Binop.op) {
+ /* XXXROUNDINGFIXME */
+ /* set roundingmode here */
+ switch (e->Iex.Triop.op) {
case Iop_ScaleF64:
addInstr(env, AMD64Instr_A87FpOp(Afp_SCALE));
break;
return dst;
}
- if (e->tag == Iex_Unop) {
+ if (e->tag == Iex_Binop) {
A87FpOp fpop = Afp_INVALID;
- switch (e->Iex.Unop.op) {
-//.. case Iop_NegF64: fpop = Xfp_NEG; break;
-//.. case Iop_AbsF64: fpop = Xfp_ABS; break;
+ switch (e->Iex.Binop.op) {
case Iop_SqrtF64: fpop = Afp_SQRT; break;
case Iop_SinF64: fpop = Afp_SIN; break;
         case Iop_CosF64:  fpop = Afp_COS;  break;
         case Iop_TanF64:  fpop = Afp_TAN;  break;
         case Iop_2xm1F64: fpop = Afp_2XM1; break;
         default: break;
}
if (fpop != Afp_INVALID) {
AMD64AMode* m8_rsp = AMD64AMode_IR(-8, hregAMD64_RSP());
- HReg arg = iselDblExpr(env, e->Iex.Unop.arg);
+ HReg arg = iselDblExpr(env, e->Iex.Binop.arg2);
HReg dst = newVRegV(env);
- Int nNeeded = e->Iex.Unop.op==Iop_TanF64 ? 2 : 1;
+ Int nNeeded = e->Iex.Binop.op==Iop_TanF64 ? 2 : 1;
addInstr(env, AMD64Instr_SseLdSt(False/*store*/, 8, arg, m8_rsp));
addInstr(env, AMD64Instr_A87Free(nNeeded));
addInstr(env, AMD64Instr_A87PushPop(m8_rsp, True/*push*/));
+ /* XXXROUNDINGFIXME */
+ /* set roundingmode here */
addInstr(env, AMD64Instr_A87FpOp(fpop));
- if (e->Iex.Unop.op==Iop_TanF64) {
+ if (e->Iex.Binop.op==Iop_TanF64) {
/* get rid of the extra 1.0 that fptan pushes */
addInstr(env, AMD64Instr_A87PushPop(m8_rsp, False/*pop*/));
}
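The x87 path would be handled analogously: the FPU control word keeps its rounding control in bits 11:10, with the same two-bit encoding, so the same hypothetical rm value could be placed there before the A87FpOp is issued:

   UInt fpcw_rc = (rm & 3) << 10;   /* x87: FPCW.RC occupies bits 11:10 */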