}
/* fall through */
+ /* -------- VRINT{A,N,P,M}.F64 d_d, VRINT{A,N,P,M}.F32 s_s -------- */
+ /* 31 22 21 17 15 11 8 7 5 4 3
+ T1/A1: 111111101 D 1110 rm Vd 101 1 01 M 0 Vm VRINT{A,N,P,M}.F64 Dd, Dm
+ T1/A1: 111111101 D 1110 rm Vd 101 0 01 M 0 Vm VRINT{A,N,P,M}.F32 Sd, Sm
+
+ ARM encoding is in NV space.
+ In Thumb mode, we must not be in an IT block.
+ */
+ if (INSN(31,23) == BITS9(1,1,1,1,1,1,1,0,1)
+ && INSN(21,18) == BITS4(1,1,1,0) && INSN(11,9) == BITS3(1,0,1)
+ && INSN(7,6) == BITS2(0,1) && INSN(4,4) == 0) {
+ UInt bit_D = INSN(22,22);
+ UInt fld_rm = INSN(17,16);
+ UInt fld_d = INSN(15,12);
+ Bool isF64 = INSN(8,8) == 1;
+ UInt bit_M = INSN(5,5);
+ UInt fld_m = INSN(3,0);
+
+ /* Register numbering: for F64 the D/M bit is the top bit of the
+ 5-bit D-register number; for F32 it is the bottom bit of the
+ 5-bit S-register number. */
+ UInt dd = isF64 ? ((bit_D << 4) | fld_d) : ((fld_d << 1) | bit_D);
+ UInt mm = isF64 ? ((bit_M << 4) | fld_m) : ((fld_m << 1) | bit_M);
+
+ if (isT) {
+ gen_SIGILL_T_if_in_ITBlock(old_itstate, new_itstate);
+ }
+ /* In ARM mode, this is statically unconditional. In Thumb mode,
+ this must be dynamically unconditional, and we've SIGILLd if not.
+ In either case we can create unconditional IR. */
+
+ UChar c = '?';
+ IRRoundingMode rm = Irrm_NEAREST;
+ switch (fld_rm) {
+ /* The use of NEAREST for both the 'a' and 'n' cases is a bit of a
+ kludge since it doesn't take into account the nearest-even vs
+ nearest-away semantics. */
+ case BITS2(0,0): c = 'a'; rm = Irrm_NEAREST; break;
+ case BITS2(0,1): c = 'n'; rm = Irrm_NEAREST; break;
+ case BITS2(1,0): c = 'p'; rm = Irrm_PosINF; break;
+ case BITS2(1,1): c = 'm'; rm = Irrm_NegINF; break;
+ default: vassert(0);
+ }
+
+ /* The rounding mode is encoded in the insn itself (not taken from
+ FPSCR at run time), so pass it to RoundFxxtoInt as a constant. */
+ IRExpr* srcM = (isF64 ? llGetDReg : llGetFReg)(mm);
+ IRExpr* res = binop(isF64 ? Iop_RoundF64toInt : Iop_RoundF32toInt,
+ mkU32((UInt)rm), srcM);
+ (isF64 ? llPutDReg : llPutFReg)(dd, res);
+
+ UChar rch = isF64 ? 'd' : 'f';
+ DIP("vrint%c.%s %c%u, %c%u\n",
+ c, isF64 ? "f64" : "f32", rch, dd, rch, mm);
+ return True;
+ }
+ /* fall through */
+
/* ---------- Doesn't match anything. ---------- */
return False;
i->ARMin.VCvtID.src = src;
return i;
}
+/* Construct a VRIntR insn: round the F64 (isF64) or F32 value in |src|
+ to an integral value, using the rounding mode held in FPSCR.RM, and
+ write the result to |dst|. ARM >= v8 hosts only. */
+ARMInstr* ARMInstr_VRIntR ( Bool isF64, HReg dst, HReg src )
+{
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+ i->tag = ARMin_VRIntR;
+ i->ARMin.VRIntR.isF64 = isF64;
+ i->ARMin.VRIntR.dst = dst;
+ i->ARMin.VRIntR.src = src;
+ return i;
+}
ARMInstr* ARMInstr_FPSCR ( Bool toFPSCR, HReg iReg ) {
ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_FPSCR;
ppHRegARM(i->ARMin.VCvtID.src);
return;
}
+ case ARMin_VRIntR: {
+ /* Prints as "vrintr.<sz>.<sz> dst, src"; UAL syntax gives the
+ size suffix twice (dest and source have the same type). */
+ const HChar* sz = i->ARMin.VRIntR.isF64 ? "f64" : "f32";
+ vex_printf("vrintr.%s.%s ", sz, sz);
+ ppHRegARM(i->ARMin.VRIntR.dst);
+ vex_printf(", ");
+ ppHRegARM(i->ARMin.VRIntR.src);
+ return;
+ }
case ARMin_FPSCR:
if (i->ARMin.FPSCR.toFPSCR) {
vex_printf("fmxr fpscr, ");
addHRegUse(u, HRmWrite, i->ARMin.VCvtID.dst);
addHRegUse(u, HRmRead, i->ARMin.VCvtID.src);
return;
+ case ARMin_VRIntR:
+ /* Writes dst, reads src; no other register effects. */
+ addHRegUse(u, HRmWrite, i->ARMin.VRIntR.dst);
+ addHRegUse(u, HRmRead, i->ARMin.VRIntR.src);
+ return;
case ARMin_FPSCR:
if (i->ARMin.FPSCR.toFPSCR)
addHRegUse(u, HRmRead, i->ARMin.FPSCR.iReg);
i->ARMin.VCvtID.dst = lookupHRegRemap(m, i->ARMin.VCvtID.dst);
i->ARMin.VCvtID.src = lookupHRegRemap(m, i->ARMin.VCvtID.src);
return;
+ case ARMin_VRIntR:
+ /* Remap both register fields post-allocation. */
+ i->ARMin.VRIntR.dst = lookupHRegRemap(m, i->ARMin.VRIntR.dst);
+ i->ARMin.VRIntR.src = lookupHRegRemap(m, i->ARMin.VRIntR.src);
+ return;
case ARMin_FPSCR:
i->ARMin.FPSCR.iReg = lookupHRegRemap(m, i->ARMin.FPSCR.iReg);
return;
/*UNREACHED*/
vassert(0);
}
+ case ARMin_VRIntR: { /* NB: ARM v8 and above only */
+ Bool isF64 = i->ARMin.VRIntR.isF64;
+ UInt rDst = (isF64 ? dregEnc : fregEnc)(i->ARMin.VRIntR.dst);
+ UInt rSrc = (isF64 ? dregEnc : fregEnc)(i->ARMin.VRIntR.src);
+ /* The encoding of registers here differs strangely for the
+ F32 and F64 cases: for F64 the D/M bit is the top bit of the
+ register number, for F32 it is the bottom bit. */
+ UInt D, Vd, M, Vm;
+ if (isF64) {
+ D = (rDst >> 4) & 1;
+ Vd = rDst & 0xF;
+ M = (rSrc >> 4) & 1;
+ Vm = rSrc & 0xF;
+ } else {
+ Vd = (rDst >> 1) & 0xF;
+ D = rDst & 1;
+ Vm = (rSrc >> 1) & 0xF;
+ M = rSrc & 1;
+ }
+ vassert(D <= 1 && Vd <= 15 && M <= 1 && Vm <= 15);
+ /* 1110 1110 1D11 0110 Vd 101z 01M0 Vm — FPSCR-directed VRINT
+ (z selects F64 vs F32); cond=AL since the insn is
+ unconditional at this point. */
+ *p++ = XXXXXXXX(0xE, X1110, X1011 | (D << 2), X0110, Vd,
+ isF64 ? X1011 : X1010, X0100 | (M << 1), Vm);
+ goto done;
+ }
case ARMin_FPSCR: {
Bool toFPSCR = i->ARMin.FPSCR.toFPSCR;
UInt iReg = iregEnc(i->ARMin.FPSCR.iReg);
ARMin_VXferD,
ARMin_VXferS,
ARMin_VCvtID,
+ ARMin_VRIntR,
ARMin_FPSCR,
ARMin_MFence,
ARMin_CLREX,
HReg dst;
HReg src;
} VCvtID;
+ /* Round a F32 or F64 value to the nearest integral value,
+ according to the FPSCR.RM. For ARM >= V8 hosts only. */
+ struct {
+ Bool isF64; /* True: operate on F64 (D regs); False: F32 (S regs) */
+ HReg dst; /* result register, same class as src */
+ HReg src; /* value to round */
+ } VRIntR;
/* Move a 32-bit value to/from the FPSCR (FMXR, FMRX) */
struct {
Bool toFPSCR;
extern ARMInstr* ARMInstr_VXferS ( Bool toS, HReg fD, HReg rLo );
extern ARMInstr* ARMInstr_VCvtID ( Bool iToD, Bool syned,
HReg dst, HReg src );
+extern ARMInstr* ARMInstr_VRIntR ( Bool isF64, HReg dst, HReg src );
extern ARMInstr* ARMInstr_FPSCR ( Bool toFPSCR, HReg iReg );
extern ARMInstr* ARMInstr_MFence ( void );
extern ARMInstr* ARMInstr_CLREX ( void );
addInstr(env, ARMInstr_VUnaryD(ARMvfpu_SQRT, dst, src));
return dst;
}
+ case Iop_RoundF64toInt: {
+ /* We can only generate this on a >= V8 capable target. But
+ that's OK since we should only be asked to generate for V8
+ capable guests, and we assume here that host == guest. */
+ if (VEX_ARM_ARCHLEVEL(env->hwcaps) >= 8) {
+ HReg src = iselDblExpr(env, e->Iex.Binop.arg2);
+ HReg dst = newVRegD(env);
+ /* VRIntR rounds per FPSCR.RM, so install the requested mode
+ around the insn and restore the default afterwards. */
+ set_VFP_rounding_mode(env, e->Iex.Binop.arg1);
+ addInstr(env, ARMInstr_VRIntR(True/*isF64*/, dst, src));
+ set_VFP_rounding_default(env);
+ return dst;
+ }
+ /* not a V8 target, so we can't select insns for this. */
+ break;
+ }
default:
break;
}
set_VFP_rounding_default(env);
return valS;
}
+ case Iop_RoundF32toInt: {
+ /* We can only generate this on a >= V8 capable target. But
+ that's OK since we should only be asked to generate for V8
+ capable guests, and we assume here that host == guest. */
+ if (VEX_ARM_ARCHLEVEL(env->hwcaps) >= 8) {
+ HReg src = iselFltExpr(env, e->Iex.Binop.arg2);
+ HReg dst = newVRegF(env);
+ /* VRIntR rounds per FPSCR.RM, so install the requested mode
+ around the insn and restore the default afterwards. */
+ set_VFP_rounding_mode(env, e->Iex.Binop.arg1);
+ addInstr(env, ARMInstr_VRIntR(False/*!isF64*/, dst, src));
+ set_VFP_rounding_default(env);
+ return dst;
+ }
+ /* not a V8 target, so we can't select insns for this. */
+ break;
+ }
default:
break;
}