if (mreg & 1)
return False;
mreg >>= 1;
- putDRegI64(dreg, unop(Iop_F32toF16x4, getQReg(mreg)),
+ putDRegI64(dreg, unop(Iop_F32toF16x4_DEP, getQReg(mreg)),
condT);
DIP("vcvt.f16.f32 d%u, q%u\n", dreg, mreg);
}
return False;
switch ((B >> 1) & 3) {
case 0:
- op = Q ? Iop_I32StoFx4 : Iop_I32StoFx2;
+ op = Q ? Iop_I32StoF32x4_DEP : Iop_I32StoF32x2_DEP;
DIP("vcvt.f32.s32 %c%u, %c%u\n",
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
break;
case 1:
- op = Q ? Iop_I32UtoFx4 : Iop_I32UtoFx2;
+ op = Q ? Iop_I32UtoF32x4_DEP : Iop_I32UtoF32x2_DEP;
DIP("vcvt.f32.u32 %c%u, %c%u\n",
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
break;
case 2:
- op = Q ? Iop_FtoI32Sx4_RZ : Iop_FtoI32Sx2_RZ;
+ op = Q ? Iop_F32toI32Sx4_RZ : Iop_F32toI32Sx2_RZ;
DIP("vcvt.s32.f32 %c%u, %c%u\n",
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
break;
case 3:
- op = Q ? Iop_FtoI32Ux4_RZ : Iop_FtoI32Ux2_RZ;
+ op = Q ? Iop_F32toI32Ux4_RZ : Iop_F32toI32Ux2_RZ;
DIP("vcvt.u32.f32 %c%u, %c%u\n",
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
break;
t1 = newTemp(Ity_I64);
t2 = newTemp(Ity_I64);
assign(t1,
- unop(Iop_F32toF16x4,
+ unop(Iop_F32toF16x4_DEP,
getWReg(ws)));
assign(t2,
- unop(Iop_F32toF16x4,
+ unop(Iop_F32toF16x4_DEP,
getWReg(wt)));
putWReg(wd,
binop(Iop_64HLtoV128,
case 0x00: { /* FTRUNC_S.W */
DIP("FTRUNC_S.W w%d, w%d", wd, ws);
calculateMSACSR(ws, wd, FTRUNCSW, 1);
- putWReg(wd, unop(Iop_FtoI32Sx4_RZ, getWReg(ws)));
+ putWReg(wd, unop(Iop_F32toI32Sx4_RZ, getWReg(ws)));
break;
}
case 0x00: { /* FTRUNC_U.W */
DIP("FTRUNC_U.W w%d, w%d", wd, ws);
calculateMSACSR(ws, wd, FTRUNCUW, 1);
- putWReg(wd, unop(Iop_FtoI32Ux4_RZ, getWReg(ws)));
+ putWReg(wd, unop(Iop_F32toI32Ux4_RZ, getWReg(ws)));
break;
}
mkexpr(t3))),
binop(Iop_AndV128,
unop(Iop_NotV128, mkexpr(t4)),
- unop(Iop_FtoI32Ux4_RZ,
+ unop(Iop_F32toI32Ux4_RZ,
getWReg(ws)))));
break;
}
case 0x00: { /* FFINT_U.W */
DIP("FFINT_U.W w%d, w%d", wd, ws);
calculateMSACSR(ws, wd, FFINT_UW, 1);
- putWReg(wd, unop(Iop_I32UtoFx4, getWReg(ws)));
+ putWReg(wd, unop(Iop_I32UtoF32x4_DEP, getWReg(ws)));
break;
}
IRTemp hi64 = newTemp(Ity_I64);
IRTemp lo64 = newTemp(Ity_I64);
Bool un_signed = (opc2 == 0x110);
- IROp op = un_signed ? Iop_QFtoI32Ux4_RZ : Iop_QFtoI32Sx4_RZ;
+ IROp op = un_signed ? Iop_QF32toI32Ux4_RZ : Iop_QF32toI32Sx4_RZ;
DIP("xvcvsp%sxws v%u,v%u\n", un_signed ? "u" : "s", XT, XB);
/* The xvcvsp{s|u}xws instruction is similar to vct{s|u}xs, except if src is a NaN,
break;
case 0x170: // xvcvsxwsp (VSX Vector Convert Signed Integer Word to Single-Precision format)
DIP("xvcvsxwsp v%u,v%u\n", XT, XB);
- putVSReg( XT, unop( Iop_I32StoFx4, getVSReg( XB ) ) );
+ putVSReg( XT, unop( Iop_I32StoF32x4_DEP, getVSReg( XB ) ) );
break;
case 0x150: // xvcvuxwsp (VSX Vector Convert Unsigned Integer Word to Single-Precision format)
DIP("xvcvuxwsp v%u,v%u\n", XT, XB);
- putVSReg( XT, unop( Iop_I32UtoFx4, getVSReg( XB ) ) );
+ putVSReg( XT, unop( Iop_I32UtoF32x4_DEP, getVSReg( XB ) ) );
break;
default:
* V128 result. The contents of the lower 64 bits are undefined.
*/
DIP("xscvdphp v%d, v%d\n", (UInt)XT, (UInt)XB);
- assign( result, unop( Iop_F64toF16x2, mkexpr( vB ) ) );
+ assign( result, unop( Iop_F64toF16x2_DEP, mkexpr( vB ) ) );
assign( value, unop( Iop_64to32, unop( Iop_V128HIto64,
mkexpr( result ) ) ) );
putVSReg( XT, mkexpr( result ) );
- /* Iop_F32toF16x4 is V128 -> I64, scatter the 16-bit floats in the
+ /* Iop_F32toF16x4_DEP is V128 -> I64, scatter the 16-bit floats in the
* I64 result to the V128 register to store.
*/
- assign( tmp64, unop( Iop_F32toF16x4, mkexpr( vB ) ) );
+ assign( tmp64, unop( Iop_F32toF16x4_DEP, mkexpr( vB ) ) );
/* Scatter 16-bit float values from returned 64-bit value
* of V128 result.
case 0x30A: // vcfux (Convert from Unsigned Fixed-Point W, AV p156)
DIP("vcfux v%d,v%d,%d\n", vD_addr, vB_addr, UIMM_5);
putVReg( vD_addr, triop(Iop_Mul32Fx4, mkU32(Irrm_NEAREST),
- unop(Iop_I32UtoFx4, mkexpr(vB)),
+ unop(Iop_I32UtoF32x4_DEP, mkexpr(vB)),
mkexpr(vInvScale)) );
return True;
DIP("vcfsx v%d,v%d,%d\n", vD_addr, vB_addr, UIMM_5);
putVReg( vD_addr, triop(Iop_Mul32Fx4, mkU32(Irrm_NEAREST),
- unop(Iop_I32StoFx4, mkexpr(vB)),
+ unop(Iop_I32StoF32x4_DEP, mkexpr(vB)),
mkexpr(vInvScale)) );
return True;
case 0x38A: // vctuxs (Convert to Unsigned Fixed-Point W Saturate, AV p172)
DIP("vctuxs v%d,v%d,%d\n", vD_addr, vB_addr, UIMM_5);
putVReg( vD_addr,
- unop(Iop_QFtoI32Ux4_RZ,
+ unop(Iop_QF32toI32Ux4_RZ,
triop(Iop_Mul32Fx4, mkU32(Irrm_NEAREST),
mkexpr(vB), mkexpr(vScale))) );
return True;
case 0x3CA: // vctsxs (Convert to Signed Fixed-Point W Saturate, AV p171)
DIP("vctsxs v%d,v%d,%d\n", vD_addr, vB_addr, UIMM_5);
putVReg( vD_addr,
- unop(Iop_QFtoI32Sx4_RZ,
+ unop(Iop_QF32toI32Sx4_RZ,
triop(Iop_Mul32Fx4, mkU32(Irrm_NEAREST),
mkexpr(vB), mkexpr(vScale))) );
return True;
res, arg, size, False));
return res;
}
- case Iop_FtoI32Sx2_RZ: {
+ case Iop_F32toI32Sx2_RZ: {
HReg res = newVRegD(env);
HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
addInstr(env, ARMInstr_NUnary(ARMneon_VCVTFtoS,
res, arg, 2, False));
return res;
}
- case Iop_FtoI32Ux2_RZ: {
+ case Iop_F32toI32Ux2_RZ: {
HReg res = newVRegD(env);
HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
addInstr(env, ARMInstr_NUnary(ARMneon_VCVTFtoU,
res, arg, 2, False));
return res;
}
- case Iop_I32StoFx2: {
+ case Iop_I32StoF32x2_DEP: {
HReg res = newVRegD(env);
HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
addInstr(env, ARMInstr_NUnary(ARMneon_VCVTStoF,
res, arg, 2, False));
return res;
}
- case Iop_I32UtoFx2: {
+ case Iop_I32UtoF32x2_DEP: {
HReg res = newVRegD(env);
HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
addInstr(env, ARMInstr_NUnary(ARMneon_VCVTUtoF,
res, arg, 2, False));
return res;
}
- case Iop_F32toF16x4: {
+ case Iop_F32toF16x4_DEP: {
HReg res = newVRegD(env);
HReg arg = iselNeonExpr(env, e->Iex.Unop.arg);
addInstr(env, ARMInstr_NUnary(ARMneon_VCVTF32toF16,
addInstr(env, ARMInstr_NUnary(ARMneon_CLS, res, arg, size, True));
return res;
}
- case Iop_FtoI32Sx4_RZ: {
+ case Iop_F32toI32Sx4_RZ: {
HReg res = newVRegV(env);
HReg arg = iselNeonExpr(env, e->Iex.Unop.arg);
addInstr(env, ARMInstr_NUnary(ARMneon_VCVTFtoS,
res, arg, 2, True));
return res;
}
- case Iop_FtoI32Ux4_RZ: {
+ case Iop_F32toI32Ux4_RZ: {
HReg res = newVRegV(env);
HReg arg = iselNeonExpr(env, e->Iex.Unop.arg);
addInstr(env, ARMInstr_NUnary(ARMneon_VCVTFtoU,
res, arg, 2, True));
return res;
}
- case Iop_I32StoFx4: {
+ case Iop_I32StoF32x4_DEP: {
HReg res = newVRegV(env);
HReg arg = iselNeonExpr(env, e->Iex.Unop.arg);
addInstr(env, ARMInstr_NUnary(ARMneon_VCVTStoF,
res, arg, 2, True));
return res;
}
- case Iop_I32UtoFx4: {
+ case Iop_I32UtoF32x4_DEP: {
HReg res = newVRegV(env);
HReg arg = iselNeonExpr(env, e->Iex.Unop.arg);
addInstr(env, ARMInstr_NUnary(ARMneon_VCVTUtoF,
return reg;
}
- case Iop_F32toF16x4: {
+ case Iop_F32toF16x4_DEP: {
vassert(mode64);
vassert(has_msa);
HReg v_arg = iselV128Expr(env, e->Iex.Unop.arg);
return v_dst;
}
- case Iop_I32UtoFx4: {
+ case Iop_I32UtoF32x4_DEP: {
HReg v_src = iselV128Expr(env, e->Iex.Unop.arg);
HReg v_dst = newVRegV(env);
set_guest_MIPS_rounding_mode_MSA(env);
return v_dst;
}
- case Iop_FtoI32Sx4_RZ: {
+ case Iop_F32toI32Sx4_RZ: {
HReg v_src = iselV128Expr(env, e->Iex.Unop.arg);
HReg v_dst = newVRegV(env);
addInstr(env,
return v_dst;
}
- case Iop_FtoI32Ux4_RZ: {
+ case Iop_F32toI32Ux4_RZ: {
HReg v_src = iselV128Expr(env, e->Iex.Unop.arg);
HReg v_dst = newVRegV(env);
addInstr(env,
return;
}
- case Iop_F32toF16x4: {
+ case Iop_F32toF16x4_DEP: {
vassert(has_msa);
HReg v_arg = iselV128Expr(env, e->Iex.Unop.arg);
HReg v_src = newVRegV(env);
addInstr(env, mk_iMOVds_RR(r_dst, argregs[0]));
return r_dst;
}
- case Iop_F32toF16x4: {
+ case Iop_F32toF16x4_DEP: {
HReg vdst = newVRegV(env); /* V128 */
HReg dst = newVRegI(env); /* I64 */
HReg r0 = newVRegI(env); /* I16 */
return dst;
}
- case Iop_F64toF16x2:
+ case Iop_F64toF16x2_DEP:
{
HReg dst = newVRegV(env);
HReg arg = iselVecExpr(env, e->Iex.Unop.arg, IEndianess);
return dst;
}
- case Iop_F32toF16x4:
+ case Iop_F32toF16x4_DEP:
{
HReg dst = newVRegI(env);
HReg tmp = newVRegV(env);
case Iop_RecipEst32Fx4: fpop = Pavfp_RCPF; goto do_32Fx4_unary;
case Iop_RSqrtEst32Fx4: fpop = Pavfp_RSQRTF; goto do_32Fx4_unary;
- case Iop_I32UtoFx4: fpop = Pavfp_CVTU2F; goto do_32Fx4_unary;
- case Iop_I32StoFx4: fpop = Pavfp_CVTS2F; goto do_32Fx4_unary;
- case Iop_QFtoI32Ux4_RZ: fpop = Pavfp_QCVTF2U; goto do_32Fx4_unary;
- case Iop_QFtoI32Sx4_RZ: fpop = Pavfp_QCVTF2S; goto do_32Fx4_unary;
+ case Iop_I32UtoF32x4_DEP: fpop = Pavfp_CVTU2F; goto do_32Fx4_unary;
+ case Iop_I32StoF32x4_DEP: fpop = Pavfp_CVTS2F; goto do_32Fx4_unary;
+ case Iop_QF32toI32Ux4_RZ: fpop = Pavfp_QCVTF2U; goto do_32Fx4_unary;
+ case Iop_QF32toI32Sx4_RZ: fpop = Pavfp_QCVTF2S; goto do_32Fx4_unary;
case Iop_RoundF32x4_RM: fpop = Pavfp_ROUNDM; goto do_32Fx4_unary;
case Iop_RoundF32x4_RP: fpop = Pavfp_ROUNDP; goto do_32Fx4_unary;
case Iop_RoundF32x4_RN: fpop = Pavfp_ROUNDN; goto do_32Fx4_unary;
case Iop_ReinterpF32asI32: vex_printf("ReinterpF32asI32"); return;
case Iop_ReinterpI32asF32: vex_printf("ReinterpI32asF32"); return;
- case Iop_I32UtoFx4: vex_printf("I32UtoFx4"); return;
- case Iop_I32StoFx4: vex_printf("I32StoFx4"); return;
+ case Iop_I32UtoF32x4_DEP: vex_printf("I32UtoF32x4_DEP"); return;
+ case Iop_I32StoF32x4_DEP: vex_printf("I32StoF32x4_DEP"); return;
case Iop_I32StoF32x4: vex_printf("I32StoF32x4"); return;
case Iop_F32toI32Sx4: vex_printf("F32toI32Sx4"); return;
- case Iop_F32toF16x4: vex_printf("F32toF16x4"); return;
+ case Iop_F32toF16x4_DEP: vex_printf("F32toF16x4_DEP"); return;
case Iop_F16toF32x4: vex_printf("F16toF32x4"); return;
case Iop_F16toF64x2: vex_printf("F16toF64x2"); return;
- case Iop_F64toF16x2: vex_printf("F64toF16x2"); return;
+ case Iop_F64toF16x2_DEP: vex_printf("F64toF16x2_DEP"); return;
case Iop_RSqrtEst32Fx4: vex_printf("RSqrtEst32Fx4"); return;
case Iop_RSqrtEst32Ux4: vex_printf("RSqrtEst32Ux4"); return;
case Iop_RSqrtEst32Fx2: vex_printf("RSqrtEst32Fx2"); return;
case Iop_RSqrtEst32Ux2: vex_printf("RSqrtEst32Ux2"); return;
- case Iop_QFtoI32Ux4_RZ: vex_printf("QFtoI32Ux4_RZ"); return;
- case Iop_QFtoI32Sx4_RZ: vex_printf("QFtoI32Sx4_RZ"); return;
+ case Iop_QF32toI32Ux4_RZ: vex_printf("QF32toI32Ux4_RZ"); return;
+ case Iop_QF32toI32Sx4_RZ: vex_printf("QF32toI32Sx4_RZ"); return;
- case Iop_FtoI32Ux4_RZ: vex_printf("FtoI32Ux4_RZ"); return;
- case Iop_FtoI32Sx4_RZ: vex_printf("FtoI32Sx4_RZ"); return;
+ case Iop_F32toI32Ux4_RZ: vex_printf("F32toI32Ux4_RZ"); return;
+ case Iop_F32toI32Sx4_RZ: vex_printf("F32toI32Sx4_RZ"); return;
- case Iop_I32UtoFx2: vex_printf("I32UtoFx2"); return;
- case Iop_I32StoFx2: vex_printf("I32StoFx2"); return;
+ case Iop_I32UtoF32x2_DEP: vex_printf("I32UtoF32x2_DEP"); return;
+ case Iop_I32StoF32x2_DEP: vex_printf("I32StoF32x2_DEP"); return;
- case Iop_FtoI32Ux2_RZ: vex_printf("FtoI32Ux2_RZ"); return;
- case Iop_FtoI32Sx2_RZ: vex_printf("FtoI32Sx2_RZ"); return;
+ case Iop_F32toI32Ux2_RZ: vex_printf("F32toI32Ux2_RZ"); return;
+ case Iop_F32toI32Sx2_RZ: vex_printf("F32toI32Sx2_RZ"); return;
case Iop_RoundF32x4_RM: vex_printf("RoundF32x4_RM"); return;
case Iop_RoundF32x4_RP: vex_printf("RoundF32x4_RP"); return;
case Iop_Reverse32sIn64_x1:
case Iop_Reverse8sIn32_x2: case Iop_Reverse16sIn32_x2:
case Iop_Reverse8sIn16_x4:
- case Iop_FtoI32Sx2_RZ: case Iop_FtoI32Ux2_RZ:
- case Iop_I32StoFx2: case Iop_I32UtoFx2:
+ case Iop_F32toI32Sx2_RZ: case Iop_F32toI32Ux2_RZ:
+ case Iop_I32StoF32x2_DEP: case Iop_I32UtoF32x2_DEP:
case Iop_RecipEst32Ux2: case Iop_RecipEst32Fx2:
case Iop_Abs32Fx2:
case Iop_RSqrtEst32Fx2:
case Iop_TruncF64asF32:
UNARY(Ity_F64, Ity_F32);
- case Iop_I32UtoFx4:
- case Iop_I32StoFx4:
- case Iop_QFtoI32Ux4_RZ:
- case Iop_QFtoI32Sx4_RZ:
- case Iop_FtoI32Ux4_RZ:
- case Iop_FtoI32Sx4_RZ:
+ case Iop_I32UtoF32x4_DEP:
+ case Iop_I32StoF32x4_DEP:
+ case Iop_QF32toI32Ux4_RZ:
+ case Iop_QF32toI32Sx4_RZ:
+ case Iop_F32toI32Ux4_RZ:
+ case Iop_F32toI32Sx4_RZ:
case Iop_RoundF32x4_RM:
case Iop_RoundF32x4_RP:
case Iop_RoundF32x4_RN:
case Iop_QNarrowUn16Sto8Ux8:
case Iop_QNarrowUn32Sto16Ux4:
case Iop_QNarrowUn64Sto32Ux2:
- case Iop_F32toF16x4:
+ case Iop_F32toF16x4_DEP:
UNARY(Ity_V128, Ity_I64);
case Iop_Widen8Uto16x8:
case Iop_ZeroHI64ofV128: case Iop_ZeroHI96ofV128:
case Iop_ZeroHI112ofV128: case Iop_ZeroHI120ofV128:
case Iop_F16toF64x2:
- case Iop_F64toF16x2:
+ case Iop_F64toF16x2_DEP:
case Iop_MulI128by10:
case Iop_MulI128by10Carry:
case Iop_Ctz8x16: case Iop_Ctz16x8:
their functionality. Such obscure ones are thus not directly visible
in the IR, but their effects on guest state (memory and registers)
are made visible via the annotations in IRDirty structures.
+
+ 2018-Dec-27: some of the int <-> fp conversion operations have been renamed
+ so as to have a trailing _DEP, meaning "deprecated". This is because they
+ don't specify a rounding mode to be used for the conversion and so are
+ underspecified. Their uses should be replaced with equivalents that do
+ specify a rounding mode, either as a first argument or via a suffix on the
+ name that indicates the rounding mode to use.
*/
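To make the intended migration concrete, here is a minimal sketch (not part
of this patch) of converting four signed I32 lanes to F32. It assumes the
usual IR construction helpers (unop, binop, mkU32, mkexpr) and hypothetical
IRTemps vI32s/vF32s holding V128 inputs; Iop_I32StoF32x4 takes its rounding
mode as an I32-encoded IRRoundingMode first argument, per its declaration
below.

    /* Deprecated: the rounding mode used for the conversion is unspecified. */
    IRExpr* res_dep = unop(Iop_I32StoF32x4_DEP, mkexpr(vI32s));

    /* Preferred: pass the rounding mode explicitly (round to nearest here). */
    IRExpr* res_rm = binop(Iop_I32StoF32x4, mkU32(Irrm_NEAREST), mkexpr(vI32s));

    /* For F32 -> I32, some replacements instead encode the mode in the name,
       e.g. the _RZ (round towards zero) trailer on Iop_F32toI32Sx4_RZ. */
    IRExpr* res_rz = unop(Iop_F32toI32Sx4_RZ, mkexpr(vF32s));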
typedef
enum {
/* ------------------ 64-bit SIMD FP ------------------------ */
- /* Convertion to/from int */
- Iop_I32UtoFx2, Iop_I32StoFx2, /* I32x4 -> F32x4 */
- Iop_FtoI32Ux2_RZ, Iop_FtoI32Sx2_RZ, /* F32x4 -> I32x4 */
+ /* Conversion to/from int */
+ // Deprecated: these don't specify a rounding mode
+ Iop_I32UtoF32x2_DEP, Iop_I32StoF32x2_DEP, /* I32x2 -> F32x2 */
+
+ Iop_F32toI32Ux2_RZ, Iop_F32toI32Sx2_RZ, /* F32x2 -> I32x2 */
+
/* Fixed32 format is a floating-point number with a fixed number of fraction
bits. The number of fraction bits is passed as a second argument of
type I8. */
rounding mode argument. Instead the irop trailers _R{M,P,N,Z}
indicate the mode: {-inf, +inf, nearest, zero} respectively. */
- // FIXME These carry no rounding mode
- Iop_I32UtoFx4, Iop_I32StoFx4, /* I32x4 -> F32x4 */
+ // These carry no rounding mode and are therefore deprecated
+ Iop_I32UtoF32x4_DEP, Iop_I32StoF32x4_DEP, /* I32x4 -> F32x4 */
Iop_I32StoF32x4, /* IRRoundingMode(I32) x V128 -> V128 */
Iop_F32toI32Sx4, /* IRRoundingMode(I32) x V128 -> V128 */
- Iop_FtoI32Ux4_RZ, Iop_FtoI32Sx4_RZ, /* F32x4 -> I32x4 */
- Iop_QFtoI32Ux4_RZ, Iop_QFtoI32Sx4_RZ, /* F32x4 -> I32x4 (saturating) */
+ Iop_F32toI32Ux4_RZ, Iop_F32toI32Sx4_RZ, /* F32x4 -> I32x4 */
+ Iop_QF32toI32Ux4_RZ, Iop_QF32toI32Sx4_RZ, /* F32x4 -> I32x4 (saturating) */
Iop_RoundF32x4_RM, Iop_RoundF32x4_RP, /* round to fp integer */
Iop_RoundF32x4_RN, Iop_RoundF32x4_RZ, /* round to fp integer */
/* Fixed32 format is a floating-point number with a fixed number of fraction
/* --- Single to/from half conversion --- */
/* FIXME: what kind of rounding in F32x4 -> F16x4 case? */
- // FIXME these carry no rounding mode
+ // These carry no rounding mode and are therefore deprecated
- Iop_F32toF16x4, Iop_F16toF32x4, /* F32x4 <-> F16x4 */
+ Iop_F32toF16x4_DEP, /* F32x4 -> F16x4, NO ROUNDING MODE */
+ Iop_F16toF32x4, /* F16x4 -> F32x4 */
/* -- Double to/from half conversion -- */
- Iop_F64toF16x2, // FIXME this carries no rounding mode (?)
+ Iop_F64toF16x2_DEP, // F64x2 -> F16x2, NO ROUNDING MODE
Iop_F16toF64x2,
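The half-precision conversions just declared keep their original, slightly
irregular shapes. A minimal sketch of the types involved (vF32x4 and vF64x2
are hypothetical V128 temps; unop/mkexpr as usual):

    /* F32x4 -> F16x4: consumes a V128 of four F32 lanes and yields an I64
       holding the four packed F16 results (no rounding mode, hence _DEP). */
    IRExpr* f16x4 = unop(Iop_F32toF16x4_DEP, mkexpr(vF32x4));

    /* F64x2 -> F16x2: V128 -> V128; as the xscvdphp comment earlier notes,
       the contents of the lower 64 bits of the result are undefined. */
    IRExpr* f16x2 = unop(Iop_F64toF16x2_DEP, mkexpr(vF64x2));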
/* Values from two registers converted in smaller type and put in one
[Foo16(a,b), Foo16(c,d), Foo16(e,f), Foo16(g,h)] */
Iop_PwAdd8x16, Iop_PwAdd16x8, Iop_PwAdd32x4,
Iop_PwAdd32Fx2,
+
/* Longening variant is unary. The resulting vector contains half as many
elements as the operand, but they are twice as wide.
Example:
case Iop_QNarrowUn32Uto16Ux4:
case Iop_QNarrowUn32Sto16Sx4:
case Iop_QNarrowUn32Sto16Ux4:
- case Iop_F32toF16x4:
+ case Iop_F32toF16x4_DEP:
return Iop_NarrowUn32to16x4;
case Iop_QNarrowUn16Uto8Ux8:
case Iop_QNarrowUn16Sto8Sx8:
case Iop_NarrowUn16to8x8:
case Iop_NarrowUn32to16x4:
case Iop_NarrowUn64to32x2:
- case Iop_F32toF16x4:
+ case Iop_F32toF16x4_DEP:
at1 = assignNew('V', mce, Ity_I64, unop(narrow_op, vatom1));
return at1;
default:
return unary64Fx4(mce, vatom);
case Iop_RecipEst32Fx4:
- case Iop_I32UtoFx4:
- case Iop_I32StoFx4:
- case Iop_QFtoI32Ux4_RZ:
- case Iop_QFtoI32Sx4_RZ:
+ case Iop_I32UtoF32x4_DEP:
+ case Iop_I32StoF32x4_DEP:
+ case Iop_QF32toI32Ux4_RZ:
+ case Iop_QF32toI32Sx4_RZ:
case Iop_RoundF32x4_RM:
case Iop_RoundF32x4_RP:
case Iop_RoundF32x4_RN:
case Iop_Log2_32Fx4:
return unary32Fx4(mce, vatom);
- case Iop_I32UtoFx2:
- case Iop_I32StoFx2:
+ case Iop_I32UtoF32x2_DEP:
+ case Iop_I32StoF32x2_DEP:
case Iop_RecipEst32Fx2:
case Iop_RecipEst32Ux2:
case Iop_Abs32Fx2:
case Iop_MulI128by10:
case Iop_MulI128by10Carry:
case Iop_F16toF64x2:
- case Iop_F64toF16x2:
+ case Iop_F64toF16x2_DEP:
// FIXME JRS 2018-Nov-15. This is surely not correct!
return vatom;
case Iop_CmpNEZ32x2:
case Iop_Clz32x2:
case Iop_Cls32x2:
- case Iop_FtoI32Ux2_RZ:
- case Iop_FtoI32Sx2_RZ:
+ case Iop_F32toI32Ux2_RZ:
+ case Iop_F32toI32Sx2_RZ:
case Iop_Abs32x2:
return mkPCast32x2(mce, vatom);
case Iop_CmpNEZ32x4:
case Iop_Clz32x4:
case Iop_Cls32x4:
- case Iop_FtoI32Ux4_RZ:
- case Iop_FtoI32Sx4_RZ:
+ case Iop_F32toI32Ux4_RZ:
+ case Iop_F32toI32Sx4_RZ:
case Iop_Abs32x4:
case Iop_RSqrtEst32Ux4:
case Iop_Ctz32x4:
case Iop_QNarrowUn64Sto32Sx2:
case Iop_QNarrowUn64Sto32Ux2:
case Iop_QNarrowUn64Uto32Ux2:
- case Iop_F32toF16x4:
+ case Iop_F32toF16x4_DEP:
return vectorNarrowUnV128(mce, op, vatom);
case Iop_Widen8Sto16x8:
{ DEFOP(Iop_CmpNEZ8x4, UNDEF_UNKNOWN), },
{ DEFOP(Iop_Reverse8sIn32_x1, UNDEF_UNKNOWN) },
/* ------------------ 64-bit SIMD FP ------------------------ */
- { DEFOP(Iop_I32UtoFx2, UNDEF_UNKNOWN), },
- { DEFOP(Iop_I32StoFx2, UNDEF_UNKNOWN), },
- { DEFOP(Iop_FtoI32Ux2_RZ, UNDEF_UNKNOWN), },
- { DEFOP(Iop_FtoI32Sx2_RZ, UNDEF_UNKNOWN), },
+ { DEFOP(Iop_I32UtoF32x2_DEP, UNDEF_UNKNOWN), },
+ { DEFOP(Iop_I32StoF32x2_DEP, UNDEF_UNKNOWN), },
+ { DEFOP(Iop_F32toI32Ux2_RZ, UNDEF_UNKNOWN), },
+ { DEFOP(Iop_F32toI32Sx2_RZ, UNDEF_UNKNOWN), },
{ DEFOP(Iop_F32ToFixed32Ux2_RZ, UNDEF_UNKNOWN), },
{ DEFOP(Iop_F32ToFixed32Sx2_RZ, UNDEF_UNKNOWN), },
{ DEFOP(Iop_Fixed32UToF32x2_RN, UNDEF_UNKNOWN), },
{ DEFOP(Iop_RecipStep32Fx4, UNDEF_UNKNOWN), },
{ DEFOP(Iop_RSqrtEst32Fx4, UNDEF_UNKNOWN), },
{ DEFOP(Iop_RSqrtStep32Fx4, UNDEF_UNKNOWN), },
- { DEFOP(Iop_I32UtoFx4, UNDEF_UNKNOWN), },
- { DEFOP(Iop_I32StoFx4, UNDEF_UNKNOWN), },
+ { DEFOP(Iop_I32UtoF32x4_DEP, UNDEF_UNKNOWN), },
+ { DEFOP(Iop_I32StoF32x4_DEP, UNDEF_UNKNOWN), },
{ DEFOP(Iop_I32StoF32x4, UNDEF_UNKNOWN), },
{ DEFOP(Iop_F32toI32Sx4, UNDEF_UNKNOWN), },
- { DEFOP(Iop_FtoI32Ux4_RZ, UNDEF_UNKNOWN), },
- { DEFOP(Iop_FtoI32Sx4_RZ, UNDEF_UNKNOWN), },
- { DEFOP(Iop_QFtoI32Ux4_RZ, UNDEF_UNKNOWN), },
- { DEFOP(Iop_QFtoI32Sx4_RZ, UNDEF_UNKNOWN), },
+ { DEFOP(Iop_F32toI32Ux4_RZ, UNDEF_UNKNOWN), },
+ { DEFOP(Iop_F32toI32Sx4_RZ, UNDEF_UNKNOWN), },
+ { DEFOP(Iop_QF32toI32Ux4_RZ, UNDEF_UNKNOWN), },
+ { DEFOP(Iop_QF32toI32Sx4_RZ, UNDEF_UNKNOWN), },
{ DEFOP(Iop_RoundF32x4_RM, UNDEF_UNKNOWN), },
{ DEFOP(Iop_RoundF32x4_RP, UNDEF_UNKNOWN), },
{ DEFOP(Iop_RoundF32x4_RN, UNDEF_UNKNOWN), },
{ DEFOP(Iop_F32ToFixed32Sx4_RZ, UNDEF_UNKNOWN), },
{ DEFOP(Iop_Fixed32UToF32x4_RN, UNDEF_UNKNOWN), },
{ DEFOP(Iop_Fixed32SToF32x4_RN, UNDEF_UNKNOWN), },
- { DEFOP(Iop_F32toF16x4, UNDEF_UNKNOWN), },
+ { DEFOP(Iop_F32toF16x4_DEP, UNDEF_UNKNOWN), },
{ DEFOP(Iop_F16toF32x4, UNDEF_UNKNOWN), },
- { DEFOP(Iop_F64toF16x2, UNDEF_UNKNOWN), },
+ { DEFOP(Iop_F64toF16x2_DEP, UNDEF_UNKNOWN), },
{ DEFOP(Iop_F16toF64x2, UNDEF_UNKNOWN), },
{ DEFOP(Iop_F32x4_2toQ16x8, UNDEF_UNKNOWN), },
{ DEFOP(Iop_F64x2_2toQ32x4, UNDEF_UNKNOWN), },