extern ULong generate_C_FPCC_helper( ULong size, ULong src_hi, ULong src );
extern ULong extract_bits_under_mask_helper( ULong src, ULong mask,
UInt flag );
+extern UInt generate_DFP_FPRF_value_helper( UInt gfield, ULong exponent,
+ UInt exponent_bias,
+ Int min_norm_exp,
+ UInt sign, UInt T_value_is_zero );
extern UInt count_bits_under_mask_helper( ULong src, ULong mask,
UInt flag );
extern ULong deposit_bits_under_mask_helper( ULong src, ULong mask );
}
+UInt generate_DFP_FPRF_value_helper( UInt gfield,
+ ULong exponent,
+ UInt exponent_bias,
+ Int min_norm_exp,
+ UInt sign,
+ UInt T_value_is_zero )
+{
+ UInt gfield_5_bit_mask = 0xF8000000;
+ UInt gfield_upper_5_bits = (gfield & gfield_5_bit_mask) >> (32 - 5);
+ UInt gfield_6_bit_mask = 0xFC000000;
+ UInt gfield_upper_6_bits = (gfield & gfield_6_bit_mask) >> (32 - 6);
+ UInt fprf_value = 0;
+ Int unbiased_exponent = exponent - exponent_bias;
+
+ /* The assumption is the gfield bits are left justified. The
+ classification below uses the most significant 5 or 6 bits of the
+ 32-bit wide field. */
+ if ( T_value_is_zero == 1) {
+ if (sign == 0)
+ fprf_value = 0b00010; // positive zero
+ else
+ fprf_value = 0b10010; // negative zero
+ } else if ( unbiased_exponent < min_norm_exp ) {
+ if (sign == 0)
+ fprf_value = 0b10100; // positive subnormal
+ else
+ fprf_value = 0b11000; // negative subnormal
+
+ } else if ( gfield_upper_5_bits == 0b11110 ) { // infinity
+ if (sign == 0)
+ fprf_value = 0b00101; // positive infinity
+ else
+ fprf_value = 0b01001; // negative infinity
+
+ } else if ( gfield_upper_6_bits == 0b111110 ) {
+ fprf_value = 0b10001; // Quiet NaN
+
+ } else if ( gfield_upper_6_bits == 0b111111 ) {
+ fprf_value = 0b00001; // Signaling NaN
+
+ } else {
+ if (sign == 0)
+ fprf_value = 0b00100; // positive normal
+ else
+ fprf_value = 0b01000; // negative normal
+ }
+
+ return fprf_value;
+}
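+
+/* Note: the 5-bit value returned above is split by the caller (see
+   generate_store_DFP_FPRF_value) into the C bit (bit 4) and the FPCC field
+   (bits 3:0).  For example, a negative-infinity result of 0b01001 gives
+   C = 0 and FPCC = 0b1001. */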
+
/*---------------------------------------------------------------*/
/*--- Misc BCD clean helpers. ---*/
/*---------------------------------------------------------------*/
/*--- Misc Helpers ---*/
/*------------------------------------------------------------*/
+static void Get_lmd( IRTemp * lmd, IRExpr * gfield_0_4 );
+
/* Generate mask with 1's from 'begin' through 'end',
wrapping if begin > end.
begin->end works from right to left, 0=lsb
(((_b4) << 4) | ((_b3) << 3) | ((_b2) << 2) | \
((_b1) << 1) | ((_b0) << 0))
+static void generate_store_DFP_FPRF_value( ULong irType, IRExpr *src,
+ const VexAbiInfo* vbi )
+{
+ /* This function takes a DFP value, computes the value of the FPRF
+ field (the C bit and the FPCC field) for it and stores it. The
+ classification is done by a clean helper.
+ The FPRF[0:4]:
+ bits[0:4] =
+ 0b00001 Signaling NaN (DFP only)
+ 0b10001 Quiet NaN
+ 0b01001 negative infinity
+ 0b01000 negative normal number
+ 0b11000 negative subnormal number
+ 0b10010 negative zero
+ 0b00010 positive zero
+ 0b10100 positive subnormal number
+ 0b00100 positive normal number
+ 0b00101 positive infinity
+ */
+
+ IRTemp sign = newTemp( Ity_I32 );
+ IRTemp gfield = newTemp( Ity_I32 );
+ IRTemp gfield_mask = newTemp( Ity_I32 );
+ IRTemp exponent = newTemp( Ity_I64 );
+ UInt exponent_bias = 0;
+ IRTemp T_value_is_zero = newTemp( Ity_I32 );
+ IRTemp fprf_value = newTemp( Ity_I32 );
+ IRTemp lmd = newTemp( Ity_I32 );
+ IRTemp lmd_zero_true = newTemp( Ity_I1 );
+ Int min_norm_exp = 0;
+
+ vassert( irType == Ity_D128);
+
+ if (irType == Ity_D128) {
+ assign( gfield_mask, mkU32( DFP_G_FIELD_EXTND_MASK ) );
+ /* The gfield bits are left justified. */
+ assign( gfield, binop( Iop_And32,
+ mkexpr( gfield_mask ),
+ unop( Iop_64HIto32,
+ unop( Iop_ReinterpD64asI64,
+ unop( Iop_D128HItoD64, src ) ) ) ) );
+ assign( exponent, unop( Iop_ExtractExpD128, src ) );
+ exponent_bias = 6176;
+ min_norm_exp = -6143;
+
+ /* The significand is zero if the T field and LMD are all zeros */
+ /* Check if LMD is zero */
+ Get_lmd( &lmd, binop( Iop_Shr32,
+ mkexpr( gfield ), mkU8( 31 - 5 ) ) );
+
+ assign( lmd_zero_true, binop( Iop_CmpEQ32,
+ mkexpr( lmd ),
+ mkU32( 0 ) ) );
+ /* The T value and the LMD are the BCD value of the significand.
+ If the upper and lower T value fields and the LMD are all zero
+ then the significand is zero. */
+ assign( T_value_is_zero,
+ unop( Iop_1Uto32,
+ mkAND1 (
+ mkexpr( lmd_zero_true ),
+ mkAND1 ( binop( Iop_CmpEQ64,
+ binop( Iop_And64,
+ mkU64( DFP_T_FIELD_EXTND_MASK ),
+ unop( Iop_ReinterpD64asI64,
+ unop( Iop_D128HItoD64,
+ src ) ) ),
+ mkU64( 0 ) ),
+ binop( Iop_CmpEQ64,
+ unop( Iop_ReinterpD64asI64,
+ unop( Iop_D128LOtoD64,
+ src ) ),
+ mkU64( 0 ) ) ) ) ) );
+
+ assign( sign,
+ unop( Iop_64to32,
+ binop( Iop_Shr64,
+ unop( Iop_ReinterpD64asI64,
+ unop( Iop_D128HItoD64, src ) ),
+ mkU8( 63 ) ) ) );
+ } else {
+ /* generate_store_DFP_FPRF_value, unknown value for irType */
+ vassert(0);
+ }
+
+ /* Determine what the type of the number is. */
+ assign( fprf_value,
+ mkIRExprCCall( Ity_I32, 0 /*regparms*/,
+ "generate_DFP_FPRF_value_helper",
+ fnptr_to_fnentry( vbi,
+ &generate_DFP_FPRF_value_helper ),
+ mkIRExprVec_6( mkexpr( gfield ),
+ mkexpr( exponent ),
+ mkU32( exponent_bias ),
+ mkU32( min_norm_exp ),
+ mkexpr( sign ),
+ mkexpr( T_value_is_zero ) ) ) );
+ /* fprf[0:4] = (C | FPCC[0:3]) */
+ putC( binop( Iop_Shr32, mkexpr( fprf_value ), mkU8( 4 ) ) );
+ putFPCC( binop( Iop_And32, mkexpr( fprf_value ), mkU32 (0xF ) ) );
+ return;
+}
+
static IRExpr * Gfield_encoding( IRExpr * lmexp, IRExpr * lmd32 )
{
IRTemp lmd_07_mask = newTemp( Ity_I32 );
}
/* Quad DFP format conversion instructions */
-static Bool dis_dfp_fmt_convq( UInt prefix, UInt theInstr ) {
+static Bool dis_dfp_fmt_convq( UInt prefix, UInt theInstr,
+ const VexAbiInfo* vbi ) {
UInt opc2 = ifieldOPClo10( theInstr );
UChar frS_addr = ifieldRegDS( theInstr );
UChar frB_addr = ifieldRegB( theInstr );
putDReg_pair( frS_addr, mkexpr( frS128 ) );
break;
}
+
+ case 0x3E2:
+ {
+ Int opc3 = IFIELD( theInstr, 16, 5 );
+
+ flag_rC = 0; // These instructions do not set condition codes.
+
+ if (opc3 == 0) { // dcffixqq
+ IRTemp tmpD128 = newTemp( Ity_D128 );
+ IRTemp vB_src = newTemp( Ity_V128 );
+
+ DIP( "dcffixqq fr%u,v%u\n", frS_addr, frB_addr );
+
+ assign( vB_src, getVReg( frB_addr ));
+ assign( tmpD128, binop( Iop_I128StoD128, round,
+ unop( Iop_ReinterpV128asI128,
+ mkexpr( vB_src ) ) ) );
+ /* tmpD128 is a DFP 128-bit value held as a hi/lo pair of 64-bit
+ values. */
+ generate_store_DFP_FPRF_value( Ity_D128, mkexpr( tmpD128 ), vbi);
+ putDReg_pair( frS_addr, mkexpr( tmpD128 ) );
+
+ } else if (opc3 == 1) { // dctfixqq
+ IRTemp tmp128 = newTemp(Ity_I128);
+
+ DIP( "dctfixqq v%u,fr%u\n", frS_addr, frB_addr );
+ assign( tmp128, binop( Iop_D128toI128S, round,
+ getDReg_pair( frB_addr ) ) );
+
+ putVReg( frS_addr,
+ unop( Iop_ReinterpI128asV128, mkexpr( tmp128 ) ) );
+
+ } else {
+ vex_printf("ERROR: dis_dfp_fmt_convq unknown opc3 = %d value.\n",
+ opc3);
+ return False;
+ }
+ }
+ break;
}
if (flag_rC && clear_CR1) {
mkexpr( flags2 ),
mkexpr( flags3 ) ) ) ),
crfD );
-
break;
}
- case 0x174: // xvtdivsp (VSX Vector Test for software Divide Single-Precision)
+
+ case 0x174: // xvtdivsp (VSX Vector Test for software Divide Single-Precision)
{
IRTemp flags0 = newTemp(Ity_I32);
IRTemp flags1 = newTemp(Ity_I32);
UInt inst_select = IFIELD( theInstr, 16, 5);
switch (inst_select) {
+ case 0: // xscvqpuqz, VSX Scalar Convert with round to zero
+ // Quad-Precision to Unsigned Quadword X-form
+ {
+ DIP("xscvqpuqz, v%d,v%d\n", vT_addr, vB_addr);
+ assign( vT, unop( Iop_TruncF128toI128U, mkexpr( vB ) ) );
+ break;
+ }
case 1: // xscvqpuwz VSX Scalar Truncate & Convert Quad-Precision
// format to Unsigned Word format
{
generate_store_FPRF( Ity_F128, vT, vbi );
break;
}
+ case 3: // xscvuqqp, VSX Scalar Convert Unsigned Quadword
+ // to Quad-Precision X-form
+ {
+ DIP("xscvqpuqz, v%d,v%d\n", vT_addr, vB_addr);
+ assign( vT,
+ binop( Iop_I128UtoF128, rm,
+ unop ( Iop_ReinterpF128asI128,
+ getF128Reg( vB_addr ) ) ) );
+ generate_store_FPRF( Ity_F128, vT, vbi );
+ break;
+ }
+ case 8: // xscvqpsqz, VSX Scalar Convert with round to zero
+ // Quad-Precision to Signed Quadword X-form
+ {
+ DIP("xscvqpsqz, v%d,v%d\n", vT_addr, vB_addr);
+ assign( vT, unop( Iop_TruncF128toI128S, mkexpr( vB ) ) );
+ break;
+ }
case 9: // xscvqpswz VSX Scalar Truncate & Convert Quad-Precision
// format to Signed Word format
{
generate_store_FPRF( Ity_F128, vT, vbi );
break;
}
+ case 11: // xscvsqqp, VSX Scalar Convert Signed Quadword
+ // to Quad-Precision X-form
+ {
+ DIP("xscvsqqp, v%d,v%d\n", vT_addr, vB_addr);
+ assign( vT,
+ binop( Iop_I128StoF128, rm,
+ unop ( Iop_ReinterpF128asI128,
+ mkexpr( vB ) ) ) );
+ generate_store_FPRF( Ity_F128, vT, vbi );
+ break;
+ }
case 17: // xscvqpudz VSX Scalar Truncate & Convert Quad-Precision
// format to Unsigned Doubleword format
{
IRTemp tmp128 = newTemp(Ity_I128);
if ( opc2 == 0x0C8) {
- DIP("vwuloud v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
+ DIP("vmuloud v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
/* multiply lower D-words together, upper D-words not used. */
assign( tmp128, binop( Iop_MullU64,
unop( Iop_V128to64, mkexpr( vA ) ),
unop( Iop_V128to64, mkexpr( vB ) ) ) );
} else if ( opc2 == 0x1C8) {
- DIP("vwulosd v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
+ DIP("vmulosd v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
/* multiply lower D-words together, upper D-words not used. */
assign( tmp128, binop( Iop_MullS64,
unop( Iop_V128to64, mkexpr( vA ) ),
unop( Iop_V128to64, mkexpr( vB ) ) ) );
} else if ( opc2 == 0x2C8) {
- DIP("vwuleud v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
+ DIP("vmuleud v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
/* multiply upper D-words together, lower D-words not used. */
assign( tmp128, binop( Iop_MullU64,
unop( Iop_V128HIto64, mkexpr( vA ) ),
unop( Iop_V128HIto64, mkexpr( vB ) ) ) );
} else {
- DIP("vwulesd v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
+ DIP("vmulesd v%u,v%u,v%u\n", vD_addr, vA_addr, vB_addr);
/* multiply upper D-words together, lower D-words not used. */
assign( tmp128, binop( Iop_MullS64,
unop( Iop_V128HIto64, mkexpr( vA ) ),
mkexpr( res_tmp[ 1 ] ),
mkexpr( res_tmp[ 0 ] ) ) ) ) );
} else {
- /* Doing a modulo instruction,
+ /* Doing a modulo instruction, vmodsw/vmoduw
res_tmp[] contains the quotients of VRA/VRB.
Calculate modulo as VRA - VRB * res_tmp. */
IRTemp res_Tmp = newTemp( Ity_V128 );
UChar vA_addr = ifieldRegA(theInstr);
UChar vB_addr = ifieldRegB(theInstr);
UChar opc1 = ifieldOPC(theInstr);
- UInt opc2 = ifieldOPClo11( theInstr );
+ UInt opc2;
IRTemp vA = newTemp(Ity_V128);
IRTemp vB = newTemp(Ity_V128);
assign( vA, getVReg( vA_addr ) );
assign( vB, getVReg( vB_addr ) );
+ opc2 = IFIELD(theInstr, 0, 6);
+ switch (opc2) {
+ case 0x017: // vmsumcud Vector Multiply-Sum & write Carry-out Unsigned
+ // Doubleword VA-form
+ {
+ UChar vC_addr = ifieldRegC(theInstr);
+ IRTemp vC = newTemp(Ity_V128);
+
+ assign( vC, getVReg( vC_addr ) );
+
+ DIP("vmsumcud %d,%d,%d,%d\n", vT_addr, vA_addr, vB_addr, vC_addr);
+ putVReg( vT_addr, triop( Iop_2xMultU64Add128CarryOut,
+ mkexpr( vA ), mkexpr( vB ), mkexpr( vC ) ) );
+ return True;
+ }
+
+ default:
+ break; /* not a 6-bit opc2 instruction; decode using the 11-bit opc2 field below */
+ } /* switch (opc2) */
+
+ opc2 = ifieldOPClo11( theInstr );
switch (opc2) {
case 0x005: //vrlq Vector Rotate Left Quadword
{
break;
case 0x00B: //vdivuq Vector Divide Unsigned Quadword
- vex_printf("WARNING: instruction vdivuq not currently supported. dis_vx_quadword_arith(ppc)\n");
+ DIP("vdivuq %d,%d,%d\n", vT_addr, vA_addr, vB_addr);
+ putVReg( vT_addr, binop( Iop_DivU128, mkexpr( vA ), mkexpr( vB ) ) );
break;
case 0x101: //vcmpuq Vector Compare Unsigned Quadword
break;
case 0x10B: //vdivsq Vector Divide Signed Quadword
- vex_printf("WARNING: instruction vdivsq not currently supported. dis_vx_quadword_arith(ppc)\n");
+ DIP("vdivsq %d,%d,%d\n", vT_addr, vA_addr, vB_addr);
+ putVReg( vT_addr, binop( Iop_DivS128, mkexpr( vA ), mkexpr( vB ) ) );
break;
case 0x141: //vcmpsq Vector Compare Signed Quadword
}
break;
- case 0x20B: //vdiveuq Vector Divide Extended Unsigned Quadword
- vex_printf("WARNING: instruction vdiveuq not currently supported. dis_vx_quadword_arith(ppc)\n");
+ case 0x20B: //vdiveuq Vector Divide Extended Unsigned Quadword VX form
+ DIP("vdiveuq %d,%d,%d\n", vT_addr, vA_addr, vB_addr);
+ putVReg( vT_addr, binop( Iop_DivU128E, mkexpr( vA ), mkexpr( vB ) ) );
break;
case 0x305: //vsraq Vector Shift Right Algebraic Quadword
}
break;
- case 0x30B: //vdivesq Vector Divide Extended Signed Quadword
- vex_printf("WARNING: instruction vdivesq not currently supported. dis_vx_quadword_arith(ppc)\n");
+ case 0x30B: //vdivesq Vector Divide Extended Signed Quadword VX form
+ DIP("vdivesq %d,%d,%d\n", vT_addr, vA_addr, vB_addr);
+ putVReg( vT_addr, binop( Iop_DivS128E, mkexpr( vA ), mkexpr( vB ) ) );
break;
case 0x60B: //vmoduq Vector Modulo Unsigned Quadword
- vex_printf("WARNING: instruction vmoduq not currently supported. dis_vx_quadword_arith(ppc)\n");
+ DIP("vmoduq %d,%d,%d\n", vT_addr, vA_addr, vB_addr);
+ putVReg( vT_addr, binop( Iop_ModU128, mkexpr( vA ), mkexpr( vB ) ) );
break;
case 0x70B: //vmodsq Vector Modulo Signed Quadword
- vex_printf("WARNING: instruction vmodsq not currently supported. dis_vx_quadword_arith(ppc)\n");
+ DIP("vmodsq %d,%d,%d\n", vT_addr, vA_addr, vB_addr);
+ putVReg( vT_addr, binop( Iop_ModS128, mkexpr( vA ), mkexpr( vB ) ) );
break;
default:
- vex_printf("dis_av_arith(ppc)(opc2=0x%x)\n", opc2);
+ vex_printf("dis_av_arith(ppc)(opc2 bits[21:31]=0x%x)\n", opc2);
return False;
} /* switch (opc2) */
goto decode_success;
goto decode_failure;
+ case 0x3E2: // dcffixqq - DFP Convert From Fixed Quadword
+ // dctfixqq - DFP Convert To Fixed Quadword
+ if (!allow_DFP) goto decode_noDFP;
+ if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
+ if (dis_dfp_fmt_convq( prefix, theInstr, abiinfo ))
+ goto decode_success;
+ goto decode_failure;
+
case 0x102: // dctqpq - DFP convert to DFP extended
case 0x302: // drdpq - DFP round to dfp Long
case 0x122: // dctfixq - DFP convert to fixed quad
case 0x322: // dcffixq - DFP convert from fixed quad
if (!allow_DFP) goto decode_noDFP;
- if (dis_dfp_fmt_convq( prefix, theInstr ))
+ if (dis_dfp_fmt_convq( prefix, theInstr, abiinfo ))
goto decode_success;
goto decode_failure;
case 0x204: // xssubqp (VSX Scalar Subtract Quad-Precision [using RN mode]
// xsdivqpo (VSX Scalar Divide Quad-Precision [using round to ODD]
case 0x224: // xsdivqp (VSX Scalar Divide Quad-Precision [using RN mode]
+ if ( dis_vx_Floating_Point_Arithmetic_quad_precision( prefix,
+ theInstr,
+ abiinfo ) )
+ goto decode_success;
+ goto decode_failure;
+
case 0x344: // xscvudqp, xscvsdqp, xscvqpdp, xscvqpdpo, xsvqpdp
// xscvqpswz, xscvqpuwz, xscvqpudz, xscvqpsdz
+ /* ISA 3.1 instructions: xscvqpuqz, xscvuqqp, xscvqpsqz,
+ xscvsqqp. */
+ if (( IFIELD( theInstr, 16, 5) == 0 // xscvqpuqz
+ || IFIELD( theInstr, 16, 5) == 3 // xscvuqqp
+ || IFIELD( theInstr, 16, 5) == 8 // xscvqpsqz
+ || IFIELD( theInstr, 16, 5) == 11 )) { // xscvsqqp
+ if (!allow_isa_3_1)
+ goto decode_noIsa3_1;
+
+ if ( dis_vx_Floating_Point_Arithmetic_quad_precision( prefix,
+ theInstr,
+ abiinfo ) )
+ goto decode_success;
+ goto decode_failure;
+ }
+
if ( !mode64 || !allow_isa_3_0 ) goto decode_failure;
- if ( dis_vx_Floating_Point_Arithmetic_quad_precision( prefix, theInstr,
+ if ( dis_vx_Floating_Point_Arithmetic_quad_precision( prefix,
+ theInstr,
abiinfo ) )
goto decode_success;
goto decode_failure;
/* AV Mult-Add, Mult-Sum */
case 0x16: // vsldbi/vsrdbi
if (!allow_V) goto decode_noV;
- if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
if (dis_av_shift( prefix, theInstr )) goto decode_success;
goto decode_failure;
+ case 0x17: // vmsumcud
+ if ( !(allow_isa_3_1) ) goto decode_noIsa3_1;
+ if (dis_vx_quadword_arith( prefix, theInstr )) {
+ goto decode_success;
+ }
+ goto decode_failure;
+
case 0x18: case 0x19: // vextdubvlx, vextdubvrx
case 0x1A: case 0x1B: // vextduhvlx, vextduhvrx
case 0x1C: case 0x1D: // vextduwvlx, vextduwvrx
case 0x783: case 0x7c3: // vpopcntw, vpopcntd
if (!allow_isa_2_07) goto decode_noP8;
if (dis_av_count_bitTranspose( prefix, theInstr, opc2 ))
- goto decode_success;
+ goto decode_success;
goto decode_failure;
case 0x50c: // vgbbd
case Pfp_FPDTOQ: return "xscvdpqp";
case Pfp_IDSTOQ: return "xscvsdqp";
case Pfp_IDUTOQ: return "xscvudqp";
+ case Pfp_IQSTOQ: return "xscvsqqp";
+ case Pfp_IQUTOQ: return "xscvuqqp";
case Pfp_TRUNCFPQTOISD: return "xscvqpsdz";
case Pfp_TRUNCFPQTOISW: return "xscvqpswz";
case Pfp_TRUNCFPQTOIUD: return "xscvqpudz";
case Pfp_DFPMULQ: return "dmulq";
case Pfp_DFPDIV: return "ddivd";
case Pfp_DFPDIVQ: return "ddivq";
+ case Pfp_DFPTOIQ: return "dctfixqq";
+ case Pfp_IQUTODFP: return "dcffixqq";
case Pfp_DCTDP: return "dctdp";
case Pfp_DRSP: return "drsp";
case Pfp_DCTFIX: return "dctfix";
case Pav_F16toF64x2:
return"xvcvhpdp";
- /* Vector Half-precision format to Double precision conversion */
+ /* Vector Half-precision format to Double precision conversion */
case Pav_F64toF16x2:
return"xvcvdphp";
}
}
+const HChar* showPPCAvOpBin128 ( PPCAvOpBin128 op ) {
+
+ switch (op) {
+ /* Binary ops */
+
+ /* Vector Divide/Modulo Quadword VX-form */
+ case Pav_DivU128:
+ return "vdivuq";
+
+ case Pav_DivS128:
+ return "vdivsq";
+
+ case Pav_DivU128E:
+ return "vdivuq";
+
+ case Pav_DivS128E:
+ return "vdivsq";
+
+ case Pav_ModU128:
+ return "vmoduq";
+
+ case Pav_ModS128:
+ return "vmodsq";
+
+ default: vpanic("showPPCAvOpBin128");
+ }
+}
+
+const HChar* showPPCAvOpTri128 ( PPCAvOpTri128 op ) {
+
+ /* Vector Quadword VA-form */
+ switch (op) {
+ case Pav_2xMultU64Add128CarryOut:
+ return "vmsumcud";
+
+ default: vpanic("showPPCAvOpTri128");
+ }
+}
+
const HChar* showPPCAvFpOp ( PPCAvFpOp op ) {
switch (op) {
/* Floating Point Binary */
}
}
+const HChar* showXFormUnary994 ( PPCXFormUnary994 op ) {
+
+ /* DFP Convert To/From Fixed Quadword, X-form (opc1=63, opc2=994) */
+ switch (op) {
+ case Px_IQSTODFP:
+ return "dcffixqq";
+ case Px_DFPTOIQS:
+ return "dctfixqq";
+
+ default: vpanic("showXFormUnary994");
+ }
+}
+
PPCInstr* PPCInstr_LI ( HReg dst, ULong imm64, Bool mode64 )
{
PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->Pin.Fp128Binary.srcR = srcR;
return i;
}
-PPCInstr* PPCInstr_Fp128Ternnary(PPCFpOp op, HReg dst, HReg srcL, HReg srcR) {
+PPCInstr* PPCInstr_Fp128Ternary(PPCFpOp op, HReg dst, HReg srcL, HReg srcR) {
PPCInstr* i = LibVEX_Alloc_inline( sizeof(PPCInstr) );
- i->tag = Pin_Fp128Ternnary;
- i->Pin.Fp128Ternnary.op = op;
- i->Pin.Fp128Ternnary.dst = dst;
- i->Pin.Fp128Ternnary.srcL = srcL;
- i->Pin.Fp128Ternnary.srcR = srcR;
+ i->tag = Pin_Fp128Ternary;
+ i->Pin.Fp128Ternary.op = op;
+ i->Pin.Fp128Ternary.dst = dst;
+ i->Pin.Fp128Ternary.srcL = srcL;
+ i->Pin.Fp128Ternary.srcR = srcR;
return i;
}
PPCInstr* PPCInstr_FpMulAcc ( PPCFpOp op, HReg dst, HReg srcML,
i->Pin.AvBinaryInt.val = val;
return i;
}
+PPCInstr* PPCInstr_AvBinaryInt128 ( PPCAvOpBin128 op, HReg dst,
+ HReg src1, HReg src2 ) {
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
+ i->tag = Pin_AvBinaryInt128;
+ i->Pin.AvBinaryInt128.op = op;
+ i->Pin.AvBinaryInt128.dst = dst;
+ i->Pin.AvBinaryInt128.src1 = src1;
+ i->Pin.AvBinaryInt128.src2 = src2;
+ return i;
+}
+PPCInstr* PPCInstr_AvTernaryInt128 ( PPCAvOpTri128 op, HReg dst,
+ HReg src1, HReg src2, HReg src3 ) {
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
+ i->tag = Pin_AvTernaryInt128;
+ i->Pin.AvTernaryInt128.op = op;
+ i->Pin.AvTernaryInt128.dst = dst;
+ i->Pin.AvTernaryInt128.src1 = src1;
+ i->Pin.AvTernaryInt128.src2 = src2;
+ i->Pin.AvTernaryInt128.src3 = src3;
+ return i;
+}
PPCInstr* PPCInstr_AvBin8x16 ( PPCAvOp op, HReg dst,
HReg srcL, HReg srcR ) {
PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->Pin.AvBCDV128Binary.src2 = src2;
return i;
}
-
+PPCInstr* PPCInstr_XFormUnary994 ( PPCXFormUnary994 op, HReg reg0, HReg reg1,
+ HReg reg2 ) {
+ /* This is used to issue instructions with opc1=63, opc2=994. The specific
+ instruction is given in bits[11:15], the VRA field, of the instruction.
+ Currently only used for dcffixqq and dctfixqq instructions. */
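+ /* Register convention (mirrors the Pin_XFormUnary994 emit case): for
+ Px_IQSTODFP (dcffixqq) reg0/reg1 are the destination FPR pair and reg2
+ is the source VSR; for Px_DFPTOIQS (dctfixqq) reg0 is the destination
+ VSR and reg1/reg2 are the source FPR pair. */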
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
+ i->tag = Pin_XFormUnary994;
+ i->Pin.XFormUnary994.op = op;
+ i->Pin.XFormUnary994.reg0 = reg0;
+ i->Pin.XFormUnary994.reg1 = reg1;
+ i->Pin.XFormUnary994.reg2 = reg2;
+ return i;
+}
/* Pretty Print instructions */
static void ppLoadImm ( HReg dst, ULong imm, Bool mode64 ) {
vex_printf(",");
ppHRegPPC(i->Pin.Fp128Binary.srcR);
return;
- case Pin_Fp128Ternnary:
- vex_printf("%s ", showPPCFpOp(i->Pin.Fp128Ternnary.op));
- ppHRegPPC(i->Pin.Fp128Ternnary.dst);
+ case Pin_Fp128Ternary:
+ vex_printf("%s ", showPPCFpOp(i->Pin.Fp128Ternary.op));
+ ppHRegPPC(i->Pin.Fp128Ternary.dst);
vex_printf(",");
ppHRegPPC(i->Pin.Fp128Ternary.srcL);
vex_printf(",");
vex_printf(",");
ppPPCRI(i->Pin.AvBinaryInt.val);
return;
+ case Pin_AvBinaryInt128:
+ vex_printf("%s ", showPPCAvOpBin128(i->Pin.AvBinaryInt128.op));
+ ppHRegPPC(i->Pin.AvBinaryInt128.dst);
+ vex_printf(",");
+ ppHRegPPC(i->Pin.AvBinaryInt128.src1);
+ vex_printf(",");
+ ppHRegPPC(i->Pin.AvBinaryInt128.src2);
+ return;
+ case Pin_AvTernaryInt128:
+ vex_printf("%s ", showPPCAvOpTri128(i->Pin.AvTernaryInt128.op));
+ ppHRegPPC(i->Pin.AvTernaryInt128.dst);
+ vex_printf(",");
+ ppHRegPPC(i->Pin.AvTernaryInt128.src1);
+ vex_printf(",");
+ ppHRegPPC(i->Pin.AvTernaryInt128.src2);
+ vex_printf(",");
+ ppHRegPPC(i->Pin.AvTernaryInt128.src3);
+ return;
case Pin_AvBin8x16:
vex_printf("%s(b) ", showPPCAvOp(i->Pin.AvBin8x16.op));
ppHRegPPC(i->Pin.AvBin8x16.dst);
ppHRegPPC(i->Pin.Dfp128Cmp.dst);
vex_printf(",8,28,31");
return;
+
+ case Pin_XFormUnary994:
+ if (i->Pin.XFormUnary994.op == Px_DFPTOIQS) {
+ vex_printf("%s(w) ", showXFormUnary994(i->Pin.XFormUnary994.op));
+ ppHRegPPC(i->Pin.XFormUnary994.reg0);
+ vex_printf(",");
+ ppHRegPPC(i->Pin.XFormUnary994.reg1);
+ } else {
+ vex_printf("%s(w) ", showXFormUnary994(i->Pin.XFormUnary994.op));
+ ppHRegPPC(i->Pin.XFormUnary994.reg0);
+ vex_printf(",");
+ ppHRegPPC(i->Pin.XFormUnary994.reg2);
+ }
+ return;
+
case Pin_EvCheck:
/* Note that the counter dec is 32 bit even in 64-bit mode. */
vex_printf("(evCheck) ");
addHRegUse(u, HRmWrite, i->Pin.AvBinaryInt.dst);
addHRegUse(u, HRmRead, i->Pin.AvBinaryInt.src);
return;
+ case Pin_AvBinaryInt128:
+ addHRegUse(u, HRmWrite, i->Pin.AvBinaryInt128.dst);
+ addHRegUse(u, HRmRead, i->Pin.AvBinaryInt128.src1);
+ addHRegUse(u, HRmRead, i->Pin.AvBinaryInt128.src2);
+ return;
+ case Pin_AvTernaryInt128:
+ addHRegUse(u, HRmWrite, i->Pin.AvTernaryInt128.dst);
+ addHRegUse(u, HRmRead, i->Pin.AvTernaryInt128.src1);
+ addHRegUse(u, HRmRead, i->Pin.AvTernaryInt128.src2);
+ addHRegUse(u, HRmRead, i->Pin.AvTernaryInt128.src3);
+ return;
case Pin_AvBin8x16:
addHRegUse(u, HRmWrite, i->Pin.AvBin8x16.dst);
addHRegUse(u, HRmRead, i->Pin.AvBin8x16.srcL);
addHRegUse(u, HRmRead, i->Pin.Dfp128Cmp.srcR_hi);
addHRegUse(u, HRmRead, i->Pin.Dfp128Cmp.srcR_lo);
return;
+ case Pin_XFormUnary994:
+ if (i->Pin.XFormUnary994.op == Px_DFPTOIQS) {
+ addHRegUse(u, HRmWrite, i->Pin.XFormUnary994.reg0);
+ addHRegUse(u, HRmRead, i->Pin.XFormUnary994.reg1);
+ addHRegUse(u, HRmRead, i->Pin.XFormUnary994.reg2);
+ } else {
+ addHRegUse(u, HRmWrite, i->Pin.XFormUnary994.reg0);
+ addHRegUse(u, HRmWrite, i->Pin.XFormUnary994.reg1);
+ addHRegUse(u, HRmRead, i->Pin.XFormUnary994.reg2);
+ }
+ return;
case Pin_EvCheck:
/* We expect both amodes only to mention the GSP (r31), so this
is in fact pointless, since GSP isn't allocatable, but
mapReg(m, &i->Pin.AvBinaryInt.dst);
mapReg(m, &i->Pin.AvBinaryInt.src);
return;
+ case Pin_AvBinaryInt128:
+ mapReg(m, &i->Pin.AvBinaryInt128.dst);
+ mapReg(m, &i->Pin.AvBinaryInt128.src1);
+ mapReg(m, &i->Pin.AvBinaryInt128.src2);
+ return;
+ case Pin_AvTernaryInt128:
+ mapReg(m, &i->Pin.AvTernaryInt128.dst);
+ mapReg(m, &i->Pin.AvTernaryInt128.src1);
+ mapReg(m, &i->Pin.AvTernaryInt128.src2);
+ mapReg(m, &i->Pin.AvTernaryInt128.src3);
+ return;
case Pin_AvBin8x16:
mapReg(m, &i->Pin.AvBin8x16.dst);
mapReg(m, &i->Pin.AvBin8x16.srcL);
mapReg(m, &i->Pin.Dfp128Cmp.srcR_hi);
mapReg(m, &i->Pin.Dfp128Cmp.srcR_lo);
return;
+ case Pin_XFormUnary994:
+ mapReg(m, &i->Pin.XFormUnary994.reg0);
+ mapReg(m, &i->Pin.XFormUnary994.reg1);
+ mapReg(m, &i->Pin.XFormUnary994.reg2);
+ return;
case Pin_EvCheck:
/* We expect both amodes only to mention the GSP (r31), so this
is in fact pointless, since GSP isn't allocatable, but
return emit32(p, theInstr, endness_host);
}
+static UChar* mkFormX994 ( UChar* p, UInt inst_sel,
+ UInt rdst, UInt rsrc, VexEndness endness_host )
+{
+ /* This issues an X-Form instruction with opc1 = 63 and opc2 = 994. The
+ specific instruction is given in bits[11:15]. */
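+ /* For example (illustrative), inst_sel = 0 (dcffixqq), rdst = 10 and
+ rsrc = 3 assemble to (63<<26)|(10<<21)|(0<<16)|(3<<11)|(994<<1)
+ = 0xFD401FC4. */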
+ UInt theInstr;
+ vassert(inst_sel < 0x2);
+ vassert(rdst < 0x20);
+ vassert(rsrc < 0x20);
+ theInstr = ((63<<26) | (rdst<<21) | (inst_sel<<16) | (rsrc<<11) | (994 << 1));
+ return emit32(p, theInstr, endness_host);
+}
+
static UChar* mkFormXL ( UChar* p, UInt opc1, UInt f1, UInt f2,
UInt f3, UInt opc2, UInt b0, VexEndness endness_host )
{
case Pfp_IDUTOQ: // xscvudqp
p = mkFormVXR0( p, 63, fr_dst, 2, fr_src, 836, 0, endness_host );
break;
+ case Pfp_IQSTOQ: // xscvsqqp
+ p = mkFormVXR0( p, 63, fr_dst, 11, fr_src, 836, 0, endness_host );
+ break;
+ case Pfp_IQUTOQ: // xscvuqqp
+ p = mkFormVXR0( p, 63, fr_dst, 3, fr_src, 836, 0, endness_host );
+ break;
+ case Pfp_TRUNCFPQTOISQ: // xscvqpsqz
+ p = mkFormVXR0( p, 63, fr_dst, 8, fr_src, 836, 0, endness_host );
+ break;
case Pfp_TRUNCFPQTOISD: // xscvqpsdz
p = mkFormVXR0( p, 63, fr_dst, 25, fr_src, 836, 0, endness_host );
break;
- case Pfp_TRUNCFPQTOISW: // xscvqpswz
+ case Pfp_TRUNCFPQTOISW: // xscvqpswz
p = mkFormVXR0( p, 63, fr_dst, 9, fr_src, 836, 0, endness_host );
break;
- case Pfp_TRUNCFPQTOIUD: // xscvqpudz
+ case Pfp_TRUNCFPQTOIUQ: // xscvqpuqz
+ p = mkFormVXR0( p, 63, fr_dst, 0, fr_src, 836, 0, endness_host );
+ break;
+ case Pfp_TRUNCFPQTOIUD: // xscvqpudz
p = mkFormVXR0( p, 63, fr_dst, 17, fr_src, 836, 0, endness_host );
break;
case Pfp_TRUNCFPQTOIUW: // xscvqpuwz
goto done;
}
+ case Pin_AvBinaryInt128: {
+ UInt dst = vregEnc(i->Pin.AvBinaryInt128.dst);
+ UInt src1 = vregEnc(i->Pin.AvBinaryInt128.src1);
+ UInt src2 = vregEnc(i->Pin.AvBinaryInt128.src2);
+ int opc2;
+
+ switch (i->Pin.AvBinaryInt128.op) {
+ case Pav_DivS128: opc2 = 267; break; //vdivsq
+ case Pav_DivU128: opc2 = 11; break; //vdivuq
+ case Pav_DivU128E: opc2 = 523; break; //vdiveuq
+ case Pav_DivS128E: opc2 = 779; break; //vdivesq
+ case Pav_ModS128: opc2 = 1803; break; //vmodsq
+ case Pav_ModU128: opc2 = 1547; break; //vmoduq
+
+ default:
+ goto bad;
+ }
+ p = mkFormVX( p, 4, dst, src1, src2, opc2, endness_host );
+ goto done;
+ }
+
+ case Pin_AvTernaryInt128: {
+ UInt dst = vregEnc(i->Pin.AvTernaryInt128.dst);
+ UInt src1 = vregEnc(i->Pin.AvTernaryInt128.src1);
+ UInt src2 = vregEnc(i->Pin.AvTernaryInt128.src2);
+ UInt src3 = vregEnc(i->Pin.AvTernaryInt128.src3);
+ int opc2;
+
+ switch (i->Pin.AvTernaryInt128.op) {
+ case Pav_2xMultU64Add128CarryOut: opc2 = 23; break; //vmsumcud
+
+ default:
+ goto bad;
+ }
+ p = mkFormVA( p, 4, dst, src1, src2, src3, opc2, endness_host );
+ goto done;
+ }
+
case Pin_AvBin8x16: {
UInt v_dst = vregEnc(i->Pin.AvBin8x16.dst);
UInt v_srcL = vregEnc(i->Pin.AvBin8x16.srcL);
goto done;
}
+ case Pin_XFormUnary994: {
+
+ switch (i->Pin.XFormUnary994.op) {
+
+ case Px_IQSTODFP: // dcffixqq
+ {
+ UInt dstHi = fregEnc(i->Pin.XFormUnary994.reg0);
+ UInt dstLo = fregEnc(i->Pin.XFormUnary994.reg1);
+ UInt src = vregEnc(i->Pin.XFormUnary994.reg2);
+ Int inst_sel = 0;
+
+ /* Issue the instruction with the 128-bit integer source operand.
+ dcffixqq writes its result to the fixed FPR pair f10/f11 (VSR[10],
+ VSR[11] dword 0); a fixed even/odd pair is used since the allocated
+ registers need not form a valid pair. */
+ p = mkFormX994( p, inst_sel, 10, src, endness_host );
+
+ /* Move the results to the destination floating point register pair
+ using fmr (opc1=63, opc2=72). Floating point regs are VSR[0] to
+ VSR[31]. */
+ p = mkFormX( p, 63, dstHi, 0, 10, 72, 0, endness_host );
+ p = mkFormX( p, 63, dstLo, 0, 11, 72, 0, endness_host );
+ break;
+ }
+
+ case Px_DFPTOIQS: // dctfixqq
+ {
+ UInt dstVSR = vregEnc(i->Pin.XFormUnary994.reg0);
+ UInt srcHi = fregEnc(i->Pin.XFormUnary994.reg1);
+ UInt srcLo = fregEnc(i->Pin.XFormUnary994.reg2);
+ Int inst_sel = 1;
+
+ /* Set up the fixed scratch pair f10/f11 with the upper and lower
+ registers of the source operand register pair (fmr). */
+ p = mkFormX( p, 63, 10, 0, srcHi, 72, 0, endness_host );
+ p = mkFormX( p, 63, 11, 0, srcLo, 72, 0, endness_host );
+
+ /* Issue the instruction; the two 64-bit source operands are in
+ floating point registers VSR(10,11). */
+ p = mkFormX994( p, inst_sel, dstVSR, 10, endness_host );
+ break;
+ }
+ default:
+ goto bad;
+ }
+ goto done;
+ }
+
case Pin_EvCheck: {
/* This requires a 32-bit dec/test in both 32- and 64-bit
modes. */
Pfp_FPDTOQ,
Pfp_IDSTOQ,
Pfp_IDUTOQ,
+ Pfp_IQSTOQ,
+ Pfp_IQUTOQ,
+ Pfp_TRUNCFPQTOISQ,
Pfp_TRUNCFPQTOISD,
Pfp_TRUNCFPQTOISW,
+ Pfp_TRUNCFPQTOIUQ,
Pfp_TRUNCFPQTOIUD,
Pfp_TRUNCFPQTOIUW,
Pfp_DFPADD, Pfp_DFPADDQ,
Pfp_DFPMUL, Pfp_DFPMULQ,
Pfp_DFPDIV, Pfp_DFPDIVQ,
Pfp_DQUAQ, Pfp_DRRNDQ,
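+ /* DFP <-> 128-bit integer conversions (dctfixqq / dcffixqq) */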
+ Pfp_DFPTOIQ, Pfp_IQUTODFP,
/* Binary */
Pfp_ADDD, Pfp_SUBD, Pfp_MULD, Pfp_DIVD,
extern const HChar* showPPCAvOp ( PPCAvOp );
+typedef
+ enum {
+ Pav_INVALIDBinary128,
+
+ /* 128-bit integer binary divide / modulo */
+ Pav_DivU128, Pav_DivS128, Pav_DivU128E, Pav_DivS128E,
+ Pav_ModU128, Pav_ModS128,
+ }
+ PPCAvOpBin128;
+
+extern const HChar* showPPCAvOpBin128 ( PPCAvOpBin128 );
+
+typedef
+ enum {
+ Pav_INVALIDTri128,
+
+ /* 128-bit integer */
+ Pav_2xMultU64Add128CarryOut,
+ }
+ PPCAvOpTri128;
+
+extern const HChar* showPPCAvOpTri128 ( PPCAvOpTri128 );
+
+typedef
+ enum {
+ Px_INVALID_XFormUnary994,
+
+ /* DFP <-> 128-bit integer conversions */
+ Px_DFPTOIQS, Px_IQSTODFP,
+ }
+ PPCXFormUnary994;
+
+extern const HChar* showXFormUnary994 ( PPCXFormUnary994 );
/* --------- */
typedef
Pin_AvBinary, /* AV binary general reg,reg=>reg */
Pin_AvBinaryInt,/* AV binary reg,int=>reg */
+ Pin_AvBinaryInt128, /* AV binary, two 128-bit int regs => 128-bit reg */
+ Pin_AvTernaryInt128,/* AV ternary, three 128-bit int regs => 128-bit reg */
Pin_AvBin8x16, /* AV binary, 8x4 */
Pin_AvBin16x8, /* AV binary, 16x4 */
Pin_AvBin32x4, /* AV binary, 32x4 */
* round */
Pin_DfpQuantize128, /* D128 quantize using register value, significance
* round */
+
+ Pin_XFormUnary994, /* X-form instructions with opc1=63, opc2=994 */
Pin_EvCheck, /* Event check */
Pin_ProfInc /* 64-bit profile counter increment */
}
HReg src;
PPCRI* val;
} AvBinaryInt;
+ struct {
+ PPCAvOpBin128 op;
+ HReg dst;
+ HReg src1;
+ HReg src2;
+ } AvBinaryInt128;
+ struct {
+ PPCAvOpTri128 op;
+ HReg dst;
+ HReg src1;
+ HReg src2;
+ HReg src3;
+ } AvTernaryInt128;
struct {
PPCAvOp op;
HReg dst;
HReg srcR_hi;
HReg srcR_lo;
} Dfp128Cmp;
+ struct {
+ PPCXFormUnary994 op;
+ HReg reg0;
+ HReg reg1;
+ HReg reg2;
+ } XFormUnary994;
struct {
PPCAMode* amCounter;
PPCAMode* amFailAddr;
extern PPCInstr* PPCInstr_AvUnary ( PPCAvOp op, HReg dst, HReg src );
extern PPCInstr* PPCInstr_AvBinary ( PPCAvOp op, HReg dst, HReg srcL, HReg srcR );
extern PPCInstr* PPCInstr_AvBinaryInt( PPCAvOp op, HReg dst, HReg src, PPCRI* val );
+extern PPCInstr* PPCInstr_AvBinaryInt128( PPCAvOpBin128 op, HReg dst,
+ HReg src1, HReg src2 );
+extern PPCInstr* PPCInstr_AvTernaryInt128( PPCAvOpTri128 op, HReg dst,
+ HReg src1, HReg src2, HReg src3 );
extern PPCInstr* PPCInstr_AvBin8x16 ( PPCAvOp op, HReg dst, HReg srcL, HReg srcR );
extern PPCInstr* PPCInstr_AvBin16x8 ( PPCAvOp op, HReg dst, HReg srcL, HReg srcR );
extern PPCInstr* PPCInstr_AvBin32x4 ( PPCAvOp op, HReg dst, HReg srcL, HReg srcR );
extern PPCInstr* PPCInstr_Dfp64Cmp ( HReg dst, HReg srcL, HReg srcR );
extern PPCInstr* PPCInstr_Dfp128Cmp ( HReg dst, HReg srcL_hi, HReg srcL_lo,
HReg srcR_hi, HReg srcR_lo );
+extern PPCInstr* PPCInstr_XFormUnary994 ( PPCXFormUnary994 op, HReg dst,
+ HReg srcHi, HReg srcLo );
extern PPCInstr* PPCInstr_EvCheck ( PPCAMode* amCounter,
PPCAMode* amFailAddr );
extern PPCInstr* PPCInstr_ProfInc ( void );
*rHi = iselWordExpr_R(env, e->Iex.Binop.arg1, IEndianess);
*rLo = iselWordExpr_R(env, e->Iex.Binop.arg2, IEndianess);
return;
+
+ case Iop_D128toI128S: {
+ HReg srcHi = INVALID_HREG;
+ HReg srcLo = INVALID_HREG;
+ HReg dstLo = newVRegI(env);
+ HReg dstHi = newVRegI(env);
+ HReg tmp = newVRegV(env);
+ PPCAMode* am_addr;
+ PPCAMode* am_addr4;
+
+ /* Get the D128 value, stored as two 64-bit halves */
+ iselDfp128Expr( &srcHi, &srcLo, env, e->Iex.Binop.arg2, IEndianess );
+
+ sub_from_sp( env, 16 ); // Move SP down 16 bytes
+ am_addr = PPCAMode_IR( 0, StackFramePtr(mode64) );
+ am_addr4 = advance4(env, am_addr);
+
+ addInstr(env, PPCInstr_XFormUnary994(Px_DFPTOIQS, tmp, srcHi, srcLo));
+
+ // store the result in the VSR
+ addInstr(env, PPCInstr_AvLdSt( False/*store*/, 16, tmp, am_addr ));
+
+ // load the two Ity_64 values
+ addInstr(env, PPCInstr_Load( 8, dstHi, am_addr, mode64 ));
+ addInstr(env, PPCInstr_Load( 8, dstLo, am_addr4, mode64 ));
+
+ *rHi = dstHi;
+ *rLo = dstLo;
+
+ add_to_sp( env, 16 ); // Reset SP
+ return;
+ }
+
default:
break;
}
/* --------- UNARY ops --------- */
if (e->tag == Iex_Unop) {
switch (e->Iex.Unop.op) {
+ case Iop_ReinterpV128asI128:
+ case Iop_ReinterpF128asI128: {
+ HReg src;
+ HReg dstLo = newVRegI(env);
+ HReg dstHi = newVRegI(env);
+ PPCAMode* am_addr;
+ PPCAMode* am_addr4;
+
+ if (e->Iex.Unop.op == Iop_ReinterpF128asI128)
+ src = iselFp128Expr(env, e->Iex.Unop.arg, IEndianess);
+ else
+ src = iselVecExpr(env, e->Iex.Unop.arg, IEndianess);
+
+ sub_from_sp( env, 16 ); // Move SP down 16 bytes
+ am_addr = PPCAMode_IR( 0, StackFramePtr(mode64) );
+ am_addr4 = advance4(env, am_addr);
+
+ // store the Ity_F128 value
+ addInstr(env, PPCInstr_AvLdSt( False/*store*/, 16, src, am_addr ));
+
+ // load the two Ity_64 values
+ addInstr(env, PPCInstr_Load( 8, dstHi, am_addr, mode64 ));
+ addInstr(env, PPCInstr_Load( 8, dstLo, am_addr4, mode64 ));
+
+ *rHi = dstHi;
+ *rLo = dstLo;
+ add_to_sp( env, 16 ); // Reset SP
+ return;
+ }
default:
break;
}
fpop = Pfp_TRUNCFPQTOIUD; goto do_Un_F128;
case Iop_TruncF128toI32U:
fpop = Pfp_TRUNCFPQTOIUW; goto do_Un_F128;
+ case Iop_TruncF128toI128U:
+ fpop = Pfp_TRUNCFPQTOIUQ; goto do_Un_F128;
+ case Iop_TruncF128toI128S:
+ fpop = Pfp_TRUNCFPQTOISQ; goto do_Un_F128;
do_Un_F128: {
HReg r_dst = newVRegV(env);
return r_dst;
}
+ case Iop_ReinterpI128asF128:
+ {
+ PPCAMode* am_addr;
+ PPCAMode* am_addr4;
+ HReg rHi = INVALID_HREG;
+ HReg rLo = INVALID_HREG;
+ HReg dst = newVRegV(env);
+
+ iselInt128Expr(&rHi,&rLo, env, e->Iex.Unop.arg, IEndianess);
+
+ sub_from_sp( env, 16 ); // Move SP down 16 bytes
+ am_addr = PPCAMode_IR( 0, StackFramePtr(mode64) );
+ am_addr4 = advance4(env, am_addr);
+
+ // store the two 64-bit halves
+ addInstr(env, PPCInstr_Store( 8, am_addr, rHi, mode64 ));
+ addInstr(env, PPCInstr_Store( 8, am_addr4, rLo, mode64 ));
+
+ // load as Ity_F128
+ addInstr(env, PPCInstr_AvLdSt( True/*fetch*/, 16, dst, am_addr ));
+
+ add_to_sp( env, 16 ); // Reset SP
+ return dst;
+ }
+
default:
break;
} /* switch (e->Iex.Unop.op) */
return r_dst;
}
+ case Iop_I128StoF128:
+ fpop = Pfp_IQSTOQ; goto do_Un_I128_F128_DFP_conversions;
+ case Iop_I128UtoF128:
+ fpop = Pfp_IQUTOQ; goto do_Un_I128_F128_DFP_conversions;
+ do_Un_I128_F128_DFP_conversions: {
+ PPCAMode* am_addr;
+ PPCAMode* am_addr4;
+ HReg rHi, rLo;
+ HReg r_tmp = newVRegV(env);
+ HReg r_dst = newVRegV(env);
+
+ iselInt128Expr(&rHi,&rLo, env, e->Iex.Binop.arg2, IEndianess);
+
+ /* Set host rounding mode for the conversion instruction */
+ set_FPU_rounding_mode( env, e->Iex.Binop.arg1, IEndianess );
+
+ sub_from_sp( env, 16 );
+
+ am_addr = PPCAMode_IR( 0, StackFramePtr(mode64) );
+ am_addr4 = advance4(env, am_addr);
+
+ // store the two 64-bit halves of the I128
+ addInstr(env, PPCInstr_Store( 8, am_addr, rHi, mode64 ));
+ addInstr(env, PPCInstr_Store( 8, am_addr4, rLo, mode64 ));
+
+ /* Fetch the I128 into a V128 register */
+ addInstr(env, PPCInstr_AvLdSt( True/*fetch*/, 16, r_tmp, am_addr ));
+ addInstr(env, PPCInstr_Fp128Unary(fpop, r_dst, r_tmp));
+
+ add_to_sp( env, 16 ); // Reset SP
+
+ return r_dst;
+ }
+
default:
break;
} /* switch (e->Iex.Binop.op) */
*rLo = r_dstLo;
return;
}
+
+ case Iop_I128StoD128: {
+ HReg tmpF128 = newVRegV(env);
+ HReg FdstHi = newVRegF(env);
+ HReg FdstLo = newVRegF(env);
+ HReg srcLo = newVRegI(env);
+ HReg srcHi = newVRegI(env);
+ PPCAMode* am_addr;
+ PPCAMode* am_addr4;
+
+ set_FPU_DFP_rounding_mode( env, e->Iex.Binop.arg1, IEndianess );
+
+ // Get the I128 value, store into a VSR register
+ iselInt128Expr(&srcHi, &srcLo, env, e->Iex.Binop.arg2, IEndianess);
+
+ sub_from_sp( env, 16 ); // Move SP down 16 bytes
+ am_addr = PPCAMode_IR( 0, StackFramePtr(env->mode64) );
+ am_addr4 = advance4(env, am_addr);
+
+ addInstr(env, PPCInstr_Store( 8, am_addr, srcHi, env->mode64 ));
+ addInstr(env, PPCInstr_Store( 8, am_addr4, srcLo, env->mode64 ));
+
+ // load as Ity_F128
+ addInstr(env, PPCInstr_AvLdSt( True/*fetch*/, 16, tmpF128, am_addr ));
+
+ // do conversion
+ addInstr( env, PPCInstr_XFormUnary994( Px_IQSTODFP, FdstHi, FdstLo,
+ tmpF128 ) );
+
+ *rHi = FdstHi;
+ *rLo = FdstLo;
+ add_to_sp( env, 16 ); // Reset SP
+ return;
+ }
+
default:
vex_printf( "ERROR: iselDfp128Expr_wrk, UNKNOWN binop case %d\n",
(Int)e->Iex.Binop.op );
Bool mode64 = env->mode64;
PPCAvOp op = Pav_INVALID;
PPCAvFpOp fpop = Pavfp_INVALID;
+ PPCAvOpBin128 opav128 = Pav_INVALIDBinary128;
+ PPCAvOpTri128 optri128 = Pav_INVALIDTri128;
IRType ty = typeOfIRExpr(env->type_env,e);
vassert(e);
vassert(ty == Ity_V128);
return dst;
}
+ case Iop_ReinterpI128asV128: {
+ PPCAMode* am_addr;
+ PPCAMode* am_addr4;
+ HReg rHi, rLo;
+ HReg dst = newVRegV(env);
+
+ iselInt128Expr(&rHi,&rLo, env, e->Iex.Unop.arg, IEndianess);
+
+ sub_from_sp( env, 16 ); // Move SP down 16 bytes
+ am_addr = PPCAMode_IR( 0, StackFramePtr(mode64) );
+ am_addr4 = advance4(env, am_addr);
+
+ // store the two 64-bit halves
+ addInstr(env, PPCInstr_Store( 8, am_addr, rHi, mode64 ));
+ addInstr(env, PPCInstr_Store( 8, am_addr4, rLo, mode64 ));
+
+ // load as Ity_V128
+ addInstr(env, PPCInstr_AvLdSt( True/*fetch*/, 16, dst, am_addr ));
+
+ add_to_sp( env, 16 ); // Reset SP
+ return dst;
+ }
+
default:
break;
} /* switch (e->Iex.Unop.op) */
return dst;
}
+ case Iop_DivU128: opav128 = Pav_DivU128; goto do_IntArithBinaryI128;
+ case Iop_DivS128: opav128 = Pav_DivS128; goto do_IntArithBinaryI128;
+ case Iop_DivU128E: opav128 = Pav_DivU128E; goto do_IntArithBinaryI128;
+ case Iop_DivS128E: opav128 = Pav_DivS128E; goto do_IntArithBinaryI128;
+ case Iop_ModU128: opav128 = Pav_ModU128; goto do_IntArithBinaryI128;
+ case Iop_ModS128: opav128 = Pav_ModS128; goto do_IntArithBinaryI128;
+ do_IntArithBinaryI128: {
+ HReg arg1 = iselVecExpr(env, e->Iex.Binop.arg1, IEndianess);
+ HReg arg2 = iselVecExpr(env, e->Iex.Binop.arg2, IEndianess);
+ HReg dst = newVRegV(env);
+ addInstr(env, PPCInstr_AvBinaryInt128(opav128, dst, arg1, arg2));
+ return dst;
+ }
+
default:
break;
} /* switch (e->Iex.Binop.op) */
return dst;
}
+ case Iop_2xMultU64Add128CarryOut:
+ optri128 = Pav_2xMultU64Add128CarryOut; goto do_IntArithTrinaryI128;
+ do_IntArithTrinaryI128: {
+ HReg arg1 = iselVecExpr(env, triop->arg1, IEndianess);
+ HReg arg2 = iselVecExpr(env, triop->arg2, IEndianess);
+ HReg arg3 = iselVecExpr(env, triop->arg3, IEndianess);
+ HReg dst = newVRegV(env);
+ addInstr(env, PPCInstr_AvTernaryInt128(optri128, dst, arg1, arg2,
+ arg3));
+ return dst;
+ }
+
default:
break;
} /* switch (e->Iex.Triop.op) */
case Iop_MulF128: vex_printf("MulF128"); return;
case Iop_DivF128: vex_printf("DivF128"); return;
+ case Iop_TruncF128toI128S: vex_printf("TruncF128toI128S"); return;
case Iop_TruncF128toI64S: vex_printf("TruncF128toI64S"); return;
case Iop_TruncF128toI32S: vex_printf("TruncF128toI32S"); return;
+ case Iop_TruncF128toI128U: vex_printf("TruncF128toI128U"); return;
case Iop_TruncF128toI64U: vex_printf("TruncF128toI64U"); return;
case Iop_TruncF128toI32U: vex_printf("TruncF128toI32U"); return;
case Iop_F128LOtoF64: vex_printf("F128LOtoF64"); return;
case Iop_I32StoF128: vex_printf("I32StoF128"); return;
case Iop_I64StoF128: vex_printf("I64StoF128"); return;
+ case Iop_I128StoF128: vex_printf("I128StoF128"); return;
case Iop_I32UtoF128: vex_printf("I32UtoF128"); return;
case Iop_I64UtoF128: vex_printf("I64UtoF128"); return;
+ case Iop_I128UtoF128: vex_printf("I128UtoF128"); return;
case Iop_F128toI32S: vex_printf("F128toI32S"); return;
case Iop_F128toI64S: vex_printf("F128toI64S"); return;
case Iop_F128toI32U: vex_printf("F128toI32U"); return;
case Iop_F128toF32: vex_printf("F128toF32"); return;
case Iop_F128toI128S: vex_printf("F128toI128S"); return;
case Iop_RndF128: vex_printf("RndF128"); return;
+ case Iop_I128StoD128: vex_printf("I128StoD128"); return;
+ case Iop_D128toI128S: vex_printf("D128toI128S"); return;
case Iop_MAddF32: vex_printf("MAddF32"); return;
case Iop_MSubF32: vex_printf("MSubF32"); return;
case Iop_RoundF32toInt: vex_printf("RoundF32toInt"); return;
case Iop_RoundF64toF32: vex_printf("RoundF64toF32"); return;
+ case Iop_ReinterpV128asI128: vex_printf("ReinterpV128asI128"); return;
+ case Iop_ReinterpI128asV128: vex_printf("ReinterpI128asV128"); return;
+ case Iop_ReinterpF128asI128: vex_printf("ReinterpF128asI128"); return;
+ case Iop_ReinterpI128asF128: vex_printf("ReinterpI128asF128"); return;
case Iop_ReinterpF64asI64: vex_printf("ReinterpF64asI64"); return;
case Iop_ReinterpI64asF64: vex_printf("ReinterpI64asF64"); return;
case Iop_ReinterpF32asI32: vex_printf("ReinterpF32asI32"); return;
case Iop_PwBitMtxXpose64x2: vex_printf("BitMatrixTranspose64x2"); return;
+ case Iop_DivU128: vex_printf("DivU128"); return;
+ case Iop_DivS128: vex_printf("DivS128"); return;
+ case Iop_DivU128E: vex_printf("DivU128E"); return;
+ case Iop_DivS128E: vex_printf("DivS128E"); return;
+ case Iop_ModU128: vex_printf("ModU128"); return;
+ case Iop_ModS128: vex_printf("ModS128"); return;
+ case Iop_2xMultU64Add128CarryOut: vex_printf("2xMultU64Add128CarryOut");
+ return;
+
default: vpanic("ppIROp(1)");
}
case Iop_DivU64E: case Iop_DivS64E: case Iop_DivU32E: case Iop_DivS32E:
case Iop_DivModU64to32: case Iop_DivModS64to32: case Iop_DivModU128to64:
case Iop_DivModS128to64: case Iop_DivModS64to64: case Iop_DivModU64to64:
- case Iop_DivModS32to32: case Iop_DivModU32to32:
+ case Iop_DivModS32to32: case Iop_DivModU32to32: case Iop_DivU128:
+ case Iop_DivS128: case Iop_DivU128E: case Iop_DivS128E: case Iop_ModU128:
+ case Iop_ModS128:
return True;
// All the rest are non-trapping
case Iop_I64StoF32: case Iop_F32toF64: case Iop_F64toF32:
case Iop_ReinterpF64asI64: case Iop_ReinterpI64asF64:
case Iop_ReinterpF32asI32: case Iop_ReinterpI32asF32:
+ case Iop_ReinterpV128asI128: case Iop_ReinterpI128asV128:
+ case Iop_ReinterpF128asI128: case Iop_ReinterpI128asF128:
case Iop_F64HLtoF128: case Iop_F128HItoF64: case Iop_F128LOtoF64:
case Iop_AddF128: case Iop_SubF128: case Iop_MulF128: case Iop_DivF128:
case Iop_MAddF128: case Iop_MSubF128: case Iop_NegMAddF128:
case Iop_I32UtoF128: case Iop_I64UtoF128: case Iop_F32toF128:
case Iop_F64toF128: case Iop_F128toI32S: case Iop_F128toI64S:
case Iop_F128toI32U: case Iop_F128toI64U: case Iop_F128toI128S:
- case Iop_F128toF64: case Iop_F128toF32: case Iop_RndF128:
- case Iop_TruncF128toI32S: case Iop_TruncF128toI32U: case Iop_TruncF128toI64U:
+ case Iop_F128toF64: case Iop_F128toF32: case Iop_I128UtoF128:
+ case Iop_I128StoD128: case Iop_D128toI128S:
+ case Iop_I128StoF128: case Iop_RndF128:
+ case Iop_TruncF128toI32S: case Iop_TruncF128toI32U:
+ case Iop_TruncF128toI64U: case Iop_TruncF128toI128U:
+ case Iop_TruncF128toI128S:
case Iop_TruncF128toI64S: case Iop_AtanF64: case Iop_Yl2xF64:
case Iop_Yl2xp1F64: case Iop_PRemF64: case Iop_PRemC3210F64:
case Iop_PRem1F64: case Iop_PRem1C3210F64: case Iop_ScaleF64:
case Iop_Max32Fx8: case Iop_Min32Fx8:
case Iop_Max64Fx4: case Iop_Min64Fx4:
case Iop_Rotx32: case Iop_Rotx64:
+ case Iop_2xMultU64Add128CarryOut:
return False;
case Iop_INVALID: case Iop_LAST:
case Iop_F64toF16: BINARY(ity_RMode,Ity_F64, Ity_F16);
case Iop_F32toF16: BINARY(ity_RMode,Ity_F32, Ity_F16);
+ case Iop_ReinterpV128asI128: UNARY(Ity_V128, Ity_I128);
+ case Iop_ReinterpI128asV128: UNARY(Ity_I128, Ity_V128);
+ case Iop_ReinterpI128asF128: UNARY(Ity_I128, Ity_F128);
+ case Iop_ReinterpF128asI128: UNARY(Ity_F128, Ity_I128);
case Iop_ReinterpI64asF64: UNARY(Ity_I64, Ity_F64);
case Iop_ReinterpF64asI64: UNARY(Ity_F64, Ity_I64);
case Iop_ReinterpI32asF32: UNARY(Ity_I32, Ity_F32);
case Iop_ReinterpF32asI32: UNARY(Ity_F32, Ity_I32);
+ case Iop_I128StoF128: BINARY(ity_RMode, Ity_I128, Ity_F128);
+ case Iop_I128UtoF128: BINARY(ity_RMode, Ity_I128, Ity_F128);
+ case Iop_I128StoD128: BINARY(ity_RMode, Ity_I128, Ity_D128);
+ case Iop_D128toI128S: BINARY(ity_RMode, Ity_D128, Ity_I128);
+
case Iop_AtanF64: case Iop_Yl2xF64: case Iop_Yl2xp1F64:
case Iop_ScaleF64: case Iop_PRemF64: case Iop_PRem1F64:
TERNARY(ity_RMode,Ity_F64,Ity_F64, Ity_F64);
case Iop_MulI128by10E:
case Iop_MulI128by10ECarry:
case Iop_PwExtUSMulQAdd8x16:
+ case Iop_DivU128: case Iop_DivS128:
+ case Iop_DivU128E: case Iop_DivS128E:
+ case Iop_ModU128: case Iop_ModS128:
BINARY(Ity_V128,Ity_V128, Ity_V128);
+ case Iop_2xMultU64Add128CarryOut:
case Iop_Perm8x16x2:
TERNARY(Ity_V128, Ity_V128, Ity_V128, Ity_V128);
case Iop_TruncF128toI32S:
case Iop_TruncF128toI64S:
+ case Iop_TruncF128toI128S:
case Iop_TruncF128toI32U:
case Iop_TruncF128toI64U:
+ case Iop_TruncF128toI128U:
UNARY(Ity_F128, Ity_F128);
case Iop_F128toI128S:
Iop_DivS32, // ditto, signed
Iop_DivU64, // :: I64,I64 -> I64 (simple div, no mod)
Iop_DivS64, // ditto, signed
- Iop_DivU64E, // :: I64,I64 -> I64 (dividend is 64-bit arg (hi)
- // concat with 64 0's (low))
- Iop_DivS64E, // ditto, signed
+ Iop_DivU128, // :: I128,I128 -> I128 (simple div, no mod)
+ Iop_DivS128, // ditto, signed
+
Iop_DivU32E, // :: I32,I32 -> I32 (dividend is 32-bit arg (hi)
// concat with 32 0's (low))
Iop_DivS32E, // ditto, signed
+ Iop_DivU64E, // :: I64,I64 -> I64 (dividend is 64-bit arg (hi)
+ // concat with 64 0's (low))
+ Iop_DivS64E, // ditto, signed
+ Iop_DivU128E, // :: I128,I128 -> I128 (dividend is 128-bit arg (hi)
+ // concat with 128 0's (low))
+ Iop_DivS128E, // ditto, signed
Iop_DivModU64to32, // :: I64,I32 -> I64
// of which lo half is div and hi half is mod
Iop_DivModU32to32, // :: I32,I32 -> I64
// of which lo half is div and hi half is mod
+ Iop_ModU128, // :: I128,I128 -> I128 normal modulo operation
+ Iop_ModS128, // ditto, signed
+
/* Integer conversions. Some of these are redundant (eg
Iop_64to8 is the same as Iop_64to32 and then Iop_32to8), but
having a complete set reduces the typical dynamic size of IR
Iop_F32toF64, /* F32 -> F64 */
Iop_F64toF32, /* IRRoundingMode(I32) x F64 -> F32 */
- /* Reinterpretation. Take an F64 and produce an I64 with
- the same bit pattern, or vice versa. */
+ /* Reinterpretation. Take an F32/64/128 or V128 and produce an
+ I32/64/128 with the same bit pattern, or vice versa. */
+ Iop_ReinterpV128asI128, Iop_ReinterpI128asV128,
+ Iop_ReinterpF128asI128, Iop_ReinterpI128asF128,
Iop_ReinterpF64asI64, Iop_ReinterpI64asF64,
Iop_ReinterpF32asI32, Iop_ReinterpI32asF32,
Iop_I64UtoF128, /* unsigned I64 -> F128 */
Iop_F32toF128, /* F32 -> F128 */
Iop_F64toF128, /* F64 -> F128 */
+ Iop_I128UtoF128, /* unsigned I128 -> F128 */
+ Iop_I128StoF128, /* signed I128 -> F128 */
Iop_F128toI32S, /* IRRoundingMode(I32) x F128 -> signed I32 */
Iop_F128toI64S, /* IRRoundingMode(I32) x F128 -> signed I64 */
Iop_TruncF128toI32U, /* truncate F128 -> I32 */
Iop_TruncF128toI64U, /* truncate F128 -> I64 */
Iop_TruncF128toI64S, /* truncate F128 -> I64 */
+ Iop_TruncF128toI128U, /* truncate F128 -> I128 */
+ Iop_TruncF128toI128S, /* truncate F128 -> I128 */
/* --- guest x86/amd64 specifics, not mandated by 754. --- */
/* I64U -> D128 */
Iop_I64UtoD128,
+ /* IRRoundingMode(I32) x I128S -> D128 */
+ Iop_I128StoD128,
+
/* IRRoundingMode(I32) x D64 -> D32 */
Iop_D64toD32,
/* IRRoundingMode(I32) x D128 -> I64 */
Iop_D128toI64U,
+ /* IRRoundingMode(I32) x D128 -> I128 */
+ Iop_D128toI128S,
+
/* IRRoundingMode(I32) x F32 -> D32 */
Iop_F32toD32,
*/
Iop_MulI128by10ECarry,
+ /* 128-bit carry out of ((U64 * U64 -> U128) + (U64 * U64 -> U128) + U128),
+ :: V128 x V128 x V128 -> V128. Used to implement the ISA 3.1
+ vmsumcud instruction. */
+ Iop_2xMultU64Add128CarryOut,
+
/* ------------------ 256-bit SIMD Integer. ------------------ */
/* Pack/unpack */
complainIfUndefined(mce, atom2, NULL);
return assignNew('V', mce, Ity_V128, triop(op, vatom1, atom2, vatom3));
+ /* 128-bit integer three-argument ops */
+ case Iop_2xMultU64Add128CarryOut:
case Iop_Perm8x16x2:
/* (V128, V128, V128) -> V128 */
complainIfUndefined(mce, atom3, NULL);
unary64Fx2_w_rm(mce, vatom1, vatom2),
unary64Fx2_w_rm(mce, vatom1, vatom3)));
-
default:
ppIROp(op);
VG_(tool_panic)("memcheck:expr2vbits_Triop");
case Iop_CmpNEZ128x1:
return binary128Ix1(mce, vatom1, vatom2);
+ case Iop_DivU128:
+ case Iop_DivS128:
+ case Iop_DivU128E:
+ case Iop_DivS128E:
+ case Iop_ModU128:
+ case Iop_ModS128:
+ /* I128 x I128 -> I128 */
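+ /* Handled pessimistically: a single undefined input bit can affect
+ every bit of the quotient/remainder, so mkLazy2 is used rather than
+ a per-lane treatment. */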
+ return mkLazy2(mce, Ity_V128, vatom1, vatom2);
+
case Iop_QNarrowBin64Sto32Sx4:
case Iop_QNarrowBin64Uto32Ux4:
case Iop_QNarrowBin32Sto16Sx8:
complainIfUndefined(mce, atom2, NULL);
return assignNew('V', mce, Ity_V128, binop(op, vatom1, atom2));
+ case Iop_I128UtoF128: /* I128 -> F128 */
+ case Iop_I128StoF128: /* I128 -> F128 */
+ return mkLazy2(mce, Ity_I128, vatom1, vatom2);
+
case Iop_BCDAdd:
case Iop_BCDSub:
return mkLazy2(mce, Ity_V128, vatom1, vatom2);
case Iop_D32toF128:
case Iop_D64toF128:
case Iop_D128toF128:
+ case Iop_I128StoD128:
/* I32(rm) x F32/F64/F128/D32/D64/D128 -> D128/F128 */
return mkLazy2(mce, Ity_I128, vatom1, vatom2);
case Iop_F128toI128S: /* IRRoundingMode(I32) x F128 -> signed I128 */
case Iop_RndF128: /* IRRoundingMode(I32) x F128 -> F128 */
+ case Iop_D128toI128S: /* IRRoundingMode(I32) x D128 -> signed I128 */
return mkLazy2(mce, Ity_I128, vatom1, vatom2);
case Iop_F128toI64S: /* IRRoundingMode(I32) x F128 -> signed I64 */
difd(mce, improve(mce, atom1, vatom1),
improve(mce, atom2, vatom2) ) ) );
case Iop_Xor8:
return mkUifU8(mce, vatom1, vatom2);
case Iop_Xor16:
case Iop_ZeroHI96ofV128:
case Iop_ZeroHI112ofV128:
case Iop_ZeroHI120ofV128:
+ case Iop_ReinterpI128asV128: /* I128 -> V128 */
return assignNew('V', mce, Ity_V128, unop(op, vatom));
case Iop_F128HItoF64: /* F128 -> high half of F128 */
case Iop_D128HItoD64: /* D128 -> high half of D128 */
return assignNew('V', mce, Ity_I64, unop(Iop_128HIto64, vatom));
+
case Iop_F128LOtoF64: /* F128 -> low half of F128 */
case Iop_D128LOtoD64: /* D128 -> low half of D128 */
return assignNew('V', mce, Ity_I64, unop(Iop_128to64, vatom));
case Iop_NegF128:
case Iop_AbsF128:
case Iop_RndF128:
+ case Iop_TruncF128toI128S: /* F128 -> I128S */
+ case Iop_TruncF128toI128U: /* F128 -> I128U */
+ case Iop_ReinterpV128asI128: /* V128 -> I128 */
+ case Iop_ReinterpI128asF128:
+ case Iop_ReinterpF128asI128:
return mkPCastTo(mce, Ity_I128, vatom);
case Iop_BCD128toI128S:
return assignNew('V', mce, Ity_I32, unop(op, vatom));
// These are self-shadowing.
+ case Iop_1Sto16:
case Iop_8Sto16:
case Iop_8Uto16:
case Iop_32to16:
{ DEFOP(Iop_Rotx32, UNDEF_ALL), },
{ DEFOP(Iop_Rotx64, UNDEF_ALL), },
{ DEFOP(Iop_PwBitMtxXpose64x2, UNDEF_64x2_TRANSPOSE), .ppc64 = 1, .ppc32 = 1 },
+ { DEFOP(Iop_DivU128, UNDEF_ALL_64x2), .ppc64 = 1, .ppc32 = 1 },
+ { DEFOP(Iop_DivS128, UNDEF_ALL_64x2), .ppc64 = 1, .ppc32 = 1 },
+ { DEFOP(Iop_DivU128E, UNDEF_ALL_64x2), .ppc64 = 1, .ppc32 = 1 },
+ { DEFOP(Iop_DivS128E, UNDEF_ALL_64x2), .ppc64 = 1, .ppc32 = 1 },
+ { DEFOP(Iop_ModU128, UNDEF_ALL_64x2), .ppc64 = 1, .ppc32 = 1 },
+ { DEFOP(Iop_ModS128, UNDEF_ALL_64x2), .ppc64 = 1, .ppc32 = 1 },
+ { DEFOP(Iop_2xMultU64Add128CarryOut, UNDEF_ALL_64x2), .ppc64 = 1, .ppc32 = 1 },
+ { DEFOP(Iop_TruncF128toI128U, UNDEF_ALL_64x2), .ppc64 = 1, .ppc32 = 1 },
+ { DEFOP(Iop_TruncF128toI128S, UNDEF_ALL_64x2), .ppc64 = 1, .ppc32 = 1 },
+ { DEFOP(Iop_I128UtoF128, UNDEF_ALL_64x2), .ppc64 = 1, .ppc32 = 1 },
+ { DEFOP(Iop_I128StoF128, UNDEF_ALL_64x2), .ppc64 = 1, .ppc32 = 1 },
+ { DEFOP(Iop_I128StoD128, UNDEF_ALL_64x2), .ppc64 = 1, .ppc32 = 1 },
+ { DEFOP(Iop_D128toI128S, UNDEF_ALL_64x2), .ppc64 = 1, .ppc32 = 1 },
+ { DEFOP(Iop_ReinterpF128asI128, UNDEF_ALL_64x2), .ppc64 = 1, .ppc32 = 1 },
+ { DEFOP(Iop_ReinterpI128asF128, UNDEF_ALL_64x2), .ppc64 = 1, .ppc32 = 1 },
+ { DEFOP(Iop_ReinterpV128asI128, UNDEF_ALL_64x2), .ppc64 = 1, .ppc32 = 1 },
+ { DEFOP(Iop_ReinterpI128asV128, UNDEF_ALL_64x2), .ppc64 = 1, .ppc32 = 1 },
};
/* Force compile time failure in case libvex_ir.h::IROp was updated
}
}
break;
-
+ case Iop_DivU128:
+ case Iop_DivS128:
+ case Iop_DivU128E:
+ case Iop_DivS128E:
+ case Iop_ModU128:
+ case Iop_ModS128:
+ case Iop_ReinterpV128asI128:
+ case Iop_ReinterpI128asV128:
+ case Iop_ReinterpF128asI128:
+ case Iop_ReinterpI128asF128:
+ case Iop_I128UtoF128:
+ case Iop_I128StoF128:
+ case Iop_TruncF128toI128U:
+ case Iop_TruncF128toI128S:
+ case Iop_I128StoD128:
+ case Iop_D128toI128S:
+ case Iop_2xMultU64Add128CarryOut: {
+ /* These IROps require a processor that supports ISA 3.1 (Power 10)
+ or newer */
+ rc = system(MIN_POWER_ISA " 3.1 ");
+ rc /= 256;
+ /* MIN_POWER_ISA returns 0 if underlying HW supports the
+ * specified ISA or newer. Returns 1 if the HW does not support
+ * the specified ISA. Returns 2 on error.
+ */
+ if (rc == 1) return NULL;
+ if (rc > 2) {
+ panic(" ERROR, min_power_isa() return code is invalid.\n");
+ }
+ }
+ break;
/* Other */
default:
break;
isa_level = 8;
#endif
+#ifdef HAS_ISA_3_00
+ if (debug) printf("HAS_ISA_3_00 is set\n");
+ isa_level = 9;
+#endif
+
+#ifdef HAS_ISA_3_1
+ if (debug) printf("HAS_ISA_3_1 is set\n");
+ isa_level = 10;
+#endif
+
/* return 0 for supported (success), 1 for not supported (failure) */
if (strcmp (min_isa, "2.05") == 0) {
return !(isa_level >= 5);
} else if (strcmp (min_isa, "3.00") == 0) {
return !(isa_level >= 8);
+ } else if (strcmp (min_isa, "3.1") == 0) {
+ return !(isa_level >= 10);
+
} else {
fprintf(stderr, "ERROR: invalid ISA version '%s'. Valid versions numbers are:\n", min_isa);
- fprintf(stderr, " 2.05, 2.06, 2.07, 3.00\n" );
+ fprintf(stderr, " 2.05, 2.06, 2.07, 3.00, 3.1\n" );
exit(2);
}