case Iop_ShlV128: vex_printf("ShlV128"); return;
case Iop_ShrV128: vex_printf("ShrV128"); return;
+ case Iop_ShlN8x16: vex_printf("ShlN8x16"); return;
case Iop_ShlN16x8: vex_printf("ShlN16x8"); return;
case Iop_ShlN32x4: vex_printf("ShlN32x4"); return;
case Iop_ShlN64x2: vex_printf("ShlN64x2"); return;
+ case Iop_ShrN8x16: vex_printf("ShrN8x16"); return;
case Iop_ShrN16x8: vex_printf("ShrN16x8"); return;
case Iop_ShrN32x4: vex_printf("ShrN32x4"); return;
case Iop_ShrN64x2: vex_printf("ShrN64x2"); return;
+ case Iop_SarN8x16: vex_printf("SarN8x16"); return;
case Iop_SarN16x8: vex_printf("SarN16x8"); return;
case Iop_SarN32x4: vex_printf("SarN32x4"); return;
UNARY(Ity_V128, Ity_V128);
case Iop_ShlV128: case Iop_ShrV128:
- case Iop_ShlN16x8: case Iop_ShlN32x4: case Iop_ShlN64x2:
- case Iop_ShrN16x8: case Iop_ShrN32x4: case Iop_ShrN64x2:
- case Iop_SarN16x8: case Iop_SarN32x4:
+ case Iop_ShlN8x16: case Iop_ShlN16x8: case Iop_ShlN32x4: case Iop_ShlN64x2:
+ case Iop_ShrN8x16: case Iop_ShrN16x8: case Iop_ShrN32x4: case Iop_ShrN64x2:
+ case Iop_SarN8x16: case Iop_SarN16x8: case Iop_SarN32x4:
BINARY(Ity_V128, Ity_V128, Ity_I8);
default:
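
(A note on the typing rule above: the new ops fall under BINARY(Ity_V128, Ity_V128, Ity_I8), i.e. they take a V128 and an I8 shift amount and produce a V128, exactly like the existing per-lane immediate shifts. A minimal sketch of how a guest front end might emit one — assuming the usual guest_*_toIR.c helpers binop/mkexpr/mkU8/assign; the temp names t_res, t_src and imm8 are illustrative, not from this patch:

   /* Sketch only: shift each of the 16 byte lanes of t_src
      left by imm8 bits, using the newly added op. */
   assign(t_res, binop(Iop_ShlN8x16, mkexpr(t_src), mkU8(imm8)));
)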
Iop_CmpGT8Ux16, Iop_CmpGT16Ux8, Iop_CmpGT32Ux4,
/* VECTOR x SCALAR SHIFT (shift amt :: Ity_I8) */
- Iop_ShlN16x8, Iop_ShlN32x4, Iop_ShlN64x2,
- Iop_ShrN16x8, Iop_ShrN32x4, Iop_ShrN64x2,
- Iop_SarN16x8, Iop_SarN32x4,
+ Iop_ShlN8x16, Iop_ShlN16x8, Iop_ShlN32x4, Iop_ShlN64x2,
+ Iop_ShrN8x16, Iop_ShrN16x8, Iop_ShrN32x4, Iop_ShrN64x2,
+ Iop_SarN8x16, Iop_SarN16x8, Iop_SarN32x4,
/* VECTOR x VECTOR SHIFT / ROTATE */
Iop_Shl8x16, Iop_Shl16x8, Iop_Shl32x4,