case Iop_Narrow16Ux8: vex_printf("Narrow16Ux8"); return;
case Iop_Narrow32Ux4: vex_printf("Narrow32Ux4"); return;
case Iop_QNarrow16Ux8: vex_printf("QNarrow16Ux8"); return;
+ case Iop_QNarrow32Ux4: vex_printf("QNarrow32Ux4"); return;
case Iop_QNarrow16Sx8: vex_printf("QNarrow16Sx8"); return;
case Iop_QNarrow32Sx4: vex_printf("QNarrow32Sx4"); return;
case Iop_Shr8x16: case Iop_Shr16x8: case Iop_Shr32x4:
case Iop_Sar8x16: case Iop_Sar16x8: case Iop_Sar32x4:
case Iop_Rot8x16: case Iop_Rot16x8: case Iop_Rot32x4:
- case Iop_QNarrow16Ux8:
+ case Iop_QNarrow16Ux8: case Iop_QNarrow32Ux4:
case Iop_QNarrow16Sx8: case Iop_QNarrow32Sx4:
case Iop_Narrow16Ux8: case Iop_Narrow32Ux4:
case Iop_InterleaveHI8x16: case Iop_InterleaveHI16x8:
Iop_Rot8x16, Iop_Rot16x8, Iop_Rot32x4,
/* NARROWING -- narrow 2xV128 into 1xV128, hi half from left arg */
- Iop_QNarrow16Ux8,
- Iop_QNarrow16Sx8,
- Iop_QNarrow32Sx4,
+ Iop_QNarrow16Ux8, Iop_QNarrow32Ux4,
+ Iop_QNarrow16Sx8, Iop_QNarrow32Sx4,
Iop_Narrow16Ux8, Iop_Narrow32Ux4,
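   /* Illustrative usage sketch (editor's addition, not part of the VEX
      sources or of this patch): the narrowing ops listed above are
      ordinary binops on two V128 values, built with IRExpr_Binop, e.g.

         // argHi and argLo are placeholder IRExpr* values of type Ity_V128
         IRExpr* narrowed
            = IRExpr_Binop(Iop_QNarrow32Ux4, argHi, argLo);

      Per the NARROWING comment above, the two V128 args are narrowed
      into a single V128, with the high half of the result taken from
      the left (first) arg and the low half from the right arg. */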
/* INTERLEAVING -- interleave lanes from low or high halves of