case ARM64vecb_ORR: *nm = "orr "; *ar = "all"; return;
case ARM64vecb_XOR: *nm = "eor "; *ar = "all"; return;
case ARM64vecb_CMEQ64x2: *nm = "cmeq"; *ar = "2d"; return;
+ case ARM64vecb_CMEQ32x4: *nm = "cmeq"; *ar = "4s"; return;
+ case ARM64vecb_CMEQ16x8: *nm = "cmeq"; *ar = "8h"; return;
+ case ARM64vecb_CMEQ8x16: *nm = "cmeq"; *ar = "16b"; return;
default: vpanic("showARM64VecBinOp");
}
}
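With the three new rows, a lane-sized compare picks up the mnemonic "cmeq" plus an arrangement of "4s", "8h" or "16b"; assuming a plain <mnemonic> <dst>.<arr>, <srcL>.<arr>, <srcR>.<arr> rendering (register numbers arbitrary), the 32-bit-lane case comes out along the lines of

    cmeq v2.4s, v1.4s, v0.4s

The arrangements track the size field in the encodings below: 11 -> 2d, 10 -> 4s, 01 -> 8h, 00 -> 16b.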
010 01110 10 1 m 000111 n d ORR Vd, Vn, Vm
011 01110 00 1 m 000111 n d EOR Vd, Vn, Vm
011 01110 11 1 m 100011 n d CMEQ Vd.2d, Vn.2d, Vm.2d
+ 011 01110 10 1 m 100011 n d CMEQ Vd.4s, Vn.4s, Vm.4s
+ 011 01110 01 1 m 100011 n d CMEQ Vd.8h, Vn.8h, Vm.8h
+ 011 01110 00 1 m 100011 n d CMEQ Vd.16b, Vn.16b, Vm.16b
+
011 01110 11 1 m 001101 n d CMHI Vd.2d, Vn.2d, Vm.2d >u, ATC
010 01110 11 1 m 001101 n d CMGT Vd.2d, Vn.2d, Vm.2d >s, ATC
*/
case ARM64vecb_CMEQ64x2:
*p++ = X_3_8_5_6_5_5(X011, X01110111, vM, X100011, vN, vD);
break;
+ case ARM64vecb_CMEQ32x4:
+ *p++ = X_3_8_5_6_5_5(X011, X01110101, vM, X100011, vN, vD);
+ break;
+ case ARM64vecb_CMEQ16x8:
+ *p++ = X_3_8_5_6_5_5(X011, X01110011, vM, X100011, vN, vD);
+ break;
+ case ARM64vecb_CMEQ8x16:
+ *p++ = X_3_8_5_6_5_5(X011, X01110001, vM, X100011, vN, vD);
+ break;
default:
goto bad;
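As a quick sanity check of the new encodings, here is a standalone sketch (not part of the patch): assuming X_3_8_5_6_5_5 simply concatenates its six arguments MSB-first into a 3+8+5+6+5+5 = 32-bit word, the hypothetical pack32 below mirrors that and reproduces the .4s row of the comment block for cmeq v2.4s, v1.4s, v0.4s (d=2, n=1, m=0); the register numbers are arbitrary.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for X_3_8_5_6_5_5: concatenate six fields of
   widths 3, 8, 5, 6, 5, 5 bits, most significant first. */
static uint32_t pack32(uint32_t f1, uint32_t f2, uint32_t f3,
                       uint32_t f4, uint32_t f5, uint32_t f6)
{
   return (f1 << 29) | (f2 << 21) | (f3 << 16) | (f4 << 10) | (f5 << 5) | f6;
}

int main(void)
{
   /* 011 01110 10 1 00000 100011 00001 00010  ==  cmeq v2.4s, v1.4s, v0.4s */
   uint32_t w = pack32(0x3 /*011*/, 0x75 /*01110101*/, 0 /*m*/,
                       0x23 /*100011*/, 1 /*n*/, 2 /*d*/);
   printf("0x%08X\n", (unsigned)w);   /* expect 0x6EA08C22 */
   assert(w == 0x6EA08C22u);
   return 0;
}

The .8h and .16b cases only change the 8-bit field (0x73 and 0x71, i.e. size bits 01 and 00), matching the table above. The next hunk is from the instruction selector, where the new ops get used.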
addInstr(env, ARM64Instr_VUnaryV(op, res, arg));
return res;
}
- //ATC case Iop_CmpNEZ8x16:
- //ATC case Iop_CmpNEZ16x8:
- //ATC case Iop_CmpNEZ32x4:
+ case Iop_CmpNEZ8x16:
+ case Iop_CmpNEZ16x8:
+ case Iop_CmpNEZ32x4:
case Iop_CmpNEZ64x2: {
HReg arg = iselV128Expr(env, e->Iex.Unop.arg);
HReg zero = newVRegV(env);
ARM64VecBinOp cmp = ARM64vecb_INVALID;
switch (e->Iex.Unop.op) {
case Iop_CmpNEZ64x2: cmp = ARM64vecb_CMEQ64x2; break;
+ case Iop_CmpNEZ32x4: cmp = ARM64vecb_CMEQ32x4; break;
+ case Iop_CmpNEZ16x8: cmp = ARM64vecb_CMEQ16x8; break;
+ case Iop_CmpNEZ8x16: cmp = ARM64vecb_CMEQ8x16; break;
default: vassert(0);
}
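/* Lanewise, CmpNEZ(x) is just NOT(CMEQ(x, 0)): comparing against a
   zero vector yields an all-ones lane exactly where the lane is zero,
   so inverting that mask gives the not-equal-zero result.  For a
   32-bit lane: x = 5 -> CMEQ 0x00000000, NOT 0xFFFFFFFF; x = 0 ->
   CMEQ 0xFFFFFFFF, NOT 0x00000000.  That is what the zero vreg and
   the lane-sized CMEQ op chosen above are for. */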
// This is pretty feeble. Better: use CMP against zero