/* else fall through */
}
+ /* ----------- VSEL<c>.F64 d_d_d, VSEL<c>.F32 s_s_s ----------- */
+ /* 31 27 22 21 19 15 11 8 7 6 5 4 3
+ T1/A1: 1111 11100 D cc n d 101 1 N 0 M 0 m VSEL<c>.F64 Dd, Dn, Dm
+ T1/A1: 1111 11100 D cc n d 101 0 N 0 M 0 m VSEL<c>.F32 Sd, Sn, Sm
+
+ ARM encoding is in NV space.
+ In Thumb mode, we must not be in an IT block.
+ */
+ if (INSN(31,23) == BITS9(1,1,1,1,1,1,1,0,0) && INSN(11,9) == BITS3(1,0,1)
+ && INSN(6,6) == 0 && INSN(4,4) == 0) {
+ UInt bit_D = INSN(22,22);
+ UInt fld_cc = INSN(21,20);
+ UInt fld_n = INSN(19,16);
+ UInt fld_d = INSN(15,12);
+ Bool isF64 = INSN(8,8) == 1;
+ UInt bit_N = INSN(7,7);
+ UInt bit_M = INSN(5,5);
+ UInt fld_m = INSN(3,0);
+
+ /* Register numbering: F64 registers are D:Vd (the extra bit is the
+ high bit), F32 registers are Vd:D (the extra bit is the low bit). */
+ UInt dd = isF64 ? ((bit_D << 4) | fld_d) : ((fld_d << 1) | bit_D);
+ UInt nn = isF64 ? ((bit_N << 4) | fld_n) : ((fld_n << 1) | bit_N);
+ UInt mm = isF64 ? ((bit_M << 4) | fld_m) : ((fld_m << 1) | bit_M);
+
+ /* Expand the 2-bit VSEL condition field into a full 4-bit ARM
+ condition code: cond = cc : (cc[1] ^ cc[0]) : 0, which gives
+ cc=00 -> 0000 (EQ), 01 -> 0110 (VS), 10 -> 1010 (GE),
+ 11 -> 1100 (GT). These are the only four conditions VSEL can
+ encode; the low bit of cond is always zero. */
+ UInt cc_1 = (fld_cc >> 1) & 1;
+ UInt cc_0 = (fld_cc >> 0) & 1;
+ UInt cond = (fld_cc << 2) | ((cc_1 ^ cc_0) << 1) | 0;
+
+ if (isT) {
+ gen_SIGILL_T_if_in_ITBlock(old_itstate, new_itstate);
+ }
+ /* In ARM mode, this is statically unconditional. In Thumb mode,
+ this must be dynamically unconditional, and we've SIGILLd if not.
+ In either case we can create unconditional IR. */
+
+ IRTemp guard = newTemp(Ity_I32);
+ assign(guard, mk_armg_calculate_condition(cond));
+ IRExpr* srcN = (isF64 ? llGetDReg : llGetFReg)(nn);
+ IRExpr* srcM = (isF64 ? llGetDReg : llGetFReg)(mm);
+ /* VSEL semantics: Dd/Sd = cond holds ? Dn/Sn : Dm/Sm. */
+ IRExpr* res = IRExpr_ITE(unop(Iop_32to1, mkexpr(guard)), srcN, srcM);
+ (isF64 ? llPutDReg : llPutFReg)(dd, res);
+
+ UChar rch = isF64 ? 'd' : 'f';
+ DIP("vsel%s.%s %c%u, %c%u, %c%u\n",
+ nCC(cond), isF64 ? "f64" : "f32", rch, dd, rch, nn, rch, mm);
+ return True;
+ }
+ /* fall through */
+
/* ---------- Doesn't match anything. ---------- */
return False;