|
| Set *dest* to -1 if (*t1* *cond* *t2*) is true, otherwise set to 0.
- * - movcond_i32/i64 *dest*, *c1*, *c2*, *v1*, *v2*, *cond*
+ * - movcond *dest*, *c1*, *c2*, *v1*, *v2*, *cond*
| *dest* = (*c1* *cond* *c2* ? *v1* : *v2*)
|
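As a quick illustration of the two operations documented above, here is a minimal C sketch. It is not QEMU code: the helper names and the choice of an unsigned-less-than condition are invented for the example.

    #include <stdint.h>
    #include <stdio.h>
    #include <inttypes.h>

    /* negsetcond: dest = -(t1 cond t2), all-ones when true, zero when false. */
    static uint64_t demo_negsetcond_ltu(uint64_t t1, uint64_t t2)
    {
        return -(uint64_t)(t1 < t2);
    }

    /* movcond: dest = (c1 cond c2 ? v1 : v2). */
    static uint64_t demo_movcond_ltu(uint64_t c1, uint64_t c2,
                                     uint64_t v1, uint64_t v2)
    {
        return c1 < c2 ? v1 : v2;
    }

    int main(void)
    {
        printf("%" PRIx64 "\n", demo_negsetcond_ltu(1, 2));        /* ffffffffffffffff */
        printf("%" PRIx64 "\n", demo_movcond_ltu(1, 2, 0xa, 0xb)); /* a */
        return 0;
    }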
DEF(divu, 1, 2, 0, TCG_OPF_INT)
DEF(divu2, 2, 3, 0, TCG_OPF_INT)
DEF(eqv, 1, 2, 0, TCG_OPF_INT)
+DEF(movcond, 1, 4, 1, TCG_OPF_INT)
DEF(mul, 1, 2, 0, TCG_OPF_INT)
DEF(muls2, 2, 2, 0, TCG_OPF_INT)
DEF(mulsh, 1, 2, 0, TCG_OPF_INT)
DEF(sub, 1, 2, 0, TCG_OPF_INT)
DEF(xor, 1, 2, 0, TCG_OPF_INT)
-DEF(movcond_i32, 1, 4, 1, 0)
/* load/store */
DEF(ld8u_i32, 1, 1, 1, 0)
DEF(ld8s_i32, 1, 1, 1, 0)
DEF(bswap16_i32, 1, 1, 1, 0)
DEF(bswap32_i32, 1, 1, 1, 0)
-DEF(movcond_i64, 1, 4, 1, 0)
/* load/store */
DEF(ld8u_i64, 1, 1, 1, 0)
DEF(ld8s_i64, 1, 1, 1, 0)
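For orientation, the DEF() parameters are (name, output args, input args, constant args, flags): the merged movcond has one output, four register inputs (c1, c2, v1, v2) and one constant argument (the condition), and the TCG_OPF_INT flag marks it as a type-polymorphic integer op rather than tying the operand width to the opcode name. A sketch of how such an X-macro list is consumed, with invented names (QEMU builds its real opcode enum the same way from this header):

    /* Illustration only: a consumer defines DEF() and includes the list. */
    #define DEF(name, oargs, iargs, cargs, flags) DEMO_op_##name,
    enum DemoOpcode {
        DEF(movcond, 1, 4, 1, 0) /* 1 out, 4 in, 1 const arg (the cond) */
        DEF(mul,     1, 2, 0, 0) /* 1 out, 2 in, no const args */
        DEMO_op_count
    };
    #undef DEF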
case INDEX_op_mov_vec:
done = fold_mov(&ctx, op);
break;
- CASE_OP_32_64(movcond):
+ case INDEX_op_movcond:
done = fold_movcond(&ctx, op);
break;
case INDEX_op_mul:
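The optimizer change above is purely mechanical: the CASE_OP_32_64() macro that expanded to both width-specific labels gives way to the single merged label, and fold_movcond itself is untouched. For readers new to the pass, one simplification such a fold performs is that a movcond whose comparison is decidable at translation time collapses into a plain move; a standalone sketch of that idea, with invented names (not QEMU's implementation):

    #include <stdbool.h>
    #include <stdint.h>

    typedef enum { DEMO_COND_EQ, DEMO_COND_LTU } DemoCond;

    static bool demo_eval_cond(DemoCond c, uint64_t a, uint64_t b)
    {
        return c == DEMO_COND_EQ ? a == b : a < b;
    }

    /* With both comparison operands known to be constants, the select is
     * decided statically and only one source value is ever needed. */
    static uint64_t demo_fold_movcond(DemoCond cond,
                                      uint64_t c1, uint64_t c2,
                                      uint64_t v1, uint64_t v2)
    {
        return demo_eval_cond(cond, c1, c2) ? v1 : v2;
    }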
} else if (cond == TCG_COND_NEVER) {
tcg_gen_mov_i32(ret, v2);
} else {
- tcg_gen_op6i_i32(INDEX_op_movcond_i32, ret, c1, c2, v1, v2, cond);
+ tcg_gen_op6i_i32(INDEX_op_movcond, ret, c1, c2, v1, v2, cond);
}
}
} else if (cond == TCG_COND_NEVER) {
tcg_gen_mov_i64(ret, v2);
} else if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_op6i_i64(INDEX_op_movcond_i64, ret, c1, c2, v1, v2, cond);
+ tcg_gen_op6i_i64(INDEX_op_movcond, ret, c1, c2, v1, v2, cond);
} else {
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
TCGv_i32 zero = tcg_constant_i32(0);
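The truncated else branch is the 32-bit host path, where the 64-bit select cannot be emitted directly: judging by t0 and the zero constant above, the comparison is first reduced to a single 32-bit flag and each half of the result is then selected against it. A plain-C sketch of that data flow (an illustration of the decomposition, not the TCG helper calls themselves):

    #include <stdbool.h>
    #include <stdint.h>

    /* flag stands in for t0 != 0, i.e. the outcome of the 64-bit
     * (c1 cond c2) computed on the 32-bit halves. */
    static uint64_t demo_movcond64_on_32bit_host(bool flag,
                                                 uint64_t v1, uint64_t v2)
    {
        uint32_t lo = flag ? (uint32_t)v1 : (uint32_t)v2;  /* low half */
        uint32_t hi = flag ? (uint32_t)(v1 >> 32)
                           : (uint32_t)(v2 >> 32);         /* high half */
        return (uint64_t)hi << 32 | lo;
    }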
OUTOP(INDEX_op_divs2, TCGOutOpDivRem, outop_divs2),
OUTOP(INDEX_op_divu2, TCGOutOpDivRem, outop_divu2),
OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
- OUTOP(INDEX_op_movcond_i32, TCGOutOpMovcond, outop_movcond),
- OUTOP(INDEX_op_movcond_i64, TCGOutOpMovcond, outop_movcond),
+ OUTOP(INDEX_op_movcond, TCGOutOpMovcond, outop_movcond),
OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
OUTOP(INDEX_op_muls2, TCGOutOpMul2, outop_muls2),
OUTOP(INDEX_op_mulsh, TCGOutOpBinary, outop_mulsh),
case INDEX_op_and:
case INDEX_op_brcond:
case INDEX_op_mov:
+ case INDEX_op_movcond:
case INDEX_op_negsetcond:
case INDEX_op_or:
case INDEX_op_setcond:
case INDEX_op_xor:
return has_type;
- case INDEX_op_movcond_i32:
case INDEX_op_ld8u_i32:
case INDEX_op_ld8s_i32:
case INDEX_op_ld16u_i32:
case INDEX_op_setcond2_i32:
return TCG_TARGET_REG_BITS == 32;
- case INDEX_op_movcond_i64:
case INDEX_op_ld8u_i64:
case INDEX_op_ld8s_i64:
case INDEX_op_ld16u_i64:
case INDEX_op_brcond:
case INDEX_op_setcond:
case INDEX_op_negsetcond:
- case INDEX_op_movcond_i32:
+ case INDEX_op_movcond:
case INDEX_op_brcond2_i32:
case INDEX_op_setcond2_i32:
- case INDEX_op_movcond_i64:
case INDEX_op_cmp_vec:
case INDEX_op_cmpsel_vec:
if (op->args[k] < ARRAY_SIZE(cond_name)
case INDEX_op_brcond2_i32:
op_cond = op->args[4];
break;
- case INDEX_op_movcond_i32:
- case INDEX_op_movcond_i64:
+ case INDEX_op_movcond:
case INDEX_op_setcond2_i32:
case INDEX_op_cmpsel_vec:
op_cond = op->args[5];
}
break;
- case INDEX_op_movcond_i32:
- case INDEX_op_movcond_i64:
+ case INDEX_op_movcond:
{
const TCGOutOpMovcond *out = &outop_movcond;
TCGCond cond = new_args[5];
tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
regs[r0] = tci_compare64(regs[r1], regs[r2], condition);
break;
- case INDEX_op_movcond_i64:
+ case INDEX_op_movcond:
tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
tmp32 = tci_compare64(regs[r1], regs[r2], condition);
regs[r0] = regs[tmp32 ? r3 : r4];
break;
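In the interpreter, both widths operate on the same array of 64-bit registers; the 32-bit variant (INDEX_op_tci_movcond32, kept separate in the hunks below) differs only in comparing truncated values. A sketch of that pairing, with invented helper names standing in for tci_compare32/tci_compare64 and equality as the only condition:

    #include <stdbool.h>
    #include <stdint.h>

    static bool demo_compare32(uint64_t a, uint64_t b)
    {
        return (uint32_t)a == (uint32_t)b;   /* compare truncated values */
    }

    static bool demo_compare64(uint64_t a, uint64_t b)
    {
        return a == b;
    }

    /* Shared select step: copy register r3 or r4 into r0. */
    static void demo_movcond(uint64_t *regs, int r0, int r1, int r2,
                             int r3, int r4, bool wide)
    {
        bool t = wide ? demo_compare64(regs[r1], regs[r2])
                      : demo_compare32(regs[r1], regs[r2]);
        regs[r0] = regs[t ? r3 : r4];
    }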
case INDEX_op_tci_movcond32:
- case INDEX_op_movcond_i64:
+ case INDEX_op_movcond:
case INDEX_op_setcond2_i32:
tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c);
info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
{
TCGOpcode opc = (type == TCG_TYPE_I32
? INDEX_op_tci_movcond32
- : INDEX_op_movcond_i64);
+ : INDEX_op_movcond);
tcg_out_op_rrrrrc(s, opc, ret, c1, c2, vt, vf, cond);
}