Merge the INDEX_op_add_i32 and INDEX_op_add_i64 opcodes into a single
INDEX_op_add, relying on TCGOP_TYPE instead of opcodes specific to
each type.
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
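The change below collapses the add_i32/add_i64 pair into a single
INDEX_op_add; consumers that used to switch on the width-specific
opcode now read the width from the op itself. A minimal sketch of the
resulting pattern, assuming the QEMU TCG headers (the helper name is
hypothetical, not code from this patch):

    /* Hypothetical consumer: one case covers both widths, with the
     * width recovered from the op via TCGOP_TYPE rather than from
     * the opcode spelling. */
    static uint64_t fold_add_sketch(const TCGOp *op, uint64_t x, uint64_t y)
    {
        uint64_t r = x + y;                /* 64-bit add subsumes both widths */
        if (TCGOP_TYPE(op) == TCG_TYPE_I32) {
            r = (uint32_t)r;               /* narrow the i32 result */
        }
        return r;
    }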
.. list-table::

-   * - add_i32/i64 *t0*, *t1*, *t2*
+   * - add *t0*, *t1*, *t2*

     - | *t0* = *t1* + *t2*
DEF(mov, 1, 1, 0, TCG_OPF_INT | TCG_OPF_NOT_PRESENT)
+DEF(add, 1, 2, 0, TCG_OPF_INT)
+
DEF(setcond_i32, 1, 2, 1, 0)
DEF(negsetcond_i32, 1, 2, 1, 0)
DEF(movcond_i32, 1, 4, 1, 0)
DEF(st16_i32, 0, 2, 1, 0)
DEF(st_i32, 0, 2, 1, 0)
/* arith */
-DEF(add_i32, 1, 2, 0, 0)
DEF(sub_i32, 1, 2, 0, 0)
DEF(mul_i32, 1, 2, 0, 0)
DEF(div_i32, 1, 2, 0, 0)
DEF(st32_i64, 0, 2, 1, 0)
DEF(st_i64, 0, 2, 1, 0)
/* arith */
-DEF(add_i64, 1, 2, 0, 0)
DEF(sub_i64, 1, 2, 0, 0)
DEF(mul_i64, 1, 2, 0, 0)
DEF(div_i64, 1, 2, 0, 0)
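For orientation, each row in tcg-opc.h has the shape
DEF(name, nb_oargs, nb_iargs, nb_cargs, flags), so the two removed
width-specific rows become one row declaring one output, two inputs,
no constant arguments, and the TCG_OPF_INT flag marking the opcode as
usable at both integer widths:

    /* Before: one row per width, width implied by the suffix. */
    DEF(add_i32, 1, 2, 0, 0)
    DEF(add_i64, 1, 2, 0, 0)
    /* After: one width-generic row, width carried by TCGOP_TYPE. */
    DEF(add, 1, 2, 0, TCG_OPF_INT)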
NEXT_INSN;
switch (ctx->opcode & 0xf00f) {
case 0x300c: /* add Rm,Rn */
- op_opc = INDEX_op_add_i32;
+ op_opc = INDEX_op_add;
goto do_reg_op;
case 0x2009: /* and Rm,Rn */
op_opc = INDEX_op_and_i32;
if (op_dst != B11_8 || mv_src >= 0) {
goto fail;
}
- op_opc = INDEX_op_add_i32;
+ op_opc = INDEX_op_add;
op_arg = tcg_constant_i32(B7_0s);
break;
ctx->memidx, ld_mop);
break;
- case INDEX_op_add_i32:
+ case INDEX_op_add:
if (op_dst != st_src) {
goto fail;
}
uint64_t l64, h64;
switch (op) {
- CASE_OP_32_64(add):
+ case INDEX_op_add:
return x + y;
CASE_OP_32_64(sub):
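The single case is sound because the folder computes in 64 bits and
the i32 result is presumably narrowed by the caller according to the
op's type: the low 32 bits of a 64-bit sum equal the 32-bit sum. A
self-contained check of that identity:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t a = 0xffffffffu, b = 2;
        uint64_t wide = (uint64_t)a + b;   /* what the folder computes */
        assert((uint32_t)wide == a + b);   /* what an i32 add would give */
        return 0;
    }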
break;
}
if (convert) {
- TCGOpcode add_opc, xor_opc, neg_opc;
+ TCGOpcode xor_opc, neg_opc;
if (!inv && !neg) {
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
switch (ctx->type) {
case TCG_TYPE_I32:
- add_opc = INDEX_op_add_i32;
neg_opc = INDEX_op_neg_i32;
xor_opc = INDEX_op_xor_i32;
break;
case TCG_TYPE_I64:
- add_opc = INDEX_op_add_i64;
neg_opc = INDEX_op_neg_i64;
xor_opc = INDEX_op_xor_i64;
break;
if (!inv) {
op->opc = neg_opc;
} else if (neg) {
- op->opc = add_opc;
+ op->opc = INDEX_op_add;
op->args[2] = arg_new_constant(ctx, -1);
} else {
op->opc = xor_opc;
if (arg_is_const(op->args[2])) {
uint64_t val = arg_info(op->args[2])->val;
- op->opc = (ctx->type == TCG_TYPE_I32
- ? INDEX_op_add_i32 : INDEX_op_add_i64);
+ op->opc = INDEX_op_add;
op->args[2] = arg_new_constant(ctx, -val);
}
return finish_folding(ctx, op);
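This hunk rewrites subtraction of a known constant as addition of its
negation, which then benefits from add's canonicalization. The
identity holds modulo 2^n at either width; a self-contained check:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t x = 0x123456789abcdef0ULL, c = 0xfedcba98ULL;
        assert(x - c == x + -c);                         /* mod 2^64 */
        assert((uint32_t)(x - c) == (uint32_t)(x + -c)); /* mod 2^32 */
        return 0;
    }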
* Sorted alphabetically by opcode as much as possible.
*/
switch (opc) {
- CASE_OP_32_64(add):
+ case INDEX_op_add:
done = fold_add(&ctx, op);
break;
case INDEX_op_add_vec:
void tcg_gen_add_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- tcg_gen_op3_i32(INDEX_op_add_i32, ret, arg1, arg2);
+ tcg_gen_op3_i32(INDEX_op_add, ret, arg1, arg2);
}
void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
void tcg_gen_add_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_op3_i64(INDEX_op_add_i64, ret, arg1, arg2);
+ tcg_gen_op3_i64(INDEX_op_add, ret, arg1, arg2);
} else {
tcg_gen_add2_i32(TCGV_LOW(ret), TCGV_HIGH(ret), TCGV_LOW(arg1),
TCGV_HIGH(arg1), TCGV_LOW(arg2), TCGV_HIGH(arg2));
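On 32-bit hosts there is still no 64-bit add opcode to emit:
tcg_gen_add_i64 decomposes into tcg_gen_add2_i32, a low-part add plus
a high-part add with carry. A self-contained sketch of that
decomposition:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t a = 0x1ffffffffULL, b = 0x300000001ULL;
        uint32_t al = (uint32_t)a, ah = (uint32_t)(a >> 32);
        uint32_t bl = (uint32_t)b, bh = (uint32_t)(b >> 32);
        uint32_t lo = al + bl;
        uint32_t carry = lo < al;          /* unsigned overflow test */
        uint32_t hi = ah + bh + carry;
        assert((((uint64_t)hi << 32) | lo) == a + b);
        return 0;
    }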
/* Register allocation descriptions for every TCGOpcode. */
static const TCGOutOp * const all_outop[NB_OPS] = {
- OUTOP(INDEX_op_add_i32, TCGOutOpBinary, outop_add),
- OUTOP(INDEX_op_add_i64, TCGOutOpBinary, outop_add),
+ OUTOP(INDEX_op_add, TCGOutOpBinary, outop_add),
};
#undef OUTOP
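Previously both width-specific opcodes pointed the table at the same
outop_add record; with one opcode only one entry remains. Assuming the
OUTOP() macro has roughly this shape (an assumption, not quoted from
the tree), the table maps an opcode to the embedded base of its
description:

    /* Assumed shape of OUTOP(): store a pointer to the embedded
     * base member, keyed by opcode. */
    #define OUTOP(OPC, TYPE, NAME)  [OPC] = &NAME.base,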
case INDEX_op_qemu_st_i128:
return TCG_TARGET_HAS_qemu_ldst_i128;
+ case INDEX_op_add:
case INDEX_op_mov:
return has_type;
case INDEX_op_st8_i32:
case INDEX_op_st16_i32:
case INDEX_op_st_i32:
- case INDEX_op_add_i32:
case INDEX_op_sub_i32:
case INDEX_op_neg_i32:
case INDEX_op_mul_i32:
case INDEX_op_st16_i64:
case INDEX_op_st32_i64:
case INDEX_op_st_i64:
- case INDEX_op_add_i64:
case INDEX_op_sub_i64:
case INDEX_op_neg_i64:
case INDEX_op_mul_i64:
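With the width-specific cases gone, support for INDEX_op_add is
answered by has_type, i.e. by whether the queried TCGType itself is
available on the host, rather than by a per-opcode check. A
hypothetical condensation of the gate this presumes:

    /* Hypothetical sketch: i32 always exists; i64 requires a 64-bit
     * host, since on 32-bit hosts i64 ops are decomposed before
     * reaching the backend. */
    bool has_type;
    switch (type) {
    case TCG_TYPE_I32:
        has_type = true;
        break;
    case TCG_TYPE_I64:
        has_type = TCG_TARGET_REG_BITS == 64;
        break;
    default:
        has_type = false;    /* vector types gated separately */
        break;
    }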
break;
case INDEX_op_add2_i32:
- opc_new = INDEX_op_add_i32;
+ case INDEX_op_add2_i64:
+ opc_new = INDEX_op_add;
goto do_addsub2;
case INDEX_op_sub2_i32:
opc_new = INDEX_op_sub_i32;
goto do_addsub2;
- case INDEX_op_add2_i64:
- opc_new = INDEX_op_add_i64;
- goto do_addsub2;
case INDEX_op_sub2_i64:
opc_new = INDEX_op_sub_i64;
do_addsub2:
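do_addsub2 is reached when liveness finds the high half of a
double-word result dead, in which case the op can be rewritten as a
single-word op on the low parts, now spelled INDEX_op_add for both
source widths. The low half of the wide sum never depends on the high
inputs; a self-contained check:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t al = 0xdeadbeef, bl = 0x12345678;
        uint32_t ah = 7, bh = 9;           /* high halves: values irrelevant */
        uint64_t wide = (((uint64_t)ah << 32) | al)
                      + (((uint64_t)bh << 32) | bl);
        assert((uint32_t)wide == al + bl); /* high halves never leak low */
        return 0;
    }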
tcg_out_extrl_i64_i32(s, new_args[0], new_args[1]);
break;
- case INDEX_op_add_i32:
- case INDEX_op_add_i64:
+ case INDEX_op_add:
{
const TCGOutOpBinary *out =
container_of(all_outop[op->opc], TCGOutOpBinary, base);
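The allocator looks up the opcode in all_outop and recovers the full
TCGOutOpBinary record from its embedded base with container_of. A
self-contained demonstration of that recovery pattern (generic names,
not QEMU's):

    #include <assert.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)(void *)((char *)(ptr) - offsetof(type, member)))

    struct base { int kind; };
    struct binary { struct base base; int payload; };

    int main(void)
    {
        struct binary b = { .base = { .kind = 1 }, .payload = 42 };
        struct base *p = &b.base;
        assert(container_of(p, struct binary, base)->payload == 42);
        return 0;
    }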
/* Arithmetic operations (mixed 32/64 bit). */
- CASE_32_64(add)
+ case INDEX_op_add:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] + regs[r2];
break;
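The interpreter keeps every guest value in a host-word register file,
so one case serves both widths: the wide add carries the 32-bit result
in its low bits, and i32 consumers truncate on read. A hypothetical
mini-interpreter case in the same style:

    #include <assert.h>
    #include <stdint.h>

    typedef uint64_t tci_word;   /* stand-in for tcg_target_ulong */

    /* One handler serves both widths; i32 readers truncate on use. */
    static void interp_add(tci_word *regs, int r0, int r1, int r2)
    {
        regs[r0] = regs[r1] + regs[r2];
    }

    int main(void)
    {
        tci_word regs[4] = { 0, 0xffffffffu, 2, 0 };
        interp_add(regs, 0, 1, 2);
        assert((uint32_t)regs[0] == 1);    /* i32 view */
        assert(regs[0] == 0x100000001ULL); /* i64 view */
        return 0;
    }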
op_name, str_r(r0), str_r(r1));
break;
- case INDEX_op_add_i32:
- case INDEX_op_add_i64:
+ case INDEX_op_add:
case INDEX_op_sub_i32:
case INDEX_op_sub_i64:
case INDEX_op_mul_i32:
stack_bounds_check(base, offset);
if (offset != sextract32(offset, 0, 16)) {
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
- tcg_out_op_rrr(s, (TCG_TARGET_REG_BITS == 32
- ? INDEX_op_add_i32 : INDEX_op_add_i64),
- TCG_REG_TMP, TCG_REG_TMP, base);
+ tcg_out_op_rrr(s, INDEX_op_add, TCG_REG_TMP, TCG_REG_TMP, base);
base = TCG_REG_TMP;
offset = 0;
}
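The guard keeps offsets that fit a signed 16-bit immediate inline and
spills larger ones through TCG_REG_TMP with a pointer-width add, now
just INDEX_op_add. The fits-in-16-bits test round-trips the value
through a signed extract; a self-contained sketch of that idiom
(hand-rolled stand-in for QEMU's sextract32):

    #include <assert.h>
    #include <stdint.h>

    /* Sign-extend the len-bit field at pos; assumes arithmetic
     * right shift of signed values, as QEMU's own helper does. */
    static int32_t sextract32_sketch(int32_t v, int pos, int len)
    {
        return (int32_t)((uint32_t)v << (32 - len - pos)) >> (32 - len);
    }

    int main(void)
    {
        assert(sextract32_sketch(0x7fff, 0, 16) == 0x7fff);   /* fits */
        assert(sextract32_sketch(0x8000, 0, 16) == -0x8000);  /* does not */
        return 0;
    }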
static void tgen_add(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
- tcg_out_op_rrr(s, glue(INDEX_op_add_i,TCG_TARGET_REG_BITS), a0, a1, a2);
+ tcg_out_op_rrr(s, INDEX_op_add, a0, a1, a2);
}
static const TCGOutOpBinary outop_add = {