Drop all backend support for an immediate as the first operand.
This can never happen, as we swap commutative operands to place
immediates as the second operand.
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
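
For reviewers, a standalone sketch (not part of this patch) of the
canonicalization being relied on: the TCG middle-end swaps the operands
of commutative ops so that a constant only ever reaches the backend as
the second source, which is what lets each backend's out_rri hook, and
the tcg_debug_assert(!const_args[1]) below, assume a register in the
first source operand. Names and types here (Arg, canonicalize_commutative)
are invented for illustration and do not exist in QEMU; the real swap is
done by the optimizer on TCGOp arguments.

    /* Illustrative sketch only; real logic lives in tcg/optimize.c. */
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct {
        long value;
        bool is_const;
    } Arg;

    /* If exactly one operand is a constant, make it the second one. */
    static void canonicalize_commutative(Arg *p1, Arg *p2)
    {
        if (p1->is_const && !p2->is_const) {
            Arg tmp = *p1;
            *p1 = *p2;
            *p2 = tmp;
        }
    }

    int main(void)
    {
        Arg a1 = { .value = 123, .is_const = true };   /* immediate */
        Arg a2 = { .value = 7,   .is_const = false };  /* register  */

        canonicalize_commutative(&a1, &a2);

        /* After canonicalization, out_rri only ever sees the immediate
         * in the second source operand. */
        printf("first is_const=%d, second is_const=%d\n",
               a1.is_const, a2.is_const);
        return 0;
    }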
tcg_out_mov(s, TCG_TYPE_I32, rd, rn);
}
-static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd,
- TCGReg rn, int64_t aimm)
-{
- if (aimm >= 0) {
- tcg_out_insn(s, 3401, ADDI, ext, rd, rn, aimm);
- } else {
- tcg_out_insn(s, 3401, SUBI, ext, rd, rn, -aimm);
- }
-}
-
static void tcg_out_addsub2(TCGContext *s, TCGType ext, TCGReg rl,
TCGReg rh, TCGReg al, TCGReg ah,
tcg_target_long bl, tcg_target_long bh,
flush_idcache_range(jmp_rx, jmp_rw, 4);
}
+
+static void tgen_add(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3502, ADD, type, a0, a1, a2);
+}
+
+static void tgen_addi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (a2 >= 0) {
+ tcg_out_insn(s, 3401, ADDI, type, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3401, SUBI, type, a0, a1, -a2);
+ }
+}
+
+static const TCGOutOpBinary outop_add = {
+ .base.static_constraint = C_O1_I2(r, r, rA),
+ .out_rrr = tgen_add,
+ .out_rri = tgen_addi,
+};
+
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
tcg_out_ldst(s, I3312_STRX, a0, a1, a2, 3);
break;
- case INDEX_op_add_i32:
- a2 = (int32_t)a2;
- /* FALLTHRU */
- case INDEX_op_add_i64:
- if (c2) {
- tcg_out_addsubi(s, ext, a0, a1, a2);
- } else {
- tcg_out_insn(s, 3502, ADD, ext, a0, a1, a2);
- }
- break;
-
case INDEX_op_sub_i32:
- a2 = (int32_t)a2;
- /* FALLTHRU */
case INDEX_op_sub_i64:
if (c2) {
- tcg_out_addsubi(s, ext, a0, a1, -a2);
+ tgen_addi(s, ext, a0, a1, -a2);
} else {
tcg_out_insn(s, 3502, SUB, ext, a0, a1, a2);
}
case INDEX_op_st_i64:
return C_O0_I2(rz, r);
- case INDEX_op_add_i32:
- case INDEX_op_add_i64:
case INDEX_op_sub_i32:
case INDEX_op_sub_i64:
return C_O1_I2(r, r, rA);
}
}
+static void tcg_out_dat_IN(TCGContext *s, ARMCond cond, ARMInsn opc,
+ ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs)
+{
+ int imm12 = encode_imm(rhs);
+ if (imm12 < 0) {
+ imm12 = encode_imm_nofail(-rhs);
+ opc = opneg;
+ }
+ tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
+}
+
static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc,
ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs,
bool rhs_is_const)
* rhs must satisfy the "rIN" constraint.
*/
if (rhs_is_const) {
- int imm12 = encode_imm(rhs);
- if (imm12 < 0) {
- imm12 = encode_imm_nofail(-rhs);
- opc = opneg;
- }
- tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
+ tcg_out_dat_IN(s, cond, opc, opneg, dst, lhs, rhs);
} else {
tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
}
flush_idcache_range(jmp_rx, jmp_rw, 4);
}
+
+static void tgen_add(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_ADD, a0, a1, a2, SHIFT_IMM_LSL(0));
+}
+
+static void tgen_addi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_dat_IN(s, COND_AL, ARITH_ADD, ARITH_SUB, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_add = {
+ .base.static_constraint = C_O1_I2(r, r, rIN),
+ .out_rrr = tgen_add,
+ .out_rri = tgen_addi,
+};
+
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[c], ARITH_MOV,
ARITH_MVN, args[0], 0, args[3], const_args[3]);
break;
- case INDEX_op_add_i32:
- tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
- args[0], args[1], args[2], const_args[2]);
- break;
case INDEX_op_sub_i32:
if (const_args[1]) {
if (const_args[2]) {
case INDEX_op_st_i32:
return C_O0_I2(r, r);
- case INDEX_op_add_i32:
case INDEX_op_sub_i32:
case INDEX_op_setcond_i32:
case INDEX_op_negsetcond_i32:
/* no need to flush icache explicitly */
}
+
+static void tgen_add(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+
+ if (a0 == a1) {
+ tgen_arithr(s, ARITH_ADD + rexw, a0, a2);
+ } else if (a0 == a2) {
+ tgen_arithr(s, ARITH_ADD + rexw, a0, a1);
+ } else {
+ tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a2, 0, 0);
+ }
+}
+
+static void tgen_addi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+
+ if (a0 == a1) {
+ tgen_arithi(s, ARITH_ADD + rexw, a0, a2, false);
+ } else {
+ tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, -1, 0, a2);
+ }
+}
+
+static const TCGOutOpBinary outop_add = {
+ .base.static_constraint = C_O1_I2(r, r, re),
+ .out_rrr = tgen_add,
+ .out_rri = tgen_addi,
+};
+
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
}
break;
- OP_32_64(add):
- /* For 3-operand addition, use LEA. */
- if (a0 != a1) {
- TCGArg c3 = 0;
- if (const_a2) {
- c3 = a2, a2 = -1;
- } else if (a0 == a2) {
- /* Watch out for dest = src + dest, since we've removed
- the matching constraint on the add. */
- tgen_arithr(s, ARITH_ADD + rexw, a0, a1);
- break;
- }
-
- tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a2, 0, c3);
- break;
- }
- c = ARITH_ADD;
- goto gen_arith;
OP_32_64(sub):
c = ARITH_SUB;
goto gen_arith;
case INDEX_op_st_i64:
return C_O0_I2(re, r);
- case INDEX_op_add_i32:
- case INDEX_op_add_i64:
- return C_O1_I2(r, r, re);
-
case INDEX_op_sub_i32:
case INDEX_op_sub_i64:
case INDEX_op_mul_i32:
flush_idcache_range(jmp_rx, jmp_rw, 4);
}
+
+static void tgen_add(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_add_w(s, a0, a1, a2);
+ } else {
+ tcg_out_opc_add_d(s, a0, a1, a2);
+ }
+}
+
+static const TCGOutOpBinary outop_add = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_add,
+ .out_rri = tcg_out_addi,
+};
+
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
}
break;
- case INDEX_op_add_i32:
- if (c2) {
- tcg_out_addi(s, TCG_TYPE_I32, a0, a1, a2);
- } else {
- tcg_out_opc_add_w(s, a0, a1, a2);
- }
- break;
- case INDEX_op_add_i64:
- if (c2) {
- tcg_out_addi(s, TCG_TYPE_I64, a0, a1, a2);
- } else {
- tcg_out_opc_add_d(s, a0, a1, a2);
- }
- break;
-
case INDEX_op_sub_i32:
if (c2) {
tcg_out_addi(s, TCG_TYPE_I32, a0, a1, -a2);
case INDEX_op_rotr_i64:
return C_O1_I2(r, r, ri);
- case INDEX_op_add_i32:
- return C_O1_I2(r, r, ri);
- case INDEX_op_add_i64:
- return C_O1_I2(r, r, rJ);
-
case INDEX_op_and_i32:
case INDEX_op_and_i64:
case INDEX_op_nor_i32:
/* Always indirect, nothing to do */
}
+
+static void tgen_add(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_ADDU : OPC_DADDU;
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
+}
+
+static void tgen_addi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_ADDIU : OPC_DADDIU;
+ tcg_out_opc_imm(s, insn, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_add = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_add,
+ .out_rri = tgen_addi,
+};
+
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
tcg_out_ldst(s, i1, a0, a1, a2);
break;
- case INDEX_op_add_i32:
- i1 = OPC_ADDU, i2 = OPC_ADDIU;
- goto do_binary;
- case INDEX_op_add_i64:
- i1 = OPC_DADDU, i2 = OPC_DADDIU;
- goto do_binary;
case INDEX_op_or_i32:
case INDEX_op_or_i64:
i1 = OPC_OR, i2 = OPC_ORI;
case INDEX_op_st_i64:
return C_O0_I2(rz, r);
- case INDEX_op_add_i32:
- case INDEX_op_add_i64:
- return C_O1_I2(r, r, rJ);
case INDEX_op_sub_i32:
case INDEX_op_sub_i64:
return C_O1_I2(r, rz, rN);
flush_idcache_range(jmp_rx, jmp_rw, 4);
}
+
+static void tgen_add(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out32(s, ADD | TAB(a0, a1, a2));
+}
+
+static void tgen_addi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_add = {
+ .base.static_constraint = C_O1_I2(r, r, rT),
+ .out_rrr = tgen_add,
+ .out_rri = tgen_addi,
+};
+
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
break;
- case INDEX_op_add_i32:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- do_addi_32:
- tcg_out_mem_long(s, ADDI, ADD, a0, a1, (int32_t)a2);
- } else {
- tcg_out32(s, ADD | TAB(a0, a1, a2));
- }
- break;
case INDEX_op_sub_i32:
a0 = args[0], a1 = args[1], a2 = args[2];
if (const_args[1]) {
tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
}
} else if (const_args[2]) {
- a2 = -a2;
- goto do_addi_32;
+ tgen_addi(s, type, a0, a1, (int32_t)-a2);
} else {
tcg_out32(s, SUBF | TAB(a0, a2, a1));
}
tcg_out32(s, NOR | SAB(args[1], args[0], args[1]));
break;
- case INDEX_op_add_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- do_addi_64:
- tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2);
- } else {
- tcg_out32(s, ADD | TAB(a0, a1, a2));
- }
- break;
case INDEX_op_sub_i64:
a0 = args[0], a1 = args[1], a2 = args[2];
if (const_args[1]) {
tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
}
} else if (const_args[2]) {
- a2 = -a2;
- goto do_addi_64;
+ tgen_addi(s, type, a0, a1, -a2);
} else {
tcg_out32(s, SUBF | TAB(a0, a2, a1));
}
case INDEX_op_st_i64:
return C_O0_I2(r, r);
- case INDEX_op_add_i32:
case INDEX_op_and_i32:
case INDEX_op_or_i32:
case INDEX_op_xor_i32:
case INDEX_op_sub_i32:
return C_O1_I2(r, rI, ri);
- case INDEX_op_add_i64:
- return C_O1_I2(r, r, rT);
case INDEX_op_or_i64:
case INDEX_op_xor_i64:
return C_O1_I2(r, r, rU);
flush_idcache_range(jmp_rx, jmp_rw, 4);
}
+
+static void tgen_add(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_ADDW : OPC_ADD;
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
+}
+
+static void tgen_addi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_ADDIW : OPC_ADDI;
+ tcg_out_opc_imm(s, insn, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_add = {
+ .base.static_constraint = C_O1_I2(r, r, rI),
+ .out_rrr = tgen_add,
+ .out_rri = tgen_addi,
+};
+
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
tcg_out_ldst(s, OPC_SD, a0, a1, a2);
break;
- case INDEX_op_add_i32:
- if (c2) {
- tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, a2);
- } else {
- tcg_out_opc_reg(s, OPC_ADDW, a0, a1, a2);
- }
- break;
- case INDEX_op_add_i64:
- if (c2) {
- tcg_out_opc_imm(s, OPC_ADDI, a0, a1, a2);
- } else {
- tcg_out_opc_reg(s, OPC_ADD, a0, a1, a2);
- }
- break;
-
case INDEX_op_sub_i32:
if (c2) {
tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, -a2);
case INDEX_op_st_i64:
return C_O0_I2(rz, r);
- case INDEX_op_add_i32:
case INDEX_op_and_i32:
case INDEX_op_or_i32:
case INDEX_op_xor_i32:
- case INDEX_op_add_i64:
case INDEX_op_and_i64:
case INDEX_op_or_i64:
case INDEX_op_xor_i64:
/* no need to flush icache explicitly */
}
+
+static void tgen_add(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (a0 != a1) {
+ tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
+ } else if (type == TCG_TYPE_I32) {
+ tcg_out_insn(s, RR, AR, a0, a2);
+ } else {
+ tcg_out_insn(s, RRE, AGR, a0, a2);
+ }
+}
+
+static void tgen_addi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (a0 == a1) {
+ if (type == TCG_TYPE_I32) {
+ if (a2 == (int16_t)a2) {
+ tcg_out_insn(s, RI, AHI, a0, a2);
+ } else {
+ tcg_out_insn(s, RIL, AFI, a0, a2);
+ }
+ return;
+ }
+ if (a2 == (int16_t)a2) {
+ tcg_out_insn(s, RI, AGHI, a0, a2);
+ return;
+ }
+ if (a2 == (int32_t)a2) {
+ tcg_out_insn(s, RIL, AGFI, a0, a2);
+ return;
+ }
+ if (a2 == (uint32_t)a2) {
+ tcg_out_insn(s, RIL, ALGFI, a0, a2);
+ return;
+ }
+ if (-a2 == (uint32_t)-a2) {
+ tcg_out_insn(s, RIL, SLGFI, a0, -a2);
+ return;
+ }
+ }
+ tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
+}
+
+static const TCGOutOpBinary outop_add = {
+ .base.static_constraint = C_O1_I2(r, r, ri),
+ .out_rrr = tgen_add,
+ .out_rri = tgen_addi,
+};
+
+
# define OP_32_64(x) \
case glue(glue(INDEX_op_,x),_i32): \
case glue(glue(INDEX_op_,x),_i64)
tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
break;
- case INDEX_op_add_i32:
- a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
- if (const_args[2]) {
- do_addi_32:
- if (a0 == a1) {
- if (a2 == (int16_t)a2) {
- tcg_out_insn(s, RI, AHI, a0, a2);
- break;
- }
- tcg_out_insn(s, RIL, AFI, a0, a2);
- break;
- }
- tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
- } else if (a0 == a1) {
- tcg_out_insn(s, RR, AR, a0, a2);
- } else {
- tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
- }
- break;
case INDEX_op_sub_i32:
- a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
+ a0 = args[0], a1 = args[1], a2 = args[2];
if (const_args[2]) {
- a2 = -a2;
- goto do_addi_32;
+ tgen_addi(s, type, a0, a1, (int32_t)-a2);
} else if (a0 == a1) {
tcg_out_insn(s, RR, SR, a0, a2);
} else {
tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
break;
- case INDEX_op_add_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- do_addi_64:
- if (a0 == a1) {
- if (a2 == (int16_t)a2) {
- tcg_out_insn(s, RI, AGHI, a0, a2);
- break;
- }
- if (a2 == (int32_t)a2) {
- tcg_out_insn(s, RIL, AGFI, a0, a2);
- break;
- }
- if (a2 == (uint32_t)a2) {
- tcg_out_insn(s, RIL, ALGFI, a0, a2);
- break;
- }
- if (-a2 == (uint32_t)-a2) {
- tcg_out_insn(s, RIL, SLGFI, a0, -a2);
- break;
- }
- }
- tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
- } else if (a0 == a1) {
- tcg_out_insn(s, RRE, AGR, a0, a2);
- } else {
- tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
- }
- break;
case INDEX_op_sub_i64:
a0 = args[0], a1 = args[1], a2 = args[2];
if (const_args[2]) {
- a2 = -a2;
- goto do_addi_64;
+ tgen_addi(s, type, a0, a1, -a2);
} else {
tcg_out_insn(s, RRFa, SGRK, a0, a1, a2);
}
case INDEX_op_st_i64:
return C_O0_I2(r, r);
- case INDEX_op_add_i32:
- case INDEX_op_add_i64:
case INDEX_op_shl_i64:
case INDEX_op_shr_i64:
case INDEX_op_sar_i64:
C_O0_I2(rz, rJ)
C_O1_I1(r, r)
C_O1_I2(r, r, r)
+C_O1_I2(r, r, rJ)
C_O1_I2(r, rz, rJ)
C_O1_I4(r, rz, rJ, rI, 0)
C_O2_I2(r, r, rz, rJ)
{
}
+
+static void tgen_add(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_arith(s, a0, a1, a2, ARITH_ADD);
+}
+
+static void tgen_addi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_arithi(s, a0, a1, a2, ARITH_ADD);
+}
+
+static const TCGOutOpBinary outop_add = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_add,
+ .out_rri = tgen_addi,
+};
+
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
case INDEX_op_st32_i64:
tcg_out_ldst(s, a0, a1, a2, STW);
break;
- OP_32_64(add):
- c = ARITH_ADD;
- goto gen_arith;
OP_32_64(sub):
c = ARITH_SUB;
goto gen_arith;
case INDEX_op_qemu_st_i64:
return C_O0_I2(rz, r);
- case INDEX_op_add_i32:
- case INDEX_op_add_i64:
case INDEX_op_mul_i32:
case INDEX_op_mul_i64:
case INDEX_op_div_i32:
TCGConstraintSetIndex (*dynamic_constraint)(TCGType type, unsigned flags);
} TCGOutOp;
+typedef struct TCGOutOpBinary {
+ TCGOutOp base;
+ void (*out_rrr)(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2);
+ void (*out_rri)(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2);
+} TCGOutOpBinary;
+
#include "tcg-target.c.inc"
#ifndef CONFIG_TCG_INTERPRETER
< MIN_TLB_MASK_TABLE_OFS);
#endif
+/*
+ * Register V as the TCGOutOp for O.
+ * This verifies that V is of type T, otherwise give a nice compiler error.
+ * This prevents trivial mistakes within each arch/tcg-target.c.inc.
+ */
+#define OUTOP(O, T, V) [O] = _Generic(V, T: &V.base)
+
/* Register allocation descriptions for every TCGOpcode. */
static const TCGOutOp * const all_outop[NB_OPS] = {
+ OUTOP(INDEX_op_add_i32, TCGOutOpBinary, outop_add),
+ OUTOP(INDEX_op_add_i64, TCGOutOpBinary, outop_add),
};
+#undef OUTOP
+
/*
* All TCG threads except the parent (i.e. the one that called tcg_context_init
* and registered the target's TCG globals) must register with this function
}
/* emit instruction */
+ TCGType type = TCGOP_TYPE(op);
switch (op->opc) {
case INDEX_op_ext_i32_i64:
tcg_out_exts_i32_i64(s, new_args[0], new_args[1]);
case INDEX_op_extrl_i64_i32:
tcg_out_extrl_i64_i32(s, new_args[0], new_args[1]);
break;
+
+ case INDEX_op_add_i32:
+ case INDEX_op_add_i64:
+ {
+ const TCGOutOpBinary *out =
+ container_of(all_outop[op->opc], TCGOutOpBinary, base);
+
+ /* Constants should never appear in the first source operand. */
+ tcg_debug_assert(!const_args[1]);
+ if (const_args[2]) {
+ out->out_rri(s, type, new_args[0], new_args[1], new_args[2]);
+ } else {
+ out->out_rrr(s, type, new_args[0], new_args[1], new_args[2]);
+ }
+ }
+ break;
+
default:
if (def->flags & TCG_OPF_VECTOR) {
- tcg_out_vec_op(s, op->opc, TCGOP_TYPE(op) - TCG_TYPE_V64,
+ tcg_out_vec_op(s, op->opc, type - TCG_TYPE_V64,
TCGOP_VECE(op), new_args, const_args);
} else {
- tcg_out_op(s, op->opc, TCGOP_TYPE(op), new_args, const_args);
+ tcg_out_op(s, op->opc, type, new_args, const_args);
}
break;
}
case INDEX_op_rem_i64:
case INDEX_op_remu_i32:
case INDEX_op_remu_i64:
- case INDEX_op_add_i32:
- case INDEX_op_add_i64:
case INDEX_op_sub_i32:
case INDEX_op_sub_i64:
case INDEX_op_mul_i32:
/* Always indirect, nothing to do */
}
+static void tgen_add(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_op_rrr(s, glue(INDEX_op_add_i,TCG_TARGET_REG_BITS), a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_add = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_add,
+};
+
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
tcg_out_ldst(s, opc, args[0], args[1], args[2]);
break;
- CASE_32_64(add)
CASE_32_64(sub)
CASE_32_64(mul)
CASE_32_64(and)