.out_rrr = tgen_orc,
};
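+/* Emit SUB (shifted register); the 3502 format covers 32- and 64-bit via 'type'. */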
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3502, SUB, type, a0, a1, a2);
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_sub,
+};
+
static void tgen_xor(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
tcg_out_ldst(s, I3312_STRX, a0, a1, a2, 3);
break;
- case INDEX_op_sub_i32:
- case INDEX_op_sub_i64:
- if (c2) {
- tgen_addi(s, ext, a0, a1, -a2);
- } else {
- tcg_out_insn(s, 3502, SUB, ext, a0, a1, a2);
- }
- break;
-
case INDEX_op_neg_i64:
case INDEX_op_neg_i32:
tcg_out_insn(s, 3502, SUB, ext, a0, TCG_REG_XZR, a1);
case INDEX_op_st_i64:
return C_O0_I2(rz, r);
- case INDEX_op_sub_i32:
- case INDEX_op_sub_i64:
- return C_O1_I2(r, r, rA);
-
case INDEX_op_setcond_i32:
case INDEX_op_setcond_i64:
case INDEX_op_negsetcond_i32:
.base.static_constraint = C_NotImplemented,
};
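+/* Register-register subtract: SUB with an unshifted (LSL #0) second source. */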
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_SUB, a0, a1, a2, SHIFT_IMM_LSL(0));
+}
+
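+/* Constant minus register: RSB rd, rn, #imm computes imm - rn, so a0 = a1 - a2. */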
+static void tgen_subfi(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, TCGReg a2)
+{
+ tcg_out_dat_imm(s, COND_AL, ARITH_RSB, a0, a2, encode_imm_nofail(a1));
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, rI, r),
+ .out_rrr = tgen_sub,
+ .out_rir = tgen_subfi,
+};
+
static void tgen_xor(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[c], ARITH_MOV,
ARITH_MVN, args[0], 0, args[3], const_args[3]);
break;
- case INDEX_op_sub_i32:
- if (const_args[1]) {
- tcg_out_dat_imm(s, COND_AL, ARITH_RSB,
- args[0], args[2], encode_imm_nofail(args[1]));
- } else {
- tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
- args[0], args[1], args[2], const_args[2]);
- }
- break;
case INDEX_op_add2_i32:
a0 = args[0], a1 = args[1], a2 = args[2];
a3 = args[3], a4 = args[4], a5 = args[5];
case INDEX_op_setcond_i32:
case INDEX_op_negsetcond_i32:
return C_O1_I2(r, r, rIN);
- case INDEX_op_sub_i32:
- return C_O1_I2(r, rI, r);
case INDEX_op_clz_i32:
case INDEX_op_ctz_i32:
.base.static_constraint = C_NotImplemented,
};
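+/* x86 SUB is two-operand; the C_O1_I2(r, 0, r) constraint aliases a0 with a1. */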
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tgen_arithr(s, ARITH_SUB + rexw, a0, a2);
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, 0, r),
+ .out_rrr = tgen_sub,
+};
+
static void tgen_xor(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
}
break;
- OP_32_64(sub):
- c = ARITH_SUB;
- if (const_a2) {
- tgen_arithi(s, c + rexw, a0, a2, 0);
- } else {
- tgen_arithr(s, c + rexw, a0, a2);
- }
- break;
-
OP_32_64(mul):
if (const_a2) {
int32_t val;
case INDEX_op_st_i64:
return C_O0_I2(re, r);
- case INDEX_op_sub_i32:
- case INDEX_op_sub_i64:
case INDEX_op_mul_i32:
case INDEX_op_mul_i64:
return C_O1_I2(r, 0, re);
.out_rrr = tgen_orc,
};
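+/* SUB.W sign-extends the 32-bit difference; SUB.D is the full 64-bit subtract. */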
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_sub_w(s, a0, a1, a2);
+ } else {
+ tcg_out_opc_sub_d(s, a0, a1, a2);
+ }
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_sub,
+};
+
static void tgen_xor(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
}
break;
- case INDEX_op_sub_i32:
- if (c2) {
- tcg_out_addi(s, TCG_TYPE_I32, a0, a1, -a2);
- } else {
- tcg_out_opc_sub_w(s, a0, a1, a2);
- }
- break;
- case INDEX_op_sub_i64:
- if (c2) {
- tcg_out_addi(s, TCG_TYPE_I64, a0, a1, -a2);
- } else {
- tcg_out_opc_sub_d(s, a0, a1, a2);
- }
- break;
-
case INDEX_op_neg_i32:
tcg_out_opc_sub_w(s, a0, TCG_REG_ZERO, a1);
break;
/* Must deposit into the same register as input */
return C_O1_I2(r, 0, rz);
- case INDEX_op_sub_i32:
case INDEX_op_setcond_i32:
return C_O1_I2(r, rz, ri);
- case INDEX_op_sub_i64:
case INDEX_op_setcond_i64:
return C_O1_I2(r, rz, rJ);
C_O1_I2(r, r, rIK)
C_O1_I2(r, r, rJ)
C_O1_I2(r, r, rzW)
-C_O1_I2(r, rz, rN)
C_O1_I2(r, rz, rz)
C_O1_I4(r, rz, rz, rz, 0)
C_O1_I4(r, rz, rz, rz, rz)
.base.static_constraint = C_NotImplemented,
};
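+/* Use the unsigned forms SUBU/DSUBU, which do not trap on overflow. */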
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_SUBU : OPC_DSUBU;
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_sub,
+};
+
static void tgen_xor(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
tcg_out_ldst(s, i1, a0, a1, a2);
break;
- do_binaryv:
- tcg_out_opc_reg(s, i1, a0, a1, a2);
- break;
-
- case INDEX_op_sub_i32:
- i1 = OPC_SUBU, i2 = OPC_ADDIU;
- goto do_subtract;
- case INDEX_op_sub_i64:
- i1 = OPC_DSUBU, i2 = OPC_DADDIU;
- do_subtract:
- if (c2) {
- tcg_out_opc_imm(s, i2, a0, a1, -a2);
- break;
- }
- goto do_binaryv;
-
case INDEX_op_mul_i32:
if (use_mips32_instructions) {
tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
case INDEX_op_st_i64:
return C_O0_I2(rz, r);
- case INDEX_op_sub_i32:
- case INDEX_op_sub_i64:
- return C_O1_I2(r, rz, rN);
case INDEX_op_mul_i32:
case INDEX_op_mulsh_i32:
case INDEX_op_muluh_i32:
C_O1_I1(v, v)
C_O1_I1(v, vr)
C_O1_I2(r, 0, rZ)
-C_O1_I2(r, rI, ri)
-C_O1_I2(r, rI, rT)
+C_O1_I2(r, rI, r)
C_O1_I2(r, r, r)
C_O1_I2(r, r, ri)
C_O1_I2(r, r, rC)
.out_rrr = tgen_orc,
};
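+/* subf rt, ra, rb computes rb - ra, hence the swapped TAB(a0, a2, a1) order. */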
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out32(s, SUBF | TAB(a0, a2, a1));
+}
+
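+/* subfic rt, ra, si computes si - ra: the immediate a1 minus register a2. */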
+static void tgen_subfi(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, TCGReg a2)
+{
+ tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, rI, r),
+ .out_rrr = tgen_sub,
+ .out_rir = tgen_subfi,
+};
+
static void tgen_xor(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
break;
- case INDEX_op_sub_i32:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[1]) {
- if (const_args[2]) {
- tcg_out_movi(s, TCG_TYPE_I32, a0, a1 - a2);
- } else {
- tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
- }
- } else if (const_args[2]) {
- tgen_addi(s, type, a0, a1, (int32_t)-a2);
- } else {
- tcg_out32(s, SUBF | TAB(a0, a2, a1));
- }
- break;
-
case INDEX_op_clz_i32:
tcg_out_cntxz(s, TCG_TYPE_I32, CNTLZW, args[0], args[1],
args[2], const_args[2]);
tcg_out32(s, NOR | SAB(args[1], args[0], args[1]));
break;
- case INDEX_op_sub_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[1]) {
- if (const_args[2]) {
- tcg_out_movi(s, TCG_TYPE_I64, a0, a1 - a2);
- } else {
- tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
- }
- } else if (const_args[2]) {
- tgen_addi(s, type, a0, a1, -a2);
- } else {
- tcg_out32(s, SUBF | TAB(a0, a2, a1));
- }
- break;
-
case INDEX_op_shl_i64:
if (const_args[2]) {
/* Limit immediate shift count lest we create an illegal insn. */
case INDEX_op_muluh_i64:
return C_O1_I2(r, r, r);
- case INDEX_op_sub_i32:
- return C_O1_I2(r, rI, ri);
- case INDEX_op_sub_i64:
- return C_O1_I2(r, rI, rT);
case INDEX_op_clz_i32:
case INDEX_op_ctz_i32:
case INDEX_op_clz_i64:
C_O1_I2(r, r, r)
C_O1_I2(r, r, ri)
C_O1_I2(r, r, rI)
-C_O1_I2(r, rz, rN)
C_O1_I2(r, rz, rz)
C_N1_I2(r, r, rM)
C_O1_I4(r, r, rI, rM, rM)
CONST('I', TCG_CT_CONST_S12)
CONST('K', TCG_CT_CONST_S5)
CONST('L', TCG_CT_CONST_CMP_VI)
-CONST('N', TCG_CT_CONST_N12)
CONST('M', TCG_CT_CONST_M12)
}
#define TCG_CT_CONST_S12 0x100
-#define TCG_CT_CONST_N12 0x200
-#define TCG_CT_CONST_M12 0x400
-#define TCG_CT_CONST_S5 0x800
-#define TCG_CT_CONST_CMP_VI 0x1000
+#define TCG_CT_CONST_M12 0x200
+#define TCG_CT_CONST_S5 0x400
+#define TCG_CT_CONST_CMP_VI 0x800
#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32)
if ((ct & TCG_CT_CONST_S12) && val >= -0x800 && val <= 0x7ff) {
return 1;
}
- /*
- * Sign extended from 12 bits, negated: [-0x7ff, 0x800].
- * Used for subtraction, where a constant must be handled by ADDI.
- */
- if ((ct & TCG_CT_CONST_N12) && val >= -0x7ff && val <= 0x800) {
- return 1;
- }
/*
* Sign extended from 12 bits, +/- matching: [-0x7ff, 0x7ff].
* Used by addsub2 and movcond, which may need the negative value,
.out_rrr = tgen_orc,
};
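+/* SUBW sign-extends the 32-bit difference; SUB is the 64-bit subtract. */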
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SUBW : OPC_SUB;
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_sub,
+};
+
static void tgen_xor(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
tcg_out_ldst(s, OPC_SD, a0, a1, a2);
break;
- case INDEX_op_sub_i32:
- if (c2) {
- tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, -a2);
- } else {
- tcg_out_opc_reg(s, OPC_SUBW, a0, a1, a2);
- }
- break;
- case INDEX_op_sub_i64:
- if (c2) {
- tcg_out_opc_imm(s, OPC_ADDI, a0, a1, -a2);
- } else {
- tcg_out_opc_reg(s, OPC_SUB, a0, a1, a2);
- }
- break;
-
case INDEX_op_not_i32:
case INDEX_op_not_i64:
tcg_out_opc_imm(s, OPC_XORI, a0, a1, -1);
case INDEX_op_negsetcond_i64:
return C_O1_I2(r, r, rI);
- case INDEX_op_sub_i32:
- case INDEX_op_sub_i64:
- return C_O1_I2(r, rz, rN);
-
case INDEX_op_mul_i32:
case INDEX_op_mulsh_i32:
case INDEX_op_muluh_i32:
.out_rrr = tgen_orc,
};
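+/* 64-bit always uses SGRK; for 32-bit, the 2-byte SR suffices when a0 aliases a1. */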
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type != TCG_TYPE_I32) {
+ tcg_out_insn(s, RRFa, SGRK, a0, a1, a2);
+ } else if (a0 == a1) {
+ tcg_out_insn(s, RR, SR, a0, a2);
+ } else {
+ tcg_out_insn(s, RRFa, SRK, a0, a1, a2);
+ }
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_sub,
+};
+
static void tgen_xor(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
break;
- case INDEX_op_sub_i32:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tgen_addi(s, type, a0, a1, (int32_t)-a2);
- } else if (a0 == a1) {
- tcg_out_insn(s, RR, SR, a0, a2);
- } else {
- tcg_out_insn(s, RRFa, SRK, a0, a1, a2);
- }
- break;
-
case INDEX_op_neg_i32:
tcg_out_insn(s, RR, LCR, args[0], args[1]);
break;
tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
break;
- case INDEX_op_sub_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tgen_addi(s, type, a0, a1, -a2);
- } else {
- tcg_out_insn(s, RRFa, SGRK, a0, a1, a2);
- }
- break;
-
case INDEX_op_neg_i64:
tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
break;
case INDEX_op_clz_i64:
return C_O1_I2(r, r, rI);
- case INDEX_op_sub_i32:
- case INDEX_op_sub_i64:
- return C_O1_I2(r, r, ri);
-
case INDEX_op_mul_i32:
return (HAVE_FACILITY(MISC_INSN_EXT2)
? C_O1_I2(r, r, ri)
.out_rrr = tgen_orc,
};
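+/* A single SUB serves both types; only the low 32 bits are significant for I32. */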
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_arith(s, a0, a1, a2, ARITH_SUB);
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_sub,
+};
+
static void tgen_xor(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
case INDEX_op_st32_i64:
tcg_out_ldst(s, a0, a1, a2, STW);
break;
- OP_32_64(sub):
- c = ARITH_SUB;
- goto gen_arith;
case INDEX_op_shl_i32:
c = SHIFT_SLL;
do_shift32:
case INDEX_op_div_i64:
case INDEX_op_divu_i32:
case INDEX_op_divu_i64:
- case INDEX_op_sub_i32:
- case INDEX_op_sub_i64:
case INDEX_op_shl_i32:
case INDEX_op_shl_i64:
case INDEX_op_shr_i32:
TCGReg a0, TCGReg a1, tcg_target_long a2);
} TCGOutOpBinary;
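+/*
+ * out_rir implements subtraction with a constant first source
+ * (a0 = a1 - a2 with a1 immediate); a constant second source is
+ * folded to addition before reaching the backend.
+ */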
+typedef struct TCGOutOpSubtract {
+ TCGOutOp base;
+ void (*out_rrr)(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2);
+ void (*out_rir)(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, TCGReg a2);
+} TCGOutOpSubtract;
+
#include "tcg-target.c.inc"
#ifndef CONFIG_TCG_INTERPRETER
OUTOP(INDEX_op_nor, TCGOutOpBinary, outop_nor),
OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
+ OUTOP(INDEX_op_sub_i32, TCGOutOpSubtract, outop_sub),
+ OUTOP(INDEX_op_sub_i64, TCGOutOpSubtract, outop_sub),
OUTOP(INDEX_op_xor, TCGOutOpBinary, outop_xor),
};
case INDEX_op_st8_i32:
case INDEX_op_st16_i32:
case INDEX_op_st_i32:
- case INDEX_op_sub_i32:
case INDEX_op_neg_i32:
case INDEX_op_mul_i32:
case INDEX_op_shl_i32:
case INDEX_op_st16_i64:
case INDEX_op_st32_i64:
case INDEX_op_st_i64:
- case INDEX_op_sub_i64:
case INDEX_op_neg_i64:
case INDEX_op_mul_i64:
case INDEX_op_shl_i64:
}
break;
+ case INDEX_op_sub_i32:
+ case INDEX_op_sub_i64:
+ {
+ const TCGOutOpSubtract *out = &outop_sub;
+
+ /*
+ * Constants should never appear in the second source operand.
+ * These are folded into add with a negative constant.
+ */
+ tcg_debug_assert(!const_args[2]);
+ if (const_args[1]) {
+ out->out_rir(s, type, new_args[0], new_args[1], new_args[2]);
+ } else {
+ out->out_rrr(s, type, new_args[0], new_args[1], new_args[2]);
+ }
+ }
+ break;
+
default:
if (def->flags & TCG_OPF_VECTOR) {
tcg_out_vec_op(s, op->opc, type - TCG_TYPE_V64,
case INDEX_op_rem_i64:
case INDEX_op_remu_i32:
case INDEX_op_remu_i64:
- case INDEX_op_sub_i32:
- case INDEX_op_sub_i64:
case INDEX_op_mul_i32:
case INDEX_op_mul_i64:
case INDEX_op_shl_i32:
.out_rrr = tgen_orc,
};
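+/* TCI takes registers only here; pick the sub opcode for the host word size. */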
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_op_rrr(s, glue(INDEX_op_sub_i, TCG_TARGET_REG_BITS), a0, a1, a2);
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_sub,
+};
+
static void tgen_xor(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
tcg_out_ldst(s, opc, args[0], args[1], args[2]);
break;
- CASE_32_64(sub)
CASE_32_64(mul)
CASE_32_64(shl)
CASE_32_64(shr)