tcg: Convert and to TCGOutOpBinary

Drop all backend support for an immediate as the first operand.
This can never happen, as we swap the operands of commutative
operations to place any immediate as the second operand.
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
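---

The swap referenced above happens once, in the middle-end, which is
what lets every backend drop its immediate-first paths. A minimal
sketch of the idea, modeled loosely on swap_commutative() in
tcg/optimize.c (the helper below is illustrative, not the actual code):

    /*
     * For a commutative binary op (args[0] = dest, args[1,2] = inputs),
     * if only the first input is constant, exchange the inputs so that
     * backends only ever see a constant as the second operand.
     */
    static void canonicalize_commutative(TCGOp *op)
    {
        if (arg_is_const(op->args[1]) && !arg_is_const(op->args[2])) {
            TCGArg tmp = op->args[1];
            op->args[1] = op->args[2];
            op->args[2] = tmp;
        }
    }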
.out_rri = tgen_addi,
};
+static void tgen_and(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3510, AND, type, a0, a1, a2);
+}
+
+static void tgen_andi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
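+    /* The "L" constraint guarantees a valid logical immediate. */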
+ tcg_out_logicali(s, I3404_ANDI, type, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_and = {
+ .base.static_constraint = C_O1_I2(r, r, rL),
+ .out_rrr = tgen_and,
+ .out_rri = tgen_andi,
+};
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
const TCGArg args[TCG_MAX_OP_ARGS],
tcg_out_insn(s, 3502, SUB, ext, a0, TCG_REG_XZR, a1);
break;
- case INDEX_op_and_i32:
- a2 = (int32_t)a2;
- /* FALLTHRU */
- case INDEX_op_and_i64:
- if (c2) {
- tcg_out_logicali(s, I3404_ANDI, ext, a0, a1, a2);
- } else {
- tcg_out_insn(s, 3510, AND, ext, a0, a1, a2);
- }
- break;
-
case INDEX_op_andc_i32:
a2 = (int32_t)a2;
/* FALLTHRU */
case INDEX_op_mulsh_i64:
return C_O1_I2(r, r, r);
- case INDEX_op_and_i32:
- case INDEX_op_and_i64:
case INDEX_op_or_i32:
case INDEX_op_or_i64:
case INDEX_op_xor_i32:
* Emit either the reg,imm or reg,reg form of a data-processing insn.
* rhs must satisfy the "rIK" constraint.
*/
+static void tcg_out_dat_IK(TCGContext *s, ARMCond cond, ARMInsn opc,
+ ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs)
+{
+ int imm12 = encode_imm(rhs);
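+    /*
+     * If rhs does not encode, the rIK constraint guarantees that
+     * ~rhs does: switch to the inverted insn (e.g. BIC for AND).
+     */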
+ if (imm12 < 0) {
+ imm12 = encode_imm_nofail(~rhs);
+ opc = opinv;
+ }
+ tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
+}
+
static void tcg_out_dat_rIK(TCGContext *s, ARMCond cond, ARMInsn opc,
ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs,
bool rhs_is_const)
{
if (rhs_is_const) {
- int imm12 = encode_imm(rhs);
- if (imm12 < 0) {
- imm12 = encode_imm_nofail(~rhs);
- opc = opinv;
- }
- tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
+ tcg_out_dat_IK(s, cond, opc, opinv, dst, lhs, rhs);
} else {
tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
}
.out_rri = tgen_addi,
};
+static void tgen_and(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_AND, a0, a1, a2, SHIFT_IMM_LSL(0));
+}
+
+static void tgen_andi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_dat_IK(s, COND_AL, ARITH_AND, ARITH_BIC, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_and = {
+ .base.static_constraint = C_O1_I2(r, r, rIK),
+ .out_rrr = tgen_and,
+ .out_rri = tgen_andi,
+};
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
args[0], args[1], args[2], const_args[2]);
}
break;
- case INDEX_op_and_i32:
- tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC,
- args[0], args[1], args[2], const_args[2]);
- break;
case INDEX_op_andc_i32:
tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND,
args[0], args[1], args[2], const_args[2]);
case INDEX_op_negsetcond_i32:
return C_O1_I2(r, r, rIN);
- case INDEX_op_and_i32:
case INDEX_op_andc_i32:
case INDEX_op_clz_i32:
case INDEX_op_ctz_i32:
.out_rri = tgen_addi,
};
+static void tgen_and(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
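+    /* With the "0" matching constraint below, a1 was allocated into a0. */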
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tgen_arithr(s, ARITH_AND + rexw, a0, a2);
+}
+
+static void tgen_andi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tgen_arithi(s, ARITH_AND + rexw, a0, a2, false);
+}
+
+static const TCGOutOpBinary outop_and = {
+ .base.static_constraint = C_O1_I2(r, 0, reZ),
+ .out_rrr = tgen_and,
+ .out_rri = tgen_andi,
+};
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
OP_32_64(sub):
c = ARITH_SUB;
goto gen_arith;
- OP_32_64(and):
- c = ARITH_AND;
- goto gen_arith;
OP_32_64(or):
c = ARITH_OR;
goto gen_arith;
case INDEX_op_xor_i64:
return C_O1_I2(r, 0, re);
- case INDEX_op_and_i32:
- case INDEX_op_and_i64:
- return C_O1_I2(r, 0, reZ);
-
case INDEX_op_andc_i32:
case INDEX_op_andc_i64:
return C_O1_I2(r, r, rI);
.out_rri = tcg_out_addi,
};
+static void tgen_and(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_opc_and(s, a0, a1, a2);
+}
+
+static void tgen_andi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
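+    /* The "U" constraint guarantees an unsigned 12-bit immediate. */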
+ tcg_out_opc_andi(s, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_and = {
+ .base.static_constraint = C_O1_I2(r, r, rU),
+ .out_rrr = tgen_and,
+ .out_rri = tgen_andi,
+};
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
}
break;
- case INDEX_op_and_i32:
- case INDEX_op_and_i64:
- if (c2) {
- tcg_out_opc_andi(s, a0, a1, a2);
- } else {
- tcg_out_opc_and(s, a0, a1, a2);
- }
- break;
-
case INDEX_op_or_i32:
case INDEX_op_or_i64:
if (c2) {
case INDEX_op_rotr_i64:
return C_O1_I2(r, r, ri);
- case INDEX_op_and_i32:
- case INDEX_op_and_i64:
case INDEX_op_nor_i32:
case INDEX_op_nor_i64:
case INDEX_op_or_i32:
.out_rri = tgen_addi,
};
+static void tgen_and(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_opc_reg(s, OPC_AND, a0, a1, a2);
+}
+
+static void tgen_andi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ int msb;
+
+ if (a2 == (uint16_t)a2) {
+ tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2);
+ return;
+ }
+
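+    /*
+     * Otherwise the "K" constraint guarantees a power-of-2 minus 1
+     * mask, which we implement as an extract of the low msb+1 bits.
+     */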
+ tcg_debug_assert(use_mips32r2_instructions);
+ tcg_debug_assert(is_p2m1(a2));
+ msb = ctz64(~a2) - 1;
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_bf(s, OPC_EXT, a0, a1, msb, 0);
+ } else {
+ tcg_out_opc_bf64(s, OPC_DEXT, OPC_DEXTM, OPC_DEXTU, a0, a1, msb, 0);
+ }
+}
+
+static const TCGOutOpBinary outop_and = {
+ .base.static_constraint = C_O1_I2(r, r, rIK),
+ .out_rrr = tgen_and,
+ .out_rri = tgen_andi,
+};
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
break;
}
goto do_binaryv;
- case INDEX_op_and_i32:
- if (c2 && a2 != (uint16_t)a2) {
- int msb = ctz32(~a2) - 1;
- tcg_debug_assert(use_mips32r2_instructions);
- tcg_debug_assert(is_p2m1(a2));
- tcg_out_opc_bf(s, OPC_EXT, a0, a1, msb, 0);
- break;
- }
- i1 = OPC_AND, i2 = OPC_ANDI;
- goto do_binary;
- case INDEX_op_and_i64:
- if (c2 && a2 != (uint16_t)a2) {
- int msb = ctz64(~a2) - 1;
- tcg_debug_assert(use_mips32r2_instructions);
- tcg_debug_assert(is_p2m1(a2));
- tcg_out_opc_bf64(s, OPC_DEXT, OPC_DEXTM, OPC_DEXTU, a0, a1, msb, 0);
- break;
- }
- i1 = OPC_AND, i2 = OPC_ANDI;
- goto do_binary;
case INDEX_op_nor_i32:
case INDEX_op_nor_i64:
i1 = OPC_NOR;
case INDEX_op_muls2_i64:
case INDEX_op_mulu2_i64:
return C_O2_I2(r, r, r, r);
- case INDEX_op_and_i32:
- case INDEX_op_and_i64:
- return C_O1_I2(r, r, rIK);
case INDEX_op_or_i32:
case INDEX_op_xor_i32:
case INDEX_op_or_i64:
.out_rri = tgen_addi,
};
+static void tgen_and(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out32(s, AND | SAB(a1, a0, a2));
+}
+
+static void tgen_andi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
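+    /* tcg_out_andi32/64 accept any constant, hence the plain "i". */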
+ if (type == TCG_TYPE_I32) {
+ tcg_out_andi32(s, a0, a1, a2);
+ } else {
+ tcg_out_andi64(s, a0, a1, a2);
+ }
+}
+
+static const TCGOutOpBinary outop_and = {
+ .base.static_constraint = C_O1_I2(r, r, ri),
+ .out_rrr = tgen_and,
+ .out_rri = tgen_andi,
+};
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
}
break;
- case INDEX_op_and_i32:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out_andi32(s, a0, a1, a2);
- } else {
- tcg_out32(s, AND | SAB(a1, a0, a2));
- }
- break;
- case INDEX_op_and_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out_andi64(s, a0, a1, a2);
- } else {
- tcg_out32(s, AND | SAB(a1, a0, a2));
- }
- break;
case INDEX_op_or_i64:
case INDEX_op_or_i32:
a0 = args[0], a1 = args[1], a2 = args[2];
case INDEX_op_st_i64:
return C_O0_I2(r, r);
- case INDEX_op_and_i32:
case INDEX_op_or_i32:
case INDEX_op_xor_i32:
case INDEX_op_andc_i32:
case INDEX_op_sar_i32:
case INDEX_op_rotl_i32:
case INDEX_op_rotr_i32:
- case INDEX_op_and_i64:
case INDEX_op_andc_i64:
case INDEX_op_shl_i64:
case INDEX_op_shr_i64:
.out_rri = tgen_addi,
};
+static void tgen_and(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_opc_reg(s, OPC_AND, a0, a1, a2);
+}
+
+static void tgen_andi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
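+    /* The "I" constraint guarantees a signed 12-bit immediate. */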
+ tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_and = {
+ .base.static_constraint = C_O1_I2(r, r, rI),
+ .out_rrr = tgen_and,
+ .out_rri = tgen_andi,
+};
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
}
break;
- case INDEX_op_and_i32:
- case INDEX_op_and_i64:
- if (c2) {
- tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2);
- } else {
- tcg_out_opc_reg(s, OPC_AND, a0, a1, a2);
- }
- break;
-
case INDEX_op_or_i32:
case INDEX_op_or_i64:
if (c2) {
case INDEX_op_st_i64:
return C_O0_I2(rz, r);
- case INDEX_op_and_i32:
case INDEX_op_or_i32:
case INDEX_op_xor_i32:
- case INDEX_op_and_i64:
case INDEX_op_or_i64:
case INDEX_op_xor_i64:
case INDEX_op_setcond_i32:
.out_rri = tgen_addi,
};
+static void tgen_and(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
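+    /*
+     * Use three-operand NGRK for 64-bit.  For 32-bit, prefer the
+     * short two-operand NR when the output aliases the first input.
+     */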
+ if (type != TCG_TYPE_I32) {
+ tcg_out_insn(s, RRFa, NGRK, a0, a1, a2);
+ } else if (a0 == a1) {
+ tcg_out_insn(s, RR, NR, a0, a2);
+ } else {
+ tcg_out_insn(s, RRFa, NRK, a0, a1, a2);
+ }
+}
+
+static void tgen_andi_3(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
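+    /* Reuse the pre-existing two-operand tgen_andi after a move. */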
+ tcg_out_mov(s, type, a0, a1);
+ tgen_andi(s, type, a0, a2);
+}
+
+static const TCGOutOpBinary outop_and = {
+ .base.static_constraint = C_O1_I2(r, r, rNKR),
+ .out_rrr = tgen_and,
+ .out_rri = tgen_andi_3,
+};
+
# define OP_32_64(x) \
case glue(glue(INDEX_op_,x),_i32): \
}
break;
- case INDEX_op_and_i32:
- a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
- if (const_args[2]) {
- tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
- tgen_andi(s, TCG_TYPE_I32, a0, a2);
- } else if (a0 == a1) {
- tcg_out_insn(s, RR, NR, a0, a2);
- } else {
- tcg_out_insn(s, RRFa, NRK, a0, a1, a2);
- }
- break;
case INDEX_op_or_i32:
a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
if (const_args[2]) {
}
break;
- case INDEX_op_and_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
- tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
- } else {
- tcg_out_insn(s, RRFa, NGRK, a0, a1, a2);
- }
- break;
case INDEX_op_or_i64:
a0 = args[0], a1 = args[1], a2 = args[2];
if (const_args[2]) {
case INDEX_op_sub_i32:
case INDEX_op_sub_i64:
- case INDEX_op_and_i32:
case INDEX_op_or_i32:
case INDEX_op_xor_i32:
return C_O1_I2(r, r, ri);
- case INDEX_op_and_i64:
- return C_O1_I2(r, r, rNKR);
case INDEX_op_or_i64:
case INDEX_op_xor_i64:
return C_O1_I2(r, r, rK);
.out_rri = tgen_addi,
};
+static void tgen_and(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_arith(s, a0, a1, a2, ARITH_AND);
+}
+
+static void tgen_andi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
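+    /* The "J" constraint guarantees a signed 13-bit immediate. */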
+ tcg_out_arithi(s, a0, a1, a2, ARITH_AND);
+}
+
+static const TCGOutOpBinary outop_and = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_and,
+ .out_rri = tgen_andi,
+};
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
OP_32_64(sub):
c = ARITH_SUB;
goto gen_arith;
- OP_32_64(and):
- c = ARITH_AND;
- goto gen_arith;
OP_32_64(andc):
c = ARITH_ANDN;
goto gen_arith;
case INDEX_op_divu_i64:
case INDEX_op_sub_i32:
case INDEX_op_sub_i64:
- case INDEX_op_and_i32:
- case INDEX_op_and_i64:
case INDEX_op_andc_i32:
case INDEX_op_andc_i64:
case INDEX_op_or_i32:
/* Register allocation descriptions for every TCGOpcode. */
static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_add, TCGOutOpBinary, outop_add),
+ OUTOP(INDEX_op_and_i32, TCGOutOpBinary, outop_and),
+ OUTOP(INDEX_op_and_i64, TCGOutOpBinary, outop_and),
};
#undef OUTOP
break;
case INDEX_op_add:
+ case INDEX_op_and_i32:
+ case INDEX_op_and_i64:
{
const TCGOutOpBinary *out =
container_of(all_outop[op->opc], TCGOutOpBinary, base);
case INDEX_op_sub_i64:
case INDEX_op_mul_i32:
case INDEX_op_mul_i64:
- case INDEX_op_and_i32:
- case INDEX_op_and_i64:
case INDEX_op_andc_i32:
case INDEX_op_andc_i64:
case INDEX_op_eqv_i32:
.out_rrr = tgen_add,
};
+static void tgen_and(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
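+    /* AND is width-agnostic: emit the host-word-sized opcode. */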
+ tcg_out_op_rrr(s, glue(INDEX_op_and_i,TCG_TARGET_REG_BITS), a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_and = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_and,
+};
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
CASE_32_64(sub)
CASE_32_64(mul)
- CASE_32_64(and)
CASE_32_64(or)
CASE_32_64(xor)
CASE_32_64(andc) /* Optional (TCG_TARGET_HAS_andc_*). */