Split these functions out from tcg_out_op.
Define outop_goto_ptr generically.
Call tcg_out_goto_ptr from tcg_reg_alloc_op.
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
tcg_out_bti(s, BTI_J);
}
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
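+ /* Jump to the given host address (could be epilogue) */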
+ tcg_out_insn(s, 3207, BR, a0);
+}
+
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
uintptr_t jmp_rx, uintptr_t jmp_rw)
{
TCGArg a2 = args[2];
switch (opc) {
- case INDEX_op_goto_ptr:
- tcg_out_insn(s, 3207, BR, a0);
- break;
-
case INDEX_op_ld8u_i32:
case INDEX_op_ld8u_i64:
tcg_out_ldst(s, I3312_LDRB, a0, a1, a2, 0);
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_goto_ptr:
- return C_O0_I1(r);
-
case INDEX_op_ld8u_i32:
case INDEX_op_ld8s_i32:
case INDEX_op_ld16u_i32:
set_jmp_reset_offset(s, which);
}
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
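+ /* Jump to the given host address (could be epilogue) */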
+ tcg_out_b_reg(s, COND_AL, a0);
+}
+
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
uintptr_t jmp_rx, uintptr_t jmp_rw)
{
const int const_args[TCG_MAX_OP_ARGS])
{
switch (opc) {
- case INDEX_op_goto_ptr:
- tcg_out_b_reg(s, COND_AL, args[0]);
- break;
-
case INDEX_op_ld8u_i32:
tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
break;
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_goto_ptr:
- return C_O0_I1(r);
-
case INDEX_op_ld8u_i32:
case INDEX_op_ld8s_i32:
case INDEX_op_ld16u_i32:
set_jmp_reset_offset(s, which);
}
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
+ /* Jump to the given host address (could be epilogue) */
+ tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, a0);
+}
+
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
uintptr_t jmp_rx, uintptr_t jmp_rw)
{
rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
switch (opc) {
- case INDEX_op_goto_ptr:
- /* jmp to the given host address (could be epilogue) */
- tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, a0);
- break;
OP_32_64(ld8u):
/* Note that we can ignore REXW for the zero-extend to 64-bit. */
tcg_out_modrm_offset(s, OPC_MOVZBL, a0, a1, a2);
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_goto_ptr:
- return C_O0_I1(r);
-
case INDEX_op_ld8u_i32:
case INDEX_op_ld8u_i64:
case INDEX_op_ld8s_i32:
set_jmp_reset_offset(s, which);
}
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
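+ /* Jump to the given host address (could be epilogue) */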
+ tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0);
+}
+
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
uintptr_t jmp_rx, uintptr_t jmp_rw)
{
TCGArg a3 = args[3];
switch (opc) {
- case INDEX_op_goto_ptr:
- tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0);
- break;
-
case INDEX_op_ld8s_i32:
case INDEX_op_ld8s_i64:
tcg_out_ldst(s, OPC_LD_B, a0, a1, a2);
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_goto_ptr:
- return C_O0_I1(r);
-
case INDEX_op_st8_i32:
case INDEX_op_st8_i64:
case INDEX_op_st16_i32:
}
}
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
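+ /* Jump to the given host address (could be epilogue) */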
+ tcg_out_opc_reg(s, OPC_JR, 0, a0, 0);
+ if (TCG_TARGET_REG_BITS == 64) {
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, a0);
+ } else {
+ tcg_out_nop(s);
+ }
+}
+
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
uintptr_t jmp_rx, uintptr_t jmp_rw)
{
a2 = args[2];
switch (opc) {
- case INDEX_op_goto_ptr:
- /* jmp to the given host address (could be epilogue) */
- tcg_out_opc_reg(s, OPC_JR, 0, a0, 0);
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, a0);
- } else {
- tcg_out_nop(s);
- }
- break;
case INDEX_op_ld8u_i32:
case INDEX_op_ld8u_i64:
i1 = OPC_LBU;
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_goto_ptr:
- return C_O0_I1(r);
-
case INDEX_op_ld8u_i32:
case INDEX_op_ld8s_i32:
case INDEX_op_ld16u_i32:
set_jmp_reset_offset(s, which);
}
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
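+ /* Branch via CTR; zero the return value in case the target is the epilogue */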
+ tcg_out32(s, MTSPR | RS(a0) | CTR);
+ tcg_out32(s, ADDI | TAI(TCG_REG_R3, 0, 0));
+ tcg_out32(s, BCCTR | BO_ALWAYS);
+}
+
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
uintptr_t jmp_rx, uintptr_t jmp_rw)
{
const int const_args[TCG_MAX_OP_ARGS])
{
switch (opc) {
- case INDEX_op_goto_ptr:
- tcg_out32(s, MTSPR | RS(args[0]) | CTR);
- tcg_out32(s, ADDI | TAI(TCG_REG_R3, 0, 0));
- tcg_out32(s, BCCTR | BO_ALWAYS);
- break;
case INDEX_op_ld8u_i32:
case INDEX_op_ld8u_i64:
tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_goto_ptr:
- return C_O0_I1(r);
-
case INDEX_op_ld8u_i32:
case INDEX_op_ld8s_i32:
case INDEX_op_ld16u_i32:
set_jmp_reset_offset(s, which);
}
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
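+ /* Jump to the given host address (could be epilogue) */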
+ tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0);
+}
+
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
uintptr_t jmp_rx, uintptr_t jmp_rw)
{
TCGArg a2 = args[2];
switch (opc) {
- case INDEX_op_goto_ptr:
- tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0);
- break;
-
case INDEX_op_ld8u_i32:
case INDEX_op_ld8u_i64:
tcg_out_ldst(s, OPC_LBU, a0, a1, a2);
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_goto_ptr:
- return C_O0_I1(r);
-
case INDEX_op_ld8u_i32:
case INDEX_op_ld8s_i32:
case INDEX_op_ld16u_i32:
set_jmp_reset_offset(s, which);
}
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
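+ /* Jump to the given host address (could be epilogue) */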
+ tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, a0);
+}
+
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
uintptr_t jmp_rx, uintptr_t jmp_rw)
{
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
{
- TCGArg a0;
-
switch (opc) {
- case INDEX_op_goto_ptr:
- a0 = args[0];
- tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, a0);
- break;
-
OP_32_64(ld8u):
/* ??? LLC (RXY format) is only present with the extended-immediate
facility, whereas LLGC is always present. */
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_goto_ptr:
- return C_O0_I1(r);
-
case INDEX_op_ld8u_i32:
case INDEX_op_ld8u_i64:
case INDEX_op_ld8s_i32:
}
}
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
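+ /* Jump to the given host address; TCG_REG_TB is updated in the delay slot */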
+ tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
+ tcg_out_mov_delay(s, TCG_REG_TB, a0);
+}
+
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
uintptr_t jmp_rx, uintptr_t jmp_rw)
{
a2 = args[2];
switch (opc) {
- case INDEX_op_goto_ptr:
- tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
- tcg_out_mov_delay(s, TCG_REG_TB, a0);
- break;
-
#define OP_32_64(x) \
glue(glue(case INDEX_op_, x), _i32): \
glue(glue(case INDEX_op_, x), _i64)
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_goto_ptr:
- return C_O0_I1(r);
-
case INDEX_op_ld8u_i32:
case INDEX_op_ld8u_i64:
case INDEX_op_ld8s_i32:
static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2);
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
static void tcg_out_goto_tb(TCGContext *s, int which);
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg dest);
static void tcg_out_mb(TCGContext *s, unsigned bar);
static void tcg_out_br(TCGContext *s, TCGLabel *l);
static void tcg_out_set_carry(TCGContext *s);
};
#endif
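+/* goto_ptr carries only the register constraint; emission happens in tcg_reg_alloc_op */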
+static const TCGOutOp outop_goto_ptr = {
+ .static_constraint = C_O0_I1(r),
+};
+
/*
* Register V as the TCGOutOp for O.
* This verifies that V is of type T, otherwise give a nice compiler error.
OUTOP(INDEX_op_subbio, TCGOutOpAddSubCarry, outop_subbio),
OUTOP(INDEX_op_xor, TCGOutOpBinary, outop_xor),
+ [INDEX_op_goto_ptr] = &outop_goto_ptr,
+
#if TCG_TARGET_REG_BITS == 32
OUTOP(INDEX_op_brcond2_i32, TCGOutOpBrcond2, outop_brcond2),
OUTOP(INDEX_op_setcond2_i32, TCGOutOpSetcond2, outop_setcond2),
g_assert_not_reached();
#endif
+ case INDEX_op_goto_ptr:
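+ /* The C_O0_I1(r) constraint guarantees the input is never a constant */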
+ tcg_debug_assert(!const_args[0]);
+ tcg_out_goto_ptr(s, new_args[0]);
+ break;
+
default:
if (def->flags & TCG_OPF_VECTOR) {
tcg_out_vec_op(s, op->opc, type - TCG_TYPE_V64,
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_goto_ptr:
- return C_O0_I1(r);
-
case INDEX_op_ld8u_i32:
case INDEX_op_ld8s_i32:
case INDEX_op_ld16u_i32:
set_jmp_reset_offset(s, which);
}
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
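+ /* Forward as a single-register bytecode op for the interpreter */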
+ tcg_out_op_r(s, INDEX_op_goto_ptr, a0);
+}
+
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
uintptr_t jmp_rx, uintptr_t jmp_rw)
{
const int const_args[TCG_MAX_OP_ARGS])
{
switch (opc) {
- case INDEX_op_goto_ptr:
- tcg_out_op_r(s, opc, args[0]);
- break;
-
CASE_32_64(ld8u)
CASE_32_64(ld8s)
CASE_32_64(ld16u)