[MO_SW] = helper_ldsw_mmu,
[MO_UL] = helper_ldul_mmu,
[MO_UQ] = helper_ldq_mmu,
-#if TCG_TARGET_REG_BITS == 64
[MO_SL] = helper_ldsl_mmu,
[MO_128] = helper_ld16_mmu,
-#endif
};
static void * const qemu_st_helpers[MO_SIZE + 1] __attribute__((unused)) = {
[MO_16] = helper_stw_mmu,
[MO_32] = helper_stl_mmu,
[MO_64] = helper_stq_mmu,
-#if TCG_TARGET_REG_BITS == 64
[MO_128] = helper_st16_mmu,
-#endif
};
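/*
 * A minimal standalone sketch of the lookup pattern used above: both
 * tables are indexed by the MemOp size bits, so the slow path picks
 * its helper with one array access.  The enum values and helper names
 * here are stand-ins, not QEMU's definitions.
 */
enum { XMO_8, XMO_16, XMO_32, XMO_64, XMO_128, XMO_NSIZE };

static long xld_w(void *env) { (void)env; return 16; }
static long xld_l(void *env) { (void)env; return 32; }

/* Designated initializers leave unhandled sizes NULL, as above. */
static long (* const xld_table[XMO_NSIZE])(void *) = {
    [XMO_16] = xld_w,
    [XMO_32] = xld_l,
};

static long xcall_ld(void *env, unsigned size_bits)
{
    return xld_table[size_bits](env);   /* one lookup selects the helper */
}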
typedef struct {
}
break;
case MO_UQ:
- tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
if (dst_type == TCG_TYPE_I32) {
tcg_out_extrl_i64_i32(s, dst, src);
} else {
< MIN_TLB_MASK_TABLE_OFS);
#endif
-#if TCG_TARGET_REG_BITS == 64
/*
* We require these functions for slow-path function calls.
* Adapt them generically for opcode output.
.base.static_constraint = C_O1_I1(r, r),
.out_rr = TCG_TARGET_HAS_extr_i64_i32 ? tgen_extrl_i64_i32 : NULL,
};
-#endif
static const TCGOutOp outop_goto_ptr = {
.static_constraint = C_O0_I1(r),
* the helpers, with the end result that it's easier to build manually.
*/
-#if TCG_TARGET_REG_BITS == 32
-# define dh_typecode_ttl dh_typecode_i32
-#else
-# define dh_typecode_ttl dh_typecode_i64
-#endif
+#define dh_typecode_ttl dh_typecode_i64
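/*
 * A note on the define above: "ttl" is the typecode for a
 * tcg_target_ulong value, which on a 64-bit-only host is always 64
 * bits, so the 32-bit alternative collapses away.  Below is a
 * standalone model of how per-slot typecodes can pack into one
 * typemask word; the 3-bit field width and all names are assumptions
 * for illustration, not QEMU's helper-header machinery.
 */
enum xtc { XTC_VOID, XTC_I32, XTC_I64, XTC_PTR };
#define xtypemask(code, slot)  ((unsigned)(code) << ((slot) * 3))
/* Slot 0 is the return value, slot 1 the first argument, and so on. */
enum { XLD32_TYPEMASK = xtypemask(XTC_I64, 0) | xtypemask(XTC_PTR, 1) };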
static TCGHelperInfo info_helper_ld32_mmu = {
.flags = TCG_CALL_NO_WG,
break;
case dh_typecode_i32:
case dh_typecode_s32:
- case dh_typecode_ptr:
- info->nr_out = 1;
- info->out_kind = TCG_CALL_RET_NORMAL;
- break;
case dh_typecode_i64:
case dh_typecode_s64:
- info->nr_out = 64 / TCG_TARGET_REG_BITS;
+ case dh_typecode_ptr:
+ info->nr_out = 1;
info->out_kind = TCG_CALL_RET_NORMAL;
- /* Query the last register now to trigger any assert early. */
- tcg_target_call_oarg_reg(info->out_kind, info->nr_out - 1);
break;
case dh_typecode_i128:
info->nr_out = 128 / TCG_TARGET_REG_BITS;
layout_arg_even(&cum);
/* fall through */
case TCG_CALL_ARG_NORMAL:
- if (TCG_TARGET_REG_BITS == 32) {
- layout_arg_normal_n(&cum, info, 2);
- } else {
- layout_arg_1(&cum, info, TCG_CALL_ARG_NORMAL);
- }
+ layout_arg_1(&cum, info, TCG_CALL_ARG_NORMAL);
break;
default:
qemu_build_not_reached();
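/*
 * Worked example for the layout changes above, as a standalone sketch
 * (names are stand-ins): with TCG_TARGET_REG_BITS fixed at 64, every
 * scalar return (i32/s32/i64/s64/ptr) fits in one register, so nr_out
 * is simply 1; only a 128-bit return still needs 128 / 64 = 2
 * registers, and every normal argument occupies a single slot.
 */
#define XREG_BITS 64
static int x_nr_out(int return_bits)
{
    return return_bits <= XREG_BITS ? 1 : return_bits / XREG_BITS;
}
/* x_nr_out(32) == 1, x_nr_out(64) == 1, x_nr_out(128) == 2. */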
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
TCGReg reg, const char *name)
{
-    TCGTemp *ts;
-
-    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
-    ts = tcg_global_alloc(s);
+    TCGTemp *ts = tcg_global_alloc(s);
ts->base_type = type;
ts->type = type;
ts->kind = TEMP_FIXED;
/* We do not support double-indirect registers. */
tcg_debug_assert(!base_ts->indirect_reg);
base_ts->indirect_base = 1;
- s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
- ? 2 : 1);
+ s->nb_indirects += 1;
indirect_reg = 1;
break;
default:
g_assert_not_reached();
}
- if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
- TCGTemp *ts2 = tcg_global_alloc(s);
- char buf[64];
-
- ts->base_type = TCG_TYPE_I64;
- ts->type = TCG_TYPE_I32;
- ts->indirect_reg = indirect_reg;
- ts->mem_allocated = 1;
- ts->mem_base = base_ts;
- ts->mem_offset = offset;
- pstrcpy(buf, sizeof(buf), name);
- pstrcat(buf, sizeof(buf), "_0");
- ts->name = strdup(buf);
-
- tcg_debug_assert(ts2 == ts + 1);
- ts2->base_type = TCG_TYPE_I64;
- ts2->type = TCG_TYPE_I32;
- ts2->indirect_reg = indirect_reg;
- ts2->mem_allocated = 1;
- ts2->mem_base = base_ts;
- ts2->mem_offset = offset + 4;
- ts2->temp_subindex = 1;
- pstrcpy(buf, sizeof(buf), name);
- pstrcat(buf, sizeof(buf), "_1");
- ts2->name = strdup(buf);
- } else {
- ts->base_type = type;
- ts->type = type;
- ts->indirect_reg = indirect_reg;
- ts->mem_allocated = 1;
- ts->mem_base = base_ts;
- ts->mem_offset = offset;
- ts->name = name;
- }
+ ts->base_type = type;
+ ts->type = type;
+ ts->indirect_reg = indirect_reg;
+ ts->mem_allocated = 1;
+ ts->mem_base = base_ts;
+ ts->mem_offset = offset;
+ ts->name = name;
return ts;
}
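/*
 * The deleted branch above split a 64-bit global on a 32-bit host into
 * two adjacent 32-bit temps, "name_0" at offset and "name_1" at
 * offset + 4.  A minimal standalone model of what remains: one temp
 * now describes any scalar global.
 */
struct xtemp { int type_bits; long mem_offset; const char *name; };

static void x_init_mem_global(struct xtemp *ts, int type_bits,
                              long offset, const char *name)
{
    ts->type_bits = type_bits;   /* I32 or I64: a single description */
    ts->mem_offset = offset;     /* no second temp at offset + 4 */
    ts->name = name;
}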
switch (type) {
case TCG_TYPE_I32:
+ case TCG_TYPE_I64:
case TCG_TYPE_V64:
case TCG_TYPE_V128:
case TCG_TYPE_V256:
n = 1;
break;
- case TCG_TYPE_I64:
- n = 64 / TCG_TARGET_REG_BITS;
- break;
case TCG_TYPE_I128:
n = 128 / TCG_TARGET_REG_BITS;
break;
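/*
 * Worked example for the switch above: on a 64-bit-only host, I32,
 * I64 and every vector type allocate n = 1 TCGTemp, while I128
 * allocates 128 / TCG_TARGET_REG_BITS = 2 adjacent temps.  As a
 * standalone sketch:
 */
static int x_temps_for(int type_bits, int is_vector)
{
    return (!is_vector && type_bits == 128) ? 128 / 64 : 1;
}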
ts = g_hash_table_lookup(h, &val);
if (ts == NULL) {
- int64_t *val_ptr;
-
ts = tcg_temp_alloc(s);
-
- if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
- TCGTemp *ts2 = tcg_temp_alloc(s);
-
- tcg_debug_assert(ts2 == ts + 1);
-
- ts->base_type = TCG_TYPE_I64;
- ts->type = TCG_TYPE_I32;
- ts->kind = TEMP_CONST;
- ts->temp_allocated = 1;
-
- ts2->base_type = TCG_TYPE_I64;
- ts2->type = TCG_TYPE_I32;
- ts2->kind = TEMP_CONST;
- ts2->temp_allocated = 1;
- ts2->temp_subindex = 1;
-
- /*
- * Retain the full value of the 64-bit constant in the low
- * part, so that the hash table works. Actual uses will
- * truncate the value to the low part.
- */
- ts[HOST_BIG_ENDIAN].val = val;
- ts[!HOST_BIG_ENDIAN].val = val >> 32;
- val_ptr = &ts[HOST_BIG_ENDIAN].val;
- } else {
- ts->base_type = type;
- ts->type = type;
- ts->kind = TEMP_CONST;
- ts->temp_allocated = 1;
- ts->val = val;
- val_ptr = &ts->val;
- }
- g_hash_table_insert(h, val_ptr, ts);
+ ts->base_type = type;
+ ts->type = type;
+ ts->kind = TEMP_CONST;
+ ts->temp_allocated = 1;
+ ts->val = val;
+ g_hash_table_insert(h, &ts->val, ts);
}
return ts;
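/*
 * A standalone model of the interning above, assuming GLib's 64-bit
 * hash helpers: the table is keyed on &ts->val so the key lives as
 * long as the temp, and the two-half layout that once kept the full
 * value in the low part is no longer needed.
 */
#include <glib.h>

typedef struct { gint64 val; } XConstTemp;

static XConstTemp *x_intern(GHashTable *h, gint64 val)
{
    XConstTemp *ts = g_hash_table_lookup(h, &val);
    if (ts == NULL) {
        ts = g_new0(XConstTemp, 1);
        ts->val = val;
        /* Key points into the temp itself, matching the code above. */
        g_hash_table_insert(h, &ts->val, ts);
    }
    return ts;
}

/* h = g_hash_table_new(g_int64_hash, g_int64_equal); */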
switch (type) {
case TCG_TYPE_I32:
- has_type = true;
- break;
case TCG_TYPE_I64:
- has_type = TCG_TARGET_REG_BITS == 64;
+ has_type = true;
break;
case TCG_TYPE_V64:
has_type = TCG_TARGET_HAS_v64;
case INDEX_op_qemu_ld2:
case INDEX_op_qemu_st2:
- if (TCG_TARGET_REG_BITS == 32) {
- tcg_debug_assert(type == TCG_TYPE_I64);
- return true;
- }
tcg_debug_assert(type == TCG_TYPE_I128);
goto do_lookup;
case INDEX_op_extu_i32_i64:
case INDEX_op_extrl_i64_i32:
case INDEX_op_extrh_i64_i32:
- return TCG_TARGET_REG_BITS == 64;
+ return true;
case INDEX_op_mov_vec:
case INDEX_op_dup_vec:
case TCG_TYPE_I32:
snprintf(buf, buf_size, "$0x%x", (int32_t)ts->val);
break;
-#if TCG_TARGET_REG_BITS > 32
case TCG_TYPE_I64:
snprintf(buf, buf_size, "$0x%" PRIx64, ts->val);
break;
-#endif
case TCG_TYPE_V64:
case TCG_TYPE_V128:
case TCG_TYPE_V256:
case INDEX_op_extu_i32_i64:
case INDEX_op_extrl_i64_i32:
case INDEX_op_extrh_i64_i32:
- assert(TCG_TARGET_REG_BITS == 64);
- /* fall through */
case INDEX_op_ctpop:
case INDEX_op_neg:
case INDEX_op_not:
* Each stack slot is TCG_TARGET_LONG_BITS. If the host does not
* require extension to uint64_t, adjust the address for uint32_t.
*/
- if (HOST_BIG_ENDIAN &&
- TCG_TARGET_REG_BITS == 64 &&
- type == TCG_TYPE_I32) {
+ if (HOST_BIG_ENDIAN && type == TCG_TYPE_I32) {
ofs += 4;
}
return ofs;
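/*
 * Worked example for the adjustment above: stack slots are one host
 * register (now always 8 bytes) wide, and on a big-endian host a
 * 32-bit value occupies the high-addressed half of its slot.  As a
 * standalone sketch:
 */
static int x_slot_ofs(int slot, int host_big_endian, int is_i32)
{
    int ofs = slot * 8;              /* 8-byte slots on a 64-bit host */
    if (host_big_endian && is_i32) {
        ofs += 4;                    /* 32-bit datum sits at slot + 4 */
    }
    return ofs;
}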
return 1;
}
- if (TCG_TARGET_REG_BITS == 32) {
- assert(dst_type == TCG_TYPE_I64);
- reg_mo = MO_32;
- } else {
- assert(dst_type == TCG_TYPE_I128);
- reg_mo = MO_64;
- }
+ assert(dst_type == TCG_TYPE_I128);
+ reg_mo = MO_64;
mov[0].dst = loc[HOST_BIG_ENDIAN].arg_slot;
mov[0].src = lo;
next_arg = 1;
loc = &info->in[next_arg];
- if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) {
- /*
- * 32-bit host with 32-bit guest: zero-extend the guest address
- * to 64-bits for the helper by storing the low part, then
- * load a zero for the high part.
- */
- tcg_out_helper_add_mov(mov, loc + HOST_BIG_ENDIAN,
- TCG_TYPE_I32, TCG_TYPE_I32,
- ldst->addr_reg, -1);
- tcg_out_helper_load_slots(s, 1, mov, parm);
-
- tcg_out_helper_load_imm(s, loc[!HOST_BIG_ENDIAN].arg_slot,
- TCG_TYPE_I32, 0, parm);
- next_arg += 2;
- } else {
- nmov = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, s->addr_type,
- ldst->addr_reg, -1);
- tcg_out_helper_load_slots(s, nmov, mov, parm);
- next_arg += nmov;
- }
+ nmov = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, s->addr_type,
+ ldst->addr_reg, -1);
+ tcg_out_helper_load_slots(s, nmov, mov, parm);
+ next_arg += nmov;
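/*
 * The deleted branch above assembled the helper's 64-bit address
 * argument from two 32-bit pieces (low part plus an explicit zero).
 * Now a single move widens the guest address to uint64_t, zero-
 * extending when s->addr_type is I32.  A standalone model:
 */
static unsigned long long x_helper_addr(unsigned long long addr_reg,
                                        int guest_addr_bits)
{
    if (guest_addr_bits == 32) {
        return (unsigned int)addr_reg;   /* zero-extend the low half */
    }
    return addr_reg;                     /* already 64 bits wide */
}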
switch (info->out_kind) {
case TCG_CALL_RET_NORMAL:
int ofs_slot0;
switch (ldst->type) {
- case TCG_TYPE_I64:
- if (TCG_TARGET_REG_BITS == 32) {
- break;
- }
- /* fall through */
-
case TCG_TYPE_I32:
+ case TCG_TYPE_I64:
mov[0].dst = ldst->datalo_reg;
mov[0].src = tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, 0);
mov[0].dst_type = ldst->type;
* helper functions.
*/
if (load_sign || !(mop & MO_SIGN)) {
- if (TCG_TARGET_REG_BITS == 32 || ldst->type == TCG_TYPE_I32) {
+ if (ldst->type == TCG_TYPE_I32) {
mov[0].src_ext = MO_32;
} else {
mov[0].src_ext = MO_64;
return;
case TCG_TYPE_I128:
- tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
ofs_slot0 = TCG_TARGET_CALL_STACK_OFFSET;
switch (TCG_TARGET_CALL_RET_I128) {
case TCG_CALL_RET_NORMAL:
tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, HOST_BIG_ENDIAN);
mov[0].dst_type = TCG_TYPE_REG;
mov[0].src_type = TCG_TYPE_REG;
- mov[0].src_ext = TCG_TARGET_REG_BITS == 32 ? MO_32 : MO_64;
+ mov[0].src_ext = MO_64;
mov[1].dst = ldst->datahi_reg;
mov[1].src =
tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, !HOST_BIG_ENDIAN);
mov[1].dst_type = TCG_TYPE_REG;
mov[1].src_type = TCG_TYPE_REG;
- mov[1].src_ext = TCG_TARGET_REG_BITS == 32 ? MO_32 : MO_64;
+ mov[1].src_ext = MO_64;
tcg_out_movext2(s, mov, mov + 1, parm->ntmp ? parm->tmp[0] : -1);
}
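/*
 * Sketch of the 128-bit return handling above (standalone; the flag
 * argument is a stand-in for HOST_BIG_ENDIAN): the value arrives as
 * two 64-bit halves in the first two output registers, ordered by
 * host endianness, and tcg_out_movext2() performs both moves while
 * resolving any overlap between sources and destinations.
 */
typedef struct { unsigned long long lo, hi; } XUInt128;

static XUInt128 x_ret128(const unsigned long long oarg[2], int big_endian)
{
    XUInt128 r;
    r.lo = oarg[big_endian];    /* low half: second reg on BE hosts */
    r.hi = oarg[!big_endian];   /* high half: first reg on BE hosts */
    return r;
}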
/* Handle addr argument. */
loc = &info->in[next_arg];
tcg_debug_assert(s->addr_type <= TCG_TYPE_REG);
-    if (TCG_TARGET_REG_BITS == 32) {
-        /*
-         * 32-bit host (and thus 32-bit guest): zero-extend the guest address
-         * to 64-bits for the helper by storing the low part.  Later,
-         * after we have processed the register inputs, we will load a
-         * zero for the high part.
-         */
-        tcg_out_helper_add_mov(mov, loc + HOST_BIG_ENDIAN,
-                               TCG_TYPE_I32, TCG_TYPE_I32,
-                               ldst->addr_reg, -1);
-        next_arg += 2;
-        nmov += 1;
-    } else {
-        n = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, s->addr_type,
-                                   ldst->addr_reg, -1);
-        next_arg += n;
-        nmov += n;
-    }
+    n = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, s->addr_type,
+                               ldst->addr_reg, -1);
+    next_arg += n;
+    nmov += n;
/* Handle data argument. */
loc = &info->in[next_arg];
break;
case TCG_CALL_ARG_BY_REF:
- tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
tcg_debug_assert(data_type == TCG_TYPE_I128);
tcg_out_st(s, TCG_TYPE_I64,
HOST_BIG_ENDIAN ? ldst->datahi_reg : ldst->datalo_reg,
g_assert_not_reached();
}
- if (TCG_TARGET_REG_BITS == 32) {
- /* Zero extend the address by loading a zero for the high part. */
- loc = &info->in[1 + !HOST_BIG_ENDIAN];
- tcg_out_helper_load_imm(s, loc->arg_slot, TCG_TYPE_I32, 0, parm);
- }
-
tcg_out_helper_load_common_args(s, ldst, parm, info, next_arg);
}
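/*
 * The deleted tail above backfilled a zero for the high half of a
 * split 32-bit address after the register arguments were placed; with
 * the address widened in one move, the common arguments (oi, ra) can
 * be loaded directly.  A standalone model of the ordering the code
 * above follows: register-to-slot moves are flushed before immediate
 * loads, so an immediate load cannot clobber a pending source
 * register.  All names here are illustrative only.
 */
struct xargmov { int dst, src; };

static void x_load_args(const struct xargmov *mov, int nmov,
                        void (*out_mov)(int dst, int src),
                        void (*out_imm)(int dst, long val),
                        int imm_dst, long imm)
{
    for (int i = 0; i < nmov; i++) {
        out_mov(mov[i].dst, mov[i].src);
    }
    out_imm(imm_dst, imm);      /* constants last */
}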
switch (opc) {
case INDEX_op_extrl_i64_i32:
- assert(TCG_TARGET_REG_BITS == 64);
/*
* If TCG_TYPE_I32 is represented in some canonical form,
* e.g. zero or sign-extended, then emit as a unary op.