    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
-        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
+        tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),

    tcg_gen_add_i64(o->out, o->in1, o->in2);
    if (non_atomic) {
-        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
+        tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

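On s390x TARGET_LONG_BITS is 64, so the tcg_gen_qemu_ld_tl/st_tl forms already resolve to the _i64 variants; spelling out _i64 only makes the 64-bit access width explicit and should leave the generated ops unchanged. The two branches above do the same read-modify-write in different ways, roughly as in this plain C11 sketch (asi_like is an illustrative name, not a QEMU function, and the memory orderings are simplified):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustration only: the guest-visible effect of the two paths above,
 * written with C11 atomics instead of TCG ops. */
static uint64_t asi_like(_Atomic uint64_t *mem, uint64_t imm, bool non_atomic)
{
    uint64_t old;

    if (non_atomic) {
        /* separate load and store, as in the non-interlocked path */
        old = atomic_load_explicit(mem, memory_order_relaxed);
        atomic_store_explicit(mem, old + imm, memory_order_relaxed);
    } else {
        /* one interlocked read-modify-write, as in the atomic path */
        old = atomic_fetch_add(mem, imm);
    }
    /* the sum is recomputed either way so the condition code can use it */
    return old + imm;
}
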
    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
-        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
+        tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),

    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    if (non_atomic) {
-        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
+        tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

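This is the logical (unsigned) variant: tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src) forms the double-word sum of the operands extended with the incoming high words, so assuming cc_src was zeroed just before (not visible in this hunk), o->out gets the 64-bit result and cc_src gets the carry-out used for the condition code. In plain C the carry is simply:

#include <stdint.h>

/* Sketch only: carry-out of an unsigned 64-bit add, the value that lands
 * in cc_src above when the incoming high words are zero.  add_with_carry
 * is a made-up helper, not QEMU code. */
static uint64_t add_with_carry(uint64_t a, uint64_t b, uint64_t *carry)
{
    uint64_t lo = a + b;

    *carry = lo < a;    /* 1 if the addition wrapped around */
    return lo;
}
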
    o->in1 = tcg_temp_new_i64();
    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
-        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
+        tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),

    tcg_gen_and_i64(o->out, o->in1, o->in2);
    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
-        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
+        tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

        mop = ctz32(l + 1) | MO_BE;
        /* Do not update cc_src yet: loading cc_dst may cause an exception. */
        src = tcg_temp_new_i64();
-        tcg_gen_qemu_ld_tl(src, o->addr1, get_mem_index(s), mop);
-        tcg_gen_qemu_ld_tl(cc_dst, o->in2, get_mem_index(s), mop);
+        tcg_gen_qemu_ld_i64(src, o->addr1, get_mem_index(s), mop);
+        tcg_gen_qemu_ld_i64(cc_dst, o->in2, get_mem_index(s), mop);
        gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, src, cc_dst);
        return DISAS_NEXT;
    default:

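In this hunk the compare length picks the memop arithmetically: for l + 1 in {1, 2, 4, 8}, ctz32(l + 1) gives 0, 1, 2, 3, i.e. MO_8, MO_16, MO_32, MO_64, and OR-ing in MO_BE selects the big-endian form of that size. The first operand is loaded into a temporary rather than straight into cc_src, so a fault on the second load cannot leave the condition-code inputs half updated, which is what the comment is guarding against.
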
static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
{
-    tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
-                       MO_BESL | s->insn->data);
+    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s),
+                        MO_BESL | s->insn->data);
    return DISAS_NEXT;
}

static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
{
-    tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
-                       MO_BEUL | s->insn->data);
+    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s),
+                        MO_BEUL | s->insn->data);
    return DISAS_NEXT;
}

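op_ld32s and op_ld32u differ only in the memop: MO_BESL sign-extends the 32-bit big-endian value into the 64-bit o->out, MO_BEUL zero-extends it. A rough stand-alone C illustration of the value that ends up in the destination (ld32_be is a made-up helper, not QEMU code):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Big-endian 32-bit load followed by sign- or zero-extension to 64 bits. */
static uint64_t ld32_be(const void *p, bool sign_extend)
{
    uint8_t b[4];
    uint32_t v;

    memcpy(b, p, 4);
    v = ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
        ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
    return sign_extend ? (uint64_t)(int64_t)(int32_t)v   /* MO_BESL */
                       : (uint64_t)v;                    /* MO_BEUL */
}
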
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
{
-    tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
+    tcg_gen_qemu_ld_i64(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
    return DISAS_NEXT;
}
#endif

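LOAD USING REAL ADDRESS accesses real storage rather than a translated address, which is why this load goes through MMU_REAL_IDX instead of get_mem_index(s); real addresses only exist under system emulation, hence the CONFIG_USER_ONLY guard.
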
    o->in1 = tcg_temp_new_i64();
    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
-        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
+        tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),

    tcg_gen_or_i64(o->out, o->in1, o->in2);
    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
-        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
+        tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
-    tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);
+    tcg_gen_qemu_st_i64(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);
    if (s->base.tb->flags & FLAG_MASK_PER_STORE_REAL) {
        update_cc_op(s);

static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
-    tcg_gen_qemu_st_tl(o->in1, o->in2, get_mem_index(s),
-                       MO_BEUL | s->insn->data);
+    tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s),
+                        MO_BEUL | s->insn->data);
    return DISAS_NEXT;
}

    o->in1 = tcg_temp_new_i64();
    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
-        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
+        tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),

    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
-        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
+        tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
static void wout_m1_16a(DisasContext *s, DisasOps *o)
{
-    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_BEUW | MO_ALIGN);
+    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_BEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

#ifndef CONFIG_USER_ONLY
static void wout_m1_32a(DisasContext *s, DisasOps *o)
{
-    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_BEUL | MO_ALIGN);
+    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_BEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

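Both aligned-store writeback helpers add MO_ALIGN, so TCG enforces natural alignment and an unaligned address takes the target's alignment-exception path instead of performing the store; the access width still comes from MO_BEUW or MO_BEUL, so only the low 16 or 32 bits of o->out reach memory.
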
static void in2_m2_32ua(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
-    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_BEUL | MO_ALIGN);
+    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_BEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

static void in2_mri2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
-    tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
-                       MO_BESL | MO_ALIGN);
+    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s),
+                        MO_BESL | MO_ALIGN);
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
-    tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
-                       MO_BEUL | MO_ALIGN);
+    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s),
+                        MO_BEUL | MO_ALIGN);
}
#define SPEC_in2_mri2_32u 0