         break;
     case X86_OP_SEG:
         /* Note that gen_movl_seg takes care of interrupt shadow and TF. */
-        gen_movl_seg(s, op->n, s->T0);
+        gen_movl_seg(s, op->n, v, op->n == R_SS);
         break;
     case X86_OP_INT:
         if (op->has_ea) {
     gen_op_ld_v(s, MO_16, s->T1, s->A0);
     /* load the segment here to handle exceptions properly */
-    gen_movl_seg(s, seg, s->T1);
+    gen_movl_seg(s, seg, s->T1, false);
 }

 static void gen_LDS(DisasContext *s, X86DecodedInsn *decode)
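Taken together, the caller hunks implement the rule that only MOV and POP to SS start the one-instruction interrupt shadow: the segment-register writeback path passes op->n == R_SS, while the second call site, the far-pointer load helper that gen_LDS appears to sit next to, always passes false, because LSS and the other far pointer loads do not inhibit interrupts even when they write SS. The standalone sketch below is not part of the patch; it only restates the boolean choices visible in the hunks above, with invented function names, so the mapping can be compiled and sanity-checked in isolation:

    /* Sketch only: mirrors the inhibit_irq arguments chosen above. */
    #include <stdbool.h>
    #include <stdio.h>

    enum { R_ES, R_CS, R_SS, R_DS, R_FS, R_GS };   /* x86 segment register encoding */

    /* Writeback of a decoded segment-register destination (MOV and POP to Sreg). */
    static bool writeback_inhibit_irq(int seg)
    {
        return seg == R_SS;                        /* op->n == R_SS in the hunk above */
    }

    /* Far pointer load path (LDS/LES/LFS/LGS/LSS): never inhibit. */
    static bool far_pointer_inhibit_irq(void)
    {
        return false;
    }

    int main(void)
    {
        printf("mov/pop to ss -> %d\n", writeback_inhibit_irq(R_SS)); /* 1 */
        printf("mov/pop to ds -> %d\n", writeback_inhibit_irq(R_DS)); /* 0 */
        printf("lss           -> %d\n", far_pointer_inhibit_irq());   /* 0 */
        return 0;
    }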
 /* move SRC to seg_reg and compute if the CPU state may change. Never
    call this function with seg_reg == R_CS */
-static void gen_movl_seg(DisasContext *s, X86Seg seg_reg, TCGv src)
+static void gen_movl_seg(DisasContext *s, X86Seg seg_reg, TCGv src, bool inhibit_irq)
 {
     if (PE(s) && !VM86(s)) {
-        tcg_gen_trunc_tl_i32(s->tmp2_i32, src);
-        gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
-        /* abort translation because the addseg value may change or
-           because ss32 may change. For R_SS, translation must always
-           stop as a special handling must be done to disable hardware
-           interrupts for the next instruction */
-        if (seg_reg == R_SS) {
-            s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
-        } else if (CODE32(s) && seg_reg < R_FS) {
+        TCGv_i32 sel = tcg_temp_new_i32();
+
+        tcg_gen_trunc_tl_i32(sel, src);
+        gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), sel);
+
+        /* For move to DS/ES/SS, the addseg or ss32 flags may change. */
+        if (CODE32(s) && seg_reg < R_FS) {
             s->base.is_jmp = DISAS_EOB_NEXT;
         }
     } else {
         gen_op_movl_seg_real(s, seg_reg, src);
-        if (seg_reg == R_SS) {
-            s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
-        }
+    }
+
+    /*
+     * For MOV or POP to SS (but not LSS) translation must always
+     * stop as a special handling must be done to disable hardware
+     * interrupts for the next instruction.
+     *
+     * DISAS_EOB_INHIBIT_IRQ is a superset of DISAS_EOB_NEXT which
+     * might have been set above.
+     */
+    if (inhibit_irq) {
+        s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
     }
 }
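The new comment explains why the final check can simply overwrite s->base.is_jmp: DISAS_EOB_INHIBIT_IRQ is a superset of DISAS_EOB_NEXT, so the interrupt-shadow request is the stronger of the two. As a compilable illustration (not QEMU code; the enum, flag parameters and function name below are invented), the end-of-translation-block decision made by the rewritten gen_movl_seg() can be modeled like this:

    /* Sketch only: models the TB-ending choice made by the new gen_movl_seg(). */
    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { EOB_UNCHANGED, EOB_NEXT, EOB_INHIBIT_IRQ } Eob;

    static Eob seg_load_eob(bool pe, bool vm86, bool code32,
                            bool seg_is_es_cs_ss_ds, bool inhibit_irq)
    {
        Eob eob = EOB_UNCHANGED;

        if (pe && !vm86 && code32 && seg_is_es_cs_ss_ds) {
            eob = EOB_NEXT;           /* addseg or ss32 may change */
        }
        if (inhibit_irq) {
            eob = EOB_INHIBIT_IRQ;    /* stronger request: also starts the shadow */
        }
        return eob;
    }

    int main(void)
    {
        /* MOV to SS in 32-bit protected mode: the shadow request wins. */
        printf("%d\n", seg_load_eob(true, false, true, true, true));     /* 2 */
        /* LSS loading SS in the same mode: only the state-change stop. */
        printf("%d\n", seg_load_eob(true, false, true, true, false));    /* 1 */
        /* MOV to FS in real mode: translation can continue. */
        printf("%d\n", seg_load_eob(false, false, false, false, false)); /* 0 */
        return 0;
    }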