When we set cpu->cflags_next_tb, it is because we want to carefully
control the execution of the next TB. Currently there is a race that
causes the second stage of watchpoint handling to get ignored if an
IRQ is processed before we finish executing the instruction that
triggers the watchpoint. Use the new CF_NOIRQ facility to avoid the
race.
We also suppress IRQs when handling precise self-modifying code to
avoid unnecessary bouncing.
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Cc: Pavel Dovgalyuk <pavel.dovgalyuk@ispras.ru>
Fixes: https://gitlab.com/qemu-project/qemu/-/issues/245
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20211129140932.4115115-3-alex.bennee@linaro.org>
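For context, the cflags request set up through cpu->cflags_next_tb is
consumed at the top of the execution loop, roughly as in the sketch
below (paraphrasing cpu_exec, not part of this patch). Note that
cpu_handle_interrupt() runs just before this point, which is why it
has to stand aside once a CF_NOIRQ request has been queued.

    /* Rough sketch of the consuming side: the requested cflags are
     * applied to exactly one TB and then cleared back to the -1
     * sentinel, so the special TB runs once and only once. */
    cflags = cpu->cflags_next_tb;
    if (cflags == -1) {
        cflags = curr_cflags(cpu);      /* normal execution */
    } else {
        cpu->cflags_next_tb = -1;       /* one-shot request consumed */
    }
    tb = tb_lookup(cpu, pc, cs_base, flags, cflags);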
 static inline bool cpu_handle_interrupt(CPUState *cpu,
                                         TranslationBlock **last_tb)
 {
+    /*
+     * If we have requested custom cflags with CF_NOIRQ we should
+     * skip checking here. Any pending interrupts will get picked up
+     * by the next TB we execute under normal cflags.
+     */
+    if (cpu->cflags_next_tb != -1 && cpu->cflags_next_tb & CF_NOIRQ) {
+        return false;
+    }
+
     /* Clear the interrupt flag now since we're processing
      * cpu->interrupt_request and cpu->exit_request.
      * Ensure zeroing happens before reading cpu->exit_request or
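A note on the shape of the guard above: cflags_next_tb is reset to -1
when no special request is pending, and as an all-ones value it would
also satisfy a bare CF_NOIRQ bit test, which is why the explicit != -1
comparison comes first. A minimal restatement, assuming the field is
the uint32_t that cpu_common_reset sets back to -1:

    uint32_t next = cpu->cflags_next_tb;   /* (uint32_t)-1 when unused */
    bool skip_irq_check = next != -1 && (next & CF_NOIRQ);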
     if (current_tb_modified) {
         page_collection_unlock(pages);
         /* Force execution of one insn next time. */
-        cpu->cflags_next_tb = 1 | curr_cflags(cpu);
+        cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
         mmap_unlock();
         cpu_loop_exit_noexc(cpu);
     }
 #ifdef TARGET_HAS_PRECISE_SMC
     if (current_tb_modified) {
         /* Force execution of one insn next time. */
-        cpu->cflags_next_tb = 1 | curr_cflags(cpu);
+        cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
         return true;
     }
 #endif
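Both precise-SMC hunks above build the same request, and the
watchpoint hunks below add CF_LAST_IO on top of it. Purely to
illustrate the pattern, a hypothetical helper (not something this
patch introduces) could spell it out as:

    /* Hypothetical helper, not part of the patch: request a
     * single-insn, IRQ-suppressed TB for the next execution. */
    static inline uint32_t single_insn_noirq_cflags(CPUState *cpu)
    {
        return 1 | CF_NOIRQ | curr_cflags(cpu);
    }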
                  */
                 if (!cpu->can_do_io) {
                     /* Force execution of one insn next time. */
-                    cpu->cflags_next_tb = 1 | CF_LAST_IO | curr_cflags(cpu);
+                    cpu->cflags_next_tb = 1 | CF_LAST_IO | CF_NOIRQ | curr_cflags(cpu);
                     cpu_loop_exit_restore(cpu, ra);
                 }
                 /*
                 cpu_loop_exit(cpu);
             } else {
                 /* Force execution of one insn next time. */
-                cpu->cflags_next_tb = 1 | CF_LAST_IO | curr_cflags(cpu);
+                cpu->cflags_next_tb = 1 | CF_LAST_IO | CF_NOIRQ | curr_cflags(cpu);
                 mmap_unlock();
                 cpu_loop_exit_noexc(cpu);
             }