if (kvm_immediate_exit) {
qatomic_set(&cpu->kvm_run->immediate_exit, 0);
- /* Write kvm_run->immediate_exit before the cpu->exit_request
- * write in kvm_cpu_exec.
- */
- smp_wmb();
return;
}
}
kvm_arch_pre_run(cpu, run);
- if (qatomic_read(&cpu->exit_request)) {
+ /* Corresponding store-release is in cpu_exit. */
+ if (qatomic_load_acquire(&cpu->exit_request)) {
trace_kvm_interrupt_exit_request();
/*
* KVM requires us to reenter the kernel after IO exits to complete
kvm_cpu_kick_self();
}
- /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit.
- * Matching barrier in kvm_eat_signals.
- */
- smp_rmb();
-
run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
+ /*
+ * After writing cpu->exit_request, cpu_exit() sends a signal that sets
+ * cpu->kvm_run->immediate_exit. Because the signal is only sent after
+ * the write to cpu->exit_request, if KVM observed immediate_exit as
+ * true, a subsequent read of cpu->exit_request will also return true.
+ */
+
attrs = kvm_arch_post_run(cpu, run);
#ifdef KVM_HAVE_MCE_INJECTION
}
#endif /* !CONFIG_USER_ONLY */
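
The comment added above makes a subtle ordering claim, so here is the same argument as a standalone sketch. This is illustrative code under assumptions, not QEMU code: C11 atomics stand in for qatomic_store_release()/qatomic_load_acquire(), SIGUSR1 stands in for SIG_IPI, and the two globals mirror cpu->exit_request and cpu->kvm_run->immediate_exit. The assert encodes the invariant: once the vCPU thread has seen immediate_exit, it must also see exit_request.

#include <assert.h>
#include <pthread.h>
#include <signal.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <unistd.h>

static atomic_bool exit_request;              /* cpu->exit_request */
static volatile sig_atomic_t immediate_exit;  /* cpu->kvm_run->immediate_exit */

/* Runs on the vCPU thread, like QEMU's SIG_IPI handler. */
static void kick_handler(int sig)
{
    (void)sig;
    immediate_exit = 1;
}

static void *vcpu_thread(void *arg)
{
    (void)arg;
    while (!immediate_exit) {
        /* stands in for blocking inside KVM_RUN */
    }
    /*
     * The signal was sent after the store-release of exit_request, and
     * the handler ran on this very thread, so having observed
     * immediate_exit guarantees this load-acquire returns true.
     */
    assert(atomic_load_explicit(&exit_request, memory_order_acquire));
    return NULL;
}

int main(void)
{
    pthread_t t;

    signal(SIGUSR1, kick_handler);
    pthread_create(&t, NULL, vcpu_thread, NULL);
    usleep(1000);

    /* cpu_exit(): publish the request, then kick the vCPU thread. */
    atomic_store_explicit(&exit_request, true, memory_order_release);
    pthread_kill(t, SIGUSR1);

    pthread_join(t, NULL);
    return 0;
}
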
- /* Finally, check if we need to exit to the main loop. */
- if (unlikely(qatomic_read(&cpu->exit_request)) || icount_exit_request(cpu)) {
+ /*
+ * Finally, check if we need to exit to the main loop.
+ * The corresponding store-release is in cpu_exit.
+ */
+ if (unlikely(qatomic_load_acquire(&cpu->exit_request)) || icount_exit_request(cpu)) {
qatomic_set(&cpu->exit_request, 0);
if (cpu->exception_index == -1) {
cpu->exception_index = EXCP_INTERRUPT;
cpu = first_cpu;
}
- while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) {
- /* Store rr_current_cpu before evaluating cpu_can_run(). */
+ while (cpu && cpu_work_list_empty(cpu)) {
+ /*
+ * Store rr_current_cpu before evaluating cpu->exit_request.
+ * Pairs with rr_kick_next_cpu().
+ */
qatomic_set_mb(&rr_current_cpu, cpu);
+ /* Pairs with store-release in cpu_exit. */
+ if (qatomic_load_acquire(&cpu->exit_request)) {
+ break;
+ }
current_cpu = cpu;
qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
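
The pairing noted in the loop above is easier to see in isolation. A minimal sketch under assumptions, not QEMU code: qatomic_set_mb() is modeled as a store followed by a full fence, and the kicker side is loosely modeled on rr_kick_next_cpu(), assumed here to re-read rr_current_cpu and retry so that a publish and a kick cannot cross without one side noticing the other.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct cpu {
    atomic_bool exit_request;
};

static _Atomic(struct cpu *) rr_current_cpu;

/*
 * Round-robin thread: publish the CPU about to run, then check its
 * exit_request. The full fence keeps the store from being reordered
 * after the load, mirroring qatomic_set_mb().
 */
static bool publish_then_check(struct cpu *cpu)
{
    atomic_store(&rr_current_cpu, cpu);
    atomic_thread_fence(memory_order_seq_cst);
    return atomic_load_explicit(&cpu->exit_request, memory_order_acquire);
}

/*
 * Kicker: raise exit_request on whichever CPU is current, retrying if
 * rr_current_cpu was republished in the meantime. The request is not
 * lost: either the publisher's check sees it, or it stays set and the
 * vCPU loop's load-acquire sees it on the next iteration.
 */
static void kick_current_cpu(void)
{
    struct cpu *cpu;

    do {
        cpu = atomic_load(&rr_current_cpu);
        if (cpu) {
            atomic_store_explicit(&cpu->exit_request, true,
                                  memory_order_release);
        }
        atomic_thread_fence(memory_order_seq_cst);
    } while (cpu != atomic_load(&rr_current_cpu));
}
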
void cpu_exit(CPUState *cpu)
{
- qatomic_set(&cpu->exit_request, 1);
+ /* Ensure cpu_exec will see the reason why the exit request was set. */
+ qatomic_store_release(&cpu->exit_request, true);
/* Ensure cpu_exec will see the exit request after TCG has exited. */
smp_wmb();
qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
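
Seen on its own, the release/acquire pair that replaces the removed explicit barriers is the standard publication pattern. A minimal sketch, not QEMU code: 'reason' stands for whatever state the requester wrote before calling cpu_exit() (a pending interrupt, a queued work item), which the vCPU thread may safely read once its load-acquire observes the request.

#include <stdatomic.h>
#include <stdbool.h>

static int reason;               /* state written before cpu_exit() */
static atomic_bool exit_request; /* cpu->exit_request */

/* Requester side, as in cpu_exit(): write first, then publish. */
static void request_exit(void)
{
    reason = 42;
    atomic_store_explicit(&exit_request, true, memory_order_release);
}

/* vCPU side, as in the loops above: observe, then read safely. */
static void check_exit(void)
{
    if (atomic_load_explicit(&exit_request, memory_order_acquire)) {
        /* Pairs with the release above: 'reason' is fully visible here. */
        int r = reason;
        (void)r;
    }
}
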
nvmm_vcpu_pre_run(cpu);
- if (qatomic_read(&cpu->exit_request)) {
+ /* Corresponding store-release is in cpu_exit. */
+ if (qatomic_load_acquire(&cpu->exit_request)) {
#if NVMM_USER_VERSION >= 2
nvmm_vcpu_stop(vcpu);
#else
#endif
}
- /* Read exit_request before the kernel reads the immediate exit flag */
- smp_rmb();
ret = nvmm_vcpu_run(mach, vcpu);
if (ret == -1) {
error_report("NVMM: Failed to exec a virtual processor,"
if (exclusive_step_mode == WHPX_STEP_NONE) {
whpx_vcpu_pre_run(cpu);
- if (qatomic_read(&cpu->exit_request)) {
+ /* Corresponding store-release is in cpu_exit. */
+ if (qatomic_load_acquire(&cpu->exit_request)) {
whpx_vcpu_kick(cpu);
}
}