if (unlikely(cpu_test_interrupt(cpu, ~0))) {
bql_lock();
if (cpu_test_interrupt(cpu, CPU_INTERRUPT_DEBUG)) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_DEBUG);
cpu->exception_index = EXCP_DEBUG;
bql_unlock();
return true;
}
if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
/* Do nothing */
} else if (cpu_test_interrupt(cpu, CPU_INTERRUPT_HALT)) {
replay_interrupt();
- cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_HALT);
cpu->halted = 1;
cpu->exception_index = EXCP_HLT;
bql_unlock();
return true;
}
}
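/*
 * For reference: every hunk in this patch replaces an open-coded
 * "interrupt_request &= ~MASK" with cpu_reset_interrupt(). A minimal
 * sketch of that helper, assuming the BQL-protected read-modify-write
 * shape of hw/core/cpu-common.c (assumed here, not part of this patch):
 */
void cpu_reset_interrupt(CPUState *cpu, int mask)
{
    bool need_lock = !bql_locked();

    if (need_lock) {
        bql_lock();
    }
    cpu->interrupt_request &= ~mask;  /* clear only the requested bits */
    if (need_lock) {
        bql_unlock();
    }
}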
if (cpu_test_interrupt(cpu, CPU_INTERRUPT_EXITTB)) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_EXITTB);
/* ensure that no TB jump will be modified as
the program flow was changed */
*last_tb = NULL;
/*
* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
* version_id is increased.
*/
- cpu->interrupt_request &= ~0x01;
+ cpu_reset_interrupt(cpu, 0x01);
tlb_flush(cpu);
cs->exception_index = EXCP_RESET;
avr_cpu_do_interrupt(cs);
- cs->interrupt_request &= ~CPU_INTERRUPT_RESET;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_RESET);
return true;
}
}
env->intsrc &= env->intsrc - 1; /* clear the lowest pending interrupt bit */
if (!env->intsrc) {
- cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
}
return true;
}
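/*
 * The guards throughout use cpu_test_interrupt() instead of reading
 * cpu->interrupt_request directly; the "~0" mask in the first hunk asks
 * whether any interrupt at all is pending. A minimal sketch, assuming
 * an acquire-load wrapper along the lines of include/hw/core/cpu.h
 * (assumed shape, not part of this patch):
 */
static inline bool cpu_test_interrupt(CPUState *cpu, int mask)
{
    return qatomic_load_acquire(&cpu->interrupt_request) & mask;
}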
if (cpu_test_interrupt(cs, CPU_INTERRUPT_NMI)) {
if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) {
- cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_NMI);
info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | EXCP02_NMI;
wvmcs(cs->accel->fd, VMCS_ENTRY_INTR_INFO, info);
} else {
vmx_set_nmi_window_exiting(cs);
}
}
if (!(env->hflags & HF_INHIBIT_IRQ_MASK) &&
cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK) && !(info & VMCS_INTR_VALID)) {
int line = cpu_get_pic_interrupt(env);
- cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
if (line >= 0) {
wvmcs(cs->accel->fd, VMCS_ENTRY_INTR_INFO, line |
VMCS_INTR_VALID | VMCS_INTR_T_HWINTR);
}
if (cpu_test_interrupt(cs, CPU_INTERRUPT_POLL)) {
- cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
apic_poll_irq(cpu->apic_state);
}
if ((cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) || cpu_test_interrupt(cs, CPU_INTERRUPT_NMI)) {
cs->halted = 0;
}
if (cpu_test_interrupt(cs, CPU_INTERRUPT_SIPI)) {
cpu_synchronize_state(cs);
do_cpu_sipi(cpu);
}
if (cpu_test_interrupt(cs, CPU_INTERRUPT_TPR)) {
- cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_TPR);
cpu_synchronize_state(cs);
apic_handle_tpr_access_report(cpu->apic_state, env->eip,
env->tpr_access_type);
/* As soon as these are moved to the kernel, remove them
* from cs->interrupt_request.
*/
events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
- cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
} else {
/* Keep these in cs->interrupt_request. */
events.smi.pending = 0;
if (cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
if (cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
bql_lock();
- cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_NMI);
bql_unlock();
DPRINTF("injected NMI\n");
ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
}
if (cpu_test_interrupt(cpu, CPU_INTERRUPT_SMI)) {
bql_lock();
- cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_SMI);
bql_unlock();
DPRINTF("injected SMI\n");
ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
bql_lock();
- cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
irq = cpu_get_pic_interrupt(env);
if (irq >= 0) {
struct kvm_interrupt intr;
/* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
assert(env->mcg_cap);
- cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_MCE);
kvm_cpu_synchronize_state(cs);
}
if (cpu_test_interrupt(cs, CPU_INTERRUPT_POLL)) {
- cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
apic_poll_irq(cpu->apic_state);
}
if ((cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) || cpu_test_interrupt(cs, CPU_INTERRUPT_NMI)) {
cs->halted = 0;
}
if (cpu_test_interrupt(cs, CPU_INTERRUPT_SIPI)) {
kvm_cpu_synchronize_state(cs);
do_cpu_sipi(cpu);
}
if (cpu_test_interrupt(cs, CPU_INTERRUPT_TPR)) {
- cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_TPR);
kvm_cpu_synchronize_state(cs);
apic_handle_tpr_access_report(cpu->apic_state, env->eip,
env->tpr_access_type);
if (!has_event && cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
if (nvmm_can_take_nmi(cpu)) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_NMI);
event->type = NVMM_VCPU_EVENT_INTR;
event->vector = 2;
has_event = true;
if (!has_event && cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) {
if (nvmm_can_take_int(cpu)) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
event->type = NVMM_VCPU_EVENT_INTR;
event->vector = cpu_get_pic_interrupt(env);
has_event = true;
/* Don't want SMIs. */
if (cpu_test_interrupt(cpu, CPU_INTERRUPT_SMI)) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_SMI);
}
if (sync_tpr) {
/* set int/nmi windows back to the reset state */
}
if (cpu_test_interrupt(cpu, CPU_INTERRUPT_POLL)) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
apic_poll_irq(x86_cpu->apic_state);
}
if ((cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) || cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
cpu->halted = false;
}
if (cpu_test_interrupt(cpu, CPU_INTERRUPT_SIPI)) {
nvmm_cpu_synchronize_state(cpu);
do_cpu_sipi(x86_cpu);
}
if (cpu_test_interrupt(cpu, CPU_INTERRUPT_TPR)) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_TPR);
nvmm_cpu_synchronize_state(cpu);
apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,
env->tpr_access_type);
*/
switch (interrupt_request) {
case CPU_INTERRUPT_POLL:
- cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
apic_poll_irq(cpu->apic_state);
break;
case CPU_INTERRUPT_SIPI:
do_cpu_sipi(cpu);
break;
case CPU_INTERRUPT_SMI:
cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
- cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_SMI);
do_smm_enter(cpu);
break;
case CPU_INTERRUPT_NMI:
cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
- cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_NMI);
env->hflags2 |= HF2_NMI_MASK;
do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
break;
case CPU_INTERRUPT_MCE:
- cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_MCE);
do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
break;
case CPU_INTERRUPT_HARD:
cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
- cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
- CPU_INTERRUPT_VIRQ);
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
intno = cpu_get_pic_interrupt(env);
qemu_log_mask(CPU_LOG_INT,
"Servicing hardware INT=0x%02x\n", intno);
do_interrupt_x86_hardirq(env, intno, 1);
break;
case CPU_INTERRUPT_VIRQ:
cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
intno = x86_ldl_phys(cs, env->vm_vmcb +
offsetof(struct vmcb, control.int_vector));
qemu_log_mask(CPU_LOG_INT,
"Servicing virtual hardware INT=0x%02x\n", intno);
do_interrupt_x86_hardirq(env, intno, 1);
- cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
env->int_ctl &= ~V_IRQ_MASK;
break;
}
env->intercept_exceptions = 0;
/* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
- cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
env->int_ctl = 0;
/* Clears the TSC_OFFSET inside the processor. */
if (!vcpu->interruption_pending &&
cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
if (cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_NMI);
vcpu->interruptable = false;
new_int.InterruptionType = WHvX64PendingNmi;
new_int.InterruptionPending = 1;
new_int.InterruptionVector = 2;
}
if (cpu_test_interrupt(cpu, CPU_INTERRUPT_SMI)) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_SMI);
}
}
if (!vcpu->interruption_pending &&
vcpu->interruptable && (env->eflags & IF_MASK)) {
assert(!new_int.InterruptionPending);
if (cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
irq = cpu_get_pic_interrupt(env);
if (irq >= 0) {
new_int.InterruptionType = WHvX64PendingInterrupt;
}
} else if (vcpu->ready_for_pic_interrupt &&
cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
irq = cpu_get_pic_interrupt(env);
if (irq >= 0) {
reg_names[reg_count] = WHvRegisterPendingEvent;
}
if (cpu_test_interrupt(cpu, CPU_INTERRUPT_POLL)) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
apic_poll_irq(x86_cpu->apic_state);
}
}
if (cpu_test_interrupt(cpu, CPU_INTERRUPT_TPR)) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_TPR);
whpx_cpu_synchronize_state(cpu);
apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,
env->tpr_access_type);
env->ttmr = (rb & ~TTMR_IP) | ip;
} else { /* Clear IP bit. */
env->ttmr = rb & ~TTMR_IP;
- cs->interrupt_request &= ~CPU_INTERRUPT_TIMER;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_TIMER);
}
cpu_openrisc_timer_update(cpu);
bql_unlock();
env->bpsw = save_psw;
env->pc = env->fintv;
env->psw_ipl = 15;
- cs->interrupt_request &= ~CPU_INTERRUPT_FIR;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_FIR);
qemu_set_irq(env->ack, env->ack_irq);
qemu_log_mask(CPU_LOG_INT, "fast interrupt raised\n");
} else if (do_irq & CPU_INTERRUPT_HARD) {
cpu_stl_data(env, env->isp, env->pc);
env->pc = cpu_ldl_data(env, env->intb + env->ack_irq * 4);
env->psw_ipl = env->ack_ipl;
- cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
qemu_set_irq(env->ack, env->ack_irq);
qemu_log_mask(CPU_LOG_INT,
"interrupt 0x%02x raised\n", env->ack_irq);
/* we might still have pending interrupts, but not deliverable */
if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
- cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
}
/* WAIT PSW during interrupt injection or STOP interrupt */
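/*
 * The conversion pattern is identical everywhere. A hypothetical
 * caller (example only, not from this patch) goes from an open-coded
 * clear to the helper, which takes the BQL itself if the caller does
 * not already hold it (per the sketch above):
 */
static void example_ack_timer_irq(CPUState *cs)
{
    /* before: cs->interrupt_request &= ~CPU_INTERRUPT_TIMER; */
    cpu_reset_interrupt(cs, CPU_INTERRUPT_TIMER);
}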