/*
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"
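/* SIG_IPI is the cross-thread "kick" used to interrupt a vCPU thread.
   A POSIX real-time signal is preferred where available; SIGUSR1 is the
   fallback on systems without SIGRTMIN. */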
#ifdef SIGRTMIN
#define SIG_IPI (SIGRTMIN+4)
#else
#define SIG_IPI SIGUSR1
#endif
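/* Round-robin state for the TCG executor: cur_cpu is the CPU that ran
   most recently, next_cpu the one tcg_cpu_exec() schedules next. */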
static CPUState *cur_cpu;
static CPUState *next_cpu;
/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *env;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        fprintf(stderr, "CPU #%d:\n", env->cpu_index);
#ifdef TARGET_I386
        cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU);
#else
        cpu_dump_state(env, stderr, fprintf, 0);
#endif
    }
    va_end(ap);
    abort();
}
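/* The cpu_synchronize_all_* helpers below walk the global CPU list and
   let the accelerator reconcile its register state with CPUState: on
   demand for debug/migration, after system reset, and after init. */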
void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_state(cpu);
    }
}
void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}
void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_init(cpu);
    }
}
int cpu_is_stopped(CPUState *env)
{
    return !vm_running || env->stopped;
}
static void do_vm_stop(int reason)
{
    if (vm_running) {
        cpu_disable_ticks();
        vm_running = 0;
        pause_all_vcpus();
        vm_state_notify(0, reason);
        monitor_protocol_event(QEVENT_STOP, NULL);
    }
}
static int cpu_can_run(CPUState *env)
{
    if (env->stop)
        return 0;
    if (env->stopped || !vm_running)
        return 0;
    return 1;
}
static int cpu_has_work(CPUState *env)
{
    if (env->stop)
        return 1;
    if (env->queued_work_first)
        return 1;
    if (env->stopped || !vm_running)
        return 0;
    if (!env->halted)
        return 1;
    if (qemu_cpu_has_work(env))
        return 1;
    return 0;
}
static int tcg_has_work(void)
{
    CPUState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu)
        if (cpu_has_work(env))
            return 1;
    return 0;
}
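/* Main-loop notification: qemu_event_increment() wakes the I/O thread's
   select() by writing to an eventfd (or a pipe fallback), and
   qemu_event_read() drains it once the wakeup has been serviced. */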
#ifndef _WIN32
static int io_thread_fd = -1;

static void qemu_event_increment(void)
{
    /* Write 8 bytes to be compatible with eventfd. */
    static const uint64_t val = 1;
    ssize_t ret;

    if (io_thread_fd == -1)
        return;

    do {
        ret = write(io_thread_fd, &val, sizeof(val));
    } while (ret < 0 && errno == EINTR);

    /* EAGAIN is fine, a read must be pending. */
    if (ret < 0 && errno != EAGAIN) {
        fprintf(stderr, "qemu_event_increment: write() failed: %s\n",
                strerror(errno));
        exit(1);
    }
}
static void qemu_event_read(void *opaque)
{
    int fd = (unsigned long)opaque;
    ssize_t len;
    char buffer[512];

    /* Drain the notify pipe. For eventfd, only 8 bytes will be read. */
    do {
        len = read(fd, buffer, sizeof(buffer));
    } while ((len == -1 && errno == EINTR) || len == sizeof(buffer));
}
static int qemu_event_init(void)
{
    int err;
    int fds[2];

    err = qemu_eventfd(fds);
    if (err == -1)
        return -errno;

    err = fcntl_setfl(fds[0], O_NONBLOCK);
    if (err < 0)
        goto fail;

    err = fcntl_setfl(fds[1], O_NONBLOCK);
    if (err < 0)
        goto fail;

    qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL,
                         (void *)(unsigned long)fds[0]);

    io_thread_fd = fds[1];
    return 0;

fail:
    close(fds[0]);
    close(fds[1]);
    return err;
}
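/* fds[0] is the read side polled by the main loop; fds[1] is the write
   side stashed in io_thread_fd for qemu_event_increment(). */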
#else /* _WIN32 */

HANDLE qemu_event_handle;

static void dummy_event_handler(void *opaque)
{
}

static int qemu_event_init(void)
{
    qemu_event_handle = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (!qemu_event_handle) {
        fprintf(stderr, "Failed CreateEvent: %ld\n", GetLastError());
        return -1;
    }
    qemu_add_wait_object(qemu_event_handle, dummy_event_handler, NULL);
    return 0;
}
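/* The event is auto-reset (bManualReset == FALSE): SetEvent() wakes one
   main-loop wait and the event rearms itself, so no drain is needed. */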
static void qemu_event_increment(void)
{
    if (!SetEvent(qemu_event_handle)) {
        fprintf(stderr, "qemu_event_increment: SetEvent failed: %ld\n",
                GetLastError());
        exit(1);
    }
}
#endif /* _WIN32 */
#ifndef CONFIG_IOTHREAD
int qemu_init_main_loop(void)
{
    return qemu_event_init();
}

void qemu_main_loop_start(void)
{
}
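/* Without CONFIG_IOTHREAD, device emulation and guest execution share a
   single thread, so most of the vCPU primitives below are stubs. */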
void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;
    if (kvm_enabled())
        kvm_init_vcpu(env);
}
int qemu_cpu_self(void *env)
{
    return 1;
}

void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
{
    func(data);
}

void resume_all_vcpus(void)
{
}

void pause_all_vcpus(void)
{
}

void qemu_cpu_kick(void *env)
{
}
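/* There is no other thread to wake, but a running vCPU must still be
   forced out of cpu_exec() so the shared thread can service the event. */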
void qemu_notify_event(void)
{
    CPUState *env = cpu_single_env;

    qemu_event_increment();
    if (env) {
        cpu_exit(env);
    }
    if (next_cpu && env != next_cpu) {
        cpu_exit(next_cpu);
    }
}
void qemu_mutex_lock_iothread(void) {}
void qemu_mutex_unlock_iothread(void) {}

void vm_stop(int reason)
{
    do_vm_stop(reason);
}

#else /* CONFIG_IOTHREAD */

#include "qemu-thread.h"
QemuMutex qemu_global_mutex;
static QemuMutex qemu_fair_mutex;
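/* qemu_global_mutex is the "big QEMU lock" protecting device and CPU
   state; qemu_fair_mutex only serializes the handoff so that CPU threads
   cannot starve the I/O thread (see qemu_tcg_wait_io_event). */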
static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

static int qemu_system_ready;
static QemuCond qemu_cpu_cond;
static QemuCond qemu_system_cond;
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

static void tcg_init_ipi(void);
static void kvm_init_ipi(CPUState *env);
static void unblock_io_signals(void);
int qemu_init_main_loop(void)
{
    int ret;

    ret = qemu_event_init();
    if (ret)
        return ret;

    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_system_cond);
    qemu_mutex_init(&qemu_fair_mutex);
    qemu_mutex_init(&qemu_global_mutex);
    qemu_mutex_lock(&qemu_global_mutex);

    unblock_io_signals();
    qemu_thread_self(&io_thread);

    return 0;
}
void qemu_main_loop_start(void)
{
    qemu_system_ready = 1;
    qemu_cond_broadcast(&qemu_system_cond);
}
void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_self(env)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    if (!env->queued_work_first)
        env->queued_work_first = &wi;
    else
        env->queued_work_last->next = &wi;
    env->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(env);
    while (!wi.done) {
        CPUState *self_env = cpu_single_env;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        cpu_single_env = self_env;
    }
}
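/* wi lives on the caller's stack; this is safe because the caller blocks
   on qemu_work_cond until the target thread has marked the item done. */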
static void flush_queued_work(CPUState *env)
{
    struct qemu_work_item *wi;

    if (!env->queued_work_first)
        return;

    while ((wi = env->queued_work_first)) {
        env->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
    }
    env->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}
static void qemu_wait_io_event_common(CPUState *env)
{
    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(env);
}
static void qemu_tcg_wait_io_event(void)
{
    CPUState *env;

    while (!tcg_has_work())
        qemu_cond_timedwait(tcg_halt_cond, &qemu_global_mutex, 1000);

    qemu_mutex_unlock(&qemu_global_mutex);

    /*
     * Users of qemu_global_mutex can be starved, having no chance
     * to acquire it since this path will get to it first.
     * So use another lock to provide fairness.
     */
    qemu_mutex_lock(&qemu_fair_mutex);
    qemu_mutex_unlock(&qemu_fair_mutex);

    qemu_mutex_lock(&qemu_global_mutex);

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(env);
    }
}
static void qemu_kvm_eat_signal(CPUState *env, int timeout)
{
    struct timespec ts;
    int r, e;
    siginfo_t siginfo;
    sigset_t waitset;

    ts.tv_sec = timeout / 1000;
    ts.tv_nsec = (timeout % 1000) * 1000000;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    qemu_mutex_unlock(&qemu_global_mutex);
    r = sigtimedwait(&waitset, &siginfo, &ts);
    e = errno;
    qemu_mutex_lock(&qemu_global_mutex);

    if (r == -1 && !(e == EAGAIN || e == EINTR)) {
        fprintf(stderr, "sigtimedwait: %s\n", strerror(e));
        exit(1);
    }
}
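/* sigtimedwait() consumes a pending SIG_IPI without running a handler;
   a zero timeout makes it a poll. errno is saved into e before the
   global lock is retaken. */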
static void qemu_kvm_wait_io_event(CPUState *env)
{
    while (!cpu_has_work(env))
        qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);

    qemu_kvm_eat_signal(env, 0);
    qemu_wait_io_event_common(env);
}
static int qemu_cpu_exec(CPUState *env);
static void *kvm_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_self(env->thread);

    kvm_init_vcpu(env);
    kvm_init_ipi(env);

    /* signal CPU creation */
    env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);

    while (1) {
        if (cpu_can_run(env))
            qemu_cpu_exec(env);
        qemu_kvm_wait_io_event(env);
    }

    return NULL;
}
static void *tcg_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    tcg_init_ipi();
    qemu_thread_self(env->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu)
        env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);

    while (1) {
        tcg_cpu_exec();
        qemu_tcg_wait_io_event();
    }

    return NULL;
}
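/* All TCG vCPUs are multiplexed onto this single thread, which is why
   the creation loop above marks every CPUState as created at once. */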
void qemu_cpu_kick(void *_env)
{
    CPUState *env = _env;

    qemu_cond_broadcast(env->halt_cond);
    qemu_thread_signal(env->thread, SIG_IPI);
}
int qemu_cpu_self(void *_env)
{
    CPUState *env = _env;
    QemuThread this;

    qemu_thread_self(&this);

    return qemu_thread_equal(&this, env->thread);
}
static void cpu_signal(int sig)
{
    if (cpu_single_env)
        cpu_exit(cpu_single_env);
    exit_request = 1;
}
static void tcg_init_ipi(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}
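/* TCG handles SIG_IPI in userspace (cpu_signal) to break out of the
   translated-code loop; KVM below instead blocks it in userspace and
   hands the mask to the kernel, so delivery only interrupts the guest. */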
static void dummy_signal(int sig)
{
}
static void kvm_init_ipi(CPUState *env)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(r));
        exit(1);
    }
}
static void unblock_io_signals(void)
{
    sigset_t set;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_BLOCK, &set, NULL);
}
void qemu_mutex_lock_iothread(void)
{
    if (kvm_enabled()) {
        qemu_mutex_lock(&qemu_fair_mutex);
        qemu_mutex_lock(&qemu_global_mutex);
        qemu_mutex_unlock(&qemu_fair_mutex);
    } else {
        qemu_mutex_lock(&qemu_fair_mutex);
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_thread_signal(tcg_cpu_thread, SIG_IPI);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        qemu_mutex_unlock(&qemu_fair_mutex);
    }
}
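/* In the TCG case a failed trylock means a CPU thread holds the lock and
   may stay in cpu_exec() for a while; the SIG_IPI kick forces it out so
   the I/O thread does not have to wait out a whole execution burst. */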
void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}
static int all_vcpus_paused(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        if (!penv->stopped)
            return 0;
        penv = (CPUState *)penv->next_cpu;
    }

    return 1;
}
void pause_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 1;
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }

    while (!all_vcpus_paused()) {
        qemu_cond_timedwait(&qemu_pause_cond, &qemu_global_mutex, 100);
        penv = first_cpu;
        while (penv) {
            qemu_cpu_kick(penv);
            penv = (CPUState *)penv->next_cpu;
        }
    }
}
void resume_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 0;
        penv->stopped = 0;
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }
}
static void tcg_init_vcpu(void *_env)
{
    CPUState *env = _env;

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        env->thread = qemu_mallocz(sizeof(QemuThread));
        env->halt_cond = qemu_mallocz(sizeof(QemuCond));
        qemu_cond_init(env->halt_cond);
        qemu_thread_create(env->thread, tcg_cpu_thread_fn, env);
        while (env->created == 0)
            qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
        tcg_cpu_thread = env->thread;
        tcg_halt_cond = env->halt_cond;
    } else {
        env->thread = tcg_cpu_thread;
        env->halt_cond = tcg_halt_cond;
    }
}
static void kvm_start_vcpu(CPUState *env)
{
    env->thread = qemu_mallocz(sizeof(QemuThread));
    env->halt_cond = qemu_mallocz(sizeof(QemuCond));
    qemu_cond_init(env->halt_cond);
    qemu_thread_create(env->thread, kvm_cpu_thread_fn, env);
    while (env->created == 0)
        qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
}
void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;
    if (kvm_enabled())
        kvm_start_vcpu(env);
    else
        tcg_init_vcpu(env);
}
void qemu_notify_event(void)
{
    qemu_event_increment();
}

static void qemu_system_vmstop_request(int reason)
{
    vmstop_requested = reason;
    qemu_notify_event();
}
void vm_stop(int reason)
{
    QemuThread me;
    qemu_thread_self(&me);

    if (!qemu_thread_equal(&me, &io_thread)) {
        qemu_system_vmstop_request(reason);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        if (cpu_single_env) {
            cpu_exit(cpu_single_env);
            cpu_single_env->stop = 1;
        }
        return;
    }
    do_vm_stop(reason);
}

#endif /* CONFIG_IOTHREAD */
static int qemu_cpu_exec(CPUState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        count = qemu_icount_round(qemu_next_deadline());
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag. */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}
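/* Example: a deadline worth 0x12345 instructions yields decr = 0xffff in
   the u16.low countdown and 0x2346 left in icount_extra; whatever is
   still unexecuted after cpu_exec() is subtracted back out above. */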
bool tcg_cpu_exec(void)
{
    int ret = 0;

    if (next_cpu == NULL)
        next_cpu = first_cpu;
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUState *env = cur_cpu = next_cpu;

        qemu_clock_enable(vm_clock,
                          (cur_cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (qemu_alarm_pending())
            break;
        if (cpu_can_run(env))
            ret = qemu_cpu_exec(env);
        else if (env->stop)
            break;

        if (ret == EXCP_DEBUG) {
            gdb_set_stop_cpu(env);
            debug_requested = EXCP_DEBUG;
            break;
        }
    }
    exit_request = 0;
    return tcg_has_work();
}
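/* Returning tcg_has_work() tells the caller whether any vCPU still has
   pending work; the main loop only goes back to sleep when it is false. */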
void set_numa_modes(void)
{
    CPUState *env;
    int i;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (node_cpumask[i] & (1 << env->cpu_index)) {
                env->numa_node = i;
            }
        }
    }
}
void set_cpu_log(const char *optarg)
{
    int mask;
    const CPULogItem *item;

    mask = cpu_str_to_log_mask(optarg);
    if (!mask) {
        printf("Log items (comma separated):\n");
        for (item = cpu_log_items; item->mask != 0; item++) {
            printf("%-10s %s\n", item->name, item->help);
        }
        exit(1);
    }
    cpu_set_log(mask);
}
/* Return the virtual CPU time, based on the instruction counter. */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUState *env = cpu_single_env;

    icount = qemu_icount;
    if (env) {
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}
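/* qemu_icount counts executed guest instructions; the shift converts
   them to virtual time at one instruction per 2^icount_time_shift ns,
   and qemu_icount_bias adds any accumulated clock adjustment. */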
void list_cpus(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
               const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list_id)
    cpu_list_id(f, cpu_fprintf, optarg);
#elif defined(cpu_list)
    cpu_list(f, cpu_fprintf); /* deprecated */
#endif
}