/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/config-file.h"
#include "migration/vmstate.h"
#include "monitor/monitor.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-misc.h"
#include "qapi/qapi-events-run-state.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "sysemu/tcg.h"
#include "sysemu/block-backend.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/hw_accel.h"
#include "sysemu/kvm.h"
#include "sysemu/hax.h"
#include "sysemu/hvf.h"
#include "sysemu/whpx.h"
#include "exec/exec-all.h"

#include "qemu/thread.h"
#include "qemu/plugin.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/option.h"
#include "qemu/bitmap.h"
#include "qemu/seqlock.h"
#include "qemu/guest-random.h"
#include "tcg.h"
#include "hw/nmi.h"
#include "sysemu/replay.h"
#include "sysemu/runstate.h"
#include "hw/boards.h"
#include "hw/hw.h"

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

static QemuMutex qemu_global_mutex;

int64_t max_delay;
int64_t max_advance;

/* vcpu throttling controls */
static QEMUTimer *throttle_timer;
static unsigned int throttle_percentage;

#define CPU_THROTTLE_PCT_MIN 1
#define CPU_THROTTLE_PCT_MAX 99
#define CPU_THROTTLE_TIMESLICE_NS 10000000

bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}

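/*
 * A vCPU thread is idle when it has no stop request or queued work
 * pending and is either fully stopped or halted with nothing to do.
 * When KVM handles halt in-kernel the thread must keep running even
 * while the guest CPU is halted.
 */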
static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || cpu_has_work(cpu) ||
        kvm_halt_in_kernel()) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}

/***********************************************************/
/* guest cycle counter */

/* Protected by TimersState seqlock */

static bool icount_sleep = true;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10

typedef struct TimersState {
    /* Protected by BQL. */
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;

    /* Protect fields that can be respectively read outside the
     * BQL, and written from multiple threads.
     */
    QemuSeqLock vm_clock_seqlock;
    QemuSpin vm_clock_lock;

    int16_t cpu_ticks_enabled;

    /* Conversion factor from emulated instructions to virtual clock ticks. */
    int16_t icount_time_shift;

    /* Compensate for varying guest execution speed. */
    int64_t qemu_icount_bias;

    int64_t vm_clock_warp_start;
    int64_t cpu_clock_offset;

    /* Only written by TCG thread */
    int64_t qemu_icount;

    /* for adjusting icount */
    QEMUTimer *icount_rt_timer;
    QEMUTimer *icount_vm_timer;
    QEMUTimer *icount_warp_timer;
} TimersState;

static TimersState timers_state;
bool mttcg_enabled;

/*
 * We default to false if we know other options have been enabled
 * which are currently incompatible with MTTCG. Otherwise when each
 * guest (target) has been updated to support:
 * - atomic instructions
 * - memory ordering primitives (barriers)
 * they can set the appropriate CONFIG flags in ${target}-softmmu.mak
 *
 * Once a guest architecture has been converted to the new primitives
 * there are two remaining limitations to check.
 *
 * - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
 * - The host must have a stronger memory order than the guest
 *
 * It may be possible in future to support strong guests on weak hosts
 * but that will require tagging all load/stores in a guest with their
 * implicit memory order requirements which would likely slow things
 * down a lot.
 */

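/*
 * MTTCG is safe when the memory ordering the guest requires
 * (TCG_GUEST_DEFAULT_MO) is a subset of what the host backend
 * guarantees (TCG_TARGET_DEFAULT_MO).
 */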
static bool check_tcg_memory_orders_compatible(void)
{
#if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
    return (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0;
#else
    return false;
#endif
}

static bool default_mttcg_enabled(void)
{
    if (use_icount || TCG_OVERSIZED_GUEST) {
        return false;
    } else {
#ifdef TARGET_SUPPORTS_MTTCG
        return check_tcg_memory_orders_compatible();
#else
        return false;
#endif
    }
}

void qemu_tcg_configure(QemuOpts *opts, Error **errp)
{
    const char *t = qemu_opt_get(opts, "thread");
    if (t) {
        if (strcmp(t, "multi") == 0) {
            if (TCG_OVERSIZED_GUEST) {
                error_setg(errp, "No MTTCG when guest word size > hosts");
            } else if (use_icount) {
                error_setg(errp, "No MTTCG when icount is enabled");
            } else {
#ifndef TARGET_SUPPORTS_MTTCG
                warn_report("Guest not yet converted to MTTCG - "
                            "you may get unexpected results");
#endif
                if (!check_tcg_memory_orders_compatible()) {
                    warn_report("Guest expects a stronger memory ordering "
                                "than the host provides");
                    error_printf("This may cause strange/hard to debug errors\n");
                }
                mttcg_enabled = true;
            }
        } else if (strcmp(t, "single") == 0) {
            mttcg_enabled = false;
        } else {
            error_setg(errp, "Invalid 'thread' setting %s", t);
        }
    } else {
        mttcg_enabled = default_mttcg_enabled();
    }
}

/* The current number of executed instructions is based on what we
 * originally budgeted minus the current state of the decrementing
 * icount counters in extra/u16.low.
 */
static int64_t cpu_get_icount_executed(CPUState *cpu)
{
    return (cpu->icount_budget -
            (cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra));
}

/*
 * Update the global shared timer_state.qemu_icount to take into
 * account executed instructions. This is done by the TCG vCPU
 * thread so the main-loop can see time has moved forward.
 */
static void cpu_update_icount_locked(CPUState *cpu)
{
    int64_t executed = cpu_get_icount_executed(cpu);
    cpu->icount_budget -= executed;

    atomic_set_i64(&timers_state.qemu_icount,
                   timers_state.qemu_icount + executed);
}

/*
 * Update the global shared timer_state.qemu_icount to take into
 * account executed instructions. This is done by the TCG vCPU
 * thread so the main-loop can see time has moved forward.
 */
void cpu_update_icount(CPUState *cpu)
{
    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    cpu_update_icount_locked(cpu);
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}

static int64_t cpu_get_icount_raw_locked(void)
{
    CPUState *cpu = current_cpu;

    if (cpu && cpu->running) {
        if (!cpu->can_do_io) {
            error_report("Bad icount read");
            exit(1);
        }
        /* Take into account what has run */
        cpu_update_icount_locked(cpu);
    }
    /* The read is protected by the seqlock, but needs atomic64 to avoid UB */
    return atomic_read_i64(&timers_state.qemu_icount);
}

static int64_t cpu_get_icount_locked(void)
{
    int64_t icount = cpu_get_icount_raw_locked();
    return atomic_read_i64(&timers_state.qemu_icount_bias) +
           cpu_icount_to_ns(icount);
}

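/*
 * Lock-free reader: retry whenever the seqlock sequence count shows a
 * writer ran concurrently with the read.
 */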
int64_t cpu_get_icount_raw(void)
{
    int64_t icount;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        icount = cpu_get_icount_raw_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return icount;
}

/* Return the virtual CPU time, based on the instruction counter. */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        icount = cpu_get_icount_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return icount;
}

int64_t cpu_icount_to_ns(int64_t icount)
{
    return icount << atomic_read(&timers_state.icount_time_shift);
}

static int64_t cpu_get_ticks_locked(void)
{
    int64_t ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += cpu_get_host_ticks();
    }

    if (timers_state.cpu_ticks_prev > ticks) {
        /* Non increasing ticks may happen if the host uses software suspend. */
        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        ticks = timers_state.cpu_ticks_prev;
    }

    timers_state.cpu_ticks_prev = ticks;
    return ticks;
}

/* return the time elapsed in VM between vm_start and vm_stop.  Unless
 * icount is active, cpu_get_ticks() uses units of the host CPU cycle
 * counter.
 */
int64_t cpu_get_ticks(void)
{
    int64_t ticks;

    if (use_icount) {
        return cpu_get_icount();
    }

    qemu_spin_lock(&timers_state.vm_clock_lock);
    ticks = cpu_get_ticks_locked();
    qemu_spin_unlock(&timers_state.vm_clock_lock);
    return ticks;
}

static int64_t cpu_get_clock_locked(void)
{
    int64_t time;

    time = timers_state.cpu_clock_offset;
    if (timers_state.cpu_ticks_enabled) {
        time += get_clock();
    }

    return time;
}

/* Return the monotonic time elapsed in VM, i.e.,
 * the time between vm_start and vm_stop
 */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}

/* enable cpu_get_ticks()
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}

/* disable cpu_get_ticks() : the clock is stopped. You must not call
 * cpu_get_ticks() after that.
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset += cpu_get_host_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop. */
#define ICOUNT_WOBBLE (NANOSECONDS_PER_SECOND / 10)

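/*
 * Feedback loop: compare the virtual instruction clock with real time
 * and nudge icount_time_shift so the two stay roughly in step.
 */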
static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;

    /* Protected by TimersState mutex.  */
    static int64_t last_delta;

    /* If the VM is not running, then do nothing.  */
    if (!runstate_is_running()) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    cur_time = cpu_get_clock_locked();
    cur_icount = cpu_get_icount_locked();

    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && timers_state.icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down.  */
        atomic_set(&timers_state.icount_time_shift,
                   timers_state.icount_time_shift - 1);
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && timers_state.icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up.  */
        atomic_set(&timers_state.icount_time_shift,
                   timers_state.icount_time_shift + 1);
    }
    last_delta = delta;
    atomic_set_i64(&timers_state.qemu_icount_bias,
                   cur_icount - (timers_state.qemu_icount
                                 << timers_state.icount_time_shift));
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}

static void icount_adjust_rt(void *opaque)
{
    timer_mod(timers_state.icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    timer_mod(timers_state.icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND / 10);
    icount_adjust();
}

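/*
 * Convert a nanosecond deadline into an instruction budget, i.e.
 * ceil(count / 2^icount_time_shift), rounding up so we never stop
 * short of the deadline.
 */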
static int64_t qemu_icount_round(int64_t count)
{
    int shift = atomic_read(&timers_state.icount_time_shift);
    return (count + (1 << shift) - 1) >> shift;
}

static void icount_warp_rt(void)
{
    unsigned seq;
    int64_t warp_start;

    /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
     * changes from -1 to another value, so the race here is okay.
     */
    do {
        seq = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        warp_start = timers_state.vm_clock_warp_start;
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq));

    if (warp_start == -1) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    if (runstate_is_running()) {
        int64_t clock = REPLAY_CLOCK_LOCKED(REPLAY_CLOCK_VIRTUAL_RT,
                                            cpu_get_clock_locked());
        int64_t warp_delta;

        warp_delta = clock - timers_state.vm_clock_warp_start;
        if (use_icount == 2) {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_icount = cpu_get_icount_locked();
            int64_t delta = clock - cur_icount;
            warp_delta = MIN(warp_delta, delta);
        }
        atomic_set_i64(&timers_state.qemu_icount_bias,
                       timers_state.qemu_icount_bias + warp_delta);
    }
    timers_state.vm_clock_warp_start = -1;
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);

    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

static void icount_timer_cb(void *opaque)
{
    /* No need for a checkpoint because the timer already synchronizes
     * with CHECKPOINT_CLOCK_VIRTUAL_RT.
     */
    icount_warp_rt();
}

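/*
 * Step QEMU_CLOCK_VIRTUAL straight to 'dest', pausing at each timer
 * deadline on the way so every timer fires at its scheduled time.
 */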
void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    AioContext *aio_context;
    assert(qtest_enabled());
    aio_context = qemu_get_aio_context();
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
                                                      QEMU_TIMER_ATTR_ALL);
        int64_t warp = qemu_soonest_timeout(dest - clock, deadline);

        seqlock_write_lock(&timers_state.vm_clock_seqlock,
                           &timers_state.vm_clock_lock);
        atomic_set_i64(&timers_state.qemu_icount_bias,
                       timers_state.qemu_icount_bias + warp);
        seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                             &timers_state.vm_clock_lock);

        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}

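/*
 * With icount, a sleeping vCPU does not advance QEMU_CLOCK_VIRTUAL, so
 * no timer would ever fire.  Either jump the clock straight to the next
 * deadline (sleep=off) or arm icount_warp_timer to do so once enough
 * real time has passed.
 */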
void qemu_start_warp_timer(void)
{
    int64_t clock;
    int64_t deadline;

    if (!use_icount) {
        return;
    }

    /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
     * do not fire, so computing the deadline does not make sense.
     */
    if (!runstate_is_running()) {
        return;
    }

    if (replay_mode != REPLAY_MODE_PLAY) {
        if (!all_cpu_threads_idle()) {
            return;
        }

        if (qtest_enabled()) {
            /* When testing, qtest commands advance icount.  */
            return;
        }

        replay_checkpoint(CHECKPOINT_CLOCK_WARP_START);
    } else {
        /* warp clock deterministically in record/replay mode */
        if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
            /* vCPU is sleeping and warp can't be started.
               It is probably a race condition: notification sent
               to vCPU was processed in advance and vCPU went to sleep.
               Therefore we have to wake it up for doing something. */
            if (replay_has_checkpoint()) {
                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            }
            return;
        }
    }

    /* We want to use the earliest deadline from ALL vm_clocks */
    clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
                                          ~QEMU_TIMER_ATTR_EXTERNAL);
    if (deadline < 0) {
        static bool notified;
        if (!icount_sleep && !notified) {
            warn_report("icount sleep disabled and no active timers");
            notified = true;
        }
        return;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         */
        if (!icount_sleep) {
            /*
             * We never let VCPUs sleep in no sleep icount mode.
             * If there is a pending QEMU_CLOCK_VIRTUAL timer we just advance
             * to the next QEMU_CLOCK_VIRTUAL event and notify it.
             * It is useful when we want a deterministic execution time,
             * isolated from host latencies.
             */
            seqlock_write_lock(&timers_state.vm_clock_seqlock,
                               &timers_state.vm_clock_lock);
            atomic_set_i64(&timers_state.qemu_icount_bias,
                           timers_state.qemu_icount_bias + deadline);
            seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                                 &timers_state.vm_clock_lock);
            qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
        } else {
            /*
             * We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some
             * "real" time, (related to the time left until the next event) has
             * passed.  The QEMU_CLOCK_VIRTUAL_RT clock will do this.
             * This avoids that the warps are visible externally; for example,
             * you will not be sending network packets continuously instead of
             * every 100ms.
             */
            seqlock_write_lock(&timers_state.vm_clock_seqlock,
                               &timers_state.vm_clock_lock);
            if (timers_state.vm_clock_warp_start == -1
                || timers_state.vm_clock_warp_start > clock) {
                timers_state.vm_clock_warp_start = clock;
            }
            seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                                 &timers_state.vm_clock_lock);
            timer_mod_anticipate(timers_state.icount_warp_timer,
                                 clock + deadline);
        }
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

static void qemu_account_warp_timer(void)
{
    if (!use_icount || !icount_sleep) {
        return;
    }

    /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
     * do not fire, so computing the deadline does not make sense.
     */
    if (!runstate_is_running()) {
        return;
    }

    /* warp clock deterministically in record/replay mode */
    if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_ACCOUNT)) {
        return;
    }

    timer_del(timers_state.icount_warp_timer);
    icount_warp_rt();
}

static bool icount_state_needed(void *opaque)
{
    return use_icount;
}

static bool warp_timer_state_needed(void *opaque)
{
    TimersState *s = opaque;
    return s->icount_warp_timer != NULL;
}

static bool adjust_timers_state_needed(void *opaque)
{
    TimersState *s = opaque;
    return s->icount_rt_timer != NULL;
}

/*
 * Subsection for warp timer migration is optional, because it may not be
 * created.
 */
static const VMStateDescription icount_vmstate_warp_timer = {
    .name = "timer/icount/warp_timer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = warp_timer_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(vm_clock_warp_start, TimersState),
        VMSTATE_TIMER_PTR(icount_warp_timer, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription icount_vmstate_adjust_timers = {
    .name = "timer/icount/timers",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = adjust_timers_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_TIMER_PTR(icount_rt_timer, TimersState),
        VMSTATE_TIMER_PTR(icount_vm_timer, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

/*
 * This is a subsection for icount migration.
 */
static const VMStateDescription icount_vmstate_timers = {
    .name = "timer/icount",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = icount_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(qemu_icount_bias, TimersState),
        VMSTATE_INT64(qemu_icount, TimersState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &icount_vmstate_warp_timer,
        &icount_vmstate_adjust_timers,
        NULL
    }
};

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_UNUSED(8),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &icount_vmstate_timers,
        NULL
    }
};

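/*
 * Per-vCPU throttle work: to make the vCPU spend fraction 'pct' of
 * wall-clock time sleeping, each CPU_THROTTLE_TIMESLICE_NS of execution
 * is followed by a sleep of timeslice * pct / (1 - pct) nanoseconds.
 */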
static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
{
    double pct;
    double throttle_ratio;
    int64_t sleeptime_ns, endtime_ns;

    if (!cpu_throttle_get_percentage()) {
        return;
    }

    pct = (double)cpu_throttle_get_percentage() / 100;
    throttle_ratio = pct / (1 - pct);
    /* Add 1ns to fix double's rounding error (like 0.9999999...) */
    sleeptime_ns = (int64_t)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS + 1);
    endtime_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + sleeptime_ns;
    while (sleeptime_ns > 0 && !cpu->stop) {
        if (sleeptime_ns > SCALE_MS) {
            qemu_cond_timedwait(cpu->halt_cond, &qemu_global_mutex,
                                sleeptime_ns / SCALE_MS);
        } else {
            qemu_mutex_unlock_iothread();
            g_usleep(sleeptime_ns / SCALE_US);
            qemu_mutex_lock_iothread();
        }
        sleeptime_ns = endtime_ns - qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }
    atomic_set(&cpu->throttle_thread_scheduled, 0);
}

static void cpu_throttle_timer_tick(void *opaque)
{
    CPUState *cpu;
    double pct;

    /* Stop the timer if needed */
    if (!cpu_throttle_get_percentage()) {
        return;
    }
    CPU_FOREACH(cpu) {
        if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
            async_run_on_cpu(cpu, cpu_throttle_thread,
                             RUN_ON_CPU_NULL);
        }
    }

    pct = (double)cpu_throttle_get_percentage() / 100;
    timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
                                  CPU_THROTTLE_TIMESLICE_NS / (1 - pct));
}

void cpu_throttle_set(int new_throttle_pct)
{
    /* Ensure throttle percentage is within valid range */
    new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX);
    new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN);

    atomic_set(&throttle_percentage, new_throttle_pct);

    timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
                                  CPU_THROTTLE_TIMESLICE_NS);
}

void cpu_throttle_stop(void)
{
    atomic_set(&throttle_percentage, 0);
}

bool cpu_throttle_active(void)
{
    return (cpu_throttle_get_percentage() != 0);
}

int cpu_throttle_get_percentage(void)
{
    return atomic_read(&throttle_percentage);
}

void cpu_ticks_init(void)
{
    seqlock_init(&timers_state.vm_clock_seqlock);
    qemu_spin_init(&timers_state.vm_clock_lock);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                  cpu_throttle_timer_tick, NULL);
}

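/*
 * Parse the -icount options: "shift" is either a fixed power of two
 * (each insn accounts for 2^shift ns of virtual time) or "auto" for the
 * self-adjusting mode, plus the "align" and "sleep" booleans.
 */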
void configure_icount(QemuOpts *opts, Error **errp)
{
    const char *option;
    char *rem_str = NULL;

    option = qemu_opt_get(opts, "shift");
    if (!option) {
        if (qemu_opt_get(opts, "align") != NULL) {
            error_setg(errp, "Please specify shift option when using align");
        }
        return;
    }

    icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
    if (icount_sleep) {
        timers_state.icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                                      icount_timer_cb, NULL);
    }

    icount_align_option = qemu_opt_get_bool(opts, "align", false);

    if (icount_align_option && !icount_sleep) {
        error_setg(errp, "align=on and sleep=off are incompatible");
    }
    if (strcmp(option, "auto") != 0) {
        errno = 0;
        timers_state.icount_time_shift = strtol(option, &rem_str, 0);
        if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
            error_setg(errp, "icount: Invalid shift value");
        }
        use_icount = 1;
        return;
    } else if (icount_align_option) {
        error_setg(errp, "shift=auto and align=on are incompatible");
    } else if (!icount_sleep) {
        error_setg(errp, "shift=auto and sleep=off are incompatible");
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway.  */
    timers_state.icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers.  */
    timers_state.vm_clock_warp_start = -1;
    timers_state.icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
                                                icount_adjust_rt, NULL);
    timer_mod(timers_state.icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    timers_state.icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                                icount_adjust_vm, NULL);
    timer_mod(timers_state.icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND / 10);
}

/***********************************************************/
/* TCG vCPU kick timer
 *
 * The kick timer is responsible for moving single threaded vCPU
 * emulation on to the next vCPU.  If more than one vCPU is running a
 * timer event will force a cpu->exit so the next vCPU can get
 * scheduled.
 *
 * The timer is removed if all vCPUs are idle and restarted again once
 * idleness is complete.
 */

static QEMUTimer *tcg_kick_vcpu_timer;
static CPUState *tcg_current_rr_cpu;

#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)

static inline int64_t qemu_tcg_next_kick(void)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
}

/* Kick the currently round-robin scheduled vCPU to next */
static void qemu_cpu_kick_rr_next_cpu(void)
{
    CPUState *cpu;
    do {
        cpu = atomic_mb_read(&tcg_current_rr_cpu);
        if (cpu) {
            cpu_exit(cpu);
        }
    } while (cpu != atomic_mb_read(&tcg_current_rr_cpu));
}

/* Kick all RR vCPUs */
static void qemu_cpu_kick_rr_cpus(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_exit(cpu);
    };
}

static void do_nothing(CPUState *cpu, run_on_cpu_data unused)
{
}

void qemu_timer_notify_cb(void *opaque, QEMUClockType type)
{
    if (!use_icount || type != QEMU_CLOCK_VIRTUAL) {
        qemu_notify_event();
        return;
    }

    if (qemu_in_vcpu_thread()) {
        /* A CPU is currently running; kick it back out to the
         * tcg_cpu_exec() loop so it will recalculate its
         * icount deadline immediately.
         */
        qemu_cpu_kick(current_cpu);
    } else if (first_cpu) {
        /* qemu_cpu_kick is not enough to kick a halted CPU out of
         * qemu_tcg_wait_io_event.  async_run_on_cpu, instead,
         * causes cpu_thread_is_idle to return false.  This way,
         * handle_icount_deadline can run.
         * If we have no CPUs at all for some reason, we don't
         * need to do anything.
         */
        async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL);
    }
}

static void kick_tcg_thread(void *opaque)
{
    timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
    qemu_cpu_kick_rr_next_cpu();
}

static void start_tcg_kick_timer(void)
{
    assert(!mttcg_enabled);
    if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
        tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                           kick_tcg_thread, NULL);
    }
    if (tcg_kick_vcpu_timer && !timer_pending(tcg_kick_vcpu_timer)) {
        timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
    }
}

static void stop_tcg_kick_timer(void)
{
    assert(!mttcg_enabled);
    if (tcg_kick_vcpu_timer && timer_pending(tcg_kick_vcpu_timer)) {
        timer_del(tcg_kick_vcpu_timer);
    }
}

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
        /* TODO: move to cpu_synchronize_state() */
        if (hvf_enabled()) {
            hvf_cpu_synchronize_state(cpu);
        }
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
        /* TODO: move to cpu_synchronize_post_reset() */
        if (hvf_enabled()) {
            hvf_cpu_synchronize_post_reset(cpu);
        }
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
        /* TODO: move to cpu_synchronize_post_init() */
        if (hvf_enabled()) {
            hvf_cpu_synchronize_post_init(cpu);
        }
    }
}

void cpu_synchronize_all_pre_loadvm(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_pre_loadvm(cpu);
    }
}

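/*
 * Stop the VM: pause all vCPUs, switch to the requested runstate and
 * notify listeners (optionally emitting the QMP STOP event), then drain
 * and flush the block layer so no I/O remains in flight.
 */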
static int do_vm_stop(RunState state, bool send_stop)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        if (send_stop) {
            qapi_event_send_stop();
        }
    }

    bdrv_drain_all();
    ret = bdrv_flush_all();

    return ret;
}

/* Special vm_stop() variant for terminating the process.  Historically clients
 * did not expect a QMP STOP event and so we need to retain compatibility.
 */
int vm_shutdown(void)
{
    return do_vm_stop(RUN_STATE_SHUTDOWN, false);
}

static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return false;
    }
    return true;
}

static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}

#ifdef CONFIG_LINUX
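/*
 * Re-deliver a fatal SIGBUS: restore the default handler, unblock the
 * signal and raise it again so the process dies with the right signal
 * status.
 */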
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, siginfo_t *siginfo, void *ctx)
{
    if (siginfo->si_code != BUS_MCEERR_AO && siginfo->si_code != BUS_MCEERR_AR) {
        sigbus_reraise();
    }

    if (current_cpu) {
        /* Called asynchronously in VCPU thread.  */
        if (kvm_on_sigbus_vcpu(current_cpu, siginfo->si_code, siginfo->si_addr)) {
            sigbus_reraise();
        }
    } else {
        /* Called synchronously (via signalfd) in main thread.  */
        if (kvm_on_sigbus(siginfo->si_code, siginfo->si_addr)) {
            sigbus_reraise();
        }
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}
#else /* !CONFIG_LINUX */
static void qemu_init_sigbus(void)
{
}
#endif /* !CONFIG_LINUX */

static QemuThread io_thread;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
    do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
}

static void qemu_kvm_destroy_vcpu(CPUState *cpu)
{
    if (kvm_destroy_vcpu(cpu) < 0) {
        error_report("kvm_destroy_vcpu failed");
        exit(EXIT_FAILURE);
    }
}

static void qemu_tcg_destroy_vcpu(CPUState *cpu)
{
}

static void qemu_cpu_stop(CPUState *cpu, bool exit)
{
    g_assert(qemu_cpu_is_self(cpu));
    cpu->stop = false;
    cpu->stopped = true;
    if (exit) {
        cpu_exit(cpu);
    }
    qemu_cond_broadcast(&qemu_pause_cond);
}

static void qemu_wait_io_event_common(CPUState *cpu)
{
    atomic_mb_set(&cpu->thread_kicked, false);
    if (cpu->stop) {
        qemu_cpu_stop(cpu, false);
    }
    process_queued_cpu_work(cpu);
}

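/*
 * In round-robin mode one thread drives every vCPU, so all vCPUs share
 * first_cpu's halt condition variable; the kick timer is stopped while
 * everything is idle and restarted on wakeup.
 */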
static void qemu_tcg_rr_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
        stop_tcg_kick_timer();
        qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);
    }

    start_tcg_kick_timer();

    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}

static void qemu_wait_io_event(CPUState *cpu)
{
    bool slept = false;

    while (cpu_thread_is_idle(cpu)) {
        if (!slept) {
            slept = true;
            qemu_plugin_vcpu_idle_cb(cpu);
        }
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }
    if (slept) {
        qemu_plugin_vcpu_resume_cb(cpu);
    }

#ifdef _WIN32
    /* Eat dummy APC queued by qemu_cpu_kick_thread.  */
    if (!tcg_enabled()) {
        SleepEx(0, TRUE);
    }
#endif
    qemu_wait_io_event_common(cpu);
}

static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        error_report("kvm_init_vcpu failed: %s", strerror(-r));
        exit(1);
    }

    kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);

    do {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    qemu_kvm_destroy_vcpu(cpu);
    cpu->created = false;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
}

static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    error_report("qtest is not supported under Windows");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);

    do {
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug);

    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
#endif
}

AB
1380static int64_t tcg_get_icount_limit(void)
1381{
1382 int64_t deadline;
1383
1384 if (replay_mode != REPLAY_MODE_PLAY) {
dcb15780
PD
1385 /*
1386 * Include all the timers, because they may need an attention.
1387 * Too long CPU execution may create unnecessary delay in UI.
1388 */
1389 deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
1390 QEMU_TIMER_ATTR_ALL);
1be7fcb8
AB
1391
1392 /* Maintain prior (possibly buggy) behaviour where if no deadline
1393 * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
1394 * INT32_MAX nanoseconds ahead, we still use INT32_MAX
1395 * nanoseconds.
1396 */
1397 if ((deadline < 0) || (deadline > INT32_MAX)) {
1398 deadline = INT32_MAX;
1399 }
1400
1401 return qemu_icount_round(deadline);
1402 } else {
1403 return replay_get_instructions();
1404 }
1405}
1406
12e9700d
AB
1407static void handle_icount_deadline(void)
1408{
6b8f0187 1409 assert(qemu_in_vcpu_thread());
12e9700d 1410 if (use_icount) {
dcb15780
PD
1411 int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
1412 QEMU_TIMER_ATTR_ALL);
12e9700d
AB
1413
1414 if (deadline == 0) {
6b8f0187 1415 /* Wake up other AioContexts. */
12e9700d 1416 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
6b8f0187 1417 qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
12e9700d
AB
1418 }
1419 }
1420}
1421
05248382 1422static void prepare_icount_for_run(CPUState *cpu)
1be7fcb8 1423{
1be7fcb8 1424 if (use_icount) {
eda5f7c6 1425 int insns_left;
05248382
AB
1426
1427 /* These should always be cleared by process_icount_data after
1428 * each vCPU execution. However u16.high can be raised
1429 * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt
1430 */
5e140196 1431 g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0);
05248382
AB
1432 g_assert(cpu->icount_extra == 0);
1433
eda5f7c6
AB
1434 cpu->icount_budget = tcg_get_icount_limit();
1435 insns_left = MIN(0xffff, cpu->icount_budget);
5e140196 1436 cpu_neg(cpu)->icount_decr.u16.low = insns_left;
eda5f7c6 1437 cpu->icount_extra = cpu->icount_budget - insns_left;
d759c951
AB
1438
1439 replay_mutex_lock();
1be7fcb8 1440 }
05248382
AB
1441}
1442
1443static void process_icount_data(CPUState *cpu)
1444{
1be7fcb8 1445 if (use_icount) {
e4cd9657 1446 /* Account for executed instructions */
512d3c80 1447 cpu_update_icount(cpu);
05248382
AB
1448
1449 /* Reset the counters */
5e140196 1450 cpu_neg(cpu)->icount_decr.u16.low = 0;
1be7fcb8 1451 cpu->icount_extra = 0;
e4cd9657
AB
1452 cpu->icount_budget = 0;
1453
1be7fcb8 1454 replay_account_executed_instructions();
d759c951
AB
1455
1456 replay_mutex_unlock();
1be7fcb8 1457 }
05248382
AB
1458}
1459
1460
1461static int tcg_cpu_exec(CPUState *cpu)
1462{
1463 int ret;
1464#ifdef CONFIG_PROFILER
1465 int64_t ti;
1466#endif
1467
f28d0dfd 1468 assert(tcg_enabled());
05248382
AB
1469#ifdef CONFIG_PROFILER
1470 ti = profile_getclock();
1471#endif
05248382
AB
1472 cpu_exec_start(cpu);
1473 ret = cpu_exec(cpu);
1474 cpu_exec_end(cpu);
05248382 1475#ifdef CONFIG_PROFILER
72fd2efb
EC
1476 atomic_set(&tcg_ctx->prof.cpu_exec_time,
1477 tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti);
05248382 1478#endif
1be7fcb8
AB
1479 return ret;
1480}
1481
c93bbbef
AB
1482/* Destroy any remaining vCPUs which have been unplugged and have
1483 * finished running
1484 */
1485static void deal_with_unplugged_cpus(void)
1be7fcb8 1486{
c93bbbef 1487 CPUState *cpu;
1be7fcb8 1488
c93bbbef
AB
1489 CPU_FOREACH(cpu) {
1490 if (cpu->unplug && !cpu_can_run(cpu)) {
1491 qemu_tcg_destroy_vcpu(cpu);
1492 cpu->created = false;
1493 qemu_cond_signal(&qemu_cpu_cond);
1be7fcb8
AB
1494 break;
1495 }
1496 }
1be7fcb8 1497}
bdb7ca67 1498
6546706d
AB
1499/* Single-threaded TCG
1500 *
1501 * In the single-threaded case each vCPU is simulated in turn. If
1502 * there is more than a single vCPU we create a simple timer to kick
1503 * the vCPU and ensure we don't get stuck in a tight loop in one vCPU.
1504 * This is done explicitly rather than relying on side-effects
1505 * elsewhere.
1506 */
1507
37257942 1508static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
296af7c9 1509{
c3586ba7 1510 CPUState *cpu = arg;
296af7c9 1511
f28d0dfd 1512 assert(tcg_enabled());
ab28bd23 1513 rcu_register_thread();
3468b59e 1514 tcg_register_thread();
ab28bd23 1515
2e7f7a3c 1516 qemu_mutex_lock_iothread();
814e612e 1517 qemu_thread_get_self(cpu->thread);
296af7c9 1518
5a9c973b
DH
1519 cpu->thread_id = qemu_get_thread_id();
1520 cpu->created = true;
1521 cpu->can_do_io = 1;
296af7c9 1522 qemu_cond_signal(&qemu_cpu_cond);
9c09a251 1523 qemu_guest_random_seed_thread_part2(cpu->random_seed);
296af7c9 1524
fa7d1867 1525 /* wait for initial kick-off after machine start */
c28e399c 1526 while (first_cpu->stopped) {
d5f8d613 1527 qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);
8e564b4e
JK
1528
1529 /* process any pending work */
bdc44640 1530 CPU_FOREACH(cpu) {
37257942 1531 current_cpu = cpu;
182735ef 1532 qemu_wait_io_event_common(cpu);
8e564b4e 1533 }
0ab07c62 1534 }
296af7c9 1535
6546706d
AB
1536 start_tcg_kick_timer();
1537
c93bbbef
AB
1538 cpu = first_cpu;
1539
e5143e30
AB
1540 /* process any pending work */
1541 cpu->exit_request = 1;
1542
296af7c9 1543 while (1) {
d759c951
AB
1544 qemu_mutex_unlock_iothread();
1545 replay_mutex_lock();
1546 qemu_mutex_lock_iothread();
c93bbbef
AB
1547 /* Account partial waits to QEMU_CLOCK_VIRTUAL. */
1548 qemu_account_warp_timer();
1549
6b8f0187
PB
1550 /* Run the timers here. This is much more efficient than
1551 * waking up the I/O thread and waiting for completion.
1552 */
1553 handle_icount_deadline();
1554
d759c951
AB
1555 replay_mutex_unlock();
1556
c93bbbef
AB
1557 if (!cpu) {
1558 cpu = first_cpu;
1559 }
1560
e5143e30
AB
1561 while (cpu && !cpu->queued_work_first && !cpu->exit_request) {
1562
791158d9 1563 atomic_mb_set(&tcg_current_rr_cpu, cpu);
37257942 1564 current_cpu = cpu;
c93bbbef
AB
1565
1566 qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
1567 (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
1568
1569 if (cpu_can_run(cpu)) {
1570 int r;
05248382 1571
d759c951 1572 qemu_mutex_unlock_iothread();
05248382
AB
1573 prepare_icount_for_run(cpu);
1574
c93bbbef 1575 r = tcg_cpu_exec(cpu);
05248382
AB
1576
1577 process_icount_data(cpu);
d759c951 1578 qemu_mutex_lock_iothread();
05248382 1579
c93bbbef
AB
1580 if (r == EXCP_DEBUG) {
1581 cpu_handle_guest_debug(cpu);
1582 break;
08e73c48
PK
1583 } else if (r == EXCP_ATOMIC) {
1584 qemu_mutex_unlock_iothread();
1585 cpu_exec_step_atomic(cpu);
1586 qemu_mutex_lock_iothread();
1587 break;
c93bbbef 1588 }
37257942 1589 } else if (cpu->stop) {
c93bbbef
AB
1590 if (cpu->unplug) {
1591 cpu = CPU_NEXT(cpu);
1592 }
1593 break;
1594 }
1595
e5143e30
AB
1596 cpu = CPU_NEXT(cpu);
1597 } /* while (cpu && !cpu->exit_request).. */
1598
791158d9
AB
1599 /* Does not need atomic_mb_set because a spurious wakeup is okay. */
1600 atomic_set(&tcg_current_rr_cpu, NULL);
c93bbbef 1601
e5143e30
AB
1602 if (cpu && cpu->exit_request) {
1603 atomic_mb_set(&cpu->exit_request, 0);
1604 }
ac70aafc 1605
013aabdc
CD
1606 if (use_icount && all_cpu_threads_idle()) {
1607 /*
1608 * When all cpus are sleeping (e.g in WFI), to avoid a deadlock
1609 * in the main_loop, wake it up in order to start the warp timer.
1610 */
1611 qemu_notify_event();
1612 }
1613
a8efa606 1614 qemu_tcg_rr_wait_io_event();
c93bbbef 1615 deal_with_unplugged_cpus();
296af7c9
BS
1616 }
1617
9b0605f9 1618 rcu_unregister_thread();
296af7c9
BS
1619 return NULL;
1620}
1621
b0cb0a66
VP
1622static void *qemu_hax_cpu_thread_fn(void *arg)
1623{
1624 CPUState *cpu = arg;
1625 int r;
b3d3a426 1626
9857c2d2 1627 rcu_register_thread();
b3d3a426 1628 qemu_mutex_lock_iothread();
b0cb0a66 1629 qemu_thread_get_self(cpu->thread);
b0cb0a66
VP
1630
1631 cpu->thread_id = qemu_get_thread_id();
1632 cpu->created = true;
b0cb0a66
VP
1633 current_cpu = cpu;
1634
1635 hax_init_vcpu(cpu);
1636 qemu_cond_signal(&qemu_cpu_cond);
9c09a251 1637 qemu_guest_random_seed_thread_part2(cpu->random_seed);
b0cb0a66 1638
9857c2d2 1639 do {
b0cb0a66
VP
1640 if (cpu_can_run(cpu)) {
1641 r = hax_smp_cpu_exec(cpu);
1642 if (r == EXCP_DEBUG) {
1643 cpu_handle_guest_debug(cpu);
1644 }
1645 }
1646
db08b687 1647 qemu_wait_io_event(cpu);
9857c2d2
PB
1648 } while (!cpu->unplug || cpu_can_run(cpu));
1649 rcu_unregister_thread();
b0cb0a66
VP
1650 return NULL;
1651}
1652
c97d6d2c
SAGDR
1653/* The HVF-specific vCPU thread function. This one should only run when the host
1654 * CPU supports the VMX "unrestricted guest" feature. */
1655static void *qemu_hvf_cpu_thread_fn(void *arg)
1656{
1657 CPUState *cpu = arg;
1658
1659 int r;
1660
1661 assert(hvf_enabled());
1662
1663 rcu_register_thread();
1664
1665 qemu_mutex_lock_iothread();
1666 qemu_thread_get_self(cpu->thread);
1667
1668 cpu->thread_id = qemu_get_thread_id();
1669 cpu->can_do_io = 1;
1670 current_cpu = cpu;
1671
1672 hvf_init_vcpu(cpu);
1673
1674 /* signal CPU creation */
1675 cpu->created = true;
1676 qemu_cond_signal(&qemu_cpu_cond);
9c09a251 1677 qemu_guest_random_seed_thread_part2(cpu->random_seed);
c97d6d2c
SAGDR
1678
1679 do {
1680 if (cpu_can_run(cpu)) {
1681 r = hvf_vcpu_exec(cpu);
1682 if (r == EXCP_DEBUG) {
1683 cpu_handle_guest_debug(cpu);
1684 }
1685 }
db08b687 1686 qemu_wait_io_event(cpu);
c97d6d2c
SAGDR
1687 } while (!cpu->unplug || cpu_can_run(cpu));
1688
1689 hvf_vcpu_destroy(cpu);
1690 cpu->created = false;
1691 qemu_cond_signal(&qemu_cpu_cond);
1692 qemu_mutex_unlock_iothread();
8178e637 1693 rcu_unregister_thread();
c97d6d2c
SAGDR
1694 return NULL;
1695}
1696
19306806
JTV
1697static void *qemu_whpx_cpu_thread_fn(void *arg)
1698{
1699 CPUState *cpu = arg;
1700 int r;
1701
1702 rcu_register_thread();
1703
1704 qemu_mutex_lock_iothread();
1705 qemu_thread_get_self(cpu->thread);
1706 cpu->thread_id = qemu_get_thread_id();
1707 current_cpu = cpu;
1708
1709 r = whpx_init_vcpu(cpu);
1710 if (r < 0) {
1711 fprintf(stderr, "whpx_init_vcpu failed: %s\n", strerror(-r));
1712 exit(1);
1713 }
1714
1715 /* signal CPU creation */
1716 cpu->created = true;
1717 qemu_cond_signal(&qemu_cpu_cond);
9c09a251 1718 qemu_guest_random_seed_thread_part2(cpu->random_seed);
19306806
JTV
1719
1720 do {
1721 if (cpu_can_run(cpu)) {
1722 r = whpx_vcpu_exec(cpu);
1723 if (r == EXCP_DEBUG) {
1724 cpu_handle_guest_debug(cpu);
1725 }
1726 }
1727 while (cpu_thread_is_idle(cpu)) {
1728 qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
1729 }
1730 qemu_wait_io_event_common(cpu);
1731 } while (!cpu->unplug || cpu_can_run(cpu));
1732
1733 whpx_destroy_vcpu(cpu);
1734 cpu->created = false;
1735 qemu_cond_signal(&qemu_cpu_cond);
1736 qemu_mutex_unlock_iothread();
1737 rcu_unregister_thread();
c97d6d2c
SAGDR
1738 return NULL;
1739}
1740
b0cb0a66
VP
1741#ifdef _WIN32
1742static void CALLBACK dummy_apc_func(ULONG_PTR unused)
1743{
1744}
1745#endif
1746
/* Multi-threaded TCG
 *
 * In the multi-threaded case each vCPU has its own thread. The TLS
 * variable current_cpu can be used deep in the code to find the
 * current CPUState for a given thread.
 */

static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    assert(tcg_enabled());
    g_assert(!use_icount);

    rcu_register_thread();
    tcg_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    cpu->created = true;
    cpu->can_do_io = 1;
    current_cpu = cpu;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);

    /* process any pending work */
    cpu->exit_request = 1;

    do {
        if (cpu_can_run(cpu)) {
            int r;
            qemu_mutex_unlock_iothread();
            r = tcg_cpu_exec(cpu);
            qemu_mutex_lock_iothread();
            switch (r) {
            case EXCP_DEBUG:
                cpu_handle_guest_debug(cpu);
                break;
            case EXCP_HALTED:
                /* during start-up the vCPU is reset and the thread is
                 * kicked several times. If we don't ensure we go back
                 * to sleep in the halted state we won't cleanly
                 * start-up when the vCPU is enabled.
                 *
                 * cpu->halted should ensure we sleep in wait_io_event
                 */
                g_assert(cpu->halted);
                break;
            case EXCP_ATOMIC:
                qemu_mutex_unlock_iothread();
                cpu_exec_step_atomic(cpu);
                qemu_mutex_lock_iothread();
                break;
            default:
                /* Ignore everything else? */
                break;
            }
        }

        atomic_mb_set(&cpu->exit_request, 0);
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    qemu_tcg_destroy_vcpu(cpu);
    cpu->created = false;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
}

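/*
 * Force the thread running @cpu out of a blocking wait: on POSIX hosts
 * by sending it SIG_IPI, on Windows via whpx_vcpu_kick() or a dummy
 * user-mode APC.
 */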
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    if (cpu->thread_kicked) {
        return;
    }
    cpu->thread_kicked = true;
    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err && err != ESRCH) {
        fprintf(stderr, "qemu:%s: %s\n", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        if (whpx_enabled()) {
            whpx_vcpu_kick(cpu);
        } else if (!QueueUserAPC(dummy_apc_func, cpu->hThread, 0)) {
            fprintf(stderr, "%s: QueueUserAPC failed with error %lu\n",
                    __func__, GetLastError());
            exit(1);
        }
    }
#endif
}
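
/*
 * Wake up @cpu: broadcast on its halt condition variable, then force it
 * out of guest execution (cpu_exit() for MTTCG, a round-robin kick for
 * single-threaded TCG, a thread kick for the other accelerators).
 */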
void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (tcg_enabled()) {
        if (qemu_tcg_mttcg_enabled()) {
            cpu_exit(cpu);
        } else {
            qemu_cpu_kick_rr_cpus();
        }
    } else {
        if (hax_enabled()) {
            /*
             * FIXME: race condition with the exit_request check in
             * hax_vcpu_hax_exec
             */
            cpu->exit_request = 1;
        }
        qemu_cpu_kick_thread(cpu);
    }
}

void qemu_cpu_kick_self(void)
{
    assert(current_cpu);
    qemu_cpu_kick_thread(current_cpu);
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}

static __thread bool iothread_locked = false;

bool qemu_mutex_iothread_locked(void)
{
    return iothread_locked;
}

/*
 * The BQL is taken from so many places that it is worth profiling the
 * callers directly, instead of funneling them all through a single function.
 */
void qemu_mutex_lock_iothread_impl(const char *file, int line)
{
    QemuMutexLockFunc bql_lock = atomic_read(&qemu_bql_mutex_lock_func);

    g_assert(!qemu_mutex_iothread_locked());
    bql_lock(&qemu_global_mutex, file, line);
    iothread_locked = true;
}

void qemu_mutex_unlock_iothread(void)
{
    g_assert(qemu_mutex_iothread_locked());
    iothread_locked = false;
    qemu_mutex_unlock(&qemu_global_mutex);
}

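/*
 * Typical pattern for long-running work on a vCPU thread (see
 * qemu_tcg_cpu_thread_fn() above): drop the BQL around guest execution
 * and re-take it before touching shared device state, e.g.:
 *
 *     qemu_mutex_unlock_iothread();
 *     r = tcg_cpu_exec(cpu);
 *     qemu_mutex_lock_iothread();
 */
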
static bool all_vcpus_paused(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu->stopped) {
            return false;
        }
    }

    return true;
}

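/*
 * Stop every vCPU and wait until all of them report that they have
 * stopped.  Called with the BQL held; both the replay lock and the BQL
 * are dropped temporarily so woken vCPU threads can finish their work.
 */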
void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        if (qemu_cpu_is_self(cpu)) {
            qemu_cpu_stop(cpu, true);
        } else {
            cpu->stop = true;
            qemu_cpu_kick(cpu);
        }
    }

    /* We need to drop the replay_lock so any vCPU threads woken up
     * can finish their replay tasks
     */
    replay_mutex_unlock();

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }

    qemu_mutex_unlock_iothread();
    replay_mutex_lock();
    qemu_mutex_lock_iothread();
}

void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}

void resume_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    CPU_FOREACH(cpu) {
        cpu_resume(cpu);
    }
}

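/*
 * Ask the vCPU thread for @cpu to exit and join it.  The BQL is
 * dropped across the join so the exiting thread can take it for its
 * final cleanup.
 */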
void cpu_remove_sync(CPUState *cpu)
{
    cpu->stop = true;
    cpu->unplug = true;
    qemu_cpu_kick(cpu);
    qemu_mutex_unlock_iothread();
    qemu_thread_join(cpu->thread);
    qemu_mutex_lock_iothread();
}

/* For temporary buffers for forming a name */
#define VCPU_THREAD_NAME_SIZE 16

static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];
    static QemuCond *single_tcg_halt_cond;
    static QemuThread *single_tcg_cpu_thread;
    static int tcg_region_inited;

    assert(tcg_enabled());
    /*
     * Initialize TCG regions--once. Now is a good time, because:
     * (1) TCG's init context, prologue and target globals have been set up.
     * (2) qemu_tcg_mttcg_enabled() works now (TCG init code runs before the
     *     -accel flag is processed, so the check doesn't work then).
     */
    if (!tcg_region_inited) {
        tcg_region_inited = 1;
        tcg_region_init();
    }

    if (qemu_tcg_mttcg_enabled() || !single_tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);

        if (qemu_tcg_mttcg_enabled()) {
            /* create a thread per vCPU with TCG (MTTCG) */
            parallel_cpus = true;
            snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
                     cpu->cpu_index);

            qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
                               cpu, QEMU_THREAD_JOINABLE);

        } else {
            /* share a single thread for all cpus with TCG */
            snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG");
            qemu_thread_create(cpu->thread, thread_name,
                               qemu_tcg_rr_cpu_thread_fn,
                               cpu, QEMU_THREAD_JOINABLE);

            single_tcg_halt_cond = cpu->halt_cond;
            single_tcg_cpu_thread = cpu->thread;
        }
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
    } else {
        /* For non-MTTCG cases we share the thread */
        cpu->thread = single_tcg_cpu_thread;
        cpu->halt_cond = single_tcg_halt_cond;
        cpu->thread_id = first_cpu->thread_id;
        cpu->can_do_io = 1;
        cpu->created = true;
    }
}

static void qemu_hax_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);

    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HAX",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_hax_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
#ifdef _WIN32
    cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
}

static void qemu_kvm_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
}

static void qemu_hvf_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    /* HVF currently does not support TCG, and only runs in
     * unrestricted-guest mode. */
    assert(hvf_enabled());

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);

    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_hvf_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
}

static void qemu_whpx_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/WHPX",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_whpx_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
#ifdef _WIN32
    cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
}

static void qemu_dummy_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
}

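/*
 * Create and start the vCPU thread for @cpu with whichever accelerator
 * is enabled, then block until the new thread signals qemu_cpu_cond.
 */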
void qemu_init_vcpu(CPUState *cpu)
{
    MachineState *ms = MACHINE(qdev_get_machine());

    cpu->nr_cores = ms->smp.cores;
    cpu->nr_threads = ms->smp.threads;
    cpu->stopped = true;
    cpu->random_seed = qemu_guest_random_seed_thread_part1();

    if (!cpu->as) {
        /* If the target cpu hasn't set up any address spaces itself,
         * give it the default one.
         */
        cpu->num_ases = 1;
        cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory);
    }

    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
    } else if (hax_enabled()) {
        qemu_hax_start_vcpu(cpu);
    } else if (hvf_enabled()) {
        qemu_hvf_start_vcpu(cpu);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else if (whpx_enabled()) {
        qemu_whpx_start_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(cpu);
    }

    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = true;
        cpu_exit(current_cpu);
    }
}

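/*
 * Stop the VM with run state @state.  From a vCPU thread the stop is
 * only requested here and completed later by the main loop; from other
 * threads the VM is stopped synchronously.
 */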
int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request_prepare();
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state, true);
}

/**
 * Prepare for (re)starting the VM.
 * Returns -1 if the vCPUs are not to be restarted (e.g. if they are already
 * running or in case of an error condition), 0 otherwise.
 */
int vm_prepare_start(void)
{
    RunState requested;

    qemu_vmstop_requested(&requested);
    if (runstate_is_running() && requested == RUN_STATE__MAX) {
        return -1;
    }

    /* Ensure that a STOP/RESUME pair of events is emitted if a
     * vmstop request was pending. The BLOCK_IO_ERROR event, for
     * example, according to documentation is always followed by
     * the STOP event.
     */
    if (runstate_is_running()) {
        qapi_event_send_stop();
        qapi_event_send_resume();
        return -1;
    }

    /* We are sending this now, but the CPUs will be resumed shortly later */
    qapi_event_send_resume();

    cpu_enable_ticks();
    runstate_set(RUN_STATE_RUNNING);
    vm_state_notify(1, RUN_STATE_RUNNING);
    return 0;
}

void vm_start(void)
{
    if (!vm_prepare_start()) {
        resume_all_vcpus();
    }
}

/* does a state transition even if the VM is already stopped,
   current state is forgotten forever */
int vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        return vm_stop(state);
    } else {
        runstate_set(state);

        bdrv_drain_all();
        /* Make sure to return an error if the flush in a previous vm_stop()
         * failed. */
        return bdrv_flush_all();
    }
}

void list_cpus(const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list();
#endif
}

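/*
 * QMP handler for "memsave": dump @size bytes of guest virtual memory,
 * as seen by the selected vCPU, starting at @addr into @filename.
 * Illustrative wire format (the QAPI schema names the address argument
 * "val"):
 *
 *     { "execute": "memsave",
 *       "arguments": { "val": 4096, "size": 1024,
 *                      "filename": "/tmp/virtual-mem-dump" } }
 */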
void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUState *cpu;
    uint8_t buf[1024];
    int64_t orig_addr = addr, orig_size = size;

    if (!has_cpu) {
        cpu_index = 0;
    }

    cpu = qemu_get_cpu(cpu_index);
    if (cpu == NULL) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                   "a CPU number");
        return;
    }

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
            error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
                             " specified", orig_addr, orig_size);
            goto exit;
        }
        if (fwrite(buf, 1, l, f) != l) {
            error_setg(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

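/*
 * QMP handler for "pmemsave": like memsave, but reads guest physical
 * memory and takes no cpu-index.  Illustrative invocation:
 *
 *     { "execute": "pmemsave",
 *       "arguments": { "val": 77, "size": 1024,
 *                      "filename": "/tmp/physical-mem-dump" } }
 */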
void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        cpu_physical_memory_read(addr, buf, l);
        if (fwrite(buf, 1, l, f) != l) {
            error_setg(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_inject_nmi(Error **errp)
{
    nmi_monitor_handle(monitor_get_cpu_index(), errp);
}

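/*
 * Print host/guest clock drift statistics; a no-op unless the VM runs
 * in icount mode.
 */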
void dump_drift_info(void)
{
    if (!use_icount) {
        return;
    }

    qemu_printf("Host - Guest clock %"PRIi64" ms\n",
                (cpu_get_clock() - cpu_get_icount()) / SCALE_MS);
    if (icount_align_option) {
        qemu_printf("Max guest delay %"PRIi64" ms\n",
                    -max_delay / SCALE_MS);
        qemu_printf("Max guest advance %"PRIi64" ms\n",
                    max_advance / SCALE_MS);
    } else {
        qemu_printf("Max guest delay NA\n");
        qemu_printf("Max guest advance NA\n");
    }
}